diff --git a/.azure-pipelines/env.yml b/.azure-pipelines/env.yml index 8eecc9e2d529..3b3ebf6d2eeb 100644 --- a/.azure-pipelines/env.yml +++ b/.azure-pipelines/env.yml @@ -135,15 +135,6 @@ jobs: # TODO(phlax): move this to a script to ensure proper linting etc set -e - # Run everything in postsubmit - if [[ "$(Build.Reason)" != "PullRequest" ]]; then - echo "##vso[task.setvariable variable=build;isoutput=true]true" - echo "##vso[task.setvariable variable=checks;isoutput=true]true" - echo "##vso[task.setvariable variable=docker;isoutput=true]true" - echo "##vso[task.setvariable variable=packaging;isoutput=true]true" - exit 0 - fi - RUN_BUILD=true RUN_CHECKS=true RUN_DOCKER=true @@ -165,6 +156,16 @@ jobs: RUN_RELEASE_TESTS=false fi + # Run ~everything in postsubmit + if [[ "$(Build.Reason)" != "PullRequest" ]]; then + echo "##vso[task.setvariable variable=build;isoutput=true]true" + echo "##vso[task.setvariable variable=checks;isoutput=true]true" + echo "##vso[task.setvariable variable=docker;isoutput=true]true" + echo "##vso[task.setvariable variable=packaging;isoutput=true]true" + echo "##vso[task.setvariable variable=releaseTests;isoutput=true]${RUN_RELEASE_TESTS}" + exit 0 + fi + echo "##vso[task.setvariable variable=build;isoutput=true]${RUN_BUILD}" echo "##vso[task.setvariable variable=checks;isoutput=true]${RUN_CHECKS}" echo "##vso[task.setvariable variable=docker;isoutput=true]${RUN_DOCKER}" diff --git a/.azure-pipelines/pipelines.yml b/.azure-pipelines/pipelines.yml index 99f458d07abd..0b55d0a6219a 100644 --- a/.azure-pipelines/pipelines.yml +++ b/.azure-pipelines/pipelines.yml @@ -80,10 +80,6 @@ stages: - env checkStageDeps: - env - macBuildStageDeps: - - env - windowsBuildStageDeps: - - env # Postsubmit main/release branches - ${{ if eq(variables.pipelinePostsubmit, true) }}: @@ -96,7 +92,3 @@ stages: - env checkStageDeps: - env - macBuildStageDeps: - - env - windowsBuildStageDeps: - - env diff --git a/.azure-pipelines/stage/macos.yml 
b/.azure-pipelines/stage/macos.yml deleted file mode 100644 index fc990eafd737..000000000000 --- a/.azure-pipelines/stage/macos.yml +++ /dev/null @@ -1,62 +0,0 @@ - -parameters: - -# Auth -- name: authGCP - type: string - default: "" - -- name: runBuild - displayName: "Run build" - type: string - default: true - -jobs: -- job: test - displayName: Build and test - condition: | - and(not(canceled()), - eq(${{ parameters.runBuild }}, 'true')) - timeoutInMinutes: 180 - pool: - vmImage: "macos-11" - steps: - - script: ./ci/mac_ci_setup.sh - displayName: "Install dependencies" - - - bash: | - set -e - GCP_SERVICE_ACCOUNT_KEY_PATH=$(mktemp -t gcp_service_account.XXXXXX.json) - bash -c 'echo "$(GcpServiceAccountKey)"' | base64 --decode > "${GCP_SERVICE_ACCOUNT_KEY_PATH}" - BAZEL_BUILD_EXTRA_OPTIONS+=" --google_credentials=${GCP_SERVICE_ACCOUNT_KEY_PATH}" - ./ci/mac_ci_steps.sh - displayName: "Run Mac CI" - env: - BAZEL_BUILD_EXTRA_OPTIONS: >- - --remote_download_toplevel - --flaky_test_attempts=2 - --remote_cache=grpcs://remotebuildexecution.googleapis.com - --remote_instance_name=projects/envoy-ci/instances/default_instance - ENVOY_RBE: 1 - - - task: PublishTestResults@2 - inputs: - testResultsFiles: "**/bazel-testlogs/**/test.xml" - testRunTitle: "macOS" - timeoutInMinutes: 10 - condition: not(canceled()) - -- job: tested - displayName: Complete - dependsOn: ["test"] - pool: - vmImage: $(agentUbuntu) - # This condition ensures that this (required) job passes if all of - # the preceeding jobs either pass or are skipped - # adapted from: - # https://learn.microsoft.com/en-us/azure/devops/pipelines/process/expressions?view=azure-devops#job-to-job-dependencies-within-one-stage - condition: and(eq(variables['Build.Reason'], 'PullRequest'), in(dependencies.test.result, 'Succeeded', 'SucceededWithIssues', 'Skipped')) - steps: - - checkout: none - - bash: | - echo "macos tested" diff --git a/.azure-pipelines/stage/publish.yml b/.azure-pipelines/stage/publish.yml index 
b361552e4e20..30e62ebc362c 100644 --- a/.azure-pipelines/stage/publish.yml +++ b/.azure-pipelines/stage/publish.yml @@ -292,6 +292,7 @@ jobs: publishEnvoy: false publishTestResults: false env: + ENVOY_REPO: $(Build.Repository.Name) ${{ if eq(variables['Build.Reason'], 'PullRequest') }}: ENVOY_HEAD_REF: "$(Build.SourceBranch)" ENVOY_BRANCH: "$(System.PullRequest.TargetBranch)" diff --git a/.azure-pipelines/stage/windows.yml b/.azure-pipelines/stage/windows.yml deleted file mode 100644 index fa2729b82254..000000000000 --- a/.azure-pipelines/stage/windows.yml +++ /dev/null @@ -1,125 +0,0 @@ - -parameters: - -# Auth -- name: authGCP - type: string - default: "" - -- name: runBuild - displayName: "Run build" - type: string - default: true - -jobs: -- job: release - displayName: Build and test - condition: | - and(not(canceled()), - eq(${{ parameters.runBuild }}, 'true')) - timeoutInMinutes: 180 - pool: - vmImage: "windows-2019" - steps: - - task: Cache@2 - inputs: - key: '"windows.release" | $(cacheKeyBazel)' - path: $(Build.StagingDirectory)/repository_cache - continueOnError: true - - - bash: | - set -e - ENVOY_SHARED_TMP_DIR="C:\\Users\\VSSADM~1\\AppData\\Local\\Temp\\bazel-shared" - mkdir -p "$ENVOY_SHARED_TMP_DIR" - GCP_SERVICE_ACCOUNT_KEY_PATH=$(mktemp -p "${ENVOY_SHARED_TMP_DIR}" -t gcp_service_account.XXXXXX.json) - bash -c 'echo "$(GcpServiceAccountKey)"' | base64 --decode > "${GCP_SERVICE_ACCOUNT_KEY_PATH}" - export BAZEL_BUILD_EXTRA_OPTIONS+=" --google_credentials=${GCP_SERVICE_ACCOUNT_KEY_PATH}" - export ENVOY_SHARED_TMP_DIR - ci/run_envoy_docker.sh ci/windows_ci_steps.sh - displayName: "Run Windows msvc-cl CI" - env: - CI_TARGET: "windows" - ENVOY_DOCKER_BUILD_DIR: "$(Build.StagingDirectory)" - ENVOY_RBE: "true" - BAZEL_BUILD_EXTRA_OPTIONS: >- - --config=remote-ci - --config=rbe-google - --config=remote-msvc-cl - --jobs=$(RbeJobs) - --flaky_test_attempts=2 - - - task: PublishTestResults@2 - inputs: - testResultsFiles: "**/bazel-out/**/testlogs/**/test.xml" 
- testRunTitle: "windows" - searchFolder: $(Build.StagingDirectory)/tmp - timeoutInMinutes: 10 - condition: not(canceled()) - - task: PublishBuildArtifacts@1 - inputs: - pathtoPublish: "$(Build.StagingDirectory)/envoy" - artifactName: windows.release - timeoutInMinutes: 10 - condition: not(canceled()) - -- job: docker - displayName: Build Docker image - condition: and(not(canceled()), succeeded(), ne(stageDependencies.env.repo.outputs['changed.mobileOnly'], 'true'), ne(stageDependencies.env.repo.outputs['changed.docsOnly'], 'true'), ne(stageDependencies.env.repo.outputs['changed.examplesOnly'], 'true')) - strategy: - matrix: - windows2019: - imageName: 'windows-2019' - windowsBuildType: "windows" - windowsImageBase: "mcr.microsoft.com/windows/servercore" - windowsImageTag: "ltsc2019" - windows2022: - imageName: 'windows-2022' - windowsBuildType: "windows-ltsc2022" - windowsImageBase: "mcr.microsoft.com/windows/nanoserver" - windowsImageTag: "ltsc2022" - dependsOn: ["release"] - timeoutInMinutes: 120 - pool: - vmImage: $(imageName) - steps: - - task: DownloadBuildArtifacts@0 - inputs: - buildType: current - artifactName: "windows.release" - itemPattern: "windows.release/envoy_binary.tar.gz" - downloadType: single - targetPath: $(Build.StagingDirectory) - - bash: | - set -e - # Convert to Unix-style path so tar doesn't think drive letter is a hostname - STAGING_DIR="/$(echo '$(Build.StagingDirectory)' | tr -d ':' | tr '\\' '/')" - mkdir -p windows/amd64 && tar zxf "${STAGING_DIR}/windows.release/envoy_binary.tar.gz" -C ./windows/amd64 - ci/docker_ci.sh - workingDirectory: $(Build.SourcesDirectory) - env: - CI_BRANCH: $(Build.SourceBranch) - CI_SHA1: $(Build.SourceVersion) - DOCKERHUB_USERNAME: $(DockerUsername) - DOCKERHUB_PASSWORD: $(DockerPassword) - WINDOWS_BUILD_TYPE: $(windowsBuildType) - WINDOWS_IMAGE_BASE: $(windowsImageBase) - WINDOWS_IMAGE_TAG: $(windowsImageTag) - -- job: released - displayName: Complete - dependsOn: ["release", "docker"] - pool: - vmImage: 
$(agentUbuntu) - # This condition ensures that this (required) job passes if all of - # the preceeding jobs either pass or are skipped - # adapted from: - # https://learn.microsoft.com/en-us/azure/devops/pipelines/process/expressions?view=azure-devops#job-to-job-dependencies-within-one-stage - condition: | - and( - eq(variables['Build.Reason'], 'PullRequest'), - in(dependencies.release.result, 'Succeeded', 'SucceededWithIssues', 'Skipped'), - in(dependencies.docker.result, 'Succeeded', 'SucceededWithIssues', 'Skipped')) - steps: - - checkout: none - - bash: | - echo "windows released" diff --git a/.azure-pipelines/stages.yml b/.azure-pipelines/stages.yml index ab3fbca2075a..bc308c659d4b 100644 --- a/.azure-pipelines/stages.yml +++ b/.azure-pipelines/stages.yml @@ -8,18 +8,6 @@ parameters: default: - env - prechecks -- name: macBuildStageDeps - displayName: "macOS stage dependencies" - type: object - default: - - env - - prechecks -- name: windowsBuildStageDeps - displayName: "Windows stage dependencies" - type: object - default: - - env - - prechecks - name: checkStageDeps displayName: "Check stage dependencies" type: object @@ -60,8 +48,6 @@ stages: - stage: prechecks displayName: Prechecks dependsOn: ["env"] - variables: - RUN_PRECHECKS: $[stageDependencies.env.repo.outputs['run.releaseTests']] jobs: - template: stage/prechecks.yml parameters: @@ -72,7 +58,7 @@ stages: authGPGKey: $(MaintainerGPGKeySecureFileDownloadPath) authGPGPath: $(MaintainerGPGKey.secureFilePath) bucketGCP: $(GcsArtifactBucket) - runPrechecks: variables['RUN_PRECHECKS'] + runPrechecks: stageDependencies.env.repo.outputs['run.releaseTests'] - stage: linux_x64 displayName: Linux x64 @@ -84,10 +70,10 @@ stages: - template: stage/linux.yml parameters: cacheTestResults: ${{ parameters.cacheTestResults }} + pool: envoy-x64-large # these are parsed differently and _must_ be expressed in this way runBuild: variables['RUN_BUILD'] runTests: $(RUN_TESTS) - tmpfsDockerDisabled: true - stage: 
linux_arm64 displayName: Linux arm64 @@ -159,25 +145,3 @@ stages: - template: stage/verify.yml parameters: authGCP: $(GcpServiceAccountKey) - -- stage: macos - displayName: macOS - dependsOn: ${{ parameters.macBuildStageDeps }} - variables: - RUN_BUILD: $[stageDependencies.env.repo.outputs['run.build']] - jobs: - - template: stage/macos.yml - parameters: - authGCP: $(GcpServiceAccountKey) - runBuild: variables['RUN_BUILD'] - -- stage: windows - displayName: Windows - dependsOn: ${{ parameters.windowsBuildStageDeps }} - variables: - RUN_BUILD: $[stageDependencies.env.repo.outputs['run.build']] - jobs: - - template: stage/windows.yml - parameters: - authGCP: $(GcpServiceAccountKey) - runBuild: variables['RUN_BUILD'] diff --git a/.bazelrc b/.bazelrc index fa3ec866f6fc..a5b1ab886dba 100644 --- a/.bazelrc +++ b/.bazelrc @@ -302,6 +302,7 @@ build:remote-windows --spawn_strategy=remote,local build:remote-windows --strategy=Javac=remote,local build:remote-windows --strategy=Closure=remote,local build:remote-windows --strategy=Genrule=remote,local +build:remote-windows --strategy=CppLink=local build:remote-windows --remote_timeout=7200 build:remote-windows --google_default_credentials=true build:remote-windows --remote_download_toplevel @@ -442,7 +443,7 @@ build:windows --define hot_restart=disabled build:windows --define tcmalloc=disabled build:windows --define wasm=disabled build:windows --define manual_stamp=manual_stamp -build:windows --cxxopt="/std:c++17" +build:windows --cxxopt="/std:c++20" build:windows --output_groups=+pdb_file # TODO(wrowe,sunjayBhatia): Resolve bugs upstream in curl and rules_foreign_cc @@ -485,11 +486,12 @@ build:windows --features=static_link_msvcrt build:windows --dynamic_mode=off # RBE (Google) -build:rbe-google --google_default_credentials=true -build:rbe-google --remote_cache=grpcs://remotebuildexecution.googleapis.com +build:cache-google --google_default_credentials=true +build:cache-google 
--remote_cache=grpcs://remotebuildexecution.googleapis.com +build:cache-google --remote_instance_name=projects/envoy-ci/instances/default_instance +build:cache-google --remote_timeout=7200 build:rbe-google --remote_executor=grpcs://remotebuildexecution.googleapis.com -build:rbe-google --remote_timeout=7200 -build:rbe-google --remote_instance_name=projects/envoy-ci/instances/default_instance +build:rbe-google --config=cache-google build:rbe-google-bes --bes_backend=grpcs://buildeventservice.googleapis.com build:rbe-google-bes --bes_results_url=https://source.cloud.google.com/results/invocations/ diff --git a/.github/actions/do_ci/action.yml b/.github/actions/do_ci/action.yml deleted file mode 100644 index 374a7371aea5..000000000000 --- a/.github/actions/do_ci/action.yml +++ /dev/null @@ -1,75 +0,0 @@ -inputs: - target: - required: true - type: string - rbe: - type: boolean - default: true - managed: - type: boolean - default: true - - auth_bazel_rbe: - type: string - default: '' - - bazel_extra: - type: string - default: - bazel_rbe_jobs: - type: number - default: 75 - - command_prefix: - type: string - default: ./ci/run_envoy_docker.sh - command_ci: - type: string - default: ./ci/do_ci.sh - catch-errors: - type: boolean - default: false - error-match: - type: string - default: | - ERROR - warning-match: - type: string - default: | - WARNING - notice-match: - type: string - default: | - NOTICE - - env: - type: string - - GITHUB_TOKEN: - required: true - -runs: - using: composite - steps: - - uses: envoyproxy/toolshed/gh-actions/github/run@actions-v0.0.35 - name: 'Run CI target ${{ inputs.target }}' - with: - catch-errors: ${{ inputs.catch-errors }} - container-command: ${{ inputs.command_prefix }} - command-prefix: ${{ inputs.command_ci }} - command: ${{ inputs.target }} - env: ${{ inputs.env }} - error-match: ${{ inputs.error-match }} - notice-match: ${{ inputs.notice-match }} - warning-match: ${{ inputs.warning-match }} - env: - GITHUB_TOKEN: ${{ 
inputs.GITHUB_TOKEN }} - ENVOY_DOCKER_BUILD_DIR: ${{ runner.temp }} - ENVOY_RBE: ${{ inputs.rbe != 'false' && 1 || '' }} - GCP_SERVICE_ACCOUNT_KEY: ${{ inputs.rbe && inputs.auth_bazel_rbe || '' }} - BAZEL_BUILD_EXTRA_OPTIONS: >- - --config=remote-ci - ${{ inputs.bazel_extra }} - ${{ inputs.rbe != 'false' && format('--jobs={0}', inputs.bazel_rbe_jobs) || '' }} - BAZEL_FAKE_SCM_REVISION: ${{ github.event_name == 'pull_request' && 'e3b4a6e9570da15ac1caffdded17a8bebdc7dfc9' || '' }} - CI_TARGET_BRANCH: ${{ github.event_name == 'pull_request' && github.event.base.ref || github.ref }} diff --git a/.github/actions/env/action.yml b/.github/actions/env/action.yml index d30cab498dc5..40b913b1ea62 100644 --- a/.github/actions/env/action.yml +++ b/.github/actions/env/action.yml @@ -12,11 +12,11 @@ inputs: type: string required: true - repo_ref: + repo-ref: type: string - repo_ref_sha: + repo-ref-sha: type: string - repo_ref_name: + repo-ref-name: type: string trusted_bots: @@ -24,7 +24,7 @@ inputs: default: | trigger-release-envoy[bot] - check_mobile_run: + check-mobile-run: type: boolean default: true @@ -60,24 +60,24 @@ outputs: value: ${{ steps.should_run.outputs.mobile_release_validation }} mobile_tsan: value: ${{ steps.should_run.outputs.mobile_tsan }} - repo_ref: - value: ${{ steps.context.outputs.repo_ref }} - repo_ref_name: - value: ${{ steps.context.outputs.repo_ref_name }} - repo_ref_pr_number: - value: ${{ steps.context.outputs.repo_ref_pr_number }} - repo_ref_sha: - value: ${{ steps.context.outputs.repo_ref_sha }} - repo_ref_sha_short: - value: ${{ steps.context.outputs.repo_ref_sha_short }} - repo_ref_title: - value: ${{ steps.context.outputs.repo_ref_title }} + repo-ref: + value: ${{ steps.context.outputs.repo-ref }} + repo-ref-name: + value: ${{ steps.context.outputs.repo-ref-name }} + repo-ref-pr-number: + value: ${{ steps.context.outputs.repo-ref-pr-number }} + repo-ref-sha: + value: ${{ steps.context.outputs.repo-ref-sha }} + repo-ref-sha-short: + value: ${{ 
steps.context.outputs.repo-ref-sha-short }} + repo-ref-title: + value: ${{ steps.context.outputs.repo-ref-title }} trusted: value: ${{ steps.trusted.outputs.trusted }} - version_dev: - value: ${{ steps.context.outputs.version_dev }} - version_patch: - value: ${{ steps.context.outputs.version_patch }} + version-dev: + value: ${{ steps.context.outputs.version-dev }} + version-patch: + value: ${{ steps.context.outputs.version-patch }} runs: using: composite @@ -126,16 +126,16 @@ runs: # If we are in a trusted CI run then the provided commit _must_ be either the latest for # this branch, or an antecdent. - run: | - if ! git merge-base --is-ancestor "${{ inputs.repo_ref }}" HEAD &> /dev/null; then - echo "Provided Envoy ref (${{ inputs.repo_ref }}) is not an ancestor of current branch" >&2 + if ! git merge-base --is-ancestor "${{ inputs.repo-ref }}" HEAD &> /dev/null; then + echo "Provided Envoy ref (${{ inputs.repo-ref }}) is not an ancestor of current branch" >&2 exit 1 fi - git checkout "${{ inputs.repo_ref }}" - if: ${{ steps.trusted.outputs.trusted == 'true' && inputs.repo_ref }} + git checkout "${{ inputs.repo-ref }}" + if: ${{ steps.trusted.outputs.trusted == 'true' && inputs.repo-ref }} name: Check provided ref shell: bash - - if: ${{ inputs.check_mobile_run != 'false' }} + - if: ${{ inputs.check-mobile-run != 'false' }} id: should_run name: 'Check what to run' run: ./mobile/tools/what_to_run.sh @@ -151,7 +151,7 @@ runs: fi VERSION_PATCH="$(cat VERSION.txt | cut -d- -f1 | rev | cut -d. 
-f1 | rev)" # TODO: strip merge from pr names - REF_NAME=${{ inputs.repo_ref_name || github.ref_name }} + REF_NAME=${{ inputs.repo-ref-name || github.ref_name }} if [[ "$REF_NAME" =~ ^refs/pull/ ]]; then REF_NAME="${REF_NAME:10}" REF_PR_NUMBER="$(echo "${REF_NAME}" | cut -d/ -f1)" @@ -160,8 +160,8 @@ runs: fi echo "SET PR NUMBER: ${REF_PR_NUMBER}" - REF="${{ steps.trusted.outputs.trusted != 'true' && inputs.repo_ref || '' }}" - REF_SHA=${{ inputs.repo_ref_sha || github.event.pull_request.head.sha || github.sha }} + REF="${{ steps.trusted.outputs.trusted != 'true' && inputs.repo-ref || '' }}" + REF_SHA=${{ inputs.repo-ref-sha || github.event.pull_request.head.sha || github.sha }} REF_SHA_SHORT="${REF_SHA:0:7}" REF_TITLE=( "${{ steps.trusted.outputs.trusted == 'true' && 'postsubmit' || 'pr' }}/" @@ -169,14 +169,14 @@ runs: "@${REF_SHA_SHORT}") REF_TITLE="$(printf %s "${REF_TITLE[@]}" $'\n')" { - echo "repo_ref=$REF" - echo "repo_ref_name=$REF_NAME" - echo "repo_ref_pr_number=$REF_PR_NUMBER" - echo "repo_ref_sha=$REF_SHA" - echo "repo_ref_title=$REF_TITLE" - echo "repo_ref_sha_short=$REF_SHA_SHORT" - echo "version_dev=$VERSION_DEV" - echo "version_patch=$VERSION_PATCH" + echo "repo-ref=$REF" + echo "repo-ref-name=$REF_NAME" + echo "repo-ref-pr-number=$REF_PR_NUMBER" + echo "repo-ref-sha=$REF_SHA" + echo "repo-ref-title=$REF_TITLE" + echo "repo-ref-sha-short=$REF_SHA_SHORT" + echo "version-dev=$VERSION_DEV" + echo "version-patch=$VERSION_PATCH" } >> "$GITHUB_OUTPUT" shell: bash diff --git a/.github/actions/publish/release/setup/action.yml b/.github/actions/publish/release/setup/action.yml deleted file mode 100644 index 4e0935710d2d..000000000000 --- a/.github/actions/publish/release/setup/action.yml +++ /dev/null @@ -1,26 +0,0 @@ -inputs: - ref: - type: string - required: true - bucket: - type: string - required: true - -runs: - using: composite - steps: - - id: url - run: | - echo "base=https://storage.googleapis.com/${{ inputs.bucket }}/${REF:0:7}/release" \ - >> 
"$GITHUB_OUTPUT" - env: - REF: ${{ inputs.ref }} - shell: bash - - uses: envoyproxy/toolshed/gh-actions/fetch@actions-v0.0.10 - id: fetch - with: - url: "${{ steps.url.outputs.base }}/release.signed.tar.zst" - - run: | - mkdir -p ${{ runner.temp }}/release.signed - mv ${{ steps.fetch.outputs.path }} ${{ runner.temp }}/release.signed - shell: bash diff --git a/.github/actions/verify/examples/setup/action.yml b/.github/actions/verify/examples/setup/action.yml index 18f3205721ce..7384eb281d0d 100644 --- a/.github/actions/verify/examples/setup/action.yml +++ b/.github/actions/verify/examples/setup/action.yml @@ -16,15 +16,15 @@ runs: env: REF: ${{ inputs.ref }} shell: bash - - uses: envoyproxy/toolshed/gh-actions/docker/fetch@actions-v0.0.10 + - uses: envoyproxy/toolshed/gh-actions/docker/fetch@actions-v0.1.1 with: url: "${{ steps.url.outputs.base }}/envoy.tar" variant: dev - - uses: envoyproxy/toolshed/gh-actions/docker/fetch@actions-v0.0.10 + - uses: envoyproxy/toolshed/gh-actions/docker/fetch@actions-v0.1.1 with: url: "${{ steps.url.outputs.base }}/envoy-contrib.tar" variant: contrib-dev - - uses: envoyproxy/toolshed/gh-actions/docker/fetch@actions-v0.0.10 + - uses: envoyproxy/toolshed/gh-actions/docker/fetch@actions-v0.1.1 with: url: "${{ steps.url.outputs.base }}/envoy-google-vrp.tar" variant: google-vrp-dev diff --git a/.github/workflows/_cache_docker.yml b/.github/workflows/_cache_docker.yml index 673f67ef72bb..032be0081e6f 100644 --- a/.github/workflows/_cache_docker.yml +++ b/.github/workflows/_cache_docker.yml @@ -37,7 +37,7 @@ jobs: docker: runs-on: ubuntu-22.04 steps: - - uses: envoyproxy/toolshed/gh-actions/docker/cache/prime@actions-v0.0.35 + - uses: envoyproxy/toolshed/gh-actions/docker/cache/prime@actions-v0.1.5 name: Prime Docker cache (${{ inputs.image_repo }}:${{ inputs.image_tag }}@sha256:${{ inputs.image_sha }}) with: image_tag: "${{ inputs.image_repo }}:${{ inputs.image_tag }}@sha256:${{ inputs.image_sha }}" diff --git a/.github/workflows/_ci.yml 
b/.github/workflows/_ci.yml index bbf16933978d..f7e3b9a862df 100644 --- a/.github/workflows/_ci.yml +++ b/.github/workflows/_ci.yml @@ -3,96 +3,81 @@ name: Envoy CI on: workflow_call: secrets: - app_id: - app_key: + app-id: + app-key: + rbe-key: inputs: - target: - required: true + bazel-extra: type: string - rbe: - type: boolean - default: true - managed: - type: boolean - default: true - - auth_bazel_rbe: - type: string - default: '' - - bazel_extra: - type: string - default: - bazel_local_cache: - type: string - default: - bazel_rbe_cache: - type: string - default: grpcs://remotebuildexecution.googleapis.com - bazel_rbe_instance: - type: string - default: projects/envoy-ci/instances/default_instance - bazel_rbe_jobs: + bazel-rbe-jobs: type: number default: 75 - - cache_build_image: + cache-build-image: type: string - - command_prefix: + catch-errors: + type: boolean + default: false + command-prefix: type: string default: ./ci/run_envoy_docker.sh - command_ci: + command-ci: type: string default: ./ci/do_ci.sh - catch-errors: + diskspace-hack: type: boolean default: false error-match: type: string default: | ERROR - warning-match: - type: string - default: | - WARNING notice-match: type: string default: | NOTICE - - diskspace_hack: + rbe: type: boolean - default: false - - run_pre: - type: string - default: - run_pre_with: - type: string - default: - - run_post: - type: string - default: - run_post_with: - type: string - default: - - repo_fetch_depth: + default: true + repo-fetch-depth: type: number default: 1 - repo_ref: + repo-ref: + type: string + runs-on: type: string + default: ubuntu-22.04 skip: type: boolean default: false + source: + type: string + steps-pre: + type: string + steps-pre-name: + type: string + steps-post: + type: string + default: | + - run: | + du -ch "%{{ inputs.temp-dir || runner.temp }}" | grep -E "[0-9]{2,}M|[0-9]G" + shell: bash + steps-post-name: + type: string + target: + type: string + required: true + temp-dir: + type: string 
trusted: type: boolean default: false - - env: + upload-name: + type: string + upload-path: type: string + warning-match: + type: string + default: | + WARNING concurrency: group: | @@ -105,95 +90,98 @@ concurrency: jobs: do_ci: if: ${{ ! inputs.skip }} - runs-on: ubuntu-22.04 - name: ${{ inputs.command_ci }} ${{ inputs.target }} + runs-on: ${{ inputs.runs-on }} + name: ${{ inputs.command-ci }} ${{ inputs.target }} steps: - - if: ${{ inputs.cache_build_image }} - name: Restore Docker cache (${{ inputs.cache_build_image }}) - uses: envoyproxy/toolshed/gh-actions/docker/cache/restore@actions-v0.0.35 + - if: ${{ inputs.cache-build-image }} + name: Restore Docker cache ${{ inputs.cache-build-image && format('({0})', inputs.cache-build-image) || '' }} + uses: envoyproxy/toolshed/gh-actions/docker/cache/restore@actions-v0.1.5 with: - image_tag: ${{ inputs.cache_build_image }} + image_tag: ${{ inputs.cache-build-image }} - - name: Check workflow context - id: context - run: | - if [[ "${{ inputs.trusted }}" != "false" && -n "${{ secrets.app_id }}" && -n "${{ secrets.app_key }}" ]]; then - echo "use_appauth=true" >> $GITHUB_OUTPUT - fi - - if: ${{ steps.context.outputs.use_appauth == 'true' }} - name: Fetch token for app auth - id: appauth - uses: envoyproxy/toolshed/gh-actions/appauth@actions-v0.0.35 - with: - app_id: ${{ secrets.app_id }} - key: ${{ secrets.app_key }} - - - uses: actions/checkout@v4 + - uses: envoyproxy/toolshed/gh-actions/github/checkout@actions-v0.1.5 + id: checkout name: Checkout Envoy repository with: - fetch-depth: ${{ ! inputs.trusted && inputs.repo_fetch_depth || 0 }} - # WARNING: This allows untrusted code to run!!! - # If this is set, then anything before or after in the job should be regarded as - # compromised. - ref: ${{ ! 
inputs.trusted && inputs.repo_ref || '' }} - token: ${{ steps.context.outputs.use_appauth == 'true' && steps.appauth.outputs.token || secrets.GITHUB_TOKEN }} + app_id: ${{ inputs.trusted && secrets.app-id || '' }} + app_key: ${{ inputs.trusted && secrets.app-key || '' }} + config: | + fetch-depth: ${{ ! inputs.trusted && inputs.repo-fetch-depth || 0 }} + # WARNING: This allows untrusted code to run!!! + # If this is set, then anything before or after in the job should be regarded as + # compromised. + ref: ${{ ! inputs.trusted && inputs.repo-ref || github.ref }} # If we are in a trusted CI run then the provided commit _must_ be either the latest for # this branch, or an antecdent. - run: | - if ! git merge-base --is-ancestor "${{ inputs.repo_ref }}" HEAD; then - echo "Provided Envoy ref (${{ inputs.repo_ref }}) is not an ancestor of current branch" >&2 + if ! git merge-base --is-ancestor "${{ inputs.repo-ref }}" HEAD; then + echo "Provided Envoy ref (${{ inputs.repo-ref }}) is not an ancestor of current branch" >&2 exit 1 fi - git checkout "${{ inputs.repo_ref }}" + git checkout "${{ inputs.repo-ref }}" if: ${{ inputs.trusted }} name: Check provided ref + shell: bash - name: Add safe directory run: git config --global --add safe.directory /__w/envoy/envoy - - if: ${{ inputs.diskspace_hack }} - uses: envoyproxy/toolshed/gh-actions/diskspace@actions-v0.0.35 + - if: ${{ inputs.diskspace-hack }} + name: Free diskspace + uses: envoyproxy/toolshed/gh-actions/diskspace@actions-v0.1.5 - run: | echo "disk space at beginning of build:" df -h name: "Check disk space at beginning" + shell: bash - - if: ${{ inputs.run_pre }} - name: Run pre action ${{ inputs.run_pre && format('({0})', inputs.run_pre) || '' }} - uses: envoyproxy/toolshed/gh-actions/using/recurse@actions-v0.0.35 + - uses: envoyproxy/toolshed/gh-actions/using/steps@actions-v0.1.5 + name: Run pre steps + if: ${{ inputs.steps-pre }} with: - uses: ${{ inputs.run_pre }} - with: ${{ inputs.run_pre_with }} + name: ${{ 
inputs.steps-pre-name }} + steps: ${{ inputs.steps-pre }} - - uses: ./.github/actions/do_ci - name: Do CI + - uses: envoyproxy/toolshed/gh-actions/github/run@actions-v0.1.5 + name: 'Run CI target ${{ inputs.target }}' with: - target: ${{ inputs.target }} - rbe: ${{ inputs.rbe }} - managed: ${{ inputs.managed }} - auth_bazel_rbe: ${{ inputs.auth_bazel_rbe }} - bazel_extra: ${{ inputs.bazel_extra }} - bazel_rbe_jobs: ${{ inputs.bazel_rbe_jobs }} - command_prefix: ${{ inputs.command_prefix }} - command_ci: ${{ inputs.command_ci }} catch-errors: ${{ inputs.catch-errors }} + container-command: ${{ inputs.command-prefix }} + command-prefix: ${{ inputs.command-ci }} + command: ${{ inputs.target }} + source: ${{ inputs.source }} error-match: ${{ inputs.error-match }} notice-match: ${{ inputs.notice-match }} warning-match: ${{ inputs.warning-match }} - env: ${{ inputs.env }} - GITHUB_TOKEN: ${{ steps.context.outputs.use_appauth == 'true' && steps.appauth.outputs.token || secrets.GITHUB_TOKEN }} - - - if: ${{ inputs.run_post }} - name: Run post action ${{ inputs.run_pre && format('({0})', inputs.run_post) || '' }} - uses: envoyproxy/toolshed/gh-actions/using/recurse@actions-v0.0.35 + env: + GITHUB_TOKEN: ${{ steps.checkout.outputs.token != '' && steps.checkout.outputs.token || secrets.GITHUB_TOKEN }} + ENVOY_DOCKER_BUILD_DIR: ${{ runner.temp }} + ENVOY_RBE: ${{ inputs.rbe != 'false' && 1 || '' }} + RBE_KEY: ${{ secrets.rbe-key }} + BAZEL_BUILD_EXTRA_OPTIONS: >- + --config=remote-ci + ${{ inputs.bazel-extra }} + ${{ inputs.rbe != 'false' && format('--jobs={0}', inputs.bazel-rbe-jobs) || '' }} + BAZEL_FAKE_SCM_REVISION: ${{ github.event_name == 'pull_request' && 'e3b4a6e9570da15ac1caffdded17a8bebdc7dfc9' || '' }} + CI_TARGET_BRANCH: ${{ github.event_name == 'pull_request' && github.event.base.ref || github.ref }} + + - uses: envoyproxy/toolshed/gh-actions/using/steps@actions-v0.1.5 + name: Run post steps + if: ${{ inputs.steps-post }} with: - uses: ${{ inputs.run_post }} - 
with: ${{ inputs.run_post_with }} + name: ${{ inputs.steps-post-name }} + steps: ${{ inputs.steps-post }} - run: | echo "disk space at end of build:" df -h - echo - du -ch "${{ runner.temp }}" | grep -E "[0-9]{2,}M|[0-9]G" name: "Check disk space at end" + shell: bash + + - uses: actions/upload-artifact@v3 + name: Upload artefacts + if: ${{ inputs.upload-name && inputs.upload-path }} + with: + name: ${{ inputs.upload-name }} + path: ${{ inputs.upload-path }} diff --git a/.github/workflows/_env.yml b/.github/workflows/_env.yml index b3040fd62723..f59321e124d9 100644 --- a/.github/workflows/_env.yml +++ b/.github/workflows/_env.yml @@ -24,20 +24,19 @@ on: type: string default: 7467652575122d8d54e767a68f141598bd855383 - check_mobile_run: + check-mobile-run: type: boolean default: true - prime_build_image: + prime-build-image: type: boolean default: false - - repo_ref: + repo-ref: type: string default: - repo_ref_sha: + repo-ref-name: type: string default: - repo_ref_name: + repo-ref-sha: type: string default: @@ -76,25 +75,22 @@ on: value: ${{ jobs.repo.outputs.mobile_release_validation }} mobile_tsan: value: ${{ jobs.repo.outputs.mobile_tsan }} - - repo_ref: - value: ${{ jobs.repo.outputs.repo_ref }} - repo_ref_name: - value: ${{ jobs.repo.outputs.repo_ref_name }} - repo_ref_sha: - value: ${{ jobs.repo.outputs.repo_ref_sha }} - repo_ref_sha_short: - value: ${{ jobs.repo.outputs.repo_ref_sha_short }} - repo_ref_title: - value: ${{ jobs.repo.outputs.repo_ref_title }} - + repo-ref: + value: ${{ jobs.repo.outputs.repo-ref }} + repo-ref-name: + value: ${{ jobs.repo.outputs.repo-ref-name }} + repo-ref-sha: + value: ${{ jobs.repo.outputs.repo-ref-sha }} + repo-ref-sha-short: + value: ${{ jobs.repo.outputs.repo-ref-sha-short }} + repo-ref-title: + value: ${{ jobs.repo.outputs.repo-ref-title }} trusted: value: ${{ jobs.repo.outputs.trusted }} - - version_dev: - value: ${{ jobs.repo.outputs.version_dev }} - version_patch: - value: ${{ jobs.repo.outputs.version_patch }} + 
version-dev: + value: ${{ jobs.repo.outputs.version-dev }} + version-patch: + value: ${{ jobs.repo.outputs.version-patch }} concurrency: group: | @@ -106,8 +102,11 @@ concurrency: jobs: repo: - if: github.repository == 'envoyproxy/envoy' + if: ${{ github.repository == 'envoyproxy/envoy' || vars.ENVOY_CI }} runs-on: ubuntu-22.04 + permissions: + contents: read + pull-requests: read outputs: build_image_ubuntu: ${{ steps.env.outputs.build_image_ubuntu }} build_image_ubuntu_mobile: ${{ steps.env.outputs.build_image_ubuntu_mobile }} @@ -124,57 +123,68 @@ jobs: mobile_ios_tests: ${{ steps.env.outputs.mobile_ios_tests }} mobile_release_validation: ${{ steps.env.outputs.mobile_release_validation }} mobile_tsan: ${{ steps.env.outputs.mobile_tsan }} - repo_ref: ${{ steps.env.outputs.repo_ref }} - repo_ref_name: ${{ steps.env.outputs.repo_ref_name }} - repo_ref_sha: ${{ steps.env.outputs.repo_ref_sha }} - repo_ref_sha_short: ${{ steps.env.outputs.repo_ref_sha_short }} - repo_ref_title: ${{ steps.env.outputs.repo_ref_title }} + repo-ref: ${{ steps.ref.outputs.value }} + repo-ref-name: ${{ steps.env.outputs.repo-ref-name }} + repo-ref-sha: ${{ steps.env.outputs.repo-ref-sha }} + repo-ref-sha_short: ${{ steps.env.outputs.repo-ref-sha-short }} + repo-ref-title: ${{ steps.env.outputs.repo-ref-title }} trusted: ${{ steps.env.outputs.trusted }} - version_dev: ${{ steps.env.outputs.version_dev }} - version_patch: ${{ steps.env.outputs.version_patch }} + version-dev: ${{ steps.env.outputs.version-dev }} + version-patch: ${{ steps.env.outputs.version-patch }} steps: - uses: actions/checkout@v4 name: Checkout Envoy repository with: - fetch-depth: ${{ ! (inputs.check_mobile_run || ! startsWith(github.event_name, 'pull_request')) && 1 || 0 }} + fetch-depth: ${{ ! (inputs.check-mobile-run || ! startsWith(github.event_name, 'pull_request')) && 1 || 0 }} # WARNING: This allows untrusted code to run!!! 
# If this is set, then anything before or after in the job should be regarded as # compromised. - ref: ${{ startsWith(github.event_name, 'pull_request') && inputs.repo_ref || '' }} + ref: ${{ startsWith(github.event_name, 'pull_request') && inputs.repo-ref || '' }} - uses: ./.github/actions/env name: Generate environment variables id: env with: - check_mobile_run: ${{ inputs.check_mobile_run }} - repo_ref: ${{ inputs.repo_ref }} - repo_ref_name: ${{ inputs.repo_ref_name }} - repo_ref_sha: ${{ inputs.repo_ref_sha }} + check-mobile-run: ${{ inputs.check-mobile-run }} + repo-ref: ${{ inputs.repo-ref }} + repo-ref-name: ${{ inputs.repo-ref-name }} + repo-ref-sha: ${{ inputs.repo-ref-sha }} build_image_repo: ${{ inputs.build_image_repo }} build_image_tag: ${{ inputs.build_image_tag }} build_image_mobile_sha: ${{ inputs.build_image_mobile_sha }} build_image_sha: ${{ inputs.build_image_sha }} + - uses: envoyproxy/toolshed/gh-actions/github/merge-commit@actions-v0.1.5 + id: merge-commit + if: ${{ github.event_name == 'pull_request_target' }} + with: + repository: ${{ github.repository }} + pr: ${{ github.event.number }} + token: ${{ secrets.GITHUB_TOKEN }} + - name: 'Set ref' + id: ref + run: | + echo "value=${{ steps.merge-commit.outputs.sha || steps.env.outputs.repo_ref }}" >> $GITHUB_OUTPUT - name: 'Print env' run: | - echo "version_dev=${{ steps.env.outputs.version_dev }}" - echo "version_patch=${{ steps.env.outputs.version_patch }}" + echo "version-dev=${{ steps.env.outputs.version-dev }}" + echo "version-patch=${{ steps.env.outputs.version-patch }}" echo "trusted=${{ steps.env.outputs.trusted }}" - echo "repo_ref=${{ steps.env.outputs.repo_ref }}" - echo "repo_ref_name=${{ steps.env.outputs.repo_ref_name }}" - echo "repo_ref_pr_number=${{ steps.env.outputs.repo_ref_pr_number }}" - echo "repo_ref_sha=${{ steps.env.outputs.repo_ref_sha }}" - echo "repo_ref_sha_short=${{ steps.env.outputs.repo_ref_sha_short }}" - echo "repo_ref_title=${{ steps.env.outputs.repo_ref_title 
}}" + echo "repo-ref=${{ steps.ref.outputs.value }}" + echo "repo-ref-name=${{ steps.env.outputs.repo-ref-name }}" + echo "repo-ref-pr-number=${{ steps.env.outputs.repo-ref-pr-number }}" + echo "repo-ref-sha=${{ steps.env.outputs.repo-ref-sha }}" + echo "repo-ref-sha-short=${{ steps.env.outputs.repo-ref-sha-short }}" + echo "repo-ref-title=${{ steps.env.outputs.repo-ref-title }}" echo "build_image_ubuntu=${{ steps.env.outputs.build_image_ubuntu }}" echo "build_image_ubuntu_mobile=${{ steps.env.outputs.build_image_ubuntu_mobile }}" echo - if [[ -n "${{ steps.env.outputs.repo_ref_pr_number }}" ]]; then - echo "PR: https://github.com/envoyproxy/envoy/pull/${{ steps.env.outputs.repo_ref_pr_number }}" + if [[ -n "${{ steps.env.outputs.repo-ref-pr-number }}" ]]; then + echo "PR: https://github.com/${{ github.repository }}/pull/${{ steps.env.outputs.repo-ref-pr-number }}" fi cache: - if: ${{ inputs.prime_build_image }} + if: ${{ inputs.prime-build-image }} uses: ./.github/workflows/_cache_docker.yml with: image_repo: ${{ inputs.build_image_repo }} diff --git a/.github/workflows/_precheck_deps.yml b/.github/workflows/_precheck_deps.yml new file mode 100644 index 000000000000..c0578a4e1af5 --- /dev/null +++ b/.github/workflows/_precheck_deps.yml @@ -0,0 +1,55 @@ +name: Publish + +permissions: + contents: read + +on: + workflow_call: + inputs: + build-image-ubuntu: + type: string + default: '' + dependency-review: + type: boolean + default: false + repo-ref: + type: string + +concurrency: + group: ${{ github.head_ref || github.run_id }}-${{ github.workflow }}-publish + cancel-in-progress: true + +jobs: + prechecks: + strategy: + matrix: + include: + - target: deps + rbe: false + uses: ./.github/workflows/_ci.yml + name: ${{ matrix.target }} + permissions: + contents: read + packages: read + with: + target: ${{ matrix.target }} + rbe: ${{ matrix.rbe }} + bazel-extra: '--config=rbe-envoy-engflow' + cache-build-image: ${{ inputs.build-image-ubuntu }} + repo-ref: 
${{ inputs.repo-ref }} + catch-errors: true + error-match: | + ERROR + ClientConnectorError + + dependency-review: + runs-on: ubuntu-22.04 + if: ${{ inputs.dependency-review }} + steps: + - name: Checkout Repository + uses: actions/checkout@v4 + with: + ref: ${{ inputs.repo-ref }} + persist-credentials: false + - name: Dependency Review + uses: actions/dependency-review-action@6c5ccdad469c9f8a2996bfecaec55a631a347034 diff --git a/.github/workflows/_stage_publish.yml b/.github/workflows/_stage_publish.yml index 81aba66daed3..8b4105b805d2 100644 --- a/.github/workflows/_stage_publish.yml +++ b/.github/workflows/_stage_publish.yml @@ -18,13 +18,13 @@ on: build_image_ubuntu: type: string default: '' - version_dev: + version-dev: type: string default: '' head_ref: type: string default: '' - repo_ref: + repo-ref: type: string sha: type: string @@ -42,34 +42,52 @@ jobs: publish_ci: if: ${{ ! inputs.trusted }} name: ${{ matrix.name || matrix.target }} + uses: ./.github/workflows/_ci.yml + with: + target: ${{ matrix.target }} + rbe: false + cache-build-image: ${{ inputs.build_image_ubuntu }} + source: ${{ matrix.source }} + trusted: false + repo-ref: ${{ inputs.repo-ref }} + steps-pre: ${{ matrix.steps-pre }} strategy: fail-fast: false matrix: include: - target: publish name: github - run_pre: ./.github/actions/publish/release/setup - run_pre_with: | - ref: ${{ inputs.repo_ref }} - bucket: envoy-pr - env: | + steps-pre: | + - id: short_name + uses: envoyproxy/toolshed/gh-actions/str/sub@actions-v0.1.2 + with: + length: 7 + string: ${{ inputs.repo-ref }} + min: 40 + - uses: envoyproxy/toolshed/gh-actions/fetch@actions-v0.1.2 + with: + url: https://storage.googleapis.com/envoy-pr/%{{ steps.short_name.outputs.string }}/release/release.signed.tar.zst + path: %{{ runner.temp }}/release.signed + source: | export ENVOY_PUBLISH_DRY_RUN=1 export ENVOY_COMMIT=${{ inputs.sha }} - uses: ./.github/workflows/_ci.yml - with: - target: ${{ matrix.target }} - rbe: false - managed: true - 
cache_build_image: ${{ inputs.build_image_ubuntu }} - run_pre: ${{ matrix.run_pre }} - run_pre_with: ${{ matrix.run_pre_with }} - env: ${{ matrix.env }} - trusted: false - repo_ref: ${{ inputs.repo_ref }} + export ENVOY_REPO=${{ github.repository }} publish: if: ${{ inputs.trusted }} name: ${{ matrix.name || matrix.target }} + uses: ./.github/workflows/_ci.yml + with: + target: ${{ matrix.target }} + rbe: false + cache-build-image: ${{ inputs.build_image_ubuntu }} + source: ${{ matrix.source }} + trusted: true + repo-ref: ${{ inputs.repo-ref }} + steps-pre: ${{ inputs.steps-pre }} + secrets: + app-id: ${{ secrets.ENVOY_CI_PUBLISH_APP_ID }} + app-key: ${{ secrets.ENVOY_CI_PUBLISH_APP_KEY }} permissions: contents: read packages: read @@ -79,29 +97,21 @@ jobs: include: - target: publish name: github - run_pre: ./.github/actions/publish/release/setup - run_pre_with: | - ref: ${{ inputs.repo_ref }} - bucket: envoy-postsubmit - env: | + steps-pre: | + - id: short_name + uses: envoyproxy/toolshed/gh-actions/str/sub@actions-v0.1.2 + with: + length: 7 + input: ${{ inputs.repo-ref }} + - uses: envoyproxy/toolshed/gh-actions/fetch@actions-v0.1.2 + with: + url: https://storage.googleapis.com/envoy-postsubmit/%{{ steps.short_name.outputs.string }}/release/release.signed.tar.zst + path: %{{ runner.temp }}/release.signed + source: | export ENVOY_COMMIT=${{ inputs.sha }} - if [[ '${{ inputs.version_dev }}' == 'dev' ]]; then + if [[ '${{ inputs.version-dev }}' == 'dev' ]]; then export ENVOY_PUBLISH_DRY_RUN=1 fi - uses: ./.github/workflows/_ci.yml - with: - target: ${{ matrix.target }} - rbe: false - managed: true - cache_build_image: ${{ inputs.build_image_ubuntu }} - run_pre: ${{ matrix.run_pre }} - run_pre_with: ${{ matrix.run_pre_with }} - env: ${{ matrix.env }} - trusted: true - repo_ref: ${{ inputs.repo_ref }} - secrets: - app_id: ${{ secrets.ENVOY_CI_PUBLISH_APP_ID }} - app_key: ${{ secrets.ENVOY_CI_PUBLISH_APP_KEY }} publish_docs: # For normal commits to Envoy main this will 
trigger an update in the website repo, @@ -111,17 +121,17 @@ jobs: # which builds a static version of the docs for the release and commits it to the archive. # In turn the archive repo triggers an update in the website so the new release docs are # included in the published site - if: ${{ inputs.trusted }} + if: ${{ inputs.trusted && github.repository == 'envoyproxy/envoy' }} runs-on: ubuntu-22.04 needs: - publish steps: - - uses: envoyproxy/toolshed/gh-actions/dispatch@actions-v0.0.35 + - uses: envoyproxy/toolshed/gh-actions/dispatch@actions-v0.1.5 with: app_id: ${{ secrets.ENVOY_CI_SYNC_APP_ID }} key: "${{ secrets.ENVOY_CI_SYNC_APP_KEY }}" ref: main - repository: ${{ inputs.version_dev == 'dev' && 'envoyproxy/envoy-website' || 'envoyproxy/archive' }} + repository: ${{ inputs.version-dev == 'dev' && 'envoyproxy/envoy-website' || 'envoyproxy/archive' }} workflow: envoy-sync.yaml inputs: | - commit_sha: ${{ inputs.version_dev == 'dev' && github.sha || '' }} + commit_sha: ${{ inputs.version-dev == 'dev' && github.sha || '' }} diff --git a/.github/workflows/_stage_verify.yml b/.github/workflows/_stage_verify.yml index a1a40d2b5fd4..1af743bfadae 100644 --- a/.github/workflows/_stage_verify.yml +++ b/.github/workflows/_stage_verify.yml @@ -9,7 +9,7 @@ on: trusted: type: boolean default: false - repo_ref: + repo-ref: type: string given_ref: type: string @@ -21,33 +21,59 @@ concurrency: jobs: verify: name: ${{ matrix.name || matrix.target }} - strategy: - fail-fast: false - matrix: - include: - - target: verify_examples - name: examples - rbe: false - managed: true - cache_build_image: "" - command_prefix: "" - diskspace_hack: true - run_pre: ./.github/actions/verify/examples/setup - run_pre_with: | - bucket: envoy-${{ inputs.trusted && 'postsubmit' || 'pr' }} - ref: ${{ inputs.given_ref }} - env: | - export NO_BUILD_SETUP=1 uses: ./.github/workflows/_ci.yml with: target: ${{ matrix.target }} rbe: ${{ matrix.rbe }} - managed: ${{ matrix.managed }} - cache_build_image: ${{ 
matrix.cache_build_image }} - diskspace_hack: ${{ matrix.diskspace_hack }} - command_prefix: ${{ matrix.command_prefix }} - run_pre: ${{ matrix.run_pre }} - run_pre_with: ${{ matrix.run_pre_with }} - env: ${{ matrix.env }} + cache-build-image: + command-prefix: + source: ${{ matrix.source }} trusted: ${{ inputs.trusted }} - repo_ref: ${{ inputs.repo_ref }} + repo-ref: ${{ inputs.repo-ref }} + runs-on: envoy-x64-small + steps-pre: ${{ matrix.steps-pre }} + strategy: + fail-fast: false + matrix: + include: + - name: examples + target: verify_examples + source: | + export NO_BUILD_SETUP=1 + rbe: false + steps-pre: | + - id: short_name + uses: envoyproxy/toolshed/gh-actions/str/sub@actions-v0.1.1 + with: + length: 7 + string: ${{ inputs.repo-ref }} + min: 40 + - id: gcp + run: | + PREFIX=https://storage.googleapis.com/envoy- + BUCKET=${{ inputs.trusted && 'postsubmit' || 'pr' }} + NAME=%{{ steps.short_name.outputs.string }} + echo "url=${PREFIX}${BUCKET}/${NAME}" >> $GITHUB_OUTPUT + shell: bash + - uses: envoyproxy/toolshed/gh-actions/docker/fetch@actions-v0.1.1 + with: + url: | + %{{ steps.gcp.outputs.url }}/envoy.tar + variant: dev + - uses: envoyproxy/toolshed/gh-actions/docker/fetch@actions-v0.1.1 + with: + url: | + %{{ steps.gcp.outputs.url }}/envoy-contrib.tar + variant: contrib-dev + - uses: envoyproxy/toolshed/gh-actions/docker/fetch@actions-v0.1.1 + with: + url: | + %{{ steps.gcp.outputs.url }}/envoy-google-vrp.tar + variant: google-vrp-dev + - run: docker images | grep envoy + shell: bash + - run: | + export DEBIAN_FRONTEND=noninteractive + sudo apt-get -qq update -y + sudo apt-get -qq install -y --no-install-recommends expect + shell: bash diff --git a/.github/workflows/_workflow-start.yml b/.github/workflows/_workflow-start.yml index 05025292544a..96142f0e0920 100644 --- a/.github/workflows/_workflow-start.yml +++ b/.github/workflows/_workflow-start.yml @@ -19,17 +19,18 @@ jobs: start: runs-on: ubuntu-22.04 permissions: + contents: read statuses: write 
steps: - uses: actions/checkout@v4 - uses: ./.github/actions/env id: env with: - check_mobile_run: false + check-mobile-run: false - if: ${{ steps.env.outputs.trusted != 'true' }} name: Start status check - uses: envoyproxy/toolshed/gh-actions/status@actions-v0.0.35 + uses: envoyproxy/toolshed/gh-actions/status@actions-v0.1.5 with: authToken: ${{ secrets.GITHUB_TOKEN }} context: ${{ inputs.workflow_name }} diff --git a/.github/workflows/check-deps.yml b/.github/workflows/check-deps.yml deleted file mode 100644 index a6b6a4743445..000000000000 --- a/.github/workflows/check-deps.yml +++ /dev/null @@ -1,36 +0,0 @@ -name: Check dependencies - -permissions: - contents: read - -on: - schedule: - - cron: '0 8 * * *' - workflow_dispatch: - -jobs: - build: - runs-on: ubuntu-22.04 - if: >- - ${{ - github.repository == 'envoyproxy/envoy' - && (github.event.schedule - || !contains(github.actor, '[bot]')) - }} - permissions: - contents: read # to fetch code (actions/checkout) - issues: write # required to open/close dependency issues - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - ref: ${{ github.head_ref }} - - - name: Run dependency checker - run: | - TODAY_DATE=$(date -u -I"date") - export TODAY_DATE - bazel run //tools/dependency:check --action_env=TODAY_DATE -- -c release_issues --fix - bazel run //tools/dependency:check --action_env=TODAY_DATE -- -c cves -w error - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/codeql-push.yml b/.github/workflows/codeql-push.yml index 112a43acad17..6e080f198a44 100644 --- a/.github/workflows/codeql-push.yml +++ b/.github/workflows/codeql-push.yml @@ -26,7 +26,7 @@ jobs: steps: - name: Pre-cleanup - uses: envoyproxy/toolshed/gh-actions/diskspace@actions-v0.0.35 + uses: envoyproxy/toolshed/gh-actions/diskspace@actions-v0.1.5 with: to_remove: | /usr/local/lib/android diff --git a/.github/workflows/commands.yml b/.github/workflows/commands.yml index 81f643c2443b..5e92671f9f88 
100644 --- a/.github/workflows/commands.yml +++ b/.github/workflows/commands.yml @@ -24,7 +24,7 @@ jobs: actions: write checks: read steps: - - uses: envoyproxy/toolshed/gh-actions/retest@actions-v0.0.35 + - uses: envoyproxy/toolshed/gh-actions/retest@actions-v0.1.5 with: token: ${{ secrets.GITHUB_TOKEN }} azp_org: cncf diff --git a/.github/workflows/depsreview.yml b/.github/workflows/depsreview.yml deleted file mode 100644 index 3890070d58d5..000000000000 --- a/.github/workflows/depsreview.yml +++ /dev/null @@ -1,16 +0,0 @@ -name: 'Dependency Review' -on: [pull_request] - -concurrency: - group: ${{ github.head_ref-github.workflow || github.run_id }} - cancel-in-progress: true - -jobs: - dependency-review: - runs-on: ubuntu-22.04 - if: github.repository == 'envoyproxy/envoy' - steps: - - name: 'Checkout Repository' - uses: actions/checkout@v4 - - name: 'Dependency Review' - uses: actions/dependency-review-action@6c5ccdad469c9f8a2996bfecaec55a631a347034 diff --git a/.github/workflows/envoy-dependency.yml b/.github/workflows/envoy-dependency.yml index ab25e02a3d97..4ebe135de871 100644 --- a/.github/workflows/envoy-dependency.yml +++ b/.github/workflows/envoy-dependency.yml @@ -4,6 +4,8 @@ permissions: contents: read on: + schedule: + - cron: '0 8 * * *' workflow_dispatch: inputs: task: @@ -15,6 +17,7 @@ on: - bazel - bazel-api - build-image + - check dependency: description: Dependency to update (if applicable) version: @@ -22,11 +25,11 @@ on: pr: type: boolean default: true - pr_message: + pr-message: description: Additional message for PR, eg to fix an issue (optional) concurrency: - group: ${{ github.run_id }}-${{ github.workflow }} + group: ${{ github.head_ref || github.run_id }}-${{ github.workflow }} cancel-in-progress: true env: @@ -34,8 +37,12 @@ env: COMMITTER_EMAIL: 148525496+dependency-envoy[bot]@users.noreply.github.com jobs: - update_bazel: - if: startsWith(inputs.task, 'bazel') + update-bazel: + if: >- + ${{ + github.event_name == 'workflow_dispatch' + 
&& startsWith(inputs.task, 'bazel') + }} name: >- Update dep (${{ inputs.pr && 'PR/' || '' }}${{ inputs.task == 'bazel' && 'bazel' || 'bazel/api' }}/${{ inputs.dependency }}/${{ inputs.version }}) @@ -43,13 +50,13 @@ jobs: steps: - id: checkout name: Checkout Envoy repository - uses: envoyproxy/toolshed/gh-actions/github/checkout@actions-v0.0.35 + uses: envoyproxy/toolshed/gh-actions/github/checkout@actions-v0.1.5 with: app_id: ${{ secrets.ENVOY_CI_DEP_APP_ID }} app_key: ${{ secrets.ENVOY_CI_DEP_APP_KEY }} - id: version name: Shorten (possible) SHA - uses: envoyproxy/toolshed/gh-actions/str/sub@actions-v0.0.35 + uses: envoyproxy/toolshed/gh-actions/str/sub@actions-v0.1.5 with: string: ${{ inputs.version }} length: 7 @@ -64,19 +71,19 @@ jobs: TARGET: ${{ inputs.task == 'bazel' && 'update' || 'api-update' }} TASK: ${{ inputs.task == 'bazel' && 'bazel' || 'api/bazel' }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - uses: envoyproxy/toolshed/gh-actions/upload/diff@actions-v0.0.35 + - uses: envoyproxy/toolshed/gh-actions/upload/diff@actions-v0.1.5 name: Upload diff with: name: ${{ inputs.dependency }}-${{ steps.version.outputs.string }} - name: Create a PR if: ${{ inputs.pr }} - uses: envoyproxy/toolshed/gh-actions/github/pr@actions-v0.0.35 + uses: envoyproxy/toolshed/gh-actions/github/pr@actions-v0.1.5 with: base: main body: | Created by Envoy dependency bot for @${{ github.actor }} - ${{ inputs.pr_message }} + ${{ inputs.pr-message }} branch: >- dependency/${{ inputs.task }}/${{ inputs.dependency }}/${{ steps.version.outputs.string }} commit-message: | @@ -90,23 +97,24 @@ jobs: -> ${{ steps.version.outputs.string }} GITHUB_TOKEN: ${{ steps.checkout.outputs.token }} - update_build_image: - if: github.event.inputs.task == 'build-image' + update-build-image: + if: >- + ${{ + github.event_name == 'workflow_dispatch' + && github.event.inputs.task == 'build-image' + }} name: Update build image (PR) runs-on: ubuntu-22.04 steps: - - name: Fetch token for app auth - id: 
appauth - uses: envoyproxy/toolshed/gh-actions/appauth@actions-v0.0.35 - with: - app_id: ${{ secrets.ENVOY_CI_DEP_APP_ID }} - key: ${{ secrets.ENVOY_CI_DEP_APP_KEY }} - - uses: actions/checkout@v4 + - uses: envoyproxy/toolshed/gh-actions/github/checkout@actions-v0.1.5 + id: checkout name: Checkout Envoy repository with: - path: envoy - fetch-depth: 0 - token: ${{ steps.appauth.outputs.token }} + config: | + path: envoy + fetch-depth: 0 + app_id: ${{ secrets.ENVOY_CI_DEP_APP_ID }} + app_key: ${{ secrets.ENVOY_CI_DEP_APP_KEY }} - uses: actions/checkout@v4 name: Checkout Envoy build tools repository with: @@ -137,7 +145,7 @@ jobs: - name: Check Docker SHAs id: build-images - uses: envoyproxy/toolshed/gh-actions/docker/shas@actions-v0.0.35 + uses: envoyproxy/toolshed/gh-actions/docker/shas@actions-v0.1.5 with: images: | sha: envoyproxy/envoy-build-ubuntu:${{ steps.build-tools.outputs.tag }} @@ -166,7 +174,7 @@ jobs: name: Update SHAs working-directory: envoy - name: Create a PR - uses: envoyproxy/toolshed/gh-actions/github/pr@actions-v0.0.35 + uses: envoyproxy/toolshed/gh-actions/github/pr@actions-v0.1.5 with: base: main body: Created by Envoy dependency bot @@ -178,5 +186,29 @@ jobs: Signed-off-by: ${{ env.COMMITTER_NAME }} <${{ env.COMMITTER_EMAIL }}> title: 'deps: Bump build images -> `${{ steps.build-tools.outputs.tag_short }}`' - GITHUB_TOKEN: ${{ steps.appauth.outputs.token }} + GITHUB_TOKEN: ${{ steps.checkout.outputs.token }} working-directory: envoy + + scheduled: + runs-on: ubuntu-22.04 + if: >- + ${{ + github.repository == 'envoyproxy/envoy' + && (github.event.schedule + || (!contains(github.actor, '[bot]') + && inputs.task == 'check')) + }} + permissions: + contents: read + issues: write + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Run dependency checker + run: | + TODAY_DATE=$(date -u -I"date") + export TODAY_DATE + bazel run //tools/dependency:check --action_env=TODAY_DATE -- -c release_issues --fix + bazel run 
//tools/dependency:check --action_env=TODAY_DATE -- -c cves -w error + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/envoy-macos.yml b/.github/workflows/envoy-macos.yml new file mode 100644 index 000000000000..0c01e4a451b2 --- /dev/null +++ b/.github/workflows/envoy-macos.yml @@ -0,0 +1,60 @@ +name: Envoy/macos + +permissions: + contents: read + +on: + push: + branches: + - main + - release/v* + pull_request_target: + +concurrency: + group: ${{ github.event.inputs.head_ref || github.run_id }}-${{ github.workflow }} + cancel-in-progress: true + +jobs: + env: + uses: ./.github/workflows/_env.yml + permissions: + contents: read + pull-requests: read + with: + prime-build-image: false + check-mobile-run: false + + macos: + needs: + - env + strategy: + fail-fast: false + matrix: + include: + - target: ci/mac_ci_steps.sh + name: macOS + uses: ./.github/workflows/_ci.yml + name: CI ${{ matrix.name || matrix.target }} + secrets: + rbe-key: ${{ secrets.GCP_SERVICE_ACCOUNT_KEY }} + with: + target: ${{ matrix.target }} + runs-on: macos-12-xl + command-ci: + command-prefix: + repo-ref: ${{ needs.env.outputs.repo-ref }} + steps-post: + steps-pre: | + - run: ./ci/mac_ci_setup.sh + shell: bash + name: Setup macos + source: | + GCP_SERVICE_ACCOUNT_KEY_PATH=$(mktemp -t gcp_service_account.XXXXXX.json) + bash -c "echo \"${RBE_KEY}\" | base64 --decode > \"${GCP_SERVICE_ACCOUNT_KEY_PATH}\"" + _BAZEL_BUILD_EXTRA_OPTIONS=( + --remote_download_toplevel + --flaky_test_attempts=2 + --config=cache-google + --config=ci + --google_credentials=${GCP_SERVICE_ACCOUNT_KEY_PATH}) + export BAZEL_BUILD_EXTRA_OPTIONS=${_BAZEL_BUILD_EXTRA_OPTIONS[*]} diff --git a/.github/workflows/envoy-prechecks.yml b/.github/workflows/envoy-prechecks.yml index 142f55f93a9d..2520b5414a20 100644 --- a/.github/workflows/envoy-prechecks.yml +++ b/.github/workflows/envoy-prechecks.yml @@ -13,10 +13,10 @@ on: - '**/requirements*.txt' - '**/go.mod' - '**/*.bzl' - - 'WORKSPACE' - - 
'.github/workflows/envoy-prechecks.yml' - - '.github/workflows/_*.yml' - - '.github/actions/do_ci/action.yml' + - tools/dependency/BUILD + - WORKSPACE + - .github/workflows/envoy-prechecks.yml + - .github/workflows/_*.yml concurrency: group: ${{ github.event.inputs.head_ref || github.run_id }}-${{ github.workflow }} @@ -26,35 +26,22 @@ jobs: env: uses: ./.github/workflows/_env.yml with: - prime_build_image: true - check_mobile_run: false + prime-build-image: true + check-mobile-run: false permissions: contents: read packages: read + pull-requests: read - prechecks: + deps: needs: - env - strategy: - fail-fast: false - matrix: - include: - - target: deps - rbe: false - managed: true - uses: ./.github/workflows/_ci.yml - name: CI ${{ matrix.target }} + uses: ./.github/workflows/_precheck_deps.yml + name: Precheck ${{ needs.env.outputs.repo-ref-title }} + with: + build-image-ubuntu: ${{ needs.env.outputs.build_image_ubuntu }} + dependency-review: ${{ github.event_name == 'pull_request_target' && github.repository == 'envoyproxy/envoy' }} + repo-ref: ${{ needs.env.outputs.repo-ref }} permissions: contents: read packages: read - with: - target: ${{ matrix.target }} - rbe: ${{ matrix.rbe }} - bazel_extra: '--config=rbe-envoy-engflow' - managed: ${{ matrix.managed }} - cache_build_image: ${{ needs.env.outputs.build_image_ubuntu }} - repo_ref: ${{ github.event.pull_request.head.sha }} - catch-errors: true - error-match: | - ERROR - ClientConnectorError diff --git a/.github/workflows/envoy-publish.yml b/.github/workflows/envoy-publish.yml index 9890338b00f5..d5c4178a40eb 100644 --- a/.github/workflows/envoy-publish.yml +++ b/.github/workflows/envoy-publish.yml @@ -26,21 +26,23 @@ jobs: env: if: >- ${{ - github.repository == 'envoyproxy/envoy' + (github.repository == 'envoyproxy/envoy' + || vars.ENVOY_CI) && (!contains(github.actor, '[bot]') || github.actor == 'trigger-workflow-envoy[bot]' || github.actor == 'trigger-release-envoy[bot]') }} uses: ./.github/workflows/_env.yml 
with: - check_mobile_run: false - prime_build_image: true - repo_ref: ${{ inputs.ref }} - repo_ref_sha: ${{ inputs.sha }} - repo_ref_name: ${{ inputs.head_ref }} + check-mobile-run: false + prime-build-image: true + repo-ref: ${{ inputs.ref }} + repo-ref-sha: ${{ inputs.sha }} + repo-ref-name: ${{ inputs.head_ref }} permissions: contents: read packages: read + pull-requests: read check: if: ${{ github.event_name != 'pull_request' }} @@ -57,12 +59,12 @@ jobs: - env - check uses: ./.github/workflows/_stage_publish.yml - name: Publish ${{ needs.env.outputs.repo_ref_title }} + name: Publish ${{ needs.env.outputs.repo-ref-title }} with: build_image_ubuntu: ${{ needs.env.outputs.build_image_ubuntu }} trusted: ${{ needs.env.outputs.trusted == 'true' && true || false }} - version_dev: ${{ needs.env.outputs.version_dev }} - repo_ref: ${{ inputs.ref }} + version-dev: ${{ needs.env.outputs.version-dev }} + repo-ref: ${{ inputs.ref }} sha: ${{ inputs.sha }} permissions: contents: read @@ -75,10 +77,12 @@ jobs: verify: uses: ./.github/workflows/_stage_verify.yml - name: Verify ${{ needs.env.outputs.repo_ref_title }} + name: Verify ${{ needs.env.outputs.repo-ref-title }} needs: - env + permissions: + contents: read with: trusted: ${{ needs.env.outputs.trusted == 'true' && true || false }} given_ref: ${{ inputs.ref }} - repo_ref: ${{ inputs.ref }} + repo-ref: ${{ inputs.ref }} diff --git a/.github/workflows/envoy-release.yml b/.github/workflows/envoy-release.yml index 976a45b4c2b5..300d5a4db61d 100644 --- a/.github/workflows/envoy-release.yml +++ b/.github/workflows/envoy-release.yml @@ -18,25 +18,31 @@ on: default: create-release type: choice options: - - create-release - summary: - type: boolean - default: true - description: Use changelog summary (required to publish release) - author: - description: >- - Author: User/email, eg 'Myname ' - (used by create-release, default: `changelogs/summary.md` last committer) + - create-release + - sync-version-histories pr: type: boolean 
default: true description: Create a PR - pr_message: + pr-message: description: Additional message for PR, eg to fix an issue or additional signoff (optional) wip: type: boolean default: false description: WIP + author: + description: >- + Author: User/email, eg 'Myname ' + (used by create-release, default: `changelogs/summary.md` last committer) + summary: + type: boolean + default: true + description: Use changelog summary (required to publish release) + +env: + COMMITTER_NAME: publish-envoy[bot] + COMMITTER_EMAIL: 140627008+publish-envoy[bot]@users.noreply.github.com + jobs: ## Triggerable actions @@ -49,10 +55,13 @@ jobs: steps: - id: checkout name: Checkout Envoy repository - uses: envoyproxy/toolshed/gh-actions/github/checkout@actions-v0.0.35 + uses: envoyproxy/toolshed/gh-actions/github/checkout@actions-v0.1.5 with: app_id: ${{ secrets.ENVOY_CI_PUBLISH_APP_ID }} app_key: ${{ secrets.ENVOY_CI_PUBLISH_APP_KEY }} + committer-name: ${{ env.COMMITTER_NAME }} + committer-email: ${{ env.COMMITTER_EMAIL }} + strip-prefix: release/ - run: | if [[ ! -s "changelogs/summary.md" ]]; then if [[ "${{ inputs.summary }}" == "false" ]]; then @@ -62,47 +71,41 @@ jobs: echo "::error::Changelog summary (changelogs/summary.md) is empty!" 
exit 1 fi + COMMITTER=$(git log -n 1 --format='%an <%ae>' -- changelogs/summary.md) + echo "committer=${COMMITTER}" >> $GITHUB_OUTPUT + id: changelog name: Check changelog summary - - run: | - BRANCHNAME="${GITHUB_REF_NAME#release/}" - echo "name=${BRANCHNAME}" >> $GITHUB_OUTPUT - echo "full_name=release/create/${BRANCHNAME}" >> $GITHUB_OUTPUT - name: Get branch name - id: branch - env: - GITHUB_REF_NAME: ${{ github.ref_name }} - if: ${{ inputs.author }} name: Validate signoff email - uses: envoyproxy/toolshed/gh-actions/email/validate@actions-v0.0.35 + uses: envoyproxy/toolshed/gh-actions/email/validate@actions-v0.1.5 with: email: ${{ inputs.author }} + - uses: envoyproxy/toolshed/gh-actions/github/run@actions-v0.1.5 + name: Create release + with: + source: | + BAZEL_ARGS=(--) + BAZEL_RUN_ARGS=(--config=ci) + if [[ -n "${{ inputs.author }}" ]]; then + BAZEL_ARGS+=( + "--release-author=${{ inputs.author }}" + "--signoff=${{ steps.changelog.outputs.committer }}") + else + BAZEL_ARGS+=("--release-author=${{ steps.changelog.outputs.committer }}") + fi + command: >- + bazel + run + "${BAZEL_RUN_ARGS[@]}" + @envoy_repo//:release + "${BAZEL_ARGS[@]}" - run: | - git config --global user.name $COMMITTER_NAME - git config --global user.email $COMMITTER_EMAIL - name: Configure committer - env: - COMMITTER_NAME: publish-envoy[bot] - COMMITTER_EMAIL: 140627008+publish-envoy[bot]@users.noreply.github.com - - run: | - BAZEL_ARGS=(-- -l debug -v debug) - BAZEL_RUN_ARGS=(--config=ci) - CHANGELOG_COMMITTER="$(git log -n 1 --format="%an <%ae>" -- changelogs/summary.md)" - if [[ -n "$AUTHOR" ]]; then - BAZEL_ARGS+=( - --release-author="${AUTHOR}" - --release-signoff="${CHANGELOG_COMMITTER}") - else - BAZEL_ARGS+=(--release-author="${CHANGELOG_COMMITTER}") - fi - bazel run "${BAZEL_RUN_ARGS[@]}" @envoy_repo//:release "${BAZEL_ARGS[@]}" VERSION=$(cat VERSION.txt) echo "version=v${VERSION}" >> $GITHUB_OUTPUT - name: Create release + name: Release version id: release - env: - AUTHOR: ${{ 
inputs.author }} - name: Create a PR - uses: envoyproxy/toolshed/gh-actions/github/pr@actions-v0.0.35 + uses: envoyproxy/toolshed/gh-actions/github/pr@actions-v0.1.5 with: base: ${{ github.ref_name }} commit: false @@ -110,8 +113,8 @@ jobs: body: | Created by Envoy publish bot for @${{ github.actor }} ${{ ! inputs.summary && ':warning: Created without changelog summary, this will need to be updated before publishing' || '' }} - branch: ${{ steps.branch.outputs.full_name }} - diff-upload: release-${{ steps.branch.outputs.name }} + branch: release/create/${{ steps.checkout.outputs.branch_name }} + diff-upload: release-${{ steps.checkout.outputs.branch_name }} diff-show: true dry-run: ${{ ! inputs.pr }} wip: ${{ ! inputs.summary || inputs.wip }} @@ -120,6 +123,46 @@ jobs: repo: Release ${{ steps.release.outputs.version }} GITHUB_TOKEN: ${{ steps.checkout.outputs.token }} + sync_version_histories: + runs-on: ubuntu-22.04 + if: github.event_name == 'workflow_dispatch' && inputs.task == 'sync-version-histories' + name: Sync version histories + steps: + - id: checkout + name: Checkout Envoy repository + uses: envoyproxy/toolshed/gh-actions/github/checkout@actions-v0.1.5 + with: + app_id: ${{ secrets.ENVOY_CI_PUBLISH_APP_ID }} + app_key: ${{ secrets.ENVOY_CI_PUBLISH_APP_KEY }} + committer-name: ${{ env.COMMITTER_NAME }} + committer-email: ${{ env.COMMITTER_EMAIL }} + - uses: envoyproxy/toolshed/gh-actions/github/run@actions-v0.1.5 + name: Sync version histories + with: + command: >- + bazel + run + --config=ci @envoy_repo//:sync + -- + --signoff="${{ env.COMMITTER_NAME }} <${{ env.COMMITTER_EMAIL }}>" + - name: Create a PR + uses: envoyproxy/toolshed/gh-actions/github/pr@actions-v0.1.5 + with: + append-commit-message: true + base: ${{ github.ref_name }} + commit: false + body: | + Created by Envoy publish bot for @${{ github.actor }} + branch: release/sync/${{ steps.checkout.outputs.branch_name }} + diff-upload: version-histories-${{ steps.checkout.outputs.branch_name }} + 
diff-show: true + dry-run: ${{ ! inputs.pr }} + GITHUB_TOKEN: ${{ steps.checkout.outputs.token }} + title: >- + ${{ steps.checkout.outputs.branch_name != 'main' && format('[{0}]', steps.checkout.outputs.branch_name) || '' }} + repo: Sync version histories + + ## Triggered actions # On release to `main`: @@ -132,23 +175,18 @@ jobs: if: github.event_name == 'release' && endsWith(github.ref, '.0') name: Create release branch steps: - - name: Fetch token for app auth - id: appauth - uses: envoyproxy/toolshed/gh-actions/appauth@actions-v0.0.35 - with: - app_id: ${{ secrets.ENVOY_CI_PUBLISH_APP_ID }} - key: ${{ secrets.ENVOY_CI_PUBLISH_APP_KEY }} - name: Checkout repository - uses: actions/checkout@v4 + uses: envoyproxy/toolshed/gh-actions/github/checkout@actions-v0.1.5 with: - token: ${{ steps.appauth.outputs.token }} + app_id: ${{ secrets.ENVOY_CI_PUBLISH_APP_ID }} + app_key: ${{ secrets.ENVOY_CI_PUBLISH_APP_KEY }} + committer-name: ${{ env.COMMITTER_NAME }} + committer-email: ${{ env.COMMITTER_EMAIL }} - name: Create release branch run: | version="$(cut -d- -f1 < VERSION.txt | cut -d. -f-2)" release_branch="release/v${version}" commit_sha="$(git rev-parse HEAD)" - git config --global user.name "$COMMITTER_NAME" - git config --global user.email "$COMMITTER_EMAIL" echo "Creating ${release_branch} from ${commit_sha}" git checkout -b "$release_branch" bazel run @envoy_repo//:dev -- --patch
-m "repo: Remove mobile ci for release branch" git log git push origin "$release_branch" - env: - COMMITTER_NAME: publish-envoy[bot] - COMMITTER_EMAIL: 140627008+publish-envoy[bot]@users.noreply.github.com diff --git a/.github/workflows/envoy-sync.yml b/.github/workflows/envoy-sync.yml index c01e875d209f..59df253a26de 100644 --- a/.github/workflows/envoy-sync.yml +++ b/.github/workflows/envoy-sync.yml @@ -28,7 +28,7 @@ jobs: - data-plane-api - mobile-website steps: - - uses: envoyproxy/toolshed/gh-actions/dispatch@actions-v0.0.35 + - uses: envoyproxy/toolshed/gh-actions/dispatch@actions-v0.1.5 with: repository: "envoyproxy/${{ matrix.downstream }}" ref: main diff --git a/.github/workflows/envoy-windows.yml b/.github/workflows/envoy-windows.yml new file mode 100644 index 000000000000..02bd5b7fbae0 --- /dev/null +++ b/.github/workflows/envoy-windows.yml @@ -0,0 +1,106 @@ +name: Envoy/windows + +permissions: + contents: read + +on: + push: + branches: + - main + - release/v* + pull_request_target: + +concurrency: + group: ${{ github.event.inputs.head_ref || github.run_id }}-${{ github.workflow }} + cancel-in-progress: true + +jobs: + env: + uses: ./.github/workflows/_env.yml + permissions: + contents: read + pull-requests: read + with: + prime-build-image: false + check-mobile-run: false + + windows: + needs: + - env + strategy: + fail-fast: false + matrix: + include: + - target: ci/windows_ci_steps.sh + name: Windows 2019 + uses: ./.github/workflows/_ci.yml + name: CI ${{ matrix.name || matrix.target }} + secrets: + rbe-key: ${{ secrets.GCP_SERVICE_ACCOUNT_KEY }} + with: + target: ${{ matrix.target }} + runs-on: envoy-win19-small + command-ci: + repo-ref: ${{ needs.env.outputs.repo-ref }} + steps-post: + temp-dir: 'C:\Users\runner\AppData\Local\Temp\bazel-shared' + upload-name: windows.release + upload-path: 'C:\Users\runner\AppData\Local\Temp\envoy' + source: | + export ENVOY_SHARED_TMP_DIR="C:\Users\runner\AppData\Local\Temp\bazel-shared" + export 
ENVOY_DOCKER_BUILD_DIR="C:\Users\runner\AppData\Local\Temp" + mkdir -p "$ENVOY_SHARED_TMP_DIR" + GCP_SERVICE_ACCOUNT_KEY_PATH=$(mktemp -p "${ENVOY_SHARED_TMP_DIR}" -t gcp_service_account.XXXXXX.json) + bash -c "echo \"${RBE_KEY}\" | base64 --decode > \"${GCP_SERVICE_ACCOUNT_KEY_PATH}\"" + _BAZEL_BUILD_EXTRA_OPTIONS=( + --config=remote-ci + --config=rbe-google + --config=remote-msvc-cl + --google_credentials=${GCP_SERVICE_ACCOUNT_KEY_PATH} + --jobs=75 + --flaky_test_attempts=2) + export BAZEL_BUILD_EXTRA_OPTIONS=${_BAZEL_BUILD_EXTRA_OPTIONS[*]} + + docker: + needs: + - env + - windows + strategy: + fail-fast: false + matrix: + include: + - target: windows2019 + name: Windows 2019 + runs-on: envoy-win19-small + build-type: windows + image-base: mcr.microsoft.com/windows/servercore + image-tag: ltsc2019 + - target: windows2022 + name: Windows 2022 + runs-on: envoy-win22-small + build-type: windows-ltsc2022 + image-base: mcr.microsoft.com/windows/nanoserver + image-tag: ltsc2022 + runs-on: ${{ matrix.runs-on }} + steps: + - uses: actions/checkout@v4 + with: + ref: ${{ needs.env.outputs.repo_ref }} + - uses: actions/download-artifact@v3 + with: + name: windows.release + - run: | + # Convert to Unix-style path so tar doesn't think drive letter is a hostname + STAGING_DIR="$(echo $PWD | tr -d ':' | tr '\\' '/')" + mkdir -p windows/amd64 && tar zxf "${STAGING_DIR}/envoy_binary.tar.gz" -C ./windows/amd64 + CI_SHA1=$(git rev-parse head) + export CI_SHA1 + ci/docker_ci.sh + shell: bash + env: + CI_BRANCH: ${{ github.ref }} + DOCKERHUB_USERNAME: ${{ needs.env.outputs.trusted == 'true' && secrets.DOCKERHUB_USERNAME || '' }} + DOCKERHUB_PASSWORD: ${{ needs.env.outputs.trusted == 'true' && secrets.DOCKERHUB_PASSWORD || '' }} + WINDOWS_BUILD_TYPE: ${{ matrix.build-type }} + WINDOWS_IMAGE_BASE: ${{ matrix.image-base }} + WINDOWS_IMAGE_TAG: ${{ matrix.image-tag }} diff --git a/.github/workflows/mobile-android_build.yml b/.github/workflows/mobile-android_build.yml index 
b45265f84916..53a438225414 100644 --- a/.github/workflows/mobile-android_build.yml +++ b/.github/workflows/mobile-android_build.yml @@ -19,6 +19,7 @@ jobs: uses: ./.github/workflows/_env.yml permissions: contents: read + pull-requests: read androidbuild: if: ${{ needs.env.outputs.mobile_android_build == 'true' }} @@ -44,7 +45,7 @@ jobs: run: | cd mobile ./bazelw build \ - --config=mobile-remote-ci \ + --config=mobile-remote-release-clang \ --fat_apk_cpu=x86_64 \ --linkopt=-fuse-ld=lld \ //:android_dist @@ -58,7 +59,7 @@ jobs: contents: read packages: read name: java_helloworld - runs-on: macos-12 + runs-on: envoy-x64-small timeout-minutes: 50 steps: - uses: actions/checkout@v4 @@ -68,10 +69,16 @@ jobs: java-package: jdk architecture: x64 distribution: zulu - - run: | + - name: 'Install dependencies' + run: | cd mobile - ./ci/mac_ci_setup.sh --android - name: 'Install dependencies' + ./ci/linux_ci_setup.sh + # https://github.blog/changelog/2023-02-23-hardware-accelerated-android-virtualization-on-actions-windows-and-linux-larger-hosted-runners/ + - name: Enable KVM group permissions + run: | + echo 'KERNEL=="kvm", GROUP="kvm", MODE="0666", OPTIONS+="static_node=kvm"' | sudo tee /etc/udev/rules.d/99-kvm4all.rules + sudo udevadm control --reload-rules + sudo udevadm trigger --name-match=kvm - uses: nick-fields/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd name: 'Start emulator' with: @@ -81,22 +88,23 @@ jobs: # Return to using: # cd mobile && ./bazelw mobile-install --fat_apk_cpu=x86_64 --start_app //examples/java/hello_world:hello_envoy # When https://github.com/envoyproxy/envoy-mobile/issues/853 is fixed. 
- - name: 'Start java app' + - name: 'Start Java app' env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | cd mobile ./bazelw build \ - --config=mobile-remote-ci-macos \ + --config=mobile-remote-release-clang \ --fat_apk_cpu=x86_64 \ + --linkopt=-fuse-ld=lld \ //examples/java/hello_world:hello_envoy - adb install -r --no-incremental bazel-bin/examples/java/hello_world/hello_envoy.apk - adb shell am start -n io.envoyproxy.envoymobile.helloenvoy/.MainActivity + "${ANDROID_HOME}"/platform-tools/adb install -r --no-incremental bazel-bin/examples/java/hello_world/hello_envoy.apk + "${ANDROID_HOME}"/platform-tools/adb shell am start -n io.envoyproxy.envoymobile.helloenvoy/.MainActivity - name: 'Check connectivity' run: | - timeout 30 adb logcat -e "received headers with status 301" -m 1 || { + timeout 30 "${ANDROID_HOME}"/platform-tools/adb logcat -e "received headers with status 301" -m 1 || { echo "Failed checking for headers in adb logcat" >&2 - timeout 30 adb logcat || { + timeout 30 "${ANDROID_HOME}"/platform-tools/adb logcat || { echo "Failed dumping adb logcat" >&2 } exit 1 @@ -111,7 +119,7 @@ jobs: contents: read packages: read name: kotlin_helloworld - runs-on: macos-12 + runs-on: envoy-x64-small timeout-minutes: 50 steps: - uses: actions/checkout@v4 @@ -124,7 +132,13 @@ jobs: - name: 'Install dependencies' run: | cd mobile - ./ci/mac_ci_setup.sh --android + ./ci/linux_ci_setup.sh + # https://github.blog/changelog/2023-02-23-hardware-accelerated-android-virtualization-on-actions-windows-and-linux-larger-hosted-runners/ + - name: Enable KVM group permissions + run: | + echo 'KERNEL=="kvm", GROUP="kvm", MODE="0666", OPTIONS+="static_node=kvm"' | sudo tee /etc/udev/rules.d/99-kvm4all.rules + sudo udevadm control --reload-rules + sudo udevadm trigger --name-match=kvm - uses: nick-fields/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd name: 'Start emulator' with: @@ -134,22 +148,23 @@ jobs: # Return to using: # ./bazelw mobile-install --fat_apk_cpu=x86_64 --start_app 
//examples/kotlin/hello_world:hello_envoy_kt # When https://github.com/envoyproxy/envoy-mobile/issues/853 is fixed. - - name: 'Start kotlin app' + - name: 'Start Kotlin app' env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | cd mobile ./bazelw build \ - --config=mobile-remote-ci-macos \ + --config=mobile-remote-release-clang \ --fat_apk_cpu=x86_64 \ + --linkopt=-fuse-ld=lld \ //examples/kotlin/hello_world:hello_envoy_kt - adb install -r --no-incremental bazel-bin/examples/kotlin/hello_world/hello_envoy_kt.apk - adb shell am start -n io.envoyproxy.envoymobile.helloenvoykotlin/.MainActivity + "${ANDROID_HOME}"/platform-tools/adb install -r --no-incremental bazel-bin/examples/kotlin/hello_world/hello_envoy_kt.apk + "${ANDROID_HOME}"/platform-tools/adb shell am start -n io.envoyproxy.envoymobile.helloenvoykotlin/.MainActivity - name: 'Check connectivity' run: | - timeout 30 adb logcat -e "received headers with status 200" -m 1 || { + timeout 30 "${ANDROID_HOME}"/platform-tools/adb logcat -e "received headers with status 200" -m 1 || { echo "Failed checking for headers in adb logcat" >&2 - timeout 30 adb logcat || { + timeout 30 "${ANDROID_HOME}"/platform-tools/adb logcat || { echo "Failed dumping adb logcat" >&2 } exit 1 @@ -164,7 +179,7 @@ jobs: contents: read packages: read name: kotlin_baseline_app - runs-on: macos-12 + runs-on: envoy-x64-small timeout-minutes: 50 steps: - uses: actions/checkout@v4 @@ -177,7 +192,13 @@ jobs: - name: 'Install dependencies' run: | cd mobile - ./ci/mac_ci_setup.sh --android + ./ci/linux_ci_setup.sh + # https://github.blog/changelog/2023-02-23-hardware-accelerated-android-virtualization-on-actions-windows-and-linux-larger-hosted-runners/ + - name: Enable KVM group permissions + run: | + echo 'KERNEL=="kvm", GROUP="kvm", MODE="0666", OPTIONS+="static_node=kvm"' | sudo tee /etc/udev/rules.d/99-kvm4all.rules + sudo udevadm control --reload-rules + sudo udevadm trigger --name-match=kvm - uses: 
nick-fields/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd name: 'Start emulator' with: @@ -187,22 +208,23 @@ jobs: # Return to using: # ./bazelw mobile-install --fat_apk_cpu=x86_64 --start_app //examples/kotlin/hello_world:hello_envoy_kt # When https://github.com/envoyproxy/envoy-mobile/issues/853 is fixed. - - name: 'Start kotlin app' + - name: 'Start Kotlin app' env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | cd mobile ./bazelw build \ - --config=mobile-remote-ci-macos \ + --config=mobile-remote-release-clang \ --fat_apk_cpu=x86_64 \ + --linkopt=-fuse-ld=lld \ //test/kotlin/apps/baseline:hello_envoy_kt - adb install -r --no-incremental bazel-bin/test/kotlin/apps/baseline/hello_envoy_kt.apk - adb shell am start -n io.envoyproxy.envoymobile.helloenvoybaselinetest/.MainActivity + "${ANDROID_HOME}"/platform-tools/adb install -r --no-incremental bazel-bin/test/kotlin/apps/baseline/hello_envoy_kt.apk + "${ANDROID_HOME}"/platform-tools/adb shell am start -n io.envoyproxy.envoymobile.helloenvoybaselinetest/.MainActivity - name: 'Check connectivity' run: | - timeout 30 adb logcat -e "received headers with status 301" -m 1 || { + timeout 30 "${ANDROID_HOME}"/platform-tools/adb logcat -e "received headers with status 301" -m 1 || { echo "Failed checking for headers in adb logcat" >&2 - timeout 30 adb logcat || { + timeout 30 "${ANDROID_HOME}"/platform-tools/adb logcat || { echo "Failed dumping adb logcat" >&2 } exit 1 @@ -217,7 +239,7 @@ jobs: contents: read packages: read name: kotlin_experimental_app - runs-on: macos-12 + runs-on: envoy-x64-small timeout-minutes: 50 steps: - uses: actions/checkout@v4 @@ -230,7 +252,13 @@ jobs: - name: 'Install dependencies' run: | cd mobile - ./ci/mac_ci_setup.sh --android + ./ci/linux_ci_setup.sh + # https://github.blog/changelog/2023-02-23-hardware-accelerated-android-virtualization-on-actions-windows-and-linux-larger-hosted-runners/ + - name: Enable KVM group permissions + run: | + echo 'KERNEL=="kvm", GROUP="kvm", 
MODE="0666", OPTIONS+="static_node=kvm"' | sudo tee /etc/udev/rules.d/99-kvm4all.rules + sudo udevadm control --reload-rules + sudo udevadm trigger --name-match=kvm - uses: nick-fields/retry@943e742917ac94714d2f408a0e8320f2d1fcafcd name: 'Start emulator' with: @@ -240,23 +268,24 @@ jobs: # Return to using: # ./bazelw mobile-install --fat_apk_cpu=x86_64 --start_app //examples/kotlin/hello_world:hello_envoy_kt # When https://github.com/envoyproxy/envoy-mobile/issues/853 is fixed. - - name: 'Start kotlin app' + - name: 'Start Kotlin app' env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | cd mobile ./bazelw build \ - --config=mobile-remote-ci-macos \ + --config=mobile-remote-release-clang \ --fat_apk_cpu=x86_64 \ --define envoy_mobile_listener=enabled \ + --linkopt=-fuse-ld=lld \ //test/kotlin/apps/experimental:hello_envoy_kt - adb install -r --no-incremental bazel-bin/test/kotlin/apps/experimental/hello_envoy_kt.apk - adb shell am start -n io.envoyproxy.envoymobile.helloenvoyexperimentaltest/.MainActivity + "${ANDROID_HOME}"/platform-tools/adb install -r --no-incremental bazel-bin/test/kotlin/apps/experimental/hello_envoy_kt.apk + "${ANDROID_HOME}"/platform-tools/adb shell am start -n io.envoyproxy.envoymobile.helloenvoyexperimentaltest/.MainActivity - name: 'Check connectivity' run: | - timeout 30 adb logcat -e "received headers with status 200" -m 1 || { + timeout 30 "${ANDROID_HOME}"/platform-tools/adb logcat -e "received headers with status 200" -m 1 || { echo "Failed checking for headers in adb logcat" >&2 - timeout 30 adb logcat || { + timeout 30 "${ANDROID_HOME}"/platform-tools/adb logcat || { echo "Failed dumping adb logcat" >&2 } exit 1 diff --git a/.github/workflows/mobile-android_tests.yml b/.github/workflows/mobile-android_tests.yml index be15734f8477..2a7248894ed5 100644 --- a/.github/workflows/mobile-android_tests.yml +++ b/.github/workflows/mobile-android_tests.yml @@ -17,9 +17,10 @@ jobs: env: uses: ./.github/workflows/_env.yml with: - 
prime_build_image: true + prime-build-image: true permissions: contents: read + pull-requests: read javatestslinux: if: ${{ needs.env.outputs.mobile_android_tests == 'true' }} @@ -34,7 +35,7 @@ jobs: - name: Pre-cleanup # Using the defaults in # https://github.com/envoyproxy/toolshed/blob/main/gh-actions/diskspace/action.yml. - uses: envoyproxy/toolshed/gh-actions/diskspace@actions-v0.0.35 + uses: envoyproxy/toolshed/gh-actions/diskspace@actions-v0.1.5 - uses: actions/checkout@v4 - name: Add safe directory run: git config --global --add safe.directory /__w/envoy/envoy @@ -68,7 +69,7 @@ jobs: - name: Pre-cleanup # Using the defaults in # https://github.com/envoyproxy/toolshed/blob/main/gh-actions/diskspace/action.yml. - uses: envoyproxy/toolshed/gh-actions/diskspace@actions-v0.0.35 + uses: envoyproxy/toolshed/gh-actions/diskspace@actions-v0.1.5 - uses: actions/checkout@v4 - name: Add safe directory run: git config --global --add safe.directory /__w/envoy/envoy diff --git a/.github/workflows/mobile-asan.yml b/.github/workflows/mobile-asan.yml index a92e3730cfe3..5faace5c24e1 100644 --- a/.github/workflows/mobile-asan.yml +++ b/.github/workflows/mobile-asan.yml @@ -19,6 +19,7 @@ jobs: uses: ./.github/workflows/_env.yml permissions: contents: read + pull-requests: read asan: if: ${{ needs.env.outputs.mobile_asan == 'true' }} diff --git a/.github/workflows/mobile-cc_tests.yml b/.github/workflows/mobile-cc_tests.yml index a9001c1df8d8..2a37430af34d 100644 --- a/.github/workflows/mobile-cc_tests.yml +++ b/.github/workflows/mobile-cc_tests.yml @@ -19,6 +19,7 @@ jobs: uses: ./.github/workflows/_env.yml permissions: contents: read + pull-requests: read cctests: if: ${{ needs.env.outputs.mobile_cc_tests == 'true' }} diff --git a/.github/workflows/mobile-compile_time_options.yml b/.github/workflows/mobile-compile_time_options.yml index b03ceb2983c2..e3d3f9c936c0 100644 --- a/.github/workflows/mobile-compile_time_options.yml +++ 
b/.github/workflows/mobile-compile_time_options.yml @@ -19,6 +19,7 @@ jobs: uses: ./.github/workflows/_env.yml permissions: contents: read + pull-requests: read cc_test_no_yaml: needs: env @@ -68,10 +69,7 @@ jobs: cd mobile ./bazelw build \ --config=mobile-remote-ci \ - --define=admin_html=disabled \ - --define=admin_functionality=disabled \ --define envoy_exceptions=disabled \ - --define envoy_mobile_listener=disabled \ --define=envoy_yaml=disabled \ --copt=-fno-unwind-tables \ --copt=-fno-exceptions \ @@ -102,7 +100,6 @@ jobs: --config=mobile-remote-ci \ --define=signal_trace=disabled \ --define=envoy_mobile_request_compression=disabled \ - --define=envoy_enable_http_datagrams=disabled \ --define=google_grpc=disabled \ --@com_envoyproxy_protoc_gen_validate//bazel:template-flavor= \ $TARGETS @@ -135,7 +132,6 @@ jobs: --define=envoy_mobile_request_compression=disabled \ --define=envoy_mobile_stats_reporting=disabled \ --define=envoy_mobile_swift_cxx_interop=disabled \ - --define=envoy_enable_http_datagrams=disabled \ --define=google_grpc=disabled \ --@envoy//bazel:http3=False \ --@com_envoyproxy_protoc_gen_validate//bazel:template-flavor= \ diff --git a/.github/workflows/mobile-core.yml b/.github/workflows/mobile-core.yml index 037eb72b3284..460f1c7dcace 100644 --- a/.github/workflows/mobile-core.yml +++ b/.github/workflows/mobile-core.yml @@ -19,6 +19,7 @@ jobs: uses: ./.github/workflows/_env.yml permissions: contents: read + pull-requests: read unittests: if: ${{ github.repository == 'envoyproxy/envoy' }} @@ -46,6 +47,5 @@ jobs: --build_tests_only \ --action_env=LD_LIBRARY_PATH \ --test_env=ENVOY_IP_TEST_VERSIONS=v4only \ - --define envoy_mobile_listener=disabled \ --config=mobile-remote-ci \ //test/common/... 
diff --git a/.github/workflows/mobile-coverage.yml b/.github/workflows/mobile-coverage.yml index 8d3aaa8e93b5..bd7b7214a990 100644 --- a/.github/workflows/mobile-coverage.yml +++ b/.github/workflows/mobile-coverage.yml @@ -19,6 +19,7 @@ jobs: uses: ./.github/workflows/_env.yml permissions: contents: read + pull-requests: read coverage: if: ${{ needs.env.outputs.mobile_coverage == 'true' }} diff --git a/.github/workflows/mobile-docs.yml b/.github/workflows/mobile-docs.yml index 936674a46568..d4d226946ede 100644 --- a/.github/workflows/mobile-docs.yml +++ b/.github/workflows/mobile-docs.yml @@ -19,6 +19,7 @@ jobs: uses: ./.github/workflows/_env.yml permissions: contents: read + pull-requests: read docs: if: ${{ github.repository == 'envoyproxy/envoy' }} diff --git a/.github/workflows/mobile-format.yml b/.github/workflows/mobile-format.yml index 777a62f56c93..13bf1b2dbb20 100644 --- a/.github/workflows/mobile-format.yml +++ b/.github/workflows/mobile-format.yml @@ -19,6 +19,7 @@ jobs: uses: ./.github/workflows/_env.yml permissions: contents: read + pull-requests: read formatall: if: ${{ needs.env.outputs.mobile_formatting == 'true' }} diff --git a/.github/workflows/mobile-ios_build.yml b/.github/workflows/mobile-ios_build.yml index ca5b865880b5..3fa51f594200 100644 --- a/.github/workflows/mobile-ios_build.yml +++ b/.github/workflows/mobile-ios_build.yml @@ -19,6 +19,7 @@ jobs: uses: ./.github/workflows/_env.yml permissions: contents: read + pull-requests: read iosbuild: if: ${{ needs.env.outputs.mobile_ios_build == 'true' }} diff --git a/.github/workflows/mobile-ios_tests.yml b/.github/workflows/mobile-ios_tests.yml index 150429a30d05..6016dd03e86f 100644 --- a/.github/workflows/mobile-ios_tests.yml +++ b/.github/workflows/mobile-ios_tests.yml @@ -19,6 +19,7 @@ jobs: uses: ./.github/workflows/_env.yml permissions: contents: read + pull-requests: read swifttests: if: ${{ needs.env.outputs.mobile_ios_tests == 'true' }} diff --git a/.github/workflows/mobile-release.yml 
b/.github/workflows/mobile-release.yml index 3489f87e9777..3357cca65187 100644 --- a/.github/workflows/mobile-release.yml +++ b/.github/workflows/mobile-release.yml @@ -15,6 +15,7 @@ jobs: uses: ./.github/workflows/_env.yml permissions: contents: read + pull-requests: read android_release_artifacts: if: >- diff --git a/.github/workflows/mobile-release_validation.yml b/.github/workflows/mobile-release_validation.yml index 156ad5fbd71d..76775184fddb 100644 --- a/.github/workflows/mobile-release_validation.yml +++ b/.github/workflows/mobile-release_validation.yml @@ -19,6 +19,7 @@ jobs: uses: ./.github/workflows/_env.yml permissions: contents: read + pull-requests: read validate_swiftpm_example: if: ${{ needs.env.outputs.mobile_release_validation == 'true' }} diff --git a/.github/workflows/mobile-tsan.yml b/.github/workflows/mobile-tsan.yml index 27386c81fd3a..b06b2a8da3fc 100644 --- a/.github/workflows/mobile-tsan.yml +++ b/.github/workflows/mobile-tsan.yml @@ -19,6 +19,7 @@ jobs: uses: ./.github/workflows/_env.yml permissions: contents: read + pull-requests: read tsan: if: ${{ needs.env.outputs.mobile_tsan == 'true' }} diff --git a/.github/workflows/workflow-complete.yml b/.github/workflows/workflow-complete.yml index 8cf3824deae8..d5c2a84079f3 100644 --- a/.github/workflows/workflow-complete.yml +++ b/.github/workflows/workflow-complete.yml @@ -18,6 +18,7 @@ jobs: if: ${{ github.actor == 'trigger-workflow-envoy[bot]' }} runs-on: ubuntu-22.04 permissions: + contents: read statuses: write steps: - name: 'Download artifact' @@ -53,7 +54,7 @@ jobs: echo "state=${STATE}" >> "$GITHUB_OUTPUT" id: job - name: Complete status check - uses: envoyproxy/toolshed/gh-actions/status@actions-v0.0.35 + uses: envoyproxy/toolshed/gh-actions/status@actions-v0.1.5 with: authToken: ${{ secrets.GITHUB_TOKEN }} context: Verify/examples diff --git a/CODEOWNERS b/CODEOWNERS index 257c2577dbdd..9ca4bae5c252 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -192,6 +192,8 @@ 
extensions/filters/http/oauth2 @derekargueta @mattklein123 /*/extensions/filters/http/rate_limit_quota @tyxia @yanavlasov # HTTP Bandwidth Limit /*/extensions/filters/http/bandwidth_limit @nitgoy @mattklein123 @yanavlasov @tonya11en +# HTTP Basic Auth +/*/extensions/filters/http/basic_auth @zhaohuabing @wbpcode # Original IP detection /*/extensions/http/original_ip_detection/custom_header @alyssawilk @mattklein123 /*/extensions/http/original_ip_detection/xff @alyssawilk @mattklein123 diff --git a/api/BUILD b/api/BUILD index 76facfe2dda1..9ce02ee5a8ab 100644 --- a/api/BUILD +++ b/api/BUILD @@ -163,6 +163,7 @@ proto_library( "//envoy/extensions/filters/http/aws_lambda/v3:pkg", "//envoy/extensions/filters/http/aws_request_signing/v3:pkg", "//envoy/extensions/filters/http/bandwidth_limit/v3:pkg", + "//envoy/extensions/filters/http/basic_auth/v3:pkg", "//envoy/extensions/filters/http/buffer/v3:pkg", "//envoy/extensions/filters/http/cache/v3:pkg", "//envoy/extensions/filters/http/cdn_loop/v3:pkg", @@ -306,6 +307,8 @@ proto_library( "//envoy/extensions/stat_sinks/graphite_statsd/v3:pkg", "//envoy/extensions/stat_sinks/open_telemetry/v3:pkg", "//envoy/extensions/stat_sinks/wasm/v3:pkg", + "//envoy/extensions/tracers/opentelemetry/resource_detectors/v3:pkg", + "//envoy/extensions/tracers/opentelemetry/samplers/v3:pkg", "//envoy/extensions/transport_sockets/alts/v3:pkg", "//envoy/extensions/transport_sockets/http_11_proxy/v3:pkg", "//envoy/extensions/transport_sockets/internal_upstream/v3:pkg", diff --git a/api/bazel/envoy_http_archive.bzl b/api/bazel/envoy_http_archive.bzl index 15fd65b2af27..849e2500678f 100644 --- a/api/bazel/envoy_http_archive.bzl +++ b/api/bazel/envoy_http_archive.bzl @@ -1,6 +1,6 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -def envoy_http_archive(name, locations, **kwargs): +def envoy_http_archive(name, locations, location_name = None, **kwargs): # `existing_rule_keys` contains the names of repositories that have already # 
been defined in the Bazel workspace. By skipping repos with existing keys, # users can override dependency versions by using standard Bazel repository @@ -10,7 +10,7 @@ def envoy_http_archive(name, locations, **kwargs): # This repository has already been defined, probably because the user # wants to override the version. Do nothing. return - location = locations[name] + location = locations[location_name or name] # HTTP tarball at a given URL. Add a BUILD file if requested. http_archive( diff --git a/api/contrib/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto b/api/contrib/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto index 83fdd27b378c..bc5a470608db 100644 --- a/api/contrib/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto +++ b/api/contrib/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto @@ -22,4 +22,9 @@ message KafkaBroker { // The prefix to use when emitting :ref:`statistics `. string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; + + // Set to true if broker filter should attempt to serialize the received responses from the + // upstream broker instead of passing received bytes as is. + // Disabled by default. + bool force_response_rewrite = 2; } diff --git a/api/envoy/config/filter/network/kafka_broker/v2alpha1/kafka_broker.proto b/api/envoy/config/filter/network/kafka_broker/v2alpha1/kafka_broker.proto index 3611c1d6759f..5ca0de69651d 100644 --- a/api/envoy/config/filter/network/kafka_broker/v2alpha1/kafka_broker.proto +++ b/api/envoy/config/filter/network/kafka_broker/v2alpha1/kafka_broker.proto @@ -21,4 +21,9 @@ option (udpa.annotations.file_status).package_version_status = FROZEN; message KafkaBroker { // The prefix to use when emitting :ref:`statistics `. 
string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; + + // Set to true if broker filter should attempt to serialize the received responses from the + // upstream broker instead of passing received bytes as is. + // Disabled by default. + bool force_response_rewrite = 2; } diff --git a/api/envoy/config/trace/v3/opentelemetry.proto b/api/envoy/config/trace/v3/opentelemetry.proto index 7ae6a964bd72..59028326f220 100644 --- a/api/envoy/config/trace/v3/opentelemetry.proto +++ b/api/envoy/config/trace/v3/opentelemetry.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package envoy.config.trace.v3; +import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "envoy/config/core/v3/http_service.proto"; @@ -18,6 +19,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // Configuration for the OpenTelemetry tracer. // [#extension: envoy.tracers.opentelemetry] +// [#next-free-field: 6] message OpenTelemetryConfig { // The upstream gRPC cluster that will receive OTLP traces. // Note that the tracer drops traces if the server does not read data fast enough. @@ -43,4 +45,16 @@ message OpenTelemetryConfig { // The name for the service. This will be populated in the ResourceSpan Resource attributes. // If it is not provided, it will default to "unknown_service:envoy". string service_name = 2; + + // An ordered list of resource detectors + // [#extension-category: envoy.tracers.opentelemetry.resource_detectors] + repeated core.v3.TypedExtensionConfig resource_detectors = 4; + + // Specifies the sampler to be used by the OpenTelemetry tracer. + // The configured sampler implements the Sampler interface defined by the OpenTelemetry specification. + // This field can be left empty. In this case, the default Envoy sampling decision is used. 
+ // + // See: `OpenTelemetry sampler specification `_ + // [#extension-category: envoy.tracers.opentelemetry.samplers] + core.v3.TypedExtensionConfig sampler = 5; } diff --git a/api/envoy/extensions/filters/http/basic_auth/v3/BUILD b/api/envoy/extensions/filters/http/basic_auth/v3/BUILD new file mode 100644 index 000000000000..1c1a6f6b4423 --- /dev/null +++ b/api/envoy/extensions/filters/http/basic_auth/v3/BUILD @@ -0,0 +1,12 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = [ + "//envoy/config/core/v3:pkg", + "@com_github_cncf_udpa//udpa/annotations:pkg", + ], +) diff --git a/api/envoy/extensions/filters/http/basic_auth/v3/basic_auth.proto b/api/envoy/extensions/filters/http/basic_auth/v3/basic_auth.proto new file mode 100644 index 000000000000..df23868a4260 --- /dev/null +++ b/api/envoy/extensions/filters/http/basic_auth/v3/basic_auth.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; + +package envoy.extensions.filters.http.basic_auth.v3; + +import "envoy/config/core/v3/base.proto"; + +import "udpa/annotations/sensitive.proto"; +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.filters.http.basic_auth.v3"; +option java_outer_classname = "BasicAuthProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/basic_auth/v3;basic_authv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Basic Auth] +// Basic Auth :ref:`configuration overview `. +// [#extension: envoy.filters.http.basic_auth] + +// Basic HTTP authentication. +// +// Example: +// +// .. 
code-block:: yaml +// +// users: +// inline_string: |- +// user1:{SHA}hashed_user1_password +// user2:{SHA}hashed_user2_password +// +message BasicAuth { + // Username-password pairs used to verify user credentials in the "Authorization" header. + // The value needs to be the htpasswd format. + // Reference to https://httpd.apache.org/docs/2.4/programs/htpasswd.html + config.core.v3.DataSource users = 1 [(udpa.annotations.sensitive) = true]; +} diff --git a/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto b/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto index bba080473fa2..9ea4703b6d71 100644 --- a/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto +++ b/api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto @@ -10,6 +10,8 @@ import "envoy/type/matcher/v3/metadata.proto"; import "envoy/type/matcher/v3/string.proto"; import "envoy/type/v3/http_status.proto"; +import "google/protobuf/wrappers.proto"; + import "envoy/annotations/deprecation.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; @@ -26,7 +28,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // External Authorization :ref:`configuration overview `. // [#extension: envoy.filters.http.ext_authz] -// [#next-free-field: 20] +// [#next-free-field: 23] message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ext_authz.v2.ExtAuthz"; @@ -91,7 +93,10 @@ message ExtAuthz { type.v3.HttpStatus status_on_error = 7; // Specifies a list of metadata namespaces whose values, if present, will be passed to the - // ext_authz service. :ref:`filter_metadata ` is passed as an opaque ``protobuf::Struct``. + // ext_authz service. The :ref:`filter_metadata ` + // is passed as an opaque ``protobuf::Struct``. + // + // Please note that this field exclusively applies to the gRPC ext_authz service and has no effect on the HTTP service. 
// // For example, if the ``jwt_authn`` filter is used and :ref:`payload_in_metadata // ` is set, @@ -105,13 +110,28 @@ message ExtAuthz { repeated string metadata_context_namespaces = 8; // Specifies a list of metadata namespaces whose values, if present, will be passed to the - // ext_authz service. :ref:`typed_filter_metadata ` is passed as an ``protobuf::Any``. + // ext_authz service. :ref:`typed_filter_metadata ` + // is passed as a ``protobuf::Any``. + // + // Please note that this field exclusively applies to the gRPC ext_authz service and has no effect on the HTTP service. // - // It works in a way similar to ``metadata_context_namespaces`` but allows envoy and external authz server to share the protobuf message definition - // in order to do a safe parsing. + // It works in a way similar to ``metadata_context_namespaces`` but allows Envoy and ext_authz server to share + // the protobuf message definition in order to do a safe parsing. + // repeated string typed_metadata_context_namespaces = 16; + // Specifies a list of route metadata namespaces whose values, if present, will be passed to the + // ext_authz service at :ref:`route_metadata_context ` in + // :ref:`CheckRequest `. + // :ref:`filter_metadata ` is passed as an opaque ``protobuf::Struct``. + repeated string route_metadata_context_namespaces = 21; + + // Specifies a list of route metadata namespaces whose values, if present, will be passed to the + // ext_authz service at :ref:`route_metadata_context ` in + // :ref:`CheckRequest `. + // :ref:`typed_filter_metadata ` is passed as a ``protobuf::Any``. + repeated string route_typed_metadata_context_namespaces = 22; + // Specifies if the filter is enabled. // // If :ref:`runtime_key ` is specified, @@ -191,6 +211,10 @@ message ExtAuthz { // When this field is true, Envoy will include the SNI name used for TLSClientHello, if available, in the // :ref:`tls_session`. bool include_tls_session = 18; + + // Whether to increment cluster statistics (e.g.
cluster..upstream_rq_*) on authorization failure. + // Defaults to true. + google.protobuf.BoolValue charge_cluster_response_stats = 20; } // Configuration for buffering the request data. diff --git a/api/envoy/extensions/tracers/opentelemetry/resource_detectors/v3/BUILD b/api/envoy/extensions/tracers/opentelemetry/resource_detectors/v3/BUILD new file mode 100644 index 000000000000..ee92fb652582 --- /dev/null +++ b/api/envoy/extensions/tracers/opentelemetry/resource_detectors/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/extensions/tracers/opentelemetry/resource_detectors/v3/environment_resource_detector.proto b/api/envoy/extensions/tracers/opentelemetry/resource_detectors/v3/environment_resource_detector.proto new file mode 100644 index 000000000000..df62fc2d9e42 --- /dev/null +++ b/api/envoy/extensions/tracers/opentelemetry/resource_detectors/v3/environment_resource_detector.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; + +package envoy.extensions.tracers.opentelemetry.resource_detectors.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.tracers.opentelemetry.resource_detectors.v3"; +option java_outer_classname = "EnvironmentResourceDetectorProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/tracers/opentelemetry/resource_detectors/v3;resource_detectorsv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Environment Resource Detector config] + +// Configuration for the Environment Resource detector extension. 
+// The resource detector reads from the ``OTEL_RESOURCE_ATTRIBUTES`` +// environment variable, as per the OpenTelemetry specification. +// +// See: +// +// `OpenTelemetry specification `_ +// +// [#extension: envoy.tracers.opentelemetry.resource_detectors.environment] +message EnvironmentResourceDetectorConfig { +} diff --git a/api/envoy/extensions/tracers/opentelemetry/samplers/v3/BUILD b/api/envoy/extensions/tracers/opentelemetry/samplers/v3/BUILD new file mode 100644 index 000000000000..ee92fb652582 --- /dev/null +++ b/api/envoy/extensions/tracers/opentelemetry/samplers/v3/BUILD @@ -0,0 +1,9 @@ +# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. + +load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") + +licenses(["notice"]) # Apache 2 + +api_proto_package( + deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], +) diff --git a/api/envoy/extensions/tracers/opentelemetry/samplers/v3/always_on_sampler.proto b/api/envoy/extensions/tracers/opentelemetry/samplers/v3/always_on_sampler.proto new file mode 100644 index 000000000000..241dc06eb1fc --- /dev/null +++ b/api/envoy/extensions/tracers/opentelemetry/samplers/v3/always_on_sampler.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; + +package envoy.extensions.tracers.opentelemetry.samplers.v3; + +import "udpa/annotations/status.proto"; + +option java_package = "io.envoyproxy.envoy.extensions.tracers.opentelemetry.samplers.v3"; +option java_outer_classname = "AlwaysOnSamplerProto"; +option java_multiple_files = true; +option go_package = "github.com/envoyproxy/go-control-plane/envoy/extensions/tracers/opentelemetry/samplers/v3;samplersv3"; +option (udpa.annotations.file_status).package_version_status = ACTIVE; + +// [#protodoc-title: Always On Sampler config] +// Configuration for the "AlwaysOn" Sampler extension. +// The sampler follows the "AlwaysOn" implementation from the OpenTelemetry +// SDK specification. 
+// +// See: +// `AlwaysOn sampler specification `_ +// [#extension: envoy.tracers.opentelemetry.samplers.always_on] + +message AlwaysOnSamplerConfig { +} diff --git a/api/envoy/service/auth/v3/attribute_context.proto b/api/envoy/service/auth/v3/attribute_context.proto index 77af84436de9..152672685bcc 100644 --- a/api/envoy/service/auth/v3/attribute_context.proto +++ b/api/envoy/service/auth/v3/attribute_context.proto @@ -38,7 +38,7 @@ option (udpa.annotations.file_status).package_version_status = ACTIVE; // - field mask to send // - which return values from request_context are copied back // - which return values are copied into request_headers] -// [#next-free-field: 13] +// [#next-free-field: 14] message AttributeContext { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.AttributeContext"; @@ -183,6 +183,9 @@ message AttributeContext { // Dynamic metadata associated with the request. config.core.v3.Metadata metadata_context = 11; + // Metadata associated with the selected route. + config.core.v3.Metadata route_metadata_context = 13; + // TLS session details of the underlying connection. // This is not populated by default and will be populated if ext_authz filter's // :ref:`include_tls_session ` is set to true. 
diff --git a/api/versioning/BUILD b/api/versioning/BUILD index 9f8638e33ee2..fe64655d843b 100644 --- a/api/versioning/BUILD +++ b/api/versioning/BUILD @@ -101,6 +101,7 @@ proto_library( "//envoy/extensions/filters/http/aws_lambda/v3:pkg", "//envoy/extensions/filters/http/aws_request_signing/v3:pkg", "//envoy/extensions/filters/http/bandwidth_limit/v3:pkg", + "//envoy/extensions/filters/http/basic_auth/v3:pkg", "//envoy/extensions/filters/http/buffer/v3:pkg", "//envoy/extensions/filters/http/cache/v3:pkg", "//envoy/extensions/filters/http/cdn_loop/v3:pkg", @@ -245,6 +246,8 @@ proto_library( "//envoy/extensions/stat_sinks/graphite_statsd/v3:pkg", "//envoy/extensions/stat_sinks/open_telemetry/v3:pkg", "//envoy/extensions/stat_sinks/wasm/v3:pkg", + "//envoy/extensions/tracers/opentelemetry/resource_detectors/v3:pkg", + "//envoy/extensions/tracers/opentelemetry/samplers/v3:pkg", "//envoy/extensions/transport_sockets/alts/v3:pkg", "//envoy/extensions/transport_sockets/http_11_proxy/v3:pkg", "//envoy/extensions/transport_sockets/internal_upstream/v3:pkg", diff --git a/bazel/repositories.bzl b/bazel/repositories.bzl index 044bf232ff1a..9f7ab5854d4e 100644 --- a/bazel/repositories.bzl +++ b/bazel/repositories.bzl @@ -287,6 +287,7 @@ def envoy_dependencies(skip_targets = []): _com_github_grpc_grpc() _com_github_unicode_org_icu() _com_github_intel_ipp_crypto_crypto_mb() + _com_github_intel_ipp_crypto_crypto_mb_fips() _com_github_intel_qatlib() _com_github_jbeder_yaml_cpp() _com_github_libevent_libevent() @@ -533,6 +534,23 @@ def _com_github_intel_ipp_crypto_crypto_mb(): build_file_content = BUILD_ALL_CONTENT, ) +def _com_github_intel_ipp_crypto_crypto_mb_fips(): + # Temporary fix for building ipp-crypto when boringssl-fips is used. + # Build will fail if bn2lebinpad patch is applied. Remove this archive + # when upstream dependency fixes this issue. 
+ external_http_archive( + name = "com_github_intel_ipp_crypto_crypto_mb_fips", + # Patch removes from CMakeLists.txt instructions + # to create dynamic *.so library target. Linker fails when linking + # with boringssl_fips library. Envoy uses only static library + # anyway, so the created dynamic library would not be used. + patches = ["@envoy//bazel/foreign_cc:ipp-crypto-skip-dynamic-lib.patch"], + patch_args = ["-p1"], + build_file_content = BUILD_ALL_CONTENT, + # Use existing ipp-crypto repository location name to avoid redefinition. + location_name = "com_github_intel_ipp_crypto_crypto_mb", + ) + def _com_github_intel_qatlib(): external_http_archive( name = "com_github_intel_qatlib", diff --git a/bazel/repository_locations.bzl b/bazel/repository_locations.bzl index cf6b50a3f8b0..d1faa45b75da 100644 --- a/bazel/repository_locations.bzl +++ b/bazel/repository_locations.bzl @@ -176,11 +176,11 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "Shellcheck rules for bazel", project_desc = "Now you do not need to depend on the system shellcheck version in your bazel-managed (mono)repos.", project_url = "https://github.com/aignas/rules_shellcheck", - version = "0.1.1", - sha256 = "4e7cc56d344d0adfd20283f7ad8cb4fba822c0b15ce122665b00dd87a27a74b6", + version = "0.2.4", + sha256 = "ce4d0e7a9beb1fb3f0d37424465060491a91dae68de1ef1c92ee57d94c773b46", strip_prefix = "rules_shellcheck-{version}", - urls = ["https://github.com/aignas/rules_shellcheck/archive/v{version}.tar.gz"], - release_date = "2022-05-30", + urls = ["https://github.com/aignas/rules_shellcheck/archive/{version}.tar.gz"], + release_date = "2023-10-27", use_category = ["build"], cpe = "N/A", license = "MIT", @@ -679,7 +679,7 @@ REPOSITORY_LOCATIONS_SPEC = dict( release_date = "2023-08-31", cpe = "cpe:2.3:a:google:brotli:*", license = "MIT", - license_url = "https://github.com/google/brotli/blob/{version}/LICENSE", + license_url = 
"https://github.com/google/brotli/blob/v{version}/LICENSE", ), com_github_facebook_zstd = dict( project_name = "zstd", @@ -1123,12 +1123,12 @@ REPOSITORY_LOCATIONS_SPEC = dict( project_name = "QUICHE", project_desc = "QUICHE (QUIC, HTTP/2, Etc) is Google‘s implementation of QUIC and related protocols", project_url = "https://github.com/google/quiche", - version = "30c4298fbadc820dbbbf7721c72b279722856930", - sha256 = "ec26667dd7e0d6e22d2d7f34d5310a5fd18da6449488f4aab24b3f2a89cff795", + version = "acfc063373a7c3d691d34fa87678bb368c07b15e", + sha256 = "668b86b0645384234a34c46cc5d2a03fc9b86111117e9ccd5081170d7d2f3cc6", urls = ["https://github.com/google/quiche/archive/{version}.tar.gz"], strip_prefix = "quiche-{version}", use_category = ["controlplane", "dataplane_core"], - release_date = "2023-10-18", + release_date = "2023-10-24", cpe = "N/A", license = "BSD-3-Clause", license_url = "https://github.com/google/quiche/blob/{version}/LICENSE", diff --git a/changelogs/1.25.11.yaml b/changelogs/1.25.11.yaml new file mode 100644 index 000000000000..4beae10fad69 --- /dev/null +++ b/changelogs/1.25.11.yaml @@ -0,0 +1,7 @@ +date: October 16, 2023 + +bug_fixes: +- area: http + change: | + Fixed a bug where processing of deferred streams with the value of ``http.max_requests_per_io_cycle`` more than 1, + can cause a crash. diff --git a/changelogs/1.26.6.yaml b/changelogs/1.26.6.yaml new file mode 100644 index 000000000000..a5caeaa72fa5 --- /dev/null +++ b/changelogs/1.26.6.yaml @@ -0,0 +1,10 @@ +date: October 17, 2023 + +bug_fixes: +- area: tracing + change: | + Fixed a bug in the Datadog tracer where Datadog's "operation name" field would contain what should be in the "resource name" field. +- area: http + change: | + Fixed a bug where processing of deferred streams with the value of ``http.max_requests_per_io_cycle`` more than 1, + can cause a crash. 
diff --git a/changelogs/1.27.2.yaml b/changelogs/1.27.2.yaml new file mode 100644 index 000000000000..91d3633c0154 --- /dev/null +++ b/changelogs/1.27.2.yaml @@ -0,0 +1,10 @@ +date: October 16, 2023 + +bug_fixes: +- area: tracing + change: | + Fixed a bug in the Datadog tracer where Datadog's "operation name" field would contain what should be in the "resource name" field. +- area: http + change: | + Fixed a bug where processing of deferred streams with the value of ``http.max_requests_per_io_cycle`` more than 1, + can cause a crash. diff --git a/changelogs/current.yaml b/changelogs/current.yaml index cb74cc6886e8..ef01b2c786be 100644 --- a/changelogs/current.yaml +++ b/changelogs/current.yaml @@ -9,6 +9,10 @@ behavior_changes: the flag to false explicitly. See doc :ref:`Http filter route specific config ` or issue https://github.com/envoyproxy/envoy/issues/29461 for more specific detail and examples. +- area: listener + change: | + undeprecated runtime key ``overload.global_downstream_max_connections`` until :ref:`downstream connections monitor + ` extension becomes stable. minor_behavior_changes: - area: upstream @@ -23,6 +27,11 @@ minor_behavior_changes: Added new configuration field :ref:`rate_limited_as_resource_exhausted ` to allow for setting if rate limit grpc response should be RESOURCE_EXHAUSTED instead of the default UNAVAILABLE. +- area: http2 + change: | + Flip the runtime guard ``envoy.reloadable_features.defer_processing_backedup_streams`` to be on by default. + This feature improves flow control within the proxy by deferring work on the receiving end if the other + end is backed up. bug_fixes: # *Changes expected to improve the state of the world and are unlikely to have negative effects* @@ -36,6 +45,13 @@ bug_fixes: - area: grpc change: | Fixed a bug in gRPC async client cache which intermittently causes CPU spikes due to busy loop in timer expiration. 
+- area: quic + change: | + Fixed a bug in QUIC and HCM interaction which could cause use-after-free during asynchronous certificates retrieval. + The fix is guarded by runtime ``envoy.reloadable_features.quic_fix_filter_manager_uaf``. +- area: redis + change: | + Fixed a bug causing crash if incoming redis key does not match against a prefix_route and catch_all_route is not defined. removed_config_or_runtime: # *Normally occurs at the end of the* :ref:`deprecation period ` @@ -60,8 +76,16 @@ removed_config_or_runtime: change: | Removed the deprecated ``envoy.reloadable_features.service_sanitize_non_utf8_strings`` runtime flag and legacy code path. +- area: access log + change: | + Removed the deprecated ``envoy.reloadable_features.format_ports_as_numbers`` + runtime flag and legacy code path. new_features: +- area: filters + change: | + Added :ref:`the Basic Auth filter `, which can be used to + authenticate user credentials in the HTTP Authentication header defined in `RFC7617 `_. - area: upstream change: | Added :ref:`enable_full_scan ` @@ -73,7 +97,6 @@ new_features: change: | added :ref:`per_endpoint_stats ` to get some metrics for each endpoint in a cluster. - - area: jwt change: | The jwt filter can now serialize non-primitive custom claims when maping claims to headers. @@ -90,5 +113,26 @@ new_features: returns an error or cannot be reached with :ref:`status_on_error ` configuration flag. +- area: tracing + change: | + Added support for configuring resource detectors on the OpenTelemetry tracer. +- area: tracing + change: | + Added support to configure a sampler for the OpenTelemetry tracer. +- area: ext_authz + change: | + New config parameter :ref:`charge_cluster_response_stats + ` + for not incrementing cluster statistics on ext_authz response. Default true, no behavior change.
+- area: ext_authz + change: | + forward :ref:`filter_metadata ` selected by + :ref:`route_metadata_context_namespaces + ` + and :ref:`typed_filter_metadata ` selected by + :ref:`route_typed_metadata_context_namespaces + ` + from the metadata of the selected route to external auth service. + This metadata propagation is independent from the dynamic metadata from connection and request. deprecated: diff --git a/ci/build_setup.sh b/ci/build_setup.sh index ccfa25aa3121..c56a6eb746a7 100755 --- a/ci/build_setup.sh +++ b/ci/build_setup.sh @@ -80,12 +80,15 @@ function setup_clang_toolchain() { echo "clang toolchain with ${ENVOY_STDLIB} configured" } -export BUILD_DIR=${BUILD_DIR:-/build} -if [[ ! -d "${BUILD_DIR}" ]] -then - echo "${BUILD_DIR} mount missing - did you forget -v :${BUILD_DIR}? Creating." - mkdir -p "${BUILD_DIR}" +if [[ -z "${BUILD_DIR}" ]]; then + echo "BUILD_DIR not set - defaulting to ~/.cache/envoy-bazel" >&2 + BUILD_DIR="${HOME}/.cache/envoy-bazel" fi +if [[ ! -d "${BUILD_DIR}" ]]; then + echo "${BUILD_DIR} missing - Creating." >&2 + mkdir -p "${BUILD_DIR}" +fi +export BUILD_DIR # Environment setup. export ENVOY_TEST_TMPDIR="${ENVOY_TEST_TMPDIR:-$BUILD_DIR/tmp}" diff --git a/ci/do_ci.sh b/ci/do_ci.sh index fb008e6ed362..fc25e25c4068 100755 --- a/ci/do_ci.sh +++ b/ci/do_ci.sh @@ -163,7 +163,7 @@ function bazel_binary_build() { # The COMPILE_TYPE variable is redundant in this case and is only here for # readability. It is already set in the .bazelrc config for sizeopt. 
COMPILE_TYPE="opt" - CONFIG_ARGS="--config=sizeopt" + CONFIG_ARGS=("--config=sizeopt") elif [[ "${BINARY_TYPE}" == "fastbuild" ]]; then COMPILE_TYPE="fastbuild" fi @@ -181,7 +181,7 @@ function bazel_binary_build() { # This is a workaround for https://github.com/bazelbuild/bazel/issues/11834 [[ -n "${ENVOY_RBE}" ]] && rm -rf bazel-bin/"${ENVOY_BIN}"* - bazel build "${BAZEL_BUILD_OPTIONS[@]}" --remote_download_toplevel -c "${COMPILE_TYPE}" "${BUILD_TARGET}" ${CONFIG_ARGS} + bazel build "${BAZEL_BUILD_OPTIONS[@]}" --remote_download_toplevel -c "${COMPILE_TYPE}" "${BUILD_TARGET}" "${CONFIG_ARGS[@]}" collect_build_profile "${BINARY_TYPE}"_build # Copy the built envoy binary somewhere that we can access outside of the @@ -191,14 +191,14 @@ function bazel_binary_build() { if [[ "${COMPILE_TYPE}" == "dbg" || "${COMPILE_TYPE}" == "opt" ]]; then # Generate dwp file for debugging since we used split DWARF to reduce binary # size - bazel build "${BAZEL_BUILD_OPTIONS[@]}" --remote_download_toplevel -c "${COMPILE_TYPE}" "${BUILD_DEBUG_INFORMATION}" ${CONFIG_ARGS} + bazel build "${BAZEL_BUILD_OPTIONS[@]}" --remote_download_toplevel -c "${COMPILE_TYPE}" "${BUILD_DEBUG_INFORMATION}" "${CONFIG_ARGS[@]}" # Copy the debug information cp -f bazel-bin/"${ENVOY_BIN}".dwp "${FINAL_DELIVERY_DIR}"/envoy.dwp fi # Validation tools for the tools image. 
bazel build "${BAZEL_BUILD_OPTIONS[@]}" --remote_download_toplevel -c "${COMPILE_TYPE}" \ - //test/tools/schema_validator:schema_validator_tool ${CONFIG_ARGS} + //test/tools/schema_validator:schema_validator_tool "${CONFIG_ARGS[@]}" # Build su-exec utility bazel build "${BAZEL_BUILD_OPTIONS[@]}" --remote_download_toplevel -c "${COMPILE_TYPE}" external:su-exec @@ -514,6 +514,7 @@ case $CI_TARGET in TODAY_DATE=$(date -u -I"date") export TODAY_DATE bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/dependency:check \ + --//tools/dependency:preload_cve_data \ --action_env=TODAY_DATE \ -- -v warn \ -c cves release_dates releases @@ -788,6 +789,7 @@ case $CI_TARGET in setup_clang_toolchain BUILD_SHA="$(git rev-parse HEAD)" ENVOY_COMMIT="${ENVOY_COMMIT:-${BUILD_SHA}}" + ENVOY_REPO="${ENVOY_REPO:-envoyproxy/envoy}" VERSION_DEV="$(cut -d- -f2 < VERSION.txt)" PUBLISH_ARGS=( --publish-commitish="$ENVOY_COMMIT" @@ -798,7 +800,8 @@ case $CI_TARGET in fi bazel run "${BAZEL_BUILD_OPTIONS[@]}" \ @envoy_repo//:publish \ - -- "${PUBLISH_ARGS[@]}" + -- --repo="$ENVOY_REPO" \ + "${PUBLISH_ARGS[@]}" ;; release|release.server_only) diff --git a/ci/mac_ci_steps.sh b/ci/mac_ci_steps.sh index dc779a665c71..2a83986768f9 100755 --- a/ci/mac_ci_steps.sh +++ b/ci/mac_ci_steps.sh @@ -2,15 +2,6 @@ set -e -function finish { - echo "disk space at end of build:" - df -h -} -trap finish EXIT - -echo "disk space at beginning of build:" -df -h - read -ra BAZEL_BUILD_EXTRA_OPTIONS <<< "${BAZEL_BUILD_EXTRA_OPTIONS:-}" read -ra BAZEL_EXTRA_TEST_OPTIONS <<< "${BAZEL_EXTRA_TEST_OPTIONS:-}" diff --git a/ci/run_envoy_docker.sh b/ci/run_envoy_docker.sh index 33a667d4c913..8e4e0b6d2e54 100755 --- a/ci/run_envoy_docker.sh +++ b/ci/run_envoy_docker.sh @@ -91,6 +91,10 @@ VOLUMES=( -v "${ENVOY_DOCKER_BUILD_DIR}":"${BUILD_DIR_MOUNT_DEST}" -v "${SOURCE_DIR}":"${SOURCE_DIR_MOUNT_DEST}") +if ! 
is_windows; then + export BUILD_DIR="${BUILD_DIR_MOUNT_DEST}" +fi + if [[ -n "$ENVOY_DOCKER_IN_DOCKER" || -n "$ENVOY_SHARED_TMP_DIR" ]]; then # Create a "shared" directory that has the same path in/outside the container # This allows the host docker engine to see artefacts using a temporary path created inside the container, @@ -111,6 +115,7 @@ fi docker run --rm \ "${ENVOY_DOCKER_OPTIONS[@]}" \ "${VOLUMES[@]}" \ + -e BUILD_DIR \ -e HTTP_PROXY \ -e HTTPS_PROXY \ -e NO_PROXY \ diff --git a/ci/windows_ci_steps.sh b/ci/windows_ci_steps.sh index c16d7392602a..8881be13dc99 100755 --- a/ci/windows_ci_steps.sh +++ b/ci/windows_ci_steps.sh @@ -74,13 +74,13 @@ fi if [[ $1 == "//source/exe:envoy-static" ]]; then BUILD_ENVOY_STATIC=1 shift - TEST_TARGETS=$* + TEST_TARGETS=("${@}") elif [[ $# -gt 0 ]]; then BUILD_ENVOY_STATIC=0 - TEST_TARGETS=$* + TEST_TARGETS=("$@") else BUILD_ENVOY_STATIC=1 - TEST_TARGETS='//test/...' + TEST_TARGETS=('//test/...') fi # Complete envoy-static build @@ -97,8 +97,8 @@ if [[ $BUILD_ENVOY_STATIC -eq 1 ]]; then fi # Test invocations of known-working tests on Windows -if [[ $TEST_TARGETS == "//test/..." ]]; then - bazel "${BAZEL_STARTUP_OPTIONS[@]}" test "${BAZEL_BUILD_OPTIONS[@]}" $TEST_TARGETS --test_tag_filters=-skip_on_windows,-fails_on_${FAIL_GROUP} --build_tests_only +if [[ "${TEST_TARGETS[*]}" == "//test/..." ]]; then + bazel "${BAZEL_STARTUP_OPTIONS[@]}" test "${BAZEL_BUILD_OPTIONS[@]}" "${TEST_TARGETS[@]}" --test_tag_filters=-skip_on_windows,-fails_on_${FAIL_GROUP} --build_tests_only # Build tests that are known flaky or failing to ensure no compilation regressions bazel "${BAZEL_STARTUP_OPTIONS[@]}" build "${BAZEL_BUILD_OPTIONS[@]}" //test/... --test_tag_filters=fails_on_${FAIL_GROUP} --build_tests_only @@ -108,8 +108,8 @@ if [[ $TEST_TARGETS == "//test/..." ]]; then # not triggered by envoy-static or //test/... 
targets and not deliberately tagged skip_on_windows bazel "${BAZEL_STARTUP_OPTIONS[@]}" build "${BAZEL_BUILD_OPTIONS[@]}" //bazel/... --build_tag_filters=-skip_on_windows fi -elif [[ -n "$TEST_TARGETS" ]]; then - bazel "${BAZEL_STARTUP_OPTIONS[@]}" test "${BAZEL_BUILD_OPTIONS[@]}" $TEST_TARGETS --build_tests_only +elif [[ -n "${TEST_TARGETS[*]}" ]]; then + bazel "${BAZEL_STARTUP_OPTIONS[@]}" test "${BAZEL_BUILD_OPTIONS[@]}" "${TEST_TARGETS[@]}" --build_tests_only fi # Summarize known unbuildable or inapplicable tests (example) diff --git a/contrib/contrib_build_config.bzl b/contrib/contrib_build_config.bzl index f2cdaea8bfdc..11109e3878dd 100644 --- a/contrib/contrib_build_config.bzl +++ b/contrib/contrib_build_config.bzl @@ -15,7 +15,7 @@ CONTRIB_EXTENSIONS = { # "envoy.filters.network.client_ssl_auth": "//contrib/client_ssl_auth/filters/network/source:config", - "envoy.filters.network.kafka_broker": "//contrib/kafka/filters/network/source:kafka_broker_config_lib", + "envoy.filters.network.kafka_broker": "//contrib/kafka/filters/network/source/broker:config_lib", "envoy.filters.network.kafka_mesh": "//contrib/kafka/filters/network/source/mesh:config_lib", "envoy.filters.network.mysql_proxy": "//contrib/mysql_proxy/filters/network/source:config", "envoy.filters.network.postgres_proxy": "//contrib/postgres_proxy/filters/network/source:config", diff --git a/contrib/cryptomb/private_key_providers/source/BUILD b/contrib/cryptomb/private_key_providers/source/BUILD index 199acb8fe411..610b66513373 100644 --- a/contrib/cryptomb/private_key_providers/source/BUILD +++ b/contrib/cryptomb/private_key_providers/source/BUILD @@ -22,7 +22,10 @@ envoy_cmake( defines = [ "OPENSSL_USE_STATIC_LIBS=TRUE", ], - lib_source = "@com_github_intel_ipp_crypto_crypto_mb//:all", + lib_source = select({ + "//bazel:boringssl_fips": "@com_github_intel_ipp_crypto_crypto_mb_fips//:all", + "//conditions:default": "@com_github_intel_ipp_crypto_crypto_mb//:all", + }), out_static_libs = 
["libcrypto_mb.a"], tags = ["skip_on_windows"], target_compatible_with = envoy_contrib_linux_x86_64_constraints(), diff --git a/contrib/generic_proxy/filters/network/source/BUILD b/contrib/generic_proxy/filters/network/source/BUILD index e46ae6c4ba10..0699a6ff6add 100644 --- a/contrib/generic_proxy/filters/network/source/BUILD +++ b/contrib/generic_proxy/filters/network/source/BUILD @@ -171,4 +171,6 @@ envoy_cc_library( ":file_access_log_lib", "//contrib/generic_proxy/filters/network/source/interface:stream_interface", ], + # Ensure this factory in the source is always linked in. + alwayslink = 1, ) diff --git a/contrib/generic_proxy/filters/network/test/proxy_test.cc b/contrib/generic_proxy/filters/network/test/proxy_test.cc index 779d1f9980b5..0ff8359ce145 100644 --- a/contrib/generic_proxy/filters/network/test/proxy_test.cc +++ b/contrib/generic_proxy/filters/network/test/proxy_test.cc @@ -764,8 +764,6 @@ TEST_F(FilterTest, NewStreamAndReplyNormallyWithMultipleFrames) { EXPECT_EQ(1, filter_->activeStreamsForTest().size()); EXPECT_EQ(0, filter_->frameHandlersForTest().size()); - std::cout << "OK decoding" << std::endl; - auto active_stream = filter_->activeStreamsForTest().begin()->get(); EXPECT_CALL( @@ -913,6 +911,8 @@ TEST_F(FilterTest, TestStats) { auto active_stream = filter_->activeStreamsForTest().begin()->get(); Buffer::OwnedImpl buffer; buffer.add("123"); + // Mock response. 
+ active_stream->onResponseStart(std::make_unique()); active_stream->onEncodingSuccess(buffer, true); EXPECT_EQ(1, filter_config_->stats().response_.value()); EXPECT_EQ(0, filter_config_->stats().request_active_.value()); diff --git a/contrib/kafka/filters/network/source/BUILD b/contrib/kafka/filters/network/source/BUILD index ec50a777c50d..a7e075125bfe 100644 --- a/contrib/kafka/filters/network/source/BUILD +++ b/contrib/kafka/filters/network/source/BUILD @@ -2,7 +2,6 @@ load("@base_pip3//:requirements.bzl", "requirement") load("@rules_python//python:defs.bzl", "py_binary", "py_library") load( "//bazel:envoy_build_system.bzl", - "envoy_cc_contrib_extension", "envoy_cc_library", "envoy_contrib_package", ) @@ -11,39 +10,7 @@ licenses(["notice"]) # Apache 2 envoy_contrib_package() -# Kafka network filter. -# Broker filter public docs: https://envoyproxy.io/docs/envoy/latest/configuration/listeners/network_filters/kafka_broker_filter - -envoy_cc_contrib_extension( - name = "kafka_broker_config_lib", - srcs = ["broker/config.cc"], - hdrs = ["broker/config.h"], - deps = [ - ":kafka_broker_filter_lib", - "//source/extensions/filters/network:well_known_names", - "//source/extensions/filters/network/common:factory_base_lib", - "@envoy_api//contrib/envoy/extensions/filters/network/kafka_broker/v3:pkg_cc_proto", - ], -) - -envoy_cc_library( - name = "kafka_broker_filter_lib", - srcs = ["broker/filter.cc"], - hdrs = [ - "broker/filter.h", - "external/request_metrics.h", - "external/response_metrics.h", - ], - deps = [ - ":kafka_request_codec_lib", - ":kafka_response_codec_lib", - "//envoy/buffer:buffer_interface", - "//envoy/network:connection_interface", - "//envoy/network:filter_interface", - "//source/common/common:assert_lib", - "//source/common/common:minimal_logger_lib", - ], -) +# Common code for Kafka filters (Kafka type abstractions, protocol, metrics, etc.). 
envoy_cc_library( name = "abstract_codec_lib", @@ -201,6 +168,16 @@ py_library( srcs = ["protocol/generator.py"], ) +envoy_cc_library( + name = "kafka_metrics_lib", + hdrs = [ + "external/request_metrics.h", + "external/response_metrics.h", + ], + deps = [ + ], +) + envoy_cc_library( name = "parser_lib", hdrs = ["parser.h"], diff --git a/contrib/kafka/filters/network/source/broker/BUILD b/contrib/kafka/filters/network/source/broker/BUILD new file mode 100644 index 000000000000..6af3b702e8b7 --- /dev/null +++ b/contrib/kafka/filters/network/source/broker/BUILD @@ -0,0 +1,71 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_contrib_extension", + "envoy_cc_library", + "envoy_contrib_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_contrib_package() + +# Kafka-broker network filter. +# Broker filter public docs: https://envoyproxy.io/docs/envoy/latest/configuration/listeners/network_filters/kafka_broker_filter + +envoy_cc_contrib_extension( + name = "config_lib", + srcs = ["config.cc"], + hdrs = ["config.h"], + deps = [ + ":filter_config_lib", + ":filter_lib", + "//source/extensions/filters/network:well_known_names", + "//source/extensions/filters/network/common:factory_base_lib", + "@envoy_api//contrib/envoy/extensions/filters/network/kafka_broker/v3:pkg_cc_proto", + ], +) + +envoy_cc_library( + name = "filter_config_lib", + srcs = [], + hdrs = [ + "filter_config.h", + ], + deps = [ + "@envoy_api//contrib/envoy/extensions/filters/network/kafka_broker/v3:pkg_cc_proto", + ], +) + +envoy_cc_library( + name = "filter_lib", + srcs = ["filter.cc"], + hdrs = [ + "filter.h", + ], + deps = [ + ":filter_config_lib", + ":rewriter_lib", + "//contrib/kafka/filters/network/source:kafka_metrics_lib", + "//contrib/kafka/filters/network/source:kafka_request_codec_lib", + "//contrib/kafka/filters/network/source:kafka_response_codec_lib", + "//envoy/buffer:buffer_interface", + "//envoy/network:connection_interface", + "//envoy/network:filter_interface", + 
"//source/common/common:assert_lib", + "//source/common/common:minimal_logger_lib", + ], +) + +envoy_cc_library( + name = "rewriter_lib", + srcs = ["rewriter.cc"], + hdrs = [ + "rewriter.h", + ], + deps = [ + ":filter_config_lib", + "//contrib/kafka/filters/network/source:kafka_response_codec_lib", + "//envoy/buffer:buffer_interface", + "//source/common/common:minimal_logger_lib", + ], +) diff --git a/contrib/kafka/filters/network/source/broker/config.cc b/contrib/kafka/filters/network/source/broker/config.cc index 459ce5fd20d8..532407a67480 100644 --- a/contrib/kafka/filters/network/source/broker/config.cc +++ b/contrib/kafka/filters/network/source/broker/config.cc @@ -5,6 +5,7 @@ #include "envoy/stats/scope.h" #include "contrib/kafka/filters/network/source/broker/filter.h" +#include "contrib/kafka/filters/network/source/broker/filter_config.h" namespace Envoy { namespace Extensions { @@ -15,13 +16,10 @@ namespace Broker { Network::FilterFactoryCb KafkaConfigFactory::createFilterFactoryFromProtoTyped( const KafkaBrokerProtoConfig& proto_config, Server::Configuration::FactoryContext& context) { - ASSERT(!proto_config.stat_prefix().empty()); - - const std::string& stat_prefix = proto_config.stat_prefix(); - - return [&context, stat_prefix](Network::FilterManager& filter_manager) -> void { + const BrokerFilterConfig filter_config{proto_config}; + return [&context, filter_config](Network::FilterManager& filter_manager) -> void { Network::FilterSharedPtr filter = - std::make_shared(context.scope(), context.timeSource(), stat_prefix); + std::make_shared(context.scope(), context.timeSource(), filter_config); filter_manager.addFilter(filter); }; } diff --git a/contrib/kafka/filters/network/source/broker/filter.cc b/contrib/kafka/filters/network/source/broker/filter.cc index 855226780ebd..8e7ba9a299cd 100644 --- a/contrib/kafka/filters/network/source/broker/filter.cc +++ b/contrib/kafka/filters/network/source/broker/filter.cc @@ -70,19 +70,23 @@ absl::flat_hash_map& 
KafkaMetricsFacadeImpl::getRequestA } KafkaBrokerFilter::KafkaBrokerFilter(Stats::Scope& scope, TimeSource& time_source, - const std::string& stat_prefix) - : KafkaBrokerFilter{ - std::make_shared(scope, time_source, stat_prefix)} {}; - -KafkaBrokerFilter::KafkaBrokerFilter(const KafkaMetricsFacadeSharedPtr& metrics) - : metrics_{metrics}, response_decoder_{new ResponseDecoder({metrics})}, + const BrokerFilterConfig& filter_config) + : KafkaBrokerFilter{filter_config, std::make_shared( + scope, time_source, filter_config.stat_prefix_)} {}; + +KafkaBrokerFilter::KafkaBrokerFilter(const BrokerFilterConfig& filter_config, + const KafkaMetricsFacadeSharedPtr& metrics) + : metrics_{metrics}, response_rewriter_{createRewriter(filter_config)}, + response_decoder_{new ResponseDecoder({metrics, response_rewriter_})}, request_decoder_{ new RequestDecoder({std::make_shared(*response_decoder_), metrics})} {}; KafkaBrokerFilter::KafkaBrokerFilter(KafkaMetricsFacadeSharedPtr metrics, + ResponseRewriterSharedPtr response_rewriter, ResponseDecoderSharedPtr response_decoder, RequestDecoderSharedPtr request_decoder) - : metrics_{metrics}, response_decoder_{response_decoder}, request_decoder_{request_decoder} {}; + : metrics_{metrics}, response_rewriter_{response_rewriter}, response_decoder_{response_decoder}, + request_decoder_{request_decoder} {}; Network::FilterStatus KafkaBrokerFilter::onNewConnection() { return Network::FilterStatus::Continue; @@ -107,6 +111,7 @@ Network::FilterStatus KafkaBrokerFilter::onWrite(Buffer::Instance& data, bool) { ENVOY_LOG(trace, "data from Kafka broker [{} response bytes]", data.length()); try { response_decoder_->onData(data); + response_rewriter_->process(data); return Network::FilterStatus::Continue; } catch (const EnvoyException& e) { ENVOY_LOG(debug, "could not process data from Kafka broker: {}", e.what()); diff --git a/contrib/kafka/filters/network/source/broker/filter.h b/contrib/kafka/filters/network/source/broker/filter.h index 
207115838000..60d88b3d2cfa 100644 --- a/contrib/kafka/filters/network/source/broker/filter.h +++ b/contrib/kafka/filters/network/source/broker/filter.h @@ -6,6 +6,8 @@ #include "source/common/common/logger.h" #include "absl/container/flat_hash_map.h" +#include "contrib/kafka/filters/network/source/broker/filter_config.h" +#include "contrib/kafka/filters/network/source/broker/rewriter.h" #include "contrib/kafka/filters/network/source/external/request_metrics.h" #include "contrib/kafka/filters/network/source/external/response_metrics.h" #include "contrib/kafka/filters/network/source/parser.h" @@ -111,7 +113,8 @@ class KafkaMetricsFacadeImpl : public KafkaMetricsFacade { /** * Implementation of Kafka broker-level filter. * Uses two decoders - request and response ones, that are connected using Forwarder instance. - * There's also a KafkaMetricsFacade, that is listening on codec events. + * KafkaMetricsFacade is listening for both request/response events to keep metrics. + * ResponseRewriter is listening for response events to capture and rewrite them if needed. 
* * +---------------------------------------------------+ * | | @@ -123,13 +126,18 @@ class KafkaMetricsFacadeImpl : public KafkaMetricsFacade { * | | v v v * +------+---+------+ +----+----+ +---------+---+----+ * |KafkaBrokerFilter| |Forwarder| |KafkaMetricsFacade| - * +----------+------+ +----+----+ +---------+--------+ - * | | ^ - * | | | - * | v | - * | +-------+-------+ | - * +---------->+ResponseDecoder+---------------+ - * +---------------+ + * +------+---+------+ +----+----+ +---------+--------+ + * | | | ^ + * | | | | + * | | v | + * | | +-------+-------+ | + * | +---------->+ResponseDecoder+---------------+ + * | +-------+-------+ + * | | + * | v + * | +-------+--------+ + * +-------------->+ResponseRewriter+ + * +----------------+ */ class KafkaBrokerFilter : public Network::Filter, private Logger::Loggable { public: @@ -138,12 +146,15 @@ class KafkaBrokerFilter : public Network::Filter, private Logger::Loggable(); + } else { + return std::make_shared(); + } +} + +} // namespace Broker +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/source/broker/rewriter.h b/contrib/kafka/filters/network/source/broker/rewriter.h new file mode 100644 index 000000000000..bde1f6627575 --- /dev/null +++ b/contrib/kafka/filters/network/source/broker/rewriter.h @@ -0,0 +1,76 @@ +#pragma once + +#include + +#include "envoy/buffer/buffer.h" + +#include "source/common/common/logger.h" + +#include "contrib/kafka/filters/network/source/broker/filter_config.h" +#include "contrib/kafka/filters/network/source/response_codec.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Broker { + +/** + * Responsible for modifying any outbound requests. + */ +class ResponseRewriter : public ResponseCallback { +public: + virtual ~ResponseRewriter() = default; + + /** + * Performs any desired payload changes. 
+ * @param buffer buffer with the original data from upstream + */ + virtual void process(Buffer::Instance& buffer) PURE; +}; + +using ResponseRewriterSharedPtr = std::shared_ptr; + +/** + * Uses captured response objects instead of original data. + * Entry point for any response payload changes. + */ +class ResponseRewriterImpl : public ResponseRewriter, private Logger::Loggable { +public: + // ResponseCallback + void onMessage(AbstractResponseSharedPtr response) override; + void onFailedParse(ResponseMetadataSharedPtr parse_failure) override; + + // ResponseRewriter + void process(Buffer::Instance& buffer) override; + + size_t getStoredResponseCountForTest() const; + +private: + std::vector responses_to_rewrite_; +}; + +/** + * Does nothing, letting the data from upstream pass without any changes. + * It allows us to avoid the unnecessary deserialization-then-serialization steps. + */ +class DoNothingRewriter : public ResponseRewriter { +public: + // ResponseCallback + void onMessage(AbstractResponseSharedPtr response) override; + void onFailedParse(ResponseMetadataSharedPtr parse_failure) override; + + // ResponseRewriter + void process(Buffer::Instance& buffer) override; +}; + +/** + * Factory method that creates a rewriter depending on configuration. 
+ */ +ResponseRewriterSharedPtr createRewriter(const BrokerFilterConfig& config); + +} // namespace Broker +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/test/BUILD b/contrib/kafka/filters/network/test/BUILD index f7bf15eba151..a06c076e904c 100644 --- a/contrib/kafka/filters/network/test/BUILD +++ b/contrib/kafka/filters/network/test/BUILD @@ -247,7 +247,7 @@ envoy_cc_test( srcs = ["metrics_integration_test.cc"], deps = [ ":message_utilities", - "//contrib/kafka/filters/network/source:kafka_broker_filter_lib", + "//contrib/kafka/filters/network/source:kafka_metrics_lib", "//test/common/stats:stat_test_utility_lib", ], ) diff --git a/contrib/kafka/filters/network/test/broker/BUILD b/contrib/kafka/filters/network/test/broker/BUILD index 1edda5875b4d..c7b20d851cf3 100644 --- a/contrib/kafka/filters/network/test/broker/BUILD +++ b/contrib/kafka/filters/network/test/broker/BUILD @@ -12,7 +12,7 @@ envoy_cc_test( name = "config_unit_test", srcs = ["config_unit_test.cc"], deps = [ - "//contrib/kafka/filters/network/source:kafka_broker_config_lib", + "//contrib/kafka/filters/network/source/broker:config_lib", "//test/mocks/server:factory_context_mocks", ], ) @@ -21,7 +21,7 @@ envoy_cc_test( name = "filter_unit_test", srcs = ["filter_unit_test.cc"], deps = [ - "//contrib/kafka/filters/network/source:kafka_broker_filter_lib", + "//contrib/kafka/filters/network/source/broker:filter_lib", "//envoy/event:timer_interface", "//test/mocks/network:network_mocks", "//test/mocks/stats:stats_mocks", @@ -32,10 +32,19 @@ envoy_cc_test( name = "filter_protocol_test", srcs = ["filter_protocol_test.cc"], deps = [ - "//contrib/kafka/filters/network/source:kafka_broker_filter_lib", + "//contrib/kafka/filters/network/source/broker:filter_lib", "//contrib/kafka/filters/network/test:buffer_based_test_lib", "//contrib/kafka/filters/network/test:message_utilities", 
"//test/common/stats:stat_test_utility_lib", "//test/test_common:test_time_lib", ], ) + +envoy_cc_test( + name = "rewriter_unit_test", + srcs = ["rewriter_unit_test.cc"], + deps = [ + "//contrib/kafka/filters/network/source/broker:rewriter_lib", + "//source/common/buffer:buffer_lib", + ], +) diff --git a/contrib/kafka/filters/network/test/broker/filter_protocol_test.cc b/contrib/kafka/filters/network/test/broker/filter_protocol_test.cc index 54c9e915ef24..9b5bf4276e0a 100644 --- a/contrib/kafka/filters/network/test/broker/filter_protocol_test.cc +++ b/contrib/kafka/filters/network/test/broker/filter_protocol_test.cc @@ -35,7 +35,7 @@ class KafkaBrokerFilterProtocolTest : public testing::Test, Stats::TestUtil::TestStore store_; Stats::Scope& scope_{*store_.rootScope()}; Event::TestRealTimeSystem time_source_; - KafkaBrokerFilter testee_{scope_, time_source_, "prefix"}; + KafkaBrokerFilter testee_{scope_, time_source_, {"prefix", false}}; Network::FilterStatus consumeRequestFromBuffer() { return testee_.onData(RequestB::buffer_, false); diff --git a/contrib/kafka/filters/network/test/broker/filter_unit_test.cc b/contrib/kafka/filters/network/test/broker/filter_unit_test.cc index a91316250db8..0b272e713dd0 100644 --- a/contrib/kafka/filters/network/test/broker/filter_unit_test.cc +++ b/contrib/kafka/filters/network/test/broker/filter_unit_test.cc @@ -4,6 +4,7 @@ #include "test/mocks/stats/mocks.h" #include "contrib/kafka/filters/network/source/broker/filter.h" +#include "contrib/kafka/filters/network/source/broker/filter_config.h" #include "contrib/kafka/filters/network/source/external/requests.h" #include "gmock/gmock.h" #include "gtest/gtest.h" @@ -32,6 +33,15 @@ class MockKafkaMetricsFacade : public KafkaMetricsFacade { using MockKafkaMetricsFacadeSharedPtr = std::shared_ptr; +class MockResponseRewriter : public ResponseRewriter { +public: + MOCK_METHOD(void, onMessage, (AbstractResponseSharedPtr)); + MOCK_METHOD(void, onFailedParse, (ResponseMetadataSharedPtr)); + 
MOCK_METHOD(void, process, (Buffer::Instance&)); +}; + +using MockResponseRewriterSharedPtr = std::shared_ptr; + class MockResponseDecoder : public ResponseDecoder { public: MockResponseDecoder() : ResponseDecoder{{}} {}; @@ -92,12 +102,13 @@ class MockResponse : public AbstractResponse { class KafkaBrokerFilterUnitTest : public testing::Test { protected: MockKafkaMetricsFacadeSharedPtr metrics_{std::make_shared()}; + MockResponseRewriterSharedPtr response_rewriter_{std::make_shared()}; MockResponseDecoderSharedPtr response_decoder_{std::make_shared()}; MockRequestDecoderSharedPtr request_decoder_{std::make_shared()}; NiceMock filter_callbacks_; - KafkaBrokerFilter testee_{metrics_, response_decoder_, request_decoder_}; + KafkaBrokerFilter testee_{metrics_, response_rewriter_, response_decoder_, request_decoder_}; void initialize() { testee_.initializeReadFilterCallbacks(filter_callbacks_); @@ -138,6 +149,7 @@ TEST_F(KafkaBrokerFilterUnitTest, ShouldAcceptDataSentByKafkaBroker) { // given Buffer::OwnedImpl data; EXPECT_CALL(*response_decoder_, onData(_)); + EXPECT_CALL(*response_rewriter_, process(_)); // when initialize(); diff --git a/contrib/kafka/filters/network/test/broker/integration_test/envoy_config_yaml.j2 b/contrib/kafka/filters/network/test/broker/integration_test/envoy_config_yaml.j2 index af945c5c61d7..bc3819f9f4fe 100644 --- a/contrib/kafka/filters/network/test/broker/integration_test/envoy_config_yaml.j2 +++ b/contrib/kafka/filters/network/test/broker/integration_test/envoy_config_yaml.j2 @@ -10,6 +10,7 @@ static_resources: typed_config: "@type": type.googleapis.com/envoy.extensions.filters.network.kafka_broker.v3.KafkaBroker stat_prefix: testfilter + force_response_rewrite: true - name: tcp typed_config: "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy diff --git a/contrib/kafka/filters/network/test/broker/rewriter_unit_test.cc b/contrib/kafka/filters/network/test/broker/rewriter_unit_test.cc new file mode 100644 
index 000000000000..4196b4c2588f --- /dev/null +++ b/contrib/kafka/filters/network/test/broker/rewriter_unit_test.cc @@ -0,0 +1,76 @@ +#include "source/common/buffer/buffer_impl.h" + +#include "contrib/kafka/filters/network/source/broker/filter_config.h" +#include "contrib/kafka/filters/network/source/broker/rewriter.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace NetworkFilters { +namespace Kafka { +namespace Broker { + +static void putBytesIntoBuffer(Buffer::Instance& buffer, const uint32_t size) { + std::vector data(size, 42); + absl::string_view sv = {data.data(), data.size()}; + buffer.add(sv); +} + +static Buffer::InstancePtr makeRandomBuffer(const uint32_t size) { + Buffer::InstancePtr result = std::make_unique(); + putBytesIntoBuffer(*result, size); + return result; +} + +class FakeResponse : public AbstractResponse { +public: + FakeResponse(const size_t size) : AbstractResponse{{0, 0, 0}}, size_{size} {} + + uint32_t computeSize() const override { return size_; }; + + virtual uint32_t encode(Buffer::Instance& dst) const override { + putBytesIntoBuffer(dst, size_); + return size_; + }; + +private: + size_t size_; +}; + +TEST(ResponseRewriterImplUnitTest, ShouldRewriteBuffer) { + // given + ResponseRewriterImpl testee; + + auto response1 = std::make_shared(7); + auto response2 = std::make_shared(13); + auto response3 = std::make_shared(42); + + // when - 1 + testee.onMessage(response1); + testee.onMessage(response2); + testee.onMessage(response3); + + // then - 1 + ASSERT_EQ(testee.getStoredResponseCountForTest(), 3); + + // when - 2 + auto buffer = makeRandomBuffer(4242); + testee.process(*buffer); + + // then - 2 + ASSERT_EQ(testee.getStoredResponseCountForTest(), 0); + ASSERT_EQ(buffer->length(), (3 * 4) + 7 + 13 + 42); // 4 bytes for message length +} + +TEST(ResponseRewriterUnitTest, ShouldCreateProperRewriter) { + ResponseRewriterSharedPtr r1 = createRewriter({"aaa", true}); + 
ASSERT_NE(std::dynamic_pointer_cast(r1), nullptr); + ResponseRewriterSharedPtr r2 = createRewriter({"aaa", false}); + ASSERT_NE(std::dynamic_pointer_cast(r2), nullptr); +} + +} // namespace Broker +} // namespace Kafka +} // namespace NetworkFilters +} // namespace Extensions +} // namespace Envoy diff --git a/contrib/kafka/filters/network/test/mesh/integration_test/envoy_config_yaml.j2 b/contrib/kafka/filters/network/test/mesh/integration_test/envoy_config_yaml.j2 index fbb22d2af3a9..cb2cdeeee807 100644 --- a/contrib/kafka/filters/network/test/mesh/integration_test/envoy_config_yaml.j2 +++ b/contrib/kafka/filters/network/test/mesh/integration_test/envoy_config_yaml.j2 @@ -10,6 +10,7 @@ static_resources: typed_config: "@type": type.googleapis.com/envoy.extensions.filters.network.kafka_broker.v3.KafkaBroker stat_prefix: testfilter + force_response_rewrite: true - name: mesh typed_config: "@type": type.googleapis.com/envoy.extensions.filters.network.kafka_mesh.v3alpha.KafkaMesh diff --git a/docs/inventories/v1.28/objects.inv b/docs/inventories/v1.28/objects.inv new file mode 100644 index 000000000000..c454862b2315 Binary files /dev/null and b/docs/inventories/v1.28/objects.inv differ diff --git a/docs/root/api-v3/config/trace/opentelemetry/resource_detectors.rst b/docs/root/api-v3/config/trace/opentelemetry/resource_detectors.rst new file mode 100644 index 000000000000..87790ac145ec --- /dev/null +++ b/docs/root/api-v3/config/trace/opentelemetry/resource_detectors.rst @@ -0,0 +1,10 @@ +OpenTelemetry Resource Detectors +================================ + +Resource detectors that can be configured with the OpenTelemetry Tracer: + +.. 
toctree:: + :glob: + :maxdepth: 3 + + ../../../extensions/tracers/opentelemetry/resource_detectors/v3/* diff --git a/docs/root/api-v3/config/trace/opentelemetry/samplers.rst b/docs/root/api-v3/config/trace/opentelemetry/samplers.rst new file mode 100644 index 000000000000..705155f640b9 --- /dev/null +++ b/docs/root/api-v3/config/trace/opentelemetry/samplers.rst @@ -0,0 +1,10 @@ +OpenTelemetry Samplers +====================== + +Samplers that can be configured with the OpenTelemetry Tracer: + +.. toctree:: + :glob: + :maxdepth: 3 + + ../../../extensions/tracers/opentelemetry/samplers/v3/* diff --git a/docs/root/api-v3/config/trace/trace.rst b/docs/root/api-v3/config/trace/trace.rst index 8f8d039a18d8..1bd09c1a1300 100644 --- a/docs/root/api-v3/config/trace/trace.rst +++ b/docs/root/api-v3/config/trace/trace.rst @@ -12,3 +12,5 @@ HTTP tracers :maxdepth: 2 v3/* + opentelemetry/resource_detectors + opentelemetry/samplers diff --git a/docs/root/configuration/http/http_filters/_include/compressor-filter-request-response.yaml b/docs/root/configuration/http/http_filters/_include/compressor-filter-request-response.yaml new file mode 100644 index 000000000000..3d1667c133ad --- /dev/null +++ b/docs/root/configuration/http/http_filters/_include/compressor-filter-request-response.yaml @@ -0,0 +1,80 @@ +static_resources: + listeners: + - address: + socket_address: + address: 0.0.0.0 + port_value: 80 + filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + codec_type: AUTO + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: app + domains: + - "*" + routes: + - match: + prefix: "/" + route: + cluster: service + http_filters: + # This filter is only enabled for responses. 
+ - name: envoy.filters.http.compressor + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor + request_direction_config: + common_config: + enabled: + default_value: false + runtime_key: request_compressor_enabled + compressor_library: + name: for_response + typed_config: + "@type": type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip + memory_level: 3 + window_bits: 10 + compression_level: BEST_COMPRESSION + compression_strategy: DEFAULT_STRATEGY + # This filter is only enabled for requests. + - name: envoy.filters.http.compressor + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor + response_direction_config: + common_config: + enabled: + default_value: false + runtime_key: response_compressor_enabled + request_direction_config: + common_config: + enabled: + default_value: true + runtime_key: request_compressor_enabled + compressor_library: + name: for_request + typed_config: + "@type": type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip + memory_level: 9 + window_bits: 15 + compression_level: BEST_SPEED + compression_strategy: DEFAULT_STRATEGY + - name: envoy.filters.http.router + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + clusters: + - name: service + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: service + port_value: 8000 diff --git a/docs/root/configuration/http/http_filters/_include/compressor-filter.yaml b/docs/root/configuration/http/http_filters/_include/compressor-filter.yaml new file mode 100644 index 000000000000..7c3a2215a0e4 --- /dev/null +++ b/docs/root/configuration/http/http_filters/_include/compressor-filter.yaml @@ -0,0 +1,72 @@ +static_resources: + listeners: + - address: + socket_address: + address: 0.0.0.0 + port_value: 80 + 
filter_chains: + - filters: + - name: envoy.filters.network.http_connection_manager + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager + codec_type: AUTO + stat_prefix: ingress_http + route_config: + name: local_route + virtual_hosts: + - name: local_service + domains: ["*"] + typed_per_filter_config: + envoy.filters.http.compression: + "@type": type.googleapis.com/envoy.extensions.filters.http.compressor.v3.CompressorPerRoute + disabled: true + routes: + - match: { prefix: "/static" } + route: { cluster: service } + typed_per_filter_config: + envoy.filters.http.compression: + "@type": type.googleapis.com/envoy.extensions.filters.http.compressor.v3.CompressorPerRoute + overrides: + response_direction_config: + - match: { prefix: "/" } + route: { cluster: service } + http_filters: + - name: envoy.filters.http.compressor + typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor + response_direction_config: + common_config: + min_content_length: 100 + content_type: + - text/html + - application/json + disable_on_etag_header: true + request_direction_config: + common_config: + enabled: + default_value: false + runtime_key: request_compressor_enabled + compressor_library: + name: text_optimized + typed_config: + "@type": type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip + memory_level: 3 + window_bits: 10 + compression_level: BEST_COMPRESSION + compression_strategy: DEFAULT_STRATEGY + - name: envoy.filters.http.router + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.router.v3.Router + clusters: + - name: service + type: STRICT_DNS + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: service + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: service + port_value: 8000 diff --git a/docs/root/configuration/http/http_filters/basic_auth_filter.rst 
b/docs/root/configuration/http/http_filters/basic_auth_filter.rst new file mode 100644 index 000000000000..da8b160fd054 --- /dev/null +++ b/docs/root/configuration/http/http_filters/basic_auth_filter.rst @@ -0,0 +1,46 @@ +.. _config_http_filters_basic_auth: + +Basic Auth +========== + +This HTTP filter can be used to authenticate user credentials in the HTTP Authentication header defined +in `RFC7617 `. + +The filter will extract the username and password from the HTTP Authentication header and verify them +against the configured username and password list. + +If the username and password are valid, the request will be forwared to the next filter in the filter chains. +If they're invalid or not provided in the HTTP request, the request will be denied with a 401 Unauthorized response. + +Configuration +------------- + +* This filter should be configured with the type URL ``type.googleapis.com/envoy.extensions.filters.http.basic_auth.v3.BasicAuth``. +* :ref:`v3 API reference ` + +``users`` is a list of username-password pairs used to verify user credentials in the "Authorization" header. + The value needs to be the `htpasswd ` format. + + +An example configuration of the filter may look like the following: + +.. code-block:: yaml + + users: + inline_string: |- + user1:{SHA}hashed_user1_password + user2:{SHA}hashed_user2_password + +Note that only SHA format is currently supported. Other formats may be added in the future. + +Statistics +---------- + +The HTTP basic auth filter outputs statistics in the ``http..basic_auth.`` namespace. + +.. 
csv-table:: + :header: Name, Type, Description + :widths: 1, 1, 2 + + allowed, Counter, Total number of allowed requests + denied, Counter, Total number of denied requests diff --git a/docs/root/configuration/http/http_filters/compressor_filter.rst b/docs/root/configuration/http/http_filters/compressor_filter.rst index 9c3c0dba9a31..9bcc66fb4796 100644 --- a/docs/root/configuration/http/http_filters/compressor_filter.rst +++ b/docs/root/configuration/http/http_filters/compressor_filter.rst @@ -26,32 +26,11 @@ compression only. Other compression libraries can be supported as extensions. An example configuration of the filter may look like the following: -.. code-block:: yaml - - http_filters: - - name: envoy.filters.http.compressor - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor - response_direction_config: - common_config: - min_content_length: 100 - content_type: - - text/html - - application/json - disable_on_etag_header: true - request_direction_config: - common_config: - enabled: - default_value: false - runtime_key: request_compressor_enabled - compressor_library: - name: text_optimized - typed_config: - "@type": type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip - memory_level: 3 - window_bits: 10 - compression_level: BEST_COMPRESSION - compression_strategy: DEFAULT_STRATEGY +.. literalinclude:: _include/compressor-filter.yaml + :language: yaml + :linenos: + :lines: 33-56 + :caption: :download:`compressor-filter.yaml <_include/compressor-filter.yaml>` By *default* request compression is disabled, but when enabled it will be *skipped* if: @@ -132,27 +111,11 @@ Per-Route Configuration Response compression can be enabled and disabled on individual virtual hosts and routes. For example, to disable response compression for a particular virtual host, but enable response compression for its ``/static`` route: -.. 
code-block:: yaml - - route_config: - name: local_route - virtual_hosts: - - name: local_service - domains: ["*"] - typed_per_filter_config: - envoy.filters.http.compression: - "@type": type.googleapis.com/envoy.extensions.filters.http.compressor.v3.CompressorPerRoute - disabled: true - routes: - - match: { prefix: "/static" } - route: { cluster: some_service } - typed_per_filter_config: - envoy.filters.http.compression: - "@type": type.googleapis.com/envoy.extensions.filters.http.compressor.v3.CompressorPerRoute - overrides: - response_direction_config: - - match: { prefix: "/" } - route: { cluster: some_service } +.. literalinclude:: _include/compressor-filter.yaml + :language: yaml + :linenos: + :lines: 14-32 + :caption: :download:`compressor-filter.yaml <_include/compressor-filter.yaml>` Using different compressors for requests and responses -------------------------------------------------------- @@ -160,48 +123,11 @@ Using different compressors for requests and responses If different compression libraries are desired for requests and responses, it is possible to install multiple compressor filters enabled only for requests or responses. For instance: -.. code-block:: yaml - - http_filters: - # This filter is only enabled for responses. - - name: envoy.filters.http.compressor - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor - request_direction_config: - common_config: - enabled: - default_value: false - runtime_key: request_compressor_enabled - compressor_library: - name: for_response - typed_config: - "@type": type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip - memory_level: 3 - window_bits: 10 - compression_level: BEST_COMPRESSION - compression_strategy: DEFAULT_STRATEGY - # This filter is only enabled for requests. 
- - name: envoy.filters.http.compressor - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.compressor.v3.Compressor - response_direction_config: - common_config: - enabled: - default_value: false - runtime_key: response_compressor_enabled - request_direction_config: - common_config: - enabled: - default_value: true - runtime_key: request_compressor_enabled - compressor_library: - name: for_request - typed_config: - "@type": type.googleapis.com/envoy.extensions.compression.gzip.compressor.v3.Gzip - memory_level: 9 - window_bits: 15 - compression_level: BEST_SPEED - compression_strategy: DEFAULT_STRATEGY +.. literalinclude:: _include/compressor-filter-request-response.yaml + :language: yaml + :linenos: + :lines: 25-64 + :caption: :download:`compressor-filter-request-response.yaml <_include/compressor-filter-request-response.yaml>` .. _compressor-statistics: diff --git a/docs/root/configuration/http/http_filters/http_filters.rst b/docs/root/configuration/http/http_filters/http_filters.rst index bbf38623d02f..eb1333ad0e03 100644 --- a/docs/root/configuration/http/http_filters/http_filters.rst +++ b/docs/root/configuration/http/http_filters/http_filters.rst @@ -11,6 +11,7 @@ HTTP filters aws_lambda_filter aws_request_signing_filter bandwidth_limit_filter + basic_auth_filter buffer_filter cache_filter cdn_loop_filter diff --git a/docs/versions.yaml b/docs/versions.yaml index 1178a8461cc4..06b7cca3c7d4 100644 --- a/docs/versions.yaml +++ b/docs/versions.yaml @@ -21,3 +21,4 @@ "1.25": 1.25.11 "1.26": 1.26.6 "1.27": 1.27.2 +"1.28": 1.28.0 diff --git a/envoy/http/filter.h b/envoy/http/filter.h index 361eacc24474..5ccb8bec3706 100644 --- a/envoy/http/filter.h +++ b/envoy/http/filter.h @@ -441,6 +441,37 @@ class StreamFilterCallbacks { * @return absl::string_view the name of the filter as configured in the filter chain. */ virtual absl::string_view filterConfigName() const PURE; + + /** + * The downstream request headers if present. 
+ */ + virtual RequestHeaderMapOptRef requestHeaders() PURE; + + /** + * The downstream request trailers if present. + */ + virtual RequestTrailerMapOptRef requestTrailers() PURE; + + /** + * Retrieves a pointer to the continue headers if present. + */ + virtual ResponseHeaderMapOptRef informationalHeaders() PURE; + + /** + * Retrieves a pointer to the response headers if present. + * Note that response headers might be set multiple times (e.g. if a local reply is issued after + * headers have been received but before headers have been encoded), so it is not safe in general + * to assume that any set of headers will be valid for the duration of the stream. + */ + virtual ResponseHeaderMapOptRef responseHeaders() PURE; + + /** + * Retrieves a pointer to the last response trailers if present. + * Note that response headers might be set multiple times (e.g. if a local reply is issued after + * headers have been received but before headers have been encoded), so it is not safe in general + * to assume that any set of headers will be valid for the duration of the stream. + */ + virtual ResponseTrailerMapOptRef responseTrailers() PURE; }; class DecoderFilterWatermarkCallbacks { @@ -608,12 +639,6 @@ class StreamDecoderFilterCallbacks : public virtual StreamFilterCallbacks, */ virtual void encode1xxHeaders(ResponseHeaderMapPtr&& headers) PURE; - /** - * Returns the headers provided to encode1xxHeaders. Returns absl::nullopt if - * no headers have been provided yet. - */ - virtual ResponseHeaderMapOptRef informationalHeaders() const PURE; - /** * Called with headers to be encoded, optionally indicating end of stream. * @@ -630,12 +655,6 @@ class StreamDecoderFilterCallbacks : public virtual StreamFilterCallbacks, virtual void encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream, absl::string_view details) PURE; - /** - * Returns the headers provided to encodeHeaders. Returns absl::nullopt if no headers have been - * provided yet. 
- */ - virtual ResponseHeaderMapOptRef responseHeaders() const PURE; - /** * Called with data to be encoded, optionally indicating end of stream. * @param data supplies the data to be encoded. @@ -649,12 +668,6 @@ class StreamDecoderFilterCallbacks : public virtual StreamFilterCallbacks, */ virtual void encodeTrailers(ResponseTrailerMapPtr&& trailers) PURE; - /** - * Returns the trailers provided to encodeTrailers. Returns absl::nullopt if no headers have been - * provided yet. - */ - virtual ResponseTrailerMapOptRef responseTrailers() const PURE; - /** * Called with metadata to be encoded. * diff --git a/examples/ext_authz/Dockerfile-opa b/examples/ext_authz/Dockerfile-opa index 7c5e544b987c..ff19250effc1 100644 --- a/examples/ext_authz/Dockerfile-opa +++ b/examples/ext_authz/Dockerfile-opa @@ -1 +1 @@ -FROM openpolicyagent/opa:0.57.1-istio@sha256:f76fb8c743d36265a58eae0dcc95a5587699c25a85afb0797dd6be88e77e3653 +FROM openpolicyagent/opa:0.58.0-istio@sha256:f53e69eeee948b1d725877751720864221f6353e515211d54455f08b5abad671 diff --git a/examples/grpc-bridge/client/requirements.txt b/examples/grpc-bridge/client/requirements.txt index 04c24d3edc2e..475500a3a2c8 100644 --- a/examples/grpc-bridge/client/requirements.txt +++ b/examples/grpc-bridge/client/requirements.txt @@ -100,119 +100,119 @@ charset-normalizer==3.3.0 \ --hash=sha256:f8888e31e3a85943743f8fc15e71536bda1c81d5aa36d014a3c0c44481d7db6e \ --hash=sha256:fc52b79d83a3fe3a360902d3f5d79073a993597d48114c29485e9431092905d8 # via requests -grpcio==1.59.0 \ - --hash=sha256:0ae444221b2c16d8211b55326f8ba173ba8f8c76349bfc1768198ba592b58f74 \ - --hash=sha256:0b84445fa94d59e6806c10266b977f92fa997db3585f125d6b751af02ff8b9fe \ - --hash=sha256:14890da86a0c0e9dc1ea8e90101d7a3e0e7b1e71f4487fab36e2bfd2ecadd13c \ - --hash=sha256:15f03bd714f987d48ae57fe092cf81960ae36da4e520e729392a59a75cda4f29 \ - --hash=sha256:1a839ba86764cc48226f50b924216000c79779c563a301586a107bda9cbe9dcf \ - 
--hash=sha256:225e5fa61c35eeaebb4e7491cd2d768cd8eb6ed00f2664fa83a58f29418b39fd \ - --hash=sha256:228b91ce454876d7eed74041aff24a8f04c0306b7250a2da99d35dd25e2a1211 \ - --hash=sha256:2ea95cd6abbe20138b8df965b4a8674ec312aaef3147c0f46a0bac661f09e8d0 \ - --hash=sha256:2f120d27051e4c59db2f267b71b833796770d3ea36ca712befa8c5fff5da6ebd \ - --hash=sha256:34341d9e81a4b669a5f5dca3b2a760b6798e95cdda2b173e65d29d0b16692857 \ - --hash=sha256:3859917de234a0a2a52132489c4425a73669de9c458b01c9a83687f1f31b5b10 \ - --hash=sha256:38823bd088c69f59966f594d087d3a929d1ef310506bee9e3648317660d65b81 \ - --hash=sha256:38da5310ef84e16d638ad89550b5b9424df508fd5c7b968b90eb9629ca9be4b9 \ - --hash=sha256:3b8ff795d35a93d1df6531f31c1502673d1cebeeba93d0f9bd74617381507e3f \ - --hash=sha256:50eff97397e29eeee5df106ea1afce3ee134d567aa2c8e04fabab05c79d791a7 \ - --hash=sha256:5711c51e204dc52065f4a3327dca46e69636a0b76d3e98c2c28c4ccef9b04c52 \ - --hash=sha256:598f3530231cf10ae03f4ab92d48c3be1fee0c52213a1d5958df1a90957e6a88 \ - --hash=sha256:611d9aa0017fa386809bddcb76653a5ab18c264faf4d9ff35cb904d44745f575 \ - --hash=sha256:61bc72a00ecc2b79d9695220b4d02e8ba53b702b42411397e831c9b0589f08a3 \ - --hash=sha256:63982150a7d598281fa1d7ffead6096e543ff8be189d3235dd2b5604f2c553e5 \ - --hash=sha256:6c4b1cc3a9dc1924d2eb26eec8792fedd4b3fcd10111e26c1d551f2e4eda79ce \ - --hash=sha256:81d86a096ccd24a57fa5772a544c9e566218bc4de49e8c909882dae9d73392df \ - --hash=sha256:849c47ef42424c86af069a9c5e691a765e304079755d5c29eff511263fad9c2a \ - --hash=sha256:871371ce0c0055d3db2a86fdebd1e1d647cf21a8912acc30052660297a5a6901 \ - --hash=sha256:8cd2d38c2d52f607d75a74143113174c36d8a416d9472415eab834f837580cf7 \ - --hash=sha256:936b2e04663660c600d5173bc2cc84e15adbad9c8f71946eb833b0afc205b996 \ - --hash=sha256:93e9cb546e610829e462147ce724a9cb108e61647a3454500438a6deef610be1 \ - --hash=sha256:956f0b7cb465a65de1bd90d5a7475b4dc55089b25042fe0f6c870707e9aabb1d \ - --hash=sha256:986de4aa75646e963466b386a8c5055c8b23a26a36a6c99052385d6fe8aaf180 \ - 
--hash=sha256:aca8a24fef80bef73f83eb8153f5f5a0134d9539b4c436a716256b311dda90a6 \ - --hash=sha256:acf70a63cf09dd494000007b798aff88a436e1c03b394995ce450be437b8e54f \ - --hash=sha256:b34c7a4c31841a2ea27246a05eed8a80c319bfc0d3e644412ec9ce437105ff6c \ - --hash=sha256:b95ec8ecc4f703f5caaa8d96e93e40c7f589bad299a2617bdb8becbcce525539 \ - --hash=sha256:ba0ca727a173ee093f49ead932c051af463258b4b493b956a2c099696f38aa66 \ - --hash=sha256:c041a91712bf23b2a910f61e16565a05869e505dc5a5c025d429ca6de5de842c \ - --hash=sha256:c0488c2b0528e6072010182075615620071371701733c63ab5be49140ed8f7f0 \ - --hash=sha256:c173a87d622ea074ce79be33b952f0b424fa92182063c3bda8625c11d3585d09 \ - --hash=sha256:c251d22de8f9f5cca9ee47e4bade7c5c853e6e40743f47f5cc02288ee7a87252 \ - --hash=sha256:c4dfdb49f4997dc664f30116af2d34751b91aa031f8c8ee251ce4dcfc11277b0 \ - --hash=sha256:ca87ee6183421b7cea3544190061f6c1c3dfc959e0b57a5286b108511fd34ff4 \ - --hash=sha256:ceb1e68135788c3fce2211de86a7597591f0b9a0d2bb80e8401fd1d915991bac \ - --hash=sha256:d09bd2a4e9f5a44d36bb8684f284835c14d30c22d8ec92ce796655af12163588 \ - --hash=sha256:d0fcf53df684fcc0154b1e61f6b4a8c4cf5f49d98a63511e3f30966feff39cd0 \ - --hash=sha256:d74f7d2d7c242a6af9d4d069552ec3669965b74fed6b92946e0e13b4168374f9 \ - --hash=sha256:de2599985b7c1b4ce7526e15c969d66b93687571aa008ca749d6235d056b7205 \ - --hash=sha256:e5378785dce2b91eb2e5b857ec7602305a3b5cf78311767146464bfa365fc897 \ - --hash=sha256:ec78aebb9b6771d6a1de7b6ca2f779a2f6113b9108d486e904bde323d51f5589 \ - --hash=sha256:f1feb034321ae2f718172d86b8276c03599846dc7bb1792ae370af02718f91c5 \ - --hash=sha256:f21917aa50b40842b51aff2de6ebf9e2f6af3fe0971c31960ad6a3a2b24988f4 \ - --hash=sha256:f367e4b524cb319e50acbdea57bb63c3b717c5d561974ace0b065a648bb3bad3 \ - --hash=sha256:f6cfe44a5d7c7d5f1017a7da1c8160304091ca5dc64a0f85bca0d63008c3137a \ - --hash=sha256:fa66cac32861500f280bb60fe7d5b3e22d68c51e18e65367e38f8669b78cea3b \ - --hash=sha256:fc8bf2e7bc725e76c0c11e474634a08c8f24bcf7426c0c6d60c8f9c6e70e4d4a \ - 
--hash=sha256:fe976910de34d21057bcb53b2c5e667843588b48bf11339da2a75f5c4c5b4055 +grpcio==1.59.2 \ + --hash=sha256:023088764012411affe7db183d1ada3ad9daf2e23ddc719ff46d7061de661340 \ + --hash=sha256:08d77e682f2bf730a4961eea330e56d2f423c6a9b91ca222e5b1eb24a357b19f \ + --hash=sha256:0a4a3833c0e067f3558538727235cd8a49709bff1003200bbdefa2f09334e4b1 \ + --hash=sha256:0a754aff9e3af63bdc4c75c234b86b9d14e14a28a30c4e324aed1a9b873d755f \ + --hash=sha256:11168ef43e4a43ff1b1a65859f3e0ef1a173e277349e7fb16923ff108160a8cd \ + --hash=sha256:128e20f57c5f27cb0157e73756d1586b83c1b513ebecc83ea0ac37e4b0e4e758 \ + --hash=sha256:1f9524d1d701e399462d2c90ba7c193e49d1711cf429c0d3d97c966856e03d00 \ + --hash=sha256:1ff16d68bf453275466a9a46739061a63584d92f18a0f5b33d19fc97eb69867c \ + --hash=sha256:2067274c88bc6de89c278a672a652b4247d088811ece781a4858b09bdf8448e3 \ + --hash=sha256:2171c39f355ba5b551c5d5928d65aa6c69807fae195b86ef4a7d125bcdb860a9 \ + --hash=sha256:242adc47725b9a499ee77c6a2e36688fa6c96484611f33b1be4c57ab075a92dd \ + --hash=sha256:27f879ae604a7fcf371e59fba6f3ff4635a4c2a64768bd83ff0cac503142fef4 \ + --hash=sha256:2b230028a008ae1d0f430acb227d323ff8a619017415cf334c38b457f814119f \ + --hash=sha256:3059668df17627f0e0fa680e9ef8c995c946c792612e9518f5cc1503be14e90b \ + --hash=sha256:31176aa88f36020055ace9adff2405a33c8bdbfa72a9c4980e25d91b2f196873 \ + --hash=sha256:36f53c2b3449c015880e7d55a89c992c357f176327b0d2873cdaaf9628a37c69 \ + --hash=sha256:3b4368b33908f683a363f376dfb747d40af3463a6e5044afee07cf9436addf96 \ + --hash=sha256:3c61d641d4f409c5ae46bfdd89ea42ce5ea233dcf69e74ce9ba32b503c727e29 \ + --hash=sha256:4abb717e320e74959517dc8e84a9f48fbe90e9abe19c248541e9418b1ce60acd \ + --hash=sha256:4c93f4abbb54321ee6471e04a00139c80c754eda51064187963ddf98f5cf36a4 \ + --hash=sha256:535561990e075fa6bd4b16c4c3c1096b9581b7bb35d96fac4650f1181e428268 \ + --hash=sha256:53c9aa5ddd6857c0a1cd0287225a2a25873a8e09727c2e95c4aebb1be83a766a \ + 
--hash=sha256:5d573e70a6fe77555fb6143c12d3a7d3fa306632a3034b4e7c59ca09721546f8 \ + --hash=sha256:6009386a2df66159f64ac9f20425ae25229b29b9dd0e1d3dd60043f037e2ad7e \ + --hash=sha256:686e975a5d16602dc0982c7c703948d17184bd1397e16c8ee03511ecb8c4cdda \ + --hash=sha256:6959fb07e8351e20501ffb8cc4074c39a0b7ef123e1c850a7f8f3afdc3a3da01 \ + --hash=sha256:6b25ed37c27e652db01be341af93fbcea03d296c024d8a0e680017a268eb85dd \ + --hash=sha256:6da6dea3a1bacf99b3c2187e296db9a83029ed9c38fd4c52b7c9b7326d13c828 \ + --hash=sha256:72ca2399097c0b758198f2ff30f7178d680de8a5cfcf3d9b73a63cf87455532e \ + --hash=sha256:73abb8584b0cf74d37f5ef61c10722adc7275502ab71789a8fe3cb7ef04cf6e2 \ + --hash=sha256:74100fecaec8a535e380cf5f2fb556ff84957d481c13e54051c52e5baac70541 \ + --hash=sha256:75c6ecb70e809cf1504465174343113f51f24bc61e22a80ae1c859f3f7034c6d \ + --hash=sha256:7cf05053242f61ba94014dd3a986e11a083400a32664058f80bf4cf817c0b3a1 \ + --hash=sha256:9411e24328a2302e279e70cae6e479f1fddde79629fcb14e03e6d94b3956eabf \ + --hash=sha256:a213acfbf186b9f35803b52e4ca9addb153fc0b67f82a48f961be7000ecf6721 \ + --hash=sha256:bb7e0fe6ad73b7f06d7e2b689c19a71cf5cc48f0c2bf8608469e51ffe0bd2867 \ + --hash=sha256:c2504eed520958a5b77cc99458297cb7906308cb92327f35fb7fbbad4e9b2188 \ + --hash=sha256:c35aa9657f5d5116d23b934568e0956bd50c615127810fffe3ac356a914c176a \ + --hash=sha256:c5f09cffa619adfb44799fa4a81c2a1ad77c887187613fb0a8f201ab38d89ba1 \ + --hash=sha256:c978f864b35f2261e0819f5cd88b9830b04dc51bcf055aac3c601e525a10d2ba \ + --hash=sha256:cbe946b3e6e60a7b4618f091e62a029cb082b109a9d6b53962dd305087c6e4fd \ + --hash=sha256:cc3e4cd087f07758b16bef8f31d88dbb1b5da5671d2f03685ab52dece3d7a16e \ + --hash=sha256:cf0dead5a2c5a3347af2cfec7131d4f2a2e03c934af28989c9078f8241a491fa \ + --hash=sha256:d2794f0e68b3085d99b4f6ff9c089f6fdd02b32b9d3efdfbb55beac1bf22d516 \ + --hash=sha256:d2fa68a96a30dd240be80bbad838a0ac81a61770611ff7952b889485970c4c71 \ + --hash=sha256:d6f70406695e3220f09cd7a2f879333279d91aa4a8a1d34303b56d61a8180137 \ + 
--hash=sha256:d8f9cd4ad1be90b0cf350a2f04a38a36e44a026cac1e036ac593dc48efe91d52 \ + --hash=sha256:da2d94c15f88cd40d7e67f7919d4f60110d2b9d5b1e08cf354c2be773ab13479 \ + --hash=sha256:e1727c1c0e394096bb9af185c6923e8ea55a5095b8af44f06903bcc0e06800a2 \ + --hash=sha256:e420ced29b5904cdf9ee5545e23f9406189d8acb6750916c2db4793dada065c6 \ + --hash=sha256:e82c5cf1495244adf5252f925ac5932e5fd288b3e5ab6b70bec5593074b7236c \ + --hash=sha256:f1ef0d39bc1feb420caf549b3c657c871cad4ebbcf0580c4d03816b0590de0cf \ + --hash=sha256:f8753a6c88d1d0ba64302309eecf20f70d2770f65ca02d83c2452279085bfcd3 \ + --hash=sha256:f93dbf58f03146164048be5426ffde298b237a5e059144847e4940f5b80172c3 # via # -r requirements.in # grpcio-tools -grpcio-tools==1.59.0 \ - --hash=sha256:0548e901894399886ff4a4cd808cb850b60c021feb4a8977a0751f14dd7e55d9 \ - --hash=sha256:05bf7b3ed01c8a562bb7e840f864c58acedbd6924eb616367c0bd0a760bdf483 \ - --hash=sha256:1d551ff42962c7c333c3da5c70d5e617a87dee581fa2e2c5ae2d5137c8886779 \ - --hash=sha256:1df755951f204e65bf9232a9cac5afe7d6b8e4c87ac084d3ecd738fdc7aa4174 \ - --hash=sha256:204e08f807b1d83f5f0efea30c4e680afe26a43dec8ba614a45fa698a7ef0a19 \ - --hash=sha256:240a7a3c2c54f77f1f66085a635bca72003d02f56a670e7db19aec531eda8f78 \ - --hash=sha256:26eb2eebf150a33ebf088e67c1acf37eb2ac4133d9bfccbaa011ad2148c08b42 \ - --hash=sha256:27a7f226b741b2ebf7e2d0779d2c9b17f446d1b839d59886c1619e62cc2ae472 \ - --hash=sha256:2d970aa26854f535ffb94ea098aa8b43de020d9a14682e4a15dcdaeac7801b27 \ - --hash=sha256:2ee960904dde12a7fa48e1591a5b3eeae054bdce57bacf9fd26685a98138f5bf \ - --hash=sha256:335e2f355a0c544a88854e2c053aff8a3f398b84a263a96fa19d063ca1fe513a \ - --hash=sha256:387662bee8e4c0b52cc0f61eaaca0ca583f5b227103f685b76083a3590a71a3e \ - --hash=sha256:40cbf712769242c2ba237745285ef789114d7fcfe8865fc4817d87f20015e99a \ - --hash=sha256:4499d4bc5aa9c7b645018d8b0db4bebd663d427aabcd7bee7777046cb1bcbca7 \ - --hash=sha256:498e7be0b14385980efa681444ba481349c131fc5ec88003819f5d929646947c \ - 
--hash=sha256:4a10e59cca462208b489478340b52a96d64e8b8b6f1ac097f3e8cb211d3f66c0 \ - --hash=sha256:4ee443abcd241a5befb05629013fbf2eac637faa94aaa3056351aded8a31c1bc \ - --hash=sha256:51d9595629998d8b519126c5a610f15deb0327cd6325ed10796b47d1d292e70b \ - --hash=sha256:520c0c83ea79d14b0679ba43e19c64ca31d30926b26ad2ca7db37cbd89c167e2 \ - --hash=sha256:5b2d6da553980c590487f2e7fd3ec9c1ad8805ff2ec77977b92faa7e3ca14e1f \ - --hash=sha256:6119f62c462d119c63227b9534210f0f13506a888151b9bf586f71e7edf5088b \ - --hash=sha256:6aec8a4ed3808b7dfc1276fe51e3e24bec0eeaf610d395bcd42934647cf902a3 \ - --hash=sha256:71cc6db1d66da3bc3730d9937bddc320f7b1f1dfdff6342bcb5741515fe4110b \ - --hash=sha256:784aa52965916fec5afa1a28eeee6f0073bb43a2a1d7fedf963393898843077a \ - --hash=sha256:821dba464d84ebbcffd9d420302404db2fa7a40c7ff4c4c4c93726f72bfa2769 \ - --hash=sha256:868892ad9e00651a38dace3e4924bae82fc4fd4df2c65d37b74381570ee8deb1 \ - --hash=sha256:882b809b42b5464bee55288f4e60837297f9618e53e69ae3eea6d61b05ce48fa \ - --hash=sha256:8c4634b3589efa156a8d5860c0a2547315bd5c9e52d14c960d716fe86e0927be \ - --hash=sha256:8f0da5861ee276ca68493b217daef358960e8527cc63c7cb292ca1c9c54939af \ - --hash=sha256:962d1a3067129152cee3e172213486cb218a6bad703836991f46f216caefcf00 \ - --hash=sha256:99b3bde646720bbfb77f263f5ba3e1a0de50632d43c38d405a0ef9c7e94373cd \ - --hash=sha256:9af7e138baa9b2895cf1f3eb718ac96fc5ae2f8e31fca405e21e0e5cd1643c52 \ - --hash=sha256:9ed05197c5ab071e91bcef28901e97ca168c4ae94510cb67a14cb4931b94255a \ - --hash=sha256:9fc02a6e517c34dcf885ff3b57260b646551083903e3d2c780b4971ce7d4ab7c \ - --hash=sha256:a4f6cae381f21fee1ef0a5cbbbb146680164311157ae618edf3061742d844383 \ - --hash=sha256:aa4018f2d8662ac4d9830445d3d253a11b3e096e8afe20865547137aa1160e93 \ - --hash=sha256:b519f2ecde9a579cad2f4a7057d5bb4e040ad17caab8b5e691ed7a13b9db0be9 \ - --hash=sha256:b8e95d921cc2a1521d4750eedefec9f16031457920a6677edebe9d1b2ad6ae60 \ - --hash=sha256:bb87158dbbb9e5a79effe78d54837599caa16df52d8d35366e06a91723b587ae \ - 
--hash=sha256:bfa4b2b7d21c5634b62e5f03462243bd705adc1a21806b5356b8ce06d902e160 \ - --hash=sha256:c683be38a9bf4024c223929b4cd2f0a0858c94e9dc8b36d7eaa5a48ce9323a6f \ - --hash=sha256:cb63055739808144b541986291679d643bae58755d0eb082157c4d4c04443905 \ - --hash=sha256:d0f0806de1161c7f248e4c183633ee7a58dfe45c2b77ddf0136e2e7ad0650b1b \ - --hash=sha256:db030140d0da2368319e2f23655df3baec278c7e0078ecbe051eaf609a69382c \ - --hash=sha256:de156c18b0c638aaee3be6ad650c8ba7dec94ed4bac26403aec3dce95ffe9407 \ - --hash=sha256:df85096fcac7cea8aa5bd84b7a39c4cdbf556b93669bb4772eb96aacd3222a4e \ - --hash=sha256:e312ddc2d8bec1a23306a661ad52734f984c9aad5d8f126ebb222a778d95407d \ - --hash=sha256:eeed386971bb8afc3ec45593df6a1154d680d87be1209ef8e782e44f85f47e64 \ - --hash=sha256:ef3e8aca2261f7f07436d4e2111556c1fb9bf1f9cfcdf35262743ccdee1b6ce9 \ - --hash=sha256:f14a6e4f700dfd30ff8f0e6695f944affc16ae5a1e738666b3fae4e44b65637e \ - --hash=sha256:f1c684c0d9226d04cadafced620a46ab38c346d0780eaac7448da96bf12066a3 \ - --hash=sha256:f381ae3ad6a5eb27aad8d810438937d8228977067c54e0bd456fce7e11fdbf3d \ - --hash=sha256:f6263b85261b62471cb97b7505df72d72b8b62e5e22d8184924871a6155b4dbf \ - --hash=sha256:f965707da2b48a33128615bcfebedd215a3a30e346447e885bb3da37a143177a +grpcio-tools==1.59.2 \ + --hash=sha256:072a7ce979ea4f7579c3c99fcbde3d1882c3d1942a3b51d159f67af83b714cd8 \ + --hash=sha256:09749e832e06493841000275248b031f7154665900d1e1b0e42fc17a64bf904d \ + --hash=sha256:09d809ca88999b2578119683f9f0f6a9b42de95ea21550852114a1540b6a642c \ + --hash=sha256:12cc7698fad48866f68fdef831685cb31ef5814ac605d248c4e5fc964a6fb3f6 \ + --hash=sha256:12fdee2de80d83eadb1294e0f8a0cb6cefcd2e4988ed680038ab09cd04361ee4 \ + --hash=sha256:17ef468836d7cf0b2419f4d5c7ac84ec2d598a1ae410773585313edacf7c393e \ + --hash=sha256:1e949e66d4555ce319fd7acef90df625138078d8729c4dc6f6a9f05925034433 \ + --hash=sha256:2a9ce2a209871ed1c5ae2229e6f4f5a3ea96d83b7871df5d9773d72a72545683 \ + 
--hash=sha256:2f410375830a9bb7140a07da4d75bf380e0958377bed50d77d1dae302de4314e \ + --hash=sha256:32141ef309543a446337e934f0b7a2565a6fca890ff4e543630a09ef72c8d00b \ + --hash=sha256:3491cb69c909d586c23d7e6d0ac87844ca22f496f505ce429c0d3301234f2cf3 \ + --hash=sha256:3cf9949a2aadcece3c1e0dd59249aea53dbfc8cc94f7d707797acd67cf6cf931 \ + --hash=sha256:41b5dd6a06c2563ac3b3adda6d875b15e63eb7b1629e85fc9af608c3a76c4c82 \ + --hash=sha256:48782727c5cff8b8c96e028a8a58614ff6a37eadc0db85866516210c7aafe9ae \ + --hash=sha256:4a1810bc5de51cc162a19ed3c11da8ddc64d8cfcba049ef337c20fcb397f048b \ + --hash=sha256:531f87c8e884c6a2e58f040039dfbfe997a4e33baa58f7c7d9993db37b1f5ad0 \ + --hash=sha256:55c401599d5093c4cfa83b8f0ee9757b4d6d3029b10bd67be2cffeada7a44961 \ + --hash=sha256:5f2ce5ecd63c492949b03af73b1dd6d502c567cc2f9c2057137e518b0c702a01 \ + --hash=sha256:670f5889853215999eb3511a623dd7dff01b1ce1a64610d13366e0fd337f8c79 \ + --hash=sha256:6e735a26e8ea8bb89dc69343d1d00ea607449c6d81e21f339ee118562f3d1931 \ + --hash=sha256:724f4f0eecc17fa66216eebfff145631070f04ed7fb4ddf7a7d1c4f954ecc2a1 \ + --hash=sha256:75905266cf90f1866b322575c2edcd4b36532c33fc512bb1b380dc58d84b1030 \ + --hash=sha256:77ec33ddee691e60511e2a7c793aad4cf172ae20e08d95c786cbba395f6203a7 \ + --hash=sha256:7ec536cdae870a74080c665cfb1dca8d0784a931aa3c26376ef971a3a51b59d4 \ + --hash=sha256:7f0e26af7c07bfa906c91ca9f5932514928a7f032f5f20aecad6b5541037de7e \ + --hash=sha256:896f5cdf58f658025a4f7e4ea96c81183b4b6a4b1b4d92ae66d112ac91f062f1 \ + --hash=sha256:99ddc0f5304071a355c261ae49ea5d29b9e9b6dcf422dfc55ada70a243e27e8f \ + --hash=sha256:9b2885c0e2c9a97bde33497a919032afbd8b5c6dc2f8d4dd4198e77226e0de05 \ + --hash=sha256:9c106ebbed0db446f59f0efe5c3fce33a0a21bf75b392966585e4b5934891b92 \ + --hash=sha256:a2ccb59dfbf2ebd668a5a7c4b7bb2b859859641d2b199114b557cd045aac6102 \ + --hash=sha256:a3cb707da722a0b6c4021fc2cc1c005a8d4037d8ad0252f93df318b9b8a6b4f3 \ + --hash=sha256:a85da4200295ee17e3c1ae068189a43844420ed7e9d531a042440f52de486dfb \ + 
--hash=sha256:b0b712acec00a9cbc2204c271d638062a2cb8ce74f25d158b023ff6e93182659 \ + --hash=sha256:b0dc271a200dbab6547b2c73fcbdb7efe94c31cb633aa20d073f7cf4493493e1 \ + --hash=sha256:b38f8edb2909702c2478b52f6213982c21e4f66f739ac953b91f97863ba2c06a \ + --hash=sha256:b53db1523015a3acda75722357df6c94afae37f6023800c608e09a5c05393804 \ + --hash=sha256:ba8dba19e7b2b6f7369004533866f222ba483b9e14d2d152ecf9339c0df1283a \ + --hash=sha256:cbeeb3d8ec4cb25c92e17bfbdcef3c3669e85c5ee787a6e581cb942bc0ae2b88 \ + --hash=sha256:d08b398509ea4d544bcecddd9a21f59dc556396916c3915904cac206af2db72b \ + --hash=sha256:d634b65cc8ee769edccf1647d8a16861a27e0d8cbd787c711168d2c5e9bddbd1 \ + --hash=sha256:db0925545180223fabd6da9b34513efac83aa16673ef8b1cb0cc678e8cf0923c \ + --hash=sha256:dd5c78f8e7c6e721b9009c92481a0e3b30a9926ef721120723a03b8a34a34fb9 \ + --hash=sha256:dee5f7e7a56177234e61a483c70ca2ae34e73128372c801bb7039993870889f1 \ + --hash=sha256:df35d145bc2f6e5f57b74cb69f66526675a5f2dcf7d54617ce0deff0c82cca0a \ + --hash=sha256:e21fc172522d2dda815223a359b2aca9bc317a1b5e5dea5a58cd5079333af133 \ + --hash=sha256:e972746000aa192521715f776fab617a3437bed29e90fe0e0fd0d0d6f498d7d4 \ + --hash=sha256:eb597d6bf9f5bfa54d00546e828f0d4e2c69250d1bc17c27903c0c7b66372135 \ + --hash=sha256:ec2fbb02ebb9f2ae1b1c69cccf913dee8c41f5acad94014d3ce11b53720376e3 \ + --hash=sha256:ed8e6632d8d839456332d97b96db10bd2dbf3078e728d063394ac2d54597ad80 \ + --hash=sha256:f50ff312b88918c5a6461e45c5e03869749a066b1c24a7327e8e13e117efe4fc \ + --hash=sha256:f518f22a3082de00f0d7a216e96366a87e6973111085ba1603c3bfa7dba2e728 \ + --hash=sha256:f52e0ce8f2dcf1f160c847304016c446075a83ab925d98933d4681bfa8af2962 \ + --hash=sha256:fa1b9dee7811fad081816e884d063c4dd4946dba61aa54243b4c76c311090c48 \ + --hash=sha256:feca316e17cfead823af6eae0fc20c0d5299a94d71cfb7531a0e92d050a5fb2f # via -r requirements.in idna==3.4 \ --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \ diff --git a/examples/locality-load-balancing/verify.sh 
b/examples/locality-load-balancing/verify.sh index c6a2855df8ca..7f6727811257 100755 --- a/examples/locality-load-balancing/verify.sh +++ b/examples/locality-load-balancing/verify.sh @@ -79,7 +79,7 @@ make_healthy backend-local-1-1 make_healthy backend-local-2-1 run_log "Scale backend-local-1 to 5 replicas." -"${DOCKER_COMPOSE[@]}" -p ${NAME} up --scale backend-local-1=5 -d --build +"${DOCKER_COMPOSE[@]}" -p "${NAME}" up --scale backend-local-1=5 -d --build wait_for 5 check_health backend-local-1-2 healthy wait_for 5 check_health backend-local-1-3 healthy wait_for 5 check_health backend-local-1-4 healthy diff --git a/examples/mysql/Dockerfile-mysql b/examples/mysql/Dockerfile-mysql index 78d35f595fe0..dc99e678a59a 100644 --- a/examples/mysql/Dockerfile-mysql +++ b/examples/mysql/Dockerfile-mysql @@ -1 +1 @@ -FROM mysql:8.1.0@sha256:f61944ff3f2961363a4d22913b2ac581523273679d7e14dd26e8db8c9f571a7e +FROM mysql:8.2.0@sha256:1773f3c7aa9522f0014d0ad2bbdaf597ea3b1643c64c8ccc2123c64afd8b82b1 diff --git a/examples/shared/echo/Dockerfile b/examples/shared/echo/Dockerfile index 81f8c35019fd..16d356efc745 100644 --- a/examples/shared/echo/Dockerfile +++ b/examples/shared/echo/Dockerfile @@ -1 +1 @@ -FROM jmalloc/echo-server@sha256:57110914108448e6692cd28fc602332357f91951d74ca12217a347b1f7df599c +FROM jmalloc/echo-server@sha256:86f2c45aa7e7ebe1be30b21f8cfff25a7ed6e3b059751822d4b35bf244a688d5 diff --git a/mobile/ci/linux_ci_setup.sh b/mobile/ci/linux_ci_setup.sh new file mode 100755 index 000000000000..c74829272178 --- /dev/null +++ b/mobile/ci/linux_ci_setup.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +set -e + +# Set up necessary Android SDK and NDK. 
+ANDROID_HOME=$ANDROID_SDK_ROOT +SDKMANAGER="${ANDROID_SDK_ROOT}/cmdline-tools/latest/bin/sdkmanager" +$SDKMANAGER --uninstall "ndk-bundle" +echo "y" | $SDKMANAGER "ndk;21.4.7075529" +$SDKMANAGER --install "build-tools;30.0.3" +echo "ANDROID_NDK_HOME=${ANDROID_HOME}/ndk/21.4.7075529" >> "$GITHUB_ENV" diff --git a/mobile/ci/start_android_emulator.sh b/mobile/ci/start_android_emulator.sh index 0ba2e4c4d36b..8b582c343304 100755 --- a/mobile/ci/start_android_emulator.sh +++ b/mobile/ci/start_android_emulator.sh @@ -16,8 +16,8 @@ check_emulator_status() { done } -echo "y" | "${ANDROID_HOME}/cmdline-tools/latest/bin/sdkmanager" --install 'system-images;android-30;google_atd;x86_64' --channel=3 -echo "no" | "${ANDROID_HOME}/cmdline-tools/latest/bin/avdmanager" create avd -n test_android_emulator -k 'system-images;android-30;google_atd;x86_64' --device pixel_4 --force +echo "y" | "${ANDROID_HOME}/cmdline-tools/latest/bin/sdkmanager" --install 'system-images;android-30;google_apis;x86_64' --channel=3 +echo "no" | "${ANDROID_HOME}/cmdline-tools/latest/bin/avdmanager" create avd -n test_android_emulator -k 'system-images;android-30;google_apis;x86_64' --device pixel_4 --force "${ANDROID_HOME}"/emulator/emulator -accel-check # This is only available on macOS. 
if [[ -n $(which system_profiler) ]]; then @@ -25,8 +25,10 @@ if [[ -n $(which system_profiler) ]]; then fi # shellcheck disable=SC2094 -nohup "${ANDROID_HOME}/emulator/emulator" -partition-size 1024 -avd test_android_emulator -no-snapshot-load > nohup.out 2>&1 | tail -f nohup.out & { - check_emulator_status +nohup "${ANDROID_HOME}/emulator/emulator" -no-window -accel on -gpu swiftshader_indirect -no-snapshot -noaudio -no-boot-anim -avd test_android_emulator > nohup.out 2>&1 | tail -f nohup.out & { + if [[ "$(uname -s)" == "Darwin" ]]; then + check_emulator_status + fi # shellcheck disable=SC2016 "${ANDROID_HOME}/platform-tools/adb" wait-for-device shell 'while [[ -z $(getprop sys.boot_completed | tr -d '\''\r'\'') ]]; do sleep 1; done; input keyevent 82' } diff --git a/mobile/docs/root/api/starting_envoy.rst b/mobile/docs/root/api/starting_envoy.rst index cc2a86c01f8e..0385c0ccccb4 100644 --- a/mobile/docs/root/api/starting_envoy.rst +++ b/mobile/docs/root/api/starting_envoy.rst @@ -395,6 +395,23 @@ This allows HTTP/3 to be used for the first request to the hosts and avoid the H // Swift builder.addQuicHint("www.example.com", 443) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +``addQuicCanonicalSuffix`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Add a canonical suffix that's known to speak QUIC. +This feature works as a extension to QUIC hints in such way that: +if `.abc.com` is added to canonical suffix, and `foo.abc.com` is added to QUIC hint, then all requests to +`*.abc.com` will be considered QUIC ready. 
+ +**Example**:: + + // Kotlin + builder.addQuicCanonicalSuffix(".example.com") + + // Swift + builder.addQuicCanonicalSuffix(".example.com") + ~~~~~~~~~~~~~~~~~~~~~~~ ``enableSocketTagging`` ~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/mobile/library/cc/engine_builder.cc b/mobile/library/cc/engine_builder.cc index c469f5daf0ec..11841e6e528a 100644 --- a/mobile/library/cc/engine_builder.cc +++ b/mobile/library/cc/engine_builder.cc @@ -52,14 +52,6 @@ XdsBuilder& XdsBuilder::setAuthenticationToken(std::string token_header, std::st return *this; } -XdsBuilder& XdsBuilder::setJwtAuthenticationToken(std::string token, - const int token_lifetime_in_seconds) { - jwt_token_ = std::move(token); - jwt_token_lifetime_in_seconds_ = - token_lifetime_in_seconds > 0 ? token_lifetime_in_seconds : DefaultJwtTokenLifetimeSeconds; - return *this; -} - XdsBuilder& XdsBuilder::setSslRootCerts(std::string root_certs) { ssl_root_certs_ = std::move(root_certs); return *this; @@ -106,12 +98,6 @@ void XdsBuilder::build(envoy::config::bootstrap::v3::Bootstrap* bootstrap) const auto* auth_token_metadata = grpc_service.add_initial_metadata(); auth_token_metadata->set_key(authentication_token_header_); auth_token_metadata->set_value(authentication_token_); - } else if (!jwt_token_.empty()) { - auto& jwt = *grpc_service.mutable_google_grpc() - ->add_call_credentials() - ->mutable_service_account_jwt_access(); - jwt.set_json_key(jwt_token_); - jwt.set_token_lifetime_seconds(jwt_token_lifetime_in_seconds_); } if (!sni_.empty()) { auto& channel_args = @@ -313,6 +299,11 @@ EngineBuilder& EngineBuilder::addQuicHint(std::string host, int port) { return *this; } +EngineBuilder& EngineBuilder::addQuicCanonicalSuffix(std::string suffix) { + quic_suffixes_.emplace_back(std::move(suffix)); + return *this; +} + #endif EngineBuilder& EngineBuilder::setForceAlwaysUsev6(bool value) { @@ -467,6 +458,9 @@ std::unique_ptr EngineBuilder::generate entry->set_hostname(host); entry->set_port(port); } + for (const auto& 
suffix : quic_suffixes_) { + cache_config.mutable_alternate_protocols_cache_options()->add_canonical_suffixes(suffix); + } auto* cache_filter = hcm->add_http_filters(); cache_filter->set_name("alternate_protocols_cache"); cache_filter->mutable_typed_config()->PackFrom(cache_config); @@ -835,6 +829,11 @@ std::unique_ptr EngineBuilder::generate entry->set_hostname(host); entry->set_port(port); } + for (const auto& suffix : quic_suffixes_) { + alpn_options.mutable_auto_config() + ->mutable_alternate_protocols_cache_options() + ->add_canonical_suffixes(suffix); + } base_cluster->mutable_transport_socket()->mutable_typed_config()->PackFrom(h3_proxy_socket); (*base_cluster->mutable_typed_extension_protocol_options()) diff --git a/mobile/library/cc/engine_builder.h b/mobile/library/cc/engine_builder.h index e51aa730f085..f3e5d5037817 100644 --- a/mobile/library/cc/engine_builder.h +++ b/mobile/library/cc/engine_builder.h @@ -22,7 +22,6 @@ namespace Envoy { namespace Platform { -constexpr int DefaultJwtTokenLifetimeSeconds = 60 * 60 * 24 * 90; // 90 days constexpr int DefaultXdsTimeout = 5; // Forward declaration so it can be referenced by XdsBuilder. @@ -58,24 +57,10 @@ class XdsBuilder final { // https://cloud.google.com/docs/authentication/api-keys for details), invoke: // builder.setAuthenticationToken("x-goog-api-key", api_key_token) // - // If this method is called, then don't call setJwtAuthenticationToken. - // // `token_header`: the header name for which the the `token` will be set as a value. // `token`: the authentication token. XdsBuilder& setAuthenticationToken(std::string token_header, std::string token); - // Sets JWT as the authentication method to the xDS management server, using the given token. - // - // If setAuthenticationToken is called, then invocations of this method will be ignored. - // - // `token`: the JWT token used to authenticate the client to the xDS management server. 
- // `token_lifetime_in_seconds`: the lifetime of the JWT token, in seconds. If none - // (or 0) is specified, then DefaultJwtTokenLifetimeSeconds is used. - // TODO(abeyad): Deprecate and remove this. - XdsBuilder& - setJwtAuthenticationToken(std::string token, - int token_lifetime_in_seconds = DefaultJwtTokenLifetimeSeconds); - // Sets the PEM-encoded server root certificates used to negotiate the TLS handshake for the gRPC // connection. If no root certs are specified, the operating system defaults are used. XdsBuilder& setSslRootCerts(std::string root_certs); @@ -127,8 +112,6 @@ class XdsBuilder final { int xds_server_port_; std::string authentication_token_header_; std::string authentication_token_; - std::string jwt_token_; - int jwt_token_lifetime_in_seconds_ = DefaultJwtTokenLifetimeSeconds; std::string ssl_root_certs_; std::string sni_; std::string rtds_resource_name_; @@ -175,6 +158,7 @@ class EngineBuilder { EngineBuilder& setHttp3ConnectionOptions(std::string options); EngineBuilder& setHttp3ClientConnectionOptions(std::string options); EngineBuilder& addQuicHint(std::string host, int port); + EngineBuilder& addQuicCanonicalSuffix(std::string suffix); #endif EngineBuilder& enableInterfaceBinding(bool interface_binding_on); EngineBuilder& enableDrainPostDnsRefresh(bool drain_post_dns_refresh_on); @@ -261,6 +245,7 @@ class EngineBuilder { std::string http3_connection_options_ = ""; std::string http3_client_connection_options_ = ""; std::vector> quic_hints_; + std::vector quic_suffixes_; bool always_use_v6_ = false; int dns_min_refresh_seconds_ = 60; int max_connections_per_host_ = 7; diff --git a/mobile/library/common/engine_common.cc b/mobile/library/common/engine_common.cc index 79e60393b31f..61875a82f0e2 100644 --- a/mobile/library/common/engine_common.cc +++ b/mobile/library/common/engine_common.cc @@ -71,10 +71,12 @@ EngineCommon::EngineCommon(std::unique_ptr&& options) Buffer::WatermarkFactorySharedPtr watermark_factory) { // TODO(alyssawilk) use 
InstanceLite not InstanceImpl. auto local_address = Network::Utility::getLocalAddress(options.localAddressIpVersion()); - return std::make_unique( - init_manager, options, time_system, local_address, hooks, restarter, store, - access_log_lock, component_factory, std::move(random_generator), tls, thread_factory, - file_system, std::move(process_context), watermark_factory); + auto server = std::make_unique( + init_manager, options, time_system, hooks, restarter, store, access_log_lock, + std::move(random_generator), tls, thread_factory, file_system, + std::move(process_context), watermark_factory); + server->initialize(local_address, component_factory); + return server; }; base_ = std::make_unique( *options_, real_time_system_, default_listener_hooks_, prod_component_factory_, diff --git a/mobile/library/common/jni/BUILD b/mobile/library/common/jni/BUILD index 6dfa3a14842a..15097aedcb3d 100644 --- a/mobile/library/common/jni/BUILD +++ b/mobile/library/common/jni/BUILD @@ -47,6 +47,20 @@ cc_library( ], ) +cc_library( + name = "jni_helper_lib", + srcs = [ + "jni_helper.cc", + ], + hdrs = [ + "jni_helper.h", + ], + deps = [ + "//library/common/jni/import:jni_import_lib", + "@envoy//source/common/common:assert_lib", + ], +) + # Implementations of the various "native" Java methods for classes # in library/java/io/envoyproxy/envoymobile. # TODO(RyanTheOptimist): Is there a better name for this? 
I'm not sure what diff --git a/mobile/library/common/jni/jni_helper.cc b/mobile/library/common/jni/jni_helper.cc new file mode 100644 index 000000000000..ead2d6a85f7a --- /dev/null +++ b/mobile/library/common/jni/jni_helper.cc @@ -0,0 +1,226 @@ +#include "library/common/jni/jni_helper.h" + +#include "source/common/common/assert.h" + +namespace Envoy { +namespace JNI { + +jmethodID JniHelper::getMethodId(jclass clazz, const char* name, const char* signature) { + jmethodID method_id = env_->GetMethodID(clazz, name, signature); + rethrowException(); + return method_id; +} + +jmethodID JniHelper::getStaticMethodId(jclass clazz, const char* name, const char* signature) { + jmethodID method_id = env_->GetStaticMethodID(clazz, name, signature); + rethrowException(); + return method_id; +} + +LocalRefUniquePtr JniHelper::findClass(const char* class_name) { + LocalRefUniquePtr result(env_->FindClass(class_name), LocalRefDeleter(env_)); + rethrowException(); + return result; +} + +LocalRefUniquePtr JniHelper::getObjectClass(jobject object) { + return {env_->GetObjectClass(object), LocalRefDeleter(env_)}; +} + +void JniHelper::throwNew(const char* java_class_name, const char* message) { + LocalRefUniquePtr java_class = findClass(java_class_name); + if (java_class != nullptr) { + jint error = env_->ThrowNew(java_class.get(), message); + RELEASE_ASSERT(error == JNI_OK, fmt::format("Failed calling ThrowNew.")); + } +} + +LocalRefUniquePtr JniHelper::exceptionOccurred() { + return {env_->ExceptionOccurred(), LocalRefDeleter(env_)}; +} + +GlobalRefUniquePtr JniHelper::newGlobalRef(jobject object) { + GlobalRefUniquePtr result(env_->NewGlobalRef(object), GlobalRefDeleter(env_)); + RELEASE_ASSERT(result != nullptr, "Failed calling NewGlobalRef."); + return result; +} + +LocalRefUniquePtr JniHelper::newObject(jclass clazz, jmethodID method_id, ...) 
{ + va_list args; + va_start(args, method_id); + LocalRefUniquePtr result(env_->NewObjectV(clazz, method_id, args), + LocalRefDeleter(env_)); + rethrowException(); + va_end(args); + return result; +} + +LocalRefUniquePtr JniHelper::newStringUtf(const char* str) { + LocalRefUniquePtr result(env_->NewStringUTF(str), LocalRefDeleter(env_)); + rethrowException(); + return result; +} + +StringUtfUniquePtr JniHelper::getStringUtfChars(jstring str, jboolean* is_copy) { + StringUtfUniquePtr result(env_->GetStringUTFChars(str, is_copy), StringUtfDeleter(env_, str)); + rethrowException(); + return result; +} + +jsize JniHelper::getArrayLength(jarray array) { return env_->GetArrayLength(array); } + +#define DEFINE_NEW_ARRAY(JAVA_TYPE, JNI_TYPE) \ + LocalRefUniquePtr JniHelper::new##JAVA_TYPE##Array(jsize length) { \ + LocalRefUniquePtr result(env_->New##JAVA_TYPE##Array(length), \ + LocalRefDeleter(env_)); \ + rethrowException(); \ + return result; \ + } + +DEFINE_NEW_ARRAY(Byte, jbyteArray) +DEFINE_NEW_ARRAY(Char, jcharArray) +DEFINE_NEW_ARRAY(Short, jshortArray) +DEFINE_NEW_ARRAY(Int, jintArray) +DEFINE_NEW_ARRAY(Long, jlongArray) +DEFINE_NEW_ARRAY(Float, jfloatArray) +DEFINE_NEW_ARRAY(Double, jdoubleArray) +DEFINE_NEW_ARRAY(Boolean, jbooleanArray) + +LocalRefUniquePtr JniHelper::newObjectArray(jsize length, jclass element_class, + jobject initial_element) { + LocalRefUniquePtr result( + env_->NewObjectArray(length, element_class, initial_element), LocalRefDeleter(env_)); + + return result; +} + +#define DEFINE_GET_ARRAY_ELEMENTS(JAVA_TYPE, JNI_ARRAY_TYPE, JNI_ELEMENT_TYPE) \ + ArrayElementsUniquePtr \ + JniHelper::get##JAVA_TYPE##ArrayElements(JNI_ARRAY_TYPE array, jboolean* is_copy) { \ + ArrayElementsUniquePtr result( \ + env_->Get##JAVA_TYPE##ArrayElements(array, is_copy), \ + ArrayElementsDeleter(env_, array)); \ + rethrowException(); \ + return result; \ + } + +DEFINE_GET_ARRAY_ELEMENTS(Byte, jbyteArray, jbyte) +DEFINE_GET_ARRAY_ELEMENTS(Char, jcharArray, jchar) 
+DEFINE_GET_ARRAY_ELEMENTS(Short, jshortArray, jshort) +DEFINE_GET_ARRAY_ELEMENTS(Int, jintArray, jint) +DEFINE_GET_ARRAY_ELEMENTS(Long, jlongArray, jlong) +DEFINE_GET_ARRAY_ELEMENTS(Float, jfloatArray, jfloat) +DEFINE_GET_ARRAY_ELEMENTS(Double, jdoubleArray, jdouble) +DEFINE_GET_ARRAY_ELEMENTS(Boolean, jbooleanArray, jboolean) + +LocalRefUniquePtr JniHelper::getObjectArrayElement(jobjectArray array, jsize index) { + LocalRefUniquePtr result(env_->GetObjectArrayElement(array, index), + LocalRefDeleter(env_)); + rethrowException(); + return result; +} + +void JniHelper::setObjectArrayElement(jobjectArray array, jsize index, jobject value) { + env_->SetObjectArrayElement(array, index, value); + rethrowException(); +} + +PrimitiveArrayCriticalUniquePtr JniHelper::getPrimitiveArrayCritical(jarray array, + jboolean* is_copy) { + PrimitiveArrayCriticalUniquePtr result(env_->GetPrimitiveArrayCritical(array, is_copy), + PrimitiveArrayCriticalDeleter(env_, array)); + rethrowException(); + return result; +} + +#define DEFINE_CALL_METHOD(JAVA_TYPE, JNI_TYPE) \ + JNI_TYPE JniHelper::call##JAVA_TYPE##Method(jobject object, jmethodID method_id, ...) { \ + va_list args; \ + va_start(args, method_id); \ + JNI_TYPE result = env_->Call##JAVA_TYPE##MethodV(object, method_id, args); \ + va_end(args); \ + rethrowException(); \ + return result; \ + } + +DEFINE_CALL_METHOD(Byte, jbyte) +DEFINE_CALL_METHOD(Char, jchar) +DEFINE_CALL_METHOD(Short, jshort) +DEFINE_CALL_METHOD(Int, jint) +DEFINE_CALL_METHOD(Long, jlong) +DEFINE_CALL_METHOD(Float, jfloat) +DEFINE_CALL_METHOD(Double, jdouble) +DEFINE_CALL_METHOD(Boolean, jboolean) + +void JniHelper::callVoidMethod(jobject object, jmethodID method_id, ...) { + va_list args; + va_start(args, method_id); + env_->CallVoidMethodV(object, method_id, args); + va_end(args); + rethrowException(); +} + +LocalRefUniquePtr JniHelper::callObjectMethod(jobject object, jmethodID method_id, ...) 
{ + va_list args; + va_start(args, method_id); + LocalRefUniquePtr result(env_->CallObjectMethodV(object, method_id, args), + LocalRefDeleter(env_)); + va_end(args); + rethrowException(); + return result; +} + +#define DEFINE_CALL_STATIC_METHOD(JAVA_TYPE, JNI_TYPE) \ + JNI_TYPE JniHelper::callStatic##JAVA_TYPE##Method(jclass clazz, jmethodID method_id, ...) { \ + va_list args; \ + va_start(args, method_id); \ + JNI_TYPE result = env_->CallStatic##JAVA_TYPE##MethodV(clazz, method_id, args); \ + va_end(args); \ + rethrowException(); \ + return result; \ + } + +DEFINE_CALL_STATIC_METHOD(Byte, jbyte) +DEFINE_CALL_STATIC_METHOD(Char, jchar) +DEFINE_CALL_STATIC_METHOD(Short, jshort) +DEFINE_CALL_STATIC_METHOD(Int, jint) +DEFINE_CALL_STATIC_METHOD(Long, jlong) +DEFINE_CALL_STATIC_METHOD(Float, jfloat) +DEFINE_CALL_STATIC_METHOD(Double, jdouble) +DEFINE_CALL_STATIC_METHOD(Boolean, jboolean) + +void JniHelper::callStaticVoidMethod(jclass clazz, jmethodID method_id, ...) { + va_list args; + va_start(args, method_id); + env_->CallStaticVoidMethodV(clazz, method_id, args); + va_end(args); + rethrowException(); +} + +LocalRefUniquePtr JniHelper::callStaticObjectMethod(jclass clazz, jmethodID method_id, + ...) 
{ + va_list args; + va_start(args, method_id); + LocalRefUniquePtr result(env_->CallStaticObjectMethodV(clazz, method_id, args), + LocalRefDeleter(env_)); + va_end(args); + rethrowException(); + return result; +} + +jlong JniHelper::getDirectBufferCapacity(jobject buffer) { + jlong result = env_->GetDirectBufferCapacity(buffer); + RELEASE_ASSERT(result != -1, "Failed calling GetDirectBufferCapacity."); + return result; +} + +void JniHelper::rethrowException() { + if (env_->ExceptionCheck()) { + auto throwable = exceptionOccurred(); + env_->ExceptionClear(); + env_->Throw(throwable.release()); + } +} + +} // namespace JNI +} // namespace Envoy diff --git a/mobile/library/common/jni/jni_helper.h b/mobile/library/common/jni/jni_helper.h new file mode 100644 index 000000000000..6ac0388f5b05 --- /dev/null +++ b/mobile/library/common/jni/jni_helper.h @@ -0,0 +1,317 @@ +#pragma once + +#include + +#include "library/common/jni/import/jni_import.h" + +namespace Envoy { +namespace JNI { + +/** A custom deleter to delete JNI global ref. */ +class GlobalRefDeleter { +public: + explicit GlobalRefDeleter(JNIEnv* env) : env_(env) {} + + void operator()(jobject object) const { + if (object != nullptr) { + env_->DeleteGlobalRef(object); + } + } + +private: + JNIEnv* const env_; +}; + +/** A unique pointer for JNI global ref. */ +template +using GlobalRefUniquePtr = std::unique_ptr::type, GlobalRefDeleter>; + +/** A custom deleter to delete JNI local ref. */ +class LocalRefDeleter { +public: + explicit LocalRefDeleter(JNIEnv* env) : env_(env) {} + + void operator()(jobject object) const { + if (object != nullptr) { + env_->DeleteLocalRef(object); + } + } + +private: + JNIEnv* const env_; +}; + +/** A unique pointer for JNI local ref. */ +template +using LocalRefUniquePtr = std::unique_ptr::type, LocalRefDeleter>; + +/** A custom deleter for UTF strings. 
*/ +class StringUtfDeleter { +public: + StringUtfDeleter(JNIEnv* env, jstring j_str) : env_(env), j_str_(j_str) {} + + void operator()(const char* c_str) const { + if (c_str != nullptr) { + env_->ReleaseStringUTFChars(j_str_, c_str); + } + } + +private: + JNIEnv* const env_; + jstring j_str_; +}; + +/** A unique pointer for JNI UTF string. */ +using StringUtfUniquePtr = std::unique_ptr; + +/** A custom deleter to delete JNI array elements. */ +template class ArrayElementsDeleter { +public: + ArrayElementsDeleter(JNIEnv* env, ArrayType array) : env_(env), array_(array) {} + + void operator()(ElementType* elements) const { + if (elements == nullptr) { + return; + } + if constexpr (std::is_same_v) { + env_->ReleaseByteArrayElements(array_, elements, 0); + } else if constexpr (std::is_same_v) { + env_->ReleaseCharArrayElements(array_, elements, 0); + } else if constexpr (std::is_same_v) { + env_->ReleaseShortArrayElements(array_, elements, 0); + } else if constexpr (std::is_same_v) { + env_->ReleaseIntArrayElements(array_, elements, 0); + } else if constexpr (std::is_same_v) { + env_->ReleaseLongArrayElements(array_, elements, 0); + } else if constexpr (std::is_same_v) { + env_->ReleaseFloatArrayElements(array_, elements, 0); + } else if constexpr (std::is_same_v) { + env_->ReleaseDoubleArrayElements(array_, elements, 0); + } else if constexpr (std::is_same_v) { + env_->ReleaseBooleanArrayElements(array_, elements, 0); + } + } + +private: + JNIEnv* const env_; + ArrayType array_; +}; + +/** A unique pointer for JNI array elements. */ +template +using ArrayElementsUniquePtr = std::unique_ptr< + typename std::remove_pointer::type, + ArrayElementsDeleter::type>>; + +/** A custom deleter for JNI primitive array critical. 
*/ +class PrimitiveArrayCriticalDeleter { +public: + PrimitiveArrayCriticalDeleter(JNIEnv* env, jarray array) : env_(env), array_(array) {} + + void operator()(void* c_array) const { + if (c_array != nullptr) { + env_->ReleasePrimitiveArrayCritical(array_, c_array, 0); + } + } + +private: + JNIEnv* const env_; + jarray array_; +}; + +/** A unique pointer for JNI primitive array critical. */ +using PrimitiveArrayCriticalUniquePtr = std::unique_ptr; + +/** + * A thin wrapper around JNI API with memory-safety. + * + * NOTE: Do not put any other helper functions that are not part of the JNI API here. + */ +class JniHelper { +public: + explicit JniHelper(JNIEnv* env) : env_(env) {} + + /** + * Gets the object method with the given signature. + * + * https://docs.oracle.com/en/java/javase/17/docs/specs/jni/functions.html#getmethodid + */ + jmethodID getMethodId(jclass clazz, const char* name, const char* signature); + + /** + * Gets the static method with the given signature. + * + * https://docs.oracle.com/en/java/javase/17/docs/specs/jni/functions.html#getstaticmethodid + */ + jmethodID getStaticMethodId(jclass clazz, const char* name, const char* signature); + + /** + * Finds the given `class_name` using Java classloader. + * + * https://docs.oracle.com/en/java/javase/17/docs/specs/jni/functions.html#findclass + */ + LocalRefUniquePtr findClass(const char* class_name); + + /** + * Returns the class of a given `object`. + * + * https://docs.oracle.com/en/java/javase/17/docs/specs/jni/functions.html#getobjectclass + */ + LocalRefUniquePtr getObjectClass(jobject object); + + /** + * Throws Java exception with the specified class name and error message. + * + * https://docs.oracle.com/en/java/javase/17/docs/specs/jni/functions.html#thrownew + */ + void throwNew(const char* java_class_name, const char* message); + + /** + * Determines if an exception is being thrown. 
+ * + * https://docs.oracle.com/en/java/javase/17/docs/specs/jni/functions.html#exceptionoccurred + */ + LocalRefUniquePtr exceptionOccurred(); + + /** + * Creates a new global reference to the object referred to by the `object` argument. + * + * https://docs.oracle.com/en/java/javase/17/docs/specs/jni/functions.html#newglobalref + */ + GlobalRefUniquePtr newGlobalRef(jobject object); + + /** + * Creates a new instance of a given `clazz` from the given `method_id`. + * + * https://docs.oracle.com/en/java/javase/17/docs/specs/jni/functions.html#newobject-newobjecta-newobjectv + */ + LocalRefUniquePtr newObject(jclass clazz, jmethodID method_id, ...); + + /** + * Creates a new Java string from the given `str`. + * + * https://docs.oracle.com/en/java/javase/17/docs/specs/jni/functions.html#newstringutf + */ + LocalRefUniquePtr newStringUtf(const char* str); + + /** Gets the pointer to an array of bytes representing `str`. */ + StringUtfUniquePtr getStringUtfChars(jstring str, jboolean* is_copy); + + /** + * Gets the size of the array. + * + * https://docs.oracle.com/en/java/javase/17/docs/specs/jni/functions.html#getarraylength + */ + jsize getArrayLength(jarray array); + +/** A macro to create `NewArray`. helper function. */ +#define DECLARE_NEW_ARRAY(JAVA_TYPE, JNI_TYPE) \ + LocalRefUniquePtr new##JAVA_TYPE##Array(jsize length); + + /** + * Helper functions for `NewArray`. + * + * https://docs.oracle.com/en/java/javase/17/docs/specs/jni/functions.html#newprimitivetypearray-routines + */ + DECLARE_NEW_ARRAY(Byte, jbyteArray) + DECLARE_NEW_ARRAY(Char, jcharArray) + DECLARE_NEW_ARRAY(Short, jshortArray) + DECLARE_NEW_ARRAY(Int, jintArray) + DECLARE_NEW_ARRAY(Long, jlongArray) + DECLARE_NEW_ARRAY(Float, jfloatArray) + DECLARE_NEW_ARRAY(Double, jdoubleArray) + DECLARE_NEW_ARRAY(Boolean, jbooleanArray) + LocalRefUniquePtr newObjectArray(jsize length, jclass element_class, + jobject initial_element = nullptr); + +/** A macro to create `GetArrayElement` function. 
*/ +#define DECLARE_GET_ARRAY_ELEMENTS(JAVA_TYPE, JNI_ARRAY_TYPE, JNI_ELEMENT_TYPE) \ + ArrayElementsUniquePtr get##JAVA_TYPE##ArrayElements( \ + JNI_ARRAY_TYPE array, jboolean* is_copy); + + /** + * Helper functions for `GetArrayElements`. + * + * https://docs.oracle.com/en/java/javase/17/docs/specs/jni/functions.html#getprimitivetypearrayelements-routines + */ + DECLARE_GET_ARRAY_ELEMENTS(Byte, jbyteArray, jbyte) + DECLARE_GET_ARRAY_ELEMENTS(Char, jcharArray, jchar) + DECLARE_GET_ARRAY_ELEMENTS(Short, jshortArray, jshort) + DECLARE_GET_ARRAY_ELEMENTS(Int, jintArray, jint) + DECLARE_GET_ARRAY_ELEMENTS(Long, jlongArray, jlong) + DECLARE_GET_ARRAY_ELEMENTS(Float, jfloatArray, jfloat) + DECLARE_GET_ARRAY_ELEMENTS(Double, jdoubleArray, jdouble) + DECLARE_GET_ARRAY_ELEMENTS(Boolean, jbooleanArray, jboolean) + + /** + * Gets an element of a given `array` with the specified `index`. + * + * https://docs.oracle.com/en/java/javase/17/docs/specs/jni/functions.html#getobjectarrayelement + */ + LocalRefUniquePtr getObjectArrayElement(jobjectArray array, jsize index); + + /** + * Sets an element of a given `array` with the specified `index. + * + * https://docs.oracle.com/en/java/javase/17/docs/specs/jni/functions.html#setobjectarrayelement + */ + void setObjectArrayElement(jobjectArray array, jsize index, jobject value); + + PrimitiveArrayCriticalUniquePtr getPrimitiveArrayCritical(jarray array, jboolean* is_copy); + +/** A macro to create `CallMethod` helper function. */ +#define DECLARE_CALL_METHOD(JAVA_TYPE, JNI_TYPE) \ + JNI_TYPE call##JAVA_TYPE##Method(jobject object, jmethodID method_id, ...); + + /** + * Helper functions for `CallMethod`. 
+ * + * https://docs.oracle.com/en/java/javase/17/docs/specs/jni/functions.html#calltypemethod-routines-calltypemethoda-routines-calltypemethodv-routines + */ + DECLARE_CALL_METHOD(Byte, jbyte) + DECLARE_CALL_METHOD(Char, jchar) + DECLARE_CALL_METHOD(Short, jshort) + DECLARE_CALL_METHOD(Int, jint) + DECLARE_CALL_METHOD(Long, jlong) + DECLARE_CALL_METHOD(Float, jfloat) + DECLARE_CALL_METHOD(Double, jdouble) + DECLARE_CALL_METHOD(Boolean, jboolean) + void callVoidMethod(jobject object, jmethodID method_id, ...); + LocalRefUniquePtr callObjectMethod(jobject object, jmethodID method_id, ...); + +/** A macro to create `CallStaticMethod` helper function. */ +#define DECLARE_CALL_STATIC_METHOD(JAVA_TYPE, JNI_TYPE) \ + JNI_TYPE callStatic##JAVA_TYPE##Method(jclass clazz, jmethodID method_id, ...); + + /** + * Helper functions for `CallStaticMethod`. + * + * https://docs.oracle.com/en/java/javase/17/docs/specs/jni/functions.html#callstatictypemethod-routines-callstatictypemethoda-routines-callstatictypemethodv-routines + */ + DECLARE_CALL_STATIC_METHOD(Byte, jbyte) + DECLARE_CALL_STATIC_METHOD(Char, jchar) + DECLARE_CALL_STATIC_METHOD(Short, jshort) + DECLARE_CALL_STATIC_METHOD(Int, jint) + DECLARE_CALL_STATIC_METHOD(Long, jlong) + DECLARE_CALL_STATIC_METHOD(Float, jfloat) + DECLARE_CALL_STATIC_METHOD(Double, jdouble) + DECLARE_CALL_STATIC_METHOD(Boolean, jboolean) + void callStaticVoidMethod(jclass clazz, jmethodID method_id, ...); + LocalRefUniquePtr callStaticObjectMethod(jclass clazz, jmethodID method_id, ...); + + /** + * Returns the capacity of the memory region referenced by the given `java.nio.Buffer` object. + * + * https://docs.oracle.com/en/java/javase/17/docs/specs/jni/functions.html#getdirectbuffercapacity + */ + jlong getDirectBufferCapacity(jobject buffer); + +private: + /** Rethrows the Java exception occurred. 
*/ + void rethrowException(); + + JNIEnv* const env_; +}; + +} // namespace JNI +} // namespace Envoy diff --git a/mobile/library/common/jni/jni_interface.cc b/mobile/library/common/jni/jni_interface.cc index 955a5eb35978..74a4845ccce8 100644 --- a/mobile/library/common/jni/jni_interface.cc +++ b/mobile/library/common/jni/jni_interface.cc @@ -1211,9 +1211,9 @@ void configureBuilder(JNIEnv* env, jstring grpc_stats_domain, jlong connect_time jboolean enable_dns_cache, jlong dns_cache_save_interval_seconds, jboolean enable_drain_post_dns_refresh, jboolean enable_http3, jstring http3_connection_options, jstring http3_client_connection_options, - jobjectArray quic_hints, jboolean enable_gzip_decompression, - jboolean enable_brotli_decompression, jboolean enable_socket_tagging, - jboolean enable_interface_binding, + jobjectArray quic_hints, jobjectArray quic_canonical_suffixes, + jboolean enable_gzip_decompression, jboolean enable_brotli_decompression, + jboolean enable_socket_tagging, jboolean enable_interface_binding, jlong h2_connection_keepalive_idle_interval_milliseconds, jlong h2_connection_keepalive_timeout_seconds, jlong max_connections_per_host, jlong stats_flush_seconds, jlong stream_idle_timeout_seconds, @@ -1252,6 +1252,11 @@ void configureBuilder(JNIEnv* env, jstring grpc_stats_domain, jlong connect_time for (std::pair& entry : hints) { builder.addQuicHint(entry.first, stoi(entry.second)); } + std::vector suffixes = javaObjectArrayToStringVector(env, quic_canonical_suffixes); + for (std::string& suffix : suffixes) { + builder.addQuicCanonicalSuffix(suffix); + } + #endif builder.enableInterfaceBinding(enable_interface_binding == JNI_TRUE); builder.enableDrainPostDnsRefresh(enable_drain_post_dns_refresh == JNI_TRUE); @@ -1301,18 +1306,17 @@ extern "C" JNIEXPORT jlong JNICALL Java_io_envoyproxy_envoymobile_engine_JniLibr jlong dns_cache_save_interval_seconds, jboolean enable_drain_post_dns_refresh, jboolean enable_http3, jstring http3_connection_options, 
jstring http3_client_connection_options, jobjectArray quic_hints, - jboolean enable_gzip_decompression, jboolean enable_brotli_decompression, - jboolean enable_socket_tagging, jboolean enable_interface_binding, - jlong h2_connection_keepalive_idle_interval_milliseconds, + jobjectArray quic_canonical_suffixes, jboolean enable_gzip_decompression, + jboolean enable_brotli_decompression, jboolean enable_socket_tagging, + jboolean enable_interface_binding, jlong h2_connection_keepalive_idle_interval_milliseconds, jlong h2_connection_keepalive_timeout_seconds, jlong max_connections_per_host, jlong stats_flush_seconds, jlong stream_idle_timeout_seconds, jlong per_try_idle_timeout_seconds, jstring app_version, jstring app_id, jboolean trust_chain_verification, jobjectArray filter_chain, jobjectArray stat_sinks, jboolean enable_platform_certificates_validation, jobjectArray runtime_guards, jstring rtds_resource_name, jlong rtds_timeout_seconds, jstring xds_address, jlong xds_port, - jstring xds_auth_header, jstring xds_auth_token, jstring xds_jwt_token, - jlong xds_jwt_token_lifetime, jstring xds_root_certs, jstring xds_sni, jstring node_id, - jstring node_region, jstring node_zone, jstring node_sub_zone, + jstring xds_auth_header, jstring xds_auth_token, jstring xds_root_certs, jstring xds_sni, + jstring node_id, jstring node_region, jstring node_zone, jstring node_sub_zone, jbyteArray serialized_node_metadata, jstring cds_resources_locator, jlong cds_timeout_seconds, jboolean enable_cds) { Envoy::Platform::EngineBuilder builder; @@ -1322,8 +1326,8 @@ extern "C" JNIEXPORT jlong JNICALL Java_io_envoyproxy_envoymobile_engine_JniLibr dns_query_timeout_seconds, dns_min_refresh_seconds, dns_preresolve_hostnames, enable_dns_cache, dns_cache_save_interval_seconds, enable_drain_post_dns_refresh, enable_http3, http3_connection_options, http3_client_connection_options, - quic_hints, enable_gzip_decompression, enable_brotli_decompression, - enable_socket_tagging, 
enable_interface_binding, + quic_hints, quic_canonical_suffixes, enable_gzip_decompression, + enable_brotli_decompression, enable_socket_tagging, enable_interface_binding, h2_connection_keepalive_idle_interval_milliseconds, h2_connection_keepalive_timeout_seconds, max_connections_per_host, stats_flush_seconds, stream_idle_timeout_seconds, per_try_idle_timeout_seconds, @@ -1340,10 +1344,6 @@ extern "C" JNIEXPORT jlong JNICALL Java_io_envoyproxy_envoymobile_engine_JniLibr xds_builder.setAuthenticationToken(std::move(native_xds_auth_header), getCppString(env, xds_auth_token)); } - std::string native_jwt_token = getCppString(env, xds_jwt_token); - if (!native_jwt_token.empty()) { - xds_builder.setJwtAuthenticationToken(std::move(native_jwt_token), xds_jwt_token_lifetime); - } std::string native_root_certs = getCppString(env, xds_root_certs); if (!native_root_certs.empty()) { xds_builder.setSslRootCerts(std::move(native_root_certs)); diff --git a/mobile/library/java/io/envoyproxy/envoymobile/engine/EnvoyConfiguration.java b/mobile/library/java/io/envoyproxy/envoymobile/engine/EnvoyConfiguration.java index ac03d3184406..d43d666df579 100644 --- a/mobile/library/java/io/envoyproxy/envoymobile/engine/EnvoyConfiguration.java +++ b/mobile/library/java/io/envoyproxy/envoymobile/engine/EnvoyConfiguration.java @@ -40,6 +40,7 @@ public enum TrustChainVerification { public final String http3ConnectionOptions; public final String http3ClientConnectionOptions; public final Map quicHints; + public final List quicCanonicalSuffixes; public final Boolean enableGzipDecompression; public final Boolean enableBrotliDecompression; public final Boolean enableSocketTagging; @@ -66,8 +67,6 @@ public enum TrustChainVerification { public final Integer xdsPort; public final String xdsAuthHeader; public final String xdsAuthToken; - public final String xdsJwtToken; - public final Integer xdsJwtTokenLifetime; public final String xdsRootCerts; public final String xdsSni; public final String nodeId; @@ 
-111,6 +110,8 @@ public enum TrustChainVerification { * HTTP/3. * @param quicHints A list of host port pairs that's known * to speak QUIC. + * @param quicCanonicalSuffixes A list of canonical suffixes that are + * known to speak QUIC. * @param enableGzipDecompression whether to enable response gzip * decompression. * compression. @@ -147,9 +148,6 @@ public enum TrustChainVerification { * @param xdsAuthToken the token to send as the authentication * header value to authenticate with the * xDS server. - * @param xdsJwtToken the JWT token to use for authenticating - * with the xDS server. - * @param xdsJwtTokenLifetime the lifetime of the JWT token. * @param xdsRootCerts the root certificates to use for the TLS * handshake during connection establishment * with the xDS management server. @@ -171,11 +169,12 @@ public EnvoyConfiguration( int dnsMinRefreshSeconds, List dnsPreresolveHostnames, boolean enableDNSCache, int dnsCacheSaveIntervalSeconds, boolean enableDrainPostDnsRefresh, boolean enableHttp3, String http3ConnectionOptions, String http3ClientConnectionOptions, - Map quicHints, boolean enableGzipDecompression, - boolean enableBrotliDecompression, boolean enableSocketTagging, - boolean enableInterfaceBinding, int h2ConnectionKeepaliveIdleIntervalMilliseconds, - int h2ConnectionKeepaliveTimeoutSeconds, int maxConnectionsPerHost, int statsFlushSeconds, - int streamIdleTimeoutSeconds, int perTryIdleTimeoutSeconds, String appVersion, String appId, + Map quicHints, List quicCanonicalSuffixes, + boolean enableGzipDecompression, boolean enableBrotliDecompression, + boolean enableSocketTagging, boolean enableInterfaceBinding, + int h2ConnectionKeepaliveIdleIntervalMilliseconds, int h2ConnectionKeepaliveTimeoutSeconds, + int maxConnectionsPerHost, int statsFlushSeconds, int streamIdleTimeoutSeconds, + int perTryIdleTimeoutSeconds, String appVersion, String appId, TrustChainVerification trustChainVerification, List nativeFilterChain, List httpPlatformFilterFactories, @@ 
-183,10 +182,9 @@ public EnvoyConfiguration( Map keyValueStores, List statSinks, Map runtimeGuards, boolean enablePlatformCertificatesValidation, String rtdsResourceName, Integer rtdsTimeoutSeconds, String xdsAddress, Integer xdsPort, - String xdsAuthHeader, String xdsAuthToken, String xdsJwtToken, Integer xdsJwtTokenLifetime, - String xdsRootCerts, String xdsSni, String nodeId, String nodeRegion, String nodeZone, - String nodeSubZone, Struct nodeMetadata, String cdsResourcesLocator, - Integer cdsTimeoutSeconds, boolean enableCds) { + String xdsAuthHeader, String xdsAuthToken, String xdsRootCerts, String xdsSni, String nodeId, + String nodeRegion, String nodeZone, String nodeSubZone, Struct nodeMetadata, + String cdsResourcesLocator, Integer cdsTimeoutSeconds, boolean enableCds) { JniLibrary.load(); this.grpcStatsDomain = grpcStatsDomain; this.connectTimeoutSeconds = connectTimeoutSeconds; @@ -206,6 +204,7 @@ public EnvoyConfiguration( for (Map.Entry hostAndPort : quicHints.entrySet()) { this.quicHints.put(hostAndPort.getKey(), String.valueOf(hostAndPort.getValue())); } + this.quicCanonicalSuffixes = quicCanonicalSuffixes; this.enableGzipDecompression = enableGzipDecompression; this.enableBrotliDecompression = enableBrotliDecompression; this.enableSocketTagging = enableSocketTagging; @@ -248,8 +247,6 @@ public EnvoyConfiguration( this.xdsPort = xdsPort; this.xdsAuthHeader = xdsAuthHeader; this.xdsAuthToken = xdsAuthToken; - this.xdsJwtToken = xdsJwtToken; - this.xdsJwtTokenLifetime = xdsJwtTokenLifetime; this.xdsRootCerts = xdsRootCerts; this.xdsSni = xdsSni; this.nodeId = nodeId; @@ -273,20 +270,22 @@ public long createBootstrap() { byte[][] dnsPreresolve = JniBridgeUtility.stringsToJniBytes(dnsPreresolveHostnames); byte[][] runtimeGuards = JniBridgeUtility.mapToJniBytes(this.runtimeGuards); byte[][] quicHints = JniBridgeUtility.mapToJniBytes(this.quicHints); + byte[][] quicSuffixes = JniBridgeUtility.stringsToJniBytes(quicCanonicalSuffixes); return 
JniLibrary.createBootstrap( grpcStatsDomain, connectTimeoutSeconds, dnsRefreshSeconds, dnsFailureRefreshSecondsBase, dnsFailureRefreshSecondsMax, dnsQueryTimeoutSeconds, dnsMinRefreshSeconds, dnsPreresolve, enableDNSCache, dnsCacheSaveIntervalSeconds, enableDrainPostDnsRefresh, enableHttp3, - http3ConnectionOptions, http3ClientConnectionOptions, quicHints, enableGzipDecompression, - enableBrotliDecompression, enableSocketTagging, enableInterfaceBinding, - h2ConnectionKeepaliveIdleIntervalMilliseconds, h2ConnectionKeepaliveTimeoutSeconds, - maxConnectionsPerHost, statsFlushSeconds, streamIdleTimeoutSeconds, - perTryIdleTimeoutSeconds, appVersion, appId, enforceTrustChainVerification, filterChain, - statsSinks, enablePlatformCertificatesValidation, runtimeGuards, rtdsResourceName, - rtdsTimeoutSeconds, xdsAddress, xdsPort, xdsAuthHeader, xdsAuthToken, xdsJwtToken, - xdsJwtTokenLifetime, xdsRootCerts, xdsSni, nodeId, nodeRegion, nodeZone, nodeSubZone, - nodeMetadata.toByteArray(), cdsResourcesLocator, cdsTimeoutSeconds, enableCds); + http3ConnectionOptions, http3ClientConnectionOptions, quicHints, quicSuffixes, + enableGzipDecompression, enableBrotliDecompression, enableSocketTagging, + enableInterfaceBinding, h2ConnectionKeepaliveIdleIntervalMilliseconds, + h2ConnectionKeepaliveTimeoutSeconds, maxConnectionsPerHost, statsFlushSeconds, + streamIdleTimeoutSeconds, perTryIdleTimeoutSeconds, appVersion, appId, + enforceTrustChainVerification, filterChain, statsSinks, + enablePlatformCertificatesValidation, runtimeGuards, rtdsResourceName, rtdsTimeoutSeconds, + xdsAddress, xdsPort, xdsAuthHeader, xdsAuthToken, xdsRootCerts, xdsSni, nodeId, nodeRegion, + nodeZone, nodeSubZone, nodeMetadata.toByteArray(), cdsResourcesLocator, cdsTimeoutSeconds, + enableCds); } static class ConfigurationException extends RuntimeException { diff --git a/mobile/library/java/io/envoyproxy/envoymobile/engine/JniLibrary.java b/mobile/library/java/io/envoyproxy/envoymobile/engine/JniLibrary.java 
index 5535346c5bb3..b5e703725e06 100644 --- a/mobile/library/java/io/envoyproxy/envoymobile/engine/JniLibrary.java +++ b/mobile/library/java/io/envoyproxy/envoymobile/engine/JniLibrary.java @@ -311,16 +311,15 @@ public static native long createBootstrap( long dnsQueryTimeoutSeconds, long dnsMinRefreshSeconds, byte[][] dnsPreresolveHostnames, boolean enableDNSCache, long dnsCacheSaveIntervalSeconds, boolean enableDrainPostDnsRefresh, boolean enableHttp3, String http3ConnectionOptions, String http3ClientConnectionOptions, - byte[][] quicHints, boolean enableGzipDecompression, boolean enableBrotliDecompression, - boolean enableSocketTagging, boolean enableInterfaceBinding, - long h2ConnectionKeepaliveIdleIntervalMilliseconds, long h2ConnectionKeepaliveTimeoutSeconds, - long maxConnectionsPerHost, long statsFlushSeconds, long streamIdleTimeoutSeconds, - long perTryIdleTimeoutSeconds, String appVersion, String appId, + byte[][] quicHints, byte[][] quicCanonicalSuffixes, boolean enableGzipDecompression, + boolean enableBrotliDecompression, boolean enableSocketTagging, + boolean enableInterfaceBinding, long h2ConnectionKeepaliveIdleIntervalMilliseconds, + long h2ConnectionKeepaliveTimeoutSeconds, long maxConnectionsPerHost, long statsFlushSeconds, + long streamIdleTimeoutSeconds, long perTryIdleTimeoutSeconds, String appVersion, String appId, boolean trustChainVerification, byte[][] filterChain, byte[][] statSinks, boolean enablePlatformCertificatesValidation, byte[][] runtimeGuards, String rtdsResourceName, long rtdsTimeoutSeconds, String xdsAddress, long xdsPort, String xdsAuthenticationHeader, - String xdsAuthenticationToken, String xdsJwtToken, long xdsJwtTokenLifetime, - String xdsRootCerts, String xdsSni, String nodeId, String nodeRegion, String nodeZone, - String nodeSubZone, byte[] nodeMetadata, String cdsResourcesLocator, long cdsTimeoutSeconds, - boolean enableCds); + String xdsAuthenticationToken, String xdsRootCerts, String xdsSni, String nodeId, + String 
nodeRegion, String nodeZone, String nodeSubZone, byte[] nodeMetadata, + String cdsResourcesLocator, long cdsTimeoutSeconds, boolean enableCds); } diff --git a/mobile/library/java/org/chromium/net/impl/CronvoyEngineBuilderImpl.java b/mobile/library/java/org/chromium/net/impl/CronvoyEngineBuilderImpl.java index 5a58ce912e7a..7813bb95592a 100644 --- a/mobile/library/java/org/chromium/net/impl/CronvoyEngineBuilderImpl.java +++ b/mobile/library/java/org/chromium/net/impl/CronvoyEngineBuilderImpl.java @@ -50,6 +50,7 @@ final static class Pkp { // See setters below for verbose descriptions. private final Context mApplicationContext; private final Map mQuicHints = new HashMap<>(); + private final List mQuicCanonicalSuffixes = new LinkedList<>(); private final List mPkps = new LinkedList<>(); private boolean mPublicKeyPinningBypassForLocalTrustAnchorsEnabled; private String mUserAgent; @@ -227,6 +228,13 @@ public CronvoyEngineBuilderImpl addQuicHint(String host, int port, int alternate Map quicHints() { return mQuicHints; } + public CronvoyEngineBuilderImpl addQuicCanonicalSuffix(String suffix) { + mQuicCanonicalSuffixes.add(suffix); + return this; + } + + List quicCanonicalSuffixes() { return mQuicCanonicalSuffixes; } + @Override public CronvoyEngineBuilderImpl addPublicKeyPins(String hostName, Set pinsSha256, boolean includeSubdomains, Date expirationDate) { diff --git a/mobile/library/java/org/chromium/net/impl/NativeCronvoyEngineBuilderImpl.java b/mobile/library/java/org/chromium/net/impl/NativeCronvoyEngineBuilderImpl.java index 2d45070f23e5..be2a5cc64d3f 100644 --- a/mobile/library/java/org/chromium/net/impl/NativeCronvoyEngineBuilderImpl.java +++ b/mobile/library/java/org/chromium/net/impl/NativeCronvoyEngineBuilderImpl.java @@ -127,15 +127,16 @@ private EnvoyConfiguration createEnvoyConfiguration() { mDnsFailureRefreshSecondsMax, mDnsQueryTimeoutSeconds, mDnsMinRefreshSeconds, mDnsPreresolveHostnames, mEnableDNSCache, mDnsCacheSaveIntervalSeconds, 
mEnableDrainPostDnsRefresh, quicEnabled(), quicConnectionOptions(), - quicClientConnectionOptions(), quicHints(), mEnableGzipDecompression, brotliEnabled(), - mEnableSocketTag, mEnableInterfaceBinding, mH2ConnectionKeepaliveIdleIntervalMilliseconds, - mH2ConnectionKeepaliveTimeoutSeconds, mMaxConnectionsPerHost, mStatsFlushSeconds, - mStreamIdleTimeoutSeconds, mPerTryIdleTimeoutSeconds, mAppVersion, mAppId, - mTrustChainVerification, nativeFilterChain, platformFilterChain, stringAccessors, - keyValueStores, statSinks, runtimeGuards, mEnablePlatformCertificatesValidation, + quicClientConnectionOptions(), quicHints(), quicCanonicalSuffixes(), + mEnableGzipDecompression, brotliEnabled(), mEnableSocketTag, mEnableInterfaceBinding, + mH2ConnectionKeepaliveIdleIntervalMilliseconds, mH2ConnectionKeepaliveTimeoutSeconds, + mMaxConnectionsPerHost, mStatsFlushSeconds, mStreamIdleTimeoutSeconds, + mPerTryIdleTimeoutSeconds, mAppVersion, mAppId, mTrustChainVerification, nativeFilterChain, + platformFilterChain, stringAccessors, keyValueStores, statSinks, runtimeGuards, + mEnablePlatformCertificatesValidation, /*rtdsResourceName=*/"", /*rtdsTimeoutSeconds=*/0, /*xdsAddress=*/"", /*xdsPort=*/0, /*xdsAuthenticationHeader=*/"", /*xdsAuthenticationToken=*/"", - /*xdsJwtToken=*/"", /*xdsJwtTokenLifetime=*/0, /*xdsSslRootCerts=*/"", + /*xdsSslRootCerts=*/"", /*xdsSni=*/"", mNodeId, mNodeRegion, mNodeZone, mNodeSubZone, Struct.getDefaultInstance(), /*cdsResourcesLocator=*/"", /*cdsTimeoutSeconds=*/0, /*enableCds=*/false); diff --git a/mobile/library/kotlin/io/envoyproxy/envoymobile/EngineBuilder.kt b/mobile/library/kotlin/io/envoyproxy/envoymobile/EngineBuilder.kt index 2fe601d87168..1359814b436c 100644 --- a/mobile/library/kotlin/io/envoyproxy/envoymobile/EngineBuilder.kt +++ b/mobile/library/kotlin/io/envoyproxy/envoymobile/EngineBuilder.kt @@ -33,14 +33,11 @@ class Custom(val yaml: String) : BaseConfiguration() */ open class XdsBuilder(internal val xdsServerAddress: String, 
internal val xdsServerPort: Int) { companion object { - private const val DEFAULT_JWT_TOKEN_LIFETIME_IN_SECONDS: Int = 60 * 60 * 24 * 90 // 90 days private const val DEFAULT_XDS_TIMEOUT_IN_SECONDS: Int = 5 } internal var authHeader: String? = null internal var authToken: String? = null - internal var jwtToken: String? = null - internal var jwtTokenLifetimeInSeconds: Int = DEFAULT_JWT_TOKEN_LIFETIME_IN_SECONDS internal var sslRootCerts: String? = null internal var sni: String? = null internal var rtdsResourceName: String? = null @@ -63,25 +60,6 @@ open class XdsBuilder(internal val xdsServerAddress: String, internal val xdsSer return this } - /** - * Sets JWT as the authentication method to the xDS management server, using the given token. - * - * @param token The JWT token used to authenticate the client to the xDS management server. - * @param tokenLifetimeInSeconds The lifetime of the JWT token, in seconds. If none - * (or 0) is specified, then defaultJwtTokenLifetimeSeconds is used. - * @return this builder. - */ - fun setJwtAuthenticationToken( - token: String, - tokenLifetimeInSeconds: Int = DEFAULT_JWT_TOKEN_LIFETIME_IN_SECONDS - ): XdsBuilder { - this.jwtToken = token - this.jwtTokenLifetimeInSeconds = - if (tokenLifetimeInSeconds > 0) tokenLifetimeInSeconds - else DEFAULT_JWT_TOKEN_LIFETIME_IN_SECONDS - return this - } - /** * Sets the PEM-encoded server root certificates used to negotiate the TLS handshake for the gRPC * connection. If no root certs are specified, the operating system defaults are used. 
@@ -183,6 +161,7 @@ open class EngineBuilder(private val configuration: BaseConfiguration = Standard private var http3ConnectionOptions = "" private var http3ClientConnectionOptions = "" private var quicHints = mutableMapOf() + private var quicCanonicalSuffixes = mutableListOf() private var enableGzipDecompression = true private var enableBrotliDecompression = false private var enableSocketTagging = false @@ -684,6 +663,17 @@ open class EngineBuilder(private val configuration: BaseConfiguration = Standard return this } + /** + * Add a host suffix that's known to speak QUIC. + * + * @param suffix the suffix string. + * @return This builder. + */ + fun addQuicCanonicalSuffix(suffix: String): EngineBuilder { + this.quicCanonicalSuffixes.add(suffix) + return this + } + /** * Builds and runs a new Engine instance with the provided configuration. * @@ -708,6 +698,7 @@ open class EngineBuilder(private val configuration: BaseConfiguration = Standard http3ConnectionOptions, http3ClientConnectionOptions, quicHints, + quicCanonicalSuffixes, enableGzipDecompression, enableBrotliDecompression, enableSocketTagging, @@ -734,8 +725,6 @@ open class EngineBuilder(private val configuration: BaseConfiguration = Standard xdsBuilder?.xdsServerPort ?: 0, xdsBuilder?.authHeader, xdsBuilder?.authToken, - xdsBuilder?.jwtToken, - xdsBuilder?.jwtTokenLifetimeInSeconds ?: 0, xdsBuilder?.sslRootCerts, xdsBuilder?.sni, nodeId, diff --git a/mobile/library/objective-c/EnvoyConfiguration.h b/mobile/library/objective-c/EnvoyConfiguration.h index 0f9fb7d43cb5..87dec257ef59 100644 --- a/mobile/library/objective-c/EnvoyConfiguration.h +++ b/mobile/library/objective-c/EnvoyConfiguration.h @@ -25,6 +25,7 @@ NS_ASSUME_NONNULL_BEGIN @property (nonatomic, assign) UInt32 dnsCacheSaveIntervalSeconds; @property (nonatomic, assign) BOOL enableHttp3; @property (nonatomic, strong) NSDictionary *quicHints; +@property (nonatomic, strong) NSArray *quicCanonicalSuffixes; @property (nonatomic, assign) BOOL 
enableGzipDecompression; @property (nonatomic, assign) BOOL enableBrotliDecompression; @property (nonatomic, assign) BOOL enableInterfaceBinding; @@ -54,8 +55,6 @@ NS_ASSUME_NONNULL_BEGIN @property (nonatomic, assign) UInt32 xdsServerPort; @property (nonatomic, strong, nullable) NSString *xdsAuthHeader; @property (nonatomic, strong, nullable) NSString *xdsAuthToken; -@property (nonatomic, strong, nullable) NSString *xdsJwtToken; -@property (nonatomic, assign) UInt32 xdsJwtTokenLifetimeSeconds; @property (nonatomic, strong, nullable) NSString *xdsSslRootCerts; @property (nonatomic, strong, nullable) NSString *xdsSni; @property (nonatomic, strong, nullable) NSString *rtdsResourceName; @@ -80,6 +79,7 @@ NS_ASSUME_NONNULL_BEGIN dnsCacheSaveIntervalSeconds:(UInt32)dnsCacheSaveIntervalSeconds enableHttp3:(BOOL)enableHttp3 quicHints:(NSDictionary *)quicHints + quicCanonicalSuffixes:(NSArray *)quicCanonicalSuffixes enableGzipDecompression:(BOOL)enableGzipDecompression enableBrotliDecompression:(BOOL)enableBrotliDecompression enableInterfaceBinding:(BOOL)enableInterfaceBinding @@ -117,8 +117,6 @@ NS_ASSUME_NONNULL_BEGIN xdsServerPort:(UInt32)xdsServerPort xdsAuthHeader:(nullable NSString *)xdsAuthHeader xdsAuthToken:(nullable NSString *)xdsAuthToken - xdsJwtToken:(nullable NSString *)xdsJwtToken - xdsJwtTokenLifetimeSeconds:(UInt32)xdsJwtTokenLifetimeSeconds xdsSslRootCerts:(nullable NSString *)xdsSslRootCerts xdsSni:(nullable NSString *)xdsSni rtdsResourceName:(nullable NSString *)rtdsResourceName diff --git a/mobile/library/objective-c/EnvoyConfiguration.mm b/mobile/library/objective-c/EnvoyConfiguration.mm index f52ac74a4425..0719a75faac4 100644 --- a/mobile/library/objective-c/EnvoyConfiguration.mm +++ b/mobile/library/objective-c/EnvoyConfiguration.mm @@ -79,6 +79,7 @@ - (instancetype)initWithGrpcStatsDomain:(nullable NSString *)grpcStatsDomain dnsCacheSaveIntervalSeconds:(UInt32)dnsCacheSaveIntervalSeconds enableHttp3:(BOOL)enableHttp3 quicHints:(NSDictionary 
*)quicHints + quicCanonicalSuffixes:(NSArray *)quicCanonicalSuffixes enableGzipDecompression:(BOOL)enableGzipDecompression enableBrotliDecompression:(BOOL)enableBrotliDecompression enableInterfaceBinding:(BOOL)enableInterfaceBinding @@ -116,8 +117,6 @@ - (instancetype)initWithGrpcStatsDomain:(nullable NSString *)grpcStatsDomain xdsServerPort:(UInt32)xdsServerPort xdsAuthHeader:(nullable NSString *)xdsAuthHeader xdsAuthToken:(nullable NSString *)xdsAuthToken - xdsJwtToken:(nullable NSString *)xdsJwtToken - xdsJwtTokenLifetimeSeconds:(UInt32)xdsJwtTokenLifetimeSeconds xdsSslRootCerts:(nullable NSString *)xdsSslRootCerts xdsSni:(nullable NSString *)xdsSni rtdsResourceName:(nullable NSString *)rtdsResourceName @@ -142,6 +141,7 @@ - (instancetype)initWithGrpcStatsDomain:(nullable NSString *)grpcStatsDomain self.dnsCacheSaveIntervalSeconds = dnsCacheSaveIntervalSeconds; self.enableHttp3 = enableHttp3; self.quicHints = quicHints; + self.quicCanonicalSuffixes = quicCanonicalSuffixes; self.enableGzipDecompression = enableGzipDecompression; self.enableBrotliDecompression = enableBrotliDecompression; self.enableInterfaceBinding = enableInterfaceBinding; @@ -172,8 +172,6 @@ - (instancetype)initWithGrpcStatsDomain:(nullable NSString *)grpcStatsDomain self.xdsServerPort = xdsServerPort; self.xdsAuthHeader = xdsAuthHeader; self.xdsAuthToken = xdsAuthToken; - self.xdsJwtToken = xdsJwtToken; - self.xdsJwtTokenLifetimeSeconds = xdsJwtTokenLifetimeSeconds; self.xdsSslRootCerts = xdsSslRootCerts; self.xdsSni = xdsSni; self.rtdsResourceName = rtdsResourceName; @@ -205,6 +203,9 @@ - (instancetype)initWithGrpcStatsDomain:(nullable NSString *)grpcStatsDomain for (NSString *host in self.quicHints) { builder.addQuicHint([host toCXXString], [[self.quicHints objectForKey:host] intValue]); } + for (NSString *suffix in self.quicCanonicalSuffixes) { + builder.addQuicCanonicalSuffix([suffix toCXXString]); + } #endif builder.enableGzipDecompression(self.enableGzipDecompression); @@ -275,10 +276,6 
@@ - (instancetype)initWithGrpcStatsDomain:(nullable NSString *)grpcStatsDomain xdsBuilder.setAuthenticationToken([self.xdsAuthHeader toCXXString], [self.xdsAuthToken toCXXString]); } - if (self.xdsJwtToken != nil) { - xdsBuilder.setJwtAuthenticationToken([self.xdsJwtToken toCXXString], - self.xdsJwtTokenLifetimeSeconds); - } if (self.xdsSslRootCerts != nil) { xdsBuilder.setSslRootCerts([self.xdsSslRootCerts toCXXString]); } diff --git a/mobile/library/swift/EngineBuilder.swift b/mobile/library/swift/EngineBuilder.swift index 42132ceff7dd..cdcc49f183fe 100644 --- a/mobile/library/swift/EngineBuilder.swift +++ b/mobile/library/swift/EngineBuilder.swift @@ -14,15 +14,12 @@ import Foundation /// This class is typically used as input to the EngineBuilder's setXds() method. @objcMembers open class XdsBuilder: NSObject { - public static let defaultJwtTokenLifetimeInSeconds: UInt32 = 60 * 60 * 24 * 90 // 90 days public static let defaultXdsTimeoutInSeconds: UInt32 = 5 let xdsServerAddress: String let xdsServerPort: UInt32 var authHeader: String? var authToken: String? - var jwtToken: String? - var jwtTokenLifetimeInSeconds: UInt32 = XdsBuilder.defaultJwtTokenLifetimeInSeconds var sslRootCerts: String? var sni: String? var rtdsResourceName: String? @@ -56,25 +53,6 @@ open class XdsBuilder: NSObject { return self } - /// Sets JWT as the authentication method to the xDS management server, using the given token. - /// - /// - parameter token: The JWT token used to authenticate the client to the xDS - /// management server. - /// - parameter tokenLifetimeInSeconds: the lifetime of the JWT token, in seconds. If - /// none (or 0) is specified, then - /// defaultJwtTokenLifetimeSeconds is used. - /// - /// - returns: This builder. 
- @discardableResult - public func setJwtAuthenticationToken( - token: String, - tokenLifetimeInSeconds: UInt32 = XdsBuilder.defaultJwtTokenLifetimeInSeconds) -> Self { - self.jwtToken = token - self.jwtTokenLifetimeInSeconds = (tokenLifetimeInSeconds > 0) ? - tokenLifetimeInSeconds : XdsBuilder.defaultJwtTokenLifetimeInSeconds - return self - } - /// Sets the PEM-encoded server root certificates used to negotiate the TLS handshake for the gRPC /// connection. If no root certs are specified, the operating system defaults are used. /// @@ -183,6 +161,7 @@ open class EngineBuilder: NSObject { private var enableHttp3: Bool = false #endif private var quicHints: [String: Int] = [:] + private var quicCanonicalSuffixes: [String] = [] private var enableInterfaceBinding: Bool = false private var enforceTrustChainVerification: Bool = true private var enablePlatformCertificateValidation: Bool = false @@ -400,6 +379,17 @@ open class EngineBuilder: NSObject { self.quicHints[host] = port return self } + + /// Add a host suffix that's known to support QUIC. + /// + /// - parameter suffix: the string representation of the host suffix + /// + /// - returns: This builder. + @discardableResult + public func addQuicCanonicalSuffix(_ suffix: String) -> Self { + self.quicCanonicalSuffixes.append(suffix) + return self + } #endif /// Add an interval at which to flush Envoy stats. @@ -787,8 +777,6 @@ open class EngineBuilder: NSObject { var xdsServerPort: UInt32 = 0 var xdsAuthHeader: String? var xdsAuthToken: String? - var xdsJwtToken: String? - var xdsJwtTokenLifetimeSeconds: UInt32 = 0 var xdsSslRootCerts: String? var xdsSni: String? var rtdsResourceName: String? @@ -802,8 +790,6 @@ open class EngineBuilder: NSObject { xdsServerPort = self.xdsBuilder?.xdsServerPort ?? 0 xdsAuthHeader = self.xdsBuilder?.authHeader xdsAuthToken = self.xdsBuilder?.authToken - xdsJwtToken = self.xdsBuilder?.jwtToken - xdsJwtTokenLifetimeSeconds = self.xdsBuilder?.jwtTokenLifetimeInSeconds ?? 
0 xdsSslRootCerts = self.xdsBuilder?.sslRootCerts xdsSni = self.xdsBuilder?.sni rtdsResourceName = self.xdsBuilder?.rtdsResourceName @@ -826,6 +812,7 @@ open class EngineBuilder: NSObject { dnsCacheSaveIntervalSeconds: self.dnsCacheSaveIntervalSeconds, enableHttp3: self.enableHttp3, quicHints: self.quicHints.mapValues { NSNumber(value: $0) }, + quicCanonicalSuffixes: self.quicCanonicalSuffixes, enableGzipDecompression: self.enableGzipDecompression, enableBrotliDecompression: self.enableBrotliDecompression, enableInterfaceBinding: self.enableInterfaceBinding, @@ -856,8 +843,6 @@ open class EngineBuilder: NSObject { xdsServerPort: xdsServerPort, xdsAuthHeader: xdsAuthHeader, xdsAuthToken: xdsAuthToken, - xdsJwtToken: xdsJwtToken, - xdsJwtTokenLifetimeSeconds: xdsJwtTokenLifetimeSeconds, xdsSslRootCerts: xdsSslRootCerts, xdsSni: xdsSni, rtdsResourceName: rtdsResourceName, @@ -902,6 +887,9 @@ private extension EngineBuilder { for (host, port) in self.quicHints { cxxBuilder.addQuicHint(host.toCXX(), Int32(port)) } + for (suffix) in self.quicCanonicalSuffixes { + cxxBuilder.addQuicCanonicalSuffix(suffix.toCXX()) + } #endif cxxBuilder.enableGzipDecompression(self.enableGzipDecompression) cxxBuilder.enableBrotliDecompression(self.enableBrotliDecompression) @@ -964,10 +952,6 @@ private extension EngineBuilder { cxxXdsBuilder.setAuthenticationToken(xdsAuthHeader.toCXX(), xdsBuilder.authToken?.toCXX() ?? 
"".toCXX()) } - if let xdsJwtToken = xdsBuilder.jwtToken { - cxxXdsBuilder.setJwtAuthenticationToken(xdsJwtToken.toCXX(), - Int32(xdsBuilder.jwtTokenLifetimeInSeconds)) - } if let xdsSslRootCerts = xdsBuilder.sslRootCerts { cxxXdsBuilder.setSslRootCerts(xdsSslRootCerts.toCXX()) } diff --git a/mobile/test/cc/unit/envoy_config_test.cc b/mobile/test/cc/unit/envoy_config_test.cc index f5291baf20a3..ed1ccd5fd353 100644 --- a/mobile/test/cc/unit/envoy_config_test.cc +++ b/mobile/test/cc/unit/envoy_config_test.cc @@ -38,6 +38,8 @@ TEST(TestConfig, ConfigIsApplied) { .setHttp3ClientConnectionOptions("MPQC") .addQuicHint("www.abc.com", 443) .addQuicHint("www.def.com", 443) + .addQuicCanonicalSuffix(".opq.com") + .addQuicCanonicalSuffix(".xyz.com") #endif .addConnectTimeoutSeconds(123) .addDnsRefreshSeconds(456) @@ -74,6 +76,8 @@ TEST(TestConfig, ConfigIsApplied) { "client_connection_options: \"MPQC\"", "hostname: \"www.abc.com\"", "hostname: \"www.def.com\"", + "canonical_suffixes: \".opq.com\"", + "canonical_suffixes: \".xyz.com\"", #endif "key: \"dns_persistent_cache\" save_interval { seconds: 101 }", "key: \"always_use_v6\" value { bool_value: true }", @@ -322,47 +326,6 @@ TEST(TestConfig, XdsConfig) { .at("grpc.default_authority") .string_value(), "fake-td.googleapis.com"); - - // With JWT security credentials. 
- xds_builder = - XdsBuilder(/*xds_server_address=*/"fake-td.googleapis.com", /*xds_server_port=*/12345); - xds_builder.setJwtAuthenticationToken(/*token=*/"my_jwt_token", - /*token_lifetime_in_seconds=*/500); - xds_builder.setSslRootCerts(/*root_certs=*/"my_root_cert"); - xds_builder.setSni(/*sni=*/"fake-td.googleapis.com"); - engine_builder.setXds(std::move(xds_builder)); - bootstrap = engine_builder.generateBootstrap(); - auto& ads_config_with_jwt_tokens = bootstrap->dynamic_resources().ads_config(); - EXPECT_EQ(ads_config_with_jwt_tokens.api_type(), envoy::config::core::v3::ApiConfigSource::GRPC); - EXPECT_EQ(ads_config_with_jwt_tokens.grpc_services(0).google_grpc().target_uri(), - "fake-td.googleapis.com:12345"); - EXPECT_EQ(ads_config_with_jwt_tokens.grpc_services(0).google_grpc().stat_prefix(), "ads"); - EXPECT_EQ(ads_config_with_jwt_tokens.grpc_services(0) - .google_grpc() - .channel_credentials() - .ssl_credentials() - .root_certs() - .inline_string(), - "my_root_cert"); - EXPECT_EQ(ads_config_with_jwt_tokens.grpc_services(0) - .google_grpc() - .call_credentials(0) - .service_account_jwt_access() - .json_key(), - "my_jwt_token"); - EXPECT_EQ(ads_config_with_jwt_tokens.grpc_services(0) - .google_grpc() - .call_credentials(0) - .service_account_jwt_access() - .token_lifetime_seconds(), - 500); - EXPECT_EQ(ads_config_with_jwt_tokens.grpc_services(0) - .google_grpc() - .channel_args() - .args() - .at("grpc.default_authority") - .string_value(), - "fake-td.googleapis.com"); } TEST(TestConfig, CopyConstructor) { diff --git a/mobile/test/common/integration/client_integration_test.cc b/mobile/test/common/integration/client_integration_test.cc index 373a08fe6e75..dbe16a80f331 100644 --- a/mobile/test/common/integration/client_integration_test.cc +++ b/mobile/test/common/integration/client_integration_test.cc @@ -72,7 +72,10 @@ class ClientIntegrationTest : public BaseClientIntegrationTest, if (add_quic_hints_) { auto address = fake_upstreams_[0]->localAddress(); 
auto upstream_port = fake_upstreams_[0]->localAddress()->ip()->port(); - builder_.addQuicHint("www.lyft.com", upstream_port); + // With canonical suffix, having a quic hint of foo.lyft.com will make + // www.lyft.com being recognized as QUIC ready. + builder_.addQuicCanonicalSuffix(".lyft.com"); + builder_.addQuicHint("foo.lyft.com", upstream_port); ASSERT(test_key_value_store_); // Force www.lyft.com to resolve to the fake upstream. It's the only domain diff --git a/mobile/test/common/jni/BUILD b/mobile/test/common/jni/BUILD index 42e43f48749f..caa7d222ae37 100644 --- a/mobile/test/common/jni/BUILD +++ b/mobile/test/common/jni/BUILD @@ -83,3 +83,23 @@ envoy_mobile_so_to_jni_lib( testonly = True, native_dep = "libenvoy_jni_with_test_and_listener_extensions.so", ) + +cc_library( + name = "jni_helper_test_lib", + srcs = [ + "jni_helper_test.cc", + ], + deps = [ + "//library/common/jni:jni_helper_lib", + ], + alwayslink = True, +) + +cc_binary( + name = "libenvoy_jni_helper_test.so", + testonly = True, + linkshared = True, + deps = [ + ":jni_helper_test_lib", + ], +) diff --git a/mobile/test/common/jni/jni_helper_test.cc b/mobile/test/common/jni/jni_helper_test.cc new file mode 100644 index 000000000000..d687192ec6a2 --- /dev/null +++ b/mobile/test/common/jni/jni_helper_test.cc @@ -0,0 +1,186 @@ +#include + +#include "library/common/jni/jni_helper.h" + +// NOLINT(namespace-envoy) + +// This file contains JNI implementation used by +// `test/java/io/envoyproxy/envoymobile/jni/JniHelperTest.java` unit tests. 
+ +extern "C" JNIEXPORT void JNICALL Java_io_envoyproxy_envoymobile_jni_JniHelperTest_getMethodId( + JNIEnv* env, jclass, jclass clazz, jstring name, jstring signature) { + Envoy::JNI::JniHelper jni_helper(env); + Envoy::JNI::StringUtfUniquePtr name_ptr = jni_helper.getStringUtfChars(name, nullptr); + Envoy::JNI::StringUtfUniquePtr sig_ptr = jni_helper.getStringUtfChars(signature, nullptr); + jni_helper.getMethodId(clazz, name_ptr.get(), sig_ptr.get()); +} + +extern "C" JNIEXPORT void JNICALL +Java_io_envoyproxy_envoymobile_jni_JniHelperTest_getStaticMethodId(JNIEnv* env, jclass, + jclass clazz, jstring name, + jstring signature) { + Envoy::JNI::JniHelper jni_helper(env); + Envoy::JNI::StringUtfUniquePtr name_ptr = jni_helper.getStringUtfChars(name, nullptr); + Envoy::JNI::StringUtfUniquePtr sig_ptr = jni_helper.getStringUtfChars(signature, nullptr); + jni_helper.getStaticMethodId(clazz, name_ptr.get(), sig_ptr.get()); +} + +extern "C" JNIEXPORT jclass JNICALL Java_io_envoyproxy_envoymobile_jni_JniHelperTest_findClass( + JNIEnv* env, jclass, jstring class_name) { + Envoy::JNI::JniHelper jni_helper(env); + Envoy::JNI::StringUtfUniquePtr class_name_ptr = jni_helper.getStringUtfChars(class_name, nullptr); + Envoy::JNI::LocalRefUniquePtr clazz = jni_helper.findClass(class_name_ptr.get()); + return clazz.release(); +} + +extern "C" JNIEXPORT jclass JNICALL Java_io_envoyproxy_envoymobile_jni_JniHelperTest_getObjectClass( + JNIEnv* env, jclass, jobject object) { + Envoy::JNI::JniHelper jni_helper(env); + return jni_helper.getObjectClass(object).release(); +} + +extern "C" JNIEXPORT void JNICALL Java_io_envoyproxy_envoymobile_jni_JniHelperTest_throwNew( + JNIEnv* env, jclass, jstring class_name, jstring message) { + Envoy::JNI::JniHelper jni_helper(env); + Envoy::JNI::StringUtfUniquePtr class_name_ptr = jni_helper.getStringUtfChars(class_name, nullptr); + Envoy::JNI::StringUtfUniquePtr message_ptr = jni_helper.getStringUtfChars(message, nullptr); + 
jni_helper.throwNew(class_name_ptr.get(), message_ptr.get()); +} + +extern "C" JNIEXPORT jobject JNICALL Java_io_envoyproxy_envoymobile_jni_JniHelperTest_newObject( + JNIEnv* env, jclass, jclass clazz, jstring name, jstring signature) { + Envoy::JNI::JniHelper jni_helper(env); + Envoy::JNI::StringUtfUniquePtr name_ptr = jni_helper.getStringUtfChars(name, nullptr); + Envoy::JNI::StringUtfUniquePtr sig_ptr = jni_helper.getStringUtfChars(signature, nullptr); + jmethodID method_id = jni_helper.getMethodId(clazz, name_ptr.get(), sig_ptr.get()); + return jni_helper.newObject(clazz, method_id).release(); +} + +extern "C" JNIEXPORT jint JNICALL +Java_io_envoyproxy_envoymobile_jni_JniHelperTest_getArrayLength(JNIEnv* env, jclass, jarray array) { + Envoy::JNI::JniHelper jni_helper(env); + return jni_helper.getArrayLength(array); +} + +#define DEFINE_JNI_NEW_ARRAY(JAVA_TYPE, JNI_TYPE) \ + extern "C" JNIEXPORT JNI_TYPE JNICALL \ + Java_io_envoyproxy_envoymobile_jni_JniHelperTest_new##JAVA_TYPE##Array(JNIEnv* env, jclass, \ + jsize length) { \ + Envoy::JNI::JniHelper jni_helper(env); \ + return jni_helper.new##JAVA_TYPE##Array(length).release(); \ + } + +DEFINE_JNI_NEW_ARRAY(Byte, jbyteArray) +DEFINE_JNI_NEW_ARRAY(Char, jcharArray) +DEFINE_JNI_NEW_ARRAY(Short, jshortArray) +DEFINE_JNI_NEW_ARRAY(Int, jintArray) +DEFINE_JNI_NEW_ARRAY(Long, jlongArray) +DEFINE_JNI_NEW_ARRAY(Float, jfloatArray) +DEFINE_JNI_NEW_ARRAY(Double, jdoubleArray) +DEFINE_JNI_NEW_ARRAY(Boolean, jbooleanArray) + +extern "C" JNIEXPORT jobjectArray JNICALL +Java_io_envoyproxy_envoymobile_jni_JniHelperTest_newObjectArray(JNIEnv* env, jclass, jsize length, + jclass element_class, + jobject initial_element) { + Envoy::JNI::JniHelper jni_helper(env); + return jni_helper.newObjectArray(length, element_class, initial_element).release(); +} + +extern "C" JNIEXPORT jobject JNICALL +Java_io_envoyproxy_envoymobile_jni_JniHelperTest_getObjectArrayElement(JNIEnv* env, jclass, + jobjectArray array, + jsize index) { + 
Envoy::JNI::JniHelper jni_helper(env); + return jni_helper.getObjectArrayElement(array, index).release(); +} + +extern "C" JNIEXPORT void JNICALL +Java_io_envoyproxy_envoymobile_jni_JniHelperTest_setObjectArrayElement(JNIEnv* env, jclass, + jobjectArray array, + jsize index, jobject value) { + Envoy::JNI::JniHelper jni_helper(env); + jni_helper.setObjectArrayElement(array, index, value); +} + +#define DEFINE_JNI_CALL_METHOD(JAVA_TYPE, JNI_TYPE) \ + extern "C" JNIEXPORT JNI_TYPE JNICALL \ + Java_io_envoyproxy_envoymobile_jni_JniHelperTest_call##JAVA_TYPE##Method( \ + JNIEnv* env, jclass, jclass clazz, jobject object, jstring name, jstring signature) { \ + Envoy::JNI::JniHelper jni_helper(env); \ + Envoy::JNI::StringUtfUniquePtr name_ptr = jni_helper.getStringUtfChars(name, nullptr); \ + Envoy::JNI::StringUtfUniquePtr sig_ptr = jni_helper.getStringUtfChars(signature, nullptr); \ + jmethodID method_id = jni_helper.getMethodId(clazz, name_ptr.get(), sig_ptr.get()); \ + return jni_helper.call##JAVA_TYPE##Method(object, method_id); \ + } + +DEFINE_JNI_CALL_METHOD(Byte, jbyte) +DEFINE_JNI_CALL_METHOD(Char, jchar) +DEFINE_JNI_CALL_METHOD(Short, jshort) +DEFINE_JNI_CALL_METHOD(Int, jint) +DEFINE_JNI_CALL_METHOD(Long, jlong) +DEFINE_JNI_CALL_METHOD(Float, jfloat) +DEFINE_JNI_CALL_METHOD(Double, jdouble) +DEFINE_JNI_CALL_METHOD(Boolean, jboolean) + +extern "C" JNIEXPORT void JNICALL Java_io_envoyproxy_envoymobile_jni_JniHelperTest_callVoidMethod( + JNIEnv* env, jclass, jclass clazz, jobject object, jstring name, jstring signature) { + Envoy::JNI::JniHelper jni_helper(env); + Envoy::JNI::StringUtfUniquePtr name_ptr = jni_helper.getStringUtfChars(name, nullptr); + Envoy::JNI::StringUtfUniquePtr sig_ptr = jni_helper.getStringUtfChars(signature, nullptr); + jmethodID method_id = jni_helper.getMethodId(clazz, name_ptr.get(), sig_ptr.get()); + jni_helper.callVoidMethod(object, method_id); +} + +extern "C" JNIEXPORT jobject JNICALL 
+Java_io_envoyproxy_envoymobile_jni_JniHelperTest_callObjectMethod(JNIEnv* env, jclass, jclass clazz, + jobject object, jstring name, + jstring signature) { + Envoy::JNI::JniHelper jni_helper(env); + Envoy::JNI::StringUtfUniquePtr name_ptr = jni_helper.getStringUtfChars(name, nullptr); + Envoy::JNI::StringUtfUniquePtr sig_ptr = jni_helper.getStringUtfChars(signature, nullptr); + jmethodID method_id = jni_helper.getMethodId(clazz, name_ptr.get(), sig_ptr.get()); + return jni_helper.callObjectMethod(object, method_id).release(); +} + +#define DEFINE_JNI_CALL_STATIC_METHOD(JAVA_TYPE, JNI_TYPE) \ + extern "C" JNIEXPORT JNI_TYPE JNICALL \ + Java_io_envoyproxy_envoymobile_jni_JniHelperTest_callStatic##JAVA_TYPE##Method( \ + JNIEnv* env, jclass, jclass clazz, jstring name, jstring signature) { \ + Envoy::JNI::JniHelper jni_helper(env); \ + Envoy::JNI::StringUtfUniquePtr name_ptr = jni_helper.getStringUtfChars(name, nullptr); \ + Envoy::JNI::StringUtfUniquePtr sig_ptr = jni_helper.getStringUtfChars(signature, nullptr); \ + jmethodID method_id = jni_helper.getStaticMethodId(clazz, name_ptr.get(), sig_ptr.get()); \ + return jni_helper.callStatic##JAVA_TYPE##Method(clazz, method_id); \ + } + +DEFINE_JNI_CALL_STATIC_METHOD(Byte, jbyte) +DEFINE_JNI_CALL_STATIC_METHOD(Char, jchar) +DEFINE_JNI_CALL_STATIC_METHOD(Short, jshort) +DEFINE_JNI_CALL_STATIC_METHOD(Int, jint) +DEFINE_JNI_CALL_STATIC_METHOD(Long, jlong) +DEFINE_JNI_CALL_STATIC_METHOD(Float, jfloat) +DEFINE_JNI_CALL_STATIC_METHOD(Double, jdouble) +DEFINE_JNI_CALL_STATIC_METHOD(Boolean, jboolean) + +extern "C" JNIEXPORT void JNICALL +Java_io_envoyproxy_envoymobile_jni_JniHelperTest_callStaticVoidMethod(JNIEnv* env, jclass, + jclass clazz, jstring name, + jstring signature) { + Envoy::JNI::JniHelper jni_helper(env); + Envoy::JNI::StringUtfUniquePtr name_ptr = jni_helper.getStringUtfChars(name, nullptr); + Envoy::JNI::StringUtfUniquePtr sig_ptr = jni_helper.getStringUtfChars(signature, nullptr); + jmethodID method_id = 
jni_helper.getStaticMethodId(clazz, name_ptr.get(), sig_ptr.get()); + jni_helper.callStaticVoidMethod(clazz, method_id); +} + +extern "C" JNIEXPORT jobject JNICALL +Java_io_envoyproxy_envoymobile_jni_JniHelperTest_callStaticObjectMethod(JNIEnv* env, jclass, + jclass clazz, jstring name, + jstring signature) { + Envoy::JNI::JniHelper jni_helper(env); + Envoy::JNI::StringUtfUniquePtr name_ptr = jni_helper.getStringUtfChars(name, nullptr); + Envoy::JNI::StringUtfUniquePtr sig_ptr = jni_helper.getStringUtfChars(signature, nullptr); + jmethodID method_id = jni_helper.getStaticMethodId(clazz, name_ptr.get(), sig_ptr.get()); + return jni_helper.callStaticObjectMethod(clazz, method_id).release(); +} diff --git a/mobile/test/java/io/envoyproxy/envoymobile/engine/EnvoyConfigurationTest.kt b/mobile/test/java/io/envoyproxy/envoymobile/engine/EnvoyConfigurationTest.kt index b3d6c05cea97..44c536d51e4e 100644 --- a/mobile/test/java/io/envoyproxy/envoymobile/engine/EnvoyConfigurationTest.kt +++ b/mobile/test/java/io/envoyproxy/envoymobile/engine/EnvoyConfigurationTest.kt @@ -81,6 +81,7 @@ class EnvoyConfigurationTest { http3ConnectionOptions: String = "5RTO", http3ClientConnectionOptions: String = "MPQC", quicHints: Map = mapOf("www.abc.com" to 443, "www.def.com" to 443), + quicCanonicalSuffixes: MutableList = mutableListOf(".opq.com", ".xyz.com"), enableGzipDecompression: Boolean = true, enableBrotliDecompression: Boolean = false, enableSocketTagging: Boolean = false, @@ -105,8 +106,6 @@ class EnvoyConfigurationTest { xdsPort: Int = 0, xdsAuthHeader: String = "", xdsAuthToken: String = "", - xdsJwtToken: String = "", - xdsJwtTokenLifetimeSeconds: Int = 0, xdsSslRootCerts: String = "", xdsSni: String = "", nodeId: String = "", @@ -135,6 +134,7 @@ class EnvoyConfigurationTest { http3ConnectionOptions, http3ClientConnectionOptions, quicHints, + quicCanonicalSuffixes, enableGzipDecompression, enableBrotliDecompression, enableSocketTagging, @@ -161,8 +161,6 @@ class 
EnvoyConfigurationTest { xdsPort, xdsAuthHeader, xdsAuthToken, - xdsJwtToken, - xdsJwtTokenLifetimeSeconds, xdsSslRootCerts, xdsSni, nodeId, @@ -212,6 +210,9 @@ class EnvoyConfigurationTest { assertThat(resolvedTemplate).contains("hostname: www.abc.com"); assertThat(resolvedTemplate).contains("hostname: www.def.com"); assertThat(resolvedTemplate).contains("port: 443"); + assertThat(resolvedTemplate).contains("canonical_suffixes:"); + assertThat(resolvedTemplate).contains(".opq.com"); + assertThat(resolvedTemplate).contains(".xyz.com"); assertThat(resolvedTemplate).contains("connection_options: 5RTO"); assertThat(resolvedTemplate).contains("client_connection_options: MPQC"); diff --git a/mobile/test/java/io/envoyproxy/envoymobile/jni/BUILD b/mobile/test/java/io/envoyproxy/envoymobile/jni/BUILD new file mode 100644 index 000000000000..b82998b551f8 --- /dev/null +++ b/mobile/test/java/io/envoyproxy/envoymobile/jni/BUILD @@ -0,0 +1,12 @@ +load("@envoy_mobile//bazel:kotlin_test.bzl", "envoy_mobile_android_test") + +envoy_mobile_android_test( + name = "jni_helper_test", + srcs = [ + "JniHelperTest.java", + ], + native_deps = [ + "//test/common/jni:libenvoy_jni_helper_test.so", + ], + native_lib_name = "envoy_jni_helper_test", +) diff --git a/mobile/test/java/io/envoyproxy/envoymobile/jni/JniHelperTest.java b/mobile/test/java/io/envoyproxy/envoymobile/jni/JniHelperTest.java new file mode 100644 index 000000000000..43964f1aee75 --- /dev/null +++ b/mobile/test/java/io/envoyproxy/envoymobile/jni/JniHelperTest.java @@ -0,0 +1,299 @@ +package io.envoyproxy.envoymobile.jni; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.robolectric.RobolectricTestRunner; + +@RunWith(RobolectricTestRunner.class) +public class JniHelperTest { + public JniHelperTest() { System.loadLibrary("envoy_jni_helper_test"); } + + 
//================================================================================ + // Native methods for testing. + //================================================================================ + public static native void getMethodId(Class clazz, String name, String signature); + public static native void getStaticMethodId(Class clazz, String name, String signature); + public static native Class findClass(String className); + public static native Class getObjectClass(Object object); + public static native Object newObject(Class clazz, String name, String signature); + public static native void throwNew(String className, String message); + public static native int getArrayLength(int[] array); + public static native byte[] newByteArray(int length); + public static native char[] newCharArray(int length); + public static native short[] newShortArray(int length); + public static native int[] newIntArray(int length); + public static native long[] newLongArray(int length); + public static native float[] newFloatArray(int length); + public static native double[] newDoubleArray(int length); + public static native boolean[] newBooleanArray(int length); + public static native Object[] newObjectArray(int length, Class elementClass, + Object initialElement); + public static native Object getObjectArrayElement(Object[] array, int index); + public static native void setObjectArrayElement(Object[] array, int index, Object value); + public static native byte callByteMethod(Class clazz, Object instance, String name, + String signature); + public static native char callCharMethod(Class clazz, Object instance, String name, + String signature); + public static native short callShortMethod(Class clazz, Object instance, String name, + String signature); + public static native int callIntMethod(Class clazz, Object instance, String name, + String signature); + public static native long callLongMethod(Class clazz, Object instance, String name, + String signature); + public static 
native float callFloatMethod(Class clazz, Object instance, String name, + String signature); + public static native double callDoubleMethod(Class clazz, Object instance, String name, + String signature); + public static native boolean callBooleanMethod(Class clazz, Object instance, String name, + String signature); + public static native void callVoidMethod(Class clazz, Object instance, String name, + String signature); + public static native Object callObjectMethod(Class clazz, Object instance, String name, + String signature); + public static native byte callStaticByteMethod(Class clazz, String name, String signature); + public static native char callStaticCharMethod(Class clazz, String name, String signature); + public static native short callStaticShortMethod(Class clazz, String name, String signature); + public static native int callStaticIntMethod(Class clazz, String name, String signature); + public static native long callStaticLongMethod(Class clazz, String name, String signature); + public static native float callStaticFloatMethod(Class clazz, String name, String signature); + public static native double callStaticDoubleMethod(Class clazz, String name, String signature); + public static native boolean callStaticBooleanMethod(Class clazz, String name, + String signature); + public static native void callStaticVoidMethod(Class clazz, String name, String signature); + public static native Object callStaticObjectMethod(Class clazz, String name, String signature); + + //================================================================================ + // Object methods used for CallMethod tests. 
+ //================================================================================ + public byte byteMethod() { return 1; } + public char charMethod() { return 'a'; } + public short shortMethod() { return 1; } + public int intMethod() { return 1; } + public long longMethod() { return 1; } + public float floatMethod() { return 3.14f; } + public double doubleMethod() { return 3.14; } + public boolean booleanMethod() { return true; } + public void voidMethod() {} + public String objectMethod() { return "Hello"; } + + //================================================================================ + // Static methods used for CallStaticMethod tests. + //================================================================================ + public static byte staticByteMethod() { return 1; } + public static char staticCharMethod() { return 'a'; } + public static short staticShortMethod() { return 1; } + public static int staticIntMethod() { return 1; } + public static long staticLongMethod() { return 1; } + public static float staticFloatMethod() { return 3.14f; } + public static double staticDoubleMethod() { return 3.14; } + public static boolean staticBooleanMethod() { return true; } + public static void staticVoidMethod() {} + public static String staticObjectMethod() { return "Hello"; } + + static class Foo {} + + @Test + public void testMethodId() { + getMethodId(Foo.class, "", "()V"); + } + + @Test + public void testStaticMethodId() { + getStaticMethodId(JniHelperTest.class, "staticVoidMethod", "()V"); + } + + @Test + public void testFindClass() { + assertThat(findClass("java/lang/Exception")).isEqualTo(Exception.class); + } + + @Test + public void testGetObjectClass() { + String s = "Hello"; + assertThat(getObjectClass(s)).isEqualTo(String.class); + } + + @Test + public void testNewObject() { + assertThat(newObject(Foo.class, "", "()V")).isInstanceOf(Foo.class); + } + + @Test + public void testThrowNew() { + assertThatThrownBy(() -> 
throwNew("java/lang/RuntimeException", "Test")) + .isInstanceOf(RuntimeException.class) + .hasMessageContaining("Test"); + } + + @Test + public void testGetArrayLength() { + assertThat(getArrayLength(new int[] {1, 2, 3})).isEqualTo(3); + } + + @Test + public void testNewCharArray() { + assertThat(newCharArray(3)).isEqualTo(new char[] {0, 0, 0}); + } + + @Test + public void testNewShortArray() { + assertThat(newShortArray(3)).isEqualTo(new short[] {0, 0, 0}); + } + + @Test + public void testNewIntArray() { + assertThat(newIntArray(3)).isEqualTo(new int[] {0, 0, 0}); + } + + @Test + public void testNewLongArray() { + assertThat(newLongArray(3)).isEqualTo(new long[] {0, 0, 0}); + } + + @Test + public void testNewFloatArray() { + assertThat(newFloatArray(3)).isEqualTo(new float[] {0, 0, 0}); + } + + @Test + public void testNewDoubleArray() { + assertThat(newDoubleArray(3)).isEqualTo(new double[] {0, 0, 0}); + } + + @Test + public void testNewBooleanArray() { + assertThat(newBooleanArray(3)).isEqualTo(new boolean[] {false, false, false}); + } + + @Test + public void testNewObjectArray() { + assertThat(newObjectArray(3, String.class, "foo")) + .isEqualTo(new String[] {"foo", "foo", "foo"}); + } + + @Test + public void testGetObjectArrayElement() { + Object[] array = new Object[] {1, 2, 3}; + assertThat(getObjectArrayElement(array, 1)).isEqualTo(2); + } + + @Test + public void testSetObjectArrayElement() { + Object[] array = new Object[] {1, 2, 3}; + setObjectArrayElement(array, 1, 200); + assertThat(array).isEqualTo(new Object[] {1, 200, 3}); + } + + @Test + public void testCallByteMethod() { + assertThat(callByteMethod(JniHelperTest.class, this, "byteMethod", "()B")).isEqualTo((byte)1); + } + + @Test + public void testCallCharMethod() { + assertThat(callCharMethod(JniHelperTest.class, this, "charMethod", "()C")).isEqualTo('a'); + } + + @Test + public void testCallShortMethod() { + assertThat(callShortMethod(JniHelperTest.class, this, "shortMethod", "()S")) + 
.isEqualTo((short)1); + } + + @Test + public void testCallIntMethod() { + assertThat(callIntMethod(JniHelperTest.class, this, "intMethod", "()I")).isEqualTo(1); + } + + @Test + public void testCallLongMethod() { + assertThat(callLongMethod(JniHelperTest.class, this, "longMethod", "()J")).isEqualTo(1L); + } + + @Test + public void testCallFloatMethod() { + assertThat(callFloatMethod(JniHelperTest.class, this, "floatMethod", "()F")).isEqualTo(3.14f); + } + + @Test + public void testCallDoubleMethod() { + assertThat(callDoubleMethod(JniHelperTest.class, this, "doubleMethod", "()D")).isEqualTo(3.14); + } + + @Test + public void testCallBooleanMethod() { + assertThat(callBooleanMethod(JniHelperTest.class, this, "booleanMethod", "()Z")) + .isEqualTo(true); + } + + @Test + public void testCallVoidMethod() { + callVoidMethod(JniHelperTest.class, this, "voidMethod", "()V"); + } + + @Test + public void testCallObjectMethod() { + assertThat(callObjectMethod(JniHelperTest.class, this, "objectMethod", "()Ljava/lang/String;")) + .isEqualTo("Hello"); + } + + @Test + public void testCallStaticByteMethod() { + assertThat(callStaticByteMethod(JniHelperTest.class, "staticByteMethod", "()B")) + .isEqualTo((byte)1); + } + + @Test + public void testCallStaticCharMethod() { + assertThat(callStaticCharMethod(JniHelperTest.class, "staticCharMethod", "()C")).isEqualTo('a'); + } + + @Test + public void testCallStaticShortMethod() { + assertThat(callStaticShortMethod(JniHelperTest.class, "staticShortMethod", "()S")) + .isEqualTo((short)1); + } + + @Test + public void testCallStaticIntMethod() { + assertThat(callStaticIntMethod(JniHelperTest.class, "staticIntMethod", "()I")).isEqualTo(1); + } + + @Test + public void testCallStaticLongMethod() { + assertThat(callStaticLongMethod(JniHelperTest.class, "staticLongMethod", "()J")).isEqualTo(1L); + } + + @Test + public void testCallStaticFloatMethod() { + assertThat(callStaticFloatMethod(JniHelperTest.class, "staticFloatMethod", "()F")) + 
.isEqualTo(3.14f); + } + + @Test + public void testCallStaticDoubleMethod() { + assertThat(callStaticDoubleMethod(JniHelperTest.class, "staticDoubleMethod", "()D")) + .isEqualTo(3.14); + } + + @Test + public void testCallStaticBooleanMethod() { + assertThat(callStaticBooleanMethod(JniHelperTest.class, "staticBooleanMethod", "()Z")) + .isEqualTo(true); + } + + @Test + public void testCallStaticVoidMethod() { + callStaticVoidMethod(JniHelperTest.class, "staticVoidMethod", "()V"); + } + + @Test + public void testCallStaticObjectMethod() { + assertThat( + callStaticObjectMethod(JniHelperTest.class, "staticObjectMethod", "()Ljava/lang/String;")) + .isEqualTo("Hello"); + } +} diff --git a/mobile/test/kotlin/io/envoyproxy/envoymobile/EngineBuilderTest.kt b/mobile/test/kotlin/io/envoyproxy/envoymobile/EngineBuilderTest.kt index 7a7b212f186b..2d137602a0c2 100644 --- a/mobile/test/kotlin/io/envoyproxy/envoymobile/EngineBuilderTest.kt +++ b/mobile/test/kotlin/io/envoyproxy/envoymobile/EngineBuilderTest.kt @@ -209,7 +209,6 @@ class EngineBuilderTest { fun `specifying xDS works`() { var xdsBuilder = XdsBuilder("fake_test_address", 0) xdsBuilder.setAuthenticationToken("x-goog-api-key", "A1B2C3") - xdsBuilder.setJwtAuthenticationToken("my_jwt_token") xdsBuilder.setSslRootCerts("my_root_certs") xdsBuilder.setSni("fake_test_address") xdsBuilder.addRuntimeDiscoveryService("some_rtds_resource") @@ -224,7 +223,6 @@ class EngineBuilderTest { assertThat(engine.envoyConfiguration.xdsAddress).isEqualTo("fake_test_address") assertThat(engine.envoyConfiguration.xdsAuthHeader).isEqualTo("x-goog-api-key") assertThat(engine.envoyConfiguration.xdsAuthToken).isEqualTo("A1B2C3") - assertThat(engine.envoyConfiguration.xdsJwtToken).isEqualTo("my_jwt_token") assertThat(engine.envoyConfiguration.xdsRootCerts).isEqualTo("my_root_certs") assertThat(engine.envoyConfiguration.xdsSni).isEqualTo("fake_test_address") assertThat(engine.envoyConfiguration.rtdsResourceName).isEqualTo("some_rtds_resource") diff 
--git a/mobile/test/swift/EngineBuilderTests.swift b/mobile/test/swift/EngineBuilderTests.swift index c2713374592b..eb275f91d9d8 100644 --- a/mobile/test/swift/EngineBuilderTests.swift +++ b/mobile/test/swift/EngineBuilderTests.swift @@ -407,21 +407,6 @@ final class EngineBuilderTests: XCTestCase { XCTAssertTrue(bootstrapDebugDescription.contains("fake_ssl_root_certs")) XCTAssertTrue(bootstrapDebugDescription.contains("fake_sni_address")) } - - func testAddingXdsJwtSecurityConfigurationWhenRunningEnvoy() { - let xdsBuilder = XdsBuilder(xdsServerAddress: "FAKE_SWIFT_ADDRESS", xdsServerPort: 0) - .setJwtAuthenticationToken(token: "fake_jwt_token", tokenLifetimeInSeconds: 12345) - .setSslRootCerts(rootCerts: "fake_ssl_root_certs") - .setSni(sni: "fake_sni_address") - .addRuntimeDiscoveryService(resourceName: "some_rtds_resource", timeoutInSeconds: 14325) - let bootstrapDebugDescription = EngineBuilder() - .addEngineType(MockEnvoyEngine.self) - .setXds(xdsBuilder) - .bootstrapDebugDescription() - XCTAssertTrue(bootstrapDebugDescription.contains("fake_jwt_token")) - XCTAssertTrue(bootstrapDebugDescription.contains("fake_ssl_root_certs")) - XCTAssertTrue(bootstrapDebugDescription.contains("fake_sni_address")) - } #endif func testXDSDefaultValues() { diff --git a/mobile/tools/ktfmt.sh b/mobile/tools/ktfmt.sh index 59ce244ab79c..2f585038a07c 100755 --- a/mobile/tools/ktfmt.sh +++ b/mobile/tools/ktfmt.sh @@ -11,10 +11,10 @@ readonly ktfmt_url ktfmt_sha256="97fc7fbd194d01a9fa45d8147c0552403003d55bac4ab89d84d7bb4d5e3f48de" readonly ktfmt_sha256 -jdk_url="https://cdn.azul.com/zulu/bin/zulu11.1+23-ea-jdk11-linux_x64.tar.gz" +jdk_url="https://cdn.azul.com/zulu/bin/zulu11.68.17-ca-jdk11.0.21-linux_x64.tar.gz" readonly jdk_url -jdk_sha256="7cd09d542fa5623df5a59447304c3a41c0b682d3ca26b5e9d99e5214cf21fdd7" +jdk_sha256="725aba257da4bca14959060fea3faf59005eafdc2d5ccc3cb745403c5b60fb27" readonly jdk_sha256 script_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd)" diff --git a/source/common/common/logger.h b/source/common/common/logger.h index f3ad2bc01bc7..a90e43628e1e 100644 --- a/source/common/common/logger.h +++ b/source/common/common/logger.h @@ -39,6 +39,7 @@ const static bool should_log = true; FUNCTION(aws) \ FUNCTION(assert) \ FUNCTION(backtrace) \ + FUNCTION(basic_auth) \ FUNCTION(cache_filter) \ FUNCTION(client) \ FUNCTION(config) \ diff --git a/source/common/formatter/stream_info_formatter.cc b/source/common/formatter/stream_info_formatter.cc index 1bc5ff5d6e9d..a164985e188d 100644 --- a/source/common/formatter/stream_info_formatter.cc +++ b/source/common/formatter/stream_info_formatter.cc @@ -530,15 +530,14 @@ class StreamInfoAddressFormatterProvider : public StreamInfoFormatterProvider { return SubstitutionFormatUtils::unspecifiedValue(); } - if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.format_ports_as_numbers")) { - if (extraction_type_ == StreamInfoAddressFieldExtractionType::JustPort) { - const auto port = StreamInfo::Utility::extractDownstreamAddressJustPort(*address); - if (port) { - return ValueUtil::numberValue(*port); - } - return SubstitutionFormatUtils::unspecifiedValue(); + if (extraction_type_ == StreamInfoAddressFieldExtractionType::JustPort) { + const auto port = StreamInfo::Utility::extractDownstreamAddressJustPort(*address); + if (port) { + return ValueUtil::numberValue(*port); } + return SubstitutionFormatUtils::unspecifiedValue(); } + return ValueUtil::stringValue(toString(*address)); } diff --git a/source/common/http/async_client_impl.cc b/source/common/http/async_client_impl.cc index 18167336065a..04ca4bb217d4 100644 --- a/source/common/http/async_client_impl.cc +++ b/source/common/http/async_client_impl.cc @@ -140,6 +140,8 @@ void AsyncStreamImpl::encodeTrailers(ResponseTrailerMapPtr&& trailers) { } void AsyncStreamImpl::sendHeaders(RequestHeaderMap& headers, bool end_stream) { + request_headers_ = &headers; + if (Http::Headers::get().MethodValues.Head == 
headers.getMethodValue()) { is_head_request_ = true; } @@ -182,6 +184,8 @@ void AsyncStreamImpl::sendData(Buffer::Instance& data, bool end_stream) { } void AsyncStreamImpl::sendTrailers(RequestTrailerMap& trailers) { + request_trailers_ = &trailers; + ASSERT(dispatcher().isThreadSafe()); // See explanation in sendData. if (local_closed_) { diff --git a/source/common/http/async_client_impl.h b/source/common/http/async_client_impl.h index 4d5c80527a33..47e99aee70d3 100644 --- a/source/common/http/async_client_impl.h +++ b/source/common/http/async_client_impl.h @@ -208,13 +208,10 @@ class AsyncStreamImpl : public virtual AsyncClient::Stream, } // The async client won't pause if sending 1xx headers so simply swallow any. void encode1xxHeaders(ResponseHeaderMapPtr&&) override {} - ResponseHeaderMapOptRef informationalHeaders() const override { return {}; } void encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream, absl::string_view details) override; - ResponseHeaderMapOptRef responseHeaders() const override { return {}; } void encodeData(Buffer::Instance& data, bool end_stream) override; void encodeTrailers(ResponseTrailerMapPtr&& trailers) override; - ResponseTrailerMapOptRef responseTrailers() const override { return {}; } void encodeMetadata(MetadataMapPtr&&) override {} void onDecoderFilterAboveWriteBufferHighWatermark() override { ++high_watermark_calls_; @@ -254,6 +251,13 @@ class AsyncStreamImpl : public virtual AsyncClient::Stream, void setUpstreamOverrideHost(absl::string_view) override {} absl::optional upstreamOverrideHost() const override { return {}; } absl::string_view filterConfigName() const override { return ""; } + RequestHeaderMapOptRef requestHeaders() override { return makeOptRefFromPtr(request_headers_); } + RequestTrailerMapOptRef requestTrailers() override { + return makeOptRefFromPtr(request_trailers_); + } + ResponseHeaderMapOptRef informationalHeaders() override { return {}; } + ResponseHeaderMapOptRef responseHeaders() override { 
return {}; } + ResponseTrailerMapOptRef responseTrailers() override { return {}; } // ScopeTrackedObject void dumpState(std::ostream& os, int indent_level) const override { @@ -275,6 +279,8 @@ class AsyncStreamImpl : public virtual AsyncClient::Stream, Buffer::InstancePtr buffered_body_; Buffer::BufferMemoryAccountSharedPtr account_{nullptr}; absl::optional buffer_limit_{absl::nullopt}; + RequestHeaderMap* request_headers_{}; + RequestTrailerMap* request_trailers_{}; bool encoded_response_headers_{}; bool is_grpc_request_{}; bool is_head_request_{false}; diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 4b8b59d8238c..3e07fc8efbc7 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -1518,10 +1518,14 @@ void ConnectionManagerImpl::ActiveStream::decodeTrailers(RequestTrailerMapPtr&& void ConnectionManagerImpl::ActiveStream::decodeMetadata(MetadataMapPtr&& metadata_map) { resetIdleTimer(); - // After going through filters, the ownership of metadata_map will be passed to terminal filter. - // The terminal filter may encode metadata_map to the next hop immediately or store metadata_map - // and encode later when connection pool is ready. - filter_manager_.decodeMetadata(*metadata_map); + if (!state_.deferred_to_next_io_iteration_) { + // After going through filters, the ownership of metadata_map will be passed to terminal filter. + // The terminal filter may encode metadata_map to the next hop immediately or store metadata_map + // and encode later when connection pool is ready. 
+ filter_manager_.decodeMetadata(*metadata_map); + } else { + deferred_metadata_.push(std::move(metadata_map)); + } } void ConnectionManagerImpl::ActiveStream::disarmRequestTimeout() { @@ -2229,12 +2233,18 @@ bool ConnectionManagerImpl::ActiveStream::onDeferredRequestProcessing() { return false; } state_.deferred_to_next_io_iteration_ = false; - bool end_stream = - state_.deferred_end_stream_ && deferred_data_ == nullptr && request_trailers_ == nullptr; + bool end_stream = state_.deferred_end_stream_ && deferred_data_ == nullptr && + request_trailers_ == nullptr && deferred_metadata_.empty(); filter_manager_.decodeHeaders(*request_headers_, end_stream); if (end_stream) { return true; } + // Send metadata before data, as data may have an associated end_stream. + while (!deferred_metadata_.empty()) { + MetadataMapPtr& metadata = deferred_metadata_.front(); + filter_manager_.decodeMetadata(*metadata); + deferred_metadata_.pop(); + } // Filter manager will return early from decodeData and decodeTrailers if // request has completed. 
if (deferred_data_ != nullptr) { diff --git a/source/common/http/conn_manager_impl.h b/source/common/http/conn_manager_impl.h index 3b6c9c5a5a41..c7a330115344 100644 --- a/source/common/http/conn_manager_impl.h +++ b/source/common/http/conn_manager_impl.h @@ -520,6 +520,7 @@ class ConnectionManagerImpl : Logger::Loggable, std::shared_ptr still_alive_ = std::make_shared(true); std::unique_ptr deferred_data_; + std::queue deferred_metadata_; }; using ActiveStreamPtr = std::unique_ptr; diff --git a/source/common/http/filter_manager.cc b/source/common/http/filter_manager.cc index fbe7277b9ab4..a3af10905664 100644 --- a/source/common/http/filter_manager.cc +++ b/source/common/http/filter_manager.cc @@ -352,6 +352,22 @@ OptRef ActiveStreamFilterBase::upstreamCallbacks( return parent_.filter_manager_callbacks_.upstreamCallbacks(); } +RequestHeaderMapOptRef ActiveStreamFilterBase::requestHeaders() { + return parent_.filter_manager_callbacks_.requestHeaders(); +} +RequestTrailerMapOptRef ActiveStreamFilterBase::requestTrailers() { + return parent_.filter_manager_callbacks_.requestTrailers(); +} +ResponseHeaderMapOptRef ActiveStreamFilterBase::informationalHeaders() { + return parent_.filter_manager_callbacks_.informationalHeaders(); +} +ResponseHeaderMapOptRef ActiveStreamFilterBase::responseHeaders() { + return parent_.filter_manager_callbacks_.responseHeaders(); +} +ResponseTrailerMapOptRef ActiveStreamFilterBase::responseTrailers() { + return parent_.filter_manager_callbacks_.responseTrailers(); +} + bool ActiveStreamDecoderFilter::canContinue() { // It is possible for the connection manager to respond directly to a request even while // a filter is trying to continue. 
If a response has already happened, we should not @@ -477,10 +493,6 @@ void ActiveStreamDecoderFilter::encode1xxHeaders(ResponseHeaderMapPtr&& headers) } } -ResponseHeaderMapOptRef ActiveStreamDecoderFilter::informationalHeaders() const { - return parent_.filter_manager_callbacks_.informationalHeaders(); -} - void ActiveStreamDecoderFilter::encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream, absl::string_view details) { parent_.streamInfo().setResponseCodeDetails(details); @@ -488,10 +500,6 @@ void ActiveStreamDecoderFilter::encodeHeaders(ResponseHeaderMapPtr&& headers, bo parent_.encodeHeaders(nullptr, *parent_.filter_manager_callbacks_.responseHeaders(), end_stream); } -ResponseHeaderMapOptRef ActiveStreamDecoderFilter::responseHeaders() const { - return parent_.filter_manager_callbacks_.responseHeaders(); -} - void ActiveStreamDecoderFilter::encodeData(Buffer::Instance& data, bool end_stream) { parent_.encodeData(nullptr, data, end_stream, FilterManager::FilterIterationStartState::CanStartFromCurrent); @@ -502,10 +510,6 @@ void ActiveStreamDecoderFilter::encodeTrailers(ResponseTrailerMapPtr&& trailers) parent_.encodeTrailers(nullptr, *parent_.filter_manager_callbacks_.responseTrailers()); } -ResponseTrailerMapOptRef ActiveStreamDecoderFilter::responseTrailers() const { - return parent_.filter_manager_callbacks_.responseTrailers(); -} - void ActiveStreamDecoderFilter::encodeMetadata(MetadataMapPtr&& metadata_map_ptr) { parent_.encodeMetadata(nullptr, std::move(metadata_map_ptr)); } diff --git a/source/common/http/filter_manager.h b/source/common/http/filter_manager.h index b276327444d4..030c6709f047 100644 --- a/source/common/http/filter_manager.h +++ b/source/common/http/filter_manager.h @@ -106,6 +106,11 @@ struct ActiveStreamFilterBase : public virtual StreamFilterCallbacks, OptRef downstreamCallbacks() override; OptRef upstreamCallbacks() override; absl::string_view filterConfigName() const override { return filter_context_.config_name; } + 
RequestHeaderMapOptRef requestHeaders() override; + RequestTrailerMapOptRef requestTrailers() override; + ResponseHeaderMapOptRef informationalHeaders() override; + ResponseHeaderMapOptRef responseHeaders() override; + ResponseTrailerMapOptRef responseTrailers() override; // Functions to set or get iteration state. bool canIterate() { return iteration_state_ == IterationState::Continue; } @@ -218,13 +223,10 @@ struct ActiveStreamDecoderFilter : public ActiveStreamFilterBase, const absl::optional grpc_status, absl::string_view details) override; void encode1xxHeaders(ResponseHeaderMapPtr&& headers) override; - ResponseHeaderMapOptRef informationalHeaders() const override; void encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream, absl::string_view details) override; - ResponseHeaderMapOptRef responseHeaders() const override; void encodeData(Buffer::Instance& data, bool end_stream) override; void encodeTrailers(ResponseTrailerMapPtr&& trailers) override; - ResponseTrailerMapOptRef responseTrailers() const override; void encodeMetadata(MetadataMapPtr&& metadata_map_ptr) override; void onDecoderFilterAboveWriteBufferHighWatermark() override; void onDecoderFilterBelowWriteBufferLowWatermark() override; diff --git a/source/common/network/tcp_listener_impl.cc b/source/common/network/tcp_listener_impl.cc index a3ea313ddb49..7ff12abac413 100644 --- a/source/common/network/tcp_listener_impl.cc +++ b/source/common/network/tcp_listener_impl.cc @@ -25,14 +25,15 @@ bool TcpListenerImpl::rejectCxOverGlobalLimit() const { if (ignore_global_conn_limit_) { return false; } - + // TODO(nezdolik): deprecate `overload.global_downstream_max_connections` key once + // downstream connections monitor extension is stable. 
if (track_global_cx_limit_in_overload_manager_) { - // Check if deprecated runtime flag `overload.global_downstream_max_connections` is configured + // Check if runtime flag `overload.global_downstream_max_connections` is configured // simultaneously with downstream connections monitor in overload manager. if (runtime_.threadsafeSnapshot()->get(GlobalMaxCxRuntimeKey)) { ENVOY_LOG_ONCE_MISC( warn, - "Global downstream connections limits is configured via deprecated runtime key {} and in " + "Global downstream connections limits is configured via runtime key {} and in " "{}. Using overload manager config.", GlobalMaxCxRuntimeKey, Server::OverloadProactiveResources::get().GlobalDownstreamMaxConnections); diff --git a/source/common/quic/envoy_quic_dispatcher.cc b/source/common/quic/envoy_quic_dispatcher.cc index 665b51ac4797..dd4f14183b0a 100644 --- a/source/common/quic/envoy_quic_dispatcher.cc +++ b/source/common/quic/envoy_quic_dispatcher.cc @@ -177,13 +177,26 @@ void EnvoyQuicDispatcher::closeConnectionsWithFilterChain( // Retain the number of connections in the list early because closing the connection will change // the size. const size_t num_connections = connections.size(); + bool delete_sessions_immediately = false; for (size_t i = 0; i < num_connections; ++i) { Network::Connection& connection = connections.front().get(); // This will remove the connection from the list. And the last removal will remove connections // from the map as well. connection.close(Network::ConnectionCloseType::NoFlush); + if (!delete_sessions_immediately && + dynamic_cast(connection).fix_quic_lifetime_issues()) { + // If `envoy.reloadable_features.quic_fix_filter_manager_uaf` is true, the closed sessions + // need to be deleted right away to consistently handle quic lifetimes. Because upon + // returning the filter chain configs will be destroyed, and no longer safe to be accessed. 
+ // If any filters access those configs during destruction, it'll be use-after-free + delete_sessions_immediately = true; + } } ASSERT(connections_by_filter_chain_.find(filter_chain) == connections_by_filter_chain_.end()); + if (delete_sessions_immediately) { + // Explicitly destroy closed sessions in the current call stack. + DeleteSessions(); + } } } diff --git a/source/common/quic/quic_filter_manager_connection_impl.cc b/source/common/quic/quic_filter_manager_connection_impl.cc index e9a94780a349..2f2068f3a58a 100644 --- a/source/common/quic/quic_filter_manager_connection_impl.cc +++ b/source/common/quic/quic_filter_manager_connection_impl.cc @@ -27,6 +27,8 @@ QuicFilterManagerConnectionImpl::QuicFilterManagerConnectionImpl( stream_info_->protocol(Http::Protocol::Http3); network_connection_->connectionSocket()->connectionInfoProvider().setSslConnection( Ssl::ConnectionInfoConstSharedPtr(quic_ssl_info_)); + fix_quic_lifetime_issues_ = + Runtime::runtimeFeatureEnabled("envoy.reloadable_features.quic_fix_filter_manager_uaf"); } void QuicFilterManagerConnectionImpl::addWriteFilter(Network::WriteFilterSharedPtr filter) { @@ -179,7 +181,9 @@ void QuicFilterManagerConnectionImpl::onConnectionCloseEvent( network_connection_ = nullptr; } - filter_manager_ = nullptr; + if (!fix_quic_lifetime_issues_) { + filter_manager_ = nullptr; + } if (!codec_stats_.has_value()) { // The connection was closed before it could be used. Stats are not recorded. 
return; diff --git a/source/common/quic/quic_filter_manager_connection_impl.h b/source/common/quic/quic_filter_manager_connection_impl.h index f64278028f83..4a1a57a093d3 100644 --- a/source/common/quic/quic_filter_manager_connection_impl.h +++ b/source/common/quic/quic_filter_manager_connection_impl.h @@ -171,6 +171,8 @@ class QuicFilterManagerConnectionImpl : public Network::ConnectionImplBase, max_headers_count_ = max_headers_count; } + bool fix_quic_lifetime_issues() const { return fix_quic_lifetime_issues_; } + protected: // Propagate connection close to network_connection_callbacks_. void onConnectionCloseEvent(const quic::QuicConnectionCloseFrame& frame, @@ -207,10 +209,10 @@ class QuicFilterManagerConnectionImpl : public Network::ConnectionImplBase, // Called when aggregated buffered bytes across all the streams declines to low watermark. void onSendBufferLowWatermark(); - // Currently ConnectionManagerImpl is the one and only filter. If more network - // filters are added, ConnectionManagerImpl should always be the last one. - // Its onRead() is only called once to trigger ReadFilter::onNewConnection() - // and the rest incoming data bypasses these filters. + // ConnectionManagerImpl should always be the last filter. Its onRead() is only called once to + // trigger ReadFilter::onNewConnection() and the rest incoming data bypasses these filters. + // It has the same life time as this connection, so do all the filters. If the connection gets + // defer-deleted, they will be defer-deleted together. 
std::unique_ptr filter_manager_; std::unique_ptr stream_info_; @@ -224,6 +226,7 @@ class QuicFilterManagerConnectionImpl : public Network::ConnectionImplBase, EnvoyQuicSimulatedWatermarkBuffer write_buffer_watermark_simulation_; Buffer::OwnedImpl empty_buffer_; absl::optional close_type_during_initialize_; + bool fix_quic_lifetime_issues_{false}; }; } // namespace Quic diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index fb5b91a9a949..c572b3d2820a 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -36,6 +36,7 @@ RUNTIME_GUARD(envoy_reloadable_features_conn_pool_delete_when_idle); RUNTIME_GUARD(envoy_reloadable_features_convert_legacy_lb_config); RUNTIME_GUARD(envoy_reloadable_features_copy_response_code_to_downstream_stream_info); RUNTIME_GUARD(envoy_reloadable_features_count_unused_mapped_pages_as_free); +RUNTIME_GUARD(envoy_reloadable_features_defer_processing_backedup_streams); RUNTIME_GUARD(envoy_reloadable_features_detect_and_raise_rst_tcp_connection); RUNTIME_GUARD(envoy_reloadable_features_dfp_mixed_scheme); RUNTIME_GUARD(envoy_reloadable_features_enable_aws_credentials_file); @@ -44,7 +45,6 @@ RUNTIME_GUARD(envoy_reloadable_features_enable_connect_udp_support); RUNTIME_GUARD(envoy_reloadable_features_enable_intermediate_ca); RUNTIME_GUARD(envoy_reloadable_features_enable_zone_routing_different_zone_counts); RUNTIME_GUARD(envoy_reloadable_features_ext_authz_http_send_original_xff); -RUNTIME_GUARD(envoy_reloadable_features_format_ports_as_numbers); RUNTIME_GUARD(envoy_reloadable_features_handle_uppercase_scheme); RUNTIME_GUARD(envoy_reloadable_features_hmac_base64_encoding_only); RUNTIME_GUARD(envoy_reloadable_features_http1_allow_codec_error_response_after_1xx_headers); @@ -71,6 +71,7 @@ RUNTIME_GUARD(envoy_reloadable_features_oauth_use_url_encoding); RUNTIME_GUARD(envoy_reloadable_features_original_dst_rely_on_idle_timeout); 
RUNTIME_GUARD(envoy_reloadable_features_overload_manager_error_unknown_action); RUNTIME_GUARD(envoy_reloadable_features_proxy_status_upstream_request_timeout); +RUNTIME_GUARD(envoy_reloadable_features_quic_fix_filter_manager_uaf); RUNTIME_GUARD(envoy_reloadable_features_send_header_raw_value); RUNTIME_GUARD(envoy_reloadable_features_skip_dns_lookup_for_proxied_requests); RUNTIME_GUARD(envoy_reloadable_features_ssl_transport_failure_reason_format); @@ -98,9 +99,6 @@ FALSE_RUNTIME_GUARD(envoy_reloadable_features_test_feature_false); FALSE_RUNTIME_GUARD(envoy_reloadable_features_streaming_shadow); // TODO(adisuissa) reset to true to enable unified mux by default FALSE_RUNTIME_GUARD(envoy_reloadable_features_unified_mux); -// TODO(kbaichoo): Make this enabled by default when fairness and chunking -// are implemented, and we've had more cpu time. -FALSE_RUNTIME_GUARD(envoy_reloadable_features_defer_processing_backedup_streams); // TODO(birenroy) flip after a burn-in period FALSE_RUNTIME_GUARD(envoy_reloadable_features_http2_use_oghttp2); // Used to track if runtime is initialized. diff --git a/source/common/upstream/BUILD b/source/common/upstream/BUILD index 9e1f42d23e91..5a705463c2ec 100644 --- a/source/common/upstream/BUILD +++ b/source/common/upstream/BUILD @@ -619,4 +619,6 @@ envoy_cc_library( "//source/common/network:resolver_lib", "@envoy_api//envoy/config/upstream/local_address_selector/v3:pkg_cc_proto", ], + # Ensure this factory in the source is always linked in. 
+ alwayslink = 1, ) diff --git a/source/exe/main_common.cc b/source/exe/main_common.cc index 10c5bfcb7e7c..e2d7d8703337 100644 --- a/source/exe/main_common.cc +++ b/source/exe/main_common.cc @@ -39,10 +39,12 @@ StrippedMainBase::CreateInstanceFunction createFunction() { Filesystem::Instance& file_system, std::unique_ptr process_context, Buffer::WatermarkFactorySharedPtr watermark_factory) { auto local_address = Network::Utility::getLocalAddress(options.localAddressIpVersion()); - return std::make_unique( - init_manager, options, time_system, local_address, hooks, restarter, store, - access_log_lock, component_factory, std::move(random_generator), tls, thread_factory, - file_system, std::move(process_context), watermark_factory); + auto server = std::make_unique( + init_manager, options, time_system, hooks, restarter, store, access_log_lock, + std::move(random_generator), tls, thread_factory, file_system, + std::move(process_context), watermark_factory); + server->initialize(local_address, component_factory); + return server; }; } diff --git a/source/extensions/common/aws/BUILD b/source/extensions/common/aws/BUILD index 96382e2095c2..b5d884069500 100644 --- a/source/extensions/common/aws/BUILD +++ b/source/extensions/common/aws/BUILD @@ -40,6 +40,18 @@ envoy_cc_library( external_deps = ["abseil_optional"], ) +envoy_cc_library( + name = "metadata_fetcher_lib", + srcs = ["metadata_fetcher.cc"], + hdrs = ["metadata_fetcher.h"], + deps = [ + ":utility_lib", + "//envoy/upstream:cluster_manager_interface", + "//source/common/http:utility_lib", + "@envoy_api//envoy/config/core/v3:pkg_cc_proto", + ], +) + envoy_cc_library( name = "credentials_provider_impl_lib", srcs = ["credentials_provider_impl.cc"], @@ -63,10 +75,14 @@ envoy_cc_library( external_deps = ["curl"], deps = [ "//envoy/http:message_interface", + "//envoy/upstream:cluster_manager_interface", "//source/common/common:empty_string", "//source/common/common:matchers_lib", "//source/common/common:utility_lib", 
"//source/common/http:headers_lib", + "//source/common/http:utility_lib", + "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto", + "@envoy_api//envoy/extensions/upstreams/http/v3:pkg_cc_proto", ], ) diff --git a/source/extensions/common/aws/metadata_fetcher.cc b/source/extensions/common/aws/metadata_fetcher.cc new file mode 100644 index 000000000000..339f75be7c2c --- /dev/null +++ b/source/extensions/common/aws/metadata_fetcher.cc @@ -0,0 +1,179 @@ +#include "source/extensions/common/aws/metadata_fetcher.h" + +#include "envoy/config/core/v3/base.pb.h" +#include "envoy/config/core/v3/http_uri.pb.h" + +#include "source/common/common/enum_to_int.h" +#include "source/common/http/headers.h" +#include "source/common/http/utility.h" +#include "source/common/protobuf/utility.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace Aws { + +namespace { + +class MetadataFetcherImpl : public MetadataFetcher, + public Logger::Loggable, + public Http::AsyncClient::Callbacks { + +public: + MetadataFetcherImpl(Upstream::ClusterManager& cm, absl::string_view cluster_name) + : cm_(cm), cluster_name_(std::string(cluster_name)) {} + + ~MetadataFetcherImpl() override { cancel(); } + + void cancel() override { + if (request_ && !complete_) { + request_->cancel(); + ENVOY_LOG(debug, "fetch AWS Metadata [cluster = {}]: cancelled", cluster_name_); + } + reset(); + } + + absl::string_view failureToString(MetadataFetcher::MetadataReceiver::Failure reason) override { + switch (reason) { + case MetadataFetcher::MetadataReceiver::Failure::Network: + return "Network"; + case MetadataFetcher::MetadataReceiver::Failure::InvalidMetadata: + return "InvalidMetadata"; + case MetadataFetcher::MetadataReceiver::Failure::MissingConfig: + return "MissingConfig"; + default: + return ""; + } + } + + void fetch(Http::RequestMessage& message, Tracing::Span& parent_span, + MetadataFetcher::MetadataReceiver& receiver) override { + ASSERT(!request_); + ASSERT(!receiver_); + complete_ = 
false; + receiver_ = makeOptRef(receiver); + const auto thread_local_cluster = cm_.getThreadLocalCluster(cluster_name_); + if (thread_local_cluster == nullptr) { + ENVOY_LOG(error, "{} AWS Metadata failed: [cluster = {}] not found", __func__, cluster_name_); + complete_ = true; + receiver_->onMetadataError(MetadataFetcher::MetadataReceiver::Failure::MissingConfig); + reset(); + return; + } + + constexpr uint64_t MAX_RETRIES = 3; + constexpr uint64_t RETRY_DELAY = 1000; + constexpr uint64_t TIMEOUT = 5 * 1000; + + const auto host_attributes = Http::Utility::parseAuthority(message.headers().getHostValue()); + const auto host = host_attributes.host_; + const auto path = message.headers().getPathValue(); + const auto scheme = message.headers().getSchemeValue(); + const auto method = message.headers().getMethodValue(); + ENVOY_LOG(debug, "fetch AWS Metadata at [uri = {}]: start from cluster {}", + fmt::format("{}://{}{}", scheme, host, path), cluster_name_); + + Http::RequestHeaderMapPtr headersPtr = + Envoy::Http::createHeaderMap( + {{Envoy::Http::Headers::get().Method, std::string(method)}, + {Envoy::Http::Headers::get().Host, std::string(host)}, + {Envoy::Http::Headers::get().Scheme, std::string(scheme)}, + {Envoy::Http::Headers::get().Path, std::string(path)}}); + + // Copy the remaining headers. 
+ message.headers().iterate( + [&headersPtr](const Http::HeaderEntry& entry) -> Http::HeaderMap::Iterate { + // Skip pseudo-headers + if (!entry.key().getStringView().empty() && entry.key().getStringView()[0] == ':') { + return Http::HeaderMap::Iterate::Continue; + } + headersPtr->addCopy(Http::LowerCaseString(entry.key().getStringView()), + entry.value().getStringView()); + return Http::HeaderMap::Iterate::Continue; + }); + + auto messagePtr = std::make_unique(std::move(headersPtr)); + + auto options = Http::AsyncClient::RequestOptions() + .setTimeout(std::chrono::milliseconds(TIMEOUT)) + .setParentSpan(parent_span) + .setSendXff(false) + .setChildSpanName("AWS Metadata Fetch"); + + envoy::config::route::v3::RetryPolicy route_retry_policy; + route_retry_policy.mutable_num_retries()->set_value(MAX_RETRIES); + route_retry_policy.mutable_per_try_timeout()->CopyFrom( + Protobuf::util::TimeUtil::MillisecondsToDuration(TIMEOUT)); + route_retry_policy.mutable_per_try_idle_timeout()->CopyFrom( + Protobuf::util::TimeUtil::MillisecondsToDuration(RETRY_DELAY)); + route_retry_policy.set_retry_on("5xx,gateway-error,connect-failure,reset,refused-stream"); + + options.setRetryPolicy(route_retry_policy); + options.setBufferBodyForRetry(true); + request_ = makeOptRefFromPtr( + thread_local_cluster->httpAsyncClient().send(std::move(messagePtr), *this, options)); + } + + // HTTP async receive method on success. 
+ void onSuccess(const Http::AsyncClient::Request&, Http::ResponseMessagePtr&& response) override { + complete_ = true; + const uint64_t status_code = Http::Utility::getResponseStatus(response->headers()); + if (status_code == enumToInt(Http::Code::OK)) { + ENVOY_LOG(debug, "{}: fetch AWS Metadata [cluster = {}]: success", __func__, cluster_name_); + if (response->body().length() != 0) { + const auto body = response->bodyAsString(); + receiver_->onMetadataSuccess(std::move(body)); + } else { + ENVOY_LOG(debug, "{}: fetch AWS Metadata [cluster = {}]: body is empty", __func__, + cluster_name_); + receiver_->onMetadataError(MetadataFetcher::MetadataReceiver::Failure::InvalidMetadata); + } + } else { + if (response->body().length() != 0) { + ENVOY_LOG(debug, "{}: fetch AWS Metadata [cluster = {}]: response status code {}, body: {}", + __func__, cluster_name_, status_code, response->bodyAsString()); + } else { + ENVOY_LOG(debug, + "{}: fetch AWS Metadata [cluster = {}]: response status code {}, body is empty", + __func__, cluster_name_, status_code); + } + receiver_->onMetadataError(MetadataFetcher::MetadataReceiver::Failure::Network); + } + reset(); + } + + // HTTP async receive method on failure. + void onFailure(const Http::AsyncClient::Request&, + Http::AsyncClient::FailureReason reason) override { + ENVOY_LOG(debug, "{}: fetch AWS Metadata [cluster = {}]: network error {}", __func__, + cluster_name_, enumToInt(reason)); + complete_ = true; + receiver_->onMetadataError(MetadataFetcher::MetadataReceiver::Failure::Network); + reset(); + } + + // TODO(suniltheta): Add metadata fetch status into the span like it is done on ext_authz filter. 
+ void onBeforeFinalizeUpstreamSpan(Tracing::Span&, const Http::ResponseHeaderMap*) override {} + +private: + bool complete_{}; + Upstream::ClusterManager& cm_; + const std::string cluster_name_; + OptRef receiver_; + OptRef request_; + + void reset() { + request_.reset(); + receiver_.reset(); + } +}; +} // namespace + +MetadataFetcherPtr MetadataFetcher::create(Upstream::ClusterManager& cm, + absl::string_view cluster_name) { + return std::make_unique(cm, cluster_name); +} +} // namespace Aws +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/common/aws/metadata_fetcher.h b/source/extensions/common/aws/metadata_fetcher.h new file mode 100644 index 000000000000..a39d1480447c --- /dev/null +++ b/source/extensions/common/aws/metadata_fetcher.h @@ -0,0 +1,97 @@ +#pragma once + +#include +#include + +#include "envoy/common/pure.h" +#include "envoy/http/message.h" +#include "envoy/upstream/cluster_manager.h" + +#include "source/common/http/message_impl.h" +#include "source/extensions/common/aws/utility.h" + +#include "absl/strings/string_view.h" +#include "absl/types/optional.h" + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace Aws { + +class MetadataFetcher; +using MetadataFetcherPtr = std::unique_ptr; + +/** + * MetadataFetcher interface can be used to retrieve AWS Metadata from various providers. + * An instance of this interface is designed to retrieve one AWS Metadata at a time. + * The implementation of AWS Metadata Fetcher is similar to JwksFetcher. + */ + +class MetadataFetcher { +public: + class MetadataReceiver { + public: + enum class Failure { + /* A network error occurred causing AWS Metadata retrieval failure. */ + Network, + /* A failure occurred when trying to parse the retrieved AWS Metadata data. */ + InvalidMetadata, + /* A missing config causing AWS Metadata retrieval failure. 
*/ + MissingConfig, + }; + + virtual ~MetadataReceiver() = default; + + /** + * @brief Successful retrieval callback of returned AWS Metadata. + * @param body Fetched AWS Metadata. + */ + virtual void onMetadataSuccess(const std::string&& body) PURE; + + /** + * @brief Retrieval error callback. + * @param reason the failure reason. + */ + virtual void onMetadataError(Failure reason) PURE; + }; + + virtual ~MetadataFetcher() = default; + + /** + * @brief Cancel any in-flight request. + */ + virtual void cancel() PURE; + + /** + * @brief Retrieve a AWS Metadata from a remote HTTP host. + * At most one outstanding request may be in-flight. + * i.e. from the invocation of `fetch()` until either + * a callback or `cancel()` is invoked, no additional + * `fetch()` may be issued. The URI to fetch is to pre + * determined based on the credentials provider source. + * + * @param receiver the receiver of the fetched AWS Metadata or error + */ + virtual void fetch(Http::RequestMessage& message, Tracing::Span& parent_span, + MetadataReceiver& receiver) PURE; + + /** + * @brief Return MetadataReceiver Failure enum as a string. + * + * @return absl::string_view + */ + virtual absl::string_view failureToString(MetadataReceiver::Failure) PURE; + + /** + * @brief Factory method for creating a Metadata Fetcher. 
+ * + * @param cm the cluster manager to use during AWS Metadata retrieval + * @param provider the AWS Metadata provider + * @return a MetadataFetcher instance + */ + static MetadataFetcherPtr create(Upstream::ClusterManager& cm, absl::string_view cluster_name); +}; +} // namespace Aws +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/common/aws/utility.cc b/source/extensions/common/aws/utility.cc index 9d5669b2113a..1643e4068ba7 100644 --- a/source/extensions/common/aws/utility.cc +++ b/source/extensions/common/aws/utility.cc @@ -1,13 +1,18 @@ #include "source/extensions/common/aws/utility.h" +#include "envoy/upstream/cluster_manager.h" + #include "source/common/common/empty_string.h" #include "source/common/common/fmt.h" #include "source/common/common/utility.h" +#include "source/common/protobuf/message_validator_impl.h" +#include "source/common/protobuf/utility.h" #include "absl/strings/match.h" #include "absl/strings/str_join.h" #include "absl/strings/str_split.h" #include "curl/curl.h" +#include "fmt/printf.h" namespace Envoy { namespace Extensions { @@ -294,6 +299,57 @@ absl::optional Utility::fetchMetadata(Http::RequestMessage& message return buffer.empty() ? absl::nullopt : absl::optional(buffer); } +bool Utility::addInternalClusterStatic( + Upstream::ClusterManager& cm, absl::string_view cluster_name, + const envoy::config::cluster::v3::Cluster::DiscoveryType cluster_type, absl::string_view uri) { + // Check if local cluster exists with that name. + if (cm.getThreadLocalCluster(cluster_name) == nullptr) { + // Make sure we run this on main thread. + TRY_ASSERT_MAIN_THREAD { + envoy::config::cluster::v3::Cluster cluster; + absl::string_view host_port; + absl::string_view path; + Http::Utility::extractHostPathFromUri(uri, host_port, path); + const auto host_attributes = Http::Utility::parseAuthority(host_port); + const auto host = host_attributes.host_; + const auto port = host_attributes.port_ ? 
host_attributes.port_.value() : 80; + + cluster.set_name(cluster_name); + cluster.set_type(cluster_type); + cluster.mutable_connect_timeout()->set_seconds(5); + cluster.mutable_load_assignment()->set_cluster_name(cluster_name); + auto* endpoint = cluster.mutable_load_assignment() + ->add_endpoints() + ->add_lb_endpoints() + ->mutable_endpoint(); + auto* addr = endpoint->mutable_address(); + addr->mutable_socket_address()->set_address(host); + addr->mutable_socket_address()->set_port_value(port); + cluster.set_lb_policy(envoy::config::cluster::v3::Cluster::ROUND_ROBIN); + envoy::extensions::upstreams::http::v3::HttpProtocolOptions protocol_options; + auto* http_protocol_options = + protocol_options.mutable_explicit_http_config()->mutable_http_protocol_options(); + http_protocol_options->set_accept_http_10(true); + (*cluster.mutable_typed_extension_protocol_options()) + ["envoy.extensions.upstreams.http.v3.HttpProtocolOptions"] + .PackFrom(protocol_options); + + // TODO(suniltheta): use random number generator here for cluster version. 
+ cm.addOrUpdateCluster(cluster, "12345"); + ENVOY_LOG_MISC(info, + "Added a {} internal cluster [name: {}, address:{}:{}] to fetch aws " + "credentials", + cluster_type, cluster_name, host, port); + } + END_TRY + CATCH(const EnvoyException& e, { + ENVOY_LOG_MISC(error, "Failed to add internal cluster {}: {}", cluster_name, e.what()); + return false; + }); + } + return true; +} + } // namespace Aws } // namespace Common } // namespace Extensions diff --git a/source/extensions/common/aws/utility.h b/source/extensions/common/aws/utility.h index 2ec7cae045cd..985ab0de6d9f 100644 --- a/source/extensions/common/aws/utility.h +++ b/source/extensions/common/aws/utility.h @@ -1,9 +1,13 @@ #pragma once +#include "envoy/config/cluster/v3/cluster.pb.h" +#include "envoy/extensions/upstreams/http/v3/http_protocol_options.pb.h" +#include "envoy/extensions/upstreams/http/v3/http_protocol_options.pb.validate.h" #include "envoy/http/message.h" #include "source/common/common/matchers.h" #include "source/common/http/headers.h" +#include "source/common/http/utility.h" namespace Envoy { namespace Extensions { @@ -92,6 +96,24 @@ class Utility { * gRPC auth plugins that are able to schedule blocking plugins on a different thread. */ static absl::optional fetchMetadata(Http::RequestMessage& message); + + /** + * @brief Adds a static cluster towards a credentials provider + * to fetch the credentials using http async client. + * + * @param cm cluster manager + * @param cluster_name a name for credentials provider cluster + * @param cluster_type STATIC or STRICT_DNS or LOGICAL_DNS etc + * @param uri provider's IP (STATIC cluster) or URL (STRICT_DNS). Will use port 80 if the port is + * not specified in the uri or no matching cluster is found. + * @return true if successfully added the cluster or if a cluster with the cluster_name already + * exists. 
+ * @return false if failed to add the cluster + */ + static bool + addInternalClusterStatic(Upstream::ClusterManager& cm, absl::string_view cluster_name, + const envoy::config::cluster::v3::Cluster::DiscoveryType cluster_type, + absl::string_view uri); }; } // namespace Aws diff --git a/source/extensions/extensions_build_config.bzl b/source/extensions/extensions_build_config.bzl index f73bf64356c9..423eb9ea0190 100644 --- a/source/extensions/extensions_build_config.bzl +++ b/source/extensions/extensions_build_config.bzl @@ -124,6 +124,7 @@ EXTENSIONS = { "envoy.filters.http.aws_lambda": "//source/extensions/filters/http/aws_lambda:config", "envoy.filters.http.aws_request_signing": "//source/extensions/filters/http/aws_request_signing:config", "envoy.filters.http.bandwidth_limit": "//source/extensions/filters/http/bandwidth_limit:config", + "envoy.filters.http.basic_auth": "//source/extensions/filters/http/basic_auth:config", "envoy.filters.http.buffer": "//source/extensions/filters/http/buffer:config", "envoy.filters.http.cache": "//source/extensions/filters/http/cache:config", "envoy.filters.http.cdn_loop": "//source/extensions/filters/http/cdn_loop:config", @@ -265,6 +266,18 @@ EXTENSIONS = { "envoy.tracers.skywalking": "//source/extensions/tracers/skywalking:config", "envoy.tracers.opentelemetry": "//source/extensions/tracers/opentelemetry:config", + # + # OpenTelemetry Resource Detectors + # + + "envoy.tracers.opentelemetry.resource_detectors.environment": "//source/extensions/tracers/opentelemetry/resource_detectors/environment:config", + + # + # OpenTelemetry tracer samplers + # + + "envoy.tracers.opentelemetry.samplers.always_on": "//source/extensions/tracers/opentelemetry/samplers/always_on:config", + # # Transport sockets # diff --git a/source/extensions/extensions_metadata.yaml b/source/extensions/extensions_metadata.yaml index 7098afed83ad..1b6ba906ed52 100644 --- a/source/extensions/extensions_metadata.yaml +++ 
b/source/extensions/extensions_metadata.yaml @@ -217,6 +217,13 @@ envoy.filters.http.bandwidth_limit: status: stable type_urls: - envoy.extensions.filters.http.bandwidth_limit.v3.BandwidthLimit +envoy.filters.http.basic_auth: + categories: + - envoy.filters.http + security_posture: robust_to_untrusted_downstream + status: alpha + type_urls: + - envoy.extensions.filters.http.basic_auth.v3.BasicAuth envoy.filters.http.buffer: categories: - envoy.filters.http @@ -1148,6 +1155,13 @@ envoy.tracers.opentelemetry: status: wip type_urls: - envoy.config.trace.v3.OpenTelemetryConfig +envoy.tracers.opentelemetry.samplers.always_on: + categories: + - envoy.tracers.opentelemetry.samplers + security_posture: unknown + status: wip + type_urls: + - envoy.extensions.tracers.opentelemetry.samplers.v3.AlwaysOnSamplerConfig envoy.tracers.skywalking: categories: - envoy.tracers @@ -1676,3 +1690,10 @@ envoy.filters.network.set_filter_state: status: alpha type_urls: - envoy.extensions.filters.network.set_filter_state.v3.Config +envoy.tracers.opentelemetry.resource_detectors.environment: + categories: + - envoy.tracers.opentelemetry.resource_detectors + security_posture: unknown + status: wip + type_urls: + - envoy.extensions.tracers.opentelemetry.resource_detectors.v3.EnvironmentResourceDetectorConfig diff --git a/source/extensions/filters/common/ext_authz/check_request_utils.cc b/source/extensions/filters/common/ext_authz/check_request_utils.cc index 6d65dc7d4a4d..8a62aa1cdc98 100644 --- a/source/extensions/filters/common/ext_authz/check_request_utils.cc +++ b/source/extensions/filters/common/ext_authz/check_request_utils.cc @@ -192,6 +192,7 @@ void CheckRequestUtils::createHttpCheck( const Envoy::Http::RequestHeaderMap& headers, Protobuf::Map&& context_extensions, envoy::config::core::v3::Metadata&& metadata_context, + envoy::config::core::v3::Metadata&& route_metadata_context, envoy::service::auth::v3::CheckRequest& request, uint64_t max_request_bytes, bool pack_as_bytes, bool 
include_peer_certificate, bool include_tls_session, const Protobuf::Map& destination_labels, @@ -224,6 +225,7 @@ void CheckRequestUtils::createHttpCheck( // Fill in the context extensions and metadata context. (*attrs->mutable_context_extensions()) = std::move(context_extensions); (*attrs->mutable_metadata_context()) = std::move(metadata_context); + (*attrs->mutable_route_metadata_context()) = std::move(route_metadata_context); } void CheckRequestUtils::createTcpCheck( diff --git a/source/extensions/filters/common/ext_authz/check_request_utils.h b/source/extensions/filters/common/ext_authz/check_request_utils.h index 96d10334bcbb..1390485c0ae0 100644 --- a/source/extensions/filters/common/ext_authz/check_request_utils.h +++ b/source/extensions/filters/common/ext_authz/check_request_utils.h @@ -93,6 +93,7 @@ class CheckRequestUtils { const Envoy::Http::RequestHeaderMap& headers, Protobuf::Map&& context_extensions, envoy::config::core::v3::Metadata&& metadata_context, + envoy::config::core::v3::Metadata&& route_metadata_context, envoy::service::auth::v3::CheckRequest& request, uint64_t max_request_bytes, bool pack_as_bytes, bool include_peer_certificate, bool include_tls_session, diff --git a/source/extensions/filters/http/basic_auth/BUILD b/source/extensions/filters/http/basic_auth/BUILD new file mode 100644 index 000000000000..f610d4fee905 --- /dev/null +++ b/source/extensions/filters/http/basic_auth/BUILD @@ -0,0 +1,39 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_library( + name = "basic_auth_lib", + srcs = ["basic_auth_filter.cc"], + hdrs = ["basic_auth_filter.h"], + external_deps = ["ssl"], + deps = [ + "//envoy/server:filter_config_interface", + "//source/common/common:base64_lib", + "//source/common/config:utility_lib", + "//source/common/http:header_map_lib", + "//source/common/protobuf:utility_lib", + 
"//source/extensions/filters/http/common:pass_through_filter_lib", + ], +) + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + deps = [ + ":basic_auth_lib", + "//envoy/registry", + "//source/common/config:datasource_lib", + "//source/common/protobuf:utility_lib", + "//source/extensions/filters/http/common:factory_base_lib", + "@envoy_api//envoy/extensions/filters/http/basic_auth/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/filters/http/basic_auth/basic_auth_filter.cc b/source/extensions/filters/http/basic_auth/basic_auth_filter.cc new file mode 100644 index 000000000000..ae7b10e6c573 --- /dev/null +++ b/source/extensions/filters/http/basic_auth/basic_auth_filter.cc @@ -0,0 +1,91 @@ +#include "source/extensions/filters/http/basic_auth/basic_auth_filter.h" + +#include + +#include "source/common/common/base64.h" +#include "source/common/http/header_utility.h" +#include "source/common/http/headers.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace BasicAuth { + +namespace { + +// Function to compute SHA1 hash +std::string computeSHA1(absl::string_view password) { + unsigned char hash[SHA_DIGEST_LENGTH]; + + // Calculate the SHA-1 hash + SHA1(reinterpret_cast(password.data()), password.length(), hash); + + // Encode the binary hash in Base64 + return Base64::encode(reinterpret_cast(hash), SHA_DIGEST_LENGTH); +} + +} // namespace + +FilterConfig::FilterConfig(UserMapConstPtr users, const std::string& stats_prefix, + Stats::Scope& scope) + : users_(std::move(users)), stats_(generateStats(stats_prefix + "basic_auth.", scope)) {} + +bool FilterConfig::validateUser(absl::string_view username, absl::string_view password) const { + auto user = users_->find(username); + if (user == users_->end()) { + return false; + } + + return computeSHA1(password) == user->second.hash; +} + +BasicAuthFilter::BasicAuthFilter(FilterConfigConstSharedPtr config) : config_(std::move(config)) {} + 
+Http::FilterHeadersStatus BasicAuthFilter::decodeHeaders(Http::RequestHeaderMap& headers, bool) { + auto auth_header = headers.get(Http::CustomHeaders::get().Authorization); + if (!auth_header.empty()) { + absl::string_view auth_value = auth_header[0]->value().getStringView(); + + if (absl::StartsWith(auth_value, "Basic ")) { + // Extract and decode the Base64 part of the header. + absl::string_view base64Token = auth_value.substr(6); + const std::string decoded = Base64::decodeWithoutPadding(base64Token); + + // The decoded string is in the format "username:password". + const size_t colon_pos = decoded.find(':'); + + if (colon_pos != std::string::npos) { + absl::string_view decoded_view = decoded; + absl::string_view username = decoded_view.substr(0, colon_pos); + absl::string_view password = decoded_view.substr(colon_pos + 1); + + if (config_->validateUser(username, password)) { + config_->stats().allowed_.inc(); + return Http::FilterHeadersStatus::Continue; + } else { + config_->stats().denied_.inc(); + decoder_callbacks_->sendLocalReply( + Http::Code::Unauthorized, + "User authentication failed. Invalid username/password combination", nullptr, + absl::nullopt, "invalid_credential_for_basic_auth"); + return Http::FilterHeadersStatus::StopIteration; + } + } + } + } + + config_->stats().denied_.inc(); + decoder_callbacks_->sendLocalReply(Http::Code::Unauthorized, + "User authentication failed. 
Missing username and password", + nullptr, absl::nullopt, "no_credential_for_basic_auth"); + return Http::FilterHeadersStatus::StopIteration; +} + +void BasicAuthFilter::setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) { + decoder_callbacks_ = &callbacks; +} + +} // namespace BasicAuth +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/basic_auth/basic_auth_filter.h b/source/extensions/filters/http/basic_auth/basic_auth_filter.h new file mode 100644 index 000000000000..d900b304eb67 --- /dev/null +++ b/source/extensions/filters/http/basic_auth/basic_auth_filter.h @@ -0,0 +1,80 @@ +#pragma once + +#include "envoy/stats/stats_macros.h" + +#include "source/common/common/logger.h" +#include "source/extensions/filters/http/common/pass_through_filter.h" + +#include "absl/container/flat_hash_map.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace BasicAuth { + +/** + * All Basic Auth filter stats. @see stats_macros.h + */ +#define ALL_BASIC_AUTH_STATS(COUNTER) \ + COUNTER(allowed) \ + COUNTER(denied) + +/** + * Struct definition for Basic Auth stats. @see stats_macros.h + */ +struct BasicAuthStats { + ALL_BASIC_AUTH_STATS(GENERATE_COUNTER_STRUCT) +}; + +/** + * Struct definition for username password pairs. + */ +struct User { + // the user name + std::string name; + // the hashed password, see https://httpd.apache.org/docs/2.4/misc/password_encryptions.html + std::string hash; +}; + +using UserMapConstPtr = + std::unique_ptr>; // username, User + +/** + * Configuration for the Basic Auth filter. 
+ */ +class FilterConfig { +public: + FilterConfig(UserMapConstPtr users, const std::string& stats_prefix, Stats::Scope& scope); + const BasicAuthStats& stats() const { return stats_; } + bool validateUser(absl::string_view username, absl::string_view password) const; + +private: + static BasicAuthStats generateStats(const std::string& prefix, Stats::Scope& scope) { + return BasicAuthStats{ALL_BASIC_AUTH_STATS(POOL_COUNTER_PREFIX(scope, prefix))}; + } + + UserMapConstPtr users_; + BasicAuthStats stats_; +}; +using FilterConfigConstSharedPtr = std::shared_ptr; + +// The Envoy filter to process HTTP basic auth. +class BasicAuthFilter : public Http::PassThroughDecoderFilter, + public Logger::Loggable { +public: + BasicAuthFilter(FilterConfigConstSharedPtr config); + + // Http::StreamDecoderFilter + Http::FilterHeadersStatus decodeHeaders(Http::RequestHeaderMap& headers, bool) override; + void setDecoderFilterCallbacks(Http::StreamDecoderFilterCallbacks& callbacks) override; + +private: + // The callback function. 
+ Http::StreamDecoderFilterCallbacks* decoder_callbacks_; + FilterConfigConstSharedPtr config_; +}; + +} // namespace BasicAuth +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/basic_auth/config.cc b/source/extensions/filters/http/basic_auth/config.cc new file mode 100644 index 000000000000..02a3582cad69 --- /dev/null +++ b/source/extensions/filters/http/basic_auth/config.cc @@ -0,0 +1,69 @@ +#include "source/extensions/filters/http/basic_auth/config.h" + +#include "source/common/config/datasource.h" +#include "source/extensions/filters/http/basic_auth/basic_auth_filter.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace BasicAuth { + +using envoy::extensions::filters::http::basic_auth::v3::BasicAuth; + +namespace { + +UserMapConstPtr readHtpasswd(const std::string& htpasswd) { + std::unique_ptr> users = + std::make_unique>(); + std::istringstream htpsswd_ss(htpasswd); + std::string line; + + while (std::getline(htpsswd_ss, line)) { + const size_t colon_pos = line.find(':'); + + if (colon_pos != std::string::npos) { + std::string name = line.substr(0, colon_pos); + std::string hash = line.substr(colon_pos + 1); + + if (name.empty()) { + throw EnvoyException("basic auth: invalid user name"); + } + + if (absl::StartsWith(hash, "{SHA}")) { + hash = hash.substr(5); + // The base64 encoded SHA1 hash is 28 bytes long + if (hash.length() != 28) { + throw EnvoyException("basic auth: invalid SHA hash length"); + } + + users->insert({name, {name, hash}}); + continue; + } + } + + throw EnvoyException("basic auth: unsupported htpasswd format: please use {SHA}"); + } + + return users; +} + +} // namespace + +Http::FilterFactoryCb BasicAuthFilterFactory::createFilterFactoryFromProtoTyped( + const BasicAuth& proto_config, const std::string& stats_prefix, + Server::Configuration::FactoryContext& context) { + const std::string htpasswd = 
Config::DataSource::read(proto_config.users(), false, context.api()); + UserMapConstPtr users = readHtpasswd(htpasswd); + FilterConfigConstSharedPtr config = + std::make_unique(std::move(users), stats_prefix, context.scope()); + return [config](Http::FilterChainFactoryCallbacks& callbacks) -> void { + callbacks.addStreamDecoderFilter(std::make_shared(config)); + }; +} + +REGISTER_FACTORY(BasicAuthFilterFactory, Server::Configuration::NamedHttpFilterConfigFactory); + +} // namespace BasicAuth +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/basic_auth/config.h b/source/extensions/filters/http/basic_auth/config.h new file mode 100644 index 000000000000..7abebaaa789c --- /dev/null +++ b/source/extensions/filters/http/basic_auth/config.h @@ -0,0 +1,27 @@ +#pragma once + +#include "envoy/extensions/filters/http/basic_auth/v3/basic_auth.pb.h" +#include "envoy/extensions/filters/http/basic_auth/v3/basic_auth.pb.validate.h" + +#include "source/extensions/filters/http/common/factory_base.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace BasicAuth { + +class BasicAuthFilterFactory + : public Common::FactoryBase { +public: + BasicAuthFilterFactory() : FactoryBase("envoy.filters.http.basic_auth") {} + +private: + Http::FilterFactoryCb createFilterFactoryFromProtoTyped( + const envoy::extensions::filters::http::basic_auth::v3::BasicAuth& config, + const std::string& stats_prefix, Server::Configuration::FactoryContext& context) override; +}; + +} // namespace BasicAuth +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/filters/http/ext_authz/ext_authz.cc b/source/extensions/filters/http/ext_authz/ext_authz.cc index 3de1424b64ec..07db5ae73433 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.cc +++ b/source/extensions/filters/http/ext_authz/ext_authz.cc @@ -1,6 +1,9 @@ #include 
"source/extensions/filters/http/ext_authz/ext_authz.h" #include +#include +#include +#include #include "envoy/config/core/v3/base.pb.h" @@ -15,6 +18,46 @@ namespace Extensions { namespace HttpFilters { namespace ExtAuthz { +namespace { + +using MetadataProto = ::envoy::config::core::v3::Metadata; + +void fillMetadataContext(const std::vector& source_metadata, + const std::vector& metadata_context_namespaces, + const std::vector& typed_metadata_context_namespaces, + MetadataProto& metadata_context) { + for (const auto& context_key : metadata_context_namespaces) { + for (const MetadataProto* metadata : source_metadata) { + if (metadata == nullptr) { + continue; + } + const auto& filter_metadata = metadata->filter_metadata(); + if (const auto metadata_it = filter_metadata.find(context_key); + metadata_it != filter_metadata.end()) { + (*metadata_context.mutable_filter_metadata())[metadata_it->first] = metadata_it->second; + break; + } + } + } + + for (const auto& context_key : typed_metadata_context_namespaces) { + for (const MetadataProto* metadata : source_metadata) { + if (metadata == nullptr) { + continue; + } + const auto& typed_filter_metadata = metadata->typed_filter_metadata(); + if (const auto metadata_it = typed_filter_metadata.find(context_key); + metadata_it != typed_filter_metadata.end()) { + (*metadata_context.mutable_typed_filter_metadata())[metadata_it->first] = + metadata_it->second; + break; + } + } + } +} + +} // namespace + void FilterConfigPerRoute::merge(const FilterConfigPerRoute& other) { // We only merge context extensions here, and leave boolean flags untouched since those flags are // not used from the merged config. @@ -41,47 +84,29 @@ void Filter::initiateCall(const Http::RequestHeaderMap& headers) { context_extensions = maybe_merged_per_route_config.value().takeContextExtensions(); } + // If metadata_context_namespaces or typed_metadata_context_namespaces is specified, + // pass matching filter metadata to the ext_authz service. 
+ // If metadata key is set in both the connection and request metadata, + // then the value will be the request metadata value. envoy::config::core::v3::Metadata metadata_context; - - // If metadata_context_namespaces is specified, pass matching filter metadata to the ext_authz - // service. If metadata key is set in both the connection and request metadata then the value - // will be the request metadata value. - const auto& connection_metadata = - decoder_callbacks_->connection()->streamInfo().dynamicMetadata().filter_metadata(); - const auto& request_metadata = - decoder_callbacks_->streamInfo().dynamicMetadata().filter_metadata(); - for (const auto& context_key : config_->metadataContextNamespaces()) { - if (const auto metadata_it = request_metadata.find(context_key); - metadata_it != request_metadata.end()) { - (*metadata_context.mutable_filter_metadata())[metadata_it->first] = metadata_it->second; - } else if (const auto metadata_it = connection_metadata.find(context_key); - metadata_it != connection_metadata.end()) { - (*metadata_context.mutable_filter_metadata())[metadata_it->first] = metadata_it->second; - } - } - - // If typed_metadata_context_namespaces is specified, pass matching typed filter metadata to the - // ext_authz service. If metadata key is set in both the connection and request metadata then - // the value will be the request metadata value. 
- const auto& connection_typed_metadata = - decoder_callbacks_->connection()->streamInfo().dynamicMetadata().typed_filter_metadata(); - const auto& request_typed_metadata = - decoder_callbacks_->streamInfo().dynamicMetadata().typed_filter_metadata(); - for (const auto& context_key : config_->typedMetadataContextNamespaces()) { - if (const auto metadata_it = request_typed_metadata.find(context_key); - metadata_it != request_typed_metadata.end()) { - (*metadata_context.mutable_typed_filter_metadata())[metadata_it->first] = metadata_it->second; - } else if (const auto metadata_it = connection_typed_metadata.find(context_key); - metadata_it != connection_typed_metadata.end()) { - (*metadata_context.mutable_typed_filter_metadata())[metadata_it->first] = metadata_it->second; - } + fillMetadataContext({&decoder_callbacks_->streamInfo().dynamicMetadata(), + &decoder_callbacks_->connection()->streamInfo().dynamicMetadata()}, + config_->metadataContextNamespaces(), + config_->typedMetadataContextNamespaces(), metadata_context); + + // Fill route_metadata_context from the selected route's metadata. 
+ envoy::config::core::v3::Metadata route_metadata_context; + if (decoder_callbacks_->route() != nullptr) { + fillMetadataContext({&decoder_callbacks_->route()->metadata()}, + config_->routeMetadataContextNamespaces(), + config_->routeTypedMetadataContextNamespaces(), route_metadata_context); } Filters::Common::ExtAuthz::CheckRequestUtils::createHttpCheck( decoder_callbacks_, headers, std::move(context_extensions), std::move(metadata_context), - check_request_, config_->maxRequestBytes(), config_->packAsBytes(), - config_->includePeerCertificate(), config_->includeTLSSession(), config_->destinationLabels(), - config_->requestHeaderMatchers()); + std::move(route_metadata_context), check_request_, config_->maxRequestBytes(), + config_->packAsBytes(), config_->includePeerCertificate(), config_->includeTLSSession(), + config_->destinationLabels(), config_->requestHeaderMatchers()); ENVOY_STREAM_LOG(trace, "ext_authz filter calling authorization server", *decoder_callbacks_); // Store start time of ext_authz filter call @@ -369,19 +394,20 @@ void Filter::onComplete(Filters::Common::ExtAuthz::ResponsePtr&& response) { if (cluster_) { config_->incCounter(cluster_->statsScope(), config_->ext_authz_denied_); - - Http::CodeStats::ResponseStatInfo info{config_->scope(), - cluster_->statsScope(), - empty_stat_name, - enumToInt(response->status_code), - true, - empty_stat_name, - empty_stat_name, - empty_stat_name, - empty_stat_name, - empty_stat_name, - false}; - config_->httpContext().codeStats().chargeResponseStat(info, false); + if (config_->chargeClusterResponseStats()) { + Http::CodeStats::ResponseStatInfo info{config_->scope(), + cluster_->statsScope(), + empty_stat_name, + enumToInt(response->status_code), + true, + empty_stat_name, + empty_stat_name, + empty_stat_name, + empty_stat_name, + empty_stat_name, + false}; + config_->httpContext().codeStats().chargeResponseStat(info, false); + } } // setResponseFlag must be called before sendLocalReply diff --git 
a/source/extensions/filters/http/ext_authz/ext_authz.h b/source/extensions/filters/http/ext_authz/ext_authz.h index 0175cbc1c48a..084bff2704e6 100644 --- a/source/extensions/filters/http/ext_authz/ext_authz.h +++ b/source/extensions/filters/http/ext_authz/ext_authz.h @@ -88,8 +88,15 @@ class FilterConfig { config.metadata_context_namespaces().end()), typed_metadata_context_namespaces_(config.typed_metadata_context_namespaces().begin(), config.typed_metadata_context_namespaces().end()), + route_metadata_context_namespaces_(config.route_metadata_context_namespaces().begin(), + config.route_metadata_context_namespaces().end()), + route_typed_metadata_context_namespaces_( + config.route_typed_metadata_context_namespaces().begin(), + config.route_typed_metadata_context_namespaces().end()), include_peer_certificate_(config.include_peer_certificate()), include_tls_session_(config.include_tls_session()), + charge_cluster_response_stats_( + PROTOBUF_GET_WRAPPED_OR_DEFAULT(config, charge_cluster_response_stats, true)), stats_(generateStats(stats_prefix, config.stat_prefix(), scope)), ext_authz_ok_(pool_.add(createPoolStatName(config.stat_prefix(), "ok"))), ext_authz_denied_(pool_.add(createPoolStatName(config.stat_prefix(), "denied"))), @@ -167,6 +174,14 @@ class FilterConfig { return typed_metadata_context_namespaces_; } + const std::vector& routeMetadataContextNamespaces() { + return route_metadata_context_namespaces_; + } + + const std::vector& routeTypedMetadataContextNamespaces() { + return route_typed_metadata_context_namespaces_; + } + const ExtAuthzFilterStats& stats() const { return stats_; } void incCounter(Stats::Scope& scope, Stats::StatName name) { @@ -177,6 +192,8 @@ class FilterConfig { bool includeTLSSession() const { return include_tls_session_; } const LabelsMap& destinationLabels() const { return destination_labels_; } + bool chargeClusterResponseStats() const { return charge_cluster_response_stats_; } + const Filters::Common::ExtAuthz::MatcherSharedPtr& 
requestHeaderMatchers() const { return request_header_matchers_; } @@ -227,9 +244,12 @@ class FilterConfig { const std::vector metadata_context_namespaces_; const std::vector typed_metadata_context_namespaces_; + const std::vector route_metadata_context_namespaces_; + const std::vector route_typed_metadata_context_namespaces_; const bool include_peer_certificate_; const bool include_tls_session_; + const bool charge_cluster_response_stats_; // The stats for the filter. ExtAuthzFilterStats stats_; diff --git a/source/extensions/filters/http/well_known_names.h b/source/extensions/filters/http/well_known_names.h index cfd1783b55b4..8e33a0699c0a 100644 --- a/source/extensions/filters/http/well_known_names.h +++ b/source/extensions/filters/http/well_known_names.h @@ -16,6 +16,8 @@ class HttpFilterNameValues { const std::string Buffer = "envoy.filters.http.buffer"; // Bandwidth limit filter const std::string BandwidthLimit = "envoy.filters.http.bandwidth_limit"; + // Basic Auth filter + const std::string BasicAuth = "envoy.filters.http.basic_auth"; // Cache filter const std::string Cache = "envoy.filters.http.cache"; // CDN Loop filter diff --git a/source/extensions/filters/network/redis_proxy/router_impl.cc b/source/extensions/filters/network/redis_proxy/router_impl.cc index 59cba4554876..4aae1dad7dfa 100644 --- a/source/extensions/filters/network/redis_proxy/router_impl.cc +++ b/source/extensions/filters/network/redis_proxy/router_impl.cc @@ -106,6 +106,13 @@ RouteSharedPtr PrefixRoutes::upstreamPool(std::string& key, if (value == nullptr) { // prefix route not found, default to catch all route. value = catch_all_route_; + // prefix route not found, check if catch_all_route is defined to fallback to. + if (catch_all_route_ != nullptr) { + value = catch_all_route_; + } else { + // no route found. 
+ return value; + } } if (value->removePrefix()) { diff --git a/source/extensions/filters/udp/udp_proxy/session_filters/dynamic_forward_proxy/proxy_filter.cc b/source/extensions/filters/udp/udp_proxy/session_filters/dynamic_forward_proxy/proxy_filter.cc index fdae4b4b5e83..fa0fc7f62c1d 100644 --- a/source/extensions/filters/udp/udp_proxy/session_filters/dynamic_forward_proxy/proxy_filter.cc +++ b/source/extensions/filters/udp/udp_proxy/session_filters/dynamic_forward_proxy/proxy_filter.cc @@ -106,13 +106,21 @@ ReadFilterStatus ProxyFilter::onData(Network::UdpRecvData& data) { return ReadFilterStatus::StopIteration; } -void ProxyFilter::onLoadDnsCacheComplete(const Common::DynamicForwardProxy::DnsHostInfoSharedPtr&) { +void ProxyFilter::onLoadDnsCacheComplete( + const Common::DynamicForwardProxy::DnsHostInfoSharedPtr& host_info) { ENVOY_LOG(debug, "load DNS cache complete, continuing"); + if (!host_info || !host_info->address()) { + ENVOY_LOG(debug, "empty DNS respose received"); + } + ASSERT(circuit_breaker_ != nullptr); circuit_breaker_.reset(); load_dns_cache_completed_ = true; - read_callbacks_->continueFilterChain(); + + if (!read_callbacks_->continueFilterChain()) { + return; + } while (!datagrams_buffer_.empty()) { BufferedDatagramPtr buffered_datagram = std::move(datagrams_buffer_.front()); diff --git a/source/extensions/filters/udp/udp_proxy/session_filters/filter.h b/source/extensions/filters/udp/udp_proxy/session_filters/filter.h index 44ed8ab08790..d750b41d050b 100644 --- a/source/extensions/filters/udp/udp_proxy/session_filters/filter.h +++ b/source/extensions/filters/udp/udp_proxy/session_filters/filter.h @@ -43,8 +43,9 @@ class ReadFilterCallbacks : public FilterCallbacks { /** * If a read filter stopped filter iteration, continueFilterChain() can be called to continue the * filter chain. It will have onNewSession() called if it was not previously called. + * @return false if the session is removed and no longer valid, otherwise returns true. 
*/ - virtual void continueFilterChain() PURE; + virtual bool continueFilterChain() PURE; }; class WriteFilterCallbacks : public FilterCallbacks {}; diff --git a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.cc b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.cc index 935480811d1b..7594c861b954 100644 --- a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.cc +++ b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.cc @@ -173,11 +173,13 @@ UdpProxyFilter::ActiveSession* UdpProxyFilter::ClusterInfo::createSessionWithOpt } new_session->createFilterChain(); - new_session->onNewSession(); - auto new_session_ptr = new_session.get(); - sessions_.emplace(std::move(new_session)); + if (new_session->onNewSession()) { + auto new_session_ptr = new_session.get(); + sessions_.emplace(std::move(new_session)); + return new_session_ptr; + } - return new_session_ptr; + return nullptr; } Upstream::HostConstSharedPtr UdpProxyFilter::ClusterInfo::chooseHost( @@ -382,7 +384,7 @@ void UdpProxyFilter::UdpActiveSession::onReadReady() { cluster_.filter_.read_callbacks_->udpListener().flush(); } -void UdpProxyFilter::ActiveSession::onNewSession() { +bool UdpProxyFilter::ActiveSession::onNewSession() { for (auto& active_read_filter : read_filters_) { if (active_read_filter->initialized_) { // The filter may call continueFilterChain() in onNewSession(), causing next @@ -393,11 +395,11 @@ void UdpProxyFilter::ActiveSession::onNewSession() { active_read_filter->initialized_ = true; auto status = active_read_filter->read_filter_->onNewSession(); if (status == ReadFilterStatus::StopIteration) { - return; + return true; } } - createUpstream(); + return createUpstream(); } void UdpProxyFilter::ActiveSession::onData(Network::UdpRecvData& data) { @@ -467,7 +469,7 @@ void UdpProxyFilter::UdpActiveSession::writeUpstream(Network::UdpRecvData& data) } } -void UdpProxyFilter::ActiveSession::onContinueFilterChain(ActiveReadFilter* filter) { +bool 
UdpProxyFilter::ActiveSession::onContinueFilterChain(ActiveReadFilter* filter) { ASSERT(filter != nullptr); std::list::iterator entry = std::next(filter->entry()); @@ -479,18 +481,23 @@ void UdpProxyFilter::ActiveSession::onContinueFilterChain(ActiveReadFilter* filt (*entry)->initialized_ = true; auto status = (*entry)->read_filter_->onNewSession(); if (status == ReadFilterStatus::StopIteration) { - break; + return true; } } - createUpstream(); + if (!createUpstream()) { + cluster_.removeSession(this); + return false; + } + + return true; } -void UdpProxyFilter::UdpActiveSession::createUpstream() { +bool UdpProxyFilter::UdpActiveSession::createUpstream() { if (udp_socket_) { // A session filter may call on continueFilterChain(), after already creating the socket, // so we first check that the socket was not created already. - return; + return true; } if (!host_) { @@ -498,12 +505,13 @@ void UdpProxyFilter::UdpActiveSession::createUpstream() { if (host_ == nullptr) { ENVOY_LOG(debug, "cannot find any valid host."); cluster_.cluster_.info()->trafficStats()->upstream_cx_none_healthy_.inc(); - return; + return false; } } cluster_.addSession(host_.get(), this); createUdpSocket(host_); + return true; } void UdpProxyFilter::UdpActiveSession::createUdpSocket(const Upstream::HostConstSharedPtr& host) { @@ -793,26 +801,28 @@ UdpProxyFilter::TunnelingActiveSession::TunnelingActiveSession( ClusterInfo& cluster, Network::UdpRecvData::LocalPeerAddresses&& addresses) : ActiveSession(cluster, std::move(addresses), nullptr) {} -void UdpProxyFilter::TunnelingActiveSession::createUpstream() { +bool UdpProxyFilter::TunnelingActiveSession::createUpstream() { if (conn_pool_factory_) { // A session filter may call on continueFilterChain(), after already creating the upstream, // so we first check that the factory was not created already. 
- return; + return true; } conn_pool_factory_ = std::make_unique(); load_balancer_context_ = std::make_unique( cluster_.filter_.config_->hashPolicy(), addresses_.peer_, &udp_session_info_); - establishUpstreamConnection(); + return establishUpstreamConnection(); } -void UdpProxyFilter::TunnelingActiveSession::establishUpstreamConnection() { +bool UdpProxyFilter::TunnelingActiveSession::establishUpstreamConnection() { if (!createConnectionPool()) { ENVOY_LOG(debug, "failed to create upstream connection pool"); cluster_.cluster_stats_.sess_tunnel_failure_.inc(); - cluster_.removeSession(this); + return false; } + + return true; } bool UdpProxyFilter::TunnelingActiveSession::createConnectionPool() { @@ -900,9 +910,7 @@ void UdpProxyFilter::TunnelingActiveSession::onUpstreamEvent(Network::Connection event == Network::ConnectionEvent::LocalClose) { upstream_.reset(); - if (connecting) { - establishUpstreamConnection(); - } else { + if (!connecting || !establishUpstreamConnection()) { cluster_.removeSession(this); } } diff --git a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h index 8bf19ef77428..fc226272db62 100644 --- a/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h +++ b/source/extensions/filters/udp/udp_proxy/udp_proxy_filter.h @@ -457,7 +457,7 @@ class UdpProxyFilter : public Network::UdpListenerReadFilter, // SessionFilters::ReadFilterCallbacks uint64_t sessionId() const override { return parent_.sessionId(); }; StreamInfo::StreamInfo& streamInfo() override { return parent_.streamInfo(); }; - void continueFilterChain() override { parent_.onContinueFilterChain(this); } + bool continueFilterChain() override { return parent_.onContinueFilterChain(this); } void injectDatagramToFilterChain(Network::UdpRecvData& data) override { parent_.onInjectReadDatagramToFilterChain(this, data); } @@ -509,13 +509,13 @@ class UdpProxyFilter : public Network::UdpListenerReadFilter, return 
absl::nullopt; } - void onNewSession(); + bool onNewSession(); void onData(Network::UdpRecvData& data); void processUpstreamDatagram(Network::UdpRecvData& data); void writeDownstream(Network::UdpRecvData& data); void resetIdleTimer(); - virtual void createUpstream() PURE; + virtual bool createUpstream() PURE; virtual void writeUpstream(Network::UdpRecvData& data) PURE; virtual void onIdleTimer() PURE; @@ -525,7 +525,7 @@ class UdpProxyFilter : public Network::UdpListenerReadFilter, uint64_t sessionId() const { return session_id_; }; StreamInfo::StreamInfo& streamInfo() { return udp_session_info_; }; - void onContinueFilterChain(ActiveReadFilter* filter); + bool onContinueFilterChain(ActiveReadFilter* filter); void onInjectReadDatagramToFilterChain(ActiveReadFilter* filter, Network::UdpRecvData& data); void onInjectWriteDatagramToFilterChain(ActiveWriteFilter* filter, Network::UdpRecvData& data); @@ -595,7 +595,7 @@ class UdpProxyFilter : public Network::UdpListenerReadFilter, ~UdpActiveSession() override = default; // ActiveSession - void createUpstream() override; + bool createUpstream() override; void writeUpstream(Network::UdpRecvData& data) override; void onIdleTimer() override; @@ -644,7 +644,7 @@ class UdpProxyFilter : public Network::UdpListenerReadFilter, ~TunnelingActiveSession() override = default; // ActiveSession - void createUpstream() override; + bool createUpstream() override; void writeUpstream(Network::UdpRecvData& data) override; void onIdleTimer() override; @@ -666,7 +666,7 @@ class UdpProxyFilter : public Network::UdpListenerReadFilter, private: using BufferedDatagramPtr = std::unique_ptr; - void establishUpstreamConnection(); + bool establishUpstreamConnection(); bool createConnectionPool(); void maybeBufferDatagram(Network::UdpRecvData& data); void flushBuffer(); diff --git a/source/extensions/tracers/opentelemetry/BUILD b/source/extensions/tracers/opentelemetry/BUILD index 58d0a20ba5b7..ea305b4ad950 100644 --- 
a/source/extensions/tracers/opentelemetry/BUILD +++ b/source/extensions/tracers/opentelemetry/BUILD @@ -41,6 +41,8 @@ envoy_cc_library( "//source/common/config:utility_lib", "//source/common/tracing:http_tracer_lib", "//source/extensions/tracers/common:factory_base_lib", + "//source/extensions/tracers/opentelemetry/resource_detectors:resource_detector_lib", + "//source/extensions/tracers/opentelemetry/samplers:sampler_lib", "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", "@opentelemetry_proto//:trace_cc_proto", ], diff --git a/source/extensions/tracers/opentelemetry/opentelemetry_tracer_impl.cc b/source/extensions/tracers/opentelemetry/opentelemetry_tracer_impl.cc index 52e40c5cffbc..8d5786b8c232 100644 --- a/source/extensions/tracers/opentelemetry/opentelemetry_tracer_impl.cc +++ b/source/extensions/tracers/opentelemetry/opentelemetry_tracer_impl.cc @@ -10,6 +10,9 @@ #include "source/common/tracing/http_tracer_impl.h" #include "source/extensions/tracers/opentelemetry/grpc_trace_exporter.h" #include "source/extensions/tracers/opentelemetry/http_trace_exporter.h" +#include "source/extensions/tracers/opentelemetry/resource_detectors/resource_detector.h" +#include "source/extensions/tracers/opentelemetry/resource_detectors/resource_provider.h" +#include "source/extensions/tracers/opentelemetry/samplers/sampler.h" #include "source/extensions/tracers/opentelemetry/span_context.h" #include "source/extensions/tracers/opentelemetry/span_context_extractor.h" #include "source/extensions/tracers/opentelemetry/trace_exporter.h" @@ -23,21 +26,52 @@ namespace Extensions { namespace Tracers { namespace OpenTelemetry { +namespace { + +SamplerSharedPtr +tryCreateSamper(const envoy::config::trace::v3::OpenTelemetryConfig& opentelemetry_config, + Server::Configuration::TracerFactoryContext& context) { + SamplerSharedPtr sampler; + if (opentelemetry_config.has_sampler()) { + auto& sampler_config = opentelemetry_config.sampler(); + auto* factory = 
Envoy::Config::Utility::getFactory(sampler_config); + if (!factory) { + throw EnvoyException(fmt::format("Sampler factory not found: '{}'", sampler_config.name())); + } + sampler = factory->createSampler(sampler_config.typed_config(), context); + } + return sampler; +} + +} // namespace + Driver::Driver(const envoy::config::trace::v3::OpenTelemetryConfig& opentelemetry_config, Server::Configuration::TracerFactoryContext& context) + : Driver(opentelemetry_config, context, ResourceProviderImpl{}) {} + +Driver::Driver(const envoy::config::trace::v3::OpenTelemetryConfig& opentelemetry_config, + Server::Configuration::TracerFactoryContext& context, + const ResourceProvider& resource_provider) : tls_slot_ptr_(context.serverFactoryContext().threadLocal().allocateSlot()), tracing_stats_{OPENTELEMETRY_TRACER_STATS( POOL_COUNTER_PREFIX(context.serverFactoryContext().scope(), "tracing.opentelemetry"))} { auto& factory_context = context.serverFactoryContext(); + Resource resource = resource_provider.getResource(opentelemetry_config, context); + ResourceConstSharedPtr resource_ptr = std::make_shared(std::move(resource)); + if (opentelemetry_config.has_grpc_service() && opentelemetry_config.has_http_service()) { throw EnvoyException( "OpenTelemetry Tracer cannot have both gRPC and HTTP exporters configured. " "OpenTelemetry tracer will be disabled."); } + // Create the sampler if configured + SamplerSharedPtr sampler = tryCreateSamper(opentelemetry_config, context); + // Create the tracer in Thread Local Storage. 
- tls_slot_ptr_->set([opentelemetry_config, &factory_context, this](Event::Dispatcher& dispatcher) { + tls_slot_ptr_->set([opentelemetry_config, &factory_context, this, resource_ptr, + sampler](Event::Dispatcher& dispatcher) { OpenTelemetryTraceExporterPtr exporter; if (opentelemetry_config.has_grpc_service()) { Grpc::AsyncClientFactoryPtr&& factory = @@ -52,8 +86,7 @@ Driver::Driver(const envoy::config::trace::v3::OpenTelemetryConfig& opentelemetr } TracerPtr tracer = std::make_unique( std::move(exporter), factory_context.timeSource(), factory_context.api().randomGenerator(), - factory_context.runtime(), dispatcher, tracing_stats_, opentelemetry_config.service_name()); - + factory_context.runtime(), dispatcher, tracing_stats_, resource_ptr, sampler); return std::make_shared(std::move(tracer)); }); } @@ -70,7 +103,6 @@ Tracing::SpanPtr Driver::startSpan(const Tracing::Config& config, // No propagation header, so we can create a fresh span with the given decision. Tracing::SpanPtr new_open_telemetry_span = tracer.startSpan(config, operation_name, stream_info.startTime(), tracing_decision); - new_open_telemetry_span->setSampled(tracing_decision.traced); return new_open_telemetry_span; } else { // Try to extract the span context. If we can't, just return a null span. 
diff --git a/source/extensions/tracers/opentelemetry/opentelemetry_tracer_impl.h b/source/extensions/tracers/opentelemetry/opentelemetry_tracer_impl.h index 5083cff22f6e..d197ba2d5f97 100644 --- a/source/extensions/tracers/opentelemetry/opentelemetry_tracer_impl.h +++ b/source/extensions/tracers/opentelemetry/opentelemetry_tracer_impl.h @@ -8,6 +8,8 @@ #include "source/common/common/logger.h" #include "source/common/singleton/const_singleton.h" #include "source/extensions/tracers/common/factory_base.h" +#include "source/extensions/tracers/opentelemetry/grpc_trace_exporter.h" +#include "source/extensions/tracers/opentelemetry/resource_detectors/resource_provider.h" #include "source/extensions/tracers/opentelemetry/tracer.h" namespace Envoy { @@ -31,6 +33,10 @@ class Driver : Logger::Loggable, public Tracing::Driver { Driver(const envoy::config::trace::v3::OpenTelemetryConfig& opentelemetry_config, Server::Configuration::TracerFactoryContext& context); + Driver(const envoy::config::trace::v3::OpenTelemetryConfig& opentelemetry_config, + Server::Configuration::TracerFactoryContext& context, + const ResourceProvider& resource_provider); + // Tracing::Driver Tracing::SpanPtr startSpan(const Tracing::Config& config, Tracing::TraceContext& trace_context, const StreamInfo::StreamInfo& stream_info, diff --git a/source/extensions/tracers/opentelemetry/resource_detectors/BUILD b/source/extensions/tracers/opentelemetry/resource_detectors/BUILD new file mode 100644 index 000000000000..c8b064de43e4 --- /dev/null +++ b/source/extensions/tracers/opentelemetry/resource_detectors/BUILD @@ -0,0 +1,27 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_library( + name = "resource_detector_lib", + srcs = [ + "resource_provider.cc", + ], + hdrs = [ + "resource_detector.h", + "resource_provider.h", + ], + deps = [ + "//envoy/config:typed_config_interface", + 
"//envoy/server:tracer_config_interface", + "//source/common/common:logger_lib", + "//source/common/config:utility_lib", + "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/tracers/opentelemetry/resource_detectors/environment/BUILD b/source/extensions/tracers/opentelemetry/resource_detectors/environment/BUILD new file mode 100644 index 000000000000..3a0026dbd0dd --- /dev/null +++ b/source/extensions/tracers/opentelemetry/resource_detectors/environment/BUILD @@ -0,0 +1,33 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + deps = [ + ":environment_resource_detector_lib", + "//envoy/registry", + "//source/common/config:utility_lib", + "@envoy_api//envoy/extensions/tracers/opentelemetry/resource_detectors/v3:pkg_cc_proto", + ], +) + +envoy_cc_library( + name = "environment_resource_detector_lib", + srcs = ["environment_resource_detector.cc"], + hdrs = ["environment_resource_detector.h"], + deps = [ + "//source/common/config:datasource_lib", + "//source/extensions/tracers/opentelemetry/resource_detectors:resource_detector_lib", + "@envoy_api//envoy/extensions/tracers/opentelemetry/resource_detectors/v3:pkg_cc_proto", + ], +) diff --git a/source/extensions/tracers/opentelemetry/resource_detectors/environment/config.cc b/source/extensions/tracers/opentelemetry/resource_detectors/environment/config.cc new file mode 100644 index 000000000000..5216a959ce1d --- /dev/null +++ b/source/extensions/tracers/opentelemetry/resource_detectors/environment/config.cc @@ -0,0 +1,35 @@ +#include "source/extensions/tracers/opentelemetry/resource_detectors/environment/config.h" + +#include "envoy/extensions/tracers/opentelemetry/resource_detectors/v3/environment_resource_detector.pb.h" +#include 
"envoy/extensions/tracers/opentelemetry/resource_detectors/v3/environment_resource_detector.pb.validate.h" + +#include "source/common/config/utility.h" +#include "source/extensions/tracers/opentelemetry/resource_detectors/environment/environment_resource_detector.h" + +namespace Envoy { +namespace Extensions { +namespace Tracers { +namespace OpenTelemetry { + +ResourceDetectorPtr EnvironmentResourceDetectorFactory::createResourceDetector( + const Protobuf::Message& message, Server::Configuration::TracerFactoryContext& context) { + + auto mptr = Envoy::Config::Utility::translateAnyToFactoryConfig( + dynamic_cast(message), context.messageValidationVisitor(), *this); + + const auto& proto_config = MessageUtil::downcastAndValidate< + const envoy::extensions::tracers::opentelemetry::resource_detectors::v3:: + EnvironmentResourceDetectorConfig&>(*mptr, context.messageValidationVisitor()); + + return std::make_unique(proto_config, context); +} + +/** + * Static registration for the Env resource detector factory. @see RegisterFactory. + */ +REGISTER_FACTORY(EnvironmentResourceDetectorFactory, ResourceDetectorFactory); + +} // namespace OpenTelemetry +} // namespace Tracers +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/tracers/opentelemetry/resource_detectors/environment/config.h b/source/extensions/tracers/opentelemetry/resource_detectors/environment/config.h new file mode 100644 index 000000000000..a2bf1f72025f --- /dev/null +++ b/source/extensions/tracers/opentelemetry/resource_detectors/environment/config.h @@ -0,0 +1,46 @@ +#pragma once + +#include + +#include "envoy/extensions/tracers/opentelemetry/resource_detectors/v3/environment_resource_detector.pb.h" + +#include "source/extensions/tracers/opentelemetry/resource_detectors/resource_detector.h" + +namespace Envoy { +namespace Extensions { +namespace Tracers { +namespace OpenTelemetry { + +/** + * Config registration for the Environment resource detector. 
@see ResourceDetectorFactory. + */ +class EnvironmentResourceDetectorFactory : public ResourceDetectorFactory { +public: + /** + * @brief Create a Resource Detector that reads from the OTEL_RESOURCE_ATTRIBUTES + * environment variable. + * + * @param message The resource detector configuration. + * @param context The tracer factory context. + * @return ResourceDetectorPtr + */ + ResourceDetectorPtr + createResourceDetector(const Protobuf::Message& message, + Server::Configuration::TracerFactoryContext& context) override; + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique(); + } + + std::string name() const override { + return "envoy.tracers.opentelemetry.resource_detectors.environment"; + } +}; + +DECLARE_FACTORY(EnvironmentResourceDetectorFactory); + +} // namespace OpenTelemetry +} // namespace Tracers +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/tracers/opentelemetry/resource_detectors/environment/environment_resource_detector.cc b/source/extensions/tracers/opentelemetry/resource_detectors/environment/environment_resource_detector.cc new file mode 100644 index 000000000000..3c69e32b76f3 --- /dev/null +++ b/source/extensions/tracers/opentelemetry/resource_detectors/environment/environment_resource_detector.cc @@ -0,0 +1,60 @@ +#include "environment_resource_detector.h" + +#include +#include + +#include "source/common/config/datasource.h" +#include "source/extensions/tracers/opentelemetry/resource_detectors/resource_detector.h" + +namespace Envoy { +namespace Extensions { +namespace Tracers { +namespace OpenTelemetry { + +constexpr absl::string_view kOtelResourceAttributesEnv = "OTEL_RESOURCE_ATTRIBUTES"; + +/** + * @brief Detects a resource from the OTEL_RESOURCE_ATTRIBUTES environment variable + * Based on the OTel C++ SDK: + * https://github.com/open-telemetry/opentelemetry-cpp/blob/v1.11.0/sdk/src/resource/resource_detector.cc + * + * @return Resource A resource with the 
attributes from the OTEL_RESOURCE_ATTRIBUTES environment + * variable. + */ +Resource EnvironmentResourceDetector::detect() { + envoy::config::core::v3::DataSource ds; + ds.set_environment_variable(kOtelResourceAttributesEnv); + + Resource resource; + resource.schemaUrl_ = ""; + std::string attributes_str = ""; + + attributes_str = Config::DataSource::read(ds, true, context_.serverFactoryContext().api()); + + if (attributes_str.empty()) { + throw EnvoyException( + fmt::format("The OpenTelemetry environment resource detector is configured but the '{}'" + " environment variable is empty.", + kOtelResourceAttributesEnv)); + } + + for (const auto& pair : StringUtil::splitToken(attributes_str, ",")) { + const auto keyValue = StringUtil::splitToken(pair, "="); + if (keyValue.size() != 2) { + throw EnvoyException( + fmt::format("The OpenTelemetry environment resource detector is configured but the '{}'" + " environment variable has an invalid format.", + kOtelResourceAttributesEnv)); + } + + const std::string key = std::string(keyValue[0]); + const std::string value = std::string(keyValue[1]); + resource.attributes_[key] = value; + } + return resource; +} + +} // namespace OpenTelemetry +} // namespace Tracers +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/tracers/opentelemetry/resource_detectors/environment/environment_resource_detector.h b/source/extensions/tracers/opentelemetry/resource_detectors/environment/environment_resource_detector.h new file mode 100644 index 000000000000..78327b047840 --- /dev/null +++ b/source/extensions/tracers/opentelemetry/resource_detectors/environment/environment_resource_detector.h @@ -0,0 +1,38 @@ +#pragma once + +#include "envoy/extensions/tracers/opentelemetry/resource_detectors/v3/environment_resource_detector.pb.h" +#include "envoy/server/factory_context.h" + +#include "source/common/common/logger.h" +#include "source/extensions/tracers/opentelemetry/resource_detectors/resource_detector.h" + 
+namespace Envoy { +namespace Extensions { +namespace Tracers { +namespace OpenTelemetry { + +/** + * @brief A resource detector that extracts attributes from the OTEL_RESOURCE_ATTRIBUTES environment + * variable. + * @see + * https://github.com/open-telemetry/opentelemetry-specification/blob/v1.24.0/specification/resource/sdk.md#detecting-resource-information-from-the-environment + * + */ +class EnvironmentResourceDetector : public ResourceDetector, Logger::Loggable { +public: + EnvironmentResourceDetector(const envoy::extensions::tracers::opentelemetry::resource_detectors:: + v3::EnvironmentResourceDetectorConfig& config, + Server::Configuration::TracerFactoryContext& context) + : config_(config), context_(context) {} + Resource detect() override; + +private: + const envoy::extensions::tracers::opentelemetry::resource_detectors::v3:: + EnvironmentResourceDetectorConfig config_; + Server::Configuration::TracerFactoryContext& context_; +}; + +} // namespace OpenTelemetry +} // namespace Tracers +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/tracers/opentelemetry/resource_detectors/resource_detector.h b/source/extensions/tracers/opentelemetry/resource_detectors/resource_detector.h new file mode 100644 index 000000000000..69894b917680 --- /dev/null +++ b/source/extensions/tracers/opentelemetry/resource_detectors/resource_detector.h @@ -0,0 +1,80 @@ +#pragma once + +#include +#include +#include + +#include "envoy/config/typed_config.h" +#include "envoy/server/tracer_config.h" + +namespace Envoy { +namespace Extensions { +namespace Tracers { +namespace OpenTelemetry { + +/** + * @brief A string key-value map that stores the resource attributes. + */ +using ResourceAttributes = std::map; + +/** + * @brief A Resource represents the entity producing telemetry as Attributes. 
+ * For example, a process producing telemetry that is running in a container on Kubernetes + * has a Pod name, it is in a namespace and possibly is part of a Deployment which also has a name. + * See: + * https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/resource/sdk.md + */ +struct Resource { + std::string schemaUrl_{""}; + ResourceAttributes attributes_{}; + + virtual ~Resource() = default; +}; + +using ResourceConstSharedPtr = std::shared_ptr; + +/** + * @brief The base type for all resource detectors + * + */ +class ResourceDetector { +public: + virtual ~ResourceDetector() = default; + + /** + * @brief Load attributes and returns a Resource object + * populated with them and a possible SchemaUrl. + * @return Resource + */ + virtual Resource detect() PURE; +}; + +using ResourceDetectorPtr = std::unique_ptr; + +/* + * A factory for creating resource detectors. + */ +class ResourceDetectorFactory : public Envoy::Config::TypedFactory { +public: + ~ResourceDetectorFactory() override = default; + + /** + * @brief Creates a resource detector based on the configuration type provided. + * + * @param message The resource detector configuration. + * @param context The tracer factory context. + * @return ResourceDetectorPtr A resource detector based on the configuration type provided. 
+ */ + virtual ResourceDetectorPtr + createResourceDetector(const Protobuf::Message& message, + Server::Configuration::TracerFactoryContext& context) PURE; + + std::string category() const override { return "envoy.tracers.opentelemetry.resource_detectors"; } +}; + +using ResourceDetectorTypedFactoryPtr = std::unique_ptr; + +} // namespace OpenTelemetry +} // namespace Tracers +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/tracers/opentelemetry/resource_detectors/resource_provider.cc b/source/extensions/tracers/opentelemetry/resource_detectors/resource_provider.cc new file mode 100644 index 000000000000..a8f106cc3729 --- /dev/null +++ b/source/extensions/tracers/opentelemetry/resource_detectors/resource_provider.cc @@ -0,0 +1,110 @@ +#include "source/extensions/tracers/opentelemetry/resource_detectors/resource_provider.h" + +#include + +#include "source/common/common/logger.h" +#include "source/common/config/utility.h" + +namespace Envoy { +namespace Extensions { +namespace Tracers { +namespace OpenTelemetry { + +namespace { +bool isEmptyResource(const Resource& resource) { return resource.attributes_.empty(); } + +Resource createInitialResource(const std::string& service_name) { + Resource resource{}; + + // Creates initial resource with the static service.name attribute. + resource.attributes_[std::string(kServiceNameKey.data(), kServiceNameKey.size())] = + service_name.empty() ? std::string{kDefaultServiceName} : service_name; + + return resource; +} + +/** + * @brief Resolves the new schema url when merging two resources. + * This function implements the algorithm as defined in the OpenTelemetry Resource SDK + * specification. @see + * https://github.com/open-telemetry/opentelemetry-specification/blob/v1.24.0/specification/resource/sdk.md#merge + * + * @param old_schema_url The old resource's schema URL. + * @param updating_schema_url The updating resource's schema URL. + * @return std::string The calculated schema URL. 
+ */ +std::string resolveSchemaUrl(const std::string& old_schema_url, + const std::string& updating_schema_url) { + if (old_schema_url.empty()) { + return updating_schema_url; + } + if (updating_schema_url.empty()) { + return old_schema_url; + } + if (old_schema_url == updating_schema_url) { + return old_schema_url; + } + // The OTel spec leaves this case (when both have value but are different) unspecified. + ENVOY_LOG_MISC(info, "Resource schemaUrl conflict. Fall-back to old schema url: {}", + old_schema_url); + return old_schema_url; +} + +/** + * @brief Updates an old resource with a new one. This function implements + * the Merge operation defined in the OpenTelemetry Resource SDK specification. + * @see + * https://github.com/open-telemetry/opentelemetry-specification/blob/v1.24.0/specification/resource/sdk.md#merge + * + * @param old_resource The old resource. + * @param updating_resource The new resource. + */ +void mergeResource(Resource& old_resource, const Resource& updating_resource) { + // The schemaUrl is merged, regardless if the resources being merged + // have attributes or not. This behavior is compliant with the OTel spec. 
+ // see: https://github.com/envoyproxy/envoy/pull/29547#discussion_r1344540427 + old_resource.schemaUrl_ = resolveSchemaUrl(old_resource.schemaUrl_, updating_resource.schemaUrl_); + + if (isEmptyResource(updating_resource)) { + return; + } + for (auto const& attr : updating_resource.attributes_) { + old_resource.attributes_.insert_or_assign(attr.first, attr.second); + } +} +} // namespace + +Resource ResourceProviderImpl::getResource( + const envoy::config::trace::v3::OpenTelemetryConfig& opentelemetry_config, + Server::Configuration::TracerFactoryContext& context) const { + + Resource resource = createInitialResource(opentelemetry_config.service_name()); + + const auto& detectors_configs = opentelemetry_config.resource_detectors(); + + for (const auto& detector_config : detectors_configs) { + ResourceDetectorPtr detector; + auto* factory = Envoy::Config::Utility::getFactory(detector_config); + + if (!factory) { + throw EnvoyException( + fmt::format("Resource detector factory not found: '{}'", detector_config.name())); + } + + detector = factory->createResourceDetector(detector_config.typed_config(), context); + + if (!detector) { + throw EnvoyException( + fmt::format("Resource detector could not be created: '{}'", detector_config.name())); + } + + Resource detected_resource = detector->detect(); + mergeResource(resource, detected_resource); + } + return resource; +} + +} // namespace OpenTelemetry +} // namespace Tracers +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/tracers/opentelemetry/resource_detectors/resource_provider.h b/source/extensions/tracers/opentelemetry/resource_detectors/resource_provider.h new file mode 100644 index 000000000000..9ecf6420c31d --- /dev/null +++ b/source/extensions/tracers/opentelemetry/resource_detectors/resource_provider.h @@ -0,0 +1,44 @@ +#pragma once + +#include "envoy/config/trace/v3/opentelemetry.pb.h" + +#include 
"source/extensions/tracers/opentelemetry/resource_detectors/resource_detector.h" + +namespace Envoy { +namespace Extensions { +namespace Tracers { +namespace OpenTelemetry { + +constexpr absl::string_view kServiceNameKey = "service.name"; +constexpr absl::string_view kDefaultServiceName = "unknown_service:envoy"; + +class ResourceProvider : public Logger::Loggable { +public: + virtual ~ResourceProvider() = default; + + /** + * @brief Iterates through all loaded resource detectors and merge all the returned + * resources into one. Resource merging is done according to the OpenTelemetry + * resource SDK specification. @see + * https://github.com/open-telemetry/opentelemetry-specification/blob/v1.24.0/specification/resource/sdk.md#merge. + * + * @param opentelemetry_config The OpenTelemetry configuration, which contains the configured + * resource detectors. + * @param context The tracer factory context. + * @return Resource const The merged resource. + */ + virtual Resource + getResource(const envoy::config::trace::v3::OpenTelemetryConfig& opentelemetry_config, + Server::Configuration::TracerFactoryContext& context) const PURE; +}; + +class ResourceProviderImpl : public ResourceProvider { +public: + Resource getResource(const envoy::config::trace::v3::OpenTelemetryConfig& opentelemetry_config, + Server::Configuration::TracerFactoryContext& context) const override; +}; + +} // namespace OpenTelemetry +} // namespace Tracers +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/tracers/opentelemetry/samplers/BUILD b/source/extensions/tracers/opentelemetry/samplers/BUILD new file mode 100644 index 000000000000..32a1005b11e8 --- /dev/null +++ b/source/extensions/tracers/opentelemetry/samplers/BUILD @@ -0,0 +1,25 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_library( + name = "sampler_lib", + srcs = [ + ], + 
hdrs = [ + "sampler.h", + ], + deps = [ + "//envoy/config:typed_config_interface", + "//envoy/server:tracer_config_interface", + "//source/common/common:logger_lib", + "//source/common/config:utility_lib", + "@opentelemetry_proto//:trace_cc_proto", + ], +) diff --git a/source/extensions/tracers/opentelemetry/samplers/always_on/BUILD b/source/extensions/tracers/opentelemetry/samplers/always_on/BUILD new file mode 100644 index 000000000000..744607330d57 --- /dev/null +++ b/source/extensions/tracers/opentelemetry/samplers/always_on/BUILD @@ -0,0 +1,33 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_extension", + "envoy_cc_library", + "envoy_extension_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_extension_package() + +envoy_cc_extension( + name = "config", + srcs = ["config.cc"], + hdrs = ["config.h"], + deps = [ + ":always_on_sampler_lib", + "//envoy/registry", + "//source/common/config:utility_lib", + "@envoy_api//envoy/extensions/tracers/opentelemetry/samplers/v3:pkg_cc_proto", + ], +) + +envoy_cc_library( + name = "always_on_sampler_lib", + srcs = ["always_on_sampler.cc"], + hdrs = ["always_on_sampler.h"], + deps = [ + "//source/common/config:datasource_lib", + "//source/extensions/tracers/opentelemetry:opentelemetry_tracer_lib", + "//source/extensions/tracers/opentelemetry/samplers:sampler_lib", + ], +) diff --git a/source/extensions/tracers/opentelemetry/samplers/always_on/always_on_sampler.cc b/source/extensions/tracers/opentelemetry/samplers/always_on/always_on_sampler.cc new file mode 100644 index 000000000000..3bc0aa87ab3d --- /dev/null +++ b/source/extensions/tracers/opentelemetry/samplers/always_on/always_on_sampler.cc @@ -0,0 +1,34 @@ +#include "source/extensions/tracers/opentelemetry/samplers/always_on/always_on_sampler.h" + +#include +#include +#include + +#include "source/common/config/datasource.h" +#include "source/extensions/tracers/opentelemetry/span_context.h" + +namespace Envoy { +namespace Extensions { +namespace Tracers { 
+namespace OpenTelemetry { + +SamplingResult +AlwaysOnSampler::shouldSample(const absl::optional parent_context, + const std::string& /*trace_id*/, const std::string& /*name*/, + ::opentelemetry::proto::trace::v1::Span::SpanKind /*kind*/, + const std::map& /*attributes*/, + const std::vector& /*links*/) { + SamplingResult result; + result.decision = Decision::RECORD_AND_SAMPLE; + if (parent_context.has_value()) { + result.tracestate = parent_context.value().tracestate(); + } + return result; +} + +std::string AlwaysOnSampler::getDescription() const { return "AlwaysOnSampler"; } + +} // namespace OpenTelemetry +} // namespace Tracers +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/tracers/opentelemetry/samplers/always_on/always_on_sampler.h b/source/extensions/tracers/opentelemetry/samplers/always_on/always_on_sampler.h new file mode 100644 index 000000000000..2d53a511fa29 --- /dev/null +++ b/source/extensions/tracers/opentelemetry/samplers/always_on/always_on_sampler.h @@ -0,0 +1,38 @@ +#pragma once + +#include "envoy/server/factory_context.h" + +#include "source/common/common/logger.h" +#include "source/common/config/datasource.h" +#include "source/extensions/tracers/opentelemetry/samplers/sampler.h" + +namespace Envoy { +namespace Extensions { +namespace Tracers { +namespace OpenTelemetry { + +/** + * @brief A sampler which samples every span. + * https://opentelemetry.io/docs/specs/otel/trace/sdk/#alwayson + * - Returns RECORD_AND_SAMPLE always. + * - Description MUST be AlwaysOnSampler. 
+ * + */ +class AlwaysOnSampler : public Sampler, Logger::Loggable { +public: + explicit AlwaysOnSampler(const Protobuf::Message& /*config*/, + Server::Configuration::TracerFactoryContext& /*context*/) {} + SamplingResult shouldSample(const absl::optional parent_context, + const std::string& trace_id, const std::string& name, + ::opentelemetry::proto::trace::v1::Span::SpanKind spankind, + const std::map& attributes, + const std::vector& links) override; + std::string getDescription() const override; + +private: +}; + +} // namespace OpenTelemetry +} // namespace Tracers +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/tracers/opentelemetry/samplers/always_on/config.cc b/source/extensions/tracers/opentelemetry/samplers/always_on/config.cc new file mode 100644 index 000000000000..99288c4bf469 --- /dev/null +++ b/source/extensions/tracers/opentelemetry/samplers/always_on/config.cc @@ -0,0 +1,27 @@ +#include "source/extensions/tracers/opentelemetry/samplers/always_on/config.h" + +#include "envoy/server/tracer_config.h" + +#include "source/common/config/utility.h" +#include "source/extensions/tracers/opentelemetry/samplers/always_on/always_on_sampler.h" + +namespace Envoy { +namespace Extensions { +namespace Tracers { +namespace OpenTelemetry { + +SamplerSharedPtr +AlwaysOnSamplerFactory::createSampler(const Protobuf::Message& config, + Server::Configuration::TracerFactoryContext& context) { + return std::make_shared(config, context); +} + +/** + * Static registration for the Env sampler factory. @see RegisterFactory. 
+ */ +REGISTER_FACTORY(AlwaysOnSamplerFactory, SamplerFactory); + +} // namespace OpenTelemetry +} // namespace Tracers +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/tracers/opentelemetry/samplers/always_on/config.h b/source/extensions/tracers/opentelemetry/samplers/always_on/config.h new file mode 100644 index 000000000000..5f93f43c0f1f --- /dev/null +++ b/source/extensions/tracers/opentelemetry/samplers/always_on/config.h @@ -0,0 +1,42 @@ +#pragma once + +#include + +#include "envoy/extensions/tracers/opentelemetry/samplers/v3/always_on_sampler.pb.h" +#include "envoy/registry/registry.h" + +#include "source/extensions/tracers/opentelemetry/samplers/sampler.h" + +namespace Envoy { +namespace Extensions { +namespace Tracers { +namespace OpenTelemetry { + +/** + * Config registration for the AlwaysOnSampler. @see SamplerFactory. + */ +class AlwaysOnSamplerFactory : public SamplerFactory { +public: + /** + * @brief Create a Sampler. @see AlwaysOnSampler + * + * @param config Protobuf config for the sampler. + * @param context A reference to the TracerFactoryContext. 
+ * @return SamplerSharedPtr + */ + SamplerSharedPtr createSampler(const Protobuf::Message& config, + Server::Configuration::TracerFactoryContext& context) override; + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique< + envoy::extensions::tracers::opentelemetry::samplers::v3::AlwaysOnSamplerConfig>(); + } + std::string name() const override { return "envoy.tracers.opentelemetry.samplers.always_on"; } +}; + +DECLARE_FACTORY(AlwaysOnSamplerFactory); + +} // namespace OpenTelemetry +} // namespace Tracers +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/tracers/opentelemetry/samplers/sampler.h b/source/extensions/tracers/opentelemetry/samplers/sampler.h new file mode 100644 index 000000000000..fd2be0bb647e --- /dev/null +++ b/source/extensions/tracers/opentelemetry/samplers/sampler.h @@ -0,0 +1,111 @@ +#pragma once + +#include +#include +#include +#include + +#include "envoy/config/typed_config.h" +#include "envoy/server/tracer_config.h" +#include "envoy/tracing/trace_context.h" + +#include "absl/types/optional.h" +#include "opentelemetry/proto/trace/v1/trace.pb.h" + +namespace Envoy { +namespace Extensions { +namespace Tracers { +namespace OpenTelemetry { + +class SpanContext; + +enum class Decision { + // IsRecording will be false, the Span will not be recorded and all events and attributes will be + // dropped. + DROP, + // IsRecording will be true, but the Sampled flag MUST NOT be set. + RECORD_ONLY, + // IsRecording will be true and the Sampled flag MUST be set. + RECORD_AND_SAMPLE +}; + +struct SamplingResult { + /// @see Decision + Decision decision; + // A set of span Attributes that will also be added to the Span. Can be nullptr. + std::unique_ptr> attributes; + // A Tracestate that will be associated with the Span. 
If the sampler + // returns an empty Tracestate here, the Tracestate will be cleared, so samplers SHOULD normally + // return the passed-in Tracestate if they do not intend to change it + std::string tracestate; + + inline bool isRecording() const { + return decision == Decision::RECORD_ONLY || decision == Decision::RECORD_AND_SAMPLE; + } + + inline bool isSampled() const { return decision == Decision::RECORD_AND_SAMPLE; } +}; + +/** + * @brief The base type for all samplers + * see https://opentelemetry.io/docs/specs/otel/trace/sdk/#sampler + * + */ +class Sampler { +public: + virtual ~Sampler() = default; + + /** + * @brief Decides if a trace should be sampled. + * + * @param parent_context Span context describing the parent span. The Span's SpanContext may be + * invalid to indicate a root span. + * @param trace_id Trace id of the Span to be created. If the parent SpanContext contains a valid + * TraceId, they MUST always match. + * @param name Name of the Span to be created. + * @param spankind Span kind of the Span to be created. + * @param attributes Initial set of Attributes of the Span to be created. + * @param links Collection of links that will be associated with the Span to be created. + * @return SamplingResult @see SamplingResult + */ + virtual SamplingResult shouldSample(const absl::optional parent_context, + const std::string& trace_id, const std::string& name, + ::opentelemetry::proto::trace::v1::Span::SpanKind spankind, + const std::map& attributes, + const std::vector& links) PURE; + + /** + * @brief Returns a sampler description or name. + * + * @return The sampler name or short description with the configuration. 
+ */ + virtual std::string getDescription() const PURE; +}; + +using SamplerSharedPtr = std::shared_ptr; + +/* + * A factory for creating a sampler + */ +class SamplerFactory : public Envoy::Config::TypedFactory { +public: + ~SamplerFactory() override = default; + + /** + * @brief Creates a sampler + * @param config The sampler protobuf config. + * @param context The TracerFactoryContext. + * @return SamplerSharedPtr A sampler. + */ + virtual SamplerSharedPtr createSampler(const Protobuf::Message& config, + Server::Configuration::TracerFactoryContext& context) PURE; + + std::string category() const override { return "envoy.tracers.opentelemetry.samplers"; } +}; + +using SamplerFactoryPtr = std::unique_ptr; + +} // namespace OpenTelemetry +} // namespace Tracers +} // namespace Extensions +} // namespace Envoy diff --git a/source/extensions/tracers/opentelemetry/tracer.cc b/source/extensions/tracers/opentelemetry/tracer.cc index 683d5ea87d5f..73325ac7f317 100644 --- a/source/extensions/tracers/opentelemetry/tracer.cc +++ b/source/extensions/tracers/opentelemetry/tracer.cc @@ -19,11 +19,32 @@ namespace OpenTelemetry { constexpr absl::string_view kTraceParent = "traceparent"; constexpr absl::string_view kTraceState = "tracestate"; constexpr absl::string_view kDefaultVersion = "00"; -constexpr absl::string_view kServiceNameKey = "service.name"; -constexpr absl::string_view kDefaultServiceName = "unknown_service:envoy"; using opentelemetry::proto::collector::trace::v1::ExportTraceServiceRequest; +namespace { + +void callSampler(SamplerSharedPtr sampler, const absl::optional span_context, + Span& new_span, const std::string& operation_name) { + if (!sampler) { + return; + } + const auto sampling_result = sampler->shouldSample( + span_context, operation_name, new_span.getTraceIdAsHex(), new_span.spankind(), {}, {}); + new_span.setSampled(sampling_result.isSampled()); + + if (sampling_result.attributes) { + for (auto const& attribute : *sampling_result.attributes) { + 
new_span.setTag(attribute.first, attribute.second); + } + } + if (!sampling_result.tracestate.empty()) { + new_span.setTracestate(sampling_result.tracestate); + } +} + +} // namespace + Span::Span(const Tracing::Config& config, const std::string& name, SystemTime start_time, Envoy::TimeSource& time_source, Tracer& parent_tracer, bool downstream_span) : parent_tracer_(parent_tracer), time_source_(time_source) { @@ -110,12 +131,9 @@ void Span::setTag(absl::string_view name, absl::string_view value) { Tracer::Tracer(OpenTelemetryTraceExporterPtr exporter, Envoy::TimeSource& time_source, Random::RandomGenerator& random, Runtime::Loader& runtime, Event::Dispatcher& dispatcher, OpenTelemetryTracerStats tracing_stats, - const std::string& service_name) + const ResourceConstSharedPtr resource, SamplerSharedPtr sampler) : exporter_(std::move(exporter)), time_source_(time_source), random_(random), runtime_(runtime), - tracing_stats_(tracing_stats), service_name_(service_name) { - if (service_name.empty()) { - service_name_ = std::string{kDefaultServiceName}; - } + tracing_stats_(tracing_stats), resource_(resource), sampler_(sampler) { flush_timer_ = dispatcher.createTimer([this]() -> void { tracing_stats_.timer_flushed_.inc(); flushSpans(); @@ -134,14 +152,20 @@ void Tracer::flushSpans() { ExportTraceServiceRequest request; // A request consists of ResourceSpans. 
::opentelemetry::proto::trace::v1::ResourceSpans* resource_span = request.add_resource_spans(); - opentelemetry::proto::common::v1::KeyValue key_value = - opentelemetry::proto::common::v1::KeyValue(); - opentelemetry::proto::common::v1::AnyValue value_proto = - opentelemetry::proto::common::v1::AnyValue(); - value_proto.set_string_value(std::string{service_name_}); - key_value.set_key(std::string{kServiceNameKey}); - *key_value.mutable_value() = value_proto; - (*resource_span->mutable_resource()->add_attributes()) = key_value; + resource_span->set_schema_url(resource_->schemaUrl_); + + // add resource attributes + for (auto const& att : resource_->attributes_) { + opentelemetry::proto::common::v1::KeyValue key_value = + opentelemetry::proto::common::v1::KeyValue(); + opentelemetry::proto::common::v1::AnyValue value_proto = + opentelemetry::proto::common::v1::AnyValue(); + value_proto.set_string_value(std::string{att.second}); + key_value.set_key(std::string{att.first}); + *key_value.mutable_value() = value_proto; + (*resource_span->mutable_resource()->add_attributes()) = key_value; + } + ::opentelemetry::proto::trace::v1::ScopeSpans* scope_span = resource_span->add_scope_spans(); for (const auto& pending_span : span_buffer_) { (*scope_span->add_spans()) = pending_span; @@ -172,12 +196,16 @@ Tracing::SpanPtr Tracer::startSpan(const Tracing::Config& config, const std::str bool downstream_span) { // Create an Tracers::OpenTelemetry::Span class that will contain the OTel span. 
Span new_span(config, operation_name, start_time, time_source_, *this, downstream_span); - new_span.setSampled(tracing_decision.traced); uint64_t trace_id_high = random_.random(); uint64_t trace_id = random_.random(); new_span.setTraceId(absl::StrCat(Hex::uint64ToHex(trace_id_high), Hex::uint64ToHex(trace_id))); uint64_t span_id = random_.random(); new_span.setId(Hex::uint64ToHex(span_id)); + if (sampler_) { + callSampler(sampler_, absl::nullopt, new_span, operation_name); + } else { + new_span.setSampled(tracing_decision.traced); + } return std::make_unique(new_span); } @@ -186,7 +214,6 @@ Tracing::SpanPtr Tracer::startSpan(const Tracing::Config& config, const std::str bool downstream_span) { // Create a new span and populate details from the span context. Span new_span(config, operation_name, start_time, time_source_, *this, downstream_span); - new_span.setSampled(previous_span_context.sampled()); new_span.setTraceId(previous_span_context.traceId()); if (!previous_span_context.parentId().empty()) { new_span.setParentId(previous_span_context.parentId()); @@ -194,10 +221,15 @@ Tracing::SpanPtr Tracer::startSpan(const Tracing::Config& config, const std::str // Generate a new identifier for the span id. uint64_t span_id = random_.random(); new_span.setId(Hex::uint64ToHex(span_id)); - // Respect the previous span's sampled flag. - new_span.setSampled(previous_span_context.sampled()); - if (!previous_span_context.tracestate().empty()) { - new_span.setTracestate(std::string{previous_span_context.tracestate()}); + if (sampler_) { + // Sampler should make a sampling decision and set tracestate + callSampler(sampler_, previous_span_context, new_span, operation_name); + } else { + // Respect the previous span's sampled flag. 
+ new_span.setSampled(previous_span_context.sampled()); + if (!previous_span_context.tracestate().empty()) { + new_span.setTracestate(std::string{previous_span_context.tracestate()}); + } } return std::make_unique(new_span); } diff --git a/source/extensions/tracers/opentelemetry/tracer.h b/source/extensions/tracers/opentelemetry/tracer.h index 07d38ef22c8e..bea45d54f4cc 100644 --- a/source/extensions/tracers/opentelemetry/tracer.h +++ b/source/extensions/tracers/opentelemetry/tracer.h @@ -11,6 +11,8 @@ #include "source/common/common/logger.h" #include "source/extensions/tracers/common/factory_base.h" #include "source/extensions/tracers/opentelemetry/grpc_trace_exporter.h" +#include "source/extensions/tracers/opentelemetry/resource_detectors/resource_detector.h" +#include "source/extensions/tracers/opentelemetry/samplers/sampler.h" #include "source/extensions/tracers/opentelemetry/span_context.h" #include "absl/strings/escaping.h" @@ -35,7 +37,8 @@ class Tracer : Logger::Loggable { public: Tracer(OpenTelemetryTraceExporterPtr exporter, Envoy::TimeSource& time_source, Random::RandomGenerator& random, Runtime::Loader& runtime, Event::Dispatcher& dispatcher, - OpenTelemetryTracerStats tracing_stats, const std::string& service_name); + OpenTelemetryTracerStats tracing_stats, const ResourceConstSharedPtr resource, + SamplerSharedPtr sampler); void sendSpan(::opentelemetry::proto::trace::v1::Span& span); @@ -64,7 +67,8 @@ class Tracer : Logger::Loggable { Runtime::Loader& runtime_; Event::TimerPtr flush_timer_; OpenTelemetryTracerStats tracing_stats_; - std::string service_name_; + const ResourceConstSharedPtr resource_; + SamplerSharedPtr sampler_; }; /** @@ -111,6 +115,8 @@ class Span : Logger::Loggable, public Tracing::Span { std::string getTraceIdAsHex() const override { return absl::BytesToHexString(span_.trace_id()); }; + ::opentelemetry::proto::trace::v1::Span::SpanKind spankind() const { return span_.kind(); } + /** * Sets the span's id. 
*/ @@ -127,7 +133,7 @@ class Span : Logger::Loggable, public Tracing::Span { span_.set_parent_span_id(absl::HexStringToBytes(parent_span_id_hex)); } - std::string tracestate() { return span_.trace_state(); } + std::string tracestate() const { return span_.trace_state(); } /** * Sets the span's tracestate. diff --git a/source/server/server.cc b/source/server/server.cc index a2f391e6c0ea..575b29716ffc 100644 --- a/source/server/server.cc +++ b/source/server/server.cc @@ -74,14 +74,15 @@ std::unique_ptr getHandler(Event::Dispatcher& dispatcher) { } // namespace -InstanceImpl::InstanceImpl( - Init::Manager& init_manager, const Options& options, Event::TimeSystem& time_system, - Network::Address::InstanceConstSharedPtr local_address, ListenerHooks& hooks, - HotRestart& restarter, Stats::StoreRoot& store, Thread::BasicLockable& access_log_lock, - ComponentFactory& component_factory, Random::RandomGeneratorPtr&& random_generator, - ThreadLocal::Instance& tls, Thread::ThreadFactory& thread_factory, - Filesystem::Instance& file_system, std::unique_ptr process_context, - Buffer::WatermarkFactorySharedPtr watermark_factory) +InstanceImpl::InstanceImpl(Init::Manager& init_manager, const Options& options, + Event::TimeSystem& time_system, ListenerHooks& hooks, + HotRestart& restarter, Stats::StoreRoot& store, + Thread::BasicLockable& access_log_lock, + Random::RandomGeneratorPtr&& random_generator, + ThreadLocal::Instance& tls, Thread::ThreadFactory& thread_factory, + Filesystem::Instance& file_system, + std::unique_ptr process_context, + Buffer::WatermarkFactorySharedPtr watermark_factory) : init_manager_(init_manager), live_(false), options_(options), validation_context_(options_.allowUnknownStaticFields(), !options.rejectUnknownDynamicFields(), @@ -103,43 +104,7 @@ InstanceImpl::InstanceImpl( grpc_context_(store.symbolTable()), http_context_(store.symbolTable()), router_context_(store.symbolTable()), process_context_(std::move(process_context)), hooks_(hooks), 
quic_stat_names_(store.symbolTable()), server_contexts_(*this), - enable_reuse_port_default_(true), stats_flush_in_progress_(false) { - std::function set_up_logger = [&] { - TRY_ASSERT_MAIN_THREAD { - file_logger_ = std::make_unique( - options.logPath(), access_log_manager_, Logger::Registry::getSink()); - } - END_TRY - CATCH(const EnvoyException& e, { - throw EnvoyException( - fmt::format("Failed to open log-file '{}'. e.what(): {}", options.logPath(), e.what())); - }); - }; - - TRY_ASSERT_MAIN_THREAD { - if (!options.logPath().empty()) { - set_up_logger(); - } - restarter_.initialize(*dispatcher_, *this); - drain_manager_ = component_factory.createDrainManager(*this); - initialize(std::move(local_address), component_factory); - } - END_TRY - MULTI_CATCH( - const EnvoyException& e, - { - ENVOY_LOG(critical, "error initializing config '{} {} {}': {}", - options.configProto().DebugString(), options.configYaml(), options.configPath(), - e.what()); - terminate(); - throw; - }, - { - ENVOY_LOG(critical, "error initializing due to unknown exception"); - terminate(); - throw; - }); -} + enable_reuse_port_default_(true), stats_flush_in_progress_(false) {} InstanceImpl::~InstanceImpl() { terminate(); @@ -422,6 +387,45 @@ void InstanceUtil::loadBootstrapConfig(envoy::config::bootstrap::v3::Bootstrap& void InstanceImpl::initialize(Network::Address::InstanceConstSharedPtr local_address, ComponentFactory& component_factory) { + std::function set_up_logger = [&] { + TRY_ASSERT_MAIN_THREAD { + file_logger_ = std::make_unique( + options_.logPath(), access_log_manager_, Logger::Registry::getSink()); + } + END_TRY + CATCH(const EnvoyException& e, { + throw EnvoyException( + fmt::format("Failed to open log-file '{}'. 
e.what(): {}", options_.logPath(), e.what())); + }); + }; + + TRY_ASSERT_MAIN_THREAD { + if (!options_.logPath().empty()) { + set_up_logger(); + } + restarter_.initialize(*dispatcher_, *this); + drain_manager_ = component_factory.createDrainManager(*this); + initializeOrThrow(std::move(local_address), component_factory); + } + END_TRY + MULTI_CATCH( + const EnvoyException& e, + { + ENVOY_LOG(critical, "error initializing config '{} {} {}': {}", + options_.configProto().DebugString(), options_.configYaml(), + options_.configPath(), e.what()); + terminate(); + throw; + }, + { + ENVOY_LOG(critical, "error initializing due to unknown exception"); + terminate(); + throw; + }); +} + +void InstanceImpl::initializeOrThrow(Network::Address::InstanceConstSharedPtr local_address, + ComponentFactory& component_factory) { ENVOY_LOG(info, "initializing epoch {} (base id={}, hot restart version={})", options_.restartEpoch(), restarter_.baseId(), restarter_.version()); @@ -814,15 +818,6 @@ void InstanceImpl::onRuntimeReady() { shutdown(); }); } - - // TODO (nezdolik): Fully deprecate this runtime key in the next release. - if (runtime().snapshot().get(Network::TcpListenerImpl::GlobalMaxCxRuntimeKey)) { - ENVOY_LOG(warn, - "Usage of the deprecated runtime key {}, consider switching to " - "`envoy.resource_monitors.downstream_connections` instead." - "This runtime key will be removed in future.", - Network::TcpListenerImpl::GlobalMaxCxRuntimeKey); - } } void InstanceImpl::startWorkers() { @@ -908,7 +903,8 @@ RunHelper::RunHelper(Instance& instance, const Options& options, Event::Dispatch // If there is no global limit to the number of active connections, warn on startup. 
if (!overload_manager.getThreadLocalOverloadState().isResourceMonitorEnabled( - Server::OverloadProactiveResourceName::GlobalDownstreamMaxConnections)) { + Server::OverloadProactiveResourceName::GlobalDownstreamMaxConnections) && + !instance.runtime().snapshot().get(Network::TcpListenerImpl::GlobalMaxCxRuntimeKey)) { ENVOY_LOG(warn, "There is no configured limit to the number of allowed active downstream " "connections. Configure a " "limit in `envoy.resource_monitors.downstream_connections` resource monitor."); diff --git a/source/server/server.h b/source/server/server.h index 13baedd12c4b..859eb5d4789b 100644 --- a/source/server/server.h +++ b/source/server/server.h @@ -233,14 +233,16 @@ class InstanceImpl final : Logger::Loggable, * @throw EnvoyException if initialization fails. */ InstanceImpl(Init::Manager& init_manager, const Options& options, Event::TimeSystem& time_system, - Network::Address::InstanceConstSharedPtr local_address, ListenerHooks& hooks, - HotRestart& restarter, Stats::StoreRoot& store, - Thread::BasicLockable& access_log_lock, ComponentFactory& component_factory, + ListenerHooks& hooks, HotRestart& restarter, Stats::StoreRoot& store, + Thread::BasicLockable& access_log_lock, Random::RandomGeneratorPtr&& random_generator, ThreadLocal::Instance& tls, Thread::ThreadFactory& thread_factory, Filesystem::Instance& file_system, std::unique_ptr process_context, Buffer::WatermarkFactorySharedPtr watermark_factory = nullptr); + // initialize the server. This must be called before run(). 
+ void initialize(Network::Address::InstanceConstSharedPtr local_address, + ComponentFactory& component_factory); ~InstanceImpl() override; void run() override; @@ -313,8 +315,10 @@ class InstanceImpl final : Logger::Loggable, ProtobufTypes::MessagePtr dumpBootstrapConfig(); void flushStatsInternal(); void updateServerStats(); - void initialize(Network::Address::InstanceConstSharedPtr local_address, - ComponentFactory& component_factory); + // This does most of the work of initialization, but can throw errors caught + // by initialize(). + void initializeOrThrow(Network::Address::InstanceConstSharedPtr local_address, + ComponentFactory& component_factory); void loadServerFlags(const absl::optional& flags_path); void startWorkers(); void terminate(); diff --git a/test/common/crypto/utility_test.cc b/test/common/crypto/utility_test.cc index 46eabc6b03cb..005c820ba86a 100644 --- a/test/common/crypto/utility_test.cc +++ b/test/common/crypto/utility_test.cc @@ -72,25 +72,22 @@ TEST(UtilityTest, TestImportPublicKey) { wrapper = Common::Crypto::Access::getTyped(*crypto_ptr); pkey = wrapper->getEVP_PKEY(); EXPECT_EQ(nullptr, pkey); + + EVP_PKEY* empty_pkey = EVP_PKEY_new(); + wrapper->setEVP_PKEY(empty_pkey); + pkey = wrapper->getEVP_PKEY(); + EXPECT_NE(nullptr, pkey); } TEST(UtilityTest, TestVerifySignature) { - auto key = "30820122300d06092a864886f70d01010105000382010f003082010a0282010100a7471266d01d160308d" - "73409c06f2e8d35c531c458d3e480e9f3191847d062ec5ccff7bc51e949d5f2c3540c189a4eca1e8633a6" - "2cf2d0923101c27e38013e71de9ae91a704849bff7fbe2ce5bf4bd666fd9731102a53193fe5a9a5a50644" - "ff8b1183fa897646598caad22a37f9544510836372b44c58c98586fb7144629cd8c9479592d996d32ff6d" - "395c0b8442ec5aa1ef8051529ea0e375883cefc72c04e360b4ef8f5760650589ca814918f678eee39b884" - "d5af8136a9630a6cc0cde157dc8e00f39540628d5f335b2c36c54c7c8bc3738a6b21acff815405afa28e5" - "183f550dac19abcf1145a7f9ced987db680e4a229cac75dee347ec9ebce1fc3dbbbb0203010001"; - auto hash_func = "sha256"; - auto 
signature = - "345ac3a167558f4f387a81c2d64234d901a7ceaa544db779d2f797b0ea4ef851b740905a63e2f4d5af42cee093a2" - "9c7155db9a63d3d483e0ef948f5ac51ce4e10a3a6606fd93ef68ee47b30c37491103039459122f78e1c7ea71a1a5" - "ea24bb6519bca02c8c9915fe8be24927c91812a13db72dbcb500103a79e8f67ff8cb9e2a631974e0668ab3977bf5" - "70a91b67d1b6bcd5dce84055f21427d64f4256a042ab1dc8e925d53a769f6681a873f5859693a7728fcbe95beace" - "1563b5ffbcd7c93b898aeba31421dafbfadeea50229c49fd6c445449314460f3d19150bd29a91333beaced557ed6" - "295234f7c14fa46303b7e977d2c89ba8a39a46a35f33eb07a332"; - auto data = "hello"; + auto key = "30820122300d06092a864886f70d01010105000382010f003082010a0282010100ba10ebe185465586093" + "228fb3b0093c560853b7ebf28497aefb9961a6cc886dd3f6d3278a93244fa5084a9c263bd57feb4ea1868" + "aa8a2718aa46708c803ce49318619982ba06a6615d24bb853c0fb85ebed833a802245e4518d4e2ba10da1" + "f22c732505433c558bed8895eb1e97cb5d65f821be9330143e93a738ef6896165879f692d75c2d7928e01" + "fd7fe601d16931bdd876c7b15b741e48546fe80db45df56e22ed2fa974ab937af7644d20834f41a61aeb9" + "a70d0248d274642b14ed6585892403bed8e03a9a12485ae44e3d39ab53e5bd70dee58476fb81860a18679" + "9429b71f79f204894cf21d31cc19118d547bb1b946532d080e074ec97e23667818490203010001"; + auto data = "hello\n"; Common::Crypto::CryptoObjectPtr crypto_ptr( Common::Crypto::UtilitySingleton::get().importPublicKey(Hex::decode(key))); @@ -98,30 +95,92 @@ TEST(UtilityTest, TestVerifySignature) { std::vector text(data, data + strlen(data)); - auto sig = Hex::decode(signature); - auto result = UtilitySingleton::get().verifySignature(hash_func, *crypto, sig, text); + // Map of hash function names and their respective signatures + std::map hashSignatures = { + {"sha1", + "9ed4cc60e8b2b51ff00b1cc06c628263476c8be6136510fc47e4668423c3492d8711489000b32163cd022661049" + "360fa0b8366692e41a4d4fb4237479694484012833ccc88938c1a471e33757c198b42623961d91cdf41ca01b375" + "002930b37a62377517cad297d5658e31610acaf79216a3d5c0afe4715dfe6e5bad3c20dac77bbbd2e7a4cb44172" + 
"abb620fe60b968426726ed25d2aed99abf9e8f705b7021e3448a78824e6982e9d14dbd0a317f45d42198f785f3b" + "0ca8e311695cedb4ce19626c246b8010a5de7b7978b8a3b56c1558f87bd658023e52b6e155c39594bae6e3cbf77" + "9d487a9ce3bffd7d8a2641f336771bec5c9d4a40dc8d4163fd2c1dd3b"}, // Signature for SHA-1 hash + // function + {"sha224", + "03813d50082dfb43444ebf47788e69271ebbfa17e64f7e7600ce761bd89ff459e21ecea6bc7de8396cfd80fe0ee" + "3d92967f0c467c930f7d0b1b1734e5d139ffaa5d84c5047cb38793b152ba8b284ec6d31e0b410b1e1a06ffda171" + "42c83b30593ac02a2f07f8e863ade752d23b2f41d56bd1ab6328c46de47233e2e2e4189e5bd3bce0b0428f485ff" + "e75f7343d89b376bd7dc2953467e63f5c1eb9279ca349fa74535d37e80f57216b8b73b0e67b32f0f18f41bae6a7" + "6e350dbc6188525eda1c79c0977bf433bb170d49de47985bc14a418d7a03d72eda740666dc05185fdcea6bb2914" + "d7bd0271bd06b3de72bc9db82d625799bf3441e2abff8fcd273efe6c7"}, // Signature for SHA-224 hash + // function + {"sha256", + "504c24addc615c01c915e3244b8248b470a41c82faa6a82526ddd8f21e197adae6c8b985e40960157d47f336d8b" + "a31ce4b3b1795379a7415af57daa757d3027e870b3c6644e749b583c51a16f9984afd39c909325d271d8d4c8d00" + "6288bd8f7945aa24a783613ecece95a9692b3b56dd1d831fc06d82eca40fd432a15a6cdb837d7ce378ac889c4ab" + "00b0c1f9c2be279952696b70c9ea2bb014d6f20b72ed4917904d5f24d5776058bd11121f3ed02e03c384cf42734" + "6b1d300867969f22e27aa9f0607344cc9d8e9a90802e97ac39af9324f60ddad24682e48424346dd5a53fe550370" + "bdf9f94dec8a80810edd648a33cc767f9e4328660e3ee1be8b47e9cfa"}, // Signature for SHA-256 hash + // function + {"sha384", + "91491000964d11f12ff658101f7c21ce8b37da6fff3abe836859c17e268858d77ee9c96e88ca86b36bca2b06472" + "a1f551d642bf056f1241d11d5b853e1458c2a9d86f9096e872c81480a06000346a61e51cb94e5174a98b9daacf5" + "204dd28e081c99a72066c406334a046ae5f3eb0e0eea86f0ae7eeb27d5dea245e850d05cc6c972f8249b8a4f018" + "6531735137a2e45f1f6410bf8e2382e95b57618802a0068ca197b2d8bcca53d6738e04b86ed9c69d45dad6d9bd7" + "be55596a719f12531d363e74c9d659738eaa50ab854869416f2b445f054aa2c1223c9edd223cbc5ac0d3582cb9b" + 
"5af494138bd6ace049e3ab326bb23fadd3dbcd74e9a3b372843f926ec"}, // Signature for SHA-384 hash + // function + {"sha512", + "5d001462d000c0aa23d931f6cce5def5f8472c7aaa0185cab87b256697b7a0c8fb6a4c9f84debf1b4ff3bf53213" + "0bcb25f724e09a74b5d5c915feb9c943a005ab879078b2fbcab0828e128ebfb7befee25d219bcd6cf1ad1f62b94" + "b460021eebc4c249e34219c71b4f526628976ecea8fb70e1166053da212747e8ba4b29cb91fa6541d53d3400a9d" + "34881a227e01eebf157104d84555c9e20320280723a72d3a724eba99f1fb14d59399321636ebfe7070d83d7b6b2" + "381fcdb683fb73e7796d36fe45dfb14a622c3426fe5bf69af9c24f9f1b30affad129b5f2b7dfa6fa384c73ad212" + "f414606882c3f9133d4702f487f9b08df8d0265fe5e8e12a11c6cb35c"}, // Signature for SHA-512 hash + // function + }; + + // Loop through each hash function and its signature + for (const auto& entry : hashSignatures) { + const std::string& hash_func = entry.first; + const std::string& signature = entry.second; + auto sig = Hex::decode(signature); + + auto result = UtilitySingleton::get().verifySignature(hash_func, *crypto, sig, text); + EXPECT_EQ(true, result.result_); + EXPECT_EQ("", result.error_message_); + } - EXPECT_EQ(true, result.result_); - EXPECT_EQ("", result.error_message_); + auto signature = + "504c24addc615c01c915e3244b8248b470a41c82faa6a82526ddd8f21e197adae6c8b985e40960157d47f336d8ba" + "31ce4b3b1795379a7415af57daa757d3027e870b3c6644e749b583c51a16f9984afd39c909325d271d8d4c8d0062" + "88bd8f7945aa24a783613ecece95a9692b3b56dd1d831fc06d82eca40fd432a15a6cdb837d7ce378ac889c4ab00b" + "0c1f9c2be279952696b70c9ea2bb014d6f20b72ed4917904d5f24d5776058bd11121f3ed02e03c384cf427346b1d" + "300867969f22e27aa9f0607344cc9d8e9a90802e97ac39af9324f60ddad24682e48424346dd5a53fe550370bdf9f" + "94dec8a80810edd648a33cc767f9e4328660e3ee1be8b47e9cfa"; + auto sig = Hex::decode(signature); - result = UtilitySingleton::get().verifySignature("unknown", *crypto, sig, text); + // Test an unknown hash function + auto result = UtilitySingleton::get().verifySignature("unknown", *crypto, sig, text); 
EXPECT_EQ(false, result.result_); EXPECT_EQ("unknown is not supported.", result.error_message_); + // Test with an empty crypto object auto empty_crypto = std::make_unique(); - result = UtilitySingleton::get().verifySignature(hash_func, *empty_crypto, sig, text); + result = UtilitySingleton::get().verifySignature("sha256", *empty_crypto, sig, text); EXPECT_EQ(false, result.result_); EXPECT_EQ("Failed to initialize digest verify.", result.error_message_); + // Test with incorrect data data = "baddata"; text = std::vector(data, data + strlen(data)); - result = UtilitySingleton::get().verifySignature(hash_func, *crypto, sig, text); + result = UtilitySingleton::get().verifySignature("sha256", *crypto, sig, text); EXPECT_EQ(false, result.result_); EXPECT_EQ("Failed to verify digest. Error code: 0", result.error_message_); + // Test with incorrect signature data = "hello"; text = std::vector(data, data + strlen(data)); - result = UtilitySingleton::get().verifySignature(hash_func, *crypto, Hex::decode("000000"), text); + result = UtilitySingleton::get().verifySignature("sha256", *crypto, Hex::decode("000000"), text); EXPECT_EQ(false, result.result_); EXPECT_EQ("Failed to verify digest. 
Error code: 0", result.error_message_); } diff --git a/test/common/formatter/substitution_formatter_test.cc b/test/common/formatter/substitution_formatter_test.cc index c32f48a2943f..8c9e6dc4afd0 100644 --- a/test/common/formatter/substitution_formatter_test.cc +++ b/test/common/formatter/substitution_formatter_test.cc @@ -604,14 +604,6 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { EXPECT_THAT(upstream_format.formatValueWithContext({}, stream_info), ProtoEq(ValueUtil::numberValue(18443))); - { - TestScopedRuntime scoped_runtime; - scoped_runtime.mergeValues({{"envoy.reloadable_features.format_ports_as_numbers", "false"}}); - - EXPECT_THAT(upstream_format.formatValueWithContext({}, stream_info), - ProtoEq(ValueUtil::stringValue("18443"))); - } - // Validate for IPv6 address address = Network::Address::InstanceConstSharedPtr{new Network::Address::Ipv6Instance("::1", 19443)}; @@ -620,14 +612,6 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { EXPECT_THAT(upstream_format.formatValueWithContext({}, stream_info), ProtoEq(ValueUtil::numberValue(19443))); - { - TestScopedRuntime scoped_runtime; - scoped_runtime.mergeValues({{"envoy.reloadable_features.format_ports_as_numbers", "false"}}); - - EXPECT_THAT(upstream_format.formatValueWithContext({}, stream_info), - ProtoEq(ValueUtil::stringValue("19443"))); - } - // Validate for Pipe address = Network::Address::InstanceConstSharedPtr{new Network::Address::PipeInstance("/foo")}; stream_info.upstreamInfo()->setUpstreamLocalAddress(address); @@ -660,14 +644,6 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { EXPECT_EQ("443", upstream_format.formatWithContext({}, stream_info)); EXPECT_THAT(upstream_format.formatValueWithContext({}, stream_info), ProtoEq(ValueUtil::numberValue(443))); - - { - TestScopedRuntime scoped_runtime; - scoped_runtime.mergeValues({{"envoy.reloadable_features.format_ports_as_numbers", "false"}}); - - EXPECT_THAT(upstream_format.formatValueWithContext({}, stream_info), - 
ProtoEq(ValueUtil::stringValue("443"))); - } } { @@ -754,14 +730,6 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { EXPECT_THAT(upstream_format.formatValueWithContext({}, stream_info), ProtoEq(ValueUtil::numberValue(8443))); - { - TestScopedRuntime scoped_runtime; - scoped_runtime.mergeValues({{"envoy.reloadable_features.format_ports_as_numbers", "false"}}); - - EXPECT_THAT(upstream_format.formatValueWithContext({}, stream_info), - ProtoEq(ValueUtil::stringValue("8443"))); - } - // Validate for IPv6 address address = Network::Address::InstanceConstSharedPtr{new Network::Address::Ipv6Instance("::1", 9443)}; @@ -770,14 +738,6 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { EXPECT_THAT(upstream_format.formatValueWithContext({}, stream_info), ProtoEq(ValueUtil::numberValue(9443))); - { - TestScopedRuntime scoped_runtime; - scoped_runtime.mergeValues({{"envoy.reloadable_features.format_ports_as_numbers", "false"}}); - - EXPECT_THAT(upstream_format.formatValueWithContext({}, stream_info), - ProtoEq(ValueUtil::stringValue("9443"))); - } - // Validate for Pipe address = Network::Address::InstanceConstSharedPtr{new Network::Address::PipeInstance("/foo")}; stream_info.downstream_connection_info_provider_->setLocalAddress(address); @@ -805,14 +765,6 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { EXPECT_EQ("0", upstream_format.formatWithContext({}, stream_info)); EXPECT_THAT(upstream_format.formatValueWithContext({}, stream_info), ProtoEq(ValueUtil::numberValue(0))); - - { - TestScopedRuntime scoped_runtime; - scoped_runtime.mergeValues({{"envoy.reloadable_features.format_ports_as_numbers", "false"}}); - - EXPECT_THAT(upstream_format.formatValueWithContext({}, stream_info), - ProtoEq(ValueUtil::stringValue("0"))); - } } { @@ -834,14 +786,6 @@ TEST(SubstitutionFormatterTest, streamInfoFormatter) { EXPECT_EQ("63443", upstream_format.formatWithContext({}, stream_info)); EXPECT_THAT(upstream_format.formatValueWithContext({}, stream_info), 
ProtoEq(ValueUtil::numberValue(63443))); - - { - TestScopedRuntime scoped_runtime; - scoped_runtime.mergeValues({{"envoy.reloadable_features.format_ports_as_numbers", "false"}}); - - EXPECT_THAT(upstream_format.formatValueWithContext({}, stream_info), - ProtoEq(ValueUtil::stringValue("63443"))); - } } { diff --git a/test/common/http/codec_impl_fuzz_test.cc b/test/common/http/codec_impl_fuzz_test.cc index 46db8ea12b5c..6101a2ed88c9 100644 --- a/test/common/http/codec_impl_fuzz_test.cc +++ b/test/common/http/codec_impl_fuzz_test.cc @@ -377,20 +377,24 @@ class HttpStream : public LinkedObject { dispatcher = &context_.client_connection_.dispatcher_; } - // With this feature enabled for http2 we end up creating a schedulable - // callback the first time we re-enable reading as it's used to process - // the backed up data. + // With this feature enabled for http2 the codec may end up creating a + // schedulable callback the first time it re-enables reading as it's used + // to process the backed up data if there's any to process. if (Runtime::runtimeFeatureEnabled(Runtime::defer_processing_backedup_streams)) { - const bool expecting_schedulable_callback_creation = + const bool might_schedulable_callback_creation = http_protocol_ == Protocol::Http2 && state.read_disable_count_ == 0 && !disable && !state.created_schedulable_callback_; - if (expecting_schedulable_callback_creation) { + if (might_schedulable_callback_creation) { ASSERT(dispatcher != nullptr); state.created_schedulable_callback_ = true; - // The unique pointer of this object will be returned in createSchedulableCallback_ of - // dispatcher, so there is no risk of object leak. - new Event::MockSchedulableCallback(dispatcher); + ON_CALL(*dispatcher, createSchedulableCallback_(_)) + .WillByDefault(testing::Invoke([dispatcher](std::function cb) { + // The unique pointer of this object will be returned in + // createSchedulableCallback_ of dispatcher, so there is no risk of this object + // leaking. 
+ return new Event::MockSchedulableCallback(dispatcher, cb); + })); } } diff --git a/test/common/http/conn_manager_impl_test_2.cc b/test/common/http/conn_manager_impl_test_2.cc index 5d456ab6c130..dec00c0bfa54 100644 --- a/test/common/http/conn_manager_impl_test_2.cc +++ b/test/common/http/conn_manager_impl_test_2.cc @@ -3692,7 +3692,7 @@ TEST_F(HttpConnectionManagerImplTest, NoProxyProtocolAdded) { } // Validate that deferred streams are processed with a variety of -// headers, data and trailer arriving in the same I/O cycle +// headers, data, metadata, and trailers arriving in the same I/O cycle TEST_F(HttpConnectionManagerImplTest, LimitWorkPerIOCycle) { const int kRequestsSentPerIOCycle = 100; EXPECT_CALL(runtime_.snapshot_, getInteger(_, _)).WillRepeatedly(ReturnArg<1>()); @@ -3701,13 +3701,14 @@ TEST_F(HttpConnectionManagerImplTest, LimitWorkPerIOCycle) { setup(false, ""); // Store the basic request encoder during filter chain setup. - std::vector> encoder_filters; + std::vector> decoder_filters; int decode_headers_call_count = 0; for (int i = 0; i < kRequestsSentPerIOCycle; ++i) { + int mod5 = i % 5; std::shared_ptr filter(new NiceMock()); - // Each 4th request is headers only - EXPECT_CALL(*filter, decodeHeaders(_, i % 4 == 0 ? true : false)) + // Each 0th request is headers only + EXPECT_CALL(*filter, decodeHeaders(_, mod5 == 0 ? true : false)) .WillRepeatedly(Invoke([&](RequestHeaderMap&, bool) -> FilterHeadersStatus { ++decode_headers_call_count; return FilterHeadersStatus::StopIteration; @@ -3715,18 +3716,24 @@ TEST_F(HttpConnectionManagerImplTest, LimitWorkPerIOCycle) { // Each 1st request is headers and data only // Each 2nd request is headers, data and trailers - if (i % 4 == 1 || i % 4 == 2) { - EXPECT_CALL(*filter, decodeData(_, i % 4 == 1 ? true : false)) + if (mod5 == 1 || mod5 == 2) { + EXPECT_CALL(*filter, decodeData(_, mod5 == 1 ? 
true : false)) .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); } // Each 3rd request is headers and trailers (no data) - if (i % 4 == 2 || i % 4 == 3) { + if (mod5 == 2 || mod5 == 3) { EXPECT_CALL(*filter, decodeTrailers(_)).WillOnce(Return(FilterTrailersStatus::StopIteration)); } + // Each 4th request is headers, metadata, and data. + if (mod5 == 4) { + EXPECT_CALL(*filter, decodeMetadata(_)).WillOnce(Return(FilterMetadataStatus::Continue)); + EXPECT_CALL(*filter, decodeData(_, true)) + .WillOnce(Return(FilterDataStatus::StopIterationNoBuffer)); + } EXPECT_CALL(*filter, setDecoderFilterCallbacks(_)); - encoder_filters.push_back(std::move(filter)); + decoder_filters.push_back(std::move(filter)); } uint64_t random_value = 0; @@ -3736,11 +3743,11 @@ TEST_F(HttpConnectionManagerImplTest, LimitWorkPerIOCycle) { EXPECT_CALL(filter_factory_, createFilterChain(_)) .Times(kRequestsSentPerIOCycle) - .WillRepeatedly(Invoke([&encoder_filters](FilterChainManager& manager) -> bool { + .WillRepeatedly(Invoke([&decoder_filters](FilterChainManager& manager) -> bool { static int index = 0; int i = index++; - FilterFactoryCb factory([&encoder_filters, i](FilterChainFactoryCallbacks& callbacks) { - callbacks.addStreamDecoderFilter(encoder_filters[i]); + FilterFactoryCb factory([&decoder_filters, i](FilterChainFactoryCallbacks& callbacks) { + callbacks.addStreamDecoderFilter(decoder_filters[i]); }); manager.applyFilterFactoryCb({}, factory); return true; @@ -3762,12 +3769,15 @@ TEST_F(HttpConnectionManagerImplTest, LimitWorkPerIOCycle) { RequestHeaderMapPtr headers{new TestRequestHeaderMapImpl{ {":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + MetadataMapPtr metadata = std::make_unique(); + (*metadata)["key1"] = "value1"; + RequestTrailerMapPtr trailers{ new TestRequestTrailerMapImpl{{"key1", "value1"}, {"key2", "value2"}}}; Buffer::OwnedImpl data("data"); - switch (i % 4) { + switch (i % 5) { case 0: decoder_->decodeHeaders(std::move(headers), true); break; 
@@ -3784,6 +3794,11 @@ TEST_F(HttpConnectionManagerImplTest, LimitWorkPerIOCycle) { decoder_->decodeHeaders(std::move(headers), false); decoder_->decodeTrailers(std::move(trailers)); break; + case 4: + decoder_->decodeHeaders(std::move(headers), false); + decoder_->decodeMetadata(std::move(metadata)); + decoder_->decodeData(data, true); + break; } } @@ -3809,7 +3824,7 @@ TEST_F(HttpConnectionManagerImplTest, LimitWorkPerIOCycle) { ASSERT_EQ(deferred_request_count, kRequestsSentPerIOCycle); - for (auto& filter : encoder_filters) { + for (auto& filter : decoder_filters) { ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", "200"}}}; filter->callbacks_->streamInfo().setResponseCodeDetails(""); filter->callbacks_->encodeHeaders(std::move(response_headers), true, "details"); diff --git a/test/common/http/filter_manager_test.cc b/test/common/http/filter_manager_test.cc index c1d3e041e996..865f98515ccd 100644 --- a/test/common/http/filter_manager_test.cc +++ b/test/common/http/filter_manager_test.cc @@ -69,6 +69,61 @@ class FilterManagerTest : public testing::Test { std::make_shared(StreamInfo::FilterState::LifeSpan::Connection); }; +TEST_F(FilterManagerTest, RequestHeadersOrResponseHeadersAccess) { + initialize(); + + auto decoder_filter = std::make_shared>(); + auto encoder_filter = std::make_shared>(); + + EXPECT_CALL(filter_factory_, createFilterChain(_)) + .WillOnce(Invoke([&](FilterChainManager& manager) -> bool { + auto decoder_factory = createDecoderFilterFactoryCb(decoder_filter); + manager.applyFilterFactoryCb({}, decoder_factory); + auto encoder_factory = createEncoderFilterFactoryCb(encoder_filter); + manager.applyFilterFactoryCb({}, encoder_factory); + return true; + })); + filter_manager_->createFilterChain(); + + RequestHeaderMapPtr request_headers{ + new TestRequestHeaderMapImpl{{":authority", "host"}, {":path", "/"}, {":method", "GET"}}}; + ResponseHeaderMapPtr response_headers{new TestResponseHeaderMapImpl{{":status", 
"200"}}}; + RequestTrailerMapPtr request_trailers{new TestRequestTrailerMapImpl{{"foo", "bar"}}}; + ResponseTrailerMapPtr response_trailers{new TestResponseTrailerMapImpl{{"foo", "bar"}}}; + ResponseHeaderMapPtr informational_headers{ + new TestResponseHeaderMapImpl{{":status", "100"}, {"foo", "bar"}}}; + + EXPECT_CALL(filter_manager_callbacks_, requestHeaders()) + .Times(2) + .WillRepeatedly(Return(makeOptRef(*request_headers))); + EXPECT_CALL(filter_manager_callbacks_, responseHeaders()) + .Times(2) + .WillRepeatedly(Return(makeOptRef(*response_headers))); + EXPECT_CALL(filter_manager_callbacks_, requestTrailers()) + .Times(2) + .WillRepeatedly(Return(makeOptRef(*request_trailers))); + EXPECT_CALL(filter_manager_callbacks_, responseTrailers()) + .Times(2) + .WillRepeatedly(Return(makeOptRef(*response_trailers))); + EXPECT_CALL(filter_manager_callbacks_, informationalHeaders()) + .Times(2) + .WillRepeatedly(Return(makeOptRef(*informational_headers))); + + EXPECT_EQ(decoder_filter->callbacks_->requestHeaders().ptr(), request_headers.get()); + EXPECT_EQ(decoder_filter->callbacks_->responseHeaders().ptr(), response_headers.get()); + EXPECT_EQ(decoder_filter->callbacks_->requestTrailers().ptr(), request_trailers.get()); + EXPECT_EQ(decoder_filter->callbacks_->responseTrailers().ptr(), response_trailers.get()); + EXPECT_EQ(decoder_filter->callbacks_->informationalHeaders().ptr(), informational_headers.get()); + + EXPECT_EQ(encoder_filter->callbacks_->requestHeaders().ptr(), request_headers.get()); + EXPECT_EQ(encoder_filter->callbacks_->responseHeaders().ptr(), response_headers.get()); + EXPECT_EQ(encoder_filter->callbacks_->requestTrailers().ptr(), request_trailers.get()); + EXPECT_EQ(encoder_filter->callbacks_->responseTrailers().ptr(), response_trailers.get()); + EXPECT_EQ(encoder_filter->callbacks_->informationalHeaders().ptr(), informational_headers.get()); + + filter_manager_->destroyFilters(); +} + // Verifies that the local reply persists the gRPC 
classification even if the request headers are // modified. TEST_F(FilterManagerTest, SendLocalReplyDuringDecodingGrpcClassiciation) { diff --git a/test/extensions/common/aws/BUILD b/test/extensions/common/aws/BUILD index 55ebdf79f19f..43ce091b0f55 100644 --- a/test/extensions/common/aws/BUILD +++ b/test/extensions/common/aws/BUILD @@ -14,8 +14,11 @@ envoy_cc_mock( srcs = ["mocks.cc"], hdrs = ["mocks.h"], deps = [ + "//source/common/http:message_lib", "//source/extensions/common/aws:credentials_provider_interface", + "//source/extensions/common/aws:metadata_fetcher_lib", "//source/extensions/common/aws:signer_interface", + "//test/mocks/upstream:cluster_manager_mocks", ], ) @@ -37,6 +40,7 @@ envoy_cc_test( srcs = ["utility_test.cc"], deps = [ "//source/extensions/common/aws:utility_lib", + "//test/extensions/common/aws:aws_mocks", "//test/test_common:utility_lib", ], ) @@ -50,6 +54,22 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "metadata_fetcher_test", + srcs = ["metadata_fetcher_test.cc"], + deps = [ + "//source/extensions/common/aws:metadata_fetcher_lib", + "//test/extensions/common/aws:aws_mocks", + "//test/extensions/filters/http/common:mock_lib", + "//test/mocks/api:api_mocks", + "//test/mocks/event:event_mocks", + "//test/mocks/server:factory_context_mocks", + "//test/test_common:environment_lib", + "//test/test_common:simulated_time_system_lib", + "//test/test_common:utility_lib", + ], +) + envoy_cc_test( name = "credentials_provider_impl_test", srcs = ["credentials_provider_impl_test.cc"], diff --git a/test/extensions/common/aws/metadata_fetcher_test.cc b/test/extensions/common/aws/metadata_fetcher_test.cc new file mode 100644 index 000000000000..d009625e952a --- /dev/null +++ b/test/extensions/common/aws/metadata_fetcher_test.cc @@ -0,0 +1,283 @@ +#include +#include +#include + +#include "source/common/http/headers.h" +#include "source/common/http/message_impl.h" +#include "source/common/http/utility.h" +#include "source/common/protobuf/utility.h" 
+#include "source/extensions/common/aws/metadata_fetcher.h" + +#include "test/extensions/common/aws/mocks.h" +#include "test/extensions/filters/http/common/mock.h" +#include "test/mocks/api/mocks.h" +#include "test/mocks/event/mocks.h" +#include "test/mocks/server/factory_context.h" +#include "test/test_common/environment.h" +#include "test/test_common/simulated_time_system.h" +#include "test/test_common/utility.h" + +using Envoy::Extensions::HttpFilters::Common::MockUpstream; +using testing::_; +using testing::AllOf; +using testing::InSequence; +using testing::Mock; +using testing::NiceMock; +using testing::Ref; +using testing::Return; +using testing::Throw; +using testing::UnorderedElementsAre; + +namespace Envoy { +namespace Extensions { +namespace Common { +namespace Aws { + +MATCHER_P(OptionsHasBufferBodyForRetry, expectedValue, "") { + *result_listener << "\nexpected { buffer_body_for_retry: \"" << expectedValue + << "\"} but got {buffer_body_for_retry: \"" << arg.buffer_body_for_retry + << "\"}\n"; + return ExplainMatchResult(expectedValue, arg.buffer_body_for_retry, result_listener); +} + +MATCHER_P(NumRetries, expectedRetries, "") { + *result_listener << "\nexpected { num_retries: \"" << expectedRetries + << "\"} but got {num_retries: \"" << arg.num_retries().value() << "\"}\n"; + return ExplainMatchResult(expectedRetries, arg.num_retries().value(), result_listener); +} + +MATCHER_P(PerTryTimeout, expectedTimeout, "") { + *result_listener << "\nexpected { per_try_timeout: \"" << expectedTimeout + << "\"} but got { per_try_timeout: \"" << arg.per_try_timeout().seconds() + << "\"}\n"; + return ExplainMatchResult(expectedTimeout, arg.per_try_timeout().seconds(), result_listener); +} + +MATCHER_P(PerTryIdleTimeout, expectedIdleTimeout, "") { + *result_listener << "\nexpected { per_try_idle_timeout: \"" << expectedIdleTimeout + << "\"} but got { per_try_idle_timeout: \"" + << arg.per_try_idle_timeout().seconds() << "\"}\n"; + return 
ExplainMatchResult(expectedIdleTimeout, arg.per_try_idle_timeout().seconds(), + result_listener); +} + +MATCHER_P(RetryOnModes, expectedModes, "") { + const std::string& retry_on = arg.retry_on(); + std::set retry_on_modes = absl::StrSplit(retry_on, ','); + *result_listener << "\nexpected retry_on modes doesn't match " + << "received { retry_on modes: \"" << retry_on << "\"}\n"; + return ExplainMatchResult(expectedModes, retry_on_modes, result_listener); +} + +MATCHER_P(OptionsHasRetryPolicy, policyMatcher, "") { + if (!arg.retry_policy.has_value()) { + *result_listener << "Expected options to have retry policy, but it was unset"; + return false; + } + return ExplainMatchResult(policyMatcher, arg.retry_policy.value(), result_listener); +} + +class MetadataFetcherTest : public testing::Test { +public: + void setupFetcher() { + mock_factory_ctx_.cluster_manager_.initializeThreadLocalClusters({"cluster_name"}); + fetcher_ = MetadataFetcher::create(mock_factory_ctx_.cluster_manager_, "cluster_name"); + EXPECT_TRUE(fetcher_ != nullptr); + } + + testing::NiceMock mock_factory_ctx_; + std::unique_ptr fetcher_; + NiceMock parent_span_; +}; + +TEST_F(MetadataFetcherTest, TestGetSuccess) { + // Setup + setupFetcher(); + Http::RequestMessageImpl message; + std::string body = "not_empty"; + MockUpstream mock_result(mock_factory_ctx_.cluster_manager_, "200", body); + MockMetadataReceiver receiver; + EXPECT_CALL(receiver, onMetadataSuccess(std::move(body))); + EXPECT_CALL(receiver, onMetadataError(_)).Times(0); + + // Act + fetcher_->fetch(message, parent_span_, receiver); +} + +TEST_F(MetadataFetcherTest, TestRequestMatchAndSpanPassedDown) { + // Setup + setupFetcher(); + Http::RequestMessageImpl message; + + message.headers().setScheme(Http::Headers::get().SchemeValues.Http); + message.headers().setMethod(Http::Headers::get().MethodValues.Get); + message.headers().setHost("169.254.170.2:80"); + message.headers().setPath("/v2/credentials/c68caeb5-ef71-4914-8170-111111111111"); 
+ message.headers().setCopy(Http::LowerCaseString(":pseudo-header"), "peudo-header-value"); + message.headers().setCopy(Http::LowerCaseString("X-aws-ec2-metadata-token"), "Token"); + + MockUpstream mock_result(mock_factory_ctx_.cluster_manager_, "200", "not_empty"); + MockMetadataReceiver receiver; + Http::MockAsyncClientRequest httpClientRequest( + &mock_factory_ctx_.cluster_manager_.thread_local_cluster_.async_client_); + + EXPECT_CALL(mock_factory_ctx_.cluster_manager_.thread_local_cluster_.async_client_, + send_(_, _, _)) + .WillOnce(Invoke( + [this, &httpClientRequest]( + Http::RequestMessagePtr& request, Http::AsyncClient::Callbacks& cb, + const Http::AsyncClient::RequestOptions& options) -> Http::AsyncClient::Request* { + Http::TestRequestHeaderMapImpl injected_headers = { + {":method", "GET"}, + {":scheme", "http"}, + {":authority", "169.254.170.2"}, + {":path", "/v2/credentials/c68caeb5-ef71-4914-8170-111111111111"}, + {"X-aws-ec2-metadata-token", "Token"}}; + EXPECT_THAT(request->headers(), IsSupersetOfHeaders(injected_headers)); + EXPECT_TRUE(request->headers().get(Http::LowerCaseString(":pseudo-header")).empty()); + + // Verify expectations for span + EXPECT_TRUE(options.parent_span_ == &this->parent_span_); + EXPECT_TRUE(options.child_span_name_ == "AWS Metadata Fetch"); + + // Let's say this ends up with a failure then verify it is handled properly by calling + // onMetadataError. 
+ cb.onFailure(httpClientRequest, Http::AsyncClient::FailureReason::Reset); + return &httpClientRequest; + })); + EXPECT_CALL(receiver, onMetadataError(MetadataFetcher::MetadataReceiver::Failure::Network)); + // Act + fetcher_->fetch(message, parent_span_, receiver); +} + +TEST_F(MetadataFetcherTest, TestGet400) { + // Setup + setupFetcher(); + Http::RequestMessageImpl message; + + MockUpstream mock_result(mock_factory_ctx_.cluster_manager_, "400", "not_empty"); + MockMetadataReceiver receiver; + EXPECT_CALL(receiver, onMetadataSuccess(_)).Times(0); + EXPECT_CALL(receiver, onMetadataError(MetadataFetcher::MetadataReceiver::Failure::Network)); + + // Act + fetcher_->fetch(message, parent_span_, receiver); +} + +TEST_F(MetadataFetcherTest, TestGet400NoBody) { + // Setup + setupFetcher(); + Http::RequestMessageImpl message; + + MockUpstream mock_result(mock_factory_ctx_.cluster_manager_, "400", ""); + MockMetadataReceiver receiver; + EXPECT_CALL(receiver, onMetadataSuccess(_)).Times(0); + EXPECT_CALL(receiver, onMetadataError(MetadataFetcher::MetadataReceiver::Failure::Network)); + + // Act + fetcher_->fetch(message, parent_span_, receiver); +} + +TEST_F(MetadataFetcherTest, TestGetNoBody) { + // Setup + setupFetcher(); + Http::RequestMessageImpl message; + + MockUpstream mock_result(mock_factory_ctx_.cluster_manager_, "200", ""); + MockMetadataReceiver receiver; + EXPECT_CALL(receiver, onMetadataSuccess(_)).Times(0); + EXPECT_CALL(receiver, + onMetadataError(MetadataFetcher::MetadataReceiver::Failure::InvalidMetadata)); + + // Act + fetcher_->fetch(message, parent_span_, receiver); +} + +TEST_F(MetadataFetcherTest, TestHttpFailure) { + // Setup + setupFetcher(); + Http::RequestMessageImpl message; + + MockUpstream mock_result(mock_factory_ctx_.cluster_manager_, + Http::AsyncClient::FailureReason::Reset); + MockMetadataReceiver receiver; + EXPECT_CALL(receiver, onMetadataSuccess(_)).Times(0); + EXPECT_CALL(receiver, 
onMetadataError(MetadataFetcher::MetadataReceiver::Failure::Network)); + + // Act + fetcher_->fetch(message, parent_span_, receiver); +} + +TEST_F(MetadataFetcherTest, TestClusterNotFound) { + // Setup without thread local cluster + fetcher_ = MetadataFetcher::create(mock_factory_ctx_.cluster_manager_, "cluster_name"); + Http::RequestMessageImpl message; + MockMetadataReceiver receiver; + + EXPECT_CALL(mock_factory_ctx_.cluster_manager_, getThreadLocalCluster(_)) + .WillOnce(Return(nullptr)); + EXPECT_CALL(receiver, onMetadataSuccess(_)).Times(0); + EXPECT_CALL(receiver, onMetadataError(MetadataFetcher::MetadataReceiver::Failure::MissingConfig)); + + // Act + fetcher_->fetch(message, parent_span_, receiver); +} + +TEST_F(MetadataFetcherTest, TestCancel) { + // Setup + setupFetcher(); + Http::RequestMessageImpl message; + Http::MockAsyncClientRequest request( + &(mock_factory_ctx_.cluster_manager_.thread_local_cluster_.async_client_)); + MockUpstream mock_result(mock_factory_ctx_.cluster_manager_, &request); + MockMetadataReceiver receiver; + EXPECT_CALL(request, cancel()); + EXPECT_CALL(receiver, onMetadataSuccess(_)).Times(0); + EXPECT_CALL(receiver, onMetadataError(_)).Times(0); + + // Act + fetcher_->fetch(message, parent_span_, receiver); + // Proper cancel + fetcher_->cancel(); + Mock::VerifyAndClearExpectations(&request); + Mock::VerifyAndClearExpectations(&receiver); + // Re-entrant cancel should do nothing. 
+ EXPECT_CALL(request, cancel()).Times(0); + fetcher_->cancel(); +} + +TEST_F(MetadataFetcherTest, TestDefaultRetryPolicy) { + // Setup + setupFetcher(); + Http::RequestMessageImpl message; + MockUpstream mock_result(mock_factory_ctx_.cluster_manager_, "200", "not_empty"); + MockMetadataReceiver receiver; + + EXPECT_CALL( + mock_factory_ctx_.cluster_manager_.thread_local_cluster_.async_client_, + send_(_, _, + AllOf(OptionsHasBufferBodyForRetry(true), + OptionsHasRetryPolicy(AllOf( + NumRetries(3), PerTryTimeout(5), PerTryIdleTimeout(1), + RetryOnModes(UnorderedElementsAre("5xx", "gateway-error", "connect-failure", + "refused-stream", "reset"))))))) + .WillOnce(Return(nullptr)); + // Act + fetcher_->fetch(message, parent_span_, receiver); +} + +TEST_F(MetadataFetcherTest, TestFailureToStringConversion) { + // Setup + setupFetcher(); + EXPECT_EQ(fetcher_->failureToString(MetadataFetcher::MetadataReceiver::Failure::Network), + "Network"); + EXPECT_EQ(fetcher_->failureToString(MetadataFetcher::MetadataReceiver::Failure::InvalidMetadata), + "InvalidMetadata"); + EXPECT_EQ(fetcher_->failureToString(MetadataFetcher::MetadataReceiver::Failure::MissingConfig), + "MissingConfig"); +} + +} // namespace Aws +} // namespace Common +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/common/aws/mocks.h b/test/extensions/common/aws/mocks.h index 5c3b0c7041af..6db726a8f936 100644 --- a/test/extensions/common/aws/mocks.h +++ b/test/extensions/common/aws/mocks.h @@ -1,8 +1,14 @@ #pragma once +#include "envoy/http/message.h" + +#include "source/common/http/message_impl.h" #include "source/extensions/common/aws/credentials_provider.h" +#include "source/extensions/common/aws/metadata_fetcher.h" #include "source/extensions/common/aws/signer.h" +#include "test/mocks/upstream/cluster_manager.h" + #include "gmock/gmock.h" namespace Envoy { @@ -10,6 +16,21 @@ namespace Extensions { namespace Common { namespace Aws { +class MockMetadataFetcher : public 
MetadataFetcher { +public: + MOCK_METHOD(void, cancel, ()); + MOCK_METHOD(absl::string_view, failureToString, (MetadataFetcher::MetadataReceiver::Failure)); + MOCK_METHOD(void, fetch, + (Http::RequestMessage & message, Tracing::Span& parent_span, + MetadataFetcher::MetadataReceiver& receiver)); +}; + +class MockMetadataReceiver : public MetadataFetcher::MetadataReceiver { +public: + MOCK_METHOD(void, onMetadataSuccess, (const std::string&& body)); + MOCK_METHOD(void, onMetadataError, (MetadataFetcher::MetadataReceiver::Failure reason)); +}; + class MockCredentialsProvider : public CredentialsProvider { public: MockCredentialsProvider(); diff --git a/test/extensions/common/aws/utility_test.cc b/test/extensions/common/aws/utility_test.cc index 629f28c2c957..221a94454d13 100644 --- a/test/extensions/common/aws/utility_test.cc +++ b/test/extensions/common/aws/utility_test.cc @@ -1,11 +1,18 @@ #include "source/extensions/common/aws/utility.h" +#include "test/extensions/common/aws/mocks.h" #include "test/test_common/utility.h" #include "gtest/gtest.h" +using testing::_; using testing::ElementsAre; +using testing::InSequence; +using testing::NiceMock; using testing::Pair; +using testing::Ref; +using testing::Return; +using testing::Throw; namespace Envoy { namespace Extensions { @@ -13,6 +20,12 @@ namespace Common { namespace Aws { namespace { +MATCHER_P(WithName, expectedName, "") { + *result_listener << "\nexpected { name: \"" << expectedName << "\"} but got {name: \"" + << arg.name() << "\"}\n"; + return ExplainMatchResult(expectedName, arg.name(), result_listener); +} + // Headers must be in alphabetical order by virtue of std::map TEST(UtilityTest, CanonicalizeHeadersInAlphabeticalOrder) { Http::TestRequestHeaderMapImpl headers{ @@ -346,6 +359,45 @@ TEST(UtilityTest, JoinCanonicalHeaderNamesWithEmptyMap) { EXPECT_EQ("", names); } +// Verify that we don't add a thread local cluster if it already exists. 
+TEST(UtilityTest, ThreadLocalClusterExistsAlready) { + NiceMock cluster_; + NiceMock cm_; + EXPECT_CALL(cm_, getThreadLocalCluster(_)).WillOnce(Return(&cluster_)); + EXPECT_CALL(cm_, addOrUpdateCluster(_, _)).Times(0); + EXPECT_TRUE(Utility::addInternalClusterStatic(cm_, "cluster_name", + envoy::config::cluster::v3::Cluster::STATIC, "")); +} + +// Verify that if thread local cluster doesn't exist we can create a new one. +TEST(UtilityTest, AddStaticClusterSuccess) { + NiceMock cm_; + EXPECT_CALL(cm_, getThreadLocalCluster(_)).WillOnce(Return(nullptr)); + EXPECT_CALL(cm_, addOrUpdateCluster(WithName("cluster_name"), _)).WillOnce(Return(true)); + EXPECT_TRUE(Utility::addInternalClusterStatic( + cm_, "cluster_name", envoy::config::cluster::v3::Cluster::STATIC, "127.0.0.1:80")); +} + +// Handle exception when adding thread local cluster fails. +TEST(UtilityTest, AddStaticClusterFailure) { + NiceMock cm_; + EXPECT_CALL(cm_, getThreadLocalCluster(_)).WillOnce(Return(nullptr)); + EXPECT_CALL(cm_, addOrUpdateCluster(WithName("cluster_name"), _)) + .WillOnce(Throw(EnvoyException("exeption message"))); + EXPECT_FALSE(Utility::addInternalClusterStatic( + cm_, "cluster_name", envoy::config::cluster::v3::Cluster::STATIC, "127.0.0.1:80")); +} + +// Verify that for uri argument in addInternalClusterStatic port value is optional +// and can contain request path which will be ignored. 
+TEST(UtilityTest, AddStaticClusterSuccessEvenWithMissingPort) { + NiceMock cm_; + EXPECT_CALL(cm_, getThreadLocalCluster(_)).WillOnce(Return(nullptr)); + EXPECT_CALL(cm_, addOrUpdateCluster(WithName("cluster_name"), _)).WillOnce(Return(true)); + EXPECT_TRUE(Utility::addInternalClusterStatic( + cm_, "cluster_name", envoy::config::cluster::v3::Cluster::STATIC, "127.0.0.1/something")); +} + } // namespace } // namespace Aws } // namespace Common diff --git a/test/extensions/filters/common/ext_authz/check_request_utils_test.cc b/test/extensions/filters/common/ext_authz/check_request_utils_test.cc index 83674fc8f177..9486088ba523 100644 --- a/test/extensions/filters/common/ext_authz/check_request_utils_test.cc +++ b/test/extensions/filters/common/ext_authz/check_request_utils_test.cc @@ -66,11 +66,11 @@ class CheckRequestUtilsTest : public testing::Test { auto metadata_val = MessageUtil::keyValueStruct("foo", "bar"); (*metadata_context.mutable_filter_metadata())["meta.key"] = metadata_val; - CheckRequestUtils::createHttpCheck(&callbacks_, request_headers, std::move(context_extensions), - std::move(metadata_context), request, - /*max_request_bytes=*/0, /*pack_as_bytes=*/false, - include_peer_certificate, want_tls_session != nullptr, - labels, nullptr); + CheckRequestUtils::createHttpCheck( + &callbacks_, request_headers, std::move(context_extensions), std::move(metadata_context), + envoy::config::core::v3::Metadata(), request, /*max_request_bytes=*/0, + /*pack_as_bytes=*/false, include_peer_certificate, want_tls_session != nullptr, labels, + nullptr); EXPECT_EQ("source", request.attributes().source().principal()); EXPECT_EQ("destination", request.attributes().destination().principal()); @@ -78,7 +78,6 @@ class CheckRequestUtilsTest : public testing::Test { EXPECT_EQ("value", request.attributes().context_extensions().at("key")); EXPECT_EQ("value_1", request.attributes().destination().labels().at("label_1")); EXPECT_EQ("value_2", 
request.attributes().destination().labels().at("label_2")); - EXPECT_EQ("bar", request.attributes() .metadata_context() .filter_metadata() @@ -86,6 +85,7 @@ class CheckRequestUtilsTest : public testing::Test { .fields() .at("foo") .string_value()); + EXPECT_TRUE(request.attributes().has_route_metadata_context()); if (include_peer_certificate) { EXPECT_EQ(cert_data_, request.attributes().source().certificate()); @@ -190,7 +190,7 @@ TEST_F(CheckRequestUtilsTest, BasicHttp) { expectBasicHttp(); CheckRequestUtils::createHttpCheck( &callbacks_, request_headers, Protobuf::Map(), - envoy::config::core::v3::Metadata(), request_, size, + envoy::config::core::v3::Metadata(), envoy::config::core::v3::Metadata(), request_, size, /*pack_as_bytes=*/false, /*include_peer_certificate=*/false, /*include_tls_session=*/false, Protobuf::Map(), nullptr); ASSERT_EQ(size, request_.attributes().request().http().body().size()); @@ -218,9 +218,9 @@ TEST_F(CheckRequestUtilsTest, BasicHttpWithDuplicateHeaders) { expectBasicHttp(); CheckRequestUtils::createHttpCheck( &callbacks_, request_headers, Protobuf::Map(), - envoy::config::core::v3::Metadata(), request_, size, - /*pack_as_bytes=*/false, /*include_peer_certificate=*/false, - /*include_tls_session=*/false, Protobuf::Map(), nullptr); + envoy::config::core::v3::Metadata(), envoy::config::core::v3::Metadata(), request_, size, + /*pack_as_bytes=*/false, /*include_peer_certificate=*/false, /*include_tls_session=*/false, + Protobuf::Map(), nullptr); ASSERT_EQ(size, request_.attributes().request().http().body().size()); EXPECT_EQ(buffer_->toString().substr(0, size), request_.attributes().request().http().body()); EXPECT_EQ(",foo,bar", request_.attributes().request().http().headers().at("x-duplicate-header")); @@ -247,7 +247,7 @@ TEST_F(CheckRequestUtilsTest, BasicHttpWithRequestHeaderMatchers) { CheckRequestUtils::createHttpCheck( &callbacks_, request_headers, Protobuf::Map(), - envoy::config::core::v3::Metadata(), request_, size, + 
envoy::config::core::v3::Metadata(), envoy::config::core::v3::Metadata(), request_, size, /*pack_as_bytes=*/false, /*include_peer_certificate=*/false, /*include_tls_session=*/false, Protobuf::Map(), createRequestHeaderMatchers()); ASSERT_EQ(size, request_.attributes().request().http().body().size()); @@ -270,9 +270,9 @@ TEST_F(CheckRequestUtilsTest, BasicHttpWithPartialBody) { expectBasicHttp(); CheckRequestUtils::createHttpCheck( &callbacks_, headers_, Protobuf::Map(), - envoy::config::core::v3::Metadata(), request_, size, - /*pack_as_bytes=*/false, /*include_peer_certificate=*/false, - /*include_tls_session=*/false, Protobuf::Map(), nullptr); + envoy::config::core::v3::Metadata(), envoy::config::core::v3::Metadata(), request_, size, + /*pack_as_bytes=*/false, /*include_peer_certificate=*/false, /*include_tls_session=*/false, + Protobuf::Map(), nullptr); ASSERT_EQ(size, request_.attributes().request().http().body().size()); EXPECT_EQ(buffer_->toString().substr(0, size), request_.attributes().request().http().body()); EXPECT_EQ("true", request_.attributes().request().http().headers().at( @@ -290,9 +290,9 @@ TEST_F(CheckRequestUtilsTest, BasicHttpWithFullBody) { expectBasicHttp(); CheckRequestUtils::createHttpCheck( &callbacks_, headers_, Protobuf::Map(), - envoy::config::core::v3::Metadata(), request_, buffer_->length(), /*pack_as_bytes=*/false, - /*include_peer_certificate=*/false, /*include_tls_session=*/false, - Protobuf::Map(), nullptr); + envoy::config::core::v3::Metadata(), envoy::config::core::v3::Metadata(), request_, + buffer_->length(), /*pack_as_bytes=*/false, /*include_peer_certificate=*/false, + /*include_tls_session=*/false, Protobuf::Map(), nullptr); ASSERT_EQ(buffer_->length(), request_.attributes().request().http().body().size()); EXPECT_EQ(buffer_->toString().substr(0, buffer_->length()), request_.attributes().request().http().body()); @@ -323,9 +323,9 @@ TEST_F(CheckRequestUtilsTest, BasicHttpWithFullBodyPackAsBytes) { // 
request_.SerializeToString() still returns "true" when it is failed to serialize the data. CheckRequestUtils::createHttpCheck( &callbacks_, headers_, Protobuf::Map(), - envoy::config::core::v3::Metadata(), request_, buffer_->length(), /*pack_as_bytes=*/true, - /*include_peer_certificate=*/false, /*include_tls_session=*/false, - Protobuf::Map(), nullptr); + envoy::config::core::v3::Metadata(), envoy::config::core::v3::Metadata(), request_, + buffer_->length(), /*pack_as_bytes=*/true, /*include_peer_certificate=*/false, + /*include_tls_session=*/false, Protobuf::Map(), nullptr); // TODO(dio): Find a way to test this without using function from testing::internal namespace. testing::internal::CaptureStderr(); diff --git a/test/extensions/filters/http/basic_auth/BUILD b/test/extensions/filters/http/basic_auth/BUILD new file mode 100644 index 000000000000..39e573d580cd --- /dev/null +++ b/test/extensions/filters/http/basic_auth/BUILD @@ -0,0 +1,45 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_extension_cc_test( + name = "filter_test", + srcs = ["filter_test.cc"], + extension_names = ["envoy.filters.http.basic_auth"], + deps = [ + "//source/extensions/filters/http/basic_auth:basic_auth_lib", + "//test/mocks/server:server_mocks", + "@envoy_api//envoy/extensions/filters/http/basic_auth/v3:pkg_cc_proto", + ], +) + +envoy_extension_cc_test( + name = "config_test", + srcs = ["config_test.cc"], + extension_names = ["envoy.filters.http.basic_auth"], + deps = [ + "//source/extensions/filters/http/basic_auth:config", + "//test/mocks/server:server_mocks", + ], +) + +envoy_extension_cc_test( + name = "basic_auth_integration_test", + size = "large", + srcs = ["basic_auth_integration_test.cc"], + extension_names = ["envoy.filters.http.basic_auth"], + deps = [ + 
"//source/extensions/filters/http/basic_auth:config", + "//test/integration:http_protocol_integration_lib", + "//test/test_common:utility_lib", + ], +) diff --git a/test/extensions/filters/http/basic_auth/basic_auth_integration_test.cc b/test/extensions/filters/http/basic_auth/basic_auth_integration_test.cc new file mode 100644 index 000000000000..2e70bf9efb5b --- /dev/null +++ b/test/extensions/filters/http/basic_auth/basic_auth_integration_test.cc @@ -0,0 +1,119 @@ +#include "test/integration/http_protocol_integration.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace BasicAuth { +namespace { + +class BasicAuthIntegrationTest : public HttpProtocolIntegrationTest { +public: + void initializeFilter() { + // user1, test1 + // user2, test2 + const std::string filter_config = + R"EOF( +name: envoy.filters.http.basic_auth +typed_config: + "@type": type.googleapis.com/envoy.extensions.filters.http.basic_auth.v3.BasicAuth + users: + inline_string: |- + user1:{SHA}tESsBmE/yNY3lb6a0L6vVQEZNqw= + user2:{SHA}EJ9LPFDXsN9ynSmbxvjp75Bmlx8= +)EOF"; + config_helper_.prependFilter(filter_config); + initialize(); + } +}; + +// BasicAuth integration tests that should run with all protocols +class BasicAuthIntegrationTestAllProtocols : public BasicAuthIntegrationTest {}; + +INSTANTIATE_TEST_SUITE_P( + Protocols, BasicAuthIntegrationTestAllProtocols, + testing::ValuesIn(HttpProtocolIntegrationTest::getProtocolTestParamsWithoutHTTP3()), + HttpProtocolIntegrationTest::protocolTestParamsToString); + +// Request with valid credential +TEST_P(BasicAuthIntegrationTestAllProtocols, ValidCredential) { + initializeFilter(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{ + {":method", "GET"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "host"}, + {"Authorization", "Basic 
dXNlcjE6dGVzdDE="}, // user1, test1 + }); + + waitForNextUpstreamRequest(); + upstream_request_->encodeHeaders(Http::TestResponseHeaderMapImpl{{":status", "200"}}, true); + ASSERT_TRUE(response->waitForEndStream()); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("200", response->headers().getStatusValue()); +} + +// Request without credential +TEST_P(BasicAuthIntegrationTestAllProtocols, NoCredential) { + initializeFilter(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{ + {":method", "GET"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "host"}, + }); + + ASSERT_TRUE(response->waitForEndStream()); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("401", response->headers().getStatusValue()); + EXPECT_EQ("User authentication failed. Missing username and password", response->body()); +} + +// Request without wrong password +TEST_P(BasicAuthIntegrationTestAllProtocols, WrongPasswrod) { + initializeFilter(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{ + {":method", "GET"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "host"}, + {"Authorization", "Basic dXNlcjE6dGVzdDI="}, // user1, test2 + }); + + ASSERT_TRUE(response->waitForEndStream()); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("401", response->headers().getStatusValue()); + EXPECT_EQ("User authentication failed. 
Invalid username/password combination", response->body()); +} + +// Request with none-existed user +TEST_P(BasicAuthIntegrationTestAllProtocols, NoneExistedUser) { + initializeFilter(); + codec_client_ = makeHttpConnection(lookupPort("http")); + + auto response = codec_client_->makeHeaderOnlyRequest(Http::TestRequestHeaderMapImpl{ + {":method", "GET"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "host"}, + {"Authorization", "Basic dXNlcjM6dGVzdDI="}, // user3, test2 + }); + + ASSERT_TRUE(response->waitForEndStream()); + ASSERT_TRUE(response->complete()); + EXPECT_EQ("401", response->headers().getStatusValue()); + EXPECT_EQ("User authentication failed. Invalid username/password combination", response->body()); +} +} // namespace +} // namespace BasicAuth +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/basic_auth/config_test.cc b/test/extensions/filters/http/basic_auth/config_test.cc new file mode 100644 index 000000000000..2be2d5812596 --- /dev/null +++ b/test/extensions/filters/http/basic_auth/config_test.cc @@ -0,0 +1,127 @@ +#include "source/extensions/filters/http/basic_auth/basic_auth_filter.h" +#include "source/extensions/filters/http/basic_auth/config.h" + +#include "test/mocks/server/mocks.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace BasicAuth { + +TEST(Factory, ValidConfig) { + const std::string yaml = R"( + users: + inline_string: |- + user1:{SHA}tESsBmE/yNY3lb6a0L6vVQEZNqw= + user2:{SHA}EJ9LPFDXsN9ynSmbxvjp75Bmlx8= + )"; + + BasicAuthFilterFactory factory; + ProtobufTypes::MessagePtr proto_config = factory.createEmptyRouteConfigProto(); + TestUtility::loadFromYaml(yaml, *proto_config); + + NiceMock context; + + auto callback = factory.createFilterFactoryFromProto(*proto_config, "stats", context); + Http::MockFilterChainFactoryCallbacks filter_callback; + 
EXPECT_CALL(filter_callback, addStreamDecoderFilter(_)); + callback(filter_callback); +} + +TEST(Factory, InvalidConfigNoColon) { + const std::string yaml = R"( + users: + inline_string: |- + user1{SHA}tESsBmE/yNY3lb6a0L6vVQEZNqw= + user2:{SHA}EJ9LPFDXsN9ynSmbxvjp75Bmlx8= + )"; + + BasicAuthFilterFactory factory; + ProtobufTypes::MessagePtr proto_config = factory.createEmptyRouteConfigProto(); + TestUtility::loadFromYaml(yaml, *proto_config); + + NiceMock context; + + EXPECT_THROW(factory.createFilterFactoryFromProto(*proto_config, "stats", context), + EnvoyException); +} + +TEST(Factory, InvalidConfigNoUser) { + const std::string yaml = R"( + users: + inline_string: |- + :{SHA}tESsBmE/yNY3lb6a0L6vVQEZNqw= + user2:{SHA}EJ9LPFDXsN9ynSmbxvjp75Bmlx8= + )"; + + BasicAuthFilterFactory factory; + ProtobufTypes::MessagePtr proto_config = factory.createEmptyRouteConfigProto(); + TestUtility::loadFromYaml(yaml, *proto_config); + + NiceMock context; + + EXPECT_THROW(factory.createFilterFactoryFromProto(*proto_config, "stats", context), + EnvoyException); +} + +TEST(Factory, InvalidConfigNoPassword) { + const std::string yaml = R"( + users: + inline_string: |- + user1: + user2:{SHA}EJ9LPFDXsN9ynSmbxvjp75Bmlx8= + )"; + + BasicAuthFilterFactory factory; + ProtobufTypes::MessagePtr proto_config = factory.createEmptyRouteConfigProto(); + TestUtility::loadFromYaml(yaml, *proto_config); + + NiceMock context; + + EXPECT_THROW(factory.createFilterFactoryFromProto(*proto_config, "stats", context), + EnvoyException); +} + +TEST(Factory, InvalidConfigNoHash) { + const std::string yaml = R"( + users: + inline_string: |- + user1:{SHA} + user2:{SHA}EJ9LPFDXsN9ynSmbxvjp75Bmlx8= + )"; + + BasicAuthFilterFactory factory; + ProtobufTypes::MessagePtr proto_config = factory.createEmptyRouteConfigProto(); + TestUtility::loadFromYaml(yaml, *proto_config); + + NiceMock context; + + EXPECT_THROW(factory.createFilterFactoryFromProto(*proto_config, "stats", context), + EnvoyException); +} + 
+TEST(Factory, InvalidConfigNotSHA) { + const std::string yaml = R"( + users: + inline_string: |- + user1:{SHA}tESsBmE/yNY3lb6a0L6vVQEZNqw= + user2:$apr1$0vAnUTEB$4EJJr0GR3y48WF2AiieWs. + )"; + + BasicAuthFilterFactory factory; + ProtobufTypes::MessagePtr proto_config = factory.createEmptyRouteConfigProto(); + TestUtility::loadFromYaml(yaml, *proto_config); + + NiceMock context; + + EXPECT_THROW(factory.createFilterFactoryFromProto(*proto_config, "stats", context), + EnvoyException); +} + +} // namespace BasicAuth +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/basic_auth/filter_test.cc b/test/extensions/filters/http/basic_auth/filter_test.cc new file mode 100644 index 000000000000..d8d3f3000be4 --- /dev/null +++ b/test/extensions/filters/http/basic_auth/filter_test.cc @@ -0,0 +1,103 @@ +#include "envoy/extensions/filters/http/basic_auth/v3/basic_auth.pb.h" + +#include "source/extensions/filters/http/basic_auth/basic_auth_filter.h" + +#include "test/mocks/http/mocks.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace HttpFilters { +namespace BasicAuth { + +class FilterTest : public testing::Test { +public: + FilterTest() { + std::unique_ptr> users = + std::make_unique>(); + users->insert({"user1", {"user1", "tESsBmE/yNY3lb6a0L6vVQEZNqw="}}); // user1:test1 + users->insert({"user2", {"user2", "EJ9LPFDXsN9ynSmbxvjp75Bmlx8="}}); // user2:test2 + config_ = std::make_unique(std::move(users), "stats", *stats_.rootScope()); + filter_ = std::make_shared(config_); + filter_->setDecoderFilterCallbacks(decoder_filter_callbacks_); + } + + NiceMock stats_; + NiceMock decoder_filter_callbacks_; + FilterConfigConstSharedPtr config_; + std::shared_ptr filter_; +}; + +TEST_F(FilterTest, BasicAuth) { + // user1:test1 + Http::TestRequestHeaderMapImpl request_headers_user1{{"Authorization", "Basic dXNlcjE6dGVzdDE="}}; + + 
EXPECT_EQ(Http::FilterHeadersStatus::Continue, + filter_->decodeHeaders(request_headers_user1, true)); + + // user2:test2 + Http::TestRequestHeaderMapImpl request_headers_user2{{"Authorization", "Basic dXNlcjI6dGVzdDI="}}; + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, + filter_->decodeHeaders(request_headers_user2, true)); +} + +TEST_F(FilterTest, UserNotExist) { + // user3:test2 + Http::TestRequestHeaderMapImpl request_headers_user1{{"Authorization", "Basic dXNlcjM6dGVzdDI="}}; + + EXPECT_CALL(decoder_filter_callbacks_, sendLocalReply(_, _, _, _, _)) + .WillOnce(Invoke([&](Http::Code code, absl::string_view body, + std::function, + const absl::optional grpc_status, + absl::string_view details) { + EXPECT_EQ(Http::Code::Unauthorized, code); + EXPECT_EQ("User authentication failed. Invalid username/password combination", body); + EXPECT_EQ(grpc_status, absl::nullopt); + EXPECT_EQ(details, "invalid_credential_for_basic_auth"); + })); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_user1, true)); +} + +TEST_F(FilterTest, InvalidPassword) { + // user1:test2 + Http::TestRequestHeaderMapImpl request_headers_user1{{"Authorization", "Basic dXNlcjE6dGVzdDI="}}; + + EXPECT_CALL(decoder_filter_callbacks_, sendLocalReply(_, _, _, _, _)) + .WillOnce(Invoke([&](Http::Code code, absl::string_view body, + std::function, + const absl::optional grpc_status, + absl::string_view details) { + EXPECT_EQ(Http::Code::Unauthorized, code); + EXPECT_EQ("User authentication failed. 
Invalid username/password combination", body); + EXPECT_EQ(grpc_status, absl::nullopt); + EXPECT_EQ(details, "invalid_credential_for_basic_auth"); + })); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_user1, true)); +} + +TEST_F(FilterTest, NoAuthHeader) { + Http::TestRequestHeaderMapImpl request_headers_user1; + + EXPECT_CALL(decoder_filter_callbacks_, sendLocalReply(_, _, _, _, _)) + .WillOnce(Invoke([&](Http::Code code, absl::string_view body, + std::function, + const absl::optional grpc_status, + absl::string_view details) { + EXPECT_EQ(Http::Code::Unauthorized, code); + EXPECT_EQ("User authentication failed. Missing username and password", body); + EXPECT_EQ(grpc_status, absl::nullopt); + EXPECT_EQ(details, "no_credential_for_basic_auth"); + })); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers_user1, true)); +} + +} // namespace BasicAuth +} // namespace HttpFilters +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc index 5ce46084511f..a7b48eb4f768 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc +++ b/test/extensions/filters/http/ext_authz/ext_authz_integration_test.cc @@ -184,6 +184,7 @@ class ExtAuthzGrpcIntegrationTest : public Grpc::GrpcClientIntegrationParamTest, attributes->clear_source(); attributes->clear_destination(); attributes->clear_metadata_context(); + attributes->clear_route_metadata_context(); attributes->mutable_request()->clear_time(); http_request->clear_id(); http_request->clear_headers(); diff --git a/test/extensions/filters/http/ext_authz/ext_authz_test.cc b/test/extensions/filters/http/ext_authz/ext_authz_test.cc index 311bcb8e3597..6f902ef0e36f 100644 --- a/test/extensions/filters/http/ext_authz/ext_authz_test.cc +++ 
b/test/extensions/filters/http/ext_authz/ext_authz_test.cc @@ -36,9 +36,12 @@ using Envoy::Http::LowerCaseString; using testing::_; +using testing::Contains; using testing::InSequence; using testing::Invoke; +using testing::Key; using testing::NiceMock; +using testing::Not; using testing::Return; using testing::ReturnRef; using testing::Values; @@ -1508,6 +1511,174 @@ TEST_F(HttpFilterTest, ConnectionMetadataContext) { "not.selected.data")); } +// Verifies that specified route metadata is passed along in the check request +TEST_F(HttpFilterTest, RouteMetadataContext) { + initialize(R"EOF( + transport_api_version: V3 + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + route_metadata_context_namespaces: + - request.connection.route.have.data + - request.route.have.data + - connection.route.have.data + - route.has.data + - request.has.data + - untyped.and.typed.route.data + - typed.route.data + - untyped.route.data + route_typed_metadata_context_namespaces: + - untyped.and.typed.route.data + - typed.route.data + - untyped.route.data + metadata_context_namespaces: + - request.connection.route.have.data + - request.route.have.data + - connection.route.have.data + - connection.has.data + - route.has.data + )EOF"); + + const std::string route_yaml = R"EOF( + filter_metadata: + request.connection.route.have.data: + data: route + request.route.have.data: + data: route + connection.route.have.data: + data: route + route.has.data: + data: route + untyped.and.typed.route.data: + data: route_untyped + untyped.route.data: + data: route_untyped + typed_filter_metadata: + untyped.and.typed.route.data: + '@type': type.googleapis.com/helloworld.HelloRequest + name: route_typed + typed.route.data: + '@type': type.googleapis.com/helloworld.HelloRequest + name: route_typed + )EOF"; + + const std::string request_yaml = R"EOF( + filter_metadata: + request.connection.route.have.data: + data: request + request.route.have.data: + data: request + )EOF"; + + const std::string 
connection_yaml = R"EOF( + filter_metadata: + request.connection.route.have.data: + data: connection + connection.route.have.data: + data: connection + connection.has.data: + data: connection + )EOF"; + + prepareCheck(); + + envoy::config::core::v3::Metadata request_metadata, connection_metadata, route_metadata; + TestUtility::loadFromYaml(request_yaml, request_metadata); + TestUtility::loadFromYaml(connection_yaml, connection_metadata); + TestUtility::loadFromYaml(route_yaml, route_metadata); + ON_CALL(decoder_filter_callbacks_.stream_info_, dynamicMetadata()) + .WillByDefault(ReturnRef(request_metadata)); + connection_.stream_info_.metadata_ = connection_metadata; + ON_CALL(*decoder_filter_callbacks_.route_, metadata()).WillByDefault(ReturnRef(route_metadata)); + + envoy::service::auth::v3::CheckRequest check_request; + EXPECT_CALL(*client_, check(_, _, _, _)) + .WillOnce( + Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks&, + const envoy::service::auth::v3::CheckRequest& check_param, Tracing::Span&, + const StreamInfo::StreamInfo&) -> void { check_request = check_param; })); + + filter_->decodeHeaders(request_headers_, false); + Http::MetadataMap metadata_map{{"metadata", "metadata"}}; + EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter_->decodeMetadata(metadata_map)); + + for (const auto& namespace_from_route : std::vector{ + "request.connection.route.have.data", + "request.route.have.data", + "connection.route.have.data", + "route.has.data", + }) { + ASSERT_THAT(check_request.attributes().route_metadata_context().filter_metadata(), + Contains(Key(namespace_from_route))); + EXPECT_EQ("route", check_request.attributes() + .route_metadata_context() + .filter_metadata() + .at(namespace_from_route) + .fields() + .at("data") + .string_value()); + } + EXPECT_THAT(check_request.attributes().route_metadata_context().filter_metadata(), + Not(Contains(Key("request.has.data")))); + + for (const auto& namespace_from_request : + 
std::vector{"request.connection.route.have.data", "request.route.have.data"}) { + ASSERT_THAT(check_request.attributes().metadata_context().filter_metadata(), + Contains(Key(namespace_from_request))); + EXPECT_EQ("request", check_request.attributes() + .metadata_context() + .filter_metadata() + .at(namespace_from_request) + .fields() + .at("data") + .string_value()); + } + for (const auto& namespace_from_connection : + std::vector{"connection.route.have.data", "connection.has.data"}) { + ASSERT_THAT(check_request.attributes().metadata_context().filter_metadata(), + Contains(Key(namespace_from_connection))); + EXPECT_EQ("connection", check_request.attributes() + .metadata_context() + .filter_metadata() + .at(namespace_from_connection) + .fields() + .at("data") + .string_value()); + } + EXPECT_THAT(check_request.attributes().metadata_context().filter_metadata(), + Not(Contains(Key("route.has.data")))); + + for (const auto& namespace_from_route_untyped : + std::vector{"untyped.and.typed.route.data", "untyped.route.data"}) { + ASSERT_THAT(check_request.attributes().route_metadata_context().filter_metadata(), + Contains(Key(namespace_from_route_untyped))); + EXPECT_EQ("route_untyped", check_request.attributes() + .route_metadata_context() + .filter_metadata() + .at(namespace_from_route_untyped) + .fields() + .at("data") + .string_value()); + } + EXPECT_THAT(check_request.attributes().route_metadata_context().filter_metadata(), + Not(Contains(Key("typed.route.data")))); + + for (const auto& namespace_from_route_typed : + std::vector{"untyped.and.typed.route.data", "typed.route.data"}) { + ASSERT_THAT(check_request.attributes().route_metadata_context().typed_filter_metadata(), + Contains(Key(namespace_from_route_typed))); + helloworld::HelloRequest hello; + EXPECT_TRUE(check_request.attributes() + .route_metadata_context() + .typed_filter_metadata() + .at(namespace_from_route_typed) + .UnpackTo(&hello)); + EXPECT_EQ("route_typed", hello.name()); + } + 
EXPECT_THAT(check_request.attributes().route_metadata_context().typed_filter_metadata(), + Not(Contains(Key("untyped.route.data")))); +} + // Test that filter can be disabled via the filter_enabled field. TEST_F(HttpFilterTest, FilterDisabled) { initialize(R"EOF( @@ -2378,6 +2549,51 @@ TEST_P(HttpFilterTestParam, DeniedResponseWith401) { .value()); } +// Test that a denied response results in the connection closing with a 401 response to the client. +TEST_P(HttpFilterTestParam, DeniedResponseWith401NoClusterResponseCodeStats) { + initialize(R"EOF( + transport_api_version: V3 + grpc_service: + envoy_grpc: + cluster_name: "ext_authz_server" + charge_cluster_response_stats: + value: false + )EOF"); + + InSequence s; + + prepareCheck(); + EXPECT_CALL(*client_, check(_, _, _, _)) + .WillOnce( + Invoke([&](Filters::Common::ExtAuthz::RequestCallbacks& callbacks, + const envoy::service::auth::v3::CheckRequest&, Tracing::Span&, + const StreamInfo::StreamInfo&) -> void { request_callbacks_ = &callbacks; })); + + EXPECT_CALL(decoder_filter_callbacks_.stream_info_, + setResponseFlag(Envoy::StreamInfo::ResponseFlag::UnauthorizedExternalService)); + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, + filter_->decodeHeaders(request_headers_, false)); + + Http::TestResponseHeaderMapImpl response_headers{{":status", "401"}}; + EXPECT_CALL(decoder_filter_callbacks_, + encodeHeaders_(HeaderMapEqualRef(&response_headers), true)); + EXPECT_CALL(decoder_filter_callbacks_, continueDecoding()).Times(0); + + Filters::Common::ExtAuthz::Response response{}; + response.status = Filters::Common::ExtAuthz::CheckStatus::Denied; + response.status_code = Http::Code::Unauthorized; + request_callbacks_->onComplete(std::make_unique(response)); + EXPECT_EQ(1U, decoder_filter_callbacks_.clusterInfo() + ->statsScope() + .counterFromString("ext_authz.denied") + .value()); + EXPECT_EQ(1U, config_->stats().denied_.value()); + EXPECT_EQ(0, decoder_filter_callbacks_.clusterInfo() + 
->statsScope() + .counterFromString("upstream_rq_4xx") + .value()); +} + // Test that a denied response results in the connection closing with a 403 response to the client. TEST_P(HttpFilterTestParam, DeniedResponseWith403) { InSequence s; diff --git a/test/extensions/filters/network/redis_proxy/router_impl_test.cc b/test/extensions/filters/network/redis_proxy/router_impl_test.cc index 366ac792700e..b3b0deb932e9 100644 --- a/test/extensions/filters/network/redis_proxy/router_impl_test.cc +++ b/test/extensions/filters/network/redis_proxy/router_impl_test.cc @@ -69,6 +69,20 @@ TEST(PrefixRoutesTest, RoutedToCatchAll) { EXPECT_EQ(upstream_c, router.upstreamPool(key, stream_info)->upstream("")); } +TEST(PrefixRoutesTest, MissingCatchAll) { + Upstreams upstreams; + upstreams.emplace("fake_clusterA", std::make_shared()); + upstreams.emplace("fake_clusterB", std::make_shared()); + + Runtime::MockLoader runtime_; + + PrefixRoutes router(createPrefixRoutes(), std::move(upstreams), runtime_); + + std::string key("c:bar"); + NiceMock stream_info; + EXPECT_EQ(nullptr, router.upstreamPool(key, stream_info)); +} + TEST(PrefixRoutesTest, RoutedToLongestPrefix) { auto upstream_a = std::make_shared(); diff --git a/test/extensions/filters/udp/udp_proxy/mocks.cc b/test/extensions/filters/udp/udp_proxy/mocks.cc index b8a59bb23c66..7c654662a83e 100644 --- a/test/extensions/filters/udp/udp_proxy/mocks.cc +++ b/test/extensions/filters/udp/udp_proxy/mocks.cc @@ -15,6 +15,7 @@ namespace SessionFilters { MockReadFilterCallbacks::MockReadFilterCallbacks() { ON_CALL(*this, sessionId()).WillByDefault(Return(session_id_)); ON_CALL(*this, streamInfo()).WillByDefault(ReturnRef(stream_info_)); + ON_CALL(*this, continueFilterChain()).WillByDefault(Return(true)); } MockReadFilterCallbacks::~MockReadFilterCallbacks() = default; diff --git a/test/extensions/filters/udp/udp_proxy/mocks.h b/test/extensions/filters/udp/udp_proxy/mocks.h index ce23c9374e1a..f0bd4e7505ef 100644 --- 
a/test/extensions/filters/udp/udp_proxy/mocks.h +++ b/test/extensions/filters/udp/udp_proxy/mocks.h @@ -22,7 +22,7 @@ class MockReadFilterCallbacks : public ReadFilterCallbacks { MOCK_METHOD(uint64_t, sessionId, (), (const)); MOCK_METHOD(StreamInfo::StreamInfo&, streamInfo, ()); - MOCK_METHOD(void, continueFilterChain, ()); + MOCK_METHOD(bool, continueFilterChain, ()); MOCK_METHOD(void, injectDatagramToFilterChain, (Network::UdpRecvData & data)); uint64_t session_id_{1}; diff --git a/test/extensions/filters/udp/udp_proxy/session_filters/dynamic_forward_proxy/proxy_filter_integration_test.cc b/test/extensions/filters/udp/udp_proxy/session_filters/dynamic_forward_proxy/proxy_filter_integration_test.cc index cf76b3ae7b21..d3a822c31bab 100644 --- a/test/extensions/filters/udp/udp_proxy/session_filters/dynamic_forward_proxy/proxy_filter_integration_test.cc +++ b/test/extensions/filters/udp/udp_proxy/session_filters/dynamic_forward_proxy/proxy_filter_integration_test.cc @@ -35,22 +35,25 @@ class DynamicForwardProxyIntegrationTest uint32_t max_buffered_bytes_; }; - void setup(absl::optional buffer_config = absl::nullopt, uint32_t max_hosts = 1024, + void setup(std::string upsteam_host = "localhost", + absl::optional buffer_config = absl::nullopt, uint32_t max_hosts = 1024, uint32_t max_pending_requests = 1024) { setUdpFakeUpstream(FakeUpstreamConfig::UdpConfig()); - config_helper_.addConfigModifier([this, buffer_config, max_hosts, max_pending_requests]( - envoy::config::bootstrap::v3::Bootstrap& bootstrap) { - // Switch predefined cluster_0 to CDS filesystem sourcing. 
- bootstrap.mutable_dynamic_resources()->mutable_cds_config()->set_resource_api_version( - envoy::config::core::v3::ApiVersion::V3); - bootstrap.mutable_dynamic_resources() - ->mutable_cds_config() - ->mutable_path_config_source() - ->set_path(cds_helper_.cdsPath()); - bootstrap.mutable_static_resources()->clear_clusters(); - - std::string filter_config = fmt::format(R"EOF( + config_helper_.addConfigModifier( + [this, upsteam_host, buffer_config, max_hosts, + max_pending_requests](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + // Switch predefined cluster_0 to CDS filesystem sourcing. + bootstrap.mutable_dynamic_resources()->mutable_cds_config()->set_resource_api_version( + envoy::config::core::v3::ApiVersion::V3); + bootstrap.mutable_dynamic_resources() + ->mutable_cds_config() + ->mutable_path_config_source() + ->set_path(cds_helper_.cdsPath()); + bootstrap.mutable_static_resources()->clear_clusters(); + + std::string filter_config = fmt::format( + R"EOF( name: udp_proxy typed_config: '@type': type.googleapis.com/envoy.extensions.filters.udp.udp_proxy.v3.UdpProxyConfig @@ -66,7 +69,7 @@ name: udp_proxy - name: setter typed_config: '@type': type.googleapis.com/test.extensions.filters.udp.udp_proxy.session_filters.DynamicForwardProxySetterFilterConfig - host: localhost + host: {} port: {} - name: dfp typed_config: @@ -79,24 +82,23 @@ name: udp_proxy dns_cache_circuit_breaker: max_pending_requests: {} )EOF", - fake_upstreams_[0]->localAddress()->ip()->port(), - Network::Test::ipVersionToDnsFamily(GetParam()), - max_hosts, max_pending_requests); + upsteam_host, fake_upstreams_[0]->localAddress()->ip()->port(), + Network::Test::ipVersionToDnsFamily(GetParam()), max_hosts, max_pending_requests); - if (buffer_config.has_value()) { - filter_config += fmt::format(R"EOF( + if (buffer_config.has_value()) { + filter_config += fmt::format(R"EOF( buffer_options: max_buffered_datagrams: {} max_buffered_bytes: {} )EOF", - buffer_config.value().max_buffered_datagrams_, - 
buffer_config.value().max_buffered_bytes_); - } + buffer_config.value().max_buffered_datagrams_, + buffer_config.value().max_buffered_bytes_); + } - auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); - auto* filter = listener->add_listener_filters(); - TestUtility::loadFromYaml(filter_config, *filter); - }); + auto* listener = bootstrap.mutable_static_resources()->mutable_listeners(0); + auto* filter = listener->add_listener_filters(); + TestUtility::loadFromYaml(filter_config, *filter); + }); // Setup the initial CDS cluster. cluster_.mutable_connect_timeout()->CopyFrom( @@ -157,7 +159,7 @@ TEST_P(DynamicForwardProxyIntegrationTest, BasicFlow) { } TEST_P(DynamicForwardProxyIntegrationTest, BasicFlowWithBuffering) { - setup(BufferConfig{1, 1024}); + setup("localhost", BufferConfig{1, 1024}); const uint32_t port = lookupPort("listener_0"); const auto listener_address = Network::Utility::resolveUrl( fmt::format("tcp://{}:{}", Network::Test::getLoopbackAddressUrlString(version_), port)); @@ -175,7 +177,7 @@ TEST_P(DynamicForwardProxyIntegrationTest, BasicFlowWithBuffering) { } TEST_P(DynamicForwardProxyIntegrationTest, BufferOverflowDueToDatagramSize) { - setup(BufferConfig{1, 2}); + setup("localhost", BufferConfig{1, 2}); const uint32_t port = lookupPort("listener_0"); const auto listener_address = Network::Utility::resolveUrl( fmt::format("tcp://{}:{}", Network::Test::getLoopbackAddressUrlString(version_), port)); @@ -196,6 +198,26 @@ TEST_P(DynamicForwardProxyIntegrationTest, BufferOverflowDueToDatagramSize) { EXPECT_EQ("hello2", request_datagram.buffer_->toString()); } +TEST_P(DynamicForwardProxyIntegrationTest, EmptyDnsResponseDueToDummyHost) { + setup("dummyhost"); + const uint32_t port = lookupPort("listener_0"); + const auto listener_address = Network::Utility::resolveUrl( + fmt::format("tcp://{}:{}", Network::Test::getLoopbackAddressUrlString(version_), port)); + Network::Test::UdpSyncPeer client(version_); + + client.write("hello1", 
*listener_address); + test_server_->waitForCounterEq("dns_cache.foo.dns_query_attempt", 1); + + // The DNS response is empty, so will not be found any valid host and session will be removed. + test_server_->waitForCounterEq("cluster.cluster_0.upstream_cx_none_healthy", 1); + test_server_->waitForGaugeEq("udp.foo.downstream_sess_active", 0); + + // DNS cache hit but still no host found. + client.write("hello2", *listener_address); + test_server_->waitForCounterEq("cluster.cluster_0.upstream_cx_none_healthy", 2); + test_server_->waitForGaugeEq("udp.foo.downstream_sess_active", 0); +} + } // namespace } // namespace DynamicForwardProxy } // namespace SessionFilters diff --git a/test/extensions/filters/udp/udp_proxy/session_filters/dynamic_forward_proxy/proxy_filter_test.cc b/test/extensions/filters/udp/udp_proxy/session_filters/dynamic_forward_proxy/proxy_filter_test.cc index 06aafcca58c2..6af769197b9d 100644 --- a/test/extensions/filters/udp/udp_proxy/session_filters/dynamic_forward_proxy/proxy_filter_test.cc +++ b/test/extensions/filters/udp/udp_proxy/session_filters/dynamic_forward_proxy/proxy_filter_test.cc @@ -209,8 +209,12 @@ TEST_F(DynamicProxyFilterTest, EXPECT_EQ(ReadFilterStatus::StopIteration, filter_->onData(recv_data_stub1_)); EXPECT_CALL(callbacks_, continueFilterChain()); - filter_->onLoadDnsCacheComplete( - std::make_shared()); + + auto host_info = std::make_shared(); + host_info->address_ = Network::Utility::parseInternetAddress("1.2.3.4", 50); + EXPECT_CALL(*host_info, address()); + filter_->onLoadDnsCacheComplete(host_info); + EXPECT_CALL(*handle, onDestroy()); } @@ -237,8 +241,12 @@ TEST_F(DynamicProxyFilterTest, LoadingCacheEntryWithDefaultBufferConfig) { EXPECT_CALL(callbacks_, continueFilterChain()); EXPECT_CALL(callbacks_, injectDatagramToFilterChain(_)).Times(2); - filter_->onLoadDnsCacheComplete( - std::make_shared()); + + auto host_info = std::make_shared(); + host_info->address_ = Network::Utility::parseInternetAddress("1.2.3.4", 50); + 
EXPECT_CALL(*host_info, address()); + filter_->onLoadDnsCacheComplete(host_info); + EXPECT_CALL(*handle, onDestroy()); EXPECT_FALSE(filter_config_->bufferEnabled()); } @@ -267,8 +275,12 @@ TEST_F(DynamicProxyFilterTest, LoadingCacheEntryWithBufferSizeOverflow) { EXPECT_CALL(callbacks_, continueFilterChain()); EXPECT_CALL(callbacks_, injectDatagramToFilterChain(_)); - filter_->onLoadDnsCacheComplete( - std::make_shared()); + + auto host_info = std::make_shared(); + host_info->address_ = Network::Utility::parseInternetAddress("1.2.3.4", 50); + EXPECT_CALL(*host_info, address()); + filter_->onLoadDnsCacheComplete(host_info); + EXPECT_CALL(*handle, onDestroy()); EXPECT_FALSE(filter_config_->bufferEnabled()); } @@ -297,12 +309,43 @@ TEST_F(DynamicProxyFilterTest, LoadingCacheEntryWithBufferBytesOverflow) { EXPECT_CALL(callbacks_, continueFilterChain()); EXPECT_CALL(callbacks_, injectDatagramToFilterChain(_)); - filter_->onLoadDnsCacheComplete( - std::make_shared()); + + auto host_info = std::make_shared(); + host_info->address_ = Network::Utility::parseInternetAddress("1.2.3.4", 50); + EXPECT_CALL(*host_info, address()); + filter_->onLoadDnsCacheComplete(host_info); + EXPECT_CALL(*handle, onDestroy()); EXPECT_FALSE(filter_config_->bufferEnabled()); } +TEST_F(DynamicProxyFilterTest, LoadingCacheEntryWithContinueFilterChainFailure) { + FilterConfig config; + config.mutable_buffer_options(); + setup(config); + + setFilterState("host", 50); + EXPECT_TRUE(filter_config_->bufferEnabled()); + Upstream::ResourceAutoIncDec* circuit_breakers_{ + new Upstream::ResourceAutoIncDec(pending_requests_)}; + EXPECT_CALL(*dns_cache_manager_->dns_cache_, canCreateDnsRequest_()) + .WillOnce(Return(circuit_breakers_)); + Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle* handle = + new Extensions::Common::DynamicForwardProxy::MockLoadDnsCacheEntryHandle(); + EXPECT_CALL(*dns_cache_manager_->dns_cache_, loadDnsCacheEntry_(Eq("host"), 50, _, _)) + .WillOnce(Return( + 
MockLoadDnsCacheEntryResult{LoadDnsCacheEntryStatus::Loading, handle, absl::nullopt})); + EXPECT_EQ(ReadFilterStatus::StopIteration, filter_->onNewSession()); + EXPECT_EQ(ReadFilterStatus::StopIteration, filter_->onData(recv_data_stub1_)); + + // Session is removed and no longer valid, no datagrams will be injected. + EXPECT_CALL(callbacks_, continueFilterChain()).WillOnce(Return(false)); + EXPECT_CALL(callbacks_, injectDatagramToFilterChain(_)).Times(0); + filter_->onLoadDnsCacheComplete(nullptr); + + EXPECT_CALL(*handle, onDestroy()); +} + } // namespace } // namespace DynamicForwardProxy } // namespace SessionFilters diff --git a/test/extensions/quic/proof_source/BUILD b/test/extensions/quic/proof_source/BUILD new file mode 100644 index 000000000000..43dcae18636e --- /dev/null +++ b/test/extensions/quic/proof_source/BUILD @@ -0,0 +1,21 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test_library", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test_library( + name = "pending_proof_source_factory_impl_lib", + srcs = ["pending_proof_source_factory_impl.cc"], + hdrs = ["pending_proof_source_factory_impl.h"], + tags = ["nofips"], + deps = [ + "//envoy/registry", + "//source/common/quic:envoy_quic_proof_source_factory_interface", + "//source/common/quic:envoy_quic_proof_source_lib", + ], +) diff --git a/test/extensions/quic/proof_source/pending_proof_source_factory_impl.cc b/test/extensions/quic/proof_source/pending_proof_source_factory_impl.cc new file mode 100644 index 000000000000..9dd472518eb3 --- /dev/null +++ b/test/extensions/quic/proof_source/pending_proof_source_factory_impl.cc @@ -0,0 +1,39 @@ +#include "test/extensions/quic/proof_source/pending_proof_source_factory_impl.h" + +#include "source/common/quic/envoy_quic_proof_source.h" + +namespace Envoy { +namespace Quic { + +class PendingProofSource : public EnvoyQuicProofSource { +public: + PendingProofSource(Network::Socket& listen_socket, + 
Network::FilterChainManager& filter_chain_manager, + Server::ListenerStats& listener_stats, TimeSource& time_source) + : EnvoyQuicProofSource(listen_socket, filter_chain_manager, listener_stats, time_source) {} + +protected: + void signPayload(const quic::QuicSocketAddress& /*server_address*/, + const quic::QuicSocketAddress& /*client_address*/, + const std::string& /*hostname*/, uint16_t /*signature_algorithm*/, + absl::string_view /*in*/, + std::unique_ptr callback) override { + // Make the callback pending. + pending_callbacks_.push_back(std::move(callback)); + } + +private: + std::vector> pending_callbacks_; +}; + +std::unique_ptr PendingProofSourceFactoryImpl::createQuicProofSource( + Network::Socket& listen_socket, Network::FilterChainManager& filter_chain_manager, + Server::ListenerStats& listener_stats, TimeSource& time_source) { + return std::make_unique(listen_socket, filter_chain_manager, listener_stats, + time_source); +} + +REGISTER_FACTORY(PendingProofSourceFactoryImpl, EnvoyQuicProofSourceFactoryInterface); + +} // namespace Quic +} // namespace Envoy diff --git a/test/extensions/quic/proof_source/pending_proof_source_factory_impl.h b/test/extensions/quic/proof_source/pending_proof_source_factory_impl.h new file mode 100644 index 000000000000..35b77043d385 --- /dev/null +++ b/test/extensions/quic/proof_source/pending_proof_source_factory_impl.h @@ -0,0 +1,30 @@ +#pragma once + +#include "envoy/registry/registry.h" + +#include "source/common/protobuf/protobuf.h" +#include "source/common/quic/envoy_quic_proof_source_factory_interface.h" + +namespace Envoy { +namespace Quic { + +// Provides a ProofSource implementation which makes signing pending. +class PendingProofSourceFactoryImpl : public EnvoyQuicProofSourceFactoryInterface { +public: + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + // Using Struct instead of a custom config proto. This is only allowed in tests. 
+ return ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()}; + } + + std::string name() const override { return "envoy.quic.proof_source.pending_signing"; } + + std::unique_ptr + createQuicProofSource(Network::Socket& listen_socket, + Network::FilterChainManager& filter_chain_manager, + Server::ListenerStats& listener_stats, TimeSource& time_source) override; +}; + +DECLARE_FACTORY(PendingProofSourceFactoryImpl); + +} // namespace Quic +} // namespace Envoy diff --git a/test/extensions/resource_monitors/downstream_connections/cx_limit_overload_integration_test.cc b/test/extensions/resource_monitors/downstream_connections/cx_limit_overload_integration_test.cc index b2fa4afe9659..7242c9f89cd6 100644 --- a/test/extensions/resource_monitors/downstream_connections/cx_limit_overload_integration_test.cc +++ b/test/extensions/resource_monitors/downstream_connections/cx_limit_overload_integration_test.cc @@ -104,7 +104,7 @@ TEST_F(GlobalDownstreamCxLimitIntegrationTest, GlobalLimitSetViaRuntimeKeyAndOve config_helper_.addRuntimeOverride("overload.global_downstream_max_connections", "3"); initializeOverloadManager(2); const std::string log_line = - "Global downstream connections limits is configured via deprecated runtime key " + "Global downstream connections limits is configured via runtime key " "overload.global_downstream_max_connections and in " "envoy.resource_monitors.global_downstream_max_connections. 
Using overload manager " "config."; diff --git a/test/extensions/resource_monitors/injected_resource/injected_resource_monitor_integration_test.cc b/test/extensions/resource_monitors/injected_resource/injected_resource_monitor_integration_test.cc index 88cbd4b8a5fd..d9be99300106 100644 --- a/test/extensions/resource_monitors/injected_resource/injected_resource_monitor_integration_test.cc +++ b/test/extensions/resource_monitors/injected_resource/injected_resource_monitor_integration_test.cc @@ -42,14 +42,6 @@ class OverloadIntegrationTest : public testing::TestWithParam http2_options) override { - IntegrationCodecClientPtr codec = - HttpIntegrationTest::makeRawHttpConnection(std::move(conn), http2_options); - return codec; - } - void initialize() override { config_helper_.addConfigModifier([this](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { const std::string overload_config = diff --git a/test/extensions/tracers/opentelemetry/opentelemetry_tracer_impl_test.cc b/test/extensions/tracers/opentelemetry/opentelemetry_tracer_impl_test.cc index 4954854efd3d..300e92cf8d5a 100644 --- a/test/extensions/tracers/opentelemetry/opentelemetry_tracer_impl_test.cc +++ b/test/extensions/tracers/opentelemetry/opentelemetry_tracer_impl_test.cc @@ -27,6 +27,14 @@ using testing::NiceMock; using testing::Return; using testing::ReturnRef; +class MockResourceProvider : public ResourceProvider { +public: + MOCK_METHOD(Resource, getResource, + (const envoy::config::trace::v3::OpenTelemetryConfig& opentelemetry_config, + Server::Configuration::TracerFactoryContext& context), + (const)); +}; + class OpenTelemetryDriverTest : public testing::Test { public: OpenTelemetryDriverTest() = default; @@ -44,7 +52,13 @@ class OpenTelemetryDriverTest : public testing::Test { .WillByDefault(Return(ByMove(std::move(mock_client_factory)))); ON_CALL(factory_context, scope()).WillByDefault(ReturnRef(scope_)); - driver_ = std::make_unique(opentelemetry_config, context_); + Resource resource; + 
resource.attributes_.insert(std::pair("key1", "val1")); + + auto mock_resource_provider = NiceMock(); + EXPECT_CALL(mock_resource_provider, getResource(_, _)).WillRepeatedly(Return(resource)); + + driver_ = std::make_unique(opentelemetry_config, context_, mock_resource_provider); } void setupValidDriver() { @@ -183,6 +197,9 @@ TEST_F(OpenTelemetryDriverTest, ParseSpanContextFromHeadersTest) { key: "service.name" value: string_value: "unknown_service:envoy" + key: "key1" + value: + string_value: "val1" scope_spans: spans: trace_id: "AAA" @@ -550,6 +567,9 @@ TEST_F(OpenTelemetryDriverTest, ExportOTLPSpanWithAttributes) { key: "service.name" value: string_value: "unknown_service:envoy" + key: "key1" + value: + string_value: "val1" scope_spans: spans: trace_id: "AAA" @@ -659,6 +679,9 @@ TEST_F(OpenTelemetryDriverTest, ExportSpanWithCustomServiceName) { key: "service.name" value: string_value: "test-service-name" + key: "key1" + value: + string_value: "val1" scope_spans: spans: trace_id: "AAA" diff --git a/test/extensions/tracers/opentelemetry/resource_detectors/BUILD b/test/extensions/tracers/opentelemetry/resource_detectors/BUILD new file mode 100644 index 000000000000..b91bdda9b2e2 --- /dev/null +++ b/test/extensions/tracers/opentelemetry/resource_detectors/BUILD @@ -0,0 +1,21 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test( + name = "resource_provider_test", + srcs = ["resource_provider_test.cc"], + deps = [ + "//envoy/registry", + "//source/extensions/tracers/opentelemetry/resource_detectors:resource_detector_lib", + "//test/mocks/server:tracer_factory_context_mocks", + "//test/test_common:registry_lib", + "//test/test_common:utility_lib", + ], +) diff --git a/test/extensions/tracers/opentelemetry/resource_detectors/environment/BUILD b/test/extensions/tracers/opentelemetry/resource_detectors/environment/BUILD new file mode 100644 index 
000000000000..2e6598200c38 --- /dev/null +++ b/test/extensions/tracers/opentelemetry/resource_detectors/environment/BUILD @@ -0,0 +1,37 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) +load( + "//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_extension_cc_test( + name = "config_test", + srcs = ["config_test.cc"], + extension_names = ["envoy.tracers.opentelemetry.resource_detectors.environment"], + deps = [ + "//envoy/registry", + "//source/extensions/tracers/opentelemetry/resource_detectors/environment:config", + "//source/extensions/tracers/opentelemetry/resource_detectors/environment:environment_resource_detector_lib", + "//test/mocks/server:tracer_factory_context_mocks", + "//test/test_common:utility_lib", + ], +) + +envoy_extension_cc_test( + name = "environment_resource_detector_test", + srcs = ["environment_resource_detector_test.cc"], + extension_names = ["envoy.tracers.opentelemetry.resource_detectors.environment"], + deps = [ + "//source/extensions/tracers/opentelemetry/resource_detectors/environment:environment_resource_detector_lib", + "//test/mocks/server:tracer_factory_context_mocks", + "//test/test_common:utility_lib", + "@envoy_api//envoy/extensions/tracers/opentelemetry/resource_detectors/v3:pkg_cc_proto", + ], +) diff --git a/test/extensions/tracers/opentelemetry/resource_detectors/environment/config_test.cc b/test/extensions/tracers/opentelemetry/resource_detectors/environment/config_test.cc new file mode 100644 index 000000000000..7e9ada0850eb --- /dev/null +++ b/test/extensions/tracers/opentelemetry/resource_detectors/environment/config_test.cc @@ -0,0 +1,36 @@ +#include "envoy/registry/registry.h" + +#include "source/extensions/tracers/opentelemetry/resource_detectors/environment/config.h" + +#include "test/mocks/server/tracer_factory_context.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { 
+namespace Extensions { +namespace Tracers { +namespace OpenTelemetry { + +// Test create resource detector via factory +TEST(EnvironmentResourceDetectorFactoryTest, Basic) { + auto* factory = Registry::FactoryRegistry::getFactory( + "envoy.tracers.opentelemetry.resource_detectors.environment"); + ASSERT_NE(factory, nullptr); + + envoy::config::core::v3::TypedExtensionConfig typed_config; + const std::string yaml = R"EOF( + name: envoy.tracers.opentelemetry.resource_detectors.environment + typed_config: + "@type": type.googleapis.com/envoy.extensions.tracers.opentelemetry.resource_detectors.v3.EnvironmentResourceDetectorConfig + )EOF"; + TestUtility::loadFromYaml(yaml, typed_config); + + NiceMock context; + EXPECT_NE(factory->createResourceDetector(typed_config.typed_config(), context), nullptr); +} + +} // namespace OpenTelemetry +} // namespace Tracers +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/tracers/opentelemetry/resource_detectors/environment/environment_resource_detector_test.cc b/test/extensions/tracers/opentelemetry/resource_detectors/environment/environment_resource_detector_test.cc new file mode 100644 index 000000000000..e88f0dd5e72a --- /dev/null +++ b/test/extensions/tracers/opentelemetry/resource_detectors/environment/environment_resource_detector_test.cc @@ -0,0 +1,109 @@ +#include + +#include "envoy/extensions/tracers/opentelemetry/resource_detectors/v3/environment_resource_detector.pb.h" +#include "envoy/registry/registry.h" + +#include "source/extensions/tracers/opentelemetry/resource_detectors/environment/environment_resource_detector.h" + +#include "test/mocks/server/tracer_factory_context.h" +#include "test/test_common/environment.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using testing::ReturnRef; + +namespace Envoy { +namespace Extensions { +namespace Tracers { +namespace OpenTelemetry { + +const std::string kOtelResourceAttributesEnv = 
"OTEL_RESOURCE_ATTRIBUTES"; + +// Test detector when env variable is not present +TEST(EnvironmentResourceDetectorTest, EnvVariableNotPresent) { + NiceMock context; + + envoy::extensions::tracers::opentelemetry::resource_detectors::v3:: + EnvironmentResourceDetectorConfig config; + + auto detector = std::make_unique(config, context); + EXPECT_THROW_WITH_MESSAGE(detector->detect(), EnvoyException, + "Environment variable doesn't exist: OTEL_RESOURCE_ATTRIBUTES"); +} + +// Test detector when env variable is present but contains an empty value +TEST(EnvironmentResourceDetectorTest, EnvVariablePresentButEmpty) { + NiceMock context; + TestEnvironment::setEnvVar(kOtelResourceAttributesEnv, "", 1); + Envoy::Cleanup cleanup([]() { TestEnvironment::unsetEnvVar(kOtelResourceAttributesEnv); }); + + envoy::extensions::tracers::opentelemetry::resource_detectors::v3:: + EnvironmentResourceDetectorConfig config; + + auto detector = std::make_unique(config, context); + +#ifdef WIN32 + EXPECT_THROW_WITH_MESSAGE(detector->detect(), EnvoyException, + "Environment variable doesn't exist: OTEL_RESOURCE_ATTRIBUTES"); +#else + EXPECT_THROW_WITH_MESSAGE(detector->detect(), EnvoyException, + "The OpenTelemetry environment resource detector is configured but the " + "'OTEL_RESOURCE_ATTRIBUTES'" + " environment variable is empty."); +#endif +} + +// Test detector with valid values in the env variable +TEST(EnvironmentResourceDetectorTest, EnvVariablePresentAndWithAttributes) { + NiceMock context; + TestEnvironment::setEnvVar(kOtelResourceAttributesEnv, "key1=val1,key2=val2", 1); + Envoy::Cleanup cleanup([]() { TestEnvironment::unsetEnvVar(kOtelResourceAttributesEnv); }); + ResourceAttributes expected_attributes = {{"key1", "val1"}, {"key2", "val2"}}; + + Api::ApiPtr api = Api::createApiForTest(); + EXPECT_CALL(context.server_factory_context_, api()).WillRepeatedly(ReturnRef(*api)); + + envoy::extensions::tracers::opentelemetry::resource_detectors::v3:: + EnvironmentResourceDetectorConfig 
config; + + auto detector = std::make_unique(config, context); + Resource resource = detector->detect(); + + EXPECT_EQ(resource.schemaUrl_, ""); + EXPECT_EQ(2, resource.attributes_.size()); + + for (auto& actual : resource.attributes_) { + auto expected = expected_attributes.find(actual.first); + + EXPECT_TRUE(expected != expected_attributes.end()); + EXPECT_EQ(expected->second, actual.second); + } +} + +// Test detector with invalid values mixed with valid ones in the env variable +TEST(EnvironmentResourceDetectorTest, EnvVariablePresentAndWithAttributesWrongFormat) { + NiceMock context; + TestEnvironment::setEnvVar(kOtelResourceAttributesEnv, "key1=val1,key2val2,key3/val3, , key", 1); + Envoy::Cleanup cleanup([]() { TestEnvironment::unsetEnvVar(kOtelResourceAttributesEnv); }); + ResourceAttributes expected_attributes = {{"key1", "val"}}; + + Api::ApiPtr api = Api::createApiForTest(); + EXPECT_CALL(context.server_factory_context_, api()).WillRepeatedly(ReturnRef(*api)); + + envoy::extensions::tracers::opentelemetry::resource_detectors::v3:: + EnvironmentResourceDetectorConfig config; + + auto detector = std::make_unique(config, context); + + EXPECT_THROW_WITH_MESSAGE(detector->detect(), EnvoyException, + "The OpenTelemetry environment resource detector is configured but the " + "'OTEL_RESOURCE_ATTRIBUTES'" + " environment variable has an invalid format."); +} + +} // namespace OpenTelemetry +} // namespace Tracers +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/tracers/opentelemetry/resource_detectors/resource_provider_test.cc b/test/extensions/tracers/opentelemetry/resource_detectors/resource_provider_test.cc new file mode 100644 index 000000000000..8f49c4d93ca7 --- /dev/null +++ b/test/extensions/tracers/opentelemetry/resource_detectors/resource_provider_test.cc @@ -0,0 +1,424 @@ +#include + +#include "envoy/registry/registry.h" + +#include "source/extensions/tracers/opentelemetry/resource_detectors/resource_provider.h" + +#include 
"test/mocks/server/tracer_factory_context.h" +#include "test/test_common/environment.h" +#include "test/test_common/registry.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using ::testing::Return; + +namespace Envoy { +namespace Extensions { +namespace Tracers { +namespace OpenTelemetry { +namespace { + +class SampleDetector : public ResourceDetector { +public: + MOCK_METHOD(Resource, detect, ()); +}; + +class DetectorFactoryA : public ResourceDetectorFactory { +public: + MOCK_METHOD(ResourceDetectorPtr, createResourceDetector, + (const Protobuf::Message& message, + Server::Configuration::TracerFactoryContext& context)); + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique(); + } + + std::string name() const override { return "envoy.tracers.opentelemetry.resource_detectors.a"; } +}; + +class DetectorFactoryB : public ResourceDetectorFactory { +public: + MOCK_METHOD(ResourceDetectorPtr, createResourceDetector, + (const Protobuf::Message& message, + Server::Configuration::TracerFactoryContext& context)); + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique(); + } + + std::string name() const override { return "envoy.tracers.opentelemetry.resource_detectors.b"; } +}; + +const std::string kOtelResourceAttributesEnv = "OTEL_RESOURCE_ATTRIBUTES"; + +class ResourceProviderTest : public testing::Test { +public: + ResourceProviderTest() { + resource_a_.attributes_.insert(std::pair("key1", "val1")); + resource_b_.attributes_.insert(std::pair("key2", "val2")); + } + NiceMock context_; + Resource resource_a_; + Resource resource_b_; +}; + +// Verifies a resource with the static service name is returned when no detectors are configured +TEST_F(ResourceProviderTest, NoResourceDetectorsConfigured) { + const std::string yaml_string = R"EOF( + grpc_service: + envoy_grpc: + cluster_name: fake-cluster + timeout: 0.250s + service_name: my-service + )EOF"; + 
envoy::config::trace::v3::OpenTelemetryConfig opentelemetry_config; + TestUtility::loadFromYaml(yaml_string, opentelemetry_config); + + ResourceProviderImpl resource_provider; + Resource resource = resource_provider.getResource(opentelemetry_config, context_); + + EXPECT_EQ(resource.schemaUrl_, ""); + + // Only the service name was added to the resource + EXPECT_EQ(1, resource.attributes_.size()); +} + +// Verifies a resource with the default service name is returned when no detectors + static service +// name are configured +TEST_F(ResourceProviderTest, ServiceNameNotProvided) { + const std::string yaml_string = R"EOF( + grpc_service: + envoy_grpc: + cluster_name: fake-cluster + timeout: 0.250s + )EOF"; + envoy::config::trace::v3::OpenTelemetryConfig opentelemetry_config; + TestUtility::loadFromYaml(yaml_string, opentelemetry_config); + + ResourceProviderImpl resource_provider; + Resource resource = resource_provider.getResource(opentelemetry_config, context_); + + EXPECT_EQ(resource.schemaUrl_, ""); + + // service.name receives the unknown value when not configured + EXPECT_EQ(1, resource.attributes_.size()); + auto service_name = resource.attributes_.find("service.name"); + EXPECT_EQ("unknown_service:envoy", service_name->second); +} + +// Verifies it is possible to configure multiple resource detectors +TEST_F(ResourceProviderTest, MultipleResourceDetectorsConfigured) { + auto detector_a = std::make_unique>(); + EXPECT_CALL(*detector_a, detect()).WillOnce(Return(resource_a_)); + + auto detector_b = std::make_unique>(); + EXPECT_CALL(*detector_b, detect()).WillOnce(Return(resource_b_)); + + DetectorFactoryA factory_a; + Registry::InjectFactory factory_a_registration(factory_a); + + DetectorFactoryB factory_b; + Registry::InjectFactory factory_b_registration(factory_b); + + EXPECT_CALL(factory_a, createResourceDetector(_, _)) + .WillOnce(Return(testing::ByMove(std::move(detector_a)))); + EXPECT_CALL(factory_b, createResourceDetector(_, _)) + 
.WillOnce(Return(testing::ByMove(std::move(detector_b)))); + + // Expected merged attributes from all detectors + ResourceAttributes expected_attributes = { + {"service.name", "my-service"}, {"key1", "val1"}, {"key2", "val2"}}; + + const std::string yaml_string = R"EOF( + grpc_service: + envoy_grpc: + cluster_name: fake-cluster + timeout: 0.250s + service_name: my-service + resource_detectors: + - name: envoy.tracers.opentelemetry.resource_detectors.a + typed_config: + "@type": type.googleapis.com/google.protobuf.Struct + - name: envoy.tracers.opentelemetry.resource_detectors.b + typed_config: + "@type": type.googleapis.com/google.protobuf.StringValue + )EOF"; + envoy::config::trace::v3::OpenTelemetryConfig opentelemetry_config; + TestUtility::loadFromYaml(yaml_string, opentelemetry_config); + + ResourceProviderImpl resource_provider; + Resource resource = resource_provider.getResource(opentelemetry_config, context_); + + EXPECT_EQ(resource.schemaUrl_, ""); + + // The resource should contain all 3 merged attributes + // service.name + 1 for each detector + EXPECT_EQ(3, resource.attributes_.size()); + + for (auto& actual : resource.attributes_) { + auto expected = expected_attributes.find(actual.first); + + EXPECT_TRUE(expected != expected_attributes.end()); + EXPECT_EQ(expected->second, actual.second); + } +} + +// Verifies Envoy fails when an unknown resource detector is configured +TEST_F(ResourceProviderTest, UnknownResourceDetectors) { + const std::string yaml_string = R"EOF( + grpc_service: + envoy_grpc: + cluster_name: fake-cluster + timeout: 0.250s + service_name: my-service + resource_detectors: + - name: envoy.tracers.opentelemetry.resource_detectors.UnkownResourceDetector + typed_config: + "@type": type.googleapis.com/google.protobuf.Struct + )EOF"; + envoy::config::trace::v3::OpenTelemetryConfig opentelemetry_config; + TestUtility::loadFromYaml(yaml_string, opentelemetry_config); + + ResourceProviderImpl resource_provider; + EXPECT_THROW_WITH_MESSAGE( + 
resource_provider.getResource(opentelemetry_config, context_), EnvoyException, + "Resource detector factory not found: " + "'envoy.tracers.opentelemetry.resource_detectors.UnkownResourceDetector'"); +} + +// Verifies Envoy fails when an error occurs while instantiating a resource detector +TEST_F(ResourceProviderTest, ProblemCreatingResourceDetector) { + DetectorFactoryA factory; + Registry::InjectFactory factory_registration(factory); + + // Simulating having a problem when creating the resource detector + EXPECT_CALL(factory, createResourceDetector(_, _)).WillOnce(Return(testing::ByMove(nullptr))); + + const std::string yaml_string = R"EOF( + grpc_service: + envoy_grpc: + cluster_name: fake-clusterdetector_a + timeout: 0.250s + service_name: my-service + resource_detectors: + - name: envoy.tracers.opentelemetry.resource_detectors.a + typed_config: + "@type": type.googleapis.com/google.protobuf.Struct + )EOF"; + + envoy::config::trace::v3::OpenTelemetryConfig opentelemetry_config; + TestUtility::loadFromYaml(yaml_string, opentelemetry_config); + + ResourceProviderImpl resource_provider; + EXPECT_THROW_WITH_MESSAGE(resource_provider.getResource(opentelemetry_config, context_), + EnvoyException, + "Resource detector could not be created: " + "'envoy.tracers.opentelemetry.resource_detectors.a'"); +} + +// Test merge when old schema url is empty but updating is not +TEST_F(ResourceProviderTest, OldSchemaEmptyUpdatingSet) { + std::string expected_schema_url = "my.schema/v1"; + Resource old_resource = resource_a_; + + // Updating resource is empty (no attributes) + Resource updating_resource; + updating_resource.schemaUrl_ = expected_schema_url; + + auto detector_a = std::make_unique>(); + EXPECT_CALL(*detector_a, detect()).WillOnce(Return(old_resource)); + + auto detector_b = std::make_unique>(); + EXPECT_CALL(*detector_b, detect()).WillOnce(Return(updating_resource)); + + DetectorFactoryA factory_a; + Registry::InjectFactory factory_a_registration(factory_a); + + 
DetectorFactoryB factory_b; + Registry::InjectFactory factory_b_registration(factory_b); + + EXPECT_CALL(factory_a, createResourceDetector(_, _)) + .WillOnce(Return(testing::ByMove(std::move(detector_a)))); + EXPECT_CALL(factory_b, createResourceDetector(_, _)) + .WillOnce(Return(testing::ByMove(std::move(detector_b)))); + + const std::string yaml_string = R"EOF( + grpc_service: + envoy_grpc: + cluster_name: fake-cluster + timeout: 0.250s + service_name: my-service + resource_detectors: + - name: envoy.tracers.opentelemetry.resource_detectors.a + typed_config: + "@type": type.googleapis.com/google.protobuf.Struct + - name: envoy.tracers.opentelemetry.resource_detectors.b + typed_config: + "@type": type.googleapis.com/google.protobuf.StringValue + )EOF"; + envoy::config::trace::v3::OpenTelemetryConfig opentelemetry_config; + TestUtility::loadFromYaml(yaml_string, opentelemetry_config); + + ResourceProviderImpl resource_provider; + Resource resource = resource_provider.getResource(opentelemetry_config, context_); + + // OTel spec says the updating schema should be used + EXPECT_EQ(expected_schema_url, resource.schemaUrl_); +} + +// Test merge when old schema url is not empty but updating is +TEST_F(ResourceProviderTest, OldSchemaSetUpdatingEmpty) { + std::string expected_schema_url = "my.schema/v1"; + Resource old_resource = resource_a_; + old_resource.schemaUrl_ = expected_schema_url; + + Resource updating_resource = resource_b_; + updating_resource.schemaUrl_ = ""; + + auto detector_a = std::make_unique>(); + EXPECT_CALL(*detector_a, detect()).WillOnce(Return(old_resource)); + + auto detector_b = std::make_unique>(); + EXPECT_CALL(*detector_b, detect()).WillOnce(Return(updating_resource)); + + DetectorFactoryA factory_a; + Registry::InjectFactory factory_a_registration(factory_a); + + DetectorFactoryB factory_b; + Registry::InjectFactory factory_b_registration(factory_b); + + EXPECT_CALL(factory_a, createResourceDetector(_, _)) + 
.WillOnce(Return(testing::ByMove(std::move(detector_a)))); + EXPECT_CALL(factory_b, createResourceDetector(_, _)) + .WillOnce(Return(testing::ByMove(std::move(detector_b)))); + + const std::string yaml_string = R"EOF( + grpc_service: + envoy_grpc: + cluster_name: fake-cluster + timeout: 0.250s + service_name: my-service + resource_detectors: + - name: envoy.tracers.opentelemetry.resource_detectors.a + typed_config: + "@type": type.googleapis.com/google.protobuf.Struct + - name: envoy.tracers.opentelemetry.resource_detectors.b + typed_config: + "@type": type.googleapis.com/google.protobuf.StringValue + )EOF"; + envoy::config::trace::v3::OpenTelemetryConfig opentelemetry_config; + TestUtility::loadFromYaml(yaml_string, opentelemetry_config); + + ResourceProviderImpl resource_provider; + Resource resource = resource_provider.getResource(opentelemetry_config, context_); + + // OTel spec says the updating schema should be used + EXPECT_EQ(expected_schema_url, resource.schemaUrl_); +} + +// Test merge when both old and updating schema url are set and equal +TEST_F(ResourceProviderTest, OldAndUpdatingSchemaAreEqual) { + std::string expected_schema_url = "my.schema/v1"; + Resource old_resource = resource_a_; + old_resource.schemaUrl_ = expected_schema_url; + + Resource updating_resource = resource_b_; + updating_resource.schemaUrl_ = expected_schema_url; + + auto detector_a = std::make_unique>(); + EXPECT_CALL(*detector_a, detect()).WillOnce(Return(old_resource)); + + auto detector_b = std::make_unique>(); + EXPECT_CALL(*detector_b, detect()).WillOnce(Return(updating_resource)); + + DetectorFactoryA factory_a; + Registry::InjectFactory factory_a_registration(factory_a); + + DetectorFactoryB factory_b; + Registry::InjectFactory factory_b_registration(factory_b); + + EXPECT_CALL(factory_a, createResourceDetector(_, _)) + .WillOnce(Return(testing::ByMove(std::move(detector_a)))); + EXPECT_CALL(factory_b, createResourceDetector(_, _)) + 
.WillOnce(Return(testing::ByMove(std::move(detector_b)))); + + const std::string yaml_string = R"EOF( + grpc_service: + envoy_grpc: + cluster_name: fake-cluster + timeout: 0.250s + service_name: my-service + resource_detectors: + - name: envoy.tracers.opentelemetry.resource_detectors.a + typed_config: + "@type": type.googleapis.com/google.protobuf.Struct + - name: envoy.tracers.opentelemetry.resource_detectors.b + typed_config: + "@type": type.googleapis.com/google.protobuf.StringValue + )EOF"; + envoy::config::trace::v3::OpenTelemetryConfig opentelemetry_config; + TestUtility::loadFromYaml(yaml_string, opentelemetry_config); + + ResourceProviderImpl resource_provider; + Resource resource = resource_provider.getResource(opentelemetry_config, context_); + + EXPECT_EQ(expected_schema_url, resource.schemaUrl_); +} + +// Test merge when both old and updating schema url are set but different +TEST_F(ResourceProviderTest, OldAndUpdatingSchemaAreDifferent) { + std::string expected_schema_url = "my.schema/v1"; + Resource old_resource = resource_a_; + old_resource.schemaUrl_ = expected_schema_url; + + Resource updating_resource = resource_b_; + updating_resource.schemaUrl_ = "my.schema/v2"; + + auto detector_a = std::make_unique>(); + EXPECT_CALL(*detector_a, detect()).WillOnce(Return(old_resource)); + + auto detector_b = std::make_unique>(); + EXPECT_CALL(*detector_b, detect()).WillOnce(Return(updating_resource)); + + DetectorFactoryA factory_a; + Registry::InjectFactory factory_a_registration(factory_a); + + DetectorFactoryB factory_b; + Registry::InjectFactory factory_b_registration(factory_b); + + EXPECT_CALL(factory_a, createResourceDetector(_, _)) + .WillOnce(Return(testing::ByMove(std::move(detector_a)))); + EXPECT_CALL(factory_b, createResourceDetector(_, _)) + .WillOnce(Return(testing::ByMove(std::move(detector_b)))); + + const std::string yaml_string = R"EOF( + grpc_service: + envoy_grpc: + cluster_name: fake-cluster + timeout: 0.250s + service_name: my-service + 
resource_detectors: + - name: envoy.tracers.opentelemetry.resource_detectors.a + typed_config: + "@type": type.googleapis.com/google.protobuf.Struct + - name: envoy.tracers.opentelemetry.resource_detectors.b + typed_config: + "@type": type.googleapis.com/google.protobuf.StringValue + )EOF"; + envoy::config::trace::v3::OpenTelemetryConfig opentelemetry_config; + TestUtility::loadFromYaml(yaml_string, opentelemetry_config); + + ResourceProviderImpl resource_provider; + Resource resource = resource_provider.getResource(opentelemetry_config, context_); + + // OTel spec says Old schema should be used + EXPECT_EQ(expected_schema_url, resource.schemaUrl_); +} + +} // namespace +} // namespace OpenTelemetry +} // namespace Tracers +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/tracers/opentelemetry/samplers/BUILD b/test/extensions/tracers/opentelemetry/samplers/BUILD new file mode 100644 index 000000000000..55414e7854c9 --- /dev/null +++ b/test/extensions/tracers/opentelemetry/samplers/BUILD @@ -0,0 +1,23 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_cc_test", + "envoy_package", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_cc_test( + name = "sampler_test", + srcs = ["sampler_test.cc"], + deps = [ + "//envoy/registry", + "//source/extensions/tracers/opentelemetry:opentelemetry_tracer_lib", + "//source/extensions/tracers/opentelemetry/samplers:sampler_lib", + "//test/mocks/server:tracer_factory_context_mocks", + "//test/test_common:registry_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/config/trace/v3:pkg_cc_proto", + ], +) diff --git a/test/extensions/tracers/opentelemetry/samplers/always_on/BUILD b/test/extensions/tracers/opentelemetry/samplers/always_on/BUILD new file mode 100644 index 000000000000..063cda9f0ec1 --- /dev/null +++ b/test/extensions/tracers/opentelemetry/samplers/always_on/BUILD @@ -0,0 +1,51 @@ +load( + "//bazel:envoy_build_system.bzl", + "envoy_package", +) +load( + 
"//test/extensions:extensions_build_system.bzl", + "envoy_extension_cc_test", +) + +licenses(["notice"]) # Apache 2 + +envoy_package() + +envoy_extension_cc_test( + name = "config_test", + srcs = ["config_test.cc"], + extension_names = ["envoy.tracers.opentelemetry.samplers.always_on"], + deps = [ + "//envoy/registry", + "//source/extensions/tracers/opentelemetry/samplers/always_on:always_on_sampler_lib", + "//source/extensions/tracers/opentelemetry/samplers/always_on:config", + "//test/mocks/server:tracer_factory_context_mocks", + "//test/test_common:utility_lib", + ], +) + +envoy_extension_cc_test( + name = "always_on_sampler_test", + srcs = ["always_on_sampler_test.cc"], + extension_names = ["envoy.tracers.opentelemetry.samplers.always_on"], + deps = [ + "//source/extensions/tracers/opentelemetry/samplers/always_on:always_on_sampler_lib", + "//test/mocks/server:tracer_factory_context_mocks", + "//test/test_common:utility_lib", + "@envoy_api//envoy/extensions/tracers/opentelemetry/samplers/v3:pkg_cc_proto", + ], +) + +envoy_extension_cc_test( + name = "always_on_sampler_integration_test", + srcs = [ + "always_on_sampler_integration_test.cc", + ], + extension_names = ["envoy.tracers.opentelemetry.samplers.always_on"], + deps = [ + "//source/exe:main_common_lib", + "//test/integration:http_integration_lib", + "//test/test_common:utility_lib", + "@envoy_api//envoy/extensions/filters/network/http_connection_manager/v3:pkg_cc_proto", + ], +) diff --git a/test/extensions/tracers/opentelemetry/samplers/always_on/always_on_sampler_integration_test.cc b/test/extensions/tracers/opentelemetry/samplers/always_on/always_on_sampler_integration_test.cc new file mode 100644 index 000000000000..051a21b6846f --- /dev/null +++ b/test/extensions/tracers/opentelemetry/samplers/always_on/always_on_sampler_integration_test.cc @@ -0,0 +1,142 @@ +#include +#include + +#include "envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.pb.h" + +#include 
"test/integration/http_integration.h" +#include "test/test_common/utility.h" + +#include "absl/strings/match.h" +#include "absl/strings/string_view.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace Tracers { +namespace OpenTelemetry { +namespace { + +const char* TRACEPARENT_VALUE = "00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01"; +const char* TRACEPARENT_VALUE_START = "00-0af7651916cd43dd8448eb211c80319c"; + +class AlwaysOnSamplerIntegrationTest : public Envoy::HttpIntegrationTest, + public testing::TestWithParam { +public: + AlwaysOnSamplerIntegrationTest() : HttpIntegrationTest(Http::CodecType::HTTP1, GetParam()) { + + const std::string yaml_string = R"EOF( + provider: + name: envoy.tracers.opentelemetry + typed_config: + "@type": type.googleapis.com/envoy.config.trace.v3.OpenTelemetryConfig + grpc_service: + envoy_grpc: + cluster_name: opentelemetry_collector + timeout: 0.250s + service_name: "a_service_name" + sampler: + name: envoy.tracers.opentelemetry.samplers.dynatrace + typed_config: + "@type": type.googleapis.com/envoy.extensions.tracers.opentelemetry.samplers.v3.AlwaysOnSamplerConfig + )EOF"; + + auto tracing_config = + std::make_unique<::envoy::extensions::filters::network::http_connection_manager::v3:: + HttpConnectionManager_Tracing>(); + TestUtility::loadFromYaml(yaml_string, *tracing_config.get()); + config_helper_.addConfigModifier( + [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { hcm.set_allocated_tracing(tracing_config.release()); }); + + initialize(); + codec_client_ = makeHttpConnection(lookupPort("http")); + } +}; + +INSTANTIATE_TEST_SUITE_P(IpVersions, AlwaysOnSamplerIntegrationTest, + testing::ValuesIn(TestEnvironment::getIpVersionsForTest()), + TestUtility::ipTestParamsToString); + +// Sends a request with traceparent and tracestate header. 
+TEST_P(AlwaysOnSamplerIntegrationTest, TestWithTraceparentAndTracestate) { + Http::TestRequestHeaderMapImpl request_headers{ + {":method", "GET"}, {":path", "/test/long/url"}, {":scheme", "http"}, + {":authority", "host"}, {"tracestate", "key=value"}, {"traceparent", TRACEPARENT_VALUE}}; + + auto response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0); + + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ(response->headers().getStatusValue(), "200"); + + // traceparent should be set: traceid should be re-used, span id should be different + absl::string_view traceparent_value = upstream_request_->headers() + .get(Http::LowerCaseString("traceparent"))[0] + ->value() + .getStringView(); + EXPECT_TRUE(absl::StartsWith(traceparent_value, TRACEPARENT_VALUE_START)); + EXPECT_NE(TRACEPARENT_VALUE, traceparent_value); + // tracestate should be forwarded + absl::string_view tracestate_value = upstream_request_->headers() + .get(Http::LowerCaseString("tracestate"))[0] + ->value() + .getStringView(); + EXPECT_EQ("key=value", tracestate_value); +} + +// Sends a request with traceparent but no tracestate header. 
+TEST_P(AlwaysOnSamplerIntegrationTest, TestWithTraceparentOnly) { + Http::TestRequestHeaderMapImpl request_headers{{":method", "GET"}, + {":path", "/test/long/url"}, + {":scheme", "http"}, + {":authority", "host"}, + {"traceparent", TRACEPARENT_VALUE}}; + auto response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0); + + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ(response->headers().getStatusValue(), "200"); + + // traceparent should be set: traceid should be re-used, span id should be different + absl::string_view traceparent_value = upstream_request_->headers() + .get(Http::LowerCaseString("traceparent"))[0] + ->value() + .getStringView(); + EXPECT_TRUE(absl::StartsWith(traceparent_value, TRACEPARENT_VALUE_START)); + EXPECT_NE(TRACEPARENT_VALUE, traceparent_value); + // OTLP tracer adds an empty tracestate + absl::string_view tracestate_value = upstream_request_->headers() + .get(Http::LowerCaseString("tracestate"))[0] + ->value() + .getStringView(); + EXPECT_EQ("", tracestate_value); +} + +// Sends a request without traceparent and tracestate header. 
+TEST_P(AlwaysOnSamplerIntegrationTest, TestWithoutTraceparentAndTracestate) { + Http::TestRequestHeaderMapImpl request_headers{ + {":method", "GET"}, {":path", "/test/long/url"}, {":scheme", "http"}, {":authority", "host"}}; + + auto response = sendRequestAndWaitForResponse(request_headers, 0, default_response_headers_, 0); + + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_TRUE(response->complete()); + EXPECT_EQ(response->headers().getStatusValue(), "200"); + + // traceparent will be added, trace_id and span_id will be generated, so there is nothing we can + // assert + EXPECT_EQ(upstream_request_->headers().get(::Envoy::Http::LowerCaseString("traceparent")).size(), + 1); + // OTLP tracer adds an empty tracestate + absl::string_view tracestate_value = upstream_request_->headers() + .get(Http::LowerCaseString("tracestate"))[0] + ->value() + .getStringView(); + EXPECT_EQ("", tracestate_value); +} + +} // namespace +} // namespace OpenTelemetry +} // namespace Tracers +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/tracers/opentelemetry/samplers/always_on/always_on_sampler_test.cc b/test/extensions/tracers/opentelemetry/samplers/always_on/always_on_sampler_test.cc new file mode 100644 index 000000000000..79d37ca9bfe8 --- /dev/null +++ b/test/extensions/tracers/opentelemetry/samplers/always_on/always_on_sampler_test.cc @@ -0,0 +1,56 @@ +#include + +#include "envoy/extensions/tracers/opentelemetry/samplers/v3/always_on_sampler.pb.h" + +#include "source/extensions/tracers/opentelemetry/samplers/always_on/always_on_sampler.h" +#include "source/extensions/tracers/opentelemetry/span_context.h" + +#include "test/mocks/server/tracer_factory_context.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace Tracers { +namespace OpenTelemetry { + +// Verify sampler being invoked with an invalid span context +TEST(AlwaysOnSamplerTest, TestWithInvalidParentContext) { + 
envoy::extensions::tracers::opentelemetry::samplers::v3::AlwaysOnSamplerConfig config; + NiceMock context; + auto sampler = std::make_shared(config, context); + EXPECT_STREQ(sampler->getDescription().c_str(), "AlwaysOnSampler"); + + auto sampling_result = + sampler->shouldSample(absl::nullopt, "operation_name", "12345", + ::opentelemetry::proto::trace::v1::Span::SPAN_KIND_SERVER, {}, {}); + EXPECT_EQ(sampling_result.decision, Decision::RECORD_AND_SAMPLE); + EXPECT_EQ(sampling_result.attributes, nullptr); + EXPECT_STREQ(sampling_result.tracestate.c_str(), ""); + EXPECT_TRUE(sampling_result.isRecording()); + EXPECT_TRUE(sampling_result.isSampled()); +} + +// Verify sampler being invoked with a valid span context +TEST(AlwaysOnSamplerTest, TestWithValidParentContext) { + envoy::extensions::tracers::opentelemetry::samplers::v3::AlwaysOnSamplerConfig config; + NiceMock context; + auto sampler = std::make_shared(config, context); + EXPECT_STREQ(sampler->getDescription().c_str(), "AlwaysOnSampler"); + + SpanContext span_context("0", "12345", "45678", false, "some_tracestate"); + auto sampling_result = + sampler->shouldSample(span_context, "operation_name", "12345", + ::opentelemetry::proto::trace::v1::Span::SPAN_KIND_SERVER, {}, {}); + EXPECT_EQ(sampling_result.decision, Decision::RECORD_AND_SAMPLE); + EXPECT_EQ(sampling_result.attributes, nullptr); + EXPECT_STREQ(sampling_result.tracestate.c_str(), "some_tracestate"); + EXPECT_TRUE(sampling_result.isRecording()); + EXPECT_TRUE(sampling_result.isSampled()); +} + +} // namespace OpenTelemetry +} // namespace Tracers +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/tracers/opentelemetry/samplers/always_on/config_test.cc b/test/extensions/tracers/opentelemetry/samplers/always_on/config_test.cc new file mode 100644 index 000000000000..226cd58e34f3 --- /dev/null +++ b/test/extensions/tracers/opentelemetry/samplers/always_on/config_test.cc @@ -0,0 +1,38 @@ +#include "envoy/registry/registry.h" + 
+#include "source/extensions/tracers/opentelemetry/samplers/always_on/config.h" + +#include "test/mocks/server/tracer_factory_context.h" +#include "test/test_common/utility.h" + +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace Tracers { +namespace OpenTelemetry { + +// Test create sampler via factory +TEST(AlwaysOnSamplerFactoryTest, Test) { + auto* factory = Registry::FactoryRegistry::getFactory( + "envoy.tracers.opentelemetry.samplers.always_on"); + ASSERT_NE(factory, nullptr); + EXPECT_STREQ(factory->name().c_str(), "envoy.tracers.opentelemetry.samplers.always_on"); + EXPECT_NE(factory->createEmptyConfigProto(), nullptr); + + envoy::config::core::v3::TypedExtensionConfig typed_config; + const std::string yaml = R"EOF( + name: envoy.tracers.opentelemetry.samplers.always_on + typed_config: + "@type": type.googleapis.com/envoy.extensions.tracers.opentelemetry.samplers.v3.AlwaysOnSamplerConfig + )EOF"; + TestUtility::loadFromYaml(yaml, typed_config); + NiceMock context; + EXPECT_NE(factory->createSampler(typed_config.typed_config(), context), nullptr); + EXPECT_STREQ(factory->name().c_str(), "envoy.tracers.opentelemetry.samplers.always_on"); +} + +} // namespace OpenTelemetry +} // namespace Tracers +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/tracers/opentelemetry/samplers/sampler_test.cc b/test/extensions/tracers/opentelemetry/samplers/sampler_test.cc new file mode 100644 index 000000000000..f80103b4345c --- /dev/null +++ b/test/extensions/tracers/opentelemetry/samplers/sampler_test.cc @@ -0,0 +1,193 @@ +#include + +#include "envoy/config/trace/v3/opentelemetry.pb.h" +#include "envoy/registry/registry.h" + +#include "source/common/tracing/http_tracer_impl.h" +#include "source/extensions/tracers/opentelemetry/opentelemetry_tracer_impl.h" +#include "source/extensions/tracers/opentelemetry/samplers/sampler.h" +#include "source/extensions/tracers/opentelemetry/span_context.h" + +#include 
"test/mocks/server/tracer_factory_context.h" +#include "test/test_common/registry.h" +#include "test/test_common/utility.h" + +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace Envoy { +namespace Extensions { +namespace Tracers { +namespace OpenTelemetry { + +using ::testing::NiceMock; +using ::testing::StrictMock; + +class TestSampler : public Sampler { +public: + MOCK_METHOD(SamplingResult, shouldSample, + ((const absl::optional), (const std::string&), (const std::string&), + (::opentelemetry::proto::trace::v1::Span::SpanKind), + (const std::map&), (const std::vector&)), + (override)); + MOCK_METHOD(std::string, getDescription, (), (const, override)); +}; + +class TestSamplerFactory : public SamplerFactory { +public: + MOCK_METHOD(SamplerSharedPtr, createSampler, + (const Protobuf::Message& message, + Server::Configuration::TracerFactoryContext& context)); + + ProtobufTypes::MessagePtr createEmptyConfigProto() override { + return std::make_unique(); + } + + std::string name() const override { return "envoy.tracers.opentelemetry.samplers.testsampler"; } +}; + +class SamplerFactoryTest : public testing::Test { + +protected: + NiceMock config; + NiceMock stream_info; + Tracing::TestTraceContextImpl trace_context{}; + NiceMock context; +}; + +// Test OTLP tracer without a sampler +TEST_F(SamplerFactoryTest, TestWithoutSampler) { + // using StrictMock, calls to SamplerFactory would cause a test failure + auto test_sampler = std::make_shared>(); + StrictMock sampler_factory; + Registry::InjectFactory sampler_factory_registration(sampler_factory); + + // no sampler configured + const std::string yaml_string = R"EOF( + grpc_service: + envoy_grpc: + cluster_name: fake-cluster + timeout: 0.250s + service_name: my-service + )EOF"; + + envoy::config::trace::v3::OpenTelemetryConfig opentelemetry_config; + TestUtility::loadFromYaml(yaml_string, opentelemetry_config); + + auto driver = std::make_unique(opentelemetry_config, context); + + driver->startSpan(config, 
trace_context, stream_info, "operation_name", + {Tracing::Reason::Sampling, true}); +} + +// Test config containing an unknown sampler +TEST_F(SamplerFactoryTest, TestWithInvalidSampler) { + // using StrictMock, calls to SamplerFactory would cause a test failure + auto test_sampler = std::make_shared>(); + StrictMock sampler_factory; + Registry::InjectFactory sampler_factory_registration(sampler_factory); + + // invalid sampler configured + const std::string yaml_string = R"EOF( + grpc_service: + envoy_grpc: + cluster_name: fake-cluster + timeout: 0.250s + service_name: my-service + sampler: + name: envoy.tracers.opentelemetry.samplers.testsampler + typed_config: + "@type": type.googleapis.com/google.protobuf.Value + )EOF"; + + envoy::config::trace::v3::OpenTelemetryConfig opentelemetry_config; + TestUtility::loadFromYaml(yaml_string, opentelemetry_config); + + EXPECT_THROW(std::make_unique(opentelemetry_config, context), EnvoyException); +} + +// Test OTLP tracer with a sampler +TEST_F(SamplerFactoryTest, TestWithSampler) { + auto test_sampler = std::make_shared>(); + TestSamplerFactory sampler_factory; + Registry::InjectFactory sampler_factory_registration(sampler_factory); + + EXPECT_CALL(sampler_factory, createSampler(_, _)).WillOnce(Return(test_sampler)); + + const std::string yaml_string = R"EOF( + grpc_service: + envoy_grpc: + cluster_name: fake-cluster + timeout: 0.250s + service_name: my-service + sampler: + name: envoy.tracers.opentelemetry.samplers.testsampler + typed_config: + "@type": type.googleapis.com/google.protobuf.Struct + )EOF"; + + envoy::config::trace::v3::OpenTelemetryConfig opentelemetry_config; + TestUtility::loadFromYaml(yaml_string, opentelemetry_config); + + auto driver = std::make_unique(opentelemetry_config, context); + + // shouldSample returns a result without additional attributes and Decision::RECORD_AND_SAMPLE + EXPECT_CALL(*test_sampler, shouldSample(_, _, _, _, _, _)) + .WillOnce([](const absl::optional, const std::string&, 
const std::string&, + ::opentelemetry::proto::trace::v1::Span::SpanKind, + const std::map&, const std::vector&) { + SamplingResult res; + res.decision = Decision::RECORD_AND_SAMPLE; + res.tracestate = "this_is=tracesate"; + return res; + }); + + Tracing::SpanPtr tracing_span = driver->startSpan( + config, trace_context, stream_info, "operation_name", {Tracing::Reason::Sampling, true}); + // startSpan returns a Tracing::SpanPtr. Tracing::Span has no sampled() method. + // We know that the underlying span is Extensions::Tracers::OpenTelemetry::Span + // So the dynamic_cast should be safe. + std::unique_ptr span(dynamic_cast(tracing_span.release())); + EXPECT_TRUE(span->sampled()); + EXPECT_STREQ(span->tracestate().c_str(), "this_is=tracesate"); + + // shouldSamples return a result containing additional attributes and Decision::DROP + EXPECT_CALL(*test_sampler, shouldSample(_, _, _, _, _, _)) + .WillOnce([](const absl::optional, const std::string&, const std::string&, + ::opentelemetry::proto::trace::v1::Span::SpanKind, + const std::map&, const std::vector&) { + SamplingResult res; + res.decision = Decision::DROP; + std::map attributes; + attributes["key"] = "value"; + attributes["another_key"] = "another_value"; + res.attributes = + std::make_unique>(std::move(attributes)); + res.tracestate = "this_is=another_tracesate"; + return res; + }); + tracing_span = driver->startSpan(config, trace_context, stream_info, "operation_name", + {Tracing::Reason::Sampling, true}); + std::unique_ptr unsampled_span(dynamic_cast(tracing_span.release())); + EXPECT_FALSE(unsampled_span->sampled()); + EXPECT_STREQ(unsampled_span->tracestate().c_str(), "this_is=another_tracesate"); +} + +// Test sampling result decision +TEST(SamplingResultTest, TestSamplingResult) { + SamplingResult result; + result.decision = Decision::RECORD_AND_SAMPLE; + EXPECT_TRUE(result.isRecording()); + EXPECT_TRUE(result.isSampled()); + result.decision = Decision::RECORD_ONLY; + EXPECT_TRUE(result.isRecording()); 
+ EXPECT_FALSE(result.isSampled()); + result.decision = Decision::DROP; + EXPECT_FALSE(result.isRecording()); + EXPECT_FALSE(result.isSampled()); +} + +} // namespace OpenTelemetry +} // namespace Tracers +} // namespace Extensions +} // namespace Envoy diff --git a/test/extensions/transport_sockets/tls/test_data/certs.sh b/test/extensions/transport_sockets/tls/test_data/certs.sh index 75a63ba41605..a63bbddacec9 100755 --- a/test/extensions/transport_sockets/tls/test_data/certs.sh +++ b/test/extensions/transport_sockets/tls/test_data/certs.sh @@ -114,7 +114,7 @@ generate_cert_chain() { ca_name="i$((x - 1))" fi echo "$x: $certname $ca_name" - generate_ca $certname $ca_name + generate_ca "$certname" "$ca_name" done for x in {1..3}; do cat "i${x}_cert.pem" >> test_long_cert_chain.pem diff --git a/test/integration/BUILD b/test/integration/BUILD index 2fba6896fbc0..3b104213539a 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -536,6 +536,7 @@ envoy_cc_test( "//source/extensions/load_balancing_policies/ring_hash:config", "//test/integration/filters:encode1xx_local_reply_config_lib", "//test/integration/filters:local_reply_during_decoding_filter_lib", + "//test/integration/filters:metadata_control_filter_lib", "//test/integration/filters:metadata_stop_all_filter_config_lib", "//test/integration/filters:on_local_reply_filter_config_lib", "//test/integration/filters:request_metadata_filter_config_lib", @@ -1553,7 +1554,9 @@ envoy_cc_test( "//test/test_common:test_runtime_lib", "@envoy_api//envoy/config/bootstrap/v3:pkg_cc_proto", "@envoy_api//envoy/config/overload/v3:pkg_cc_proto", - ], + ] + envoy_select_enable_http3([ + "//test/extensions/quic/proof_source:pending_proof_source_factory_impl_lib", + ]), ) envoy_cc_test( diff --git a/test/integration/base_integration_test.cc b/test/integration/base_integration_test.cc index 38cd432d54ce..2712e0cf4004 100644 --- a/test/integration/base_integration_test.cc +++ b/test/integration/base_integration_test.cc @@ 
-459,11 +459,7 @@ void BaseIntegrationTest::createGeneratedApiTestServer( if (config_helper_.bootstrap().static_resources().listeners_size() > 0 && !defer_listener_finalization_) { - // Wait for listeners to be created before invoking registerTestServerPorts() below, as that - // needs to know about the bound listener ports. - // Using 2x default timeout to cover for slow TLS implementations (no inline asm) on slow - // computers (e.g., Raspberry Pi) that sometimes time out on TLS listeners here. - Event::TestTimeSystem::RealTimeBound bound(2 * TestUtility::DefaultTimeout); + Event::TestTimeSystem::RealTimeBound bound(listeners_bound_timeout_ms_); const char* success = "listener_manager.listener_create_success"; const char* rejected = "listener_manager.lds.update_rejected"; for (Stats::CounterSharedPtr success_counter = test_server->counter(success), diff --git a/test/integration/base_integration_test.h b/test/integration/base_integration_test.h index c7f1e639005e..7b16822a4c7f 100644 --- a/test/integration/base_integration_test.h +++ b/test/integration/base_integration_test.h @@ -474,6 +474,13 @@ class BaseIntegrationTest : protected Logger::Loggable { void checkForMissingTagExtractionRules(); + // Sets the timeout to wait for listeners to be created before invoking + // registerTestServerPorts(), as that needs to know about the bound listener ports. + // Needs to be called before invoking createEnvoy() (invoked during initialize()). + void setListenersBoundTimeout(const std::chrono::milliseconds& duration) { + listeners_bound_timeout_ms_ = duration; + } + std::unique_ptr upstream_stats_store_; // Make sure the test server will be torn down after any fake client. @@ -527,6 +534,13 @@ class BaseIntegrationTest : protected Logger::Loggable { spdlog::level::level_enum default_log_level_; + // Timeout to wait for listeners to be created before invoking + // registerTestServerPorts(), as that needs to know about the bound listener ports. 
+ // Using 2x default timeout to cover for slow TLS implementations (no inline asm) on slow + // computers (e.g., Raspberry Pi) that sometimes time out on TLS listeners, or when + // the number of listeners in a test is large. + std::chrono::milliseconds listeners_bound_timeout_ms_{2 * TestUtility::DefaultTimeout}; + // Target number of upstreams. uint32_t fake_upstreams_count_{1}; diff --git a/test/integration/cx_limit_integration_test.cc b/test/integration/cx_limit_integration_test.cc index ccf2b19dc1c6..c998727d01b9 100644 --- a/test/integration/cx_limit_integration_test.cc +++ b/test/integration/cx_limit_integration_test.cc @@ -136,18 +136,6 @@ TEST_P(ConnectionLimitIntegrationTest, TestListenerLimit) { doTest(init_func, "downstream_cx_overflow"); } -TEST_P(ConnectionLimitIntegrationTest, TestDeprecationWarningForGlobalCxRuntimeLimit) { - std::function init_func = [this]() { - setGlobalLimit(4); - initialize(); - }; - const std::string log_line = - "Usage of the deprecated runtime key overload.global_downstream_max_connections, " - "consider switching to `envoy.resource_monitors.downstream_connections` instead." - "This runtime key will be removed in future."; - EXPECT_LOG_CONTAINS("warn", log_line, { init_func(); }); -} - // TODO (nezdolik) move this test to overload manager test suite, once runtime key is fully // deprecated. 
TEST_P(ConnectionLimitIntegrationTest, TestEmptyGlobalCxRuntimeLimit) { diff --git a/test/integration/http_integration.cc b/test/integration/http_integration.cc index 4ae499b7a06d..190f4bd8104c 100644 --- a/test/integration/http_integration.cc +++ b/test/integration/http_integration.cc @@ -266,7 +266,8 @@ IntegrationCodecClientPtr HttpIntegrationTest::makeHttpConnection(uint32_t port) IntegrationCodecClientPtr HttpIntegrationTest::makeRawHttpConnection( Network::ClientConnectionPtr&& conn, - absl::optional http2_options) { + absl::optional http2_options, + bool wait_till_connected) { std::shared_ptr cluster{new NiceMock()}; cluster->max_response_headers_count_ = 200; if (!http2_options.has_value()) { @@ -295,7 +296,8 @@ IntegrationCodecClientPtr HttpIntegrationTest::makeRawHttpConnection( // This call may fail in QUICHE because of INVALID_VERSION. QUIC connection doesn't support // in-connection version negotiation. auto codec = std::make_unique(*dispatcher_, random_, std::move(conn), - host_description, downstream_protocol_); + host_description, downstream_protocol_, + wait_till_connected); if (downstream_protocol_ == Http::CodecType::HTTP3 && codec->disconnected()) { // Connection may get closed during version negotiation or handshake. // TODO(#8479) QUIC connection doesn't support in-connection version negotiationPropagate diff --git a/test/integration/http_integration.h b/test/integration/http_integration.h index f0d1020e46bd..3cb15cda441f 100644 --- a/test/integration/http_integration.h +++ b/test/integration/http_integration.h @@ -156,9 +156,10 @@ class HttpIntegrationTest : public BaseIntegrationTest { IntegrationCodecClientPtr makeHttpConnection(uint32_t port); // Makes a http connection object without checking its connected state. 
- virtual IntegrationCodecClientPtr makeRawHttpConnection( - Network::ClientConnectionPtr&& conn, - absl::optional http2_options); + virtual IntegrationCodecClientPtr + makeRawHttpConnection(Network::ClientConnectionPtr&& conn, + absl::optional http2_options, + bool wait_till_connected = true); // Makes a downstream network connection object based on client codec version. Network::ClientConnectionPtr makeClientConnectionWithOptions( uint32_t port, const Network::ConnectionSocket::OptionsSharedPtr& options) override; diff --git a/test/integration/integration_test.cc b/test/integration/integration_test.cc index f424bdc833b0..702c6bf6d9ed 100644 --- a/test/integration/integration_test.cc +++ b/test/integration/integration_test.cc @@ -1593,6 +1593,7 @@ TEST_P(IntegrationTest, AbsolutePathWithoutPort) { // Ensure that connect behaves the same with allow_absolute_url enabled and without TEST_P(IntegrationTest, Connect) { + setListenersBoundTimeout(3 * TestUtility::DefaultTimeout); const std::string& request = "CONNECT www.somewhere.com:80 HTTP/1.1\r\n\r\n"; config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { // Clone the whole listener. diff --git a/test/integration/multiplexed_integration_test.cc b/test/integration/multiplexed_integration_test.cc index 580eb8b40716..215ab4dca48e 100644 --- a/test/integration/multiplexed_integration_test.cc +++ b/test/integration/multiplexed_integration_test.cc @@ -2277,6 +2277,61 @@ TEST_P(Http2FrameIntegrationTest, MultipleRequests) { tcp_client_->close(); } +TEST_P(Http2FrameIntegrationTest, MultipleRequestsWithMetadata) { + // Allow metadata usage. 
+ config_helper_.addConfigModifier([&](envoy::config::bootstrap::v3::Bootstrap& bootstrap) -> void { + RELEASE_ASSERT(bootstrap.mutable_static_resources()->clusters_size() >= 1, ""); + ConfigHelper::HttpProtocolOptions protocol_options; + protocol_options.mutable_explicit_http_config() + ->mutable_http2_protocol_options() + ->set_allow_metadata(true); + ConfigHelper::setProtocolOptions(*bootstrap.mutable_static_resources()->mutable_clusters(0), + protocol_options); + }); + config_helper_.addConfigModifier( + [&](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) -> void { hcm.mutable_http2_protocol_options()->set_allow_metadata(true); }); + + config_helper_.prependFilter(R"EOF( + name: metadata-control-filter + )EOF"); + + const int kRequestsSentPerIOCycle = 20; + autonomous_upstream_ = true; + config_helper_.addRuntimeOverride("http.max_requests_per_io_cycle", "1"); + beginSession(); + + std::string buffer; + for (int i = 0; i < kRequestsSentPerIOCycle; ++i) { + auto request = + Http2Frame::makePostRequest(Http2Frame::makeClientStreamId(i), "a", "/", + {{"response_data_blocks", "0"}, {"no_trailers", "1"}}); + absl::StrAppend(&buffer, std::string(request)); + } + + for (int i = 0; i < kRequestsSentPerIOCycle; ++i) { + Http::MetadataMap metadata_map{{"should_continue", absl::StrCat(i)}}; + auto metadata = Http2Frame::makeMetadataFrameFromMetadataMap( + Http2Frame::makeClientStreamId(i), metadata_map, Http2Frame::MetadataFlags::EndMetadata); + absl::StrAppend(&buffer, std::string(metadata)); + } + + for (int i = 0; i < kRequestsSentPerIOCycle; ++i) { + auto data = Http2Frame::makeDataFrame(Http2Frame::makeClientStreamId(i), "", + Http2Frame::DataFlags::EndStream); + absl::StrAppend(&buffer, std::string(data)); + } + + ASSERT_TRUE(tcp_client_->write(buffer, false, false)); + + for (int i = 0; i < kRequestsSentPerIOCycle; ++i) { + auto frame = readFrame(); + EXPECT_EQ(Http2Frame::Type::Headers, frame.type()); + 
EXPECT_EQ(Http2Frame::ResponseStatus::Ok, frame.responseStatus()); + } + tcp_client_->close(); +} + // Validate the request completion during processing of deferred list works. TEST_P(Http2FrameIntegrationTest, MultipleRequestsDecodeHeadersEndsRequest) { const int kRequestsSentPerIOCycle = 20; diff --git a/test/integration/overload_integration_test.cc b/test/integration/overload_integration_test.cc index f621b89f658a..1232968546dc 100644 --- a/test/integration/overload_integration_test.cc +++ b/test/integration/overload_integration_test.cc @@ -260,6 +260,62 @@ TEST_P(OverloadScaledTimerIntegrationTest, CloseIdleHttpConnections) { codec_client_->close(); } +TEST_P(OverloadScaledTimerIntegrationTest, HTTP3CloseIdleHttpConnectionsDuringHandshake) { + if (downstreamProtocol() != Http::CodecClient::Type::HTTP3) { + return; + } + TestScopedRuntime scoped_runtime; + scoped_runtime.mergeValues({{"envoy.reloadable_features.quic_fix_filter_manager_uaf", "true"}}); + + config_helper_.addConfigModifier([](envoy::config::bootstrap::v3::Bootstrap& bootstrap) { + auto* proof_source_config = bootstrap.mutable_static_resources() + ->mutable_listeners(0) + ->mutable_udp_listener_config() + ->mutable_quic_options() + ->mutable_proof_source_config(); + proof_source_config->set_name("envoy.quic.proof_source.pending_signing"); + proof_source_config->mutable_typed_config(); + }); + initializeOverloadManager( + TestUtility::parseYaml(R"EOF( + timer_scale_factors: + - timer: HTTP_DOWNSTREAM_CONNECTION_IDLE + min_timeout: 3s + )EOF")); + + // Set the load so the timer is reduced but not to the minimum value. + updateResource(0.8); + test_server_->waitForGaugeGe("overload.envoy.overload_actions.reduce_timeouts.scale_percent", 50); + // Create an HTTP connection without finishing the handshake. 
+ codec_client_ = makeRawHttpConnection(makeClientConnection((lookupPort("http"))), absl::nullopt, + /*wait_till_connected=*/false); + EXPECT_FALSE(codec_client_->connected()); + + // Advancing past the minimum time shouldn't close the connection. + timeSystem().advanceTimeWait(std::chrono::seconds(3)); + dispatcher_->run(Event::Dispatcher::RunType::NonBlock); + EXPECT_FALSE(codec_client_->connected()); + EXPECT_FALSE(codec_client_->disconnected()); + + // Increase load more so that the timer is reduced to the minimum. + updateResource(0.9); + test_server_->waitForGaugeEq("overload.envoy.overload_actions.reduce_timeouts.scale_percent", + 100); + + // Create another HTTP connection without finishing handshake. + IntegrationCodecClientPtr codec_client2 = makeRawHttpConnection( + makeClientConnection((lookupPort("http"))), absl::nullopt, /*wait_till_connected=*/false); + EXPECT_FALSE(codec_client2->connected()); + // Advancing past the minimum time and wait for the proxy to notice and close both connections. 
+ timeSystem().advanceTimeWait(std::chrono::seconds(3)); + test_server_->waitForCounterGe("http.config_test.downstream_cx_idle_timeout", 2); + ASSERT_TRUE(codec_client_->waitForDisconnect()); + EXPECT_FALSE(codec_client_->sawGoAway()); + EXPECT_FALSE(codec_client2->connected()); + ASSERT_TRUE(codec_client2->waitForDisconnect()); + EXPECT_FALSE(codec_client2->sawGoAway()); +} + TEST_P(OverloadScaledTimerIntegrationTest, CloseIdleHttpStream) { initializeOverloadManager( TestUtility::parseYaml(R"EOF( diff --git a/test/integration/quic_http_integration_test.cc b/test/integration/quic_http_integration_test.cc index 304164ef09fd..75706cf7231f 100644 --- a/test/integration/quic_http_integration_test.cc +++ b/test/integration/quic_http_integration_test.cc @@ -229,12 +229,13 @@ class QuicHttpIntegrationTestBase : public HttpIntegrationTest { return session; } - IntegrationCodecClientPtr makeRawHttpConnection( - Network::ClientConnectionPtr&& conn, - absl::optional http2_options) override { + IntegrationCodecClientPtr + makeRawHttpConnection(Network::ClientConnectionPtr&& conn, + absl::optional http2_options, + bool wait_till_connected = true) override { ENVOY_LOG(debug, "Creating a new client {}", conn->connectionInfoProvider().localAddress()->asStringView()); - return makeRawHttp3Connection(std::move(conn), http2_options, true); + return makeRawHttp3Connection(std::move(conn), http2_options, wait_till_connected); } // Create Http3 codec client with the option not to wait for 1-RTT key establishment. 
diff --git a/test/integration/server.cc b/test/integration/server.cc index 3a19040a98b5..4995765c9481 100644 --- a/test/integration/server.cc +++ b/test/integration/server.cc @@ -232,11 +232,11 @@ void IntegrationTestServerImpl::createAndRunEnvoyServer( if (process_object.has_value()) { process_context = std::make_unique(process_object->get()); } - Server::InstanceImpl server(init_manager, options, time_system, local_address, hooks, restarter, - stat_store, access_log_lock, component_factory, - std::move(random_generator), tls, Thread::threadFactoryForTest(), - Filesystem::fileSystemForTest(), std::move(process_context), - watermark_factory); + Server::InstanceImpl server(init_manager, options, time_system, hooks, restarter, stat_store, + access_log_lock, std::move(random_generator), tls, + Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(), + std::move(process_context), watermark_factory); + server.initialize(local_address, component_factory); // This is technically thread unsafe (assigning to a shared_ptr accessed // across threads), but because we synchronize below through serverReady(), the only // consumer on the main test thread in ~IntegrationTestServerImpl will not race. diff --git a/test/integration/shadow_policy_integration_test.cc b/test/integration/shadow_policy_integration_test.cc index 4f4c1b8855d7..fb196931d04d 100644 --- a/test/integration/shadow_policy_integration_test.cc +++ b/test/integration/shadow_policy_integration_test.cc @@ -510,6 +510,11 @@ TEST_P(ShadowPolicyIntegrationTest, MainRequestOverBufferLimit) { GTEST_SKIP() << "Not applicable for non-streaming shadows."; } autonomous_upstream_ = true; + if (Runtime::runtimeFeatureEnabled(Runtime::defer_processing_backedup_streams)) { + // With deferred processing, a local reply is triggered so the upstream + // stream will be incomplete. 
+ autonomous_allow_incomplete_streams_ = true; + } cluster_with_custom_filter_ = 0; filter_name_ = "encoder-decoder-buffer-filter"; initialConfigSetup("cluster_1", ""); @@ -537,7 +542,13 @@ TEST_P(ShadowPolicyIntegrationTest, MainRequestOverBufferLimit) { EXPECT_EQ(test_server_->counter("cluster.cluster_0.upstream_cx_total")->value(), 1); EXPECT_EQ(test_server_->counter("cluster.cluster_1.upstream_cx_total")->value(), 1); - test_server_->waitForCounterEq("cluster.cluster_1.upstream_rq_completed", 1); + if (Runtime::runtimeFeatureEnabled(Runtime::defer_processing_backedup_streams)) { + // With deferred processing, the encoder-decoder-buffer-filter will + // buffer too much data triggering a local reply. + test_server_->waitForCounterEq("http.config_test.downstream_rq_4xx", 1); + } else { + test_server_->waitForCounterEq("cluster.cluster_1.upstream_rq_completed", 1); + } } TEST_P(ShadowPolicyIntegrationTest, ShadowRequestOverBufferLimit) { diff --git a/test/mocks/event/mocks.cc b/test/mocks/event/mocks.cc index 16468bf697cb..e6a0b436f671 100644 --- a/test/mocks/event/mocks.cc +++ b/test/mocks/event/mocks.cc @@ -76,12 +76,23 @@ MockSchedulableCallback::~MockSchedulableCallback() { } } +MockSchedulableCallback::MockSchedulableCallback(MockDispatcher* dispatcher, + std::function callback, + testing::MockFunction* destroy_cb) + : dispatcher_(dispatcher), callback_(callback), destroy_cb_(destroy_cb) { + ON_CALL(*this, scheduleCallbackCurrentIteration()).WillByDefault(Assign(&enabled_, true)); + ON_CALL(*this, scheduleCallbackNextIteration()).WillByDefault(Assign(&enabled_, true)); + ON_CALL(*this, cancel()).WillByDefault(Assign(&enabled_, false)); + ON_CALL(*this, enabled()).WillByDefault(ReturnPointee(&enabled_)); +} + MockSchedulableCallback::MockSchedulableCallback(MockDispatcher* dispatcher, testing::MockFunction* destroy_cb) : dispatcher_(dispatcher), destroy_cb_(destroy_cb) { EXPECT_CALL(*dispatcher, createSchedulableCallback_(_)) 
.WillOnce(DoAll(SaveArg<0>(&callback_), Return(this))) .RetiresOnSaturation(); + ON_CALL(*this, scheduleCallbackCurrentIteration()).WillByDefault(Assign(&enabled_, true)); ON_CALL(*this, scheduleCallbackNextIteration()).WillByDefault(Assign(&enabled_, true)); ON_CALL(*this, cancel()).WillByDefault(Assign(&enabled_, false)); diff --git a/test/mocks/event/mocks.h b/test/mocks/event/mocks.h index f3279645a31e..4a0b2f6f9e27 100644 --- a/test/mocks/event/mocks.h +++ b/test/mocks/event/mocks.h @@ -224,6 +224,8 @@ class MockSchedulableCallback : public SchedulableCallback { public: MockSchedulableCallback(MockDispatcher* dispatcher, testing::MockFunction* destroy_cb = nullptr); + MockSchedulableCallback(MockDispatcher* dispatcher, std::function callback, + testing::MockFunction* destroy_cb = nullptr); ~MockSchedulableCallback() override; void invokeCallback() { diff --git a/test/mocks/http/mocks.h b/test/mocks/http/mocks.h index 7a26fb2fb2ed..2e3d2b86a094 100644 --- a/test/mocks/http/mocks.h +++ b/test/mocks/http/mocks.h @@ -269,6 +269,11 @@ class MockStreamDecoderFilterCallbacks : public StreamDecoderFilterCallbacks, MOCK_METHOD(OptRef, downstreamCallbacks, ()); MOCK_METHOD(OptRef, upstreamCallbacks, ()); MOCK_METHOD(absl::string_view, filterConfigName, (), (const override)); + MOCK_METHOD(RequestHeaderMapOptRef, requestHeaders, ()); + MOCK_METHOD(RequestTrailerMapOptRef, requestTrailers, ()); + MOCK_METHOD(ResponseHeaderMapOptRef, informationalHeaders, ()); + MOCK_METHOD(ResponseHeaderMapOptRef, responseHeaders, ()); + MOCK_METHOD(ResponseTrailerMapOptRef, responseTrailers, ()); // Http::StreamDecoderFilterCallbacks // NOLINTNEXTLINE(readability-identifier-naming) @@ -278,15 +283,12 @@ class MockStreamDecoderFilterCallbacks : public StreamDecoderFilterCallbacks, absl::string_view details); void encode1xxHeaders(ResponseHeaderMapPtr&& headers) override { encode1xxHeaders_(*headers); } - MOCK_METHOD(ResponseHeaderMapOptRef, informationalHeaders, (), (const)); void 
encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream, absl::string_view details) override { stream_info_.setResponseCodeDetails(details); encodeHeaders_(*headers, end_stream); } - MOCK_METHOD(ResponseHeaderMapOptRef, responseHeaders, (), (const)); void encodeTrailers(ResponseTrailerMapPtr&& trailers) override { encodeTrailers_(*trailers); } - MOCK_METHOD(ResponseTrailerMapOptRef, responseTrailers, (), (const)); void encodeMetadata(MetadataMapPtr&& metadata_map) override { encodeMetadata_(std::move(metadata_map)); } @@ -361,6 +363,11 @@ class MockStreamEncoderFilterCallbacks : public StreamEncoderFilterCallbacks, MOCK_METHOD(OptRef, downstreamCallbacks, ()); MOCK_METHOD(OptRef, upstreamCallbacks, ()); MOCK_METHOD(absl::string_view, filterConfigName, (), (const override)); + MOCK_METHOD(RequestHeaderMapOptRef, requestHeaders, ()); + MOCK_METHOD(RequestTrailerMapOptRef, requestTrailers, ()); + MOCK_METHOD(ResponseHeaderMapOptRef, informationalHeaders, ()); + MOCK_METHOD(ResponseHeaderMapOptRef, responseHeaders, ()); + MOCK_METHOD(ResponseTrailerMapOptRef, responseTrailers, ()); // Http::StreamEncoderFilterCallbacks MOCK_METHOD(void, addEncodedData, (Buffer::Instance & data, bool streaming)); diff --git a/test/per_file_coverage.sh b/test/per_file_coverage.sh index de38b62ab65f..b883f9afb130 100755 --- a/test/per_file_coverage.sh +++ b/test/per_file_coverage.sh @@ -7,7 +7,7 @@ declare -a KNOWN_LOW_COVERAGE=( "source/common/api:84.5" "source/common/api/posix:81.8" "source/common/config:95.3" -"source/common/crypto:88.1" +"source/common/crypto:95.5" "source/common/event:95.1" # Emulated edge events guards don't report LCOV "source/common/filesystem/posix:96.2" # FileReadToEndNotReadable fails in some env; createPath can't test all failure branches. 
"source/common/http/http2:95.2" diff --git a/test/server/server_fuzz_test.cc b/test/server/server_fuzz_test.cc index e07229c88a82..93e269c00786 100644 --- a/test/server/server_fuzz_test.cc +++ b/test/server/server_fuzz_test.cc @@ -152,11 +152,11 @@ DEFINE_PROTO_FUZZER(const envoy::config::bootstrap::v3::Bootstrap& input) { std::unique_ptr server; try { server = std::make_unique( - init_manager, options, test_time.timeSystem(), - std::make_shared("127.0.0.1"), hooks, restart, stats_store, - fakelock, component_factory, std::make_unique(), - thread_local_instance, Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(), - nullptr); + init_manager, options, test_time.timeSystem(), hooks, restart, stats_store, fakelock, + std::make_unique(), thread_local_instance, + Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(), nullptr); + server->initialize(std::make_shared("127.0.0.1"), + component_factory); } catch (const EnvoyException& ex) { ENVOY_LOG_MISC(debug, "Controlled EnvoyException exit: {}", ex.what()); return; diff --git a/test/server/server_test.cc b/test/server/server_test.cc index 689946c01927..8e8bca32f2f0 100644 --- a/test/server/server_test.cc +++ b/test/server/server_test.cc @@ -258,12 +258,12 @@ class ServerInstanceImplTestBase { : std::make_unique("Server"); server_ = std::make_unique( - *init_manager_, options_, time_system_, - std::make_shared("127.0.0.1"), hooks, restart_, - stats_store_, fakelock_, component_factory_, + *init_manager_, options_, time_system_, hooks, restart_, stats_store_, fakelock_, std::make_unique>(), *thread_local_, Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(), std::move(process_context_)); + server_->initialize(std::make_shared("127.0.0.1"), + component_factory_); EXPECT_TRUE(server_->api().fileSystem().fileExists(std::string(Platform::null_device_path))); } @@ -277,11 +277,11 @@ class ServerInstanceImplTestBase { thread_local_ = std::make_unique(); init_manager_ = 
std::make_unique("Server"); server_ = std::make_unique( - *init_manager_, options_, time_system_, - std::make_shared("127.0.0.1"), hooks_, restart_, - stats_store_, fakelock_, component_factory_, + *init_manager_, options_, time_system_, hooks_, restart_, stats_store_, fakelock_, std::make_unique>(), *thread_local_, Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(), nullptr); + server_->initialize(std::make_shared("127.0.0.1"), + component_factory_); EXPECT_TRUE(server_->api().fileSystem().fileExists(std::string(Platform::null_device_path))); } @@ -1302,13 +1302,13 @@ TEST_P(ServerInstanceImplTest, LogToFileError) { TEST_P(ServerInstanceImplTest, NoOptionsPassed) { thread_local_ = std::make_unique(); init_manager_ = std::make_unique("Server"); + server_.reset(new InstanceImpl( + *init_manager_, options_, time_system_, hooks_, restart_, stats_store_, fakelock_, + std::make_unique>(), *thread_local_, + Thread::threadFactoryForTest(), Filesystem::fileSystemForTest(), nullptr)); EXPECT_THROW_WITH_MESSAGE( - server_.reset(new InstanceImpl(*init_manager_, options_, time_system_, - std::make_shared("127.0.0.1"), - hooks_, restart_, stats_store_, fakelock_, component_factory_, - std::make_unique>(), - *thread_local_, Thread::threadFactoryForTest(), - Filesystem::fileSystemForTest(), nullptr)), + server_->initialize(std::make_shared("127.0.0.1"), + component_factory_), EnvoyException, "At least one of --config-path or --config-yaml or Options::configProto() should be " "non-empty"); diff --git a/tools/base/requirements.in b/tools/base/requirements.in index 160e6f0af407..c13d3a06eee0 100644 --- a/tools/base/requirements.in +++ b/tools/base/requirements.in @@ -10,7 +10,7 @@ colorama coloredlogs cryptography>=41.0.1 dependatool>=0.2.2 -envoy.base.utils>=0.4.27 +envoy.base.utils>=0.5.0 envoy.code.check>=0.5.8 envoy.dependency.check>=0.1.10 envoy.distribution.release>=0.0.9 diff --git a/tools/base/requirements.txt b/tools/base/requirements.txt index 
2a991b19d840..6ae32732d784 100644 --- a/tools/base/requirements.txt +++ b/tools/base/requirements.txt @@ -32,9 +32,9 @@ aio-api-github==0.2.5 \ # -r requirements.in # envoy-base-utils # envoy-dependency-check -aio-api-nist==0.0.3 \ - --hash=sha256:3465d25e4ffdec35d824960e6d68fbff070f823fde55a40fa4eb53a7fd7d18ca \ - --hash=sha256:5ecf9f32e19ad8804bba1358dde93d1008029335009541dadc69c3823241b382 +aio-api-nist==0.0.4 \ + --hash=sha256:1f2909d60ed4fdb3a3ffc37ad6012666f34078b71648394be91f5e67bbf8b6ca \ + --hash=sha256:c948ee597b9e7cda7982e17bc4aca509b8aa68510899b42e2d382c10fb0d6f89 # via envoy-dependency-check aio-core==0.10.0 \ --hash=sha256:57e2d8dd8ee8779b0ebc2e2447492c0db8d7ed782e9ad1bb2662593740751acb \ @@ -473,9 +473,9 @@ docutils==0.19 \ # envoy-docs-sphinx-runner # sphinx # sphinx-rtd-theme -envoy-base-utils==0.4.27 \ - --hash=sha256:908d7fdcf9ca2dae01cccdd6d2f5b9002e5ef8610f4437aba86af5a5f3a248a8 \ - --hash=sha256:e5a89a3c96cb424995705c884f2d0fac111f0519d2590da16fc8b5bfeba11469 +envoy-base-utils==0.5.0 \ + --hash=sha256:a185279fc2f6c49ba2b2a1d02ab2a361733ee4a5c3f41a225cbd2dd349369458 \ + --hash=sha256:f71e4bdcea86539f273388f1cb6210b40ec5d05f49aaacb7ef776a60fb60f107 # via # -r requirements.in # envoy-code-check @@ -491,9 +491,9 @@ envoy-code-check==0.5.8 \ --hash=sha256:03f32588cc9ed98ab6703cbca6f81df1527db71c3a0f962be6a6084ded40d528 \ --hash=sha256:2b12c51098c78d393823cf055a54e9308c37321d769041f01a2f35b04074d6f3 # via -r requirements.in -envoy-dependency-check==0.1.10 \ - --hash=sha256:4a637e0ed7184791b495041f9baf44567a95cbb979e1e5f26f6a8c33f724cf9e \ - --hash=sha256:e6ae41249f298c865a357edcd8e4850354f222ea4f0dd629c737706b23670c75 +envoy-dependency-check==0.1.11 \ + --hash=sha256:1c4e9f238787bda6d1270452538b361b3f33be3866640373161b70ac9c98c740 \ + --hash=sha256:3318930cf8632b3e9d0bfbd724f148c8eeb2b3e20784d92f62e16c6c706ba511 # via -r requirements.in envoy-distribution-distrotest==0.0.10 \ 
--hash=sha256:83e912c48da22eb3e514fc1142247d33eb7ed0d59e94eca2ffbd178a26fbf808 \ @@ -884,57 +884,57 @@ oauth2client==4.1.3 \ # via # gcs-oauth2-boto-plugin # google-apitools -orjson==3.9.9 \ - --hash=sha256:02e693843c2959befdd82d1ebae8b05ed12d1cb821605d5f9fe9f98ca5c9fd2b \ - --hash=sha256:06f0c024a75e8ba5d9101facb4fb5a028cdabe3cdfe081534f2a9de0d5062af2 \ - --hash=sha256:0a1a4d9e64597e550428ba091e51a4bcddc7a335c8f9297effbfa67078972b5c \ - --hash=sha256:0d2cd6ef4726ef1b8c63e30d8287225a383dbd1de3424d287b37c1906d8d2855 \ - --hash=sha256:0f89dc338a12f4357f5bf1b098d3dea6072fb0b643fd35fec556f4941b31ae27 \ - --hash=sha256:12b83e0d8ba4ca88b894c3e00efc59fe6d53d9ffb5dbbb79d437a466fc1a513d \ - --hash=sha256:1ef06431f021453a47a9abb7f7853f04f031d31fbdfe1cc83e3c6aadde502cce \ - --hash=sha256:1f352117eccac268a59fedac884b0518347f5e2b55b9f650c2463dd1e732eb61 \ - --hash=sha256:24301f2d99d670ded4fb5e2f87643bc7428a54ba49176e38deb2887e42fe82fb \ - --hash=sha256:31d676bc236f6e919d100fb85d0a99812cff1ebffaa58106eaaec9399693e227 \ - --hash=sha256:335406231f9247f985df045f0c0c8f6b6d5d6b3ff17b41a57c1e8ef1a31b4d04 \ - --hash=sha256:397a185e5dd7f8ebe88a063fe13e34d61d394ebb8c70a443cee7661b9c89bda7 \ - --hash=sha256:4a308aeac326c2bafbca9abbae1e1fcf682b06e78a54dad0347b760525838d85 \ - --hash=sha256:50232572dd300c49f134838c8e7e0917f29a91f97dbd608d23f2895248464b7f \ - --hash=sha256:512e5a41af008e76451f5a344941d61f48dddcf7d7ddd3073deb555de64596a6 \ - --hash=sha256:5424ecbafe57b2de30d3b5736c5d5835064d522185516a372eea069b92786ba6 \ - --hash=sha256:543b36df56db195739c70d645ecd43e49b44d5ead5f8f645d2782af118249b37 \ - --hash=sha256:678ffb5c0a6b1518b149cc328c610615d70d9297e351e12c01d0beed5d65360f \ - --hash=sha256:6fcf06c69ccc78e32d9f28aa382ab2ab08bf54b696dbe00ee566808fdf05da7d \ - --hash=sha256:75b805549cbbcb963e9c9068f1a05abd0ea4c34edc81f8d8ef2edb7e139e5b0f \ - --hash=sha256:8038ba245d0c0a6337cfb6747ea0c51fe18b0cf1a4bc943d530fd66799fae33d \ - 
--hash=sha256:879d2d1f6085c9c0831cec6716c63aaa89e41d8e036cabb19a315498c173fcc6 \ - --hash=sha256:8cba20c9815c2a003b8ca4429b0ad4aa87cb6649af41365821249f0fd397148e \ - --hash=sha256:8e7877256b5092f1e4e48fc0f1004728dc6901e7a4ffaa4acb0a9578610aa4ce \ - --hash=sha256:906cac73b7818c20cf0f6a7dde5a6f009c52aecc318416c7af5ea37f15ca7e66 \ - --hash=sha256:920814e02e3dd7af12f0262bbc18b9fe353f75a0d0c237f6a67d270da1a1bb44 \ - --hash=sha256:957a45fb201c61b78bcf655a16afbe8a36c2c27f18a998bd6b5d8a35e358d4ad \ - --hash=sha256:9a4402e7df1b5c9a4c71c7892e1c8f43f642371d13c73242bda5964be6231f95 \ - --hash=sha256:9d9b5440a5d215d9e1cfd4aee35fd4101a8b8ceb8329f549c16e3894ed9f18b5 \ - --hash=sha256:a3bf6ca6bce22eb89dd0650ef49c77341440def966abcb7a2d01de8453df083a \ - --hash=sha256:a71b0cc21f2c324747bc77c35161e0438e3b5e72db6d3b515310457aba743f7f \ - --hash=sha256:ab7bae2b8bf17620ed381e4101aeeb64b3ba2a45fc74c7617c633a923cb0f169 \ - --hash=sha256:ae72621f216d1d990468291b1ec153e1b46e0ed188a86d54e0941f3dabd09ee8 \ - --hash=sha256:b20becf50d4aec7114dc902b58d85c6431b3a59b04caa977e6ce67b6fee0e159 \ - --hash=sha256:b28c1a65cd13fff5958ab8b350f0921121691464a7a1752936b06ed25c0c7b6e \ - --hash=sha256:b97a67c47840467ccf116136450c50b6ed4e16a8919c81a4b4faef71e0a2b3f4 \ - --hash=sha256:bd55ea5cce3addc03f8fb0705be0cfed63b048acc4f20914ce5e1375b15a293b \ - --hash=sha256:c4eb31a8e8a5e1d9af5aa9e247c2a52ad5cf7e968aaa9aaefdff98cfcc7f2e37 \ - --hash=sha256:c63eca397127ebf46b59c9c1fb77b30dd7a8fc808ac385e7a58a7e64bae6e106 \ - --hash=sha256:c959550e0705dc9f59de8fca1a316da0d9b115991806b217c82931ac81d75f74 \ - --hash=sha256:cffb77cf0cd3cbf20eb603f932e0dde51b45134bdd2d439c9f57924581bb395b \ - --hash=sha256:d1c01cf4b8e00c7e98a0a7cf606a30a26c32adf2560be2d7d5d6766d6f474b31 \ - --hash=sha256:d3f56e41bc79d30fdf077073072f2377d2ebf0b946b01f2009ab58b08907bc28 \ - --hash=sha256:e159b97f5676dcdac0d0f75ec856ef5851707f61d262851eb41a30e8fadad7c9 \ - --hash=sha256:e98ca450cb4fb176dd572ce28c6623de6923752c70556be4ef79764505320acb \ - 
--hash=sha256:eb50d869b3c97c7c5187eda3759e8eb15deb1271d694bc5d6ba7040db9e29036 \ - --hash=sha256:ece2d8ed4c34903e7f1b64fb1e448a00e919a4cdb104fc713ad34b055b665fca \ - --hash=sha256:f28090060a31f4d11221f9ba48b2273b0d04b702f4dcaa197c38c64ce639cc51 \ - --hash=sha256:f692e7aabad92fa0fff5b13a846fb586b02109475652207ec96733a085019d80 \ - --hash=sha256:f708ca623287186e5876256cb30599308bce9b2757f90d917b7186de54ce6547 +orjson==3.9.10 \ + --hash=sha256:06ad5543217e0e46fd7ab7ea45d506c76f878b87b1b4e369006bdb01acc05a83 \ + --hash=sha256:0a73160e823151f33cdc05fe2cea557c5ef12fdf276ce29bb4f1c571c8368a60 \ + --hash=sha256:1234dc92d011d3554d929b6cf058ac4a24d188d97be5e04355f1b9223e98bbe9 \ + --hash=sha256:1d0dc4310da8b5f6415949bd5ef937e60aeb0eb6b16f95041b5e43e6200821fb \ + --hash=sha256:2a11b4b1a8415f105d989876a19b173f6cdc89ca13855ccc67c18efbd7cbd1f8 \ + --hash=sha256:2e2ecd1d349e62e3960695214f40939bbfdcaeaaa62ccc638f8e651cf0970e5f \ + --hash=sha256:3a2ce5ea4f71681623f04e2b7dadede3c7435dfb5e5e2d1d0ec25b35530e277b \ + --hash=sha256:3e892621434392199efb54e69edfff9f699f6cc36dd9553c5bf796058b14b20d \ + --hash=sha256:3fb205ab52a2e30354640780ce4587157a9563a68c9beaf52153e1cea9aa0921 \ + --hash=sha256:4689270c35d4bb3102e103ac43c3f0b76b169760aff8bcf2d401a3e0e58cdb7f \ + --hash=sha256:49f8ad582da6e8d2cf663c4ba5bf9f83cc052570a3a767487fec6af839b0e777 \ + --hash=sha256:4bd176f528a8151a6efc5359b853ba3cc0e82d4cd1fab9c1300c5d957dc8f48c \ + --hash=sha256:4cf7837c3b11a2dfb589f8530b3cff2bd0307ace4c301e8997e95c7468c1378e \ + --hash=sha256:4fd72fab7bddce46c6826994ce1e7de145ae1e9e106ebb8eb9ce1393ca01444d \ + --hash=sha256:5148bab4d71f58948c7c39d12b14a9005b6ab35a0bdf317a8ade9a9e4d9d0bd5 \ + --hash=sha256:5869e8e130e99687d9e4be835116c4ebd83ca92e52e55810962446d841aba8de \ + --hash=sha256:602a8001bdf60e1a7d544be29c82560a7b49319a0b31d62586548835bbe2c862 \ + --hash=sha256:61804231099214e2f84998316f3238c4c2c4aaec302df12b21a64d72e2a135c7 \ + 
--hash=sha256:666c6fdcaac1f13eb982b649e1c311c08d7097cbda24f32612dae43648d8db8d \ + --hash=sha256:674eb520f02422546c40401f4efaf8207b5e29e420c17051cddf6c02783ff5ca \ + --hash=sha256:7ec960b1b942ee3c69323b8721df2a3ce28ff40e7ca47873ae35bfafeb4555ca \ + --hash=sha256:7f433be3b3f4c66016d5a20e5b4444ef833a1f802ced13a2d852c637f69729c1 \ + --hash=sha256:7f8fb7f5ecf4f6355683ac6881fd64b5bb2b8a60e3ccde6ff799e48791d8f864 \ + --hash=sha256:81a3a3a72c9811b56adf8bcc829b010163bb2fc308877e50e9910c9357e78521 \ + --hash=sha256:858379cbb08d84fe7583231077d9a36a1a20eb72f8c9076a45df8b083724ad1d \ + --hash=sha256:8b9ba0ccd5a7f4219e67fbbe25e6b4a46ceef783c42af7dbc1da548eb28b6531 \ + --hash=sha256:92af0d00091e744587221e79f68d617b432425a7e59328ca4c496f774a356071 \ + --hash=sha256:9ebbdbd6a046c304b1845e96fbcc5559cd296b4dfd3ad2509e33c4d9ce07d6a1 \ + --hash=sha256:9edd2856611e5050004f4722922b7b1cd6268da34102667bd49d2a2b18bafb81 \ + --hash=sha256:a353bf1f565ed27ba71a419b2cd3db9d6151da426b61b289b6ba1422a702e643 \ + --hash=sha256:b5b7d4a44cc0e6ff98da5d56cde794385bdd212a86563ac321ca64d7f80c80d1 \ + --hash=sha256:b90f340cb6397ec7a854157fac03f0c82b744abdd1c0941a024c3c29d1340aff \ + --hash=sha256:c18a4da2f50050a03d1da5317388ef84a16013302a5281d6f64e4a3f406aabc4 \ + --hash=sha256:c338ed69ad0b8f8f8920c13f529889fe0771abbb46550013e3c3d01e5174deef \ + --hash=sha256:c5a02360e73e7208a872bf65a7554c9f15df5fe063dc047f79738998b0506a14 \ + --hash=sha256:c62b6fa2961a1dcc51ebe88771be5319a93fd89bd247c9ddf732bc250507bc2b \ + --hash=sha256:c812312847867b6335cfb264772f2a7e85b3b502d3a6b0586aa35e1858528ab1 \ + --hash=sha256:c943b35ecdf7123b2d81d225397efddf0bce2e81db2f3ae633ead38e85cd5ade \ + --hash=sha256:ce0a29c28dfb8eccd0f16219360530bc3cfdf6bf70ca384dacd36e6c650ef8e8 \ + --hash=sha256:cf80b550092cc480a0cbd0750e8189247ff45457e5a023305f7ef1bcec811616 \ + --hash=sha256:cff7570d492bcf4b64cc862a6e2fb77edd5e5748ad715f487628f102815165e9 \ + --hash=sha256:d2c1e559d96a7f94a4f581e2a32d6d610df5840881a8cba8f25e446f4d792df3 \ + 
--hash=sha256:deeb3922a7a804755bbe6b5be9b312e746137a03600f488290318936c1a2d4dc \ + --hash=sha256:e28a50b5be854e18d54f75ef1bb13e1abf4bc650ab9d635e4258c58e71eb6ad5 \ + --hash=sha256:e99c625b8c95d7741fe057585176b1b8783d46ed4b8932cf98ee145c4facf499 \ + --hash=sha256:ec6f18f96b47299c11203edfbdc34e1b69085070d9a3d1f302810cc23ad36bf3 \ + --hash=sha256:ed8bc367f725dfc5cabeed1ae079d00369900231fbb5a5280cf0736c30e2adf7 \ + --hash=sha256:ee5926746232f627a3be1cc175b2cfad24d0170d520361f4ce3fa2fd83f09e1d \ + --hash=sha256:f295efcd47b6124b01255d1491f9e46f17ef40d3d7eabf7364099e463fb45f0f \ + --hash=sha256:fb0b361d73f6b8eeceba47cd37070b5e6c9de5beaeaa63a1cb35c7e1a73ef088 # via # -r requirements.in # envoy-base-utils diff --git a/tools/dependency/BUILD b/tools/dependency/BUILD index 1631a05cb26a..a7fb7edd96f5 100644 --- a/tools/dependency/BUILD +++ b/tools/dependency/BUILD @@ -1,4 +1,5 @@ load("@base_pip3//:requirements.bzl", "requirement") +load("@bazel_skylib//rules:common_settings.bzl", "bool_flag") load("@envoy_repo//:path.bzl", "PATH") load("//bazel:envoy_build_system.bzl", "envoy_package") load("//tools/base:envoy_python.bzl", "envoy_entry_point", "envoy_genjson", "envoy_pytool_binary") @@ -10,18 +11,34 @@ envoy_package() envoy_py_namespace() +bool_flag( + name = "preload_cve_data", + build_setting_default = False, +) + +config_setting( + name = "preloaded_cve_data", + flag_values = { + ":preload_cve_data": "true", + }, +) + envoy_entry_point( name = "check", args = [ "--repository_locations=$(location //bazel:all_repository_locations)", "--cve_config=$(location :cve.yaml)", - "--cve_data=$(location :cve_data)", - ], + ] + select({ + ":preloaded_cve_data": ["--cve_data=$(location :cve_data)"], + "//conditions:default": [], + }), data = [ ":cve.yaml", - ":cve_data", "//bazel:all_repository_locations", - ], + ] + select({ + ":preloaded_cve_data": [":cve_data"], + "//conditions:default": [], + }), pkg = "envoy.dependency.check", deps = [requirement("orjson")], ) diff --git 
a/tools/extensions/extensions_schema.yaml b/tools/extensions/extensions_schema.yaml index 36181fb61786..407d8fcfe4d1 100644 --- a/tools/extensions/extensions_schema.yaml +++ b/tools/extensions/extensions_schema.yaml @@ -136,6 +136,8 @@ categories: - envoy.http.early_header_mutation - envoy.http.custom_response - envoy.router.cluster_specifier_plugin +- envoy.tracers.opentelemetry.resource_detectors +- envoy.tracers.opentelemetry.samplers status_values: - name: stable diff --git a/tools/spelling/spelling_dictionary.txt b/tools/spelling/spelling_dictionary.txt index 3a73591424f9..29e9a2538994 100644 --- a/tools/spelling/spelling_dictionary.txt +++ b/tools/spelling/spelling_dictionary.txt @@ -38,6 +38,7 @@ DOM Gasd GiB IPTOS +OTEL Repick Reserializer SION @@ -822,6 +823,7 @@ hostnames hostset hotrestart hrefs +htpasswd huffman hystrix idempotency diff --git a/tools/vscode/refresh_compdb.sh b/tools/vscode/refresh_compdb.sh index 46a8f433f954..996a9d576102 100755 --- a/tools/vscode/refresh_compdb.sh +++ b/tools/vscode/refresh_compdb.sh @@ -5,10 +5,13 @@ bazel_or_isk=bazelisk command -v bazelisk &> /dev/null || bazel_or_isk=bazel -[[ -z "${EXCLUDE_CONTRIB}" ]] || opts="--exclude_contrib" +opts=(--vscode --bazel="$bazel_or_isk") + +[[ -z "${EXCLUDE_CONTRIB}" ]] || opts+=(--exclude_contrib) # Setting TEST_TMPDIR here so the compdb headers won't be overwritten by another bazel run -TEST_TMPDIR=${BUILD_DIR:-/tmp}/envoy-compdb tools/gen_compilation_database.py --vscode --bazel=$bazel_or_isk ${opts} +TEST_TMPDIR=${BUILD_DIR:-/tmp}/envoy-compdb tools/gen_compilation_database.py \ + "${opts[@]}" # Kill clangd to reload the compilation database pkill clangd || :