diff --git a/.azure-pipelines/build-docker-sonic-vs-template.yml b/.azure-pipelines/build-docker-sonic-vs-template.yml index 97e8afb394..480d4ff3ab 100644 --- a/.azure-pipelines/build-docker-sonic-vs-template.yml +++ b/.azure-pipelines/build-docker-sonic-vs-template.yml @@ -23,6 +23,10 @@ parameters: - name: artifact_name type: string +- name: asan + type: boolean + default: false + jobs: - job: displayName: ${{ parameters.arch }} @@ -36,50 +40,64 @@ jobs: inputs: source: specific project: build - pipeline: 9 + pipeline: Azure.sonic-swss-common artifact: ${{ parameters.swss_common_artifact_name }} runVersion: 'latestFromBranch' - runBranch: 'refs/heads/master' + runBranch: 'refs/heads/$(BUILD_BRANCH)' + path: $(Build.ArtifactStagingDirectory)/download + allowPartiallySucceededBuilds: true displayName: "Download sonic swss common deb packages" - task: DownloadPipelineArtifact@2 inputs: source: specific project: build - pipeline: 12 + pipeline: Azure.sonic-sairedis artifact: ${{ parameters.sairedis_artifact_name }} runVersion: 'latestFromBranch' - runBranch: 'refs/heads/master' + runBranch: 'refs/heads/$(BUILD_BRANCH)' + path: $(Build.ArtifactStagingDirectory)/download + allowPartiallySucceededBuilds: true displayName: "Download sonic sairedis deb packages" - task: DownloadPipelineArtifact@2 inputs: artifact: ${{ parameters.swss_artifact_name }} - displayName: "Download sonic swss artifact" + path: $(Build.ArtifactStagingDirectory)/download + displayName: "Download pre-stage built ${{ parameters.swss_artifact_name }}" - task: DownloadPipelineArtifact@2 inputs: source: specific project: build - pipeline: 1 + pipeline: Azure.sonic-buildimage.official.vs artifact: sonic-buildimage.vs runVersion: 'latestFromBranch' - runBranch: 'refs/heads/master' - displayName: "Download sonic buildimage" + runBranch: 'refs/heads/$(BUILD_BRANCH)' + path: $(Build.ArtifactStagingDirectory)/download + patterns: '**/target/${{ parameters.artifact_name }}.gz' + displayName: "Download 
sonic-buildimage ${{ parameters.artifact_name }}" - script: | + set -ex echo $(Build.DefinitionName).$(Build.BuildNumber) - docker load < ../target/docker-sonic-vs.gz + docker load < $(Build.ArtifactStagingDirectory)/download/target/${{ parameters.artifact_name }}.gz mkdir -p .azure-pipelines/docker-sonic-vs/debs - cp -v ../*.deb .azure-pipelines/docker-sonic-vs/debs + cp -v $(Build.ArtifactStagingDirectory)/download/*.deb .azure-pipelines/docker-sonic-vs/debs pushd .azure-pipelines - docker build --no-cache -t docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber) docker-sonic-vs + build_args="" + if [ '${{ parameters.asan }}' == True ]; then + build_args="--build-arg need_dbg=y" + fi - popd + docker build $build_args --no-cache -t docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber).asan-${{ parameters.asan }} docker-sonic-vs - docker save docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber) | gzip -c > $(Build.ArtifactStagingDirectory)/docker-sonic-vs.gz + popd + docker save docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber).asan-${{ parameters.asan }} | gzip -c > $(Build.ArtifactStagingDirectory)/docker-sonic-vs.gz + rm -rf $(Build.ArtifactStagingDirectory)/download + displayName: "Build ${{ parameters.artifact_name }}" - publish: $(Build.ArtifactStagingDirectory)/ artifact: ${{ parameters.artifact_name }} displayName: "Archive sonic docker vs image" diff --git a/.azure-pipelines/build-template.yml b/.azure-pipelines/build-template.yml index e75ffdad8d..b8beeaef17 100644 --- a/.azure-pipelines/build-template.yml +++ b/.azure-pipelines/build-template.yml @@ -23,12 +23,6 @@ parameters: - name: sonic_slave type: string -- name: buildimage_artifact_name - type: string - -- name: buildimage_pipeline - type: number - - name: sairedis_artifact_name type: string @@ -46,6 +40,13 @@ parameters: type: boolean default: false +- name: common_lib_artifact_name + type: string + +- name: asan + type: boolean + default: false + jobs: - job: 
displayName: ${{ parameters.arch }} @@ -54,7 +55,7 @@ jobs: pool: ${{ if ne(parameters.pool, 'default') }}: name: ${{ parameters.pool }} - ${{ if eq(parameters.pool, 'default') }}: + ${{ else }}: vmImage: 'ubuntu-20.04' container: @@ -65,96 +66,86 @@ jobs: clean: true submodules: true - script: | - sudo apt-get install -y libhiredis0.14 libhiredis-dev - sudo apt-get install -y libzmq5 libzmq3-dev - sudo apt-get install -qq -y \ - libhiredis-dev \ - swig3.0 - sudo apt-get install -y libdbus-1-3 - sudo apt-get install -y libteam-dev \ - libteam5 \ - libteamdctl0 + sudo apt-get update + sudo apt-get install -y \ + libhiredis-dev \ + libzmq3-dev \ + swig4.0 \ + libdbus-1-dev \ + libteam-dev displayName: "Install dependencies" - task: DownloadPipelineArtifact@2 inputs: source: specific project: build - pipeline: 9 + pipeline: Azure.sonic-swss-common artifact: ${{ parameters.swss_common_artifact_name }} runVersion: 'latestFromBranch' - runBranch: 'refs/heads/master' - path: '$(Build.SourcesDirectory)/${{ parameters.swss_common_artifact_name }}' + runBranch: 'refs/heads/$(BUILD_BRANCH)' + path: $(Build.ArtifactStagingDirectory)/download + allowPartiallySucceededBuilds: true + patterns: | + libswsscommon_1.0.0_${{ parameters.arch }}.deb + libswsscommon-dev_1.0.0_${{ parameters.arch }}.deb displayName: "Download sonic swss common deb packages" - task: DownloadPipelineArtifact@2 inputs: source: specific project: build - pipeline: 12 + pipeline: Azure.sonic-sairedis artifact: ${{ parameters.sairedis_artifact_name }} runVersion: 'latestFromBranch' - runBranch: 'refs/heads/master' - path: '$(Build.SourcesDirectory)/${{ parameters.sairedis_artifact_name }}' + runBranch: 'refs/heads/$(BUILD_BRANCH)' + path: $(Build.ArtifactStagingDirectory)/download + allowPartiallySucceededBuilds: true + patterns: | + libsaivs_*.deb + libsaivs-dev_*.deb + libsairedis_*.deb + libsairedis-dev_*.deb + libsaimetadata_*.deb + libsaimetadata-dev_*.deb + syncd-vs_*.deb displayName: "Download sonic 
sairedis deb packages" - task: DownloadPipelineArtifact@2 - ${{ if eq(parameters.buildimage_pipeline, 141) }}: - continueOnError: True inputs: source: specific project: build - pipeline: ${{ parameters.buildimage_pipeline }} - artifact: ${{ parameters.buildimage_artifact_name }} + pipeline: Azure.sonic-buildimage.common_libs + artifact: ${{ parameters.common_lib_artifact_name }} runVersion: 'latestFromBranch' - runBranch: 'refs/heads/master' - path: '$(Build.SourcesDirectory)/${{ parameters.buildimage_artifact_name }}' - displayName: "Download sonic buildimage deb packages" + runBranch: 'refs/heads/$(BUILD_BRANCH)' + path: $(Build.ArtifactStagingDirectory)/download + patterns: | + target/debs/bullseye/libnl-3-200_*.deb + target/debs/bullseye/libnl-3-dev_*.deb + target/debs/bullseye/libnl-genl-3-200_*.deb + target/debs/bullseye/libnl-genl-3-dev_*.deb + target/debs/bullseye/libnl-route-3-200_*.deb + target/debs/bullseye/libnl-route-3-dev_*.deb + target/debs/bullseye/libnl-nf-3-200_*.deb + target/debs/bullseye/libnl-nf-3-dev_*.deb + target/debs/bullseye/libyang_*.deb + displayName: "Download common libs" - script: | - buildimage_artifact_downloaded=n - [ -d "$(Build.SourcesDirectory)/${{ parameters.buildimage_artifact_name }}/target" ] && buildimage_artifact_downloaded=y - echo "buildimage_artifact_downloaded=$buildimage_artifact_downloaded" - echo "##vso[task.setvariable variable=buildimage_artifact_downloaded]$buildimage_artifact_downloaded" - condition: eq(${{ parameters.buildimage_pipeline }}, 141) - displayName: "Check if sonic buildimage deb packages downloaded" - - task: DownloadPipelineArtifact@2 - condition: and(eq(variables.buildimage_artifact_downloaded, 'n'), eq(${{ parameters.buildimage_pipeline }}, 141)) - inputs: - source: specific - project: build - pipeline: ${{ parameters.buildimage_pipeline }} - artifact: 'sonic-buildimage.marvell-armhf1' - runVersion: specific - runId: 63911 - path: '$(Build.SourcesDirectory)/${{ parameters.buildimage_artifact_name 
}}' - displayName: "Download sonic buildimage deb packages from 63911" - - script: | - cd $(Build.SourcesDirectory)/${{ parameters.buildimage_artifact_name }} - sudo dpkg -i target/debs/buster/libnl-3-200_*.deb - sudo dpkg -i target/debs/buster/libnl-3-dev_*.deb - sudo dpkg -i target/debs/buster/libnl-genl-3-200_*.deb - sudo dpkg -i target/debs/buster/libnl-genl-3-dev_*.deb - sudo dpkg -i target/debs/buster/libnl-route-3-200_*.deb - sudo dpkg -i target/debs/buster/libnl-route-3-dev_*.deb - sudo dpkg -i target/debs/buster/libnl-nf-3-200_*.deb - sudo dpkg -i target/debs/buster/libnl-nf-3-dev_*.deb - cd $(Build.SourcesDirectory)/${{ parameters.swss_common_artifact_name }} - sudo dpkg -i libswsscommon_1.0.0_${{ parameters.arch }}.deb - sudo dpkg -i libswsscommon-dev_1.0.0_${{ parameters.arch }}.deb - cd $(Build.SourcesDirectory)/${{ parameters.sairedis_artifact_name }} - sudo dpkg -i libsaivs_*.deb - sudo dpkg -i libsaivs-dev_*.deb - sudo dpkg -i libsairedis_*.deb - sudo dpkg -i libsairedis-dev_*.deb - sudo dpkg -i libsaimetadata_*.deb - sudo dpkg -i libsaimetadata-dev_*.deb - sudo dpkg -i syncd-vs_*.deb - workingDirectory: $(Pipeline.Workspace) + set -ex + cd download + sudo dpkg -i $(find target/debs/bullseye -type f) + sudo dpkg -i $(ls *.deb) + cd .. + rm -rf download + workingDirectory: $(Build.ArtifactStagingDirectory) displayName: "Install libnl3, sonic swss common and sairedis" - script: | - set -x + set -ex tar czf pytest.tgz tests cp -r pytest.tgz $(Build.ArtifactStagingDirectory)/ if [ '${{ parameters.archive_gcov }}' == True ]; then export ENABLE_GCOV=y fi + if [ '${{ parameters.asan }}' == True ]; then + export ENABLE_ASAN=y + fi ./autogen.sh dpkg-buildpackage -us -uc -b -j$(nproc) && cp ../*.deb . 
displayName: "Compile sonic swss" diff --git a/.azure-pipelines/docker-sonic-vs/Dockerfile b/.azure-pipelines/docker-sonic-vs/Dockerfile index f288c8fdaa..16edf2629d 100644 --- a/.azure-pipelines/docker-sonic-vs/Dockerfile +++ b/.azure-pipelines/docker-sonic-vs/Dockerfile @@ -1,29 +1,20 @@ FROM docker-sonic-vs ARG docker_container_name +ARG need_dbg -ADD ["debs", "/debs"] +COPY ["debs", "/debs"] -RUN dpkg --purge python-swsscommon -RUN dpkg --purge python3-swsscommon -RUN dpkg --purge swss -RUN dpkg --purge libsairedis -RUN dpkg --purge libswsscommon -RUN dpkg --purge libsaimetadata -RUN dpkg --purge libsaivs -RUN dpkg --purge syncd-vs +RUN dpkg -i /debs/libswsscommon_1.0.0_amd64.deb \ + /debs/python3-swsscommon_1.0.0_amd64.deb \ + /debs/sonic-db-cli_1.0.0_amd64.deb \ + /debs/libsaimetadata_1.0.0_amd64.deb \ + /debs/libsairedis_1.0.0_amd64.deb \ + /debs/libsaivs_1.0.0_amd64.deb \ + /debs/syncd-vs_1.0.0_amd64.deb \ + /debs/swss_1.0.0_amd64.deb -RUN dpkg -i /debs/libswsscommon_1.0.0_amd64.deb -RUN dpkg -i /debs/python-swsscommon_1.0.0_amd64.deb -RUN dpkg -i /debs/python3-swsscommon_1.0.0_amd64.deb - -RUN dpkg -i /debs/libsaimetadata_1.0.0_amd64.deb -RUN dpkg -i /debs/libsairedis_1.0.0_amd64.deb -RUN dpkg -i /debs/libsaivs_1.0.0_amd64.deb -RUN dpkg -i /debs/syncd-vs_1.0.0_amd64.deb - -RUN dpkg --purge swss -RUN dpkg -i /debs/swss_1.0.0_amd64.deb +RUN if [ "$need_dbg" = "y" ] ; then dpkg -i /debs/swss-dbg_1.0.0_amd64.deb ; fi RUN apt-get update diff --git a/.azure-pipelines/gcov.yml b/.azure-pipelines/gcov.yml index 0940f82cce..27129c5611 100644 --- a/.azure-pipelines/gcov.yml +++ b/.azure-pipelines/gcov.yml @@ -14,7 +14,7 @@ parameters: - name: timeout type: number - default: 180 + default: 240 - name: sonic_slave type: string @@ -47,9 +47,8 @@ jobs: vmImage: 'ubuntu-20.04' variables: - DIFF_COVER_CHECK_THRESHOLD: 0 + DIFF_COVER_CHECK_THRESHOLD: 80 DIFF_COVER_ENABLE: 'true' - DIFF_COVER_WORKING_DIRECTORY: $(System.DefaultWorkingDirectory)/gcov/ container: image: 
sonicdev-microsoft.azurecr.io:443/${{ parameters.sonic_slave }}:latest @@ -60,9 +59,9 @@ jobs: set -ex # Install .NET CORE curl -sSL https://packages.microsoft.com/keys/microsoft.asc | sudo apt-key add - - sudo apt-add-repository https://packages.microsoft.com/debian/10/prod + sudo apt-add-repository https://packages.microsoft.com/debian/11/prod sudo apt-get update - sudo apt-get install -y dotnet-sdk-5.0 + sudo apt-get install -y dotnet-sdk-7.0 displayName: "Install .NET CORE" - script: | sudo apt-get install -y lcov @@ -106,8 +105,7 @@ jobs: sudo ./gcov_support.sh generate sudo ./gcov_support.sh merge_container_info $(Build.ArtifactStagingDirectory) sudo cp -rf gcov_output $(Build.ArtifactStagingDirectory) - mkdir -p $(System.DefaultWorkingDirectory)/gcov - sudo cp -rf $(Build.ArtifactStagingDirectory)/gcov_output/AllMergeReport/* $(System.DefaultWorkingDirectory)/gcov/ + sudo cp -rf $(Build.ArtifactStagingDirectory)/gcov_output/AllMergeReport/coverage.xml $(System.DefaultWorkingDirectory)/ ls -lh $(Build.ArtifactStagingDirectory) popd workingDirectory: $(Pipeline.Workspace) diff --git a/.azure-pipelines/test-docker-sonic-vs-template.yml b/.azure-pipelines/test-docker-sonic-vs-template.yml index 002b7749e1..d6cbd73849 100644 --- a/.azure-pipelines/test-docker-sonic-vs-template.yml +++ b/.azure-pipelines/test-docker-sonic-vs-template.yml @@ -1,7 +1,7 @@ parameters: - name: timeout type: number - default: 360 + default: 480 - name: log_artifact_name type: string @@ -16,6 +16,14 @@ parameters: type: boolean default: false +- name: docker_sonic_vs_name + type: string + default: docker-sonic-vs + +- name: asan + type: boolean + default: false + jobs: - job: displayName: vstest @@ -30,36 +38,37 @@ jobs: - checkout: self - task: DownloadPipelineArtifact@2 inputs: - artifact: docker-sonic-vs - displayName: "Download docker sonic vs image" - + artifact: ${{ parameters.docker_sonic_vs_name }} + path: $(Build.ArtifactStagingDirectory)/download + displayName: "Download 
pre-stage built ${{ parameters.docker_sonic_vs_name }}" - task: DownloadPipelineArtifact@2 inputs: source: specific project: build - pipeline: 9 + pipeline: Azure.sonic-swss-common artifact: sonic-swss-common.amd64.ubuntu20_04 runVersion: 'latestFromBranch' - runBranch: 'refs/heads/master' + runBranch: 'refs/heads/$(BUILD_BRANCH)' + path: $(Build.ArtifactStagingDirectory)/download displayName: "Download sonic swss common deb packages" - script: | - set -x + set -ex sudo .azure-pipelines/build_and_install_module.sh - sudo apt-get install -y libhiredis0.14 - sudo dpkg -i --force-confask,confnew ../libswsscommon_1.0.0_amd64.deb || apt-get install -f - sudo dpkg -i ../python3-swsscommon_1.0.0_amd64.deb + sudo apt-get install -y libhiredis0.14 libyang0.16 + sudo dpkg -i --force-confask,confnew $(Build.ArtifactStagingDirectory)/download/libswsscommon_1.0.0_amd64.deb || apt-get install -f + sudo dpkg -i $(Build.ArtifactStagingDirectory)/download/python3-swsscommon_1.0.0_amd64.deb # install packages for vs test sudo apt-get install -y net-tools bridge-utils vlan sudo apt-get install -y python3-pip - sudo pip3 install pytest==4.6.2 attrs==19.1.0 exabgp==4.0.10 distro==1.5.0 docker==4.4.1 redis==3.3.4 flaky==3.7.0 + sudo pip3 install pytest==4.6.2 attrs==19.1.0 exabgp==4.0.10 distro==1.5.0 docker>=4.4.1 redis==3.3.4 flaky==3.7.0 displayName: "Install dependencies" - script: | - set -x - sudo docker load -i ../docker-sonic-vs.gz + set -ex + sudo docker load -i $(Build.ArtifactStagingDirectory)/download/docker-sonic-vs.gz docker ps ip netns list uname -a @@ -67,27 +76,65 @@ jobs: sudo /sbin/ip link del Vrf1 type vrf table 1001 pushd tests + params='' if [ '${{ parameters.archive_gcov }}' == True ]; then - sudo py.test -v --force-flaky --junitxml=tr.xml --keeptb --imgname=docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber) - else - sudo py.test -v --force-flaky --junitxml=tr.xml --imgname=docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber) + params='--keeptb' 
+ fi + if [ '${{ parameters.asan }}' == True ]; then + params='--graceful-stop' + fi + + all_tests=$(ls test_*.py) + all_tests="${all_tests} p4rt" + test_set=() + # Run 20 tests as a set. + for test in ${all_tests}; do + test_set+=("${test}") + if [ ${#test_set[@]} -ge 20 ]; then + test_name=$(echo "${test_set[0]}" | cut -d "." -f 1) + echo "${test_set[*]}" | xargs sudo py.test -v --force-flaky --junitxml="${test_name}_tr.xml" $params --imgname=docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber).asan-${{ parameters.asan }} + container_count=$(docker ps -q -a | wc -l) + if [ '${{ parameters.archive_gcov }}' == True ] && [ ${container_count} -gt 0 ]; then + ./gcov_support.sh set_environment $(Build.ArtifactStagingDirectory) + docker stop $(docker ps -q -a) + docker rm $(docker ps -q -a) + fi + test_set=() + fi + done + if [ ${#test_set[@]} -gt 0 ]; then + test_name=$(echo "${test_set[0]}" | cut -d "." -f 1) + echo "${test_set[*]}" | xargs sudo py.test -v $params --force-flaky --junitxml="${test_name}_tr.xml" $params --imgname=docker-sonic-vs:$(Build.DefinitionName).$(Build.BuildNumber).asan-${{ parameters.asan }} + container_count=$(docker ps -q -a | wc -l) + if [ '${{ parameters.archive_gcov }}' == True ] && [ ${container_count} -gt 0 ]; then + ./gcov_support.sh set_environment $(Build.ArtifactStagingDirectory) + docker stop $(docker ps -q -a) + docker rm $(docker ps -q -a) + fi fi + + rm -rf $(Build.ArtifactStagingDirectory)/download displayName: "Run vs tests" + continueOnError: ${{ parameters.asan }} - task: PublishTestResults@2 inputs: - testResultsFiles: '**/tr.xml' + testResultsFiles: '**/*_tr.xml' testRunTitle: vstest - condition: always() + condition: succeeded() - script: | cp -r tests/log $(Build.ArtifactStagingDirectory)/ + if [ '${{ parameters.asan }}' == True ]; then + cp -vr tests/log/*/log/asan $(Build.ArtifactStagingDirectory)/ + fi + if [ '${{ parameters.archive_gcov }}' == True ]; then sudo apt-get install -y lcov - ./tests/gcov_support.sh 
set_environment $(Build.ArtifactStagingDirectory) - docker stop $(docker ps -q -a) - docker rm $(docker ps -q -a) + cd $(Build.ArtifactStagingDirectory)/gcov_tmp/ + tar -zcvf sonic-gcov.tar.gz sonic-gcov/ + rm -rf sonic-gcov fi displayName: "Collect logs" condition: always() @@ -95,9 +142,25 @@ jobs: - publish: $(Build.ArtifactStagingDirectory)/gcov_tmp artifact: ${{ parameters.gcov_artifact_name }} displayName: "Publish gcov output" - condition: eq('${{ parameters.archive_gcov }}', true) + condition: and(succeeded(), eq('${{ parameters.archive_gcov }}', true)) - publish: $(Build.ArtifactStagingDirectory)/ artifact: ${{ parameters.log_artifact_name }}@$(System.JobAttempt) displayName: "Publish logs" condition: always() + + - publish: $(Build.ArtifactStagingDirectory)/asan + artifact: asan-reports + displayName: "Publish ASAN reports" + condition: eq('${{ parameters.asan }}', true) + + - script: | + if [ "$(ls -A $(Build.ArtifactStagingDirectory)/asan)" ]; then + echo "There are issues reported by ASAN" + exit 1 + else + echo "No issues reported by ASAN" + fi + displayName: "Check ASAN reports" + condition: eq('${{ parameters.asan }}', true) + continueOnError: true diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 38d8187f49..46732aa050 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -9,3 +9,22 @@ # @prsunny will be requested for review when someone opens # a pull request. 
* @prsunny + +# MACSEC +/gearsyncd/ @Pterosaur +/orchagent/macsecorch* @Pterosaur + +# LAG +/teamsyncd/ @judyjoseph +/tlm_teamd/ @judyjoseph +/cfgmgr/teammgr* @judyjoseph + +# Buffer Management, PFC +/orchagent/bufferorch* @neethajohn +/orchagent/qosorch* @neethajohn +/orchagent/pfc* @neethajohn +/cfgmgr/buffer* @neethajohn + +# Chassis +/orchagent/fabricportsorch* @abdosi @judyjoseph +/tests/test_virtual_chassis.py @abdosi @judyjoseph diff --git a/.github/codeql/codeql-config.yml b/.github/codeql/codeql-config.yml new file mode 100644 index 0000000000..2c8b0498f3 --- /dev/null +++ b/.github/codeql/codeql-config.yml @@ -0,0 +1,4 @@ +name: "CodeQL config" +queries: + - uses: security-and-quality + - uses: security-extended diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml new file mode 100644 index 0000000000..f9a1c3d005 --- /dev/null +++ b/.github/workflows/codeql-analysis.yml @@ -0,0 +1,156 @@ +# For more infomation, please visit: https://github.com/github/codeql-action + +name: "CodeQL" + +on: + push: + branches: + - 'master' + - '202[0-9][0-9][0-9]' + pull_request_target: + branches: + - 'master' + - '202[0-9][0-9][0-9]' + workflow_dispatch: + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-20.04 + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ 'cpp','python' ] + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + # Initializes the CodeQL tools for scanning. 
+ - name: Initialize CodeQL + uses: github/codeql-action/init@v2.1.29 + with: + config-file: ./.github/codeql/codeql-config.yml + languages: ${{ matrix.language }} + + - if: matrix.language == 'cpp' + name: Install prerequisites + run: | + sudo apt-get update + sudo apt-get install -y libxml-simple-perl \ + aspell \ + aspell-en \ + libhiredis-dev \ + libnl-3-dev \ + libnl-genl-3-dev \ + libnl-route-3-dev \ + libnl-nf-3-dev \ + libyang-dev \ + libzmq3-dev \ + libzmq5 \ + swig3.0 \ + libpython2.7-dev \ + libgtest-dev \ + libgmock-dev \ + libboost1.71-dev \ + libboost-serialization1.71-dev \ + dh-exec \ + doxygen \ + cdbs \ + bison \ + flex \ + graphviz \ + autoconf-archive \ + uuid-dev \ + libjansson-dev \ + python \ + stgit + + - if: matrix.language == 'cpp' + name: Build sonic-swss-common + run: | + cd .. + git clone https://github.com/sonic-net/sonic-swss-common + pushd sonic-swss-common + ./autogen.sh + dpkg-buildpackage -rfakeroot -us -uc -b -j$(nproc) + popd + dpkg-deb -x libswsscommon_${SWSSCOMMON_VER}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libswsscommon-dev_${SWSSCOMMON_VER}_amd64.deb $(dirname $GITHUB_WORKSPACE) + env: + SWSSCOMMON_VER: "1.0.0" + + - if: matrix.language == 'cpp' + name: Build sonic-sairedis + run: | + cd .. 
+ git clone --recursive https://github.com/sonic-net/sonic-sairedis + pushd sonic-sairedis + ./autogen.sh + DEB_BUILD_OPTIONS=nocheck \ + SWSS_COMMON_INC="$(dirname $GITHUB_WORKSPACE)/usr/include" \ + SWSS_COMMON_LIB="$(dirname $GITHUB_WORKSPACE)/usr/lib/x86_64-linux-gnu" \ + DEB_CFLAGS_SET="-Wno-error" DEB_CXXFLAGS_SET="-Wno-error" \ + dpkg-buildpackage -rfakeroot -us -uc -b -Psyncd,vs,nopython2 -j$(nproc) + popd + dpkg-deb -x libsairedis_${SAIREDIS_VER}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libsairedis-dev_${SAIREDIS_VER}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libsaimetadata_${SAIREDIS_VER}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libsaimetadata-dev_${SAIREDIS_VER}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libsaivs_${SAIREDIS_VER}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libsaivs-dev_${SAIREDIS_VER}_amd64.deb $(dirname $GITHUB_WORKSPACE) + env: + SAIREDIS_VER: "1.0.0" + + # Inject libnl deb only after sonic-sairedis compilation is done. + - if: matrix.language == 'cpp' + name: Build libnl + run: | + cd .. 
+ git clone https://github.com/sonic-net/sonic-buildimage + pushd sonic-buildimage/src/libnl3 + git clone https://github.com/thom311/libnl libnl3-${LIBNL3_VER} + pushd libnl3-${LIBNL3_VER} + git checkout tags/libnl${LIBNL3_VER//./_} + git checkout -b sonic + git config --local user.name $USER + git config --local user.email $USER@microsoft.com + stg init + stg import -s ../patch/series + git config --local --unset user.name + git config --local --unset user.email + ln -s ../debian debian + dpkg-buildpackage -rfakeroot -us -uc -b -j$(nproc) + popd + dpkg-deb -x libnl-3-200_${LIBNL3_VER}-${LIBNL3_REV}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libnl-3-dev_${LIBNL3_VER}-${LIBNL3_REV}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libnl-genl-3-200_${LIBNL3_VER}-${LIBNL3_REV}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libnl-genl-3-dev_${LIBNL3_VER}-${LIBNL3_REV}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libnl-route-3-200_${LIBNL3_VER}-${LIBNL3_REV}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libnl-route-3-dev_${LIBNL3_VER}-${LIBNL3_REV}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libnl-nf-3-200_${LIBNL3_VER}-${LIBNL3_REV}_amd64.deb $(dirname $GITHUB_WORKSPACE) + dpkg-deb -x libnl-nf-3-dev_${LIBNL3_VER}-${LIBNL3_REV}_amd64.deb $(dirname $GITHUB_WORKSPACE) + popd + env: + LIBNL3_VER: "3.5.0" + LIBNL3_REV: "1" + + - if: matrix.language == 'cpp' + name: Build repository + run: | + ./autogen.sh + ./configure --prefix=/usr \ + --with-extra-inc=$(dirname $GITHUB_WORKSPACE)/usr/include \ + --with-extra-lib=$(dirname $GITHUB_WORKSPACE)/lib/x86_64-linux-gnu \ + --with-extra-usr-lib=$(dirname $GITHUB_WORKSPACE)/usr/lib/x86_64-linux-gnu \ + --with-libnl-3.0-inc=$(dirname $GITHUB_WORKSPACE)/usr/include/libnl3 + + - name: Perform CodeQL analysis + uses: github/codeql-action/analyze@v2.1.29 + with: + category: "/language:${{matrix.language}}" diff --git a/.github/workflows/semgrep.yml 
b/.github/workflows/semgrep.yml new file mode 100644 index 0000000000..975769a505 --- /dev/null +++ b/.github/workflows/semgrep.yml @@ -0,0 +1,22 @@ +name: Semgrep + +on: + pull_request: {} + push: + branches: + - master + - '201[7-9][0-1][0-9]' + - '202[0-9][0-1][0-9]' + +jobs: + semgrep: + if: github.repository_owner == 'sonic-net' + name: Semgrep + runs-on: ubuntu-latest + container: + image: returntocorp/semgrep + steps: + - uses: actions/checkout@v3 + - run: semgrep ci + env: + SEMGREP_RULES: p/default diff --git a/.gitignore b/.gitignore index c2522ba711..a0c8c5ac82 100644 --- a/.gitignore +++ b/.gitignore @@ -74,6 +74,10 @@ swssconfig/swssplayer tlm_teamd/tlm_teamd teamsyncd/teamsyncd tests/tests +tests/mock_tests/tests_response_publisher +tests/mock_tests/tests_fpmsyncd +tests/mock_tests/tests_intfmgrd +tests/mock_tests/tests_portsyncd # Test Files # @@ -85,5 +89,7 @@ tests/mock_tests/tests.trs tests/test-suite.log tests/tests.log tests/tests.trs +tests/mock_tests/**/*log +tests/mock_tests/**/*trs orchagent/p4orch/tests/**/*gcda orchagent/p4orch/tests/**/*gcno diff --git a/README.md b/README.md index 32492bc29b..e627f04317 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,18 @@ -[![Total alerts](https://img.shields.io/lgtm/alerts/g/Azure/sonic-swss.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/Azure/sonic-swss/alerts/) -[![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/Azure/sonic-swss.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/Azure/sonic-swss/context:python) -[![Language grade: C/C++](https://img.shields.io/lgtm/grade/cpp/g/Azure/sonic-swss.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/Azure/sonic-swss/context:cpp) +*static analysis:* + +[![Total alerts](https://img.shields.io/lgtm/alerts/g/sonic-net/sonic-swss.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/sonic-net/sonic-swss/alerts/) +[![Language grade: 
Python](https://img.shields.io/lgtm/grade/python/g/sonic-net/sonic-swss.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/sonic-net/sonic-swss/context:python) +[![Language grade: C/C++](https://img.shields.io/lgtm/grade/cpp/g/sonic-net/sonic-swss.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/sonic-net/sonic-swss/context:cpp) + +*sonic-swss builds:* + +[![master build](https://dev.azure.com/mssonic/build/_apis/build/status/Azure.sonic-swss?branchName=master&label=master)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=15&branchName=master) +[![202205 build](https://dev.azure.com/mssonic/build/_apis/build/status/Azure.sonic-swss?branchName=202205&label=202205)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=15&branchName=202205) +[![202111 build](https://dev.azure.com/mssonic/build/_apis/build/status/Azure.sonic-swss?branchName=202111&label=202111)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=15&branchName=202111) +[![202106 build](https://dev.azure.com/mssonic/build/_apis/build/status/Azure.sonic-swss?branchName=202106&label=202106)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=15&branchName=202106) +[![202012 build](https://dev.azure.com/mssonic/build/_apis/build/status/Azure.sonic-swss?branchName=202012&label=202012)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=15&branchName=202012) +[![201911 build](https://dev.azure.com/mssonic/build/_apis/build/status/Azure.sonic-swss?branchName=201911&label=201911)](https://dev.azure.com/mssonic/build/_build/latest?definitionId=15&branchName=201911) -[![VS](https://sonic-jenkins.westus2.cloudapp.azure.com/job/vs/job/sonic-swss-build/badge/icon?subject=VS%20build)](https://sonic-jenkins.westus2.cloudapp.azure.com/job/vs/job/sonic-swss-build/) # SONiC - SWitch State Service - SWSS @@ -41,7 +51,7 @@ For your convenience, you can install prepared packages on Debian Jessie: #### Install from Source -Checkout the source: `git 
clone https://github.com/Azure/sonic-swss.git` and install it yourself. +Checkout the source: `git clone https://github.com/sonic-net/sonic-swss.git` and install it yourself. Get SAI header files into /usr/include/sai. Put the SAI header files that you use to compile libsairedis into /usr/include/sai @@ -64,13 +74,13 @@ You can also build a debian package using: ## Need Help? For general questions, setup help, or troubleshooting: -- [sonicproject on Google Groups](https://groups.google.com/d/forum/sonicproject) +- [sonicproject on Google Groups](https://groups.google.com/g/sonicproject) For bug reports or feature requests, please open an Issue. ## Contribution guide -See the [contributors guide](https://github.com/Azure/SONiC/blob/gh-pages/CONTRIBUTING.md) for information about how to contribute. +See the [contributors guide](https://github.com/sonic-net/SONiC/wiki/Becoming-a-contributor) for information about how to contribute. ### GitHub Workflow diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 06cd17686e..5933d4f57e 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -3,10 +3,37 @@ # Add steps that publish test results, save build artifacts, deploy, and more: # https://docs.microsoft.com/azure/devops/pipelines/apps/c-cpp/gcc +pr: +- master +- 202??? +- 201??? + trigger: + batch: true + branches: + include: + - master + - 202??? + - 201??? + - dash + +# this part need to be set in UI +schedules: +- cron: "0 0 * * 6" + displayName: Weekly build branches: include: - - "*" + - master + - 202??? + - 201??? 
+ always: true + +variables: + - name: BUILD_BRANCH + ${{ if eq(variables['Build.Reason'], 'PullRequest') }}: + value: $(System.PullRequest.TargetBranch) + ${{ else }}: + value: $(Build.SourceBranchName) stages: - stage: Build @@ -15,15 +42,27 @@ stages: - template: .azure-pipelines/build-template.yml parameters: arch: amd64 - sonic_slave: sonic-slave-buster - buildimage_artifact_name: sonic-buildimage.vs - buildimage_pipeline: 142 + sonic_slave: sonic-slave-bullseye + common_lib_artifact_name: common-lib swss_common_artifact_name: sonic-swss-common sairedis_artifact_name: sonic-sairedis artifact_name: sonic-swss archive_pytests: true archive_gcov: true +- stage: BuildAsan + dependsOn: [] + jobs: + - template: .azure-pipelines/build-template.yml + parameters: + arch: amd64 + sonic_slave: sonic-slave-bullseye + common_lib_artifact_name: common-lib + swss_common_artifact_name: sonic-swss-common + sairedis_artifact_name: sonic-sairedis + artifact_name: sonic-swss-asan + asan: true + - stage: BuildArm dependsOn: Build condition: succeeded('Build') @@ -33,9 +72,8 @@ stages: arch: armhf timeout: 240 pool: sonicbld-armhf - sonic_slave: sonic-slave-buster-armhf - buildimage_artifact_name: sonic-buildimage.marvell-armhf - buildimage_pipeline: 141 + sonic_slave: sonic-slave-bullseye-armhf + common_lib_artifact_name: common-lib.armhf swss_common_artifact_name: sonic-swss-common.armhf sairedis_artifact_name: sonic-sairedis.armhf artifact_name: sonic-swss.armhf @@ -46,10 +84,9 @@ stages: arch: arm64 timeout: 240 pool: sonicbld-arm64 - sonic_slave: sonic-slave-buster-arm64 + sonic_slave: sonic-slave-bullseye-arm64 + common_lib_artifact_name: common-lib.arm64 swss_common_artifact_name: sonic-swss-common.arm64 - buildimage_artifact_name: sonic-buildimage.centec-arm64 - buildimage_pipeline: 140 sairedis_artifact_name: sonic-sairedis.arm64 artifact_name: sonic-swss.arm64 archive_gcov: false @@ -65,6 +102,18 @@ stages: swss_artifact_name: sonic-swss artifact_name: docker-sonic-vs +- 
stage: BuildDockerAsan + dependsOn: BuildAsan + condition: succeeded('BuildAsan') + jobs: + - template: .azure-pipelines/build-docker-sonic-vs-template.yml + parameters: + swss_common_artifact_name: sonic-swss-common + sairedis_artifact_name: sonic-sairedis + swss_artifact_name: sonic-swss-asan + artifact_name: docker-sonic-vs-asan + asan: true + - stage: Test dependsOn: BuildDocker condition: succeeded('BuildDocker') @@ -73,17 +122,29 @@ stages: parameters: log_artifact_name: log gcov_artifact_name: sonic-gcov - sonic_slave: sonic-slave-buster + sonic_slave: sonic-slave-bullseye archive_gcov: true +- stage: TestAsan + dependsOn: BuildDockerAsan + condition: succeeded('BuildDockerAsan') + jobs: + - template: .azure-pipelines/test-docker-sonic-vs-template.yml + parameters: + log_artifact_name: log-asan + gcov_artifact_name: sonic-gcov + sonic_slave: sonic-slave-bullseye + docker_sonic_vs_name: docker-sonic-vs-asan + asan: true + - stage: Gcov dependsOn: Test - condition: always() + condition: in(dependencies.Test.result, 'Succeeded', 'SucceededWithIssues') jobs: - template: .azure-pipelines/gcov.yml parameters: arch: amd64 - sonic_slave: sonic-slave-buster + sonic_slave: sonic-slave-bullseye swss_common_artifact_name: sonic-swss-common sairedis_artifact_name: sonic-sairedis swss_artifact_name: sonic-swss diff --git a/cfgmgr/Makefile.am b/cfgmgr/Makefile.am index e11ec4635a..685ab04407 100644 --- a/cfgmgr/Makefile.am +++ b/cfgmgr/Makefile.am @@ -15,7 +15,10 @@ dist_cfgmgr_DATA = \ buffer_pool_mellanox.lua \ buffer_check_headroom_vs.lua \ buffer_headroom_vs.lua \ - buffer_pool_vs.lua + buffer_pool_vs.lua \ + buffer_check_headroom_barefoot.lua \ + buffer_headroom_barefoot.lua \ + buffer_pool_barefoot.lua if DEBUG DBGFLAGS = -ggdb -DDEBUG @@ -23,70 +26,75 @@ else DBGFLAGS = -g endif -vlanmgrd_SOURCES = vlanmgrd.cpp vlanmgr.cpp $(top_srcdir)/orchagent/orch.cpp $(top_srcdir)/orchagent/request_parser.cpp $(top_srcdir)/orchagent/response_publisher.cpp shellcmd.h 
-vlanmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) -vlanmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) -vlanmgrd_LDADD = $(COMMON_LIBS) $(SAIMETA_LIBS) - -teammgrd_SOURCES = teammgrd.cpp teammgr.cpp $(top_srcdir)/orchagent/orch.cpp $(top_srcdir)/orchagent/request_parser.cpp $(top_srcdir)/orchagent/response_publisher.cpp shellcmd.h -teammgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) -teammgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) -teammgrd_LDADD = $(COMMON_LIBS) $(SAIMETA_LIBS) - -portmgrd_SOURCES = portmgrd.cpp portmgr.cpp $(top_srcdir)/orchagent/orch.cpp $(top_srcdir)/orchagent/request_parser.cpp $(top_srcdir)/orchagent/response_publisher.cpp shellcmd.h -portmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) -portmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) -portmgrd_LDADD = $(COMMON_LIBS) $(SAIMETA_LIBS) - -intfmgrd_SOURCES = intfmgrd.cpp intfmgr.cpp $(top_srcdir)/orchagent/orch.cpp $(top_srcdir)/orchagent/request_parser.cpp $(top_srcdir)/lib/subintf.cpp $(top_srcdir)/orchagent/response_publisher.cpp shellcmd.h -intfmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) -intfmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) -intfmgrd_LDADD = $(COMMON_LIBS) $(SAIMETA_LIBS) - -buffermgrd_SOURCES = buffermgrd.cpp buffermgr.cpp buffermgrdyn.cpp $(top_srcdir)/orchagent/orch.cpp $(top_srcdir)/orchagent/request_parser.cpp $(top_srcdir)/orchagent/response_publisher.cpp shellcmd.h -buffermgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) -buffermgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) -buffermgrd_LDADD = $(COMMON_LIBS) $(SAIMETA_LIBS) - -vrfmgrd_SOURCES = vrfmgrd.cpp vrfmgr.cpp $(top_srcdir)/orchagent/orch.cpp $(top_srcdir)/orchagent/request_parser.cpp $(top_srcdir)/orchagent/response_publisher.cpp shellcmd.h -vrfmgrd_CFLAGS = $(DBGFLAGS) 
$(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) -vrfmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) -vrfmgrd_LDADD = $(COMMON_LIBS) $(SAIMETA_LIBS) - -nbrmgrd_SOURCES = nbrmgrd.cpp nbrmgr.cpp $(top_srcdir)/orchagent/orch.cpp $(top_srcdir)/orchagent/request_parser.cpp $(top_srcdir)/orchagent/response_publisher.cpp shellcmd.h -nbrmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(LIBNL_CFLAGS) -nbrmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(LIBNL_CPPFLAGS) -nbrmgrd_LDADD = $(COMMON_LIBS) $(SAIMETA_LIBS) $(LIBNL_LIBS) - -vxlanmgrd_SOURCES = vxlanmgrd.cpp vxlanmgr.cpp $(top_srcdir)/orchagent/orch.cpp $(top_srcdir)/orchagent/request_parser.cpp $(top_srcdir)/orchagent/response_publisher.cpp shellcmd.h -vxlanmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) -vxlanmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) -vxlanmgrd_LDADD = $(COMMON_LIBS) $(SAIMETA_LIBS) - -sflowmgrd_SOURCES = sflowmgrd.cpp sflowmgr.cpp $(top_srcdir)/orchagent/orch.cpp $(top_srcdir)/orchagent/request_parser.cpp $(top_srcdir)/orchagent/response_publisher.cpp shellcmd.h -sflowmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) -sflowmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) -sflowmgrd_LDADD = $(COMMON_LIBS) $(SAIMETA_LIBS) - -natmgrd_SOURCES = natmgrd.cpp natmgr.cpp $(top_srcdir)/orchagent/orch.cpp $(top_srcdir)/orchagent/request_parser.cpp $(top_srcdir)/orchagent/response_publisher.cpp shellcmd.h -natmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) -natmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) -natmgrd_LDADD = $(COMMON_LIBS) $(SAIMETA_LIBS) - -coppmgrd_SOURCES = coppmgrd.cpp coppmgr.cpp $(top_srcdir)/orchagent/orch.cpp $(top_srcdir)/orchagent/request_parser.cpp $(top_srcdir)/orchagent/response_publisher.cpp shellcmd.h -coppmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) 
-coppmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) -coppmgrd_LDADD = $(COMMON_LIBS) $(SAIMETA_LIBS) - -tunnelmgrd_SOURCES = tunnelmgrd.cpp tunnelmgr.cpp $(top_srcdir)/orchagent/orch.cpp $(top_srcdir)/orchagent/request_parser.cpp $(top_srcdir)/orchagent/response_publisher.cpp shellcmd.h -tunnelmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) -tunnelmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) -tunnelmgrd_LDADD = $(COMMON_LIBS) $(SAIMETA_LIBS) - -macsecmgrd_SOURCES = macsecmgrd.cpp macsecmgr.cpp $(top_srcdir)/orchagent/orch.cpp $(top_srcdir)/orchagent/request_parser.cpp $(top_srcdir)/orchagent/response_publisher.cpp shellcmd.h -macsecmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) -macsecmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) -macsecmgrd_LDADD = $(COMMON_LIBS) $(SAIMETA_LIBS) +COMMON_ORCH_SOURCE = $(top_srcdir)/orchagent/orch.cpp \ + $(top_srcdir)/orchagent/request_parser.cpp \ + $(top_srcdir)/orchagent/response_publisher.cpp \ + $(top_srcdir)/lib/recorder.cpp + +vlanmgrd_SOURCES = vlanmgrd.cpp vlanmgr.cpp $(COMMON_ORCH_SOURCE) shellcmd.h +vlanmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +vlanmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +vlanmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) + +teammgrd_SOURCES = teammgrd.cpp teammgr.cpp $(COMMON_ORCH_SOURCE) shellcmd.h +teammgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +teammgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +teammgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) + +portmgrd_SOURCES = portmgrd.cpp portmgr.cpp $(COMMON_ORCH_SOURCE) shellcmd.h +portmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +portmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) 
$(CFLAGS_SAI) $(CFLAGS_ASAN) +portmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) + +intfmgrd_SOURCES = intfmgrd.cpp intfmgr.cpp $(top_srcdir)/lib/subintf.cpp $(COMMON_ORCH_SOURCE) shellcmd.h +intfmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +intfmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +intfmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) + +buffermgrd_SOURCES = buffermgrd.cpp buffermgr.cpp buffermgrdyn.cpp $(COMMON_ORCH_SOURCE) shellcmd.h +buffermgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +buffermgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +buffermgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) + +vrfmgrd_SOURCES = vrfmgrd.cpp vrfmgr.cpp $(COMMON_ORCH_SOURCE) shellcmd.h +vrfmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +vrfmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +vrfmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) + +nbrmgrd_SOURCES = nbrmgrd.cpp nbrmgr.cpp $(COMMON_ORCH_SOURCE) shellcmd.h +nbrmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(LIBNL_CFLAGS) $(CFLAGS_ASAN) +nbrmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(LIBNL_CPPFLAGS) $(CFLAGS_ASAN) +nbrmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) $(LIBNL_LIBS) + +vxlanmgrd_SOURCES = vxlanmgrd.cpp vxlanmgr.cpp $(COMMON_ORCH_SOURCE) shellcmd.h +vxlanmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +vxlanmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +vxlanmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) + +sflowmgrd_SOURCES = sflowmgrd.cpp sflowmgr.cpp $(COMMON_ORCH_SOURCE) shellcmd.h +sflowmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) 
$(CFLAGS_ASAN) +sflowmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +sflowmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) + +natmgrd_SOURCES = natmgrd.cpp natmgr.cpp $(COMMON_ORCH_SOURCE) shellcmd.h +natmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +natmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +natmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) + +coppmgrd_SOURCES = coppmgrd.cpp coppmgr.cpp $(COMMON_ORCH_SOURCE) shellcmd.h +coppmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +coppmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +coppmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) + +tunnelmgrd_SOURCES = tunnelmgrd.cpp tunnelmgr.cpp $(COMMON_ORCH_SOURCE) shellcmd.h +tunnelmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +tunnelmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +tunnelmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) + +macsecmgrd_SOURCES = macsecmgrd.cpp macsecmgr.cpp $(COMMON_ORCH_SOURCE) shellcmd.h +macsecmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +macsecmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +macsecmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) if GCOV_ENABLED vlanmgrd_LDADD += -lgcovpreload @@ -104,3 +112,19 @@ tunnelmgrd_LDADD += -lgcovpreload macsecmgrd_LDADD += -lgcovpreload endif +if ASAN_ENABLED +vlanmgrd_SOURCES += $(top_srcdir)/lib/asan.cpp +teammgrd_SOURCES += $(top_srcdir)/lib/asan.cpp +portmgrd_SOURCES += $(top_srcdir)/lib/asan.cpp +intfmgrd_SOURCES += $(top_srcdir)/lib/asan.cpp +buffermgrd_SOURCES += $(top_srcdir)/lib/asan.cpp +vrfmgrd_SOURCES += $(top_srcdir)/lib/asan.cpp +nbrmgrd_SOURCES += $(top_srcdir)/lib/asan.cpp 
+vxlanmgrd_SOURCES += $(top_srcdir)/lib/asan.cpp +sflowmgrd_SOURCES += $(top_srcdir)/lib/asan.cpp +natmgrd_SOURCES += $(top_srcdir)/lib/asan.cpp +coppmgrd_SOURCES += $(top_srcdir)/lib/asan.cpp +tunnelmgrd_SOURCES += $(top_srcdir)/lib/asan.cpp +macsecmgrd_SOURCES += $(top_srcdir)/lib/asan.cpp +endif + diff --git a/cfgmgr/buffer_check_headroom_barefoot.lua b/cfgmgr/buffer_check_headroom_barefoot.lua new file mode 100644 index 0000000000..74551b1a42 --- /dev/null +++ b/cfgmgr/buffer_check_headroom_barefoot.lua @@ -0,0 +1,6 @@ +local ret = {} + +table.insert(ret, "result:true") +table.insert(ret, "debug:No need to check port headroom limit as shared headroom pool model is supported.") + +return ret diff --git a/cfgmgr/buffer_check_headroom_mellanox.lua b/cfgmgr/buffer_check_headroom_mellanox.lua index 20b62d2938..6ae5b883ba 100644 --- a/cfgmgr/buffer_check_headroom_mellanox.lua +++ b/cfgmgr/buffer_check_headroom_mellanox.lua @@ -5,7 +5,7 @@ local port = KEYS[1] local input_profile_name = ARGV[1] -local input_profile_size = ARGV[2] +local input_profile_size = tonumber(ARGV[2]) local new_pg = ARGV[3] local function is_port_with_8lanes(lanes) @@ -60,7 +60,8 @@ if is_port_with_8lanes(lanes) then pipeline_latency = pipeline_latency * 2 - 1 egress_mirror_size = egress_mirror_size * 2 end -accumulative_size = accumulative_size + 2 * pipeline_latency * 1024 + egress_mirror_size +local lossy_pg_size = pipeline_latency * 1024 +accumulative_size = accumulative_size + lossy_pg_size + egress_mirror_size -- Fetch all keys in BUFFER_PG according to the port redis.call('SELECT', appl_db) @@ -81,41 +82,48 @@ local function get_number_of_pgs(keyname) return size end -local no_input_pg = true -if new_pg ~= nil then - if get_number_of_pgs(new_pg) ~= 0 then - no_input_pg = false - new_pg = 'BUFFER_PG_TABLE:' .. new_pg - end +-- Fetch all the PGs in APPL_DB, and store them into a hash table +local pg_keys = redis.call('KEYS', 'BUFFER_PG_TABLE:' .. port .. 
':*') +local all_pgs = {} +for i = 1, #pg_keys do + local profile = redis.call('HGET', pg_keys[i], 'profile') + all_pgs[pg_keys[i]] = profile +end + +-- Fetch all the pending PGs, and store them into the hash table +-- Overwrite any existing entries +local pending_pg_keys = redis.call('KEYS', '_BUFFER_PG_TABLE:' .. port .. ':*') +for i = 1, #pending_pg_keys do + local profile = redis.call('HGET', pending_pg_keys[i], 'profile') + -- Remove the leading underscore when storing it into the hash table + all_pgs[string.sub(pending_pg_keys[i], 2, -1)] = profile + table.insert(debuginfo, 'debug:pending entry: ' .. pending_pg_keys[i] .. ':' .. profile) +end + +if new_pg ~= nil and get_number_of_pgs(new_pg) ~= 0 then + all_pgs['BUFFER_PG_TABLE:' .. new_pg] = input_profile_name end --- Fetch all the PGs, accumulate the sizes +-- Handle all the PGs, accumulate the sizes -- Assume there is only one lossless profile configured among all PGs on each port table.insert(debuginfo, 'debug:other overhead:' .. accumulative_size) -local pg_keys = redis.call('KEYS', 'BUFFER_PG_TABLE:' .. port .. ':*') -for i = 1, #pg_keys do - local profile = redis.call('HGET', pg_keys[i], 'profile') +for pg_key, profile in pairs(all_pgs) do local current_profile_size - if profile ~= 'ingress_lossy_profile' and (no_input_pg or new_pg ~= pg_keys[i]) then - if profile ~= input_profile_name and not no_input_pg then - local referenced_profile = redis.call('HGETALL', 'BUFFER_PROFILE_TABLE:' .. profile) - for j = 1, #referenced_profile, 2 do - if referenced_profile[j] == 'size' then - current_profile_size = tonumber(referenced_profile[j+1]) - end - end - else - current_profile_size = input_profile_size - profile = input_profile_name + if profile ~= input_profile_name then + local referenced_profile_size = redis.call('HGET', 'BUFFER_PROFILE_TABLE:' .. profile, 'size') + if not referenced_profile_size then + referenced_profile_size = redis.call('HGET', '_BUFFER_PROFILE_TABLE:' .. 
profile, 'size') + table.insert(debuginfo, 'debug:pending profile: ' .. profile) end - accumulative_size = accumulative_size + current_profile_size * get_number_of_pgs(pg_keys[i]) - table.insert(debuginfo, 'debug:' .. pg_keys[i] .. ':' .. profile .. ':' .. current_profile_size .. ':' .. get_number_of_pgs(pg_keys[i]) .. ':accu:' .. accumulative_size) + current_profile_size = tonumber(referenced_profile_size) + else + current_profile_size = input_profile_size end -end - -if not no_input_pg then - accumulative_size = accumulative_size + input_profile_size * get_number_of_pgs(new_pg) - table.insert(debuginfo, 'debug:' .. new_pg .. '*:' .. input_profile_name .. ':' .. input_profile_size .. ':' .. get_number_of_pgs(new_pg) .. ':accu:' .. accumulative_size) + if current_profile_size == 0 then + current_profile_size = lossy_pg_size + end + accumulative_size = accumulative_size + current_profile_size * get_number_of_pgs(pg_key) + table.insert(debuginfo, 'debug:' .. pg_key .. ':' .. profile .. ':' .. current_profile_size .. ':' .. get_number_of_pgs(pg_key) .. ':accu:' .. 
accumulative_size) end if max_headroom_size > accumulative_size then diff --git a/cfgmgr/buffer_headroom_barefoot.lua b/cfgmgr/buffer_headroom_barefoot.lua new file mode 100644 index 0000000000..f5e61013b3 --- /dev/null +++ b/cfgmgr/buffer_headroom_barefoot.lua @@ -0,0 +1,147 @@ +-- KEYS - profile name +-- ARGV[1] - port speed +-- ARGV[2] - cable length +-- ARGV[3] - port mtu +-- ARGV[4] - gearbox delay + +-- Parameters retried from databases: +-- From CONFIG_DB.LOSSLESS_TRAFFIC_PATTERN +-- small packet percentage: the parameter which is used to control worst case regarding the cell utilization +-- mtu: the mtu of lossless packet +-- From STATE_DB.ASIC_TABLE: +-- cell size: cell_size of the ASIC +-- pipeline_latency: the latency (XON) +-- mac_phy_delay: the bytes held in the switch chip's egress pipeline and PHY when XOFF has been generated +-- peer_response_time: the bytes that are held in the peer switch's pipeline and will be send out when the XOFF packet is received + +local lossless_mtu +local small_packet_percentage +local cell_size +local pipeline_latency +local mac_phy_delay +local peer_response_time + +local port_speed = tonumber(ARGV[1]) +local cable_length = tonumber(string.sub(ARGV[2], 1, -2)) +local port_mtu = tonumber(ARGV[3]) +local gearbox_delay = tonumber(ARGV[4]) + +local config_db = "4" +local state_db = "6" + +local ret = {} + +-- Pause quanta should be taken for each operating speed is defined in IEEE 802.3 31B.3.7. +-- The key of table pause_quanta_per_speed is operating speed at Mb/s. +-- The value of table pause_quanta_per_speed is the number of pause_quanta. 
+local pause_quanta_per_speed = {} +pause_quanta_per_speed[400000] = 905 +pause_quanta_per_speed[200000] = 453 +pause_quanta_per_speed[100000] = 394 +pause_quanta_per_speed[50000] = 147 +pause_quanta_per_speed[40000] = 118 +pause_quanta_per_speed[25000] = 80 +pause_quanta_per_speed[10000] = 67 +pause_quanta_per_speed[1000] = 2 +pause_quanta_per_speed[100] = 1 + +-- Get pause_quanta from the pause_quanta_per_speed table +local pause_quanta = pause_quanta_per_speed[port_speed] + +if gearbox_delay == nil then + gearbox_delay = 0 +end + +-- Fetch ASIC info from ASIC table in STATE_DB +redis.call("SELECT", state_db) +local asic_keys = redis.call("KEYS", "ASIC_TABLE*") + +-- Only one key should exist +local asic_table_content = redis.call("HGETALL", asic_keys[1]) + +for i = 1, #asic_table_content, 2 do + if asic_table_content[i] == "cell_size" then + cell_size = tonumber(asic_table_content[i+1]) + end + if asic_table_content[i] == "pipeline_latency" then + pipeline_latency = tonumber(asic_table_content[i+1]) * 1024 + end + if asic_table_content[i] == "mac_phy_delay" then + mac_phy_delay = tonumber(asic_table_content[i+1]) * 1024 + end + -- If failed to get pause_quanta from the table, then use the default peer_response_time stored in state_db + if asic_table_content[i] == "peer_response_time" and pause_quanta == nil then + peer_response_time = tonumber(asic_table_content[i+1]) * 1024 + end +end + +-- Fetch lossless traffic info from CONFIG_DB +redis.call("SELECT", config_db) +local lossless_traffic_keys = redis.call("KEYS", "LOSSLESS_TRAFFIC_PATTERN*") + +-- Only one key should exist +local lossless_traffic_table_content = redis.call("HGETALL", lossless_traffic_keys[1]) +for i = 1, #lossless_traffic_table_content, 2 do + if lossless_traffic_table_content[i] == "mtu" then + lossless_mtu = tonumber(lossless_traffic_table_content[i+1]) + end + if lossless_traffic_table_content[i] == "small_packet_percentage" then + small_packet_percentage = 
tonumber(lossless_traffic_table_content[i+1]) + end +end + +-- Fetch the shared headroom pool size +local shp_size = tonumber(redis.call("HGET", "BUFFER_POOL|ingress_lossless_pool", "xoff")) + +-- Calculate the headroom information +local speed_of_light = 198000000 +local minimal_packet_size = 64 +local cell_occupancy +local worst_case_factor +local propagation_delay +local bytes_on_cable +local bytes_on_gearbox +local xoff_value +local xon_value +local headroom_size + +if cell_size > 2 * minimal_packet_size then + worst_case_factor = cell_size / minimal_packet_size +else + worst_case_factor = (2 * cell_size) / (1 + cell_size) +end + +cell_occupancy = (100 - small_packet_percentage + small_packet_percentage * worst_case_factor) / 100 + +if (gearbox_delay == 0) then + bytes_on_gearbox = 0 +else + bytes_on_gearbox = port_speed * gearbox_delay / (8 * 1024) +end + +-- If successfully get pause_quanta from the table, then calculate peer_response_time from it +if pause_quanta ~= nil then + peer_response_time = (pause_quanta) * 512 / 8 +end + +if port_speed == 400000 then + peer_response_time = 2 * peer_response_time +end + +bytes_on_cable = 2 * cable_length * port_speed * 1000000000 / speed_of_light / (8 * 1024) +propagation_delay = port_mtu + bytes_on_cable + 2 * bytes_on_gearbox + mac_phy_delay + peer_response_time + +-- Calculate the xoff and xon and then round up at 1024 bytes +xoff_value = lossless_mtu + propagation_delay * cell_occupancy +xoff_value = math.ceil(xoff_value / 1024) * 1024 +xon_value = pipeline_latency +xon_value = math.ceil(xon_value / 1024) * 1024 + +headroom_size = xon_value +headroom_size = math.ceil(headroom_size / 1024) * 1024 + +table.insert(ret, "xon" .. ":" .. math.ceil(xon_value)) +table.insert(ret, "xoff" .. ":" .. math.ceil(xoff_value)) +table.insert(ret, "size" .. ":" .. 
math.ceil(headroom_size)) + +return ret diff --git a/cfgmgr/buffer_pool_barefoot.lua b/cfgmgr/buffer_pool_barefoot.lua new file mode 100644 index 0000000000..49c3a961f7 --- /dev/null +++ b/cfgmgr/buffer_pool_barefoot.lua @@ -0,0 +1,30 @@ +-- KEYS - None +-- ARGV - None + +local result = {} +local config_db = "4" +local state_db = "6" + +redis.call("SELECT", state_db) +local asic_keys = redis.call("KEYS", "ASIC_TABLE*") +local cell_size = tonumber(redis.call("HGET", asic_keys[1], "cell_size")) + +-- Based on cell_size, calculate singular headroom +local ppg_headroom = 400 * cell_size + +redis.call("SELECT", config_db) +local ports = redis.call("KEYS", "PORT|*") +local ports_num = #ports + +-- 2 PPGs per port, 70% of possible maximum value. +local shp_size = math.ceil(ports_num * 2 * ppg_headroom * 0.7) + +local ingress_lossless_pool_size_fixed = tonumber(redis.call('HGET', 'BUFFER_POOL|ingress_lossless_pool', 'size')) +local ingress_lossy_pool_size_fixed = tonumber(redis.call('HGET', 'BUFFER_POOL|ingress_lossy_pool', 'size')) +local egress_lossy_pool_size_fixed = tonumber(redis.call('HGET', 'BUFFER_POOL|egress_lossy_pool', 'size')) + +table.insert(result, "ingress_lossless_pool" .. ":" .. ingress_lossless_pool_size_fixed .. ":" .. shp_size) +table.insert(result, "ingress_lossy_pool" .. ":" .. ingress_lossy_pool_size_fixed) +table.insert(result, "egress_lossy_pool" .. ":" .. 
egress_lossy_pool_size_fixed) + +return result diff --git a/cfgmgr/buffer_pool_mellanox.lua b/cfgmgr/buffer_pool_mellanox.lua index 8c51c28706..ee48fe0403 100644 --- a/cfgmgr/buffer_pool_mellanox.lua +++ b/cfgmgr/buffer_pool_mellanox.lua @@ -10,12 +10,13 @@ local port_count_8lanes = 0 -- Number of lossy PG on ports with 8 lanes local lossypg_8lanes = 0 +local ingress_profile_is_lossless = {} + -- Private headrom local private_headroom = 10 * 1024 local result = {} local profiles = {} -local lossless_profiles = {} local total_port = 0 @@ -52,11 +53,11 @@ local function iterate_all_items(all_items, check_lossless) port = string.match(all_items[i], "Ethernet%d+") if port ~= nil then local range = string.match(all_items[i], "Ethernet%d+:([^%s]+)$") - local profile_name = redis.call('HGET', all_items[i], 'profile') - if not profile_name then + local profile_name_without_table = redis.call('HGET', all_items[i], 'profile') + if not profile_name_without_table then return 1 end - profile_name = "BUFFER_PROFILE_TABLE:" .. profile_name + local profile_name = "BUFFER_PROFILE_TABLE:" .. 
profile_name_without_table local profile_ref_count = profiles[profile_name] if profile_ref_count == nil then -- Indicate an error in case the referenced profile hasn't been inserted or has been removed @@ -71,10 +72,11 @@ local function iterate_all_items(all_items, check_lossless) size = 1 + tonumber(string.sub(range, -1)) - tonumber(string.sub(range, 1, 1)) end profiles[profile_name] = profile_ref_count + size - if port_set_8lanes[port] and profile_name == 'BUFFER_PROFILE_TABLE:ingress_lossy_profile' then + if port_set_8lanes[port] and ingress_profile_is_lossless[profile_name] == false then + -- Handle additional buffer reserved for lossy PG on 8-lane ports lossypg_8lanes = lossypg_8lanes + size end - if check_lossless and lossless_profiles[profile_name] then + if check_lossless and ingress_profile_is_lossless[profile_name] then if lossless_ports[port] == nil then lossless_port_count = lossless_port_count + 1 lossless_ports[port] = true @@ -113,7 +115,8 @@ local function iterate_profile_list(all_items) -- To distinguish both cases, a new name "ingress_lossy_profile_list" is introduced to indicate -- the profile is used by the profile list where its size should be zero. profile_name = 'BUFFER_PROFILE_TABLE:' .. profile_name - if profile_name == 'BUFFER_PROFILE_TABLE:ingress_lossy_profile' then + -- TODO CHECK ALL LOSSY PROFILES + if ingress_profile_is_lossless[profile_name] == false then profile_name = profile_name .. 
'_list' if profiles[profile_name] == nil then profiles[profile_name] = 0 @@ -162,9 +165,25 @@ local function fetch_buffer_pool_size_from_appldb() end end +-- Main -- -- Connect to CONFIG_DB redis.call('SELECT', config_db) +-- Parse all the pools and seperate them according to the direction +local ipools = {} +local epools = {} +local pools = redis.call('KEYS', 'BUFFER_POOL|*') +for i = 1, #pools, 1 do + local type = redis.call('HGET', pools[i], 'type') + if type == 'ingress' then + table.insert(ipools, pools[i]) + else + if type == 'egress' then + table.insert(epools, pools[i]) + end + end +end + local ports_table = redis.call('KEYS', 'PORT|*') total_port = #ports_table @@ -250,9 +269,19 @@ redis.call('SELECT', appl_db) local all_profiles = redis.call('KEYS', 'BUFFER_PROFILE*') for i = 1, #all_profiles, 1 do if all_profiles[i] ~= "BUFFER_PROFILE_TABLE_KEY_SET" and all_profiles[i] ~= "BUFFER_PROFILE_TABLE_DEL_SET" then - local xoff = redis.call('HGET', all_profiles[i], 'xoff') - if xoff then - lossless_profiles[all_profiles[i]] = true + local pool = redis.call('HGET', all_profiles[i], 'pool') + for j = 1, #ipools, 1 do + if "BUFFER_POOL|" .. 
pool == ipools[j] then + -- For ingress profiles, check whether it is lossless or lossy + -- For lossy profiles, there is buffer implicitly reserved when they are applied on PGs + local xoff = redis.call('HGET', all_profiles[i], 'xoff') + if xoff then + ingress_profile_is_lossless[all_profiles[i]] = true + else + ingress_profile_is_lossless[all_profiles[i]] = false + end + break + end end profiles[all_profiles[i]] = 0 end @@ -289,12 +318,13 @@ local accumulative_xoff = 0 for name in pairs(profiles) do if name ~= "BUFFER_PROFILE_TABLE_KEY_SET" and name ~= "BUFFER_PROFILE_TABLE_DEL_SET" then local size = tonumber(redis.call('HGET', name, 'size')) - if size ~= nil then - if name == "BUFFER_PROFILE_TABLE:ingress_lossy_profile" then - size = size + lossypg_reserved + if size ~= nil then + -- Handle the implicitly reserved buffer for lossy profile applied on PG + if ingress_profile_is_lossless[name] == false then + size = size + lossypg_reserved end if size ~= 0 then - if shp_enabled and shp_size == 0 then + if shp_size == 0 then local xon = tonumber(redis.call('HGET', name, 'xon')) local xoff = tonumber(redis.call('HGET', name, 'xoff')) if xon ~= nil and xoff ~= nil and xon + xoff > size then @@ -304,6 +334,8 @@ for name in pairs(profiles) do accumulative_occupied_buffer = accumulative_occupied_buffer + size * profiles[name] end table.insert(statistics, {name, size, profiles[name]}) + else + table.insert(statistics, {name, "-", profiles[name]}) end end end @@ -314,6 +346,12 @@ accumulative_occupied_buffer = accumulative_occupied_buffer + lossypg_extra_for_ -- Accumulate sizes for private headrooms local accumulative_private_headroom = 0 +local force_enable_shp = false +if accumulative_xoff > 0 and shp_enabled ~= true then + force_enable_shp = true + shp_size = 655360 + shp_enabled = true +end if shp_enabled then accumulative_private_headroom = lossless_port_count * private_headroom accumulative_occupied_buffer = accumulative_occupied_buffer + 
accumulative_private_headroom @@ -336,7 +374,6 @@ redis.call('SELECT', config_db) -- Fetch all the pools that need update local pools_need_update = {} -local ipools = redis.call('KEYS', 'BUFFER_POOL|ingress*') local ingress_pool_count = 0 local ingress_lossless_pool_size = nil for i = 1, #ipools, 1 do @@ -351,7 +388,6 @@ for i = 1, #ipools, 1 do end end -local epools = redis.call('KEYS', 'BUFFER_POOL|egress*') for i = 1, #epools, 1 do local size = redis.call('HGET', epools[i], 'size') if not size then @@ -361,6 +397,9 @@ end if shp_enabled and shp_size == 0 then shp_size = math.ceil(accumulative_xoff / over_subscribe_ratio) + if shp_size == 0 then + shp_size = 655360 + end end local pool_size @@ -402,6 +441,7 @@ table.insert(result, "debug:mgmt_pool:" .. mgmt_pool_size) if shp_enabled then table.insert(result, "debug:accumulative_private_headroom:" .. accumulative_private_headroom) table.insert(result, "debug:accumulative xoff:" .. accumulative_xoff) + table.insert(result, "debug:force enabled shp:" .. tostring(force_enable_shp)) end table.insert(result, "debug:accumulative_mgmt_pg:" .. accumulative_management_pg) table.insert(result, "debug:egress_mirror:" .. 
accumulative_egress_mirror_overhead) diff --git a/cfgmgr/buffermgr.cpp b/cfgmgr/buffermgr.cpp index 0fb39a862a..ba247197c1 100644 --- a/cfgmgr/buffermgr.cpp +++ b/cfgmgr/buffermgr.cpp @@ -11,10 +11,13 @@ #include "exec.h" #include "shellcmd.h" #include "warm_restart.h" +#include "converter.h" using namespace std; using namespace swss; +#define PORT_NAME_GLOBAL "global" + BufferMgr::BufferMgr(DBConnector *cfgDb, DBConnector *applDb, string pg_lookup_file, const vector &tableNames) : Orch(cfgDb, tableNames), m_cfgPortTable(cfgDb, CFG_PORT_TABLE_NAME), @@ -133,11 +136,11 @@ Create/update two tables: profile (in m_cfgBufferProfileTable) and port buffer ( } } */ -task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up) +task_process_status BufferMgr::doSpeedUpdateTask(string port) { - vector fvVectorPg, fvVectorProfile; string cable; string speed; + string pfc_enable; if (m_cableLenLookup.count(port) == 0) { @@ -152,32 +155,71 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up) return task_process_status::task_success; } - speed = m_speedLookup[port]; + if (m_portStatusLookup.count(port) == 0) + { + // admin_statue is not available yet. This can happen when notification of `PORT_QOS_MAP` table + // comes first. + SWSS_LOG_INFO("pfc_enable status is not available for port %s", port.c_str()); + return task_process_status::task_need_retry; + } - string buffer_pg_key = port + m_cfgBufferPgTable.getTableNameSeparator() + LOSSLESS_PGS; + if (m_portPfcStatus.count(port) == 0) + { + // PORT_QOS_MAP is not ready yet. 
The notification is cleared, and buffer pg + // will be handled when `pfc_enable` in `PORT_QOS_MAP` table is available + SWSS_LOG_INFO("pfc_enable status is not available for port %s", port.c_str()); + return task_process_status::task_success; + } + pfc_enable = m_portPfcStatus[port]; + + speed = m_speedLookup[port]; // key format is pg_lossless___profile string buffer_profile_key = "pg_lossless_" + speed + "_" + cable + "_profile"; string profile_ref = buffer_profile_key; + + vector lossless_pgs = tokenize(pfc_enable, ','); + // Convert to bitmap + unsigned long lossless_pg_id = 0; + for (auto pg : lossless_pgs) + { + try + { + uint8_t cur_pg = to_uint(pg); + lossless_pg_id |= (1< lossless_pg_combinations = generateIdListFromMap(lossless_pg_id, sizeof(lossless_pg_id)); - m_cfgBufferPgTable.get(buffer_pg_key, fvVectorPg); - - if (!admin_up && m_platform == "mellanox") + if (m_portStatusLookup[port] == "down" && (m_platform == "mellanox" || m_platform == "barefoot")) { - // Remove the entry in BUFFER_PG table if any - if (!fvVectorPg.empty()) + for (auto lossless_pg : lossless_pg_combinations) { - for (auto &prop : fvVectorPg) + // Remove the entry in BUFFER_PG table if any + vector fvVectorPg; + string buffer_pg_key = port + m_cfgBufferPgTable.getTableNameSeparator() + lossless_pg; + + m_cfgBufferPgTable.get(buffer_pg_key, fvVectorPg); + if (!fvVectorPg.empty()) { - if (fvField(prop) == "profile") + for (auto &prop : fvVectorPg) { - if (fvValue(prop) == profile_ref) + if (fvField(prop) == "profile") { - SWSS_LOG_NOTICE("Removing PG %s from port %s which is administrative down", buffer_pg_key.c_str(), port.c_str()); - m_cfgBufferPgTable.del(buffer_pg_key); - } - else - { - SWSS_LOG_NOTICE("Not default profile %s is configured on PG %s, won't reclaim buffer", fvValue(prop).c_str(), buffer_pg_key.c_str()); + if (fvValue(prop) == profile_ref) + { + SWSS_LOG_NOTICE("Removing PG %s from port %s which is administrative down", buffer_pg_key.c_str(), port.c_str()); + 
m_cfgBufferPgTable.del(buffer_pg_key); + } + else + { + SWSS_LOG_NOTICE("Not default profile %s is configured on PG %s, won't reclaim buffer", fvValue(prop).c_str(), buffer_pg_key.c_str()); + } } } } @@ -185,14 +227,15 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up) return task_process_status::task_success; } - + if (m_pgProfileLookup.count(speed) == 0 || m_pgProfileLookup[speed].count(cable) == 0) { - SWSS_LOG_ERROR("Unable to create/update PG profile for port %s. No PG profile configured for speed %s and cable length %s", - port.c_str(), speed.c_str(), cable.c_str()); - return task_process_status::task_invalid_entry; + SWSS_LOG_ERROR("Unable to create/update PG profile for port %s. No PG profile configured for speed %s and cable length %s", + port.c_str(), speed.c_str(), cable.c_str()); + return task_process_status::task_invalid_entry; } + vector fvVectorProfile; // check if profile already exists - if yes - skip creation m_cfgBufferProfileTable.get(buffer_profile_key, fvVectorProfile); // Create record in BUFFER_PROFILE table @@ -213,9 +256,10 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up) fvVectorProfile.push_back(make_pair("pool", INGRESS_LOSSLESS_PG_POOL_NAME)); fvVectorProfile.push_back(make_pair("xon", m_pgProfileLookup[speed][cable].xon)); - if (m_pgProfileLookup[speed][cable].xon_offset.length() > 0) { + if (m_pgProfileLookup[speed][cable].xon_offset.length() > 0) + { fvVectorProfile.push_back(make_pair("xon_offset", - m_pgProfileLookup[speed][cable].xon_offset)); + m_pgProfileLookup[speed][cable].xon_offset)); } fvVectorProfile.push_back(make_pair("xoff", m_pgProfileLookup[speed][cable].xoff)); fvVectorProfile.push_back(make_pair("size", m_pgProfileLookup[speed][cable].size)); @@ -227,20 +271,32 @@ task_process_status BufferMgr::doSpeedUpdateTask(string port, bool admin_up) SWSS_LOG_NOTICE("Reusing existing profile '%s'", buffer_profile_key.c_str()); } - /* Check if PG Mapping is already 
then log message and return. */ - for (auto& prop : fvVectorPg) + for (auto lossless_pg : lossless_pg_combinations) { - if ((fvField(prop) == "profile") && (profile_ref == fvValue(prop))) + vector fvVectorPg; + string buffer_pg_key = port + m_cfgBufferPgTable.getTableNameSeparator() + lossless_pg; + + m_cfgBufferPgTable.get(buffer_pg_key, fvVectorPg); + bool profile_existing = false; + /* Check if PG Mapping is already then log message and return. */ + for (auto& prop : fvVectorPg) { - SWSS_LOG_NOTICE("PG to Buffer Profile Mapping %s already present", buffer_pg_key.c_str()); - return task_process_status::task_success; + if ((fvField(prop) == "profile") && (profile_ref == fvValue(prop))) + { + SWSS_LOG_NOTICE("PG to Buffer Profile Mapping %s already present", buffer_pg_key.c_str()); + profile_existing = true; + break; + } } - } - - fvVectorPg.clear(); + if (profile_existing) + { + continue; + } + fvVectorPg.clear(); - fvVectorPg.push_back(make_pair("profile", profile_ref)); - m_cfgBufferPgTable.set(buffer_pg_key, fvVectorPg); + fvVectorPg.push_back(make_pair("profile", profile_ref)); + m_cfgBufferPgTable.set(buffer_pg_key, fvVectorPg); + } return task_process_status::task_success; } @@ -346,6 +402,53 @@ void BufferMgr::doBufferMetaTask(Consumer &consumer) } } +/* +Parse PORT_QOS_MAP to retrieve on which queue PFC is enable, and +cached in a map +*/ +void BufferMgr::doPortQosTableTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple tuple = it->second; + string port_name = kfvKey(tuple); + if (port_name == PORT_NAME_GLOBAL) + { + // Ignore the entry for global level + it = consumer.m_toSync.erase(it); + continue; + } + string op = kfvOp(tuple); + if (op == SET_COMMAND) + { + bool update_pfc_enable = false; + for (auto itp : kfvFieldsValues(tuple)) + { + if (fvField(itp) == "pfc_enable") + { + if (m_portPfcStatus.count(port_name) == 0 || m_portPfcStatus[port_name] 
!= fvValue(itp)) + { + m_portPfcStatus[port_name] = fvValue(itp); + update_pfc_enable = true; + } + SWSS_LOG_INFO("Got pfc enable status for port %s status %s", port_name.c_str(), fvValue(itp).c_str()); + break; + } + } + if (update_pfc_enable) + { + // The return status is ignored + doSpeedUpdateTask(port_name); + } + } + it = consumer.m_toSync.erase(it); + } + +} + void BufferMgr::doTask(Consumer &consumer) { SWSS_LOG_ENTER(); @@ -399,6 +502,12 @@ void BufferMgr::doTask(Consumer &consumer) return; } + if (table_name == CFG_PORT_QOS_MAP_TABLE_NAME) + { + doPortQosTableTask(consumer); + return; + } + auto it = consumer.m_toSync.begin(); while (it != consumer.m_toSync.end()) { @@ -422,7 +531,6 @@ void BufferMgr::doTask(Consumer &consumer) } else if (m_pgfile_processed && table_name == CFG_PORT_TABLE_NAME) { - bool admin_up = false; for (auto i : kfvFieldsValues(t)) { if (fvField(i) == "speed") @@ -431,39 +539,34 @@ void BufferMgr::doTask(Consumer &consumer) } if (fvField(i) == "admin_status") { - admin_up = ("up" == fvValue(i)); + m_portStatusLookup[port] = fvValue(i); } } if (m_speedLookup.count(port) != 0) { // create/update profile for port - task_status = doSpeedUpdateTask(port, admin_up); + task_status = doSpeedUpdateTask(port); } + } - if (task_status != task_process_status::task_success) - { + switch (task_status) + { + case task_process_status::task_failed: + SWSS_LOG_ERROR("Failed to process table update"); + return; + case task_process_status::task_need_retry: + SWSS_LOG_INFO("Unable to process table update. 
Will retry..."); + ++it; + break; + case task_process_status::task_invalid_entry: + SWSS_LOG_ERROR("Failed to process invalid entry, drop it"); + it = consumer.m_toSync.erase(it); + break; + default: + it = consumer.m_toSync.erase(it); break; - } } } - - switch (task_status) - { - case task_process_status::task_failed: - SWSS_LOG_ERROR("Failed to process table update"); - return; - case task_process_status::task_need_retry: - SWSS_LOG_INFO("Unable to process table update. Will retry..."); - ++it; - break; - case task_process_status::task_invalid_entry: - SWSS_LOG_ERROR("Failed to process invalid entry, drop it"); - it = consumer.m_toSync.erase(it); - break; - default: - it = consumer.m_toSync.erase(it); - break; - } } } diff --git a/cfgmgr/buffermgr.h b/cfgmgr/buffermgr.h index e7f04465ef..b9b3f2c496 100644 --- a/cfgmgr/buffermgr.h +++ b/cfgmgr/buffermgr.h @@ -11,7 +11,6 @@ namespace swss { #define INGRESS_LOSSLESS_PG_POOL_NAME "ingress_lossless_pool" -#define LOSSLESS_PGS "3-4" #define BUFFERMGR_TIMER_PERIOD 10 @@ -28,6 +27,8 @@ typedef std::map pg_profile_lookup_t; typedef std::map port_cable_length_t; typedef std::map port_speed_t; +typedef std::map port_pfc_status_t; +typedef std::map port_admin_status_t; class BufferMgr : public Orch { @@ -56,17 +57,22 @@ class BufferMgr : public Orch pg_profile_lookup_t m_pgProfileLookup; port_cable_length_t m_cableLenLookup; + port_admin_status_t m_portStatusLookup; port_speed_t m_speedLookup; std::string getPgPoolMode(); void readPgProfileLookupFile(std::string); task_process_status doCableTask(std::string port, std::string cable_length); - task_process_status doSpeedUpdateTask(std::string port, bool admin_up); + task_process_status doSpeedUpdateTask(std::string port); void doBufferTableTask(Consumer &consumer, ProducerStateTable &applTable); void transformSeperator(std::string &name); void doTask(Consumer &consumer); void doBufferMetaTask(Consumer &consumer); + + port_pfc_status_t m_portPfcStatus; + void 
doPortQosTableTask(Consumer &consumer); + }; } diff --git a/cfgmgr/buffermgrd.cpp b/cfgmgr/buffermgrd.cpp index 05932a9e3c..fddaac930b 100644 --- a/cfgmgr/buffermgrd.cpp +++ b/cfgmgr/buffermgrd.cpp @@ -21,26 +21,6 @@ using json = nlohmann::json; /* SELECT() function timeout retry time, in millisecond */ #define SELECT_TIMEOUT 1000 -/* - * Following global variables are defined here for the purpose of - * using existing Orch class which is to be refactored soon to - * eliminate the direct exposure of the global variables. - * - * Once Orch class refactoring is done, these global variables - * should be removed from here. - */ -int gBatchSize = 0; -bool gSwssRecord = false; -bool gLogRotate = false; -ofstream gRecordOfs; -string gRecordFile; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; -/* Global database mutex */ -mutex gDbMutex; - void usage() { cout << "Usage: buffermgrd <-l pg_lookup.ini|-a asic_table.json [-p peripheral_table.json] [-z zero_profiles.json]>" << endl; @@ -189,6 +169,8 @@ int main(int argc, char **argv) WarmStart::initialize("buffermgrd", "swss"); WarmStart::checkWarmStart("buffermgrd", "swss"); + DBConnector applStateDb("APPL_STATE_DB", 0); + vector buffer_table_connectors = { TableConnector(&cfgDb, CFG_PORT_TABLE_NAME), TableConnector(&cfgDb, CFG_PORT_CABLE_LEN_TABLE_NAME), @@ -202,7 +184,7 @@ int main(int argc, char **argv) TableConnector(&stateDb, STATE_BUFFER_MAXIMUM_VALUE_TABLE), TableConnector(&stateDb, STATE_PORT_TABLE_NAME) }; - cfgOrchList.emplace_back(new BufferMgrDynamic(&cfgDb, &stateDb, &applDb, buffer_table_connectors, peripherial_table_ptr, zero_profiles_ptr)); + cfgOrchList.emplace_back(new BufferMgrDynamic(&cfgDb, &stateDb, &applDb, &applStateDb, buffer_table_connectors, peripherial_table_ptr, zero_profiles_ptr)); } else if (!pg_lookup_file.empty()) { @@ -215,7 +197,8 @@ int main(int argc, char **argv) 
CFG_BUFFER_QUEUE_TABLE_NAME, CFG_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, CFG_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME, - CFG_DEVICE_METADATA_TABLE_NAME + CFG_DEVICE_METADATA_TABLE_NAME, + CFG_PORT_QOS_MAP_TABLE_NAME }; cfgOrchList.emplace_back(new BufferMgr(&cfgDb, &applDb, pg_lookup_file, cfg_buffer_tables)); } diff --git a/cfgmgr/buffermgrdyn.cpp b/cfgmgr/buffermgrdyn.cpp index b4578c2370..6c9a1e831e 100644 --- a/cfgmgr/buffermgrdyn.cpp +++ b/cfgmgr/buffermgrdyn.cpp @@ -26,21 +26,22 @@ using namespace std; using namespace swss; -BufferMgrDynamic::BufferMgrDynamic(DBConnector *cfgDb, DBConnector *stateDb, DBConnector *applDb, const vector &tables, shared_ptr> gearboxInfo, shared_ptr> zeroProfilesInfo) : +BufferMgrDynamic::BufferMgrDynamic(DBConnector *cfgDb, DBConnector *stateDb, DBConnector *applDb, DBConnector *applStateDb, const vector &tables, shared_ptr> gearboxInfo, shared_ptr> zeroProfilesInfo) : Orch(tables), m_platform(), - m_bufferDirections({BUFFER_INGRESS, BUFFER_EGRESS}), - m_bufferObjectNames({"priority group", "queue"}), - m_bufferDirectionNames({"ingress", "egress"}), + m_bufferDirections{BUFFER_INGRESS, BUFFER_EGRESS}, + m_bufferObjectNames{"priority group", "queue"}, + m_bufferDirectionNames{"ingress", "egress"}, m_applDb(applDb), m_zeroProfilesLoaded(false), m_supportRemoving(true), m_cfgDefaultLosslessBufferParam(cfgDb, CFG_DEFAULT_LOSSLESS_BUFFER_PARAMETER), m_cfgDeviceMetaDataTable(cfgDb, CFG_DEVICE_METADATA_TABLE_NAME), m_applBufferPoolTable(applDb, APP_BUFFER_POOL_TABLE_NAME), + m_applStateBufferPoolTable(applStateDb, APP_BUFFER_POOL_TABLE_NAME), m_applBufferProfileTable(applDb, APP_BUFFER_PROFILE_TABLE_NAME), - m_applBufferObjectTables({ProducerStateTable(applDb, APP_BUFFER_PG_TABLE_NAME), ProducerStateTable(applDb, APP_BUFFER_QUEUE_TABLE_NAME)}), - m_applBufferProfileListTables({ProducerStateTable(applDb, APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME), ProducerStateTable(applDb, APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME)}), + 
m_applBufferObjectTables{ProducerStateTable(applDb, APP_BUFFER_PG_TABLE_NAME), ProducerStateTable(applDb, APP_BUFFER_QUEUE_TABLE_NAME)}, + m_applBufferProfileListTables{ProducerStateTable(applDb, APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME), ProducerStateTable(applDb, APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME)}, m_statePortTable(stateDb, STATE_PORT_TABLE_NAME), m_stateBufferMaximumTable(stateDb, STATE_BUFFER_MAXIMUM_VALUE_TABLE), m_stateBufferPoolTable(stateDb, STATE_BUFFER_POOL_TABLE_NAME), @@ -111,8 +112,11 @@ BufferMgrDynamic::BufferMgrDynamic(DBConnector *cfgDb, DBConnector *stateDb, DBC } catch (...) { - SWSS_LOG_ERROR("Lua scripts for buffer calculation were not loaded successfully, buffermgrd won't start"); - return; + if (platform != "mock_test") + { + SWSS_LOG_ERROR("Lua scripts for buffer calculation were not loaded successfully, buffermgrd won't start"); + return; + } } // Init timer @@ -718,7 +722,13 @@ void BufferMgrDynamic::recalculateSharedBufferPool() // - In case the shared headroom pool size is statically configured, as it is programmed to APPL_DB during buffer pool handling, // - any change from lua plugin will be ignored. // - will handle ingress_lossless_pool in the way all other pools are handled in this case - auto &pool = m_bufferPoolLookup[poolName]; + const auto &poolRef = m_bufferPoolLookup.find(poolName); + if (poolRef == m_bufferPoolLookup.end()) + { + SWSS_LOG_WARN("Unconfigured buffer pool %s got from lua plugin", poolName.c_str()); + continue; + } + auto &pool = poolRef->second; auto &poolSizeStr = pairs[1]; auto old_xoff = pool.xoff; bool xoff_updated = false; @@ -875,10 +885,8 @@ void BufferMgrDynamic::updateBufferProfileToDb(const string &name, const buffer_ } vector fvVector; - string mode = getPgPoolMode(); - // profile threshold field name - mode += "_th"; + const string &&mode = profile.threshold_mode.empty() ? 
getPgPoolMode() + "_th" : profile.threshold_mode; if (profile.lossless) { @@ -959,7 +967,7 @@ task_process_status BufferMgrDynamic::allocateProfile(const string &speed, const string mode = getPgPoolMode(); if (mode.empty()) { - SWSS_LOG_NOTICE("BUFFER_PROFILE %s cannot be created because the buffer pool isn't ready", profile_name.c_str()); + SWSS_LOG_INFO("BUFFER_PROFILE %s cannot be created because the buffer pool isn't ready", profile_name.c_str()); return task_process_status::task_need_retry; } @@ -1043,10 +1051,10 @@ bool BufferMgrDynamic::isHeadroomResourceValid(const string &port, const buffer_ // profile: the profile referenced by the new_pg (if provided) or all PGs // new_pg: which pg is newly added? - if (!profile.lossless) + if (!profile.lossless && new_pg.empty()) { - SWSS_LOG_INFO("No need to check headroom for lossy PG port %s profile %s size %s pg %s", - port.c_str(), profile.name.c_str(), profile.size.c_str(), new_pg.c_str()); + SWSS_LOG_INFO("No need to check headroom for lossy PG port %s profile %s size %s without a PG specified", + port.c_str(), profile.name.c_str(), profile.size.c_str()); return true; } @@ -1430,9 +1438,10 @@ task_process_status BufferMgrDynamic::refreshPgsForPort(const string &port, cons return task_process_status::task_success; } - if (!m_bufferPoolReady) + if (!m_bufferPoolReady || m_defaultThreshold.empty()) { - SWSS_LOG_INFO("Nothing to be done since the buffer pool is not ready"); + SWSS_LOG_INFO("Nothing to be done since either the buffer pool or default threshold is not ready"); + m_bufferObjectsPending = true; return task_process_status::task_success; } @@ -1454,6 +1463,12 @@ task_process_status BufferMgrDynamic::refreshPgsForPort(const string &port, cons if (portPg.dynamic_calculated) { + if (portInfo.state != PORT_READY) + { + SWSS_LOG_INFO("Nothing to be done for %s since port is not ready", key.c_str()); + continue; + } + string threshold; // Calculate new headroom size if (portPg.static_configured) @@ -1482,7 
+1497,7 @@ task_process_status BufferMgrDynamic::refreshPgsForPort(const string &port, cons // Calculate whether accumulative headroom size exceeds the maximum value // Abort if it does - if (!isHeadroomResourceValid(port, m_bufferProfileLookup[newProfile], exactly_matched_key)) + if (!isHeadroomResourceValid(port, m_bufferProfileLookup[newProfile], key)) { SWSS_LOG_ERROR("Update speed (%s) and cable length (%s) for port %s failed, accumulative headroom size exceeds the limit", speed.c_str(), cable_length.c_str(), port.c_str()); @@ -1848,6 +1863,14 @@ task_process_status BufferMgrDynamic::handleBufferMaxParam(KeyOpFieldsValuesTupl SWSS_LOG_INFO("BUFFER_MAX_PARAM: Got port %s's max priority group %s", key.c_str(), value.c_str()); portInfo.maximum_buffer_objects[BUFFER_PG] = (sai_uint32_t)pgCount; + + if (m_bufferCompletelyInitialized && portInfo.state == PORT_ADMIN_DOWN) + { + // This is mostly for the case where the port is created only-the-fly + // The maximum buffer parameters can be received after buffer items + reclaimReservedBufferForPort(key, m_portPgLookup, BUFFER_PG); + SWSS_LOG_NOTICE("Admin-down port %s is handled after maximum buffer parameter has been received", key.c_str()); + } } else if (fvField(i) == "max_queues") { @@ -1861,6 +1884,14 @@ task_process_status BufferMgrDynamic::handleBufferMaxParam(KeyOpFieldsValuesTupl SWSS_LOG_INFO("BUFFER_MAX_PARAM: Got port %s's max queue %s", key.c_str(), value.c_str()); portInfo.maximum_buffer_objects[BUFFER_QUEUE] = (sai_uint32_t)queueCount; + + if (m_bufferCompletelyInitialized && portInfo.state == PORT_ADMIN_DOWN) + { + // This is mostly for the case where the port is created only-the-fly + // The maximum buffer parameters can be received after buffer items + reclaimReservedBufferForPort(key, m_portQueueLookup, BUFFER_QUEUE); + SWSS_LOG_NOTICE("Admin-down port %s is handled after maximum buffer parameter has been received", key.c_str()); + } } } } @@ -1892,10 +1923,16 @@ task_process_status 
BufferMgrDynamic::handleBufferMaxParam(KeyOpFieldsValuesTupl task_process_status BufferMgrDynamic::handleDefaultLossLessBufferParam(KeyOpFieldsValuesTuple &tuple) { string op = kfvOp(tuple); - string newRatio = "0"; + string newRatio = ""; if (op == SET_COMMAND) { + if (m_bufferPoolLookup.find(INGRESS_LOSSLESS_PG_POOL_NAME) == m_bufferPoolLookup.end()) + { + SWSS_LOG_INFO("%s has not been configured, need to retry", INGRESS_LOSSLESS_PG_POOL_NAME); + return task_process_status::task_need_retry; + } + for (auto i : kfvFieldsValues(tuple)) { if (fvField(i) == "default_dynamic_th") @@ -1910,6 +1947,10 @@ task_process_status BufferMgrDynamic::handleDefaultLossLessBufferParam(KeyOpFiel } } } + else if (op == DEL_COMMAND) + { + newRatio = ""; + } else { SWSS_LOG_ERROR("Unsupported command %s received for DEFAULT_LOSSLESS_BUFFER_PARAMETER table", op.c_str()); @@ -1920,6 +1961,13 @@ task_process_status BufferMgrDynamic::handleDefaultLossLessBufferParam(KeyOpFiel { bool isSHPEnabled = isNonZero(m_overSubscribeRatio); bool willSHPBeEnabled = isNonZero(newRatio); + if (m_portInitDone && (!isSHPEnabled) && willSHPBeEnabled) + { + if (!isSharedHeadroomPoolEnabledInSai()) + { + return task_process_status::task_need_retry; + } + } SWSS_LOG_INFO("Recalculate shared buffer pool size due to over subscribe ratio has been updated from %s to %s", m_overSubscribeRatio.c_str(), newRatio.c_str()); m_overSubscribeRatio = newRatio; @@ -1928,6 +1976,24 @@ task_process_status BufferMgrDynamic::handleDefaultLossLessBufferParam(KeyOpFiel return task_process_status::task_success; } +bool BufferMgrDynamic::isSharedHeadroomPoolEnabledInSai() +{ + string xoff; + recalculateSharedBufferPool(); + if (!isNonZero(m_bufferPoolLookup[INGRESS_LOSSLESS_PG_POOL_NAME].xoff)) + { + return true; + } + m_applBufferPoolTable.flush(); + m_applStateBufferPoolTable.hget(INGRESS_LOSSLESS_PG_POOL_NAME, "xoff", xoff); + if (!isNonZero(xoff)) + { + SWSS_LOG_INFO("Shared headroom pool is enabled but has not been applied 
to SAI, retrying"); + return false; + } + + return true; +} task_process_status BufferMgrDynamic::handleCableLenTable(KeyOpFieldsValuesTuple &tuple) { @@ -1937,6 +2003,7 @@ task_process_status BufferMgrDynamic::handleCableLenTable(KeyOpFieldsValuesTuple int failed_item_count = 0; if (op == SET_COMMAND) { + m_cableLengths.clear(); for (auto i : kfvFieldsValues(tuple)) { // receive and cache cable length table @@ -1951,6 +2018,8 @@ task_process_status BufferMgrDynamic::handleCableLenTable(KeyOpFieldsValuesTuple port.c_str(), portInfo.effective_speed.c_str(), portInfo.cable_length.c_str(), portInfo.gearbox_model.c_str()); + m_cableLengths[port] = cable_length; + if (portInfo.cable_length == cable_length) { continue; @@ -2159,6 +2228,11 @@ task_process_status BufferMgrDynamic::handlePortTable(KeyOpFieldsValuesTuple &tu string &mtu = portInfo.mtu; string &effective_speed = portInfo.effective_speed; + if (cable_length.empty() && !m_cableLengths[port].empty()) + { + cable_length = m_cableLengths[port]; + } + bool need_refresh_all_buffer_objects = false, need_handle_admin_down = false, was_admin_down = false; if (effective_speed_updated || mtu_updated) @@ -2280,6 +2354,28 @@ task_process_status BufferMgrDynamic::handlePortTable(KeyOpFieldsValuesTuple &tu task_status = refreshPgsForPort(port, portInfo.effective_speed, portInfo.cable_length, portInfo.mtu); } } + else if (op == DEL_COMMAND) + { + cleanUpItemsForReclaimingBuffer(port); + if ((m_portPgLookup.find(port) != m_portPgLookup.end() + && !m_portPgLookup[port].empty()) + || (m_portQueueLookup.find(port) != m_portQueueLookup.end() + && !m_portQueueLookup[port].empty()) + || (m_portProfileListLookups[BUFFER_INGRESS].find(port) != m_portProfileListLookups[BUFFER_INGRESS].end() + && !m_portProfileListLookups[BUFFER_INGRESS][port].empty()) + || (m_portProfileListLookups[BUFFER_EGRESS].find(port) != m_portProfileListLookups[BUFFER_EGRESS].end() + && !m_portProfileListLookups[BUFFER_EGRESS][port].empty())) + { + 
SWSS_LOG_INFO("Port %s can't be removed before buffer items have been removed", port.c_str()); + return task_process_status::task_need_retry; + } + m_portPgLookup.erase(port); + m_portQueueLookup.erase(port); + m_portProfileListLookups[BUFFER_INGRESS].erase(port); + m_portProfileListLookups[BUFFER_EGRESS].erase(port); + m_portInfoLookup.erase(port); + SWSS_LOG_NOTICE("Port %s is removed", port.c_str()); + } return task_status; } @@ -2346,6 +2442,14 @@ task_process_status BufferMgrDynamic::handleBufferPoolTable(KeyOpFieldsValuesTup { bool isSHPEnabledBySize = isNonZero(m_configuredSharedHeadroomPoolSize); + if (m_portInitDone && (!isSHPEnabledBySize) && willSHPBeEnabledBySize) + { + if (!isSharedHeadroomPoolEnabledInSai()) + { + return task_process_status::task_need_retry; + } + } + m_configuredSharedHeadroomPoolSize = newSHPSize; refreshSharedHeadroomPool(false, isSHPEnabledBySize != willSHPBeEnabledBySize); } @@ -2377,6 +2481,28 @@ task_process_status BufferMgrDynamic::handleBufferPoolTable(KeyOpFieldsValuesTup m_applBufferPoolTable.del(pool); m_stateBufferPoolTable.del(pool); m_bufferPoolLookup.erase(pool); + if (pool == INGRESS_LOSSLESS_PG_POOL_NAME) + { + m_configuredSharedHeadroomPoolSize.clear(); + } + + if (m_bufferPoolReady && m_bufferPoolLookup.empty()) + { + for(auto &port : m_adminDownPorts) + { + cleanUpItemsForReclaimingBuffer(port); + } + + // Zero profiles must be unloaded once all pools have been uploaded + // This can be resulted from "config qos reload" + // Any zero profile left can leads to buffer pool not able to be cleared + unloadZeroPoolAndProfiles(); + + m_bufferPoolReady = false; + m_bufferCompletelyInitialized = false; + + m_pendingApplyZeroProfilePorts = m_adminDownPorts; + } } else { @@ -2398,6 +2524,10 @@ task_process_status BufferMgrDynamic::handleBufferProfileTable(KeyOpFieldsValues // For set command: // 1. Create the corresponding table entries in APPL_DB // 2. 
Record the table in the internal cache m_bufferProfileLookup + + // If the profile did not exist, it will be created in the next line by the [] operator with incomplete data. + // In case the flow does not finish successfully, the incomplete profile should be removed + bool needRemoveOnFailure = (m_bufferProfileLookup.find(profileName) == m_bufferProfileLookup.end()); buffer_profile_t &profileApp = m_bufferProfileLookup[profileName]; profileApp.static_configured = true; @@ -2418,24 +2548,44 @@ task_process_status BufferMgrDynamic::handleBufferProfileTable(KeyOpFieldsValues if (!value.empty()) { auto &poolName = value; - if (poolName.empty()) - { - SWSS_LOG_ERROR("BUFFER_PROFILE: Invalid format of reference to pool: %s", value.c_str()); - return task_process_status::task_invalid_entry; - } - auto poolRef = m_bufferPoolLookup.find(poolName); if (poolRef == m_bufferPoolLookup.end()) { - SWSS_LOG_WARN("Pool %s hasn't been configured yet, need retry", poolName.c_str()); + SWSS_LOG_INFO("Pool %s hasn't been configured yet, need retry", poolName.c_str()); + if (needRemoveOnFailure) + { + m_bufferProfileLookup.erase(profileName); + } return task_process_status::task_need_retry; } profileApp.pool_name = poolName; profileApp.direction = poolRef->second.direction; + auto threshold_mode = poolRef->second.mode + "_th"; + if (profileApp.threshold_mode.empty()) + { + profileApp.threshold_mode = threshold_mode; + } + else if (profileApp.threshold_mode != threshold_mode) + { + SWSS_LOG_ERROR("Buffer profile %s's mode %s doesn't match with buffer pool %s whose mode is %s", + profileName.c_str(), + profileApp.threshold_mode.c_str(), + poolName.c_str(), + threshold_mode.c_str()); + if (needRemoveOnFailure) + { + m_bufferProfileLookup.erase(profileName); + } + return task_process_status::task_failed; + } } else { SWSS_LOG_ERROR("Pool for BUFFER_PROFILE %s hasn't been specified", field.c_str()); + if (needRemoveOnFailure) + { + m_bufferProfileLookup.erase(profileName); + } return 
task_process_status::task_failed; } } @@ -2456,12 +2606,25 @@ task_process_status BufferMgrDynamic::handleBufferProfileTable(KeyOpFieldsValues { profileApp.size = value; } - else if (field == buffer_dynamic_th_field_name) - { - profileApp.threshold = value; - } - else if (field == buffer_static_th_field_name) + else if (field == buffer_dynamic_th_field_name || field == buffer_static_th_field_name) { + if (profileApp.threshold_mode.empty()) + { + profileApp.threshold_mode = field; + } + else if (profileApp.threshold_mode != field) + { + SWSS_LOG_ERROR("Buffer profile %s's mode %s doesn't align with buffer pool %s whose mode is %s", + profileName.c_str(), + field.c_str(), + profileApp.pool_name.c_str(), + profileApp.threshold_mode.c_str()); + if (needRemoveOnFailure) + { + m_bufferProfileLookup.erase(profileName); + } + return task_process_status::task_failed; + } profileApp.threshold = value; } else if (field == buffer_headroom_type_field_name) @@ -2484,7 +2647,11 @@ task_process_status BufferMgrDynamic::handleBufferProfileTable(KeyOpFieldsValues if (profileApp.direction != BUFFER_INGRESS) { SWSS_LOG_ERROR("BUFFER_PROFILE %s is ingress but referencing an egress pool %s", profileName.c_str(), profileApp.pool_name.c_str()); - return task_process_status::task_success; + if (needRemoveOnFailure) + { + m_bufferProfileLookup.erase(profileName); + } + return task_process_status::task_failed; } if (profileApp.dynamic_calculated) @@ -2569,6 +2736,12 @@ void BufferMgrDynamic::handleSetSingleBufferObjectOnAdminDownPort(buffer_directi { if (idsToZero.empty()) { + // Happens only after "config qos reload" + if (!m_zeroProfilesLoaded) + { + loadZeroPoolAndProfiles(); + } + // If initialization finished, no extra handle required. 
// Check whether the key overlaps with supported but not configured map auto const &idsToAdd = parseObjectNameFromKey(key, 1); @@ -2684,6 +2857,14 @@ void BufferMgrDynamic::handleDelSingleBufferObjectOnAdminDownPort(buffer_directi if (idsToZero.empty()) { + if (!m_bufferPoolReady) + { + // Reclaiming buffer has not started yet so just remove it. + // Do not add it to "supported but not configured" set + updateBufferObjectToDb(key, "", false, direction); + return; + } + // For admin down ports, if zero profiles have been applied to all configured items // do NOT remove it otherwise SDK default value will be set for the items // Move the key to supported_but_not_configured_items so that the slice of items @@ -2752,6 +2933,9 @@ void BufferMgrDynamic::handleDelSingleBufferObjectOnAdminDownPort(buffer_directi task_process_status BufferMgrDynamic::handleSingleBufferPgEntry(const string &key, const string &port, const KeyOpFieldsValuesTuple &tuple) { string op = kfvOp(tuple); + // If the buffer PG did not exist, it will be created in the next line by the [] operator with incomplete data. + // In case the flow does not finish successfully, the incomplete profile should be removed + bool needRemoveOnFailure = (m_portPgLookup[port].find(key) == m_portPgLookup[port].end()); buffer_pg_t &bufferPg = m_portPgLookup[port][key]; port_info_t &portInfo = m_portInfoLookup[port]; @@ -2787,6 +2971,10 @@ task_process_status BufferMgrDynamic::handleSingleBufferPgEntry(const string &ke if (profileName.empty()) { SWSS_LOG_ERROR("BUFFER_PG: Invalid format of reference to profile: %s", value.c_str()); + if (needRemoveOnFailure) + { + m_portPgLookup[port].erase(key); + } return task_process_status::task_invalid_entry; } @@ -2795,16 +2983,33 @@ task_process_status BufferMgrDynamic::handleSingleBufferPgEntry(const string &ke { // In this case, we shouldn't set the dynamic calculated flag to true // It will be updated when its profile configured. 
- bufferPg.dynamic_calculated = false; - SWSS_LOG_WARN("Profile %s hasn't been configured yet, skip", profileName.c_str()); + if (needRemoveOnFailure) + { + m_portPgLookup[port].erase(key); + } + SWSS_LOG_INFO("Profile %s hasn't been configured yet, skip", profileName.c_str()); return task_process_status::task_need_retry; } else { buffer_profile_t &profileRef = searchRef->second; + if (profileRef.direction == BUFFER_EGRESS) + { + if (needRemoveOnFailure) + { + m_portPgLookup[port].erase(key); + } + SWSS_LOG_ERROR("Egress buffer profile configured on PG %s", key.c_str()); + return task_process_status::task_failed; + } bufferPg.dynamic_calculated = profileRef.dynamic_calculated; bufferPg.configured_profile_name = profileName; bufferPg.lossless = profileRef.lossless; + if (!profileRef.lossless && !isHeadroomResourceValid(port, profileRef, key)) + { + SWSS_LOG_ERROR("Unable to configure lossy PG %s, accumulative headroom size exceeds the limit", key.c_str()); + return task_process_status::task_failed; + } } bufferPg.static_configured = true; bufferPg.configured_profile_name = profileName; @@ -2813,6 +3018,10 @@ task_process_status BufferMgrDynamic::handleSingleBufferPgEntry(const string &ke if (field != buffer_profile_field_name) { SWSS_LOG_ERROR("BUFFER_PG: Invalid field %s", field.c_str()); + if (needRemoveOnFailure) + { + m_portPgLookup[port].erase(key); + } return task_process_status::task_invalid_entry; } @@ -2853,7 +3062,7 @@ task_process_status BufferMgrDynamic::handleSingleBufferPgEntry(const string &ke // For del command: // 1. Removing it from APPL_DB // 2. 
Update internal caches - string &runningProfileName = bufferPg.running_profile_name; + string runningProfileName = bufferPg.running_profile_name; string &configProfileName = bufferPg.configured_profile_name; if (!m_supportRemoving) @@ -2896,6 +3105,7 @@ task_process_status BufferMgrDynamic::handleSingleBufferPgEntry(const string &ke else { SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); + m_portPgLookup[port].erase(key); return task_process_status::task_invalid_entry; } @@ -2911,7 +3121,7 @@ task_process_status BufferMgrDynamic::checkBufferProfileDirection(const string & auto profileSearchRef = m_bufferProfileLookup.find(profileName); if (profileSearchRef == m_bufferProfileLookup.end()) { - SWSS_LOG_NOTICE("Profile %s doesn't exist, need retry", profileName.c_str()); + SWSS_LOG_INFO("Profile %s doesn't exist, need retry", profileName.c_str()); return task_process_status::task_need_retry; } @@ -2939,8 +3149,7 @@ task_process_status BufferMgrDynamic::handleSingleBufferQueueEntry(const string if (op == SET_COMMAND) { - auto &portQueue = m_portQueueLookup[port][queues]; - + bool successful = false; SWSS_LOG_INFO("Inserting entry BUFFER_QUEUE_TABLE:%s to APPL_DB", key.c_str()); for (auto i : kfvFieldsValues(tuple)) @@ -2951,8 +3160,10 @@ task_process_status BufferMgrDynamic::handleSingleBufferQueueEntry(const string auto rc = checkBufferProfileDirection(fvValue(i), BUFFER_EGRESS); if (rc != task_process_status::task_success) return rc; - portQueue.running_profile_name = fvValue(i); + + m_portQueueLookup[port][queues].running_profile_name = fvValue(i); SWSS_LOG_NOTICE("Queue %s has been configured on the system, referencing profile %s", key.c_str(), fvValue(i).c_str()); + successful = true; } else { @@ -2963,8 +3174,13 @@ task_process_status BufferMgrDynamic::handleSingleBufferQueueEntry(const string SWSS_LOG_INFO("Inserting field %s value %s", fvField(i).c_str(), fvValue(i).c_str()); } - // TODO: check overlap. 
Currently, assume there is no overlap + if (!successful) + { + SWSS_LOG_ERROR("Invalid BUFFER_QUEUE configuration on %s: no profile configured", key.c_str()); + return task_process_status::task_failed; + } + auto &portQueue = m_portQueueLookup[port][queues]; if (PORT_ADMIN_DOWN == portInfo.state) { handleSetSingleBufferObjectOnAdminDownPort(BUFFER_QUEUE, port, key, portQueue.running_profile_name); @@ -2983,6 +3199,8 @@ task_process_status BufferMgrDynamic::handleSingleBufferQueueEntry(const string } SWSS_LOG_INFO("Removing entry %s from APPL_DB", key.c_str()); m_portQueueLookup[port].erase(queues); + if (m_portQueueLookup[port].empty()) + m_portQueueLookup.erase(port); if (PORT_ADMIN_DOWN == portInfo.state) { handleDelSingleBufferObjectOnAdminDownPort(BUFFER_QUEUE, port, key, portInfo); @@ -3034,6 +3252,22 @@ task_process_status BufferMgrDynamic::handleSingleBufferPortProfileListEntry(con // For admin-down ports, zero profile list has been applied on the port when it entered admin-down state updateBufferObjectListToDb(key, profileListLookup[port], dir); } + else + { + const auto &profileList = m_portProfileListLookups[dir][port]; + if (!profileList.empty()) + { + // Happens only after "config qos reload" + if (!m_zeroProfilesLoaded) + { + loadZeroPoolAndProfiles(); + } + vector fvVector; + const string &zeroProfileNameList = constructZeroProfileListFromNormalProfileList(profileList, port); + fvVector.emplace_back(buffer_profile_list_field_name, zeroProfileNameList); + m_applBufferProfileListTables[dir].set(port, fvVector); + } + } } else if (op == DEL_COMMAND) { @@ -3189,7 +3423,8 @@ void BufferMgrDynamic::doTask(Consumer &consumer) { case task_process_status::task_failed: SWSS_LOG_ERROR("Failed to process table update"); - return; + it = consumer.m_toSync.erase(it); + break; case task_process_status::task_need_retry: SWSS_LOG_INFO("Unable to process table update. 
Will retry..."); it++; @@ -3238,7 +3473,7 @@ void BufferMgrDynamic::doTask(Consumer &consumer) */ void BufferMgrDynamic::handlePendingBufferObjects() { - if (m_bufferPoolReady) + if (m_bufferPoolReady && !m_defaultThreshold.empty()) { if (!m_pendingApplyZeroProfilePorts.empty()) { @@ -3370,9 +3605,25 @@ void BufferMgrDynamic::handlePendingBufferObjects() } } +void BufferMgrDynamic::cleanUpItemsForReclaimingBuffer(const string &port) +{ + // Clean up zero buffers when the buffer pools or a port has been removed + if (!m_bufferObjectIdsToZero[BUFFER_PG].empty()) + { + updateBufferObjectToDb(port + delimiter + m_bufferObjectIdsToZero[BUFFER_PG], "", false, BUFFER_PG); + } + if (!m_bufferObjectIdsToZero[BUFFER_QUEUE].empty()) + { + updateBufferObjectToDb(port + delimiter + m_bufferObjectIdsToZero[BUFFER_QUEUE], "", false, BUFFER_QUEUE); + } + removeSupportedButNotConfiguredItemsOnPort(m_portInfoLookup[port], port); +} + void BufferMgrDynamic::doTask(SelectableTimer &timer) { checkSharedBufferPoolSize(true); if (!m_bufferCompletelyInitialized) + { handlePendingBufferObjects(); + } } diff --git a/cfgmgr/buffermgrdyn.h b/cfgmgr/buffermgrdyn.h index ef1e4f567f..b50b0ced69 100644 --- a/cfgmgr/buffermgrdyn.h +++ b/cfgmgr/buffermgrdyn.h @@ -71,6 +71,7 @@ typedef struct { std::string xon_offset; std::string xoff; std::string threshold; + std::string threshold_mode; std::string pool_name; // port_pgs - stores pgs referencing this profile // An element will be added or removed when a PG added or removed @@ -146,7 +147,7 @@ typedef std::map gearbox_delay_t; class BufferMgrDynamic : public Orch { public: - BufferMgrDynamic(DBConnector *cfgDb, DBConnector *stateDb, DBConnector *applDb, const std::vector &tables, std::shared_ptr> gearboxInfo, std::shared_ptr> zeroProfilesInfo); + BufferMgrDynamic(DBConnector *cfgDb, DBConnector *stateDb, DBConnector *applDb, DBConnector *applStateDb, const std::vector &tables, std::shared_ptr> gearboxInfo, std::shared_ptr> zeroProfilesInfo); using 
Orch::doTask; private: @@ -177,7 +178,7 @@ class BufferMgrDynamic : public Orch std::string m_configuredSharedHeadroomPoolSize; - std::shared_ptr m_applDb = nullptr; + DBConnector *m_applDb = nullptr; SelectableTimer *m_buffermgrPeriodtimer = nullptr; // Fields for zero pool and profiles @@ -195,6 +196,7 @@ class BufferMgrDynamic : public Orch // key: port name // updated only when a port's speed and cable length updated port_info_lookup_t m_portInfoLookup; + std::map m_cableLengths; std::set m_adminDownPorts; std::set m_pendingApplyZeroProfilePorts; std::set m_pendingSupportedButNotConfiguredPorts[BUFFER_DIR_MAX]; @@ -202,6 +204,7 @@ class BufferMgrDynamic : public Orch // BUFFER_POOL table and cache ProducerStateTable m_applBufferPoolTable; + Table m_applStateBufferPoolTable; Table m_stateBufferPoolTable; buffer_pool_lookup_t m_bufferPoolLookup; @@ -292,6 +295,7 @@ class BufferMgrDynamic : public Orch task_process_status allocateProfile(const std::string &speed, const std::string &cable, const std::string &mtu, const std::string &threshold, const std::string &gearbox_model, long lane_count, std::string &profile_name); void releaseProfile(const std::string &profile_name); bool isHeadroomResourceValid(const std::string &port, const buffer_profile_t &profile, const std::string &new_pg); + bool isSharedHeadroomPoolEnabledInSai(); void refreshSharedHeadroomPool(bool enable_state_updated_by_ratio, bool enable_state_updated_by_size); task_process_status checkBufferProfileDirection(const std::string &profiles, buffer_direction_t dir); std::string constructZeroProfileListFromNormalProfileList(const std::string &normalProfileList, const std::string &port); @@ -301,6 +305,7 @@ class BufferMgrDynamic : public Orch void handleSetSingleBufferObjectOnAdminDownPort(buffer_direction_t direction, const std::string &port, const std::string &key, const std::string &profile); void handleDelSingleBufferObjectOnAdminDownPort(buffer_direction_t direction, const std::string &port, const 
std::string &key, port_info_t &portInfo); bool isReadyToReclaimBufferOnPort(const std::string &port); + void cleanUpItemsForReclaimingBuffer(const std::string &port); // Main flows template task_process_status reclaimReservedBufferForPort(const std::string &port, T &obj, buffer_direction_t dir); diff --git a/cfgmgr/coppmgr.cpp b/cfgmgr/coppmgr.cpp index 1721cc8593..5595096a27 100644 --- a/cfgmgr/coppmgr.cpp +++ b/cfgmgr/coppmgr.cpp @@ -9,6 +9,8 @@ #include "shellcmd.h" #include "warm_restart.h" #include "json.hpp" +#include +#include using json = nlohmann::json; @@ -255,6 +257,42 @@ void CoppMgr::mergeConfig(CoppCfg &init_cfg, CoppCfg &m_cfg, std::vector &fvs) +{ + /* Compare with the existing contents of copp tables, in case for a key K preserved fvs are the same + * as the fvs in trap_group_fvs it will be ignored as a duplicate continue to next key. + * In case one of the fvs differs the preserved entry will be deleted and new entry will be set instead. + */ + std::vector preserved_fvs; + bool key_found = m_coppTable.get(key, preserved_fvs); + if (!key_found) + { + return false; + } + else + { + unordered_map preserved_copp_entry; + for (auto prev_fv : preserved_fvs) + { + preserved_copp_entry[fvField(prev_fv)] = fvValue(prev_fv); + } + for (auto fv: fvs) + { + string field = fvField(fv); + string value = fvValue(fv); + auto preserved_copp_it = preserved_copp_entry.find(field); + bool field_found = (preserved_copp_it != preserved_copp_entry.end()); + if ((!field_found) || (field_found && preserved_copp_it->second.compare(value))) + { + // overwrite -> delete preserved entry from copp table and set a new entry instead + m_coppTable.del(key); + return false; + } + } + } + return true; +} + CoppMgr::CoppMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, const vector &tableNames) : Orch(cfgDb, tableNames), m_cfgCoppTrapTable(cfgDb, CFG_COPP_TRAP_TABLE_NAME), @@ -270,9 +308,11 @@ CoppMgr::CoppMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector 
*stateDb, c std::vector group_keys; std::vector trap_keys; std::vector feature_keys; + std::vector preserved_copp_keys; std::vector group_cfg_keys; std::vector trap_cfg_keys; + unordered_set supported_copp_keys; CoppCfg group_cfg; CoppCfg trap_cfg; @@ -280,6 +320,7 @@ CoppMgr::CoppMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, c m_cfgCoppGroupTable.getKeys(group_cfg_keys); m_cfgCoppTrapTable.getKeys(trap_cfg_keys); m_cfgFeatureTable.getKeys(feature_keys); + m_coppTable.getKeys(preserved_copp_keys); for (auto i: feature_keys) @@ -352,8 +393,14 @@ CoppMgr::CoppMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, c if (!trap_group_fvs.empty()) { + supported_copp_keys.emplace(i.first); + if (isDupEntry(i.first, trap_group_fvs)) + { + continue; + } m_appCoppTable.set(i.first, trap_group_fvs); } + setCoppGroupStateOk(i.first); auto g_cfg = std::find(group_cfg_keys.begin(), group_cfg_keys.end(), i.first); if (g_cfg != group_cfg_keys.end()) @@ -361,6 +408,16 @@ CoppMgr::CoppMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, c g_copp_init_set.insert(i.first); } } + + // Delete unsupported keys from preserved copp tables + for (auto it : preserved_copp_keys) + { + auto copp_it = supported_copp_keys.find(it); + if (copp_it == supported_copp_keys.end()) + { + m_coppTable.del(it); + } + } } void CoppMgr::setCoppGroupStateOk(string alias) diff --git a/cfgmgr/coppmgr.h b/cfgmgr/coppmgr.h index 1d53756fce..44549d3bec 100644 --- a/cfgmgr/coppmgr.h +++ b/cfgmgr/coppmgr.h @@ -100,6 +100,7 @@ class CoppMgr : public Orch bool isTrapGroupInstalled(std::string key); bool isFeatureEnabled(std::string feature); void mergeConfig(CoppCfg &init_cfg, CoppCfg &m_cfg, std::vector &cfg_keys, Table &cfgTable); + bool isDupEntry(const std::string &key, std::vector &fvs); void removeTrap(std::string key); void addTrap(std::string trap_ids, std::string trap_group); diff --git a/cfgmgr/coppmgrd.cpp b/cfgmgr/coppmgrd.cpp index 60b0a2442a..16c15c1238 100644 
--- a/cfgmgr/coppmgrd.cpp +++ b/cfgmgr/coppmgrd.cpp @@ -16,26 +16,6 @@ using namespace swss; /* select() function timeout retry time, in millisecond */ #define SELECT_TIMEOUT 1000 -/* - * Following global variables are defined here for the purpose of - * using existing Orch class which is to be refactored soon to - * eliminate the direct exposure of the global variables. - * - * Once Orch class refactoring is done, these global variables - * should be removed from here. - */ -int gBatchSize = 0; -bool gSwssRecord = false; -bool gLogRotate = false; -ofstream gRecordOfs; -string gRecordFile; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; -/* Global database mutex */ -mutex gDbMutex; - int main(int argc, char **argv) { Logger::linkToDbNative("coppmgrd"); diff --git a/cfgmgr/intfmgr.cpp b/cfgmgr/intfmgr.cpp index 93281dbcd9..78c9030807 100644 --- a/cfgmgr/intfmgr.cpp +++ b/cfgmgr/intfmgr.cpp @@ -40,8 +40,7 @@ IntfMgr::IntfMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, c m_stateVrfTable(stateDb, STATE_VRF_TABLE_NAME), m_stateIntfTable(stateDb, STATE_INTERFACE_TABLE_NAME), m_appIntfTableProducer(appDb, APP_INTF_TABLE_NAME), - m_neighTable(appDb, APP_NEIGH_TABLE_NAME), - m_appLagTable(appDb, APP_LAG_TABLE_NAME) + m_neighTable(appDb, APP_NEIGH_TABLE_NAME) { auto subscriberStateTable = new swss::SubscriberStateTable(stateDb, STATE_PORT_TABLE_NAME, TableConsumable::DEFAULT_POP_BATCH_SIZE, 100); @@ -115,7 +114,21 @@ void IntfMgr::setIntfIp(const string &alias, const string &opCmd, int ret = swss::exec(cmd.str(), res); if (ret) { - SWSS_LOG_ERROR("Command '%s' failed with rc %d", cmd.str().c_str(), ret); + if (!ipPrefix.isV4() && opCmd == "add") + { + SWSS_LOG_NOTICE("Failed to assign IPv6 on interface %s with return code %d, trying to enable IPv6 and retry", alias.c_str(), ret); + if (!enableIpv6Flag(alias)) + { + SWSS_LOG_ERROR("Failed to enable 
IPv6 on interface %s", alias.c_str()); + return; + } + ret = swss::exec(cmd.str(), res); + } + + if (ret) + { + SWSS_LOG_ERROR("Command '%s' failed with rc %d", cmd.str().c_str(), ret); + } } } @@ -336,7 +349,7 @@ std::string IntfMgr::getIntfAdminStatus(const string &alias) } else if (!alias.compare(0, strlen("Po"), "Po")) { - portTable = &m_appLagTable; + portTable = &m_stateLagTable; } else { @@ -368,7 +381,7 @@ std::string IntfMgr::getIntfMtu(const string &alias) } else if (!alias.compare(0, strlen("Po"), "Po")) { - portTable = &m_appLagTable; + portTable = &m_stateLagTable; } else { @@ -433,8 +446,19 @@ std::string IntfMgr::setHostSubIntfMtu(const string &alias, const string &mtu, c } SWSS_LOG_INFO("subintf %s active mtu: %s", alias.c_str(), subifMtu.c_str()); cmd << IP_CMD " link set " << shellquote(alias) << " mtu " << shellquote(subifMtu); - EXEC_WITH_ERROR_THROW(cmd.str(), res); + std::string cmd_str = cmd.str(); + int ret = swss::exec(cmd_str, res); + if (ret && !isIntfStateOk(alias)) + { + // Can happen when a SET notification on the PORT_TABLE in the State DB + // followed by a new DEL notification that send by portmgrd + SWSS_LOG_WARN("Setting mtu to %s netdev failed with cmd:%s, rc:%d, error:%s", alias.c_str(), cmd_str.c_str(), ret, res.c_str()); + } + else if (ret) + { + throw runtime_error(cmd_str + " : " + res); + } return subifMtu; } @@ -454,7 +478,7 @@ void IntfMgr::updateSubIntfAdminStatus(const string &alias, const string &admin) continue; } std::vector fvVector; - string subintf_admin = setHostSubIntfAdminStatus(intf, m_subIntfList[intf].adminStatus, admin); + string subintf_admin = setHostSubIntfAdminStatus(intf, m_subIntfList[intf].adminStatus, admin); m_subIntfList[intf].currAdminStatus = subintf_admin; FieldValueTuple fvTuple("admin_status", subintf_admin); fvVector.push_back(fvTuple); @@ -466,13 +490,24 @@ void IntfMgr::updateSubIntfAdminStatus(const string &alias, const string &admin) std::string IntfMgr::setHostSubIntfAdminStatus(const 
string &alias, const string &admin_status, const string &parent_admin_status) { stringstream cmd; - string res; + string res, cmd_str; if (parent_admin_status == "up" || admin_status == "down") { SWSS_LOG_INFO("subintf %s admin_status: %s", alias.c_str(), admin_status.c_str()); cmd << IP_CMD " link set " << shellquote(alias) << " " << shellquote(admin_status); - EXEC_WITH_ERROR_THROW(cmd.str(), res); + cmd_str = cmd.str(); + int ret = swss::exec(cmd_str, res); + if (ret && !isIntfStateOk(alias)) + { + // Can happen when a DEL notification is sent by portmgrd immediately followed by a new SET notification + SWSS_LOG_WARN("Setting admin_status to %s netdev failed with cmd:%s, rc:%d, error:%s", + alias.c_str(), cmd_str.c_str(), ret, res.c_str()); + } + else if (ret) + { + throw runtime_error(cmd_str + " : " + res); + } return admin_status; } else @@ -521,11 +556,12 @@ void IntfMgr::removeSubIntfState(const string &alias) bool IntfMgr::setIntfGratArp(const string &alias, const string &grat_arp) { /* - * Enable gratuitous ARP by accepting unsolicited ARP replies + * Enable gratuitous ARP by accepting unsolicited ARP replies and untracked neighbor advertisements */ stringstream cmd; string res; string garp_enabled; + int rc; if (grat_arp == "enabled") { @@ -543,8 +579,23 @@ bool IntfMgr::setIntfGratArp(const string &alias, const string &grat_arp) cmd << ECHO_CMD << " " << garp_enabled << " > /proc/sys/net/ipv4/conf/" << alias << "/arp_accept"; EXEC_WITH_ERROR_THROW(cmd.str(), res); - SWSS_LOG_INFO("ARP accept set to \"%s\" on interface \"%s\"", grat_arp.c_str(), alias.c_str()); + + cmd.clear(); + cmd.str(std::string()); + + // `accept_untracked_na` is not available in all kernels, so check for it before trying to set it + cmd << "test -f /proc/sys/net/ipv6/conf/" << alias << "/accept_untracked_na"; + rc = swss::exec(cmd.str(), res); + + if (rc == 0) { + cmd.clear(); + cmd.str(std::string()); + cmd << ECHO_CMD << " " << garp_enabled << " > /proc/sys/net/ipv6/conf/" << 
alias << "/accept_untracked_na"; + EXEC_WITH_ERROR_THROW(cmd.str(), res); + SWSS_LOG_INFO("`accept_untracked_na` set to \"%s\" on interface \"%s\"", grat_arp.c_str(), alias.c_str()); + } + return true; } @@ -552,15 +603,15 @@ bool IntfMgr::setIntfProxyArp(const string &alias, const string &proxy_arp) { stringstream cmd; string res; - string proxy_arp_pvlan; + string proxy_arp_status; if (proxy_arp == "enabled") { - proxy_arp_pvlan = "1"; + proxy_arp_status = "1"; } else if (proxy_arp == "disabled") { - proxy_arp_pvlan = "0"; + proxy_arp_status = "0"; } else { @@ -568,7 +619,13 @@ bool IntfMgr::setIntfProxyArp(const string &alias, const string &proxy_arp) return false; } - cmd << ECHO_CMD << " " << proxy_arp_pvlan << " > /proc/sys/net/ipv4/conf/" << alias << "/proxy_arp_pvlan"; + cmd << ECHO_CMD << " " << proxy_arp_status << " > /proc/sys/net/ipv4/conf/" << alias << "/proxy_arp_pvlan"; + EXEC_WITH_ERROR_THROW(cmd.str(), res); + + cmd.clear(); + cmd.str(std::string()); + + cmd << ECHO_CMD << " " << proxy_arp_status << " > /proc/sys/net/ipv4/conf/" << alias << "/proxy_arp"; EXEC_WITH_ERROR_THROW(cmd.str(), res); SWSS_LOG_INFO("Proxy ARP set to \"%s\" on interface \"%s\"", proxy_arp.c_str(), alias.c_str()); @@ -708,6 +765,7 @@ bool IntfMgr::doIntfGeneralTask(const vector& keys, string grat_arp = ""; string mpls = ""; string ipv6_link_local_mode = ""; + string loopback_action = ""; for (auto idx : data) { @@ -750,6 +808,10 @@ bool IntfMgr::doIntfGeneralTask(const vector& keys, { vlanId = value; } + else if (field == "loopback_action") + { + loopback_action = value; + } } if (op == SET_COMMAND) @@ -791,6 +853,13 @@ bool IntfMgr::doIntfGeneralTask(const vector& keys, data.push_back(fvTuple); } + /* Set loopback action */ + if (!loopback_action.empty()) + { + FieldValueTuple fvTuple("loopback_action", loopback_action); + data.push_back(fvTuple); + } + /* Set mpls */ if (!setIntfMpls(alias, mpls)) { @@ -1139,3 +1208,13 @@ void IntfMgr::doPortTableTask(const string& key, 
vector data, s } } } + +bool IntfMgr::enableIpv6Flag(const string &alias) +{ + stringstream cmd; + string temp_res; + cmd << "sysctl -w net.ipv6.conf." << shellquote(alias) << ".disable_ipv6=0"; + int ret = swss::exec(cmd.str(), temp_res); + SWSS_LOG_INFO("disable_ipv6 flag is set to 0 for iface: %s, cmd: %s, ret: %d", alias.c_str(), cmd.str().c_str(), ret); + return (ret == 0) ? true : false; +} diff --git a/cfgmgr/intfmgr.h b/cfgmgr/intfmgr.h index 683e208c0e..4eca2402ce 100644 --- a/cfgmgr/intfmgr.h +++ b/cfgmgr/intfmgr.h @@ -30,7 +30,7 @@ class IntfMgr : public Orch private: ProducerStateTable m_appIntfTableProducer; Table m_cfgIntfTable, m_cfgVlanIntfTable, m_cfgLagIntfTable, m_cfgLoopbackIntfTable; - Table m_statePortTable, m_stateLagTable, m_stateVlanTable, m_stateVrfTable, m_stateIntfTable, m_appLagTable; + Table m_statePortTable, m_stateLagTable, m_stateVlanTable, m_stateVrfTable, m_stateIntfTable; Table m_neighTable; SubIntfMap m_subIntfList; @@ -75,6 +75,7 @@ class IntfMgr : public Orch void updateSubIntfAdminStatus(const std::string &alias, const std::string &admin); void updateSubIntfMtu(const std::string &alias, const std::string &mtu); + bool enableIpv6Flag(const std::string&); bool m_replayDone {false}; }; diff --git a/cfgmgr/intfmgrd.cpp b/cfgmgr/intfmgrd.cpp index 9ed3653333..e414590920 100644 --- a/cfgmgr/intfmgrd.cpp +++ b/cfgmgr/intfmgrd.cpp @@ -16,26 +16,6 @@ using namespace swss; /* select() function timeout retry time, in millisecond */ #define SELECT_TIMEOUT 1000 -/* - * Following global variables are defined here for the purpose of - * using existing Orch class which is to be refactored soon to - * eliminate the direct exposure of the global variables. - * - * Once Orch class refactoring is done, these global variables - * should be removed from here. 
- */ -int gBatchSize = 0; -bool gSwssRecord = false; -bool gLogRotate = false; -ofstream gRecordOfs; -string gRecordFile; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; -/* Global database mutex */ -mutex gDbMutex; - int main(int argc, char **argv) { Logger::linkToDbNative("intfmgrd"); @@ -62,8 +42,6 @@ int main(int argc, char **argv) WarmStart::checkWarmStart("intfmgrd", "swss"); IntfMgr intfmgr(&cfgDb, &appDb, &stateDb, cfg_intf_tables); - - // TODO: add tables in stateDB which interface depends on to monitor list std::vector cfgOrchList = {&intfmgr}; swss::Select s; diff --git a/cfgmgr/macsecmgr.cpp b/cfgmgr/macsecmgr.cpp index 39cf9eae02..0edb86a5af 100644 --- a/cfgmgr/macsecmgr.cpp +++ b/cfgmgr/macsecmgr.cpp @@ -148,7 +148,7 @@ static void wpa_cli_commands( } if (!network_id.empty()) { - wpa_cli_commands(ostream, "set_network", port_name); + wpa_cli_commands(ostream, "set_network", network_id); } wpa_cli_commands(ostream, args...); } diff --git a/cfgmgr/macsecmgrd.cpp b/cfgmgr/macsecmgrd.cpp index 913c0ac4ee..263c5b4395 100644 --- a/cfgmgr/macsecmgrd.cpp +++ b/cfgmgr/macsecmgrd.cpp @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -25,26 +26,20 @@ using namespace swss; MacAddress gMacAddress; -/* - * Following global variables are defined here for the purpose of - * using existing Orch class which is to be refactored soon to - * eliminate the direct exposure of the global variables. - * - * Once Orch class refactoring is done, these global variables - * should be removed from here. 
- */ -int gBatchSize = 0; -bool gSwssRecord = false; -bool gLogRotate = false; -ofstream gRecordOfs; -string gRecordFile; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; -/* Global database mutex */ -mutex gDbMutex; +static bool received_sigterm = false; +static struct sigaction old_sigaction; +static void sig_handler(int signo) +{ + SWSS_LOG_ENTER(); + + if (old_sigaction.sa_handler != SIG_IGN && old_sigaction.sa_handler != SIG_DFL) { + old_sigaction.sa_handler(signo); + } + + received_sigterm = true; + return; +} int main(int argc, char **argv) { @@ -54,6 +49,15 @@ int main(int argc, char **argv) Logger::linkToDbNative("macsecmgrd"); SWSS_LOG_NOTICE("--- Starting macsecmgrd ---"); + /* Register the signal handler for SIGTERM */ + struct sigaction sigact = {}; + sigact.sa_handler = sig_handler; + if (sigaction(SIGTERM, &sigact, &old_sigaction)) + { + SWSS_LOG_ERROR("failed to setup SIGTERM action handler"); + exit(EXIT_FAILURE); + } + swss::DBConnector cfgDb("CONFIG_DB", 0); swss::DBConnector stateDb("STATE_DB", 0); @@ -73,7 +77,7 @@ int main(int argc, char **argv) } SWSS_LOG_NOTICE("starting main loop"); - while (true) + while (!received_sigterm) { Selectable *sel; int ret; diff --git a/cfgmgr/natmgr.cpp b/cfgmgr/natmgr.cpp index 43077fbe32..d903544d9b 100644 --- a/cfgmgr/natmgr.cpp +++ b/cfgmgr/natmgr.cpp @@ -6129,7 +6129,7 @@ void NatMgr::doStaticNatTask(Consumer &consumer) else { SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); - SWSS_LOG_DEBUG("%s", (dumpTuple(consumer, t)).c_str()); + SWSS_LOG_DEBUG("%s", (consumer.dumpTuple(t)).c_str()); it = consumer.m_toSync.erase(it); } } @@ -6472,7 +6472,7 @@ void NatMgr::doStaticNaptTask(Consumer &consumer) else { SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); - SWSS_LOG_DEBUG("%s", (dumpTuple(consumer, t)).c_str()); + SWSS_LOG_DEBUG("%s", (consumer.dumpTuple(t)).c_str()); it = 
consumer.m_toSync.erase(it); } } @@ -6859,7 +6859,7 @@ void NatMgr::doNatPoolTask(Consumer &consumer) else { SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); - SWSS_LOG_DEBUG("%s", (dumpTuple(consumer, t)).c_str()); + SWSS_LOG_DEBUG("%s", (consumer.dumpTuple(t)).c_str()); it = consumer.m_toSync.erase(it); } } @@ -7095,7 +7095,7 @@ void NatMgr::doNatBindingTask(Consumer &consumer) else { SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); - SWSS_LOG_DEBUG("%s", (dumpTuple(consumer, t)).c_str()); + SWSS_LOG_DEBUG("%s", (consumer.dumpTuple(t)).c_str()); it = consumer.m_toSync.erase(it); } } @@ -7873,7 +7873,7 @@ void NatMgr::doNatAclTableTask(Consumer &consumer) else { SWSS_LOG_INFO("Unknown operation type %s", op.c_str()); - SWSS_LOG_DEBUG("%s", (dumpTuple(consumer, t)).c_str()); + SWSS_LOG_DEBUG("%s", (consumer.dumpTuple(t)).c_str()); it = consumer.m_toSync.erase(it); } } @@ -8137,7 +8137,7 @@ void NatMgr::doNatAclRuleTask(Consumer &consumer) else { SWSS_LOG_INFO("Unknown operation type %s", op.c_str()); - SWSS_LOG_DEBUG("%s", (dumpTuple(consumer, t)).c_str()); + SWSS_LOG_DEBUG("%s", (consumer.dumpTuple(t)).c_str()); it = consumer.m_toSync.erase(it); } } diff --git a/cfgmgr/natmgrd.cpp b/cfgmgr/natmgrd.cpp index c2baf7eb87..0e3a52fadc 100644 --- a/cfgmgr/natmgrd.cpp +++ b/cfgmgr/natmgrd.cpp @@ -39,38 +39,35 @@ using namespace swss; /* select() function timeout retry time, in millisecond */ #define SELECT_TIMEOUT 1000 -/* - * Following global variables are defined here for the purpose of - * using existing Orch class which is to be refactored soon to - * eliminate the direct exposure of the global variables. - * - * Once Orch class refactoring is done, these global variables - * should be removed from here. 
- */ -int gBatchSize = 0; -bool gSwssRecord = false; -bool gLogRotate = false; -ofstream gRecordOfs; -string gRecordFile; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; -mutex gDbMutex; NatMgr *natmgr = NULL; NotificationConsumer *timeoutNotificationsConsumer = NULL; NotificationConsumer *flushNotificationsConsumer = NULL; +static volatile sig_atomic_t gExit = 0; + std::shared_ptr cleanupNotifier; +static struct sigaction old_sigaction; + void sigterm_handler(int signo) +{ + SWSS_LOG_ENTER(); + + if (old_sigaction.sa_handler != SIG_IGN && old_sigaction.sa_handler != SIG_DFL) { + old_sigaction.sa_handler(signo); + } + + gExit = 1; +} + +void cleanup() { int ret = 0; std::string res; const std::string conntrackFlush = "conntrack -F"; - SWSS_LOG_NOTICE("Got SIGTERM"); + SWSS_LOG_ENTER(); /*If there are any conntrack entries, clean them */ ret = swss::exec(conntrackFlush, res); @@ -129,10 +126,12 @@ int main(int argc, char **argv) cleanupNotifier = std::make_shared(&appDb, "NAT_DB_CLEANUP_NOTIFICATION"); - if (signal(SIGTERM, sigterm_handler) == SIG_ERR) + struct sigaction sigact = {}; + sigact.sa_handler = sigterm_handler; + if (sigaction(SIGTERM, &sigact, &old_sigaction)) { SWSS_LOG_ERROR("failed to setup SIGTERM action handler"); - exit(1); + exit(EXIT_FAILURE); } natmgr = new NatMgr(&cfgDb, &appDb, &stateDb, cfg_tables); @@ -154,7 +153,7 @@ int main(int argc, char **argv) s.addSelectable(flushNotificationsConsumer); SWSS_LOG_NOTICE("starting main loop"); - while (true) + while (!gExit) { Selectable *sel; int ret; @@ -197,10 +196,14 @@ int main(int argc, char **argv) auto *c = (Executor *)sel; c->execute(); } + + cleanup(); } catch(const std::exception &e) { SWSS_LOG_ERROR("Runtime error: %s", e.what()); + return EXIT_FAILURE; } - return -1; + + return 0; } diff --git a/cfgmgr/nbrmgr.cpp b/cfgmgr/nbrmgr.cpp index d6d5f410e1..b2cc3df5c6 100644 --- 
a/cfgmgr/nbrmgr.cpp +++ b/cfgmgr/nbrmgr.cpp @@ -397,7 +397,7 @@ void NbrMgr::doStateSystemNeighTask(Consumer &consumer) if (!addKernelNeigh(nbr_odev, ip_address, mac_address)) { - SWSS_LOG_ERROR("Neigh entry add on dev %s failed for '%s'", nbr_odev.c_str(), kfvKey(t).c_str()); + SWSS_LOG_INFO("Neigh entry add on dev %s failed for '%s'", nbr_odev.c_str(), kfvKey(t).c_str()); // Delete neigh to take care of deletion of exiting nbr for mac change. This makes sure that // re-try will be successful and route addtion (below) will be attempted and be successful delKernelNeigh(nbr_odev, ip_address); @@ -411,7 +411,7 @@ void NbrMgr::doStateSystemNeighTask(Consumer &consumer) if (!addKernelRoute(nbr_odev, ip_address)) { - SWSS_LOG_ERROR("Route entry add on dev %s failed for '%s'", nbr_odev.c_str(), kfvKey(t).c_str()); + SWSS_LOG_INFO("Route entry add on dev %s failed for '%s'", nbr_odev.c_str(), kfvKey(t).c_str()); delKernelNeigh(nbr_odev, ip_address); // Delete route to take care of deletion of exiting route of nbr for mac change. 
delKernelRoute(ip_address); @@ -522,8 +522,8 @@ bool NbrMgr::addKernelRoute(string odev, IpAddress ip_addr) if(ret) { - /* Just log error and return */ - SWSS_LOG_ERROR("Failed to add route for %s, error: %d", ip_str.c_str(), ret); + /* This failure the caller expects is due to mac move */ + SWSS_LOG_INFO("Failed to add route for %s, error: %d", ip_str.c_str(), ret); return false; } @@ -586,8 +586,8 @@ bool NbrMgr::addKernelNeigh(string odev, IpAddress ip_addr, MacAddress mac_addr) if(ret) { - /* Just log error and return */ - SWSS_LOG_ERROR("Failed to add Nbr for %s, error: %d", ip_str.c_str(), ret); + /* This failure the caller expects is due to mac move */ + SWSS_LOG_INFO("Failed to add Nbr for %s, error: %d", ip_str.c_str(), ret); return false; } diff --git a/cfgmgr/nbrmgrd.cpp b/cfgmgr/nbrmgrd.cpp index 338d8d9d0d..2d325551a2 100644 --- a/cfgmgr/nbrmgrd.cpp +++ b/cfgmgr/nbrmgrd.cpp @@ -20,26 +20,6 @@ using namespace swss; /* select() function timeout retry time, in millisecond */ #define SELECT_TIMEOUT 1000 -/* - * Following global variables are defined here for the purpose of - * using existing Orch class which is to be refactored soon to - * eliminate the direct exposure of the global variables. - * - * Once Orch class refactoring is done, these global variables - * should be removed from here. 
- */ -int gBatchSize = 0; -bool gSwssRecord = false; -bool gLogRotate = false; -ofstream gRecordOfs; -string gRecordFile; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; -/* Global database mutex */ -mutex gDbMutex; - int main(int argc, char **argv) { Logger::linkToDbNative("nbrmgrd"); diff --git a/cfgmgr/portmgr.cpp b/cfgmgr/portmgr.cpp index b385a5096a..5134b31861 100644 --- a/cfgmgr/portmgr.cpp +++ b/cfgmgr/portmgr.cpp @@ -23,62 +23,54 @@ PortMgr::PortMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, c bool PortMgr::setPortMtu(const string &alias, const string &mtu) { stringstream cmd; - string res; + string res, cmd_str; // ip link set dev mtu cmd << IP_CMD << " link set dev " << shellquote(alias) << " mtu " << shellquote(mtu); - EXEC_WITH_ERROR_THROW(cmd.str(), res); - - // Set the port MTU in application database to update both - // the port MTU and possibly the port based router interface MTU - vector fvs; - FieldValueTuple fv("mtu", mtu); - fvs.push_back(fv); - m_appPortTable.set(alias, fvs); - - return true; -} - -bool PortMgr::setPortTpid(const string &alias, const string &tpid) -{ - stringstream cmd; - string res; - - // Set the port TPID in application database to update port TPID - vector fvs; - FieldValueTuple fv("tpid", tpid); - fvs.push_back(fv); - m_appPortTable.set(alias, fvs); - + cmd_str = cmd.str(); + int ret = swss::exec(cmd_str, res); + if (!ret) + { + // Set the port MTU in application database to update both + // the port MTU and possibly the port based router interface MTU + return writeConfigToAppDb(alias, "mtu", mtu); + } + else if (!isPortStateOk(alias)) + { + // Can happen when a DEL notification is sent by portmgrd immediately followed by a new SET notif + SWSS_LOG_WARN("Setting mtu to alias:%s netdev failed with cmd:%s, rc:%d, error:%s", alias.c_str(), cmd_str.c_str(), ret, res.c_str()); + return false; + } 
+ else + { + throw runtime_error(cmd_str + " : " + res); + } return true; } - bool PortMgr::setPortAdminStatus(const string &alias, const bool up) { stringstream cmd; - string res; + string res, cmd_str; // ip link set dev [up|down] cmd << IP_CMD << " link set dev " << shellquote(alias) << (up ? " up" : " down"); - EXEC_WITH_ERROR_THROW(cmd.str(), res); - - vector fvs; - FieldValueTuple fv("admin_status", (up ? "up" : "down")); - fvs.push_back(fv); - m_appPortTable.set(alias, fvs); - - return true; -} - -bool PortMgr::setPortLearnMode(const string &alias, const string &learn_mode) -{ - // Set the port MAC learn mode in application database - vector fvs; - FieldValueTuple fv("learn_mode", learn_mode); - fvs.push_back(fv); - m_appPortTable.set(alias, fvs); - + cmd_str = cmd.str(); + int ret = swss::exec(cmd_str, res); + if (!ret) + { + return writeConfigToAppDb(alias, "admin_status", (up ? "up" : "down")); + } + else if (!isPortStateOk(alias)) + { + // Can happen when a DEL notification is sent by portmgrd immediately followed by a new SET notification + SWSS_LOG_WARN("Setting admin_status to alias:%s netdev failed with cmd%s, rc:%d, error:%s", alias.c_str(), cmd_str.c_str(), ret, res.c_str()); + return false; + } + else + { + throw runtime_error(cmd_str + " : " + res); + } return true; } @@ -117,14 +109,14 @@ void PortMgr::doTask(Consumer &consumer) if (op == SET_COMMAND) { - if (!isPortStateOk(alias)) - { - SWSS_LOG_INFO("Port %s is not ready, pending...", alias.c_str()); - it++; - continue; - } + /* portOk=true indicates that the port has been created in kernel. + * We should not call any ip command if portOk=false. However, it is + * valid to put port configuration to APP DB which will trigger port creation in kernel. 
+ */ + bool portOk = isPortStateOk(alias); - string admin_status, mtu, learn_mode, tpid; + string admin_status, mtu; + std::vector field_values; bool configured = (m_portList.find(alias) != m_portList.end()); @@ -138,6 +130,11 @@ void PortMgr::doTask(Consumer &consumer) m_portList.insert(alias); } + else if (!portOk) + { + it++; + continue; + } for (auto i : kfvFieldsValues(t)) { @@ -149,38 +146,42 @@ void PortMgr::doTask(Consumer &consumer) { admin_status = fvValue(i); } - else if (fvField(i) == "learn_mode") + else { - learn_mode = fvValue(i); - } - else if (fvField(i) == "tpid") - { - tpid = fvValue(i); + field_values.emplace_back(i); } } - if (!mtu.empty()) + if (field_values.size()) { - setPortMtu(alias, mtu); - SWSS_LOG_NOTICE("Configure %s MTU to %s", alias.c_str(), mtu.c_str()); + writeConfigToAppDb(alias, field_values); } - if (!admin_status.empty()) + if (!portOk) { - setPortAdminStatus(alias, admin_status == "up"); - SWSS_LOG_NOTICE("Configure %s admin status to %s", alias.c_str(), admin_status.c_str()); + SWSS_LOG_INFO("Port %s is not ready, pending...", alias.c_str()); + + writeConfigToAppDb(alias, "mtu", mtu); + writeConfigToAppDb(alias, "admin_status", admin_status); + /* Retry setting these params after the netdev is created */ + field_values.clear(); + field_values.emplace_back("mtu", mtu); + field_values.emplace_back("admin_status", admin_status); + it->second = KeyOpFieldsValuesTuple{alias, SET_COMMAND, field_values}; + it++; + continue; } - if (!learn_mode.empty()) + if (!mtu.empty()) { - setPortLearnMode(alias, learn_mode); - SWSS_LOG_NOTICE("Configure %s MAC learn mode to %s", alias.c_str(), learn_mode.c_str()); + setPortMtu(alias, mtu); + SWSS_LOG_NOTICE("Configure %s MTU to %s", alias.c_str(), mtu.c_str()); } - if (!tpid.empty()) + if (!admin_status.empty()) { - setPortTpid(alias, tpid); - SWSS_LOG_NOTICE("Configure %s TPID to %s", alias.c_str(), tpid.c_str()); + setPortAdminStatus(alias, admin_status == "up"); + SWSS_LOG_NOTICE("Configure 
%s admin status to %s", alias.c_str(), admin_status.c_str()); } } else if (op == DEL_COMMAND) @@ -193,3 +194,19 @@ void PortMgr::doTask(Consumer &consumer) it = consumer.m_toSync.erase(it); } } + +bool PortMgr::writeConfigToAppDb(const std::string &alias, const std::string &field, const std::string &value) +{ + vector fvs; + FieldValueTuple fv(field, value); + fvs.push_back(fv); + m_appPortTable.set(alias, fvs); + + return true; +} + +bool PortMgr::writeConfigToAppDb(const std::string &alias, std::vector &field_values) +{ + m_appPortTable.set(alias, field_values); + return true; +} diff --git a/cfgmgr/portmgr.h b/cfgmgr/portmgr.h index 809cd1c004..683aacc488 100644 --- a/cfgmgr/portmgr.h +++ b/cfgmgr/portmgr.h @@ -29,10 +29,10 @@ class PortMgr : public Orch std::set m_portList; void doTask(Consumer &consumer); + bool writeConfigToAppDb(const std::string &alias, const std::string &field, const std::string &value); + bool writeConfigToAppDb(const std::string &alias, std::vector &field_values); bool setPortMtu(const std::string &alias, const std::string &mtu); - bool setPortTpid(const std::string &alias, const std::string &tpid); bool setPortAdminStatus(const std::string &alias, const bool up); - bool setPortLearnMode(const std::string &alias, const std::string &learn_mode); bool isPortStateOk(const std::string &alias); }; diff --git a/cfgmgr/portmgrd.cpp b/cfgmgr/portmgrd.cpp index 180bbc1d63..99c7974559 100644 --- a/cfgmgr/portmgrd.cpp +++ b/cfgmgr/portmgrd.cpp @@ -15,26 +15,6 @@ using namespace swss; /* select() function timeout retry time, in millisecond */ #define SELECT_TIMEOUT 1000 -/* - * Following global variables are defined here for the purpose of - * using existing Orch class which is to be refactored soon to - * eliminate the direct exposure of the global variables. - * - * Once Orch class refactoring is done, these global variables - * should be removed from here. 
- */ -int gBatchSize = 0; -bool gSwssRecord = false; -bool gLogRotate = false; -ofstream gRecordOfs; -string gRecordFile; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; -/* Global database mutex */ -mutex gDbMutex; - int main(int argc, char **argv) { Logger::linkToDbNative("portmgrd"); @@ -53,8 +33,6 @@ int main(int argc, char **argv) DBConnector stateDb("STATE_DB", 0); PortMgr portmgr(&cfgDb, &appDb, &stateDb, cfg_port_tables); - - // TODO: add tables in stateDB which interface depends on to monitor list vector cfgOrchList = {&portmgr}; swss::Select s; diff --git a/cfgmgr/sflowmgr.cpp b/cfgmgr/sflowmgr.cpp index bb732e83d5..122ffc0780 100644 --- a/cfgmgr/sflowmgr.cpp +++ b/cfgmgr/sflowmgr.cpp @@ -10,27 +10,42 @@ using namespace std; using namespace swss; -map sflowSpeedRateInitMap = -{ - {SFLOW_SAMPLE_RATE_KEY_400G, SFLOW_SAMPLE_RATE_VALUE_400G}, - {SFLOW_SAMPLE_RATE_KEY_200G, SFLOW_SAMPLE_RATE_VALUE_200G}, - {SFLOW_SAMPLE_RATE_KEY_100G, SFLOW_SAMPLE_RATE_VALUE_100G}, - {SFLOW_SAMPLE_RATE_KEY_50G, SFLOW_SAMPLE_RATE_VALUE_50G}, - {SFLOW_SAMPLE_RATE_KEY_40G, SFLOW_SAMPLE_RATE_VALUE_40G}, - {SFLOW_SAMPLE_RATE_KEY_25G, SFLOW_SAMPLE_RATE_VALUE_25G}, - {SFLOW_SAMPLE_RATE_KEY_10G, SFLOW_SAMPLE_RATE_VALUE_10G}, - {SFLOW_SAMPLE_RATE_KEY_1G, SFLOW_SAMPLE_RATE_VALUE_1G} -}; - -SflowMgr::SflowMgr(DBConnector *cfgDb, DBConnector *appDb, const vector &tableNames) : - Orch(cfgDb, tableNames), - m_cfgSflowTable(cfgDb, CFG_SFLOW_TABLE_NAME), - m_cfgSflowSessionTable(cfgDb, CFG_SFLOW_SESSION_TABLE_NAME), +SflowMgr::SflowMgr(DBConnector *appDb, const std::vector& tableNames) : + Orch(tableNames), m_appSflowTable(appDb, APP_SFLOW_TABLE_NAME), m_appSflowSessionTable(appDb, APP_SFLOW_SESSION_TABLE_NAME) { m_intfAllConf = true; m_gEnable = false; + m_gDirection = "rx"; + m_intfAllDir = "rx"; +} + +void SflowMgr::readPortConfig() +{ + auto consumer_it = 
m_consumerMap.find(CFG_PORT_TABLE_NAME); + if (consumer_it != m_consumerMap.end()) + { + consumer_it->second->drain(); + SWSS_LOG_NOTICE("Port Configuration Read.."); + } + else + { + SWSS_LOG_ERROR("Consumer object for PORT_TABLE not found"); + } +} + +bool SflowMgr::isPortEnabled(const std::string& alias) +{ + /* Checks if the sflow is enabled on the port */ + auto it = m_sflowPortConfMap.find(alias); + if (it == m_sflowPortConfMap.end()) + { + return false; + } + bool local_admin = it->second.local_admin_cfg; + bool status = it->second.admin == "up" ? true : false; + return m_gEnable && (m_intfAllConf || (local_admin && status)); } void SflowMgr::sflowHandleService(bool enable) @@ -69,7 +84,6 @@ void SflowMgr::sflowUpdatePortInfo(Consumer &consumer) while (it != consumer.m_toSync.end()) { KeyOpFieldsValuesTuple t = it->second; - string key = kfvKey(t); string op = kfvOp(t); auto values = kfvFieldsValues(t); @@ -85,14 +99,17 @@ void SflowMgr::sflowUpdatePortInfo(Consumer &consumer) new_port = true; port_info.local_rate_cfg = false; port_info.local_admin_cfg = false; - port_info.speed = SFLOW_ERROR_SPEED_STR; + port_info.speed = ERROR_SPEED; + port_info.oper_speed = NA_SPEED; + port_info.local_dir_cfg = false; port_info.rate = ""; port_info.admin = ""; + port_info.dir = ""; m_sflowPortConfMap[key] = port_info; } - bool speed_change = false; - string new_speed = SFLOW_ERROR_SPEED_STR; + bool rate_update = false; + string new_speed = ERROR_SPEED; for (auto i : values) { if (fvField(i) == "speed") @@ -103,16 +120,26 @@ void SflowMgr::sflowUpdatePortInfo(Consumer &consumer) if (m_sflowPortConfMap[key].speed != new_speed) { m_sflowPortConfMap[key].speed = new_speed; - speed_change = true; + /* if oper_speed is set, no need to write to APP_DB */ + if (m_sflowPortConfMap[key].oper_speed == NA_SPEED) + { + rate_update = true; + } + } + + string def_dir = "rx"; + if (m_sflowPortConfMap[key].dir != def_dir && !m_sflowPortConfMap[key].local_dir_cfg) + { + 
m_sflowPortConfMap[key].dir = def_dir; } - if (m_gEnable && m_intfAllConf) + if (isPortEnabled(key)) { - // If the Local rate Conf is already present, dont't override it even though the speed is changed - if (new_port || (speed_change && !m_sflowPortConfMap[key].local_rate_cfg)) + // If the Local rate conf is already present, dont't override it even though the speed is changed + if (new_port || (rate_update && !m_sflowPortConfMap[key].local_rate_cfg)) { vector fvs; - sflowGetGlobalInfo(fvs, m_sflowPortConfMap[key].speed); + sflowGetGlobalInfo(fvs, key, m_sflowPortConfMap[key].dir); m_appSflowSessionTable.set(key, fvs); } } @@ -123,7 +150,8 @@ void SflowMgr::sflowUpdatePortInfo(Consumer &consumer) if (sflowPortConf != m_sflowPortConfMap.end()) { bool local_cfg = m_sflowPortConfMap[key].local_rate_cfg || - m_sflowPortConfMap[key].local_admin_cfg; + m_sflowPortConfMap[key].local_admin_cfg || + m_sflowPortConfMap[key].local_dir_cfg; m_sflowPortConfMap.erase(key); if ((m_intfAllConf && m_gEnable) || local_cfg) @@ -136,14 +164,67 @@ void SflowMgr::sflowUpdatePortInfo(Consumer &consumer) } } -void SflowMgr::sflowHandleSessionAll(bool enable) +void SflowMgr::sflowProcessOperSpeed(Consumer &consumer) +{ + auto it = consumer.m_toSync.begin(); + + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple t = it->second; + string alias = kfvKey(t); + string op = kfvOp(t); + auto values = kfvFieldsValues(t); + string oper_speed = ""; + bool rate_update = false; + + for (auto i : values) + { + if (fvField(i) == "speed") + { + oper_speed = fvValue(i); + } + } + + if (m_sflowPortConfMap.find(alias) != m_sflowPortConfMap.end() && op == SET_COMMAND) + { + SWSS_LOG_DEBUG("STATE_DB update: iface: %s, oper_speed: %s, cfg_speed: %s, new_speed: %s", + alias.c_str(), m_sflowPortConfMap[alias].oper_speed.c_str(), + m_sflowPortConfMap[alias].speed.c_str(), + oper_speed.c_str()); + /* oper_speed is updated by orchagent if the vendor supports and oper status is up */ + if 
(m_sflowPortConfMap[alias].oper_speed != oper_speed && !oper_speed.empty()) + { + rate_update = true; + if (oper_speed == m_sflowPortConfMap[alias].speed && m_sflowPortConfMap[alias].oper_speed == NA_SPEED) + { + /* if oper_speed is equal to cfg_speed, avoid the write to APP_DB + Can happen if auto-neg is not set */ + rate_update = false; + } + m_sflowPortConfMap[alias].oper_speed = oper_speed; + } + + if (isPortEnabled(alias) && rate_update && !m_sflowPortConfMap[alias].local_rate_cfg) + { + vector fvs; + sflowGetGlobalInfo(fvs, alias, m_sflowPortConfMap[alias].dir); + m_appSflowSessionTable.set(alias, fvs); + SWSS_LOG_NOTICE("Default sampling rate for %s updated to %s", alias.c_str(), findSamplingRate(alias).c_str()); + } + } + /* Do nothing for DEL as the SflowPortConfMap will already be cleared by the DEL from CONFIG_DB */ + it = consumer.m_toSync.erase(it); + } +} + +void SflowMgr::sflowHandleSessionAll(bool enable, string direction) { for (auto it: m_sflowPortConfMap) { if (enable) { vector fvs; - if (it.second.local_rate_cfg || it.second.local_admin_cfg) + if (it.second.local_rate_cfg || it.second.local_admin_cfg || it.second.local_dir_cfg) { sflowGetPortInfo(fvs, it.second); /* Use global admin state if there is not a local one */ @@ -151,10 +232,16 @@ void SflowMgr::sflowHandleSessionAll(bool enable) FieldValueTuple fv1("admin_state", "up"); fvs.push_back(fv1); } + + /* Use global sample direction state if there is not a local one */ + if (!it.second.local_dir_cfg) { + FieldValueTuple fv2("sample_direction", direction); + fvs.push_back(fv2); + } } else { - sflowGetGlobalInfo(fvs, it.second.speed); + sflowGetGlobalInfo(fvs, it.first, direction); } m_appSflowSessionTable.set(it.first, fvs); } @@ -169,7 +256,7 @@ void SflowMgr::sflowHandleSessionLocal(bool enable) { for (auto it: m_sflowPortConfMap) { - if (it.second.local_admin_cfg || it.second.local_rate_cfg) + if (it.second.local_admin_cfg || it.second.local_rate_cfg || it.second.local_dir_cfg) { vector 
fvs; sflowGetPortInfo(fvs, it.second); @@ -185,22 +272,16 @@ void SflowMgr::sflowHandleSessionLocal(bool enable) } } -void SflowMgr::sflowGetGlobalInfo(vector &fvs, string speed) +void SflowMgr::sflowGetGlobalInfo(vector &fvs, const string& alias, const string& dir) { - string rate; FieldValueTuple fv1("admin_state", "up"); fvs.push_back(fv1); - if (speed != SFLOW_ERROR_SPEED_STR && sflowSpeedRateInitMap.find(speed) != sflowSpeedRateInitMap.end()) - { - rate = sflowSpeedRateInitMap[speed]; - } - else - { - rate = SFLOW_ERROR_SPEED_STR; - } - FieldValueTuple fv2("sample_rate",rate); + FieldValueTuple fv2("sample_rate", findSamplingRate(alias)); fvs.push_back(fv2); + + FieldValueTuple fv3("sample_direction",dir); + fvs.push_back(fv3); } void SflowMgr::sflowGetPortInfo(vector &fvs, SflowPortInfo &local_info) @@ -213,6 +294,12 @@ void SflowMgr::sflowGetPortInfo(vector &fvs, SflowPortInfo &loc FieldValueTuple fv2("sample_rate", local_info.rate); fvs.push_back(fv2); + + if (local_info.local_dir_cfg) + { + FieldValueTuple fv3("sample_direction", local_info.dir); + fvs.push_back(fv3); + } } void SflowMgr::sflowCheckAndFillValues(string alias, vector &values, @@ -221,6 +308,7 @@ void SflowMgr::sflowCheckAndFillValues(string alias, vector &va string rate; bool admin_present = false; bool rate_present = false; + bool dir_present = false; for (auto i : values) { @@ -240,6 +328,14 @@ void SflowMgr::sflowCheckAndFillValues(string alias, vector &va FieldValueTuple fv(fvField(i), fvValue(i)); fvs.push_back(fv); } + if (fvField(i) == "sample_direction") + { + dir_present = true; + m_sflowPortConfMap[alias].dir = fvValue(i); + m_sflowPortConfMap[alias].local_dir_cfg = true; + FieldValueTuple fv(fvField(i), fvValue(i)); + fvs.push_back(fv); + } if (fvField(i) == "NULL") { continue; @@ -254,17 +350,7 @@ void SflowMgr::sflowCheckAndFillValues(string alias, vector &va if (m_sflowPortConfMap[alias].rate == "" || m_sflowPortConfMap[alias].local_rate_cfg) { - string speed = 
m_sflowPortConfMap[alias].speed; - - if (speed != SFLOW_ERROR_SPEED_STR && sflowSpeedRateInitMap.find(speed) != sflowSpeedRateInitMap.end()) - { - rate = sflowSpeedRateInitMap[speed]; - } - else - { - rate = SFLOW_ERROR_SPEED_STR; - } - m_sflowPortConfMap[alias].rate = rate; + m_sflowPortConfMap[alias].rate = findSamplingRate(alias); } m_sflowPortConfMap[alias].local_rate_cfg = false; FieldValueTuple fv("sample_rate", m_sflowPortConfMap[alias].rate); @@ -282,6 +368,36 @@ void SflowMgr::sflowCheckAndFillValues(string alias, vector &va FieldValueTuple fv("admin_state", m_sflowPortConfMap[alias].admin); fvs.push_back(fv); } + + if (!dir_present) + { + if (m_sflowPortConfMap[alias].dir == "") + { + /* By default direction is set to global, if not set explicitly */ + m_sflowPortConfMap[alias].dir = m_gDirection; + } + m_sflowPortConfMap[alias].local_dir_cfg = false; + FieldValueTuple fv("sample_direction", m_sflowPortConfMap[alias].dir); + fvs.push_back(fv); + } +} + +string SflowMgr::findSamplingRate(const string& alias) +{ + /* Default sampling rate is equal to the oper_speed, if present + if oper_speed is not found, use the configured speed */ + if (m_sflowPortConfMap.find(alias) == m_sflowPortConfMap.end()) + { + SWSS_LOG_ERROR("%s not found in port configuration map", alias.c_str()); + return ERROR_SPEED; + } + string oper_speed = m_sflowPortConfMap[alias].oper_speed; + string cfg_speed = m_sflowPortConfMap[alias].speed; + if (!oper_speed.empty() && oper_speed != NA_SPEED) + { + return oper_speed; + } + return cfg_speed; } void SflowMgr::doTask(Consumer &consumer) @@ -295,6 +411,11 @@ void SflowMgr::doTask(Consumer &consumer) sflowUpdatePortInfo(consumer); return; } + else if (table == STATE_PORT_TABLE_NAME) + { + sflowProcessOperSpeed(consumer); + return; + } auto it = consumer.m_toSync.begin(); while (it != consumer.m_toSync.end()) @@ -309,51 +430,92 @@ void SflowMgr::doTask(Consumer &consumer) { if (table == CFG_SFLOW_TABLE_NAME) { + SWSS_LOG_DEBUG("Current Cfg 
admin %d dir %s ", (unsigned int)m_gEnable, m_gDirection.c_str()); + bool enable = false; + string direction = "rx"; for (auto i : values) { if (fvField(i) == "admin_state") { - bool enable = false; if (fvValue(i) == "up") { enable = true; } - if (enable == m_gEnable) - { - break; - } - m_gEnable = enable; - sflowHandleService(enable); - if (m_intfAllConf) - { - sflowHandleSessionAll(enable); - } - sflowHandleSessionLocal(enable); } + else if (fvField(i) == "sample_direction") + { + direction = fvValue(i); + } + } + + if (direction != m_gDirection) + { + m_gDirection = direction; } + + if (m_gEnable != enable) + { + m_gEnable = enable; + sflowHandleService(enable); + } + + if (m_intfAllConf) + { + sflowHandleSessionAll(m_gEnable, m_gDirection); + } + + sflowHandleSessionLocal(m_gEnable); m_appSflowTable.set(key, values); + + SWSS_LOG_DEBUG("New config admin %d dir %s ", (unsigned int)m_gEnable, m_gDirection.c_str()); } else if (table == CFG_SFLOW_SESSION_TABLE_NAME) { if (key == "all") { + SWSS_LOG_DEBUG("current config gAdmin %d dir %s intfAllEna %d intfAllDir %s", + (unsigned int)m_gEnable, m_gDirection.c_str(), + (unsigned int)m_intfAllConf, m_intfAllDir.c_str()); + + string direction = m_intfAllDir; + bool enable = m_intfAllConf; for (auto i : values) { if (fvField(i) == "admin_state") { - bool enable = false; - if (fvValue(i) == "up") { enable = true; } - if ((enable != m_intfAllConf) && (m_gEnable)) + else if (fvValue(i) == "down") { - sflowHandleSessionAll(enable); + enable = false; } - m_intfAllConf = enable; } + else if (fvField(i) == "sample_direction") + { + direction = fvValue(i); + } + } + + if (m_intfAllDir != direction) + { + m_intfAllDir = direction; + } + + if (enable != m_intfAllConf) + { + m_intfAllConf = enable; } + + if (m_gEnable) + { + sflowHandleSessionAll(m_intfAllConf, m_intfAllDir); + } + + SWSS_LOG_DEBUG("New config gAdmin %d dir %s intfAllEna %d intfAllDir %s", + (unsigned int)m_gEnable, m_gDirection.c_str(), + (unsigned 
int)m_intfAllConf, m_intfAllDir.c_str()); } else { @@ -380,10 +542,11 @@ void SflowMgr::doTask(Consumer &consumer) if (m_gEnable) { sflowHandleService(false); - sflowHandleSessionAll(false); + sflowHandleSessionAll(false, ""); sflowHandleSessionLocal(false); } m_gEnable = false; + m_gDirection = "rx"; m_appSflowTable.del(key); } else if (table == CFG_SFLOW_SESSION_TABLE_NAME) @@ -394,7 +557,7 @@ void SflowMgr::doTask(Consumer &consumer) { if (m_gEnable) { - sflowHandleSessionAll(true); + sflowHandleSessionAll(true, m_gDirection); } } m_intfAllConf = true; @@ -404,14 +567,16 @@ void SflowMgr::doTask(Consumer &consumer) m_appSflowSessionTable.del(key); m_sflowPortConfMap[key].local_rate_cfg = false; m_sflowPortConfMap[key].local_admin_cfg = false; + m_sflowPortConfMap[key].local_dir_cfg = false; m_sflowPortConfMap[key].rate = ""; m_sflowPortConfMap[key].admin = ""; + m_sflowPortConfMap[key].dir = ""; /* If Global configured, set global session on port after local config is deleted */ if (m_intfAllConf) { vector fvs; - sflowGetGlobalInfo(fvs, m_sflowPortConfMap[key].speed); + sflowGetGlobalInfo(fvs, key, m_intfAllDir); m_appSflowSessionTable.set(key,fvs); } } diff --git a/cfgmgr/sflowmgr.h b/cfgmgr/sflowmgr.h index eb35ec2125..5cdc231d79 100644 --- a/cfgmgr/sflowmgr.h +++ b/cfgmgr/sflowmgr.h @@ -10,33 +10,19 @@ namespace swss { -#define SFLOW_SAMPLE_RATE_KEY_400G "400000" -#define SFLOW_SAMPLE_RATE_KEY_200G "200000" -#define SFLOW_SAMPLE_RATE_KEY_100G "100000" -#define SFLOW_SAMPLE_RATE_KEY_50G "50000" -#define SFLOW_SAMPLE_RATE_KEY_40G "40000" -#define SFLOW_SAMPLE_RATE_KEY_25G "25000" -#define SFLOW_SAMPLE_RATE_KEY_10G "10000" -#define SFLOW_SAMPLE_RATE_KEY_1G "1000" - -#define SFLOW_SAMPLE_RATE_VALUE_400G "400000" -#define SFLOW_SAMPLE_RATE_VALUE_200G "200000" -#define SFLOW_SAMPLE_RATE_VALUE_100G "100000" -#define SFLOW_SAMPLE_RATE_VALUE_50G "50000" -#define SFLOW_SAMPLE_RATE_VALUE_40G "40000" -#define SFLOW_SAMPLE_RATE_VALUE_25G "25000" -#define 
SFLOW_SAMPLE_RATE_VALUE_10G "10000" -#define SFLOW_SAMPLE_RATE_VALUE_1G "1000" - -#define SFLOW_ERROR_SPEED_STR "error" +#define ERROR_SPEED "error" +#define NA_SPEED "N/A" struct SflowPortInfo { bool local_rate_cfg; bool local_admin_cfg; + bool local_dir_cfg; std::string speed; + std::string oper_speed; std::string rate; std::string admin; + std::string dir; }; /* Port to Local config map */ @@ -45,26 +31,30 @@ typedef std::map SflowPortConfMap; class SflowMgr : public Orch { public: - SflowMgr(DBConnector *cfgDb, DBConnector *appDb, const std::vector &tableNames); + SflowMgr(DBConnector *appDb, const std::vector& tableNames); + void readPortConfig(); using Orch::doTask; private: - Table m_cfgSflowTable; - Table m_cfgSflowSessionTable; ProducerStateTable m_appSflowTable; ProducerStateTable m_appSflowSessionTable; - SflowPortConfMap m_sflowPortConfMap; + SflowPortConfMap m_sflowPortConfMap; bool m_intfAllConf; bool m_gEnable; + std::string m_intfAllDir; + std::string m_gDirection; void doTask(Consumer &consumer); void sflowHandleService(bool enable); void sflowUpdatePortInfo(Consumer &consumer); - void sflowHandleSessionAll(bool enable); + void sflowProcessOperSpeed(Consumer &consumer); + void sflowHandleSessionAll(bool enable, std::string direction); void sflowHandleSessionLocal(bool enable); void sflowCheckAndFillValues(std::string alias, std::vector &values, std::vector &fvs); void sflowGetPortInfo(std::vector &fvs, SflowPortInfo &local_info); - void sflowGetGlobalInfo(std::vector &fvs, std::string speed); + void sflowGetGlobalInfo(std::vector &fvs, const std::string& alias, const std::string& direction); + bool isPortEnabled(const std::string& alias); + std::string findSamplingRate(const std::string& speed); }; } diff --git a/cfgmgr/sflowmgrd.cpp b/cfgmgr/sflowmgrd.cpp index 7de5f15a2d..2eef82bac7 100644 --- a/cfgmgr/sflowmgrd.cpp +++ b/cfgmgr/sflowmgrd.cpp @@ -15,26 +15,6 @@ using namespace swss; /* select() function timeout retry time, in millisecond */ 
#define SELECT_TIMEOUT 1000 -/* - * Following global variables are defined here for the purpose of - * using existing Orch class which is to be refactored soon to - * eliminate the direct exposure of the global variables. - * - * Once Orch class refactoring is done, these global variables - * should be removed from here. - */ -int gBatchSize = 0; -bool gSwssRecord = false; -bool gLogRotate = false; -ofstream gRecordOfs; -string gRecordFile; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; -/* Global database mutex */ -mutex gDbMutex; - int main(int argc, char **argv) { Logger::linkToDbNative("sflowmgrd"); @@ -44,21 +24,31 @@ int main(int argc, char **argv) try { - vector cfg_sflow_tables = { - CFG_SFLOW_TABLE_NAME, - CFG_SFLOW_SESSION_TABLE_NAME, - CFG_PORT_TABLE_NAME - }; - DBConnector cfgDb("CONFIG_DB", 0); DBConnector appDb("APPL_DB", 0); + DBConnector stateDb("STATE_DB", 0); + + TableConnector conf_port_table(&cfgDb, CFG_PORT_TABLE_NAME); + TableConnector state_port_table(&stateDb, STATE_PORT_TABLE_NAME); + TableConnector conf_sflow_table(&cfgDb, CFG_SFLOW_TABLE_NAME); + TableConnector conf_sflow_session_table(&cfgDb, CFG_SFLOW_SESSION_TABLE_NAME); + + vector sflow_tables = { + conf_port_table, + state_port_table, + conf_sflow_table, + conf_sflow_session_table + }; - SflowMgr sflowmgr(&cfgDb, &appDb, cfg_sflow_tables); + SflowMgr sflowmgr(&appDb, sflow_tables); + /* During process startup, the ordering of config_db followed by state_db notifications cannot be guaranteed + and so handle the config events manually */ + sflowmgr.readPortConfig(); - vector cfgOrchList = {&sflowmgr}; + vector orchList = {&sflowmgr}; swss::Select s; - for (Orch *o : cfgOrchList) + for (Orch *o : orchList) { s.addSelectables(o->getSelectables()); } diff --git a/cfgmgr/teammgr.cpp b/cfgmgr/teammgr.cpp index ad8572e07b..40eca9d921 100644 --- a/cfgmgr/teammgr.cpp +++ 
b/cfgmgr/teammgr.cpp @@ -33,7 +33,8 @@ TeamMgr::TeamMgr(DBConnector *confDb, DBConnector *applDb, DBConnector *statDb, m_appPortTable(applDb, APP_PORT_TABLE_NAME), m_appLagTable(applDb, APP_LAG_TABLE_NAME), m_statePortTable(statDb, STATE_PORT_TABLE_NAME), - m_stateLagTable(statDb, STATE_LAG_TABLE_NAME) + m_stateLagTable(statDb, STATE_LAG_TABLE_NAME), + m_stateMACsecIngressSATable(statDb, STATE_MACSEC_INGRESS_SA_TABLE_NAME) { SWSS_LOG_ENTER(); @@ -98,6 +99,51 @@ bool TeamMgr::isLagStateOk(const string &alias) return true; } +bool TeamMgr::isMACsecAttached(const std::string &port) +{ + SWSS_LOG_ENTER(); + + vector temp; + + if (!m_cfgPortTable.get(port, temp)) + { + SWSS_LOG_INFO("Port %s is not ready", port.c_str()); + return false; + } + + auto macsec_opt = swss::fvsGetValue(temp, "macsec", true); + if (!macsec_opt || macsec_opt->empty()) + { + SWSS_LOG_INFO("MACsec isn't setted on the port %s", port.c_str()); + return false; + } + + return true; +} + +bool TeamMgr::isMACsecIngressSAOk(const std::string &port) +{ + SWSS_LOG_ENTER(); + + vector keys; + m_stateMACsecIngressSATable.getKeys(keys); + + for (auto key: keys) + { + auto tokens = tokenize(key, state_db_key_delimiter); + auto interface = tokens[0]; + + if (port == interface) + { + SWSS_LOG_NOTICE(" MACsec is ready on the port %s", port.c_str()); + return true; + } + } + + SWSS_LOG_INFO("MACsec is NOT ready on the port %s", port.c_str()); + return false; +} + void TeamMgr::doTask(Consumer &consumer) { SWSS_LOG_ENTER(); @@ -206,6 +252,7 @@ void TeamMgr::doLagTask(Consumer &consumer) { int min_links = 0; bool fallback = false; + bool fast_rate = false; string admin_status = DEFAULT_ADMIN_STATUS_STR; string mtu = DEFAULT_MTU_STR; string learn_mode; @@ -247,12 +294,18 @@ void TeamMgr::doLagTask(Consumer &consumer) { tpid = fvValue(i); SWSS_LOG_INFO("Get TPID %s", tpid.c_str()); - } + } + else if (fvField(i) == "fast_rate") + { + fast_rate = fvValue(i) == "true"; + SWSS_LOG_INFO("Get fast_rate `%s`", + fast_rate ? 
"true" : "false"); + } } if (m_lagList.find(alias) == m_lagList.end()) { - if (addLag(alias, min_links, fallback) == task_need_retry) + if (addLag(alias, min_links, fallback, fast_rate) == task_need_retry) { it++; continue; @@ -309,7 +362,11 @@ void TeamMgr::doLagMemberTask(Consumer &consumer) it++; continue; } - + if (isMACsecAttached(member) && !isMACsecIngressSAOk(member)) + { + it++; + continue; + } if (addLagMember(lag, member) == task_need_retry) { it++; @@ -400,6 +457,13 @@ void TeamMgr::doPortUpdateTask(Consumer &consumer) string lag; if (findPortMaster(lag, alias)) { + if (isMACsecAttached(alias) && !isMACsecIngressSAOk(alias)) + { + it++; + SWSS_LOG_INFO("MACsec is NOT ready on the port %s", alias.c_str()); + continue; + } + if (addLagMember(lag, alias) == task_need_retry) { it++; @@ -496,7 +560,7 @@ bool TeamMgr::setLagLearnMode(const string &alias, const string &learn_mode) return true; } -task_process_status TeamMgr::addLag(const string &alias, int min_links, bool fallback) +task_process_status TeamMgr::addLag(const string &alias, int min_links, bool fallback, bool fast_rate) { SWSS_LOG_ENTER(); @@ -553,6 +617,11 @@ task_process_status TeamMgr::addLag(const string &alias, int min_links, bool fal conf << ",\"fallback\":true"; } + if (fast_rate) + { + conf << ",\"fast_rate\":true"; + } + conf << "}}'"; SWSS_LOG_INFO("Port channel %s teamd configuration: %s", @@ -595,7 +664,7 @@ bool TeamMgr::removeLag(const string &alias) } // Port-channel names are in the pattern of "PortChannel####" -// +// // The LACP key could be generated in 3 ways based on the value in config DB: // 1. "auto" - LACP key is extracted from the port-channel name and is set to be the number at the end of the port-channel name // We are adding 1 at the beginning to avoid LACP key collisions between similar LACP keys e.g. PortChannel10 and PortChannel010. 
@@ -647,6 +716,17 @@ task_process_status TeamMgr::addLagMember(const string &lag, const string &membe { SWSS_LOG_ENTER(); + stringstream cmd; + string res; + + // If port was already deleted, ignore this operation + cmd << IP_CMD << " link show " << shellquote(member); + if (exec(cmd.str(), res) != 0) + { + SWSS_LOG_WARN("Unable to find port %s", member.c_str()); + return task_ignore; + } + // If port is already enslaved, ignore this operation // TODO: check the current master if it is the same as to be configured if (isPortEnslaved(member)) @@ -654,9 +734,9 @@ task_process_status TeamMgr::addLagMember(const string &lag, const string &membe return task_ignore; } - stringstream cmd; - string res; uint16_t keyId = generateLacpKey(lag); + cmd.str(""); + cmd.clear(); // Set admin down LAG member (required by teamd) and enslave it // ip link set dev down; diff --git a/cfgmgr/teammgr.h b/cfgmgr/teammgr.h index c1b5d525c0..3c98f87dc5 100644 --- a/cfgmgr/teammgr.h +++ b/cfgmgr/teammgr.h @@ -27,6 +27,7 @@ class TeamMgr : public Orch Table m_cfgLagMemberTable; Table m_statePortTable; Table m_stateLagTable; + Table m_stateMACsecIngressSATable; ProducerStateTable m_appPortTable; ProducerStateTable m_appLagTable; @@ -40,7 +41,7 @@ class TeamMgr : public Orch void doLagMemberTask(Consumer &consumer); void doPortUpdateTask(Consumer &consumer); - task_process_status addLag(const std::string &alias, int min_links, bool fall_back); + task_process_status addLag(const std::string &alias, int min_links, bool fall_back, bool fast_rate); bool removeLag(const std::string &alias); task_process_status addLagMember(const std::string &lag, const std::string &member); bool removeLagMember(const std::string &lag, const std::string &member); @@ -55,6 +56,8 @@ class TeamMgr : public Orch bool checkPortIffUp(const std::string &); bool isPortStateOk(const std::string&); bool isLagStateOk(const std::string&); + bool isMACsecAttached(const std::string &); + bool isMACsecIngressSAOk(const std::string 
&); uint16_t generateLacpKey(const std::string&); }; diff --git a/cfgmgr/teammgrd.cpp b/cfgmgr/teammgrd.cpp index 66bfa4b6d2..a18838c959 100644 --- a/cfgmgr/teammgrd.cpp +++ b/cfgmgr/teammgrd.cpp @@ -12,20 +12,17 @@ using namespace swss; #define SELECT_TIMEOUT 1000 -int gBatchSize = 0; -bool gSwssRecord = false; -bool gLogRotate = false; -ofstream gRecordOfs; -string gRecordFile; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; - bool received_sigterm = false; +static struct sigaction old_sigaction; void sig_handler(int signo) { + SWSS_LOG_ENTER(); + + if (old_sigaction.sa_handler != SIG_IGN && old_sigaction.sa_handler != SIG_DFL) { + old_sigaction.sa_handler(signo); + } + received_sigterm = true; return; } @@ -38,7 +35,13 @@ int main(int argc, char **argv) SWSS_LOG_NOTICE("--- Starting teammrgd ---"); /* Register the signal handler for SIGTERM */ - signal(SIGTERM, sig_handler); + struct sigaction sigact = {}; + sigact.sa_handler = sig_handler; + if (sigaction(SIGTERM, &sigact, &old_sigaction)) + { + SWSS_LOG_ERROR("failed to setup SIGTERM action handler"); + exit(EXIT_FAILURE); + } try { diff --git a/cfgmgr/tunnelmgr.cpp b/cfgmgr/tunnelmgr.cpp index 7f4dc4dd3d..a81438470f 100644 --- a/cfgmgr/tunnelmgr.cpp +++ b/cfgmgr/tunnelmgr.cpp @@ -9,6 +9,7 @@ #include "tokenize.h" #include "shellcmd.h" #include "exec.h" +#include "warm_restart.h" using namespace std; using namespace swss; @@ -107,7 +108,8 @@ static int cmdIpTunnelRouteDel(const std::string& pfx, std::string & res) TunnelMgr::TunnelMgr(DBConnector *cfgDb, DBConnector *appDb, const std::vector &tableNames) : Orch(cfgDb, tableNames), m_appIpInIpTunnelTable(appDb, APP_TUNNEL_DECAP_TABLE_NAME), - m_cfgPeerTable(cfgDb, CFG_PEER_SWITCH_TABLE_NAME) + m_cfgPeerTable(cfgDb, CFG_PEER_SWITCH_TABLE_NAME), + m_cfgTunnelTable(cfgDb, CFG_TUNNEL_TABLE_NAME) { std::vector peer_keys; m_cfgPeerTable.getKeys(peer_keys); 
@@ -126,6 +128,23 @@ TunnelMgr::TunnelMgr(DBConnector *cfgDb, DBConnector *appDb, const std::vector tunnel_keys; + m_cfgTunnelTable.getKeys(tunnel_keys); + + for (auto tunnel: tunnel_keys) + { + m_tunnelReplay.insert(tunnel); + } + if (m_tunnelReplay.empty()) + { + finalizeWarmReboot(); + } + + } + auto consumerStateTable = new swss::ConsumerStateTable(appDb, APP_TUNNEL_ROUTE_TABLE_NAME, TableConsumable::DEFAULT_POP_BATCH_SIZE, default_orch_pri); @@ -191,6 +210,11 @@ void TunnelMgr::doTask(Consumer &consumer) ++it; } } + + if (!replayDone && m_tunnelReplay.empty() && WarmStart::isWarmStart()) + { + finalizeWarmReboot(); + } } bool TunnelMgr::doTunnelTask(const KeyOpFieldsValuesTuple & t) @@ -230,8 +254,16 @@ bool TunnelMgr::doTunnelTask(const KeyOpFieldsValuesTuple & t) SWSS_LOG_NOTICE("Peer/Remote IP not configured"); } - m_appIpInIpTunnelTable.set(tunnelName, kfvFieldsValues(t)); + /* If the tunnel is already in hardware (i.e. present in the replay), + * don't try to create it again since it will cause an OA crash + * (warmboot case) + */ + if (m_tunnelReplay.find(tunnelName) == m_tunnelReplay.end()) + { + m_appIpInIpTunnelTable.set(tunnelName, kfvFieldsValues(t)); + } } + m_tunnelReplay.erase(tunnelName); m_tunnelCache[tunnelName] = tunInfo; } else @@ -356,3 +388,13 @@ bool TunnelMgr::configIpTunnel(const TunnelInfo& tunInfo) return true; } + + +void TunnelMgr::finalizeWarmReboot() +{ + replayDone = true; + WarmStart::setWarmStartState("tunnelmgrd", WarmStart::REPLAYED); + SWSS_LOG_NOTICE("tunnelmgrd warmstart state set to REPLAYED"); + WarmStart::setWarmStartState("tunnelmgrd", WarmStart::RECONCILED); + SWSS_LOG_NOTICE("tunnelmgrd warmstart state set to RECONCILED"); +} diff --git a/cfgmgr/tunnelmgr.h b/cfgmgr/tunnelmgr.h index e2b601abe9..53d2f27278 100644 --- a/cfgmgr/tunnelmgr.h +++ b/cfgmgr/tunnelmgr.h @@ -4,6 +4,8 @@ #include "producerstatetable.h" #include "orch.h" +#include + namespace swss { struct TunnelInfo @@ -28,12 +30,18 @@ class TunnelMgr : public 
Orch bool configIpTunnel(const TunnelInfo& info); + void finalizeWarmReboot(); + ProducerStateTable m_appIpInIpTunnelTable; Table m_cfgPeerTable; + Table m_cfgTunnelTable; std::map m_tunnelCache; std::map m_intfCache; std::string m_peerIp; + + std::set m_tunnelReplay; + bool replayDone = false; }; } diff --git a/cfgmgr/tunnelmgrd.cpp b/cfgmgr/tunnelmgrd.cpp index 0165eb94b5..69157ba051 100644 --- a/cfgmgr/tunnelmgrd.cpp +++ b/cfgmgr/tunnelmgrd.cpp @@ -11,6 +11,7 @@ #include "exec.h" #include "schema.h" #include "tunnelmgr.h" +#include "warm_restart.h" using namespace std; using namespace swss; @@ -18,26 +19,6 @@ using namespace swss; /* select() function timeout retry time, in millisecond */ #define SELECT_TIMEOUT 1000 -/* - * Following global variables are defined here for the purpose of - * using existing Orch class which is to be refactored soon to - * eliminate the direct exposure of the global variables. - * - * Once Orch class refactoring is done, these global variables - * should be removed from here. 
- */ -int gBatchSize = 0; -bool gSwssRecord = false; -bool gLogRotate = false; -ofstream gRecordOfs; -string gRecordFile; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; -/* Global database mutex */ -mutex gDbMutex; - int main(int argc, char **argv) { Logger::linkToDbNative("tunnelmgrd"); @@ -54,6 +35,9 @@ int main(int argc, char **argv) DBConnector cfgDb("CONFIG_DB", 0); DBConnector appDb("APPL_DB", 0); + WarmStart::initialize("tunnelmgrd", "swss"); + WarmStart::checkWarmStart("tunnelmgrd", "swss"); + TunnelMgr tunnelmgr(&cfgDb, &appDb, cfgTunTables); std::vector cfgOrchList = {&tunnelmgr}; diff --git a/cfgmgr/vlanmgr.cpp b/cfgmgr/vlanmgr.cpp index aa02377819..ee5b7a7067 100644 --- a/cfgmgr/vlanmgr.cpp +++ b/cfgmgr/vlanmgr.cpp @@ -134,6 +134,11 @@ bool VlanMgr::addHostVlan(int vlan_id) std::string res; EXEC_WITH_ERROR_THROW(cmds, res); + res.clear(); + const std::string echo_cmd = std::string("") + + ECHO_CMD + " 0 > /proc/sys/net/ipv4/conf/" + VLAN_PREFIX + std::to_string(vlan_id) + "/arp_evict_nocarrier"; + swss::exec(echo_cmd, res); + return true; } @@ -330,10 +335,10 @@ void VlanMgr::doVlanTask(Consumer &consumer) */ if (isVlanStateOk(key) && m_vlans.find(key) == m_vlans.end()) { + SWSS_LOG_DEBUG("%s already created", kfvKey(t).c_str()); m_vlans.insert(key); m_vlanReplay.erase(kfvKey(t)); it = consumer.m_toSync.erase(it); - SWSS_LOG_DEBUG("%s already created", kfvKey(t).c_str()); continue; } @@ -426,13 +431,13 @@ void VlanMgr::doVlanTask(Consumer &consumer) { SWSS_LOG_ERROR("%s doesn't exist", key.c_str()); } - SWSS_LOG_DEBUG("%s", (dumpTuple(consumer, t)).c_str()); + SWSS_LOG_DEBUG("%s", (consumer.dumpTuple(t)).c_str()); it = consumer.m_toSync.erase(it); } else { SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); - SWSS_LOG_DEBUG("%s", (dumpTuple(consumer, t)).c_str()); + SWSS_LOG_DEBUG("%s", (consumer.dumpTuple(t)).c_str()); it = 
consumer.m_toSync.erase(it); } } @@ -534,7 +539,7 @@ void VlanMgr::processUntaggedVlanMembers(string vlan, const string &members) fvVector.push_back(t); KeyOpFieldsValuesTuple tuple = make_tuple(member_key, SET_COMMAND, fvVector); consumer.addToSync(tuple); - SWSS_LOG_DEBUG("%s", (dumpTuple(consumer, tuple)).c_str()); + SWSS_LOG_DEBUG("%s", (consumer.dumpTuple(tuple)).c_str()); } /* * There is pending task from consumer pipe, in this case just skip it. @@ -654,7 +659,7 @@ void VlanMgr::doVlanMemberTask(Consumer &consumer) { SWSS_LOG_DEBUG("%s doesn't exist", kfvKey(t).c_str()); } - SWSS_LOG_DEBUG("%s", (dumpTuple(consumer, t)).c_str()); + SWSS_LOG_DEBUG("%s", (consumer.dumpTuple(t)).c_str()); } else { diff --git a/cfgmgr/vlanmgrd.cpp b/cfgmgr/vlanmgrd.cpp index b69dc78122..84bc19cf08 100644 --- a/cfgmgr/vlanmgrd.cpp +++ b/cfgmgr/vlanmgrd.cpp @@ -23,26 +23,6 @@ using namespace swss; MacAddress gMacAddress; -/* - * Following global variables are defined here for the purpose of - * using existing Orch class which is to be refactored soon to - * eliminate the direct exposure of the global variables. - * - * Once Orch class refactoring is done, these global variables - * should be removed from here. 
- */ -int gBatchSize = 0; -bool gSwssRecord = false; -bool gLogRotate = false; -ofstream gRecordOfs; -string gRecordFile; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; -/* Global database mutex */ -mutex gDbMutex; - int main(int argc, char **argv) { Logger::linkToDbNative("vlanmgrd"); diff --git a/cfgmgr/vrfmgrd.cpp b/cfgmgr/vrfmgrd.cpp index 735e59191d..3dbc7e447e 100644 --- a/cfgmgr/vrfmgrd.cpp +++ b/cfgmgr/vrfmgrd.cpp @@ -16,26 +16,6 @@ using namespace swss; /* select() function timeout retry time, in millisecond */ #define SELECT_TIMEOUT 1000 -/* - * Following global variables are defined here for the purpose of - * using existing Orch class which is to be refactored soon to - * eliminate the direct exposure of the global variables. - * - * Once Orch class refactoring is done, these global variables - * should be removed from here. - */ -int gBatchSize = 0; -bool gSwssRecord = false; -bool gLogRotate = false; -ofstream gRecordOfs; -string gRecordFile; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; -/* Global database mutex */ -mutex gDbMutex; - int main(int argc, char **argv) { Logger::linkToDbNative("vrfmgrd"); @@ -64,7 +44,6 @@ int main(int argc, char **argv) isWarmStart = WarmStart::isWarmStart(); - // TODO: add tables in stateDB which interface depends on to monitor list std::vector cfgOrchList = {&vrfmgr}; swss::Select s; diff --git a/cfgmgr/vxlanmgr.cpp b/cfgmgr/vxlanmgr.cpp index e45c593803..4d41819053 100644 --- a/cfgmgr/vxlanmgr.cpp +++ b/cfgmgr/vxlanmgr.cpp @@ -392,8 +392,8 @@ bool VxlanMgr::doVxlanDeleteTask(const KeyOpFieldsValuesTuple & t) SWSS_LOG_WARN("Vxlan %s hasn't been created ", info.m_vxlan.c_str()); } - m_vnetCache.erase(it); SWSS_LOG_INFO("Delete vxlan %s", info.m_vxlan.c_str()); + m_vnetCache.erase(it); return 
true; } @@ -643,6 +643,7 @@ bool VxlanMgr::doVxlanTunnelMapDeleteTask(const KeyOpFieldsValuesTuple & t) vxlan_dev_name = map_entry.vxlan_dev_name; vlan = map_entry.vlan; vni_id = map_entry.vni_id; + downVxlanNetdevice(vxlan_dev_name); deleteVxlanNetdevice(vxlan_dev_name); m_vxlanTunnelMapCache.erase(vxlanTunnelMapName); @@ -906,11 +907,11 @@ void VxlanMgr::createAppDBTunnelMapTable(const KeyOpFieldsValuesTuple & t) std::replace(vxlanTunnelMapName.begin(), vxlanTunnelMapName.end(), config_db_key_delimiter, delimiter); /* Case 1: Entry exist - Erase from cache & return - * Case 2: Entry does not exist - Write to AppDB + * Case 2: Entry does not exist - Write to AppDB * Case 3: Entry exist but modified - Not taken care. Will address later */ if (m_in_reconcile) - { + { auto it = find(m_appVxlanTunnelMapKeysRecon.begin(), m_appVxlanTunnelMapKeysRecon.end(), vxlanTunnelMapName); if (it != m_appVxlanTunnelMapKeysRecon.end()) { @@ -939,28 +940,28 @@ void VxlanMgr::delAppDBTunnelMapTable(std::string vxlanTunnelMapName) m_appVxlanTunnelMapTable.del(vxlanTunnelMapName); } -int VxlanMgr::createVxlanNetdevice(std::string vxlanTunnelName, std::string vni_id, - std::string src_ip, std::string dst_ip, +int VxlanMgr::createVxlanNetdevice(std::string vxlanTunnelName, std::string vni_id, + std::string src_ip, std::string dst_ip, std::string vlan_id) { std::string res, cmds; - std::string link_add_cmd, link_set_master_cmd, link_up_cmd; + std::string link_add_cmd, link_set_master_cmd, link_up_cmd; std::string bridge_add_cmd, bridge_untagged_add_cmd, bridge_del_vid_cmd; std::string vxlan_dev_name; - vxlan_dev_name = std::string("") + std::string(vxlanTunnelName) + "-" + + vxlan_dev_name = std::string("") + std::string(vxlanTunnelName) + "-" + std::string(vlan_id); SWSS_LOG_INFO("Kernel tnl_name: %s vni_id: %s src_ip: %s dst_ip:%s vlan_id: %s", - vxlanTunnelName.c_str(), vni_id.c_str(), src_ip.c_str(), dst_ip.c_str(), + vxlanTunnelName.c_str(), vni_id.c_str(), src_ip.c_str(), 
dst_ip.c_str(), vlan_id.c_str()); // Case 1: Entry exist - Erase from cache & return // Case 2: Entry does not exist - Create netDevice in Kernel // Case 3: Entry exist but modified - Not taken care. Will address later - + if (m_in_reconcile) - { + { auto it = m_vxlanNetDevices.find(vxlan_dev_name); if (it != m_vxlanNetDevices.end()) { @@ -1024,6 +1025,15 @@ int VxlanMgr::createVxlanNetdevice(std::string vxlanTunnelName, std::string vni_ return swss::exec(cmds,res); } +int VxlanMgr::downVxlanNetdevice(std::string vxlan_dev_name) +{ + int ret = 0; + std::string res; + const std::string cmd = std::string("") + IP_CMD + " link set dev " + vxlan_dev_name + " down"; + exec(cmd, res); + return ret; +} + int VxlanMgr::deleteVxlanNetdevice(std::string vxlan_dev_name) { std::string res; @@ -1031,29 +1041,59 @@ int VxlanMgr::deleteVxlanNetdevice(std::string vxlan_dev_name) return swss::exec(cmd, res); } +std::vector VxlanMgr::parseNetDev(const string& stdout){ + std::vector netdevs; + std::regex device_name_pattern("^\\d+:\\s+([^:]+)"); + std::smatch match_result; + auto lines = tokenize(stdout, '\n'); + for (const std::string & line : lines) + { + SWSS_LOG_DEBUG("line : %s\n",line.c_str()); + if (!std::regex_search(line, match_result, device_name_pattern)) + { + continue; + } + std::string dev_name = match_result[1]; + netdevs.push_back(dev_name); + } + return netdevs; +} + void VxlanMgr::getAllVxlanNetDevices() { std::string stdout; - const std::string cmd = std::string("") + IP_CMD + " link show type vxlan"; + + // Get VxLan Netdev Interfaces + std::string cmd = std::string("") + IP_CMD + " link show type vxlan"; int ret = swss::exec(cmd, stdout); if (ret != 0) { - SWSS_LOG_ERROR("Cannot get devices by command : %s", cmd.c_str()); - return; + SWSS_LOG_ERROR("Cannot get vxlan devices by command : %s", cmd.c_str()); + stdout.clear(); } - std::regex device_name_pattern("^\\d+:\\s+([^:]+)"); - std::smatch match_result; - auto lines = tokenize(stdout, '\n'); - for (const 
std::string & line : lines) + std::vector netdevs = parseNetDev(stdout); + for (auto netdev : netdevs) { - SWSS_LOG_INFO("line : %s\n",line.c_str()); - if (!std::regex_search(line, match_result, device_name_pattern)) + m_vxlanNetDevices[netdev] = VXLAN; + } + + // Get VxLanIf Netdev Interfaces + cmd = std::string("") + IP_CMD + " link show type bridge"; + ret = swss::exec(cmd, stdout); + if (ret != 0) + { + SWSS_LOG_ERROR("Cannot get vxlanIf devices by command : %s", cmd.c_str()); + stdout.clear(); + } + netdevs = parseNetDev(stdout); + for (auto netdev : netdevs) + { + if (netdev.find(VXLAN_IF_NAME_PREFIX) == 0) { - continue; + m_vxlanNetDevices[netdev] = VXLAN_IF; } - std::string vxlan_dev_name = match_result[1]; - m_vxlanNetDevices[vxlan_dev_name] = vxlan_dev_name; } + return; } @@ -1150,8 +1190,22 @@ void VxlanMgr::clearAllVxlanDevices() { for (auto it = m_vxlanNetDevices.begin(); it != m_vxlanNetDevices.end();) { - SWSS_LOG_INFO("Deleting Stale NetDevice vxlandevname %s\n", (it->first).c_str()); - deleteVxlanNetdevice(it->first); + std::string netdev_name = it->first; + std::string netdev_type = it->second; + SWSS_LOG_INFO("Deleting Stale NetDevice %s, type: %s\n", netdev_name.c_str(), netdev_type.c_str()); + VxlanInfo info; + std::string res; + if (netdev_type.compare(VXLAN)) + { + info.m_vxlan = netdev_name; + downVxlanNetdevice(netdev_name); + cmdDeleteVxlan(info, res); + } + else if(netdev_type.compare(VXLAN_IF)) + { + info.m_vxlanIf = netdev_name; + cmdDeleteVxlanIf(info, res); + } it = m_vxlanNetDevices.erase(it); } } diff --git a/cfgmgr/vxlanmgr.h b/cfgmgr/vxlanmgr.h index 1988e253ae..68d6250fe5 100644 --- a/cfgmgr/vxlanmgr.h +++ b/cfgmgr/vxlanmgr.h @@ -6,6 +6,7 @@ #include "orch.h" #include +#include #include #include #include @@ -69,7 +70,9 @@ class VxlanMgr : public Orch void delAppDBTunnelMapTable(std::string vxlanTunnelMapName); int createVxlanNetdevice(std::string vxlanTunnelName, std::string vni_id, std::string src_ip, std::string dst_ip, 
std::string vlan_id); + int downVxlanNetdevice(std::string vxlan_dev_name); int deleteVxlanNetdevice(std::string vxlan_dev_name); + std::vector parseNetDev(const std::string& stdout); void getAllVxlanNetDevices(); /* diff --git a/cfgmgr/vxlanmgrd.cpp b/cfgmgr/vxlanmgrd.cpp index d47893a614..c992233c86 100644 --- a/cfgmgr/vxlanmgrd.cpp +++ b/cfgmgr/vxlanmgrd.cpp @@ -21,25 +21,6 @@ using namespace swss; /* select() function timeout retry time, in millisecond */ #define SELECT_TIMEOUT 1000 -/* - * Following global variables are defined here for the purpose of - * using existing Orch class which is to be refactored soon to - * eliminate the direct exposure of the global variables. - * - * Once Orch class refactoring is done, these global variables - * should be removed from here. - */ -int gBatchSize = 0; -bool gSwssRecord = false; -bool gLogRotate = false; -ofstream gRecordOfs; -string gRecordFile; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; -/* Global database mutex */ -mutex gDbMutex; MacAddress gMacAddress; int main(int argc, char **argv) diff --git a/configure.ac b/configure.ac index 5e5ce44171..5efe0a67bd 100644 --- a/configure.ac +++ b/configure.ac @@ -121,6 +121,26 @@ fi AM_CONDITIONAL(GCOV_ENABLED, test x$enable_gcov = xyes) AC_MSG_RESULT($enable_gcov) +AC_ARG_ENABLE(asan, +[ --enable-asan Compile with address sanitizer], +[case "${enableval}" in + yes) asan_enabled=true ;; + no) asan_enabled=false ;; + *) AC_MSG_ERROR(bad value ${enableval} for --enable-asan) ;; +esac],[asan_enabled=false]) + +if test "x$asan_enabled" = "xtrue"; then + CFLAGS_ASAN+=" -fsanitize=address" + CFLAGS_ASAN+=" -DASAN_ENABLED" + CFLAGS_ASAN+=" -ggdb -fno-omit-frame-pointer -U_FORTIFY_SOURCE" + AC_SUBST(CFLAGS_ASAN) + + LDFLAGS_ASAN+=" -lasan" + AC_SUBST(LDFLAGS_ASAN) +fi + +AM_CONDITIONAL(ASAN_ENABLED, test x$asan_enabled = xtrue) + AC_SUBST(CFLAGS_COMMON) 
AC_CONFIG_FILES([ diff --git a/debian/rules b/debian/rules index a8a8b835fb..42e82b2f30 100755 --- a/debian/rules +++ b/debian/rules @@ -27,11 +27,18 @@ include /usr/share/dpkg/default.mk # dh_auto_configure -- \ # -DCMAKE_LIBRARY_PATH=$(DEB_HOST_MULTIARCH) +configure_opts = +ifeq ($(ENABLE_ASAN), y) + configure_opts += --enable-asan +endif + ifeq ($(ENABLE_GCOV), y) -override_dh_auto_configure: - dh_auto_configure -- --enable-gcov + configure_opts += --enable-gcov CFLAGS="-g -O0" CXXFLAGS="-g -O0" endif +override_dh_auto_configure: + dh_auto_configure -- $(configure_opts) + override_dh_auto_install: dh_auto_install --destdir=debian/swss ifeq ($(ENABLE_GCOV), y) diff --git a/doc/Configuration.md b/doc/Configuration.md deleted file mode 100644 index d4494283fa..0000000000 --- a/doc/Configuration.md +++ /dev/null @@ -1,1472 +0,0 @@ -# SONiC Configuration Database Manual - -Table of Contents -================= - - * [Introduction](#introduction) - * [Configuration](#configuration) - * [Config Load and Save](#config-load-and-save) - * [Incremental Configuration](#incremental-configuration) - * [Redis and Json Schema](#redis-and-json-schema) - * [ACL and Mirroring](#acl-and-mirroring) - * [BGP Sessions](#bgp-sessions) - * [BUFFER_PG](#buffer_pg) - * [Buffer pool](#buffer-pool) - * [Buffer profile](#buffer-profile) - * [Buffer queue](#buffer-queue) - * [Buffer port ingress profile list](#buffer-port-ingress-profile-list) - * [Buffer port egress profile list](#buffer-port-egress-profile-list) - * [Cable length](#cable-length) - * [COPP_TABLE](#copp_table) - * [CRM](#crm) - * [Data Plane L3 Interfaces](#data-plane-l3-interfaces) - * [DEFAULT_LOSSLESS_BUFFER_PARAMETER](#DEFAULT_LOSSLESS_BUFFER_PARAMETER) - * [Device Metadata](#device-metadata) - * [Device neighbor metada](#device-neighbor-metada) - * [DSCP_TO_TC_MAP](#dscp_to_tc_map) - * [FLEX_COUNTER_TABLE](#flex_counter_table) - * [L2 Neighbors](#l2-neighbors) - * [Loopback Interface](#loopback-interface) - * 
[LOSSLESS_TRAFFIC_PATTERN](#LOSSLESS_TRAFFIC_PATTERN) - * [Management Interface](#management-interface) - * [Management port](#management-port) - * [Management VRF](#management-vrf) - * [MAP_PFC_PRIORITY_TO_QUEUE](#map_pfc_priority_to_queue) - * [NTP Global Configuration](#ntp-global-configuration) - * [NTP and SYSLOG servers](#ntp-and-syslog-servers) - * [Port](#port) - * [Port Channel](#port-channel) - * [Portchannel member](#portchannel-member) - * [Scheduler](#scheduler) - * [Port QoS Map](#port-qos-map) - * [Queue](#queue) - * [Tacplus Server](#tacplus-server) - * [TC to Priority group map](#tc-to-priority-group-map) - * [TC to Queue map](#tc-to-queue-map) - * [Versions](#versions) - * [VLAN](#vlan) - * [VLAN_MEMBER](#vlan_member) - * [Virtual router](#virtual-router) - * [WRED_PROFILE](#wred_profile) - * [For Developers](#for-developers) - * [Generating Application Config by Jinja2 Template](#generating-application-config-by-jinja2-template) - * [Incremental Configuration by Subscribing to ConfigDB](#incremental-configuration-by-subscribing-to-configdb) - - - -# Introduction -This document lists the configuration commands schema applied in the SONiC eco system. All these commands find relevance in collecting system information, analysis and even for trouble shooting. All the commands are categorized under relevant topics with corresponding examples. - -# Configuration - -SONiC is managing configuration in a single source of truth - a redisDB -instance that we refer as ConfigDB. Applications subscribe to ConfigDB -and generate their running configuration correspondingly. - -(Before Sep 2017, we were using an XML file named minigraph.xml to -configure SONiC devices. For historical documentation, please refer to -[Configuration with -Minigraph](https://github.com/Azure/SONiC/wiki/Configuration-with-Minigraph-(~Sep-2017))) - -# **Config Load and Save** - -In current version of SONiC, ConfigDB is implemented as database 4 of -local redis. 
When system boots, configurations will be loaded from -/etc/sonic/config_db.json file into redis. Please note that ConfigDB -content won't be written back into /etc/sonic/config_db.json file -automatically. In order to do that, a config save command need to be -manually executed from CLI. Similarly, config load will trigger a force -load of json file into DB. Generally, content in -/etc/sonic/config_db.json can be considered as starting config, and -content in redisDB running config. - -We keep a way to load configuration from minigraph and write into -ConfigDB for backward compatibility. To do that, run `config -load_minigraph`. - -### Incremental Configuration - -The design of ConfigDB supports incremental configuration - application -could subscribe to changes in ConfigDB and response correspondingly. -However, this feature is not implemented by all applications yet. By Sep -2017 now, the only application that supports incremental configuration -is BGP (docker-fpm-quagga). For other applications, a manual restart is -required after configuration changes in ConfigDB. - -# **Redis and Json Schema** - -ConfigDB uses a table-object schema that is similar with -[AppDB](https://github.com/Azure/sonic-swss/blob/4c56d23b9ff4940bdf576cf7c9e5aa77adcbbdcc/doc/swss-schema.md), -and `config_db.json` is a straight-forward serialization of DB. As an -example, the following fragments could be BGP-related configuration in -redis and json, correspondingly: - - -***Redis format*** -``` -127.0.0.1:6379[4]> keys BGP_NEIGHBOR:* - -1) "BGP_NEIGHBOR:10.0.0.31" -2) "BGP_NEIGHBOR:10.0.0.39" -3) "BGP_NEIGHBOR:10.0.0.11" -4) "BGP_NEIGHBOR:10.0.0.7" - -... 
- -127.0.0.1:6379[4]> hgetall BGP_NEIGHBOR:10.0.0.3 - -1) "admin_status" -2) "up" -3) "peer_addr" -4) "10.0.0.2" -5) "asn" -6) "65200" -7) "name" -8) "ARISTA07T2" -``` - -***Json format*** -``` -"BGP_NEIGHBOR": { - "10.0.0.57": { - "rrclient": "0", - "name": "ARISTA01T1", - "local_addr": "10.0.0.56", - "nhopself": "0", - "holdtime": "10", - "asn": "64600", - "keepalive": "3" - }, - "10.0.0.59": { - "rrclient": "0", - "name": "ARISTA02T1", - "local_addr": "10.0.0.58", - "nhopself": "0", - "holdtime": "10", - "asn": "64600", - "keepalive": "3" - }, -} -``` - -Full sample config_db.json files are availables at -[here](https://github.com/Azure/SONiC/blob/gh-pages/doc/config_db.json) -and -[here](https://github.com/Azure/SONiC/blob/gh-pages/doc/config_db_t0.json). - - -### ACL and Mirroring - -ACL and mirroring related configuration are defined in -**MIRROR_SESSION**, **ACL_TABLE** and **ACL_RULE** tables. Those -tables are in progress of migrating from APPDB. Please refer to their -schema in APPDB -[here](https://github.com/Azure/sonic-swss/blob/4c56d23b9ff4940bdf576cf7c9e5aa77adcbbdcc/doc/swss-schema.md) -and migration plan -[here](https://github.com/Azure/SONiC/wiki/ACL-Configuration-Requirement-Description). 
- -``` -{ -"MIRROR_SESSION": { - "everflow0": { - "src_ip": "10.1.0.32", - "dst_ip": "2.2.2.2" - } - }, - -"ACL_TABLE": { - "DATAACL": { - "policy_desc" : "data_acl", - "type": "l3", - "ports": [ - "Ethernet0", - "Ethernet4", - "Ethernet8", - "Ethernet12" - ] - } - } -} -``` - -***Below ACL table added as per the mail*** -``` -{ -"ACL_TABLE": { - "aaa": { - "type": "L3", - "ports": "Ethernet0" - } - }, -"ACL_RULE": { - "aaa|rule_0": { - "PRIORITY": "55", - "PACKET_ACTION": "DROP", - "L4_SRC_PORT": "0" - }, - "aaa|rule_1": { - "PRIORITY": "55", - "PACKET_ACTION": "DROP", - "L4_SRC_PORT": "1" - } - } -} -``` - -***Below ACL table added by comparig minigraph.xml & config_db.json*** - -``` -{ -"ACL_TABLE": { - "EVERFLOW": { - "type": "MIRROR", - "policy_desc": "EVERFLOW", - "ports": [ - "PortChannel0001", - "PortChannel0002", - "PortChannel0003", - "PortChannel0004" - ] - }, - "EVERFLOWV6": { - "type": "MIRRORV6", - "policy_desc": "EVERFLOWV6", - "ports": [ - "PortChannel0001", - "PortChannel0002", - "PortChannel0003", - "PortChannel0004" - ] - }, - "SNMP_ACL": { - "services": [ - "SNMP" - ], - "type": "CTRLPLANE", - "policy_desc": "SNMP_ACL" - }, - "SSH_ONLY": { - "services": [ - "SSH" - ], - "type": "CTRLPLANE", - "policy_desc": "SSH_ONLY" - } - }, - -"ACL_RULE": { - "SNMP_ACL|DEFAULT_RULE": { - "PRIORITY": "1", - "PACKET_ACTION": "DROP", - "ETHER_TYPE": "2048" - }, - "SNMP_ACL|RULE_1": { - "PRIORITY": "9999", - "PACKET_ACTION": "ACCEPT", - "SRC_IP": "1.1.1.1/32", - "IP_PROTOCOL": "17" - }, - "SNMP_ACL|RULE_2": { - "PRIORITY": "9998", - "PACKET_ACTION": "ACCEPT", - "SRC_IP": "2.2.2.2/32", - "IP_PROTOCOL": "17" - }, - "SSH_ONLY|DEFAULT_RULE": { - "PRIORITY": "1", - "PACKET_ACTION": "DROP", - "ETHER_TYPE": "2048" - }, - "SSH_ONLY|RULE_1": { - "PRIORITY": "9999", - "PACKET_ACTION": "ACCEPT", - "SRC_IP": "4.4.4.4/8", - "IP_PROTOCOL": "6" - } - } -} - -``` - -***ACL table type configuration example*** -``` -{ - "ACL_TABLE_TYPE": { - "CUSTOM_L3": { - "MATCHES": [ - 
"IN_PORTS", - "OUT_PORTS", - "SRC_IP" - ], - "ACTIONS": [ - "PACKET_ACTION", - "MIRROR_INGRESS_ACTION" - ], - "BIND_POINTS": [ - "PORT", - "LAG" - ] - } - }, - "ACL_TABLE": { - "DATAACL": { - "STAGE": "INGRESS", - "TYPE": "CUSTOM_L3", - "PORTS": [ - "Ethernet0", - "PortChannel1" - ] - } - }, - "ACL_RULE": { - "DATAACL|RULE0": { - "PRIORITY": "999", - "PACKET_ACTION": "DROP", - "SRC_IP": "1.1.1.1/32", - } - } -} -``` - -### BGP Sessions - -BGP session configuration is defined in **BGP_NEIGHBOR** table. BGP -neighbor address is used as key of bgp neighbor objects. Object -attributes include remote AS number, neighbor router name, and local -peering address. Dynamic neighbor is also supported by defining peer -group name and IP ranges in **BGP_PEER_RANGE** table. - -``` -{ -"BGP_NEIGHBOR": { - "10.0.0.61": { - "local_addr": "10.0.0.60", - "asn": 64015, - "name": "ARISTA15T0" - }, - "10.0.0.49": { - "local_addr": "10.0.0.48", - "asn": 64009, - "name": "ARISTA09T0" - }, - - "10.0.0.63": { - "rrclient": "0", - "name": "ARISTA04T1", - "local_addr": "10.0.0.62", - "nhopself": "0", - "holdtime": "10", - "asn": "64600", - "keepalive": "3" - } - -"BGP_PEER_RANGE": { - "BGPSLBPassive": { - "name": "BGPSLBPassive", - "ip_range": [ - "10.250.0.0/27" - ] - }, - "BGPVac": { - "name": "BGPVac", - "ip_range": [ - "10.2.0.0/16" - ] - } - } -} -``` - -### BUFFER_PG - -When the system is running in traditional buffer model, profiles needs to explicitly configured: - -``` -{ -"BUFFER_PG": { - "Ethernet0|3-4": { - "profile": "pg_lossless_40000_5m_profile" - }, - "Ethernet1|3-4": { - "profile": "pg_lossless_40000_5m_profile" - }, - "Ethernet2|3-4": { - "profile": "pg_lossless_40000_5m_profile" - } - } -} - -``` - -When the system is running in dynamic buffer model, profiles can be: - - - either calculated dynamically according to ports' configuration and just configured as "NULL"; - - or configured explicitly. 
- -``` -{ -"BUFFER_PG": { - "Ethernet0|3-4": { - "profile": "NULL" - }, - "Ethernet1|3-4": { - "profile": "NULL" - }, - "Ethernet2|3-4": { - "profile": "static_profile" - } - } -} - -``` - -### Buffer pool - -When the system is running in traditional buffer model, the size of all of the buffer pools and xoff of ingress_lossless_pool need to be configured explicitly. - -``` -{ -"BUFFER_POOL": { - "egress_lossless_pool": { - "type": "egress", - "mode": "static", - "size": "15982720" - }, - "egress_lossy_pool": { - "type": "egress", - "mode": "dynamic", - "size": "9243812" - }, - "ingress_lossless_pool": { - "xoff": "4194112", - "type": "ingress", - "mode": "dynamic", - "size": "10875072" - } - } -} - -``` - -When the system is running in dynamic buffer model, the size of some of the buffer pools can be omitted and will be dynamically calculated. - -``` -{ -"BUFFER_POOL": { - "egress_lossless_pool": { - "type": "egress", - "mode": "static", - "size": "15982720" - }, - "egress_lossy_pool": { - "type": "egress", - "mode": "dynamic", - }, - "ingress_lossless_pool": { - "type": "ingress", - "mode": "dynamic", - } - } -} - -``` - - -### Buffer profile - -``` -{ -"BUFFER_PROFILE": { - "egress_lossless_profile": { - "static_th": "3995680", - "pool": "egress_lossless_pool", - "size": "1518" - }, - "egress_lossy_profile": { - "dynamic_th": "3", - "pool": "egress_lossy_pool", - "size": "1518" - }, - "ingress_lossy_profile": { - "dynamic_th": "3", - "pool": "ingress_lossless_pool", - "size": "0" - }, - "pg_lossless_40000_5m_profile": { - "xon_offset": "2288", - "dynamic_th": "-3", - "xon": "2288", - "xoff": "66560", - "pool": "ingress_lossless_pool", - "size": "1248" - }, - "pg_lossless_40000_40m_profile": { - "xon_offset": "2288", - "dynamic_th": "-3", - "xon": "2288", - "xoff": "71552", - "pool": "ingress_lossless_pool", - "size": "1248" - } - } -} - -``` - -When the system is running in dynamic buffer model and the headroom_type is dynamic, only dynamic_th needs to be 
configured and rest of fields can be omitted. -This kind of profiles will be handled by buffer manager and won't be applied to SAI. - -``` -{ - { - "non_default_dynamic_th_profile": { - "dynamic_th": 1, - "headroom_type": "dynamic" - } - } -} -``` - -### Buffer queue - -``` -{ -"BUFFER_QUEUE": { - "Ethernet50,Ethernet52,Ethernet54,Ethernet56|0-2": { - "profile": "egress_lossy_profile" - }, - "Ethernet50,Ethernet52,Ethernet54,Ethernet56|3-4": { - "profile": "egress_lossless_profile" - }, - "Ethernet50,Ethernet52,Ethernet54,Ethernet56|5-6": { - "profile": "egress_lossy_profile" - } - } -} - -``` - -### Buffer port ingress profile list - -``` -{ -"BUFFER_PORT_INGRESS_PROFILE_LIST": { - "Ethernet50": { - "profile_list": "ingress_lossy_profile,ingress_lossless_profile" - }, - "Ethernet52": { - "profile_list": "ingress_lossy_profile,ingress_lossless_profile" - }, - "Ethernet56": { - "profile_list": "ingress_lossy_profile,ingress_lossless_profile" - } - } -} - -``` - -### Buffer port egress profile list - -``` -{ -"BUFFER_PORT_EGRESS_PROFILE_LIST": { - "Ethernet50": { - "profile_list": "egress_lossy_profile,egress_lossless_profile" - }, - "Ethernet52": { - "profile_list": "egress_lossy_profile,egress_lossless_profile" - }, - "Ethernet56": { - "profile_list": "egress_lossy_profile,egress_lossless_profile" - } - } -} - -``` - -### Cable length - -``` -{ -"CABLE_LENGTH": { - "AZURE": { - "Ethernet8": "5m", - "Ethernet9": "5m", - "Ethernet2": "5m", - "Ethernet58": "5m", - "Ethernet59": "5m", - "Ethernet50": "40m", - "Ethernet51": "5m", - "Ethernet52": "40m", - "Ethernet53": "5m", - "Ethernet54": "40m", - "Ethernet55": "5m", - "Ethernet56": "40m" - } - } -} - -``` - -### COPP_TABLE - -``` -{ -"COPP_TABLE": { - "default": { - "cbs": "600", - "cir": "600", - "meter_type": "packets", - "mode": "sr_tcm", - "queue": "0", - "red_action": "drop" - }, - - "trap.group.arp": { - "cbs": "600", - "cir": "600", - "meter_type": "packets", - "mode": "sr_tcm", - "queue": "4", - "red_action": 
"drop", - "trap_action": "trap", - "trap_ids": "arp_req,arp_resp,neigh_discovery", - "trap_priority": "4" - }, - - "trap.group.lldp.dhcp.udld": { - "queue": "4", - "trap_action": "trap", - "trap_ids": "lldp,dhcp,udld", - "trap_priority": "4" - }, - - "trap.group.bgp.lacp": { - "queue": "4", - "trap_action": "trap", - "trap_ids": "bgp,bgpv6,lacp", - "trap_priority": "4" - }, - - "trap.group.ip2me": { - "cbs": "600", - "cir": "600", - "meter_type": "packets", - "mode": "sr_tcm", - "queue": "1", - "red_action": "drop", - "trap_action": "trap", - "trap_ids": "ip2me", - "trap_priority": "1" - } - } -} -``` - -### CRM - -``` -{ -"CRM": { - "Config": { - "acl_table_threshold_type": "percentage", - "nexthop_group_threshold_type": "percentage", - "fdb_entry_high_threshold": "85", - "acl_entry_threshold_type": "percentage", - "ipv6_neighbor_low_threshold": "70", - "nexthop_group_member_low_threshold": "70", - "acl_group_high_threshold": "85", - "ipv4_route_high_threshold": "85", - "acl_counter_high_threshold": "85", - "ipv4_route_low_threshold": "70", - "ipv4_route_threshold_type": "percentage", - "ipv4_neighbor_low_threshold": "70", - "acl_group_threshold_type": "percentage", - "ipv4_nexthop_high_threshold": "85", - "ipv6_route_threshold_type": "percentage", - "snat_entry_threshold_type": "percentage", - "snat_entry_high_threshold": "85", - "snat_entry_low_threshold": "70", - "dnat_entry_threshold_type": "percentage", - "dnat_entry_high_threshold": "85", - "dnat_entry_low_threshold": "70", - "ipmc_entry_threshold_type": "percentage", - "ipmc_entry_high_threshold": "85", - "ipmc_entry_low_threshold": "70" - } - } -} - -``` - -### Data Plane L3 Interfaces - -IP configuration for data plane are defined in **INTERFACE**, -**PORTCHANNEL_INTERFACE**, and **VLAN_INTERFACE** table. The objects -in all three tables have the interface (could be physical port, port -channel, or vlan) that IP address is attached to as first-level key, and -IP prefix as second-level key. 
IP interface objects don't have any -attributes. - -``` -{ -"INTERFACE": { - "Ethernet0|10.0.0.0/31": {}, - "Ethernet4|10.0.0.2/31": {}, - "Ethernet8|10.0.0.4/31": {} - ... - }, - -"PORTCHANNEL_INTERFACE": { - "PortChannel01|10.0.0.56/31": {}, - "PortChannel01|FC00::71/126": {}, - "PortChannel02|10.0.0.58/31": {}, - "PortChannel02|FC00::75/126": {} - ... - }, -"VLAN_INTERFACE": { - "Vlan1000|192.168.0.1/27": {} - } -} - -``` - - -### DEFAULT_LOSSLESS_BUFFER_PARAMETER - -This table stores the default lossless buffer parameters for dynamic buffer calculation. - -``` -{ - "DEFAULT_LOSSLESS_BUFFER_PARAMETER": { - "AZURE": { - "default_dynamic_th": "0", - "over_subscribe_ratio": "2" - } - } -} -``` - -### Device Metadata - -The **DEVICE_METADATA** table contains only one object named -*localhost*. In this table the device metadata such as hostname, hwsku, -deployment envionment id and deployment type are specified. BGP local AS -number is also specified in this table as current only single BGP -instance is supported in SONiC. 
- -``` -{ -"DEVICE_METADATA": { - "localhost": { - "hwsku": "Force10-S6100", - "default_bgp_status": "up", - "docker_routing_config_mode": "unified", - "hostname": "sonic-s6100-01", - "platform": "x86_64-dell_s6100_c2538-r0", - "mac": "4c:76:25:f4:70:82", - "default_pfcwd_status": "disable", - "bgp_asn": "65100", - "deployment_id": "1", - "type": "ToRRouter", - "buffer_model": "traditional" - } - } -} - -``` - - -### Device neighbor metada - -``` -{ -"DEVICE_NEIGHBOR_METADATA": { - "ARISTA01T1": { - "lo_addr": "None", - "mgmt_addr": "10.11.150.45", - "hwsku": "Arista-VM", - "type": "LeafRouter" - }, - "ARISTA02T1": { - "lo_addr": "None", - "mgmt_addr": "10.11.150.46", - "hwsku": "Arista-VM", - "type": "LeafRouter" - } - } -} - -``` - - -### DSCP_TO_TC_MAP -``` -{ -"DSCP_TO_TC_MAP": { - "AZURE": { - "1": "1", - "0": "1", - "3": "3", - "2": "1", - "5": "2", - "4": "4", - "7": "1", - "6": "1", - "9": "1", - "8": "0" - } - } -} - -``` - - -### MPLS_TC_TO_TC_MAP -``` -{ -"MPLS_TC_TO_TC_MAP": { - "AZURE": { - "0": "0", - "1": "1", - "2": "1", - "3": "2", - "4": "2", - "5": "3", - "6": "3", - "7": "4" - } - } -} - -``` - -### FLEX_COUNTER_TABLE - -``` -{ -"FLEX_COUNTER_TABLE": { - "PFCWD": { - "FLEX_COUNTER_STATUS": "enable" - }, - "PORT": { - "FLEX_COUNTER_STATUS": "enable" - }, - "QUEUE": { - "FLEX_COUNTER_STATUS": "enable" - } - } -} - -``` - - -### L2 Neighbors - -The L2 neighbor and connection information can be configured in -**DEVICE_NEIGHBOR** table. Those information are used mainly for LLDP. -While mandatory fields include neighbor name acting as object key and -remote port / local port information in attributes, optional information -about neighbor device such as device type, hwsku, management address and -loopback address can also be defined. 
- -``` -{ -"DEVICE_NEIGHBOR": { - "ARISTA04T1": { - "mgmt_addr": "10.20.0.163", - "hwsku": "Arista", - "lo_addr": null, - "local_port": "Ethernet124", - "type": "LeafRouter", - "port": "Ethernet1" - }, - "ARISTA03T1": { - "mgmt_addr": "10.20.0.162", - "hwsku": "Arista", - "lo_addr": null, - "local_port": "Ethernet120", - "type": "LeafRouter", - "port": "Ethernet1" - }, - "ARISTA02T1": { - "mgmt_addr": "10.20.0.161", - "hwsku": "Arista", - "lo_addr": null, - "local_port": "Ethernet116", - "type": "LeafRouter", - "port": "Ethernet1" - }, - "ARISTA01T1": { - "mgmt_addr": "10.20.0.160", - "hwsku": "Arista", - "lo_addr": null, - "local_port": "Ethernet112", - "type": "LeafRouter", - "port": "Ethernet1" - } - } -} -``` - -### Loopback Interface - -Loopback interface configuration lies in **LOOPBACK_INTERFACE** table -and has similar schema with data plane interfaces. The loopback device -name and loopback IP prefix act as multi-level key for loopback -interface objects. - -``` -{ -"LOOPBACK_INTERFACE": { - "Loopback0|10.1.0.32/32": {}, - "Loopback0|FC00:1::32/128": {} - } -} - -``` - -### LOSSLESS_TRAFFIC_PATTERN - -The LOSSLESS_TRAFFIC_PATTERN table stores parameters related to -lossless traffic for dynamic buffer calculation - -``` -{ - "LOSSLESS_TRAFFIC_PATTERN": { - "AZURE": { - "mtu": "1024", - "small_packet_percentage": "100" - } - } -} -``` - -### Management Interface - -Management interfaces are defined in **MGMT_INTERFACE** table. Object -key is composed of management interface name and IP prefix. Attribute -***gwaddr*** specify the gateway address of the prefix. -***forced_mgmt_routes*** attribute can be used to specify addresses / -prefixes traffic to which are forced to go through management network -instead of data network. 
- -``` -{ -"MGMT_INTERFACE": { - "eth0|10.11.150.11/16": { - "gwaddr": "10.11.0.1" - }, - "eth0|FC00:2::32/64": { - "forced_mgmt_routes": [ - "10.0.0.100/31", - "10.250.0.8", - "10.255.0.0/28" - ], - "gwaddr": "fc00:2::1" - } - } -} - -``` - -### Management port - -``` -{ -"MGMT_PORT": { - "eth0": { - "alias": "eth0", - "admin_status": "up" - } - } -} - -``` - - -### Management VRF - -``` -{ -"MGMT_VRF_CONFIG": { - "vrf_global": { - "mgmtVrfEnabled": "true" - } - } -} -``` - -### MAP_PFC_PRIORITY_TO_QUEUE - -``` -{ -"MAP_PFC_PRIORITY_TO_QUEUE": { - "AZURE": { - "1": "1", - "0": "0", - "3": "3", - "2": "2", - "5": "5", - "4": "4", - "7": "7", - "6": "6" - } - } -} -``` -### NTP Global Configuration - -These configuration options are used to modify the way that -ntp binds to the ports on the switch and which port it uses to -make ntp update requests from. - -***NTP VRF*** - -If this option is set to `default` then ntp will run within the default vrf -**when the management vrf is enabled**. If the mgmt vrf is enabled and this value is -not set to default then ntp will run within the mgmt vrf. - -This option **has no effect** if the mgmt vrf is not enabled. - -``` -{ -"NTP": { - "global": { - "vrf": "default" - } - } -} -``` - - -***NTP Source Port*** - -This option sets the port which ntp will choose to send time update requests from by. - -NOTE: If a Loopback interface is defined on the switch ntp will choose this by default, so this setting -is **required** if the switch has a Loopback interface and the ntp peer does not have defined routes -for that address. - -``` -{ -"NTP": { - "global": { - "src_intf": "Ethernet1" - } - } -} -``` - -### NTP and SYSLOG servers - -These information are configured in individual tables. Domain name or IP -address of the server is used as object key. Currently there are no -attributes in those objects. 
- -***NTP server*** -``` -{ -"NTP_SERVER": { - "2.debian.pool.ntp.org": {}, - "1.debian.pool.ntp.org": {}, - "3.debian.pool.ntp.org": {}, - "0.debian.pool.ntp.org": {} - }, - -"NTP_SERVER": { - "23.92.29.245": {}, - "204.2.134.164": {} - } -} -``` - -***Syslogserver*** -``` -{ -"SYSLOG_SERVER": { - "10.0.0.5": {}, - "10.0.0.6": {}, - "10.11.150.5": {} - } -} -``` - -### Port - -In this table the physical port configurations are defined. Each object -will have port name as its key, and port name alias and port speed as -optional attributes. - -``` -{ -"PORT": { - "Ethernet0": { - "index": "0", - "lanes": "101,102", - "description": "fortyGigE1/1/1", - "mtu": "9100", - "alias": "fortyGigE1/1/1", - "speed": "40000" - }, - "Ethernet1": { - "index": "1", - "lanes": "103,104", - "description": "fortyGigE1/1/2", - "mtu": "9100", - "alias": "fortyGigE1/1/2", - "admin_status": "up", - "speed": "40000" - }, - "Ethernet63": { - "index": "63", - "lanes": "87,88", - "description": "fortyGigE1/4/16", - "mtu": "9100", - "alias": "fortyGigE1/4/16", - "speed": "40000" - } - } -} - -``` - -### Port Channel - -Port channels are defined in **PORTCHANNEL** table with port channel -name as object key and member list as attribute. 
- -``` -{ -"PORTCHANNEL": { - "PortChannel0003": { - "admin_status": "up", - "min_links": "1", - "members": [ - "Ethernet54" - ], - "mtu": "9100" - }, - "PortChannel0004": { - "admin_status": "up", - "min_links": "1", - "members": [ - "Ethernet56" - ], - "mtu": "9100" - } - } -} -``` - - -### Portchannel member - -``` -{ -"PORTCHANNEL_MEMBER": { - "PortChannel0001|Ethernet50": {}, - "PortChannel0002|Ethernet52": {}, - "PortChannel0003|Ethernet54": {}, - "PortChannel0004|Ethernet56": {} - } -} - -``` -### Scheduler - -``` -{ -"SCHEDULER": { - "scheduler.0": { - "type": "STRICT" - }, - "scheduler.1": { - "type": "WRR" - "weight": "1", - "meter_type": "bytes", - "pir": "1250000000", - "pbs": "8192" - }, - "scheduler.port": { - "meter_type": "bytes", - "pir": "1000000000", - "pbs": "8192" - } - } -} -``` - -### Port QoS Map - -``` -{ -"PORT_QOS_MAP": { - "Ethernet50,Ethernet52,Ethernet54,Ethernet56": { - "tc_to_pg_map": "AZURE", - "tc_to_queue_map": "AZURE", - "pfc_enable": "3,4", - "pfc_to_queue_map": "AZURE", - "dscp_to_tc_map": "AZURE", - "dscp_to_fc_map": "AZURE", - "exp_to_fc_map": "AZURE", - "scheduler": "scheduler.port" - } - } -} -``` - -### Queue -``` -{ -"QUEUE": { - "Ethernet56|4": { - "wred_profile": "AZURE_LOSSLESS", - "scheduler": "scheduler.1" - }, - "Ethernet56|5": { - "scheduler": "scheduler.0" - }, - "Ethernet56|6": { - "scheduler": "scheduler.0" - } - } -} -``` - - -### Tacplus Server - -``` -{ -"TACPLUS_SERVER": { - "10.0.0.8": { - "priority": "1", - "tcp_port": "49" - }, - "10.0.0.9": { - "priority": "1", - "tcp_port": "49" - } - } -} -``` - - -### TC to Priority group map - -``` -{ -"TC_TO_PRIORITY_GROUP_MAP": { - "AZURE": { - "1": "1", - "0": "0", - "3": "3", - "2": "2", - "5": "5", - "4": "4", - "7": "7", - "6": "6" - } - } -} -``` - -### TC to Queue map - -``` -{ -"TC_TO_QUEUE_MAP": { - "AZURE": { - "1": "1", - "0": "0", - "3": "3", - "2": "2", - "5": "5", - "4": "4", - "7": "7", - "6": "6" - } - } -} -``` - -### Versions - -This table is where 
the curret version of the software is recorded. -``` -{ - "VERSIONS": { - "DATABASE": { - "VERSION": "version_1_0_1" - } - } -} -``` - -### VLAN - -This table is where VLANs are defined. VLAN name is used as object key, -and member list as well as an integer id are defined as attributes. If a -DHCP relay is required for this VLAN, a dhcp_servers attribute must be -specified for that VLAN, the value of which is a list that must contain -the domain name or IP address of one or more DHCP servers. - -``` -{ -"VLAN": { - "Vlan1000": { - "dhcp_servers": [ - "192.0.0.1", - "192.0.0.2", - "192.0.0.3", - "192.0.0.4" - ], - "members": [ - "Ethernet0", - "Ethernet4", - "Ethernet8", - "Ethernet12" - ], - "vlanid": "1000" - } - } -} -``` - -### VLAN_MEMBER - -VLAN member table has Vlan name together with physical port or port -channel name as object key, and tagging mode as attributes. - -``` -{ -"VLAN_MEMBER": { - "Vlan1000|PortChannel47": { - "tagging_mode": "untagged" - }, - "Vlan1000|Ethernet8": { - "tagging_mode": "untagged" - }, - "Vlan2000|PortChannel47": { - "tagging_mode": "tagged" - } - } -} -``` - -### Virtual router - -The virtual router table allows to insert or update a new virtual router -instance. The key of the instance is its name. The attributes in the -table allow to change properties of a virtual router. Attributes: - -- 'v4' contains boolean value 'true' or 'false'. Enable or - disable IPv4 in the virtual router -- 'v6' contains boolean value 'true' or 'false'. Enable or - disable IPv6 in the virtual router -- 'src_mac' contains MAC address. What source MAC address will be - used for packets egressing from the virtual router -- 'ttl_action' contains packet action. Defines the action for - packets with TTL == 0 or TTL == 1 -- 'ip_opt_action' contains packet action. Defines the action for - packets with IP options -- 'l3_mc_action' contains packet action. 
Defines the action for - unknown L3 multicast packets - -The packet action could be: - -- 'drop' -- 'forward' -- 'copy' -- 'copy_cancel' -- 'trap' -- 'log' -- 'deny' -- 'transit' - - -***TBD*** -``` -'VRF:rid1': { - 'v4': 'true', - 'v6': 'false', - 'src_mac': '02:04:05:06:07:08', - 'ttl_action': 'copy', - 'ip_opt_action': 'deny', - 'l3_mc_action': 'drop' -} -``` - - -### WRED_PROFILE - -``` -{ -"WRED_PROFILE": { - "AZURE_LOSSLESS": { - "red_max_threshold": "2097152", - "wred_green_enable": "true", - "ecn": "ecn_all", - "green_min_threshold": "1048576", - "red_min_threshold": "1048576", - "wred_yellow_enable": "true", - "yellow_min_threshold": "1048576", - "green_max_threshold": "2097152", - "green_drop_probability": "5", - "yellow_max_threshold": "2097152", - "wred_red_enable": "true", - "yellow_drop_probability": "5", - "red_drop_probability": "5" - } - } -} -``` - -### BREAKOUT_CFG - -This table is introduced as part of Dynamic Port Breakout(DPB) feature. -It shows the current breakout mode of all ports(root ports). -The list of root ports, all possible breakout modes, and default breakout modes - are obtained/derived from platform.json and hwsku.json files. - -``` -"BREAKOUT_CFG": { - "Ethernet0": { - "brkout_mode": "4x25G[10G]" - }, - "Ethernet4": { - "brkout_mode": "4x25G[10G]" - }, - "Ethernet8": { - "brkout_mode": "4x25G[10G]" - }, - - ...... - - "Ethernet116": { - "brkout_mode": "2x50G" - }, - "Ethernet120": { - "brkout_mode": "2x50G" - }, - "Ethernet124": { - "brkout_mode": "2x50G" - } -} -``` - -For Developers -============== - -Generating Application Config by Jinja2 Template ------------------------------------------------- - -To be added. - -Incremental Configuration by Subscribing to ConfigDB ----------------------------------------------------- - -Detail instruction to be added. A sample could be found in this -[PR](https://github.com/Azure/sonic-buildimage/pull/861) that -implemented dynamic configuration for BGP. 
diff --git a/doc/swss-schema.md b/doc/swss-schema.md index ec28eb6c0f..74bfd687b8 100644 --- a/doc/swss-schema.md +++ b/doc/swss-schema.md @@ -233,6 +233,7 @@ and reflects the LAG ports into the redis under: `LAG_TABLE::port` key = ROUTE_TABLE:segment ; SRV6 segment name ; field = value path = STRING ; Comma-separated list of IPV6 prefixes for a SRV6 segment + type = STRING ; SRV6 segment list type like "insert", "encaps.red"; If not provided, default type will be "encaps.red" --------------------------------------------- ### SRV6_MY_SID_TABLE diff --git a/fdbsyncd/Makefile.am b/fdbsyncd/Makefile.am index 4ab2f5dddd..b35ee5f309 100644 --- a/fdbsyncd/Makefile.am +++ b/fdbsyncd/Makefile.am @@ -10,10 +10,15 @@ endif fdbsyncd_SOURCES = fdbsyncd.cpp fdbsync.cpp $(top_srcdir)/warmrestart/warmRestartAssist.cpp -fdbsyncd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(COV_CFLAGS) -fdbsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(COV_CFLAGS) -fdbsyncd_LDADD = -lnl-3 -lnl-route-3 -lswsscommon $(COV_LDFLAGS) +fdbsyncd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(COV_CFLAGS) $(CFLAGS_ASAN) +fdbsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(COV_CFLAGS) $(CFLAGS_ASAN) +fdbsyncd_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lswsscommon $(COV_LDFLAGS) if GCOV_ENABLED fdbsyncd_LDADD += -lgcovpreload endif + +if ASAN_ENABLED +fdbsyncd_SOURCES += $(top_srcdir)/lib/asan.cpp +endif + diff --git a/fdbsyncd/fdbsync.cpp b/fdbsyncd/fdbsync.cpp index 0cdcc63214..0d71f721dc 100644 --- a/fdbsyncd/fdbsync.cpp +++ b/fdbsyncd/fdbsync.cpp @@ -307,13 +307,6 @@ void FdbSync::updateLocalMac (struct m_fdb_info *info) op = "replace"; port_name = info->port_name; fdb_type = info->type; - /* Check if this vlan+key is also learned by vxlan neighbor then delete learned on */ - if (m_mac.find(key) != m_mac.end()) - { - macDelVxlanEntry(key, info); - SWSS_LOG_INFO("Local learn event deleting from VXLAN table DEL_KEY %s", key.c_str()); - macDelVxlan(key); - } } else { 
@@ -335,7 +328,7 @@ void FdbSync::updateLocalMac (struct m_fdb_info *info) } else { - type = "static"; + type = "sticky static"; } const std::string cmds = std::string("") @@ -347,6 +340,17 @@ void FdbSync::updateLocalMac (struct m_fdb_info *info) SWSS_LOG_INFO("cmd:%s, res=%s, ret=%d", cmds.c_str(), res.c_str(), ret); + if (info->op_type == FDB_OPER_ADD) + { + /* Check if this vlan+key is also learned by vxlan neighbor then delete the dest entry */ + if (m_mac.find(key) != m_mac.end()) + { + macDelVxlanEntry(key, info); + SWSS_LOG_INFO("Local learn event deleting from VXLAN table DEL_KEY %s", key.c_str()); + macDelVxlan(key); + } + } + return; } diff --git a/fdbsyncd/fdbsyncd.cpp b/fdbsyncd/fdbsyncd.cpp index a83b2693e1..4f9405cbfd 100644 --- a/fdbsyncd/fdbsyncd.cpp +++ b/fdbsyncd/fdbsyncd.cpp @@ -19,7 +19,6 @@ int main(int argc, char **argv) DBConnector appDb(APPL_DB, DBConnector::DEFAULT_UNIXSOCKET, 0); RedisPipeline pipelineAppDB(&appDb); DBConnector stateDb(STATE_DB, DBConnector::DEFAULT_UNIXSOCKET, 0); - DBConnector log_db(LOGLEVEL_DB, DBConnector::DEFAULT_UNIXSOCKET, 0); DBConnector config_db(CONFIG_DB, DBConnector::DEFAULT_UNIXSOCKET, 0); FdbSync sync(&pipelineAppDB, &stateDb, &config_db); diff --git a/fpmsyncd/Makefile.am b/fpmsyncd/Makefile.am index ef709db876..29b81d7381 100644 --- a/fpmsyncd/Makefile.am +++ b/fpmsyncd/Makefile.am @@ -10,10 +10,15 @@ endif fpmsyncd_SOURCES = fpmsyncd.cpp fpmlink.cpp routesync.cpp $(top_srcdir)/warmrestart/warmRestartHelper.cpp -fpmsyncd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) -fpmsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) -fpmsyncd_LDADD = -lnl-3 -lnl-route-3 -lswsscommon +fpmsyncd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) +fpmsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) +fpmsyncd_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lswsscommon if GCOV_ENABLED fpmsyncd_LDADD += -lgcovpreload endif + +if ASAN_ENABLED +fpmsyncd_SOURCES += 
$(top_srcdir)/lib/asan.cpp +endif + diff --git a/fpmsyncd/bgp_eoiu_marker.py b/fpmsyncd/bgp_eoiu_marker.py index 83051d7878..d7f144e4bf 100644 --- a/fpmsyncd/bgp_eoiu_marker.py +++ b/fpmsyncd/bgp_eoiu_marker.py @@ -17,7 +17,6 @@ """ import sys -import swsssdk import time import syslog import traceback @@ -80,7 +79,7 @@ def init_peers_eor_status(self): # Only two families: 'ipv4' and 'ipv6' # state is "unknown" / "reached" / "consumed" def set_bgp_eoiu_marker(self, family, state): - db = swsssdk.SonicV2Connector(host='127.0.0.1') + db = swsscommon.SonicV2Connector(host='127.0.0.1') db.connect(db.STATE_DB, False) key = "BGP_STATE_TABLE|%s|eoiu" % family db.set(db.STATE_DB, key, 'state', state) @@ -90,7 +89,7 @@ def set_bgp_eoiu_marker(self, family, state): return def clean_bgp_eoiu_marker(self): - db = swsssdk.SonicV2Connector(host='127.0.0.1') + db = swsscommon.SonicV2Connector(host='127.0.0.1') db.connect(db.STATE_DB, False) db.delete(db.STATE_DB, "BGP_STATE_TABLE|IPv4|eoiu") db.delete(db.STATE_DB, "BGP_STATE_TABLE|IPv6|eoiu") diff --git a/fpmsyncd/fpminterface.h b/fpmsyncd/fpminterface.h new file mode 100644 index 0000000000..7d78b81808 --- /dev/null +++ b/fpmsyncd/fpminterface.h @@ -0,0 +1,27 @@ +#pragma once + +#include +#include + +#include "fpm/fpm.h" + +namespace swss +{ + +/** + * @brief FPM zebra communication interface + */ +class FpmInterface : public Selectable +{ +public: + virtual ~FpmInterface() = default; + + /** + * @brief Send netlink message through FPM socket + * @param msg Netlink message + * @return True on success, otherwise false is returned + */ + virtual bool send(nlmsghdr* nl_hdr) = 0; +}; + +} diff --git a/fpmsyncd/fpmlink.cpp b/fpmsyncd/fpmlink.cpp index d51b3b482a..13d170a805 100644 --- a/fpmsyncd/fpmlink.cpp +++ b/fpmsyncd/fpmlink.cpp @@ -39,7 +39,7 @@ bool FpmLink::isRawProcessing(struct nlmsghdr *h) int len; short encap_type = 0; struct rtmsg *rtm; - struct rtattr *tb[RTA_MAX + 1]; + struct rtattr *tb[RTA_MAX + 1] = {0}; rtm = 
(struct rtmsg *)NLMSG_DATA(h); @@ -54,7 +54,6 @@ bool FpmLink::isRawProcessing(struct nlmsghdr *h) return false; } - memset(tb, 0, sizeof(tb)); netlink_parse_rtattr(tb, RTA_MAX, RTM_RTA(rtm), len); if (!tb[RTA_MULTIPATH]) @@ -120,7 +119,7 @@ FpmLink::FpmLink(RouteSync *rsync, unsigned short port) : m_server_up(false), m_routesync(rsync) { - struct sockaddr_in addr; + struct sockaddr_in addr = {}; int true_val = 1; m_server_socket = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP); @@ -141,7 +140,6 @@ FpmLink::FpmLink(RouteSync *rsync, unsigned short port) : throw system_error(errno, system_category()); } - memset (&addr, 0, sizeof (addr)); addr.sin_family = AF_INET; addr.sin_port = htons(port); addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK); @@ -160,11 +158,17 @@ FpmLink::FpmLink(RouteSync *rsync, unsigned short port) : m_server_up = true; m_messageBuffer = new char[m_bufSize]; + m_sendBuffer = new char[m_bufSize]; + + m_routesync->onFpmConnected(*this); } FpmLink::~FpmLink() { + m_routesync->onFpmDisconnected(); + delete[] m_messageBuffer; + delete[] m_sendBuffer; if (m_connected) close(m_connection_socket); if (m_server_up) @@ -212,52 +216,103 @@ uint64_t FpmLink::readData() hdr = reinterpret_cast(static_cast(m_messageBuffer + start)); left = m_pos - start; if (left < FPM_MSG_HDR_LEN) + { break; + } + /* fpm_msg_len includes header size */ msg_len = fpm_msg_len(hdr); if (left < msg_len) + { break; + } if (!fpm_msg_ok(hdr, left)) + { throw system_error(make_error_code(errc::bad_message), "Malformed FPM message received"); + } + + processFpmMessage(hdr); + + start += msg_len; + } + + memmove(m_messageBuffer, m_messageBuffer + start, m_pos - start); + m_pos = m_pos - (uint32_t)start; + return 0; +} + +void FpmLink::processFpmMessage(fpm_msg_hdr_t* hdr) +{ + size_t msg_len = fpm_msg_len(hdr); + + if (hdr->msg_type != FPM_MSG_TYPE_NETLINK) + { + return; + } + nlmsghdr *nl_hdr = (nlmsghdr *)fpm_msg_data(hdr); + + /* Read all netlink messages inside FPM message */ + for (; 
NLMSG_OK (nl_hdr, msg_len); nl_hdr = NLMSG_NEXT(nl_hdr, msg_len)) + { + /* + * EVPN Type5 Add Routes need to be process in Raw mode as they contain + * RMAC, VLAN and L3VNI information. + * Where as all other route will be using rtnl api to extract information + * from the netlink msg. + */ + bool isRaw = isRawProcessing(nl_hdr); + + nl_msg *msg = nlmsg_convert(nl_hdr); + if (msg == NULL) + { + throw system_error(make_error_code(errc::bad_message), "Unable to convert nlmsg"); + } - if (hdr->msg_type == FPM_MSG_TYPE_NETLINK) + nlmsg_set_proto(msg, NETLINK_ROUTE); + + if (isRaw) { - bool isRaw = false; + /* EVPN Type5 Add route processing */ + processRawMsg(nl_hdr); + } + else + { + NetDispatcher::getInstance().onNetlinkMessage(msg); + } + nlmsg_free(msg); + } +} - nlmsghdr *nl_hdr = (nlmsghdr *)fpm_msg_data(hdr); +bool FpmLink::send(nlmsghdr* nl_hdr) +{ + fpm_msg_hdr_t hdr{}; - /* - * EVPN Type5 Add Routes need to be process in Raw mode as they contain - * RMAC, VLAN and L3VNI information. - * Where as all other route will be using rtnl api to extract information - * from the netlink msg. 
- * */ - isRaw = isRawProcessing(nl_hdr); + size_t len = fpm_msg_align(sizeof(hdr) + nl_hdr->nlmsg_len); - nl_msg *msg = nlmsg_convert(nl_hdr); - if (msg == NULL) - { - throw system_error(make_error_code(errc::bad_message), "Unable to convert nlmsg"); - } + if (len > m_bufSize) + { + SWSS_LOG_THROW("Message length %zu is greater than the send buffer size %d", len, m_bufSize); + } - nlmsg_set_proto(msg, NETLINK_ROUTE); + hdr.version = FPM_PROTO_VERSION; + hdr.msg_type = FPM_MSG_TYPE_NETLINK; + hdr.msg_len = htons(static_cast(len)); - if (isRaw) - { - /* EVPN Type5 Add route processing */ - processRawMsg(nl_hdr); - } - else - { - NetDispatcher::getInstance().onNetlinkMessage(msg); - } - nlmsg_free(msg); + memcpy(m_sendBuffer, &hdr, sizeof(hdr)); + memcpy(m_sendBuffer + sizeof(hdr), nl_hdr, nl_hdr->nlmsg_len); + + size_t sent = 0; + while (sent != len) + { + auto rc = ::send(m_connection_socket, m_sendBuffer + sent, len - sent, 0); + if (rc == -1) + { + SWSS_LOG_ERROR("Failed to send FPM message: %s", strerror(errno)); + return false; } - start += msg_len; + sent += rc; } - memmove(m_messageBuffer, m_messageBuffer + start, m_pos - start); - m_pos = m_pos - (uint32_t)start; - return 0; + return true; } diff --git a/fpmsyncd/fpmlink.h b/fpmsyncd/fpmlink.h index 6cceef34ea..c025750edf 100644 --- a/fpmsyncd/fpmlink.h +++ b/fpmsyncd/fpmlink.h @@ -11,13 +11,13 @@ #include #include -#include "selectable.h" #include "fpm/fpm.h" +#include "fpmsyncd/fpminterface.h" #include "fpmsyncd/routesync.h" namespace swss { -class FpmLink : public Selectable { +class FpmLink : public FpmInterface { public: const int MSG_BATCH_SIZE; FpmLink(RouteSync *rsync, unsigned short port = FPM_DEFAULT_PORT); @@ -39,10 +39,15 @@ class FpmLink : public Selectable { m_routesync->onMsgRaw(h); }; + void processFpmMessage(fpm_msg_hdr_t* hdr); + + bool send(nlmsghdr* nl_hdr) override; + private: RouteSync *m_routesync; unsigned int m_bufSize; char *m_messageBuffer; + char *m_sendBuffer; unsigned int m_pos; 
bool m_connected; diff --git a/fpmsyncd/fpmsyncd.cpp b/fpmsyncd/fpmsyncd.cpp index 8f797e178c..5e16a6a6ca 100644 --- a/fpmsyncd/fpmsyncd.cpp +++ b/fpmsyncd/fpmsyncd.cpp @@ -4,10 +4,14 @@ #include "select.h" #include "selectabletimer.h" #include "netdispatcher.h" +#include "netlink.h" +#include "notificationconsumer.h" +#include "subscriberstatetable.h" #include "warmRestartHelper.h" #include "fpmsyncd/fpmlink.h" #include "fpmsyncd/routesync.h" +#include using namespace std; using namespace swss; @@ -47,21 +51,47 @@ static bool eoiuFlagsSet(Table &bgpStateTable) int main(int argc, char **argv) { swss::Logger::linkToDbNative("fpmsyncd"); + + const auto routeResponseChannelName = std::string("APPL_DB_") + APP_ROUTE_TABLE_NAME + "_RESPONSE_CHANNEL"; + DBConnector db("APPL_DB", 0); + DBConnector cfgDb("CONFIG_DB", 0); + SubscriberStateTable deviceMetadataTableSubscriber(&cfgDb, CFG_DEVICE_METADATA_TABLE_NAME); + Table deviceMetadataTable(&cfgDb, CFG_DEVICE_METADATA_TABLE_NAME); + DBConnector applStateDb("APPL_STATE_DB", 0); + std::unique_ptr routeResponseChannel; + RedisPipeline pipeline(&db); RouteSync sync(&pipeline); DBConnector stateDb("STATE_DB", 0); Table bgpStateTable(&stateDb, STATE_BGP_TABLE_NAME); + NetLink netlink; + + netlink.registerGroup(RTNLGRP_LINK); + NetDispatcher::getInstance().registerMessageHandler(RTM_NEWROUTE, &sync); NetDispatcher::getInstance().registerMessageHandler(RTM_DELROUTE, &sync); + NetDispatcher::getInstance().registerMessageHandler(RTM_NEWLINK, &sync); + NetDispatcher::getInstance().registerMessageHandler(RTM_DELLINK, &sync); + + rtnl_route_read_protocol_names(DefaultRtProtoPath); + + std::string suppressionEnabledStr; + deviceMetadataTable.hget("localhost", "suppress-fib-pending", suppressionEnabledStr); + if (suppressionEnabledStr == "enabled") + { + routeResponseChannel = std::make_unique(&applStateDb, routeResponseChannelName); + sync.setSuppressionEnabled(true); + } while (true) { try { FpmLink fpm(&sync); + Select s; 
SelectableTimer warmStartTimer(timespec{0, 0}); // Before eoiu flags detected, check them periodically. It also stop upon detection of reconciliation done. @@ -80,6 +110,13 @@ int main(int argc, char **argv) cout << "Connected!" << endl; s.addSelectable(&fpm); + s.addSelectable(&netlink); + s.addSelectable(&deviceMetadataTableSubscriber); + + if (sync.isSuppressionEnabled()) + { + s.addSelectable(routeResponseChannel.get()); + } /* If warm-restart feature is enabled, execute 'restoration' logic */ bool warmStartEnabled = sync.m_warmStartHelper.checkAndStart(); @@ -139,11 +176,8 @@ int main(int argc, char **argv) SWSS_LOG_NOTICE("Warm-Restart EOIU hold timer expired."); } - if (sync.m_warmStartHelper.inProgress()) - { - sync.m_warmStartHelper.reconcile(); - SWSS_LOG_NOTICE("Warm-Restart reconciliation processed."); - } + sync.onWarmStartEnd(applStateDb); + // remove the one-shot timer. s.removeSelectable(temps); pipeline.flush(); @@ -182,6 +216,74 @@ int main(int argc, char **argv) s.removeSelectable(&eoiuCheckTimer); } } + else if (temps == &deviceMetadataTableSubscriber) + { + std::deque keyOpFvsQueue; + deviceMetadataTableSubscriber.pops(keyOpFvsQueue); + + for (const auto& keyOpFvs: keyOpFvsQueue) + { + const auto& key = kfvKey(keyOpFvs); + const auto& op = kfvOp(keyOpFvs); + const auto& fvs = kfvFieldsValues(keyOpFvs); + + if (op != SET_COMMAND) + { + continue; + } + + if (key != "localhost") + { + continue; + } + + for (const auto& fv: fvs) + { + const auto& field = fvField(fv); + const auto& value = fvValue(fv); + + if (field != "suppress-fib-pending") + { + continue; + } + + bool shouldEnable = (value == "enabled"); + + if (shouldEnable && !sync.isSuppressionEnabled()) + { + routeResponseChannel = std::make_unique(&applStateDb, routeResponseChannelName); + sync.setSuppressionEnabled(true); + s.addSelectable(routeResponseChannel.get()); + } + else if (!shouldEnable && sync.isSuppressionEnabled()) + { + /* When disabling suppression we mark all existing routes 
offloaded in zebra + * as there could be some transient routes which are pending response from + * orchagent, thus such updates might be missing. Since we are disabling suppression + * we no longer care about real HW offload status and can mark all routes as offloaded + * to avoid routes stuck in suppressed state after transition. */ + sync.markRoutesOffloaded(db); + + sync.setSuppressionEnabled(false); + s.removeSelectable(routeResponseChannel.get()); + routeResponseChannel.reset(); + } + } // end for fvs + } // end for keyOpFvsQueue + } + else if (routeResponseChannel && (temps == routeResponseChannel.get())) + { + std::deque notifications; + routeResponseChannel->pops(notifications); + + for (const auto& notification: notifications) + { + const auto& key = kfvKey(notification); + const auto& fieldValues = kfvFieldsValues(notification); + + sync.onRouteResponse(key, fieldValues); + } + } else if (!warmStartEnabled || sync.m_warmStartHelper.isReconciled()) { pipeline.flush(); diff --git a/fpmsyncd/routesync.cpp b/fpmsyncd/routesync.cpp index 6a128a0784..caf6210084 100644 --- a/fpmsyncd/routesync.cpp +++ b/fpmsyncd/routesync.cpp @@ -10,6 +10,7 @@ #include "fpmsyncd/fpmlink.h" #include "fpmsyncd/routesync.h" #include "macaddress.h" +#include "converter.h" #include #include @@ -44,6 +45,36 @@ using namespace swss; #define ETHER_ADDR_STRLEN (3*ETH_ALEN) +/* Returns name of the protocol passed number represents */ +static string getProtocolString(int proto) +{ + static constexpr size_t protocolNameBufferSize = 128; + char buffer[protocolNameBufferSize] = {}; + + if (!rtnl_route_proto2str(proto, buffer, sizeof(buffer))) + { + return std::to_string(proto); + } + + return buffer; +} + +/* Helper to create unique pointer with custom destructor */ +template +static decltype(auto) makeUniqueWithDestructor(T* ptr, F func) +{ + return std::unique_ptr(ptr, func); +} + +template +static decltype(auto) makeNlAddr(const T& ip) +{ + nl_addr* addr; + 
nl_addr_parse(ip.to_string().c_str(), AF_UNSPEC, &addr); + return makeUniqueWithDestructor(addr, nl_addr_put); +} + + RouteSync::RouteSync(RedisPipeline *pipeline) : m_routeTable(pipeline, APP_ROUTE_TABLE_NAME, true), m_label_routeTable(pipeline, APP_LABEL_ROUTE_TABLE_NAME, true), @@ -347,7 +378,7 @@ bool RouteSync::getEvpnNextHop(struct nlmsghdr *h, int received_bytes, void RouteSync::onEvpnRouteMsg(struct nlmsghdr *h, int len) { struct rtmsg *rtm; - struct rtattr *tb[RTA_MAX + 1]; + struct rtattr *tb[RTA_MAX + 1] = {0}; void *dest = NULL; char anyaddr[16] = {0}; char dstaddr[16] = {0}; @@ -360,7 +391,6 @@ void RouteSync::onEvpnRouteMsg(struct nlmsghdr *h, int len) rtm = (struct rtmsg *)NLMSG_DATA(h); /* Parse attributes and extract fields of interest. */ - memset(tb, 0, sizeof(tb)); netlink_parse_rtattr(tb, RTA_MAX, RTM_RTA(rtm), len); if (tb[RTA_DST]) @@ -470,6 +500,8 @@ void RouteSync::onEvpnRouteMsg(struct nlmsghdr *h, int len) return; } + sendOffloadReply(h); + switch (rtm->rtm_type) { case RTN_BLACKHOLE: @@ -570,6 +602,12 @@ void RouteSync::onMsgRaw(struct nlmsghdr *h) void RouteSync::onMsg(int nlmsg_type, struct nl_object *obj) { + if (nlmsg_type == RTM_NEWLINK || nlmsg_type == RTM_DELLINK) + { + nl_cache_refill(m_nl_sock, m_link_cache); + return; + } + struct rtnl_route *route_obj = (struct rtnl_route *)obj; /* Supports IPv4 or IPv6 address, otherwise return immediately */ @@ -686,6 +724,11 @@ void RouteSync::onRouteMsg(int nlmsg_type, struct nl_object *obj, char *vrf) return; } + if (!isSuppressionEnabled()) + { + sendOffloadReply(route_obj); + } + switch (rtnl_route_get_type(route_obj)) { case RTN_BLACKHOLE: @@ -734,14 +777,45 @@ void RouteSync::onRouteMsg(int nlmsg_type, struct nl_object *obj, char *vrf) { SWSS_LOG_DEBUG("Skip routes to eth0 or docker0: %s %s %s", destipprefix, gw_list.c_str(), intf_list.c_str()); + // If intf_list has only this interface, that means all of the next hops of this route + // have been removed and the next hop on the 
eth0/docker0 has become the only next hop. + // In this case since we do not want the route with next hop on eth0/docker0, we return. + // But still we need to clear the route from the APPL_DB. Otherwise the APPL_DB and data + // path will be left with stale route entry + if(alsv.size() == 1) + { + if (!warmRestartInProgress) + { + SWSS_LOG_NOTICE("RouteTable del msg for route with only one nh on eth0/docker0: %s %s %s %s", + destipprefix, gw_list.c_str(), intf_list.c_str(), mpls_list.c_str()); + + m_routeTable.del(destipprefix); + } + else + { + SWSS_LOG_NOTICE("Warm-Restart mode: Receiving delete msg for route with only nh on eth0/docker0: %s %s %s %s", + destipprefix, gw_list.c_str(), intf_list.c_str(), mpls_list.c_str()); + + vector fvVector; + const KeyOpFieldsValuesTuple kfv = std::make_tuple(destipprefix, + DEL_COMMAND, + fvVector); + m_warmStartHelper.insertRefreshMap(kfv); + } + } return; } } + auto proto_num = rtnl_route_get_protocol(route_obj); + auto proto_str = getProtocolString(proto_num); + vector fvVector; + FieldValueTuple proto("protocol", proto_str); FieldValueTuple gw("nexthop", gw_list); FieldValueTuple intf("ifname", intf_list); + fvVector.push_back(proto); fvVector.push_back(gw); fvVector.push_back(intf); if (!mpls_list.empty()) @@ -805,6 +879,8 @@ void RouteSync::onLabelRouteMsg(int nlmsg_type, struct nl_object *obj) return; } + sendOffloadReply(route_obj); + /* Get the index of the master device */ uint32_t master_index = rtnl_route_get_table(route_obj); /* if the table_id is not set in the route obj then route is for default vrf. 
*/ @@ -910,6 +986,8 @@ void RouteSync::onVnetRouteMsg(int nlmsg_type, struct nl_object *obj, string vne return; } + sendOffloadReply(route_obj); + switch (rtnl_route_get_type(route_obj)) { case RTN_UNICAST: @@ -1010,6 +1088,18 @@ bool RouteSync::getIfName(int if_index, char *if_name, size_t name_len) return true; } +rtnl_link* RouteSync::getLinkByName(const char *name) +{ + auto link = rtnl_link_get_by_name(m_link_cache, name); + if (link == nullptr) + { + /* Trying to refill cache */ + nl_cache_refill(m_nl_sock ,m_link_cache); + link = rtnl_link_get_by_name(m_link_cache, name); + } + return link; +} + /* * getNextHopList() - parses next hop list attached to route_obj * @arg route_obj (input) Netlink route object @@ -1208,7 +1298,7 @@ string RouteSync::getNextHopWt(struct rtnl_route *route_obj) uint8_t weight = rtnl_route_nh_get_weight(nexthop); if (weight) { - result += to_string(weight + 1); + result += to_string(weight); } else { @@ -1223,3 +1313,198 @@ string RouteSync::getNextHopWt(struct rtnl_route *route_obj) return result; } + +bool RouteSync::sendOffloadReply(struct nlmsghdr* hdr) +{ + SWSS_LOG_ENTER(); + + if (hdr->nlmsg_type != RTM_NEWROUTE) + { + return false; + } + + // Add request flag (required by zebra) + hdr->nlmsg_flags |= NLM_F_REQUEST; + + rtmsg *rtm = static_cast(NLMSG_DATA(hdr)); + + // Add offload flag + rtm->rtm_flags |= RTM_F_OFFLOAD; + + if (!m_fpmInterface) + { + SWSS_LOG_ERROR("Cannot send offload reply to zebra: FPM is disconnected"); + return false; + } + + // Send to zebra + if (!m_fpmInterface->send(hdr)) + { + SWSS_LOG_ERROR("Failed to send reply to zebra"); + return false; + } + + return true; +} + +bool RouteSync::sendOffloadReply(struct rtnl_route* route_obj) +{ + SWSS_LOG_ENTER(); + + nl_msg* msg{}; + rtnl_route_build_add_request(route_obj, NLM_F_CREATE, &msg); + + auto nlMsg = makeUniqueWithDestructor(msg, nlmsg_free); + + return sendOffloadReply(nlmsg_hdr(nlMsg.get())); +} + +void RouteSync::setSuppressionEnabled(bool enabled) 
+{ + SWSS_LOG_ENTER(); + + m_isSuppressionEnabled = enabled; + + SWSS_LOG_NOTICE("Pending routes suppression is %s", (m_isSuppressionEnabled ? "enabled": "disabled")); +} + +void RouteSync::onRouteResponse(const std::string& key, const std::vector& fieldValues) +{ + IpPrefix prefix; + std::string vrfName; + std::string protocol; + + bool isSetOperation{false}; + bool isSuccessReply{false}; + + if (!isSuppressionEnabled()) + { + return; + } + + auto colon = key.find(':'); + if (colon != std::string::npos && key.substr(0, colon).find(VRF_PREFIX) != std::string::npos) + { + vrfName = key.substr(0, colon); + prefix = IpPrefix{key.substr(colon + 1)}; + } + else + { + prefix = IpPrefix{key}; + } + + for (const auto& fieldValue: fieldValues) + { + std::string field = fvField(fieldValue); + std::string value = fvValue(fieldValue); + + if (field == "err_str") + { + isSuccessReply = (value == "SWSS_RC_SUCCESS"); + } + else if (field == "protocol") + { + // If field "protocol" is present in the field values then + // it is a SET operation. This field is absent only if we are + // processing DEL operation. + isSetOperation = true; + protocol = value; + } + } + + if (!isSetOperation) + { + SWSS_LOG_DEBUG("Received response for prefix %s(%s) deletion, ignoring ", + prefix.to_string().c_str(), vrfName.c_str()); + return; + } + + if (!isSuccessReply) + { + SWSS_LOG_INFO("Received failure response for prefix %s(%s)", + prefix.to_string().c_str(), vrfName.c_str()); + return; + } + + auto routeObject = makeUniqueWithDestructor(rtnl_route_alloc(), rtnl_route_put); + auto dstAddr = makeNlAddr(prefix); + + rtnl_route_set_dst(routeObject.get(), dstAddr.get()); + + auto proto = rtnl_route_str2proto(protocol.c_str()); + if (proto < 0) + { + proto = swss::to_uint(protocol); + } + + rtnl_route_set_protocol(routeObject.get(), static_cast(proto)); + rtnl_route_set_family(routeObject.get(), prefix.isV4() ? 
AF_INET : AF_INET6); + + unsigned int vrfIfIndex = 0; + if (!vrfName.empty()) + { + auto* link = getLinkByName(vrfName.c_str()); + if (!link) + { + SWSS_LOG_DEBUG("Failed to find VRF when constructing response message for prefix %s(%s). " + "This message is probably outdated", prefix.to_string().c_str(), + vrfName.c_str()); + return; + } + vrfIfIndex = rtnl_link_get_ifindex(link); + } + + rtnl_route_set_table(routeObject.get(), vrfIfIndex); + + if (!sendOffloadReply(routeObject.get())) + { + SWSS_LOG_ERROR("Failed to send RTM_NEWROUTE message to zebra on prefix %s(%s)", + prefix.to_string().c_str(), vrfName.c_str()); + return; + } + + SWSS_LOG_INFO("Sent response to zebra for prefix %s(%s)", + prefix.to_string().c_str(), vrfName.c_str()); +} + +void RouteSync::sendOffloadReply(DBConnector& db, const std::string& tableName) +{ + SWSS_LOG_ENTER(); + + Table routeTable{&db, tableName}; + + std::vector keys; + routeTable.getKeys(keys); + + for (const auto& key: keys) + { + std::vector fieldValues; + routeTable.get(key, fieldValues); + fieldValues.emplace_back("err_str", "SWSS_RC_SUCCESS"); + + onRouteResponse(key, fieldValues); + } +} + +void RouteSync::markRoutesOffloaded(swss::DBConnector& db) +{ + SWSS_LOG_ENTER(); + + sendOffloadReply(db, APP_ROUTE_TABLE_NAME); +} + +void RouteSync::onWarmStartEnd(DBConnector& applStateDb) +{ + SWSS_LOG_ENTER(); + + if (isSuppressionEnabled()) + { + markRoutesOffloaded(applStateDb); + } + + if (m_warmStartHelper.inProgress()) + { + m_warmStartHelper.reconcile(); + SWSS_LOG_NOTICE("Warm-Restart reconciliation processed."); + } +} diff --git a/fpmsyncd/routesync.h b/fpmsyncd/routesync.h index 2e53bb8d17..fd18b9d25a 100644 --- a/fpmsyncd/routesync.h +++ b/fpmsyncd/routesync.h @@ -4,10 +4,20 @@ #include "dbconnector.h" #include "producerstatetable.h" #include "netmsg.h" +#include "linkcache.h" +#include "fpminterface.h" #include "warmRestartHelper.h" #include #include +#include + +// Add RTM_F_OFFLOAD define if it is not there. 
+// Debian buster does not provide one but it is neccessary for compilation. +#ifndef RTM_F_OFFLOAD +#define RTM_F_OFFLOAD 0x4000 /* route is offloaded */ +#endif + using namespace std; /* Parse the Raw netlink msg */ @@ -16,6 +26,9 @@ extern void netlink_parse_rtattr(struct rtattr **tb, int max, struct rtattr *rta namespace swss { +/* Path to protocol name database provided by iproute2 */ +constexpr auto DefaultRtProtoPath = "/etc/iproute2/rt_protos"; + class RouteSync : public NetMsg { public: @@ -26,6 +39,31 @@ class RouteSync : public NetMsg virtual void onMsg(int nlmsg_type, struct nl_object *obj); virtual void onMsgRaw(struct nlmsghdr *obj); + + void setSuppressionEnabled(bool enabled); + + bool isSuppressionEnabled() const + { + return m_isSuppressionEnabled; + } + + void onRouteResponse(const std::string& key, const std::vector& fieldValues); + + void onWarmStartEnd(swss::DBConnector& applStateDb); + + /* Mark all routes from DB with offloaded flag */ + void markRoutesOffloaded(swss::DBConnector& db); + + void onFpmConnected(FpmInterface& fpm) + { + m_fpmInterface = &fpm; + } + + void onFpmDisconnected() + { + m_fpmInterface = nullptr; + } + WarmStartHelper m_warmStartHelper; private: @@ -40,6 +78,9 @@ class RouteSync : public NetMsg struct nl_cache *m_link_cache; struct nl_sock *m_nl_sock; + bool m_isSuppressionEnabled{false}; + FpmInterface* m_fpmInterface {nullptr}; + /* Handle regular route (include VRF route) */ void onRouteMsg(int nlmsg_type, struct nl_object *obj, char *vrf); @@ -63,6 +104,9 @@ class RouteSync : public NetMsg /* Get interface name based on interface index */ bool getIfName(int if_index, char *if_name, size_t name_len); + /* Get interface if_index based on interface name */ + rtnl_link* getLinkByName(const char *name); + void getEvpnNextHopSep(string& nexthops, string& vni_list, string& mac_list, string& intf_list); @@ -87,6 +131,15 @@ class RouteSync : public NetMsg /* Get next hop weights*/ string getNextHopWt(struct rtnl_route 
*route_obj); + + /* Sends FPM message with RTM_F_OFFLOAD flag set to zebra */ + bool sendOffloadReply(struct nlmsghdr* hdr); + + /* Sends FPM message with RTM_F_OFFLOAD flag set to zebra */ + bool sendOffloadReply(struct rtnl_route* route_obj); + + /* Sends FPM message with RTM_F_OFFLOAD flag set for all routes in the table */ + void sendOffloadReply(swss::DBConnector& db, const std::string& table); }; } diff --git a/gearsyncd/Makefile.am b/gearsyncd/Makefile.am index c9df85853a..1a1d9983a2 100644 --- a/gearsyncd/Makefile.am +++ b/gearsyncd/Makefile.am @@ -1,6 +1,6 @@ INCLUDES = -I $(top_srcdir)/lib -I $(top_srcdir) -I $(top_srcdir)/warmrestart -I $(top_srcdir)/cfgmgr -bin_PROGRAMS = gearsyncd +bin_PROGRAMS = gearsyncd if DEBUG DBGFLAGS = -ggdb -DDEBUG @@ -8,12 +8,17 @@ else DBGFLAGS = -g endif -gearsyncd_SOURCES = $(top_srcdir)/lib/gearboxutils.cpp gearsyncd.cpp gearparserbase.cpp gearboxparser.cpp phyparser.cpp $(top_srcdir)/cfgmgr/shellcmd.h +gearsyncd_SOURCES = $(top_srcdir)/lib/gearboxutils.cpp gearsyncd.cpp gearparserbase.cpp gearboxparser.cpp phyparser.cpp $(top_srcdir)/cfgmgr/shellcmd.h -gearsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(COV_CFLAGS) $(ASAN_CFLAGS) +gearsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(COV_CFLAGS) $(CFLAGS_ASAN) -gearsyncd_LDADD = -lnl-3 -lnl-route-3 -lswsscommon $(COV_LDFLAGS) $(ASAN_LDFLAGS) +gearsyncd_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lswsscommon $(COV_LDFLAGS) if GCOV_ENABLED gearsyncd_LDADD += -lgcovpreload endif + +if ASAN_ENABLED +gearsyncd_SOURCES += $(top_srcdir)/lib/asan.cpp +endif + diff --git a/gearsyncd/gearboxparser.cpp b/gearsyncd/gearboxparser.cpp index dfd68be2ec..879624fd25 100644 --- a/gearsyncd/gearboxparser.cpp +++ b/gearsyncd/gearboxparser.cpp @@ -15,6 +15,7 @@ */ #include "gearboxparser.h" +#include "gearboxutils.h" #include "phyparser.h" #include @@ -42,7 +43,7 @@ bool GearboxParser::parse() return false; } - json phys, phy, interfaces, interface, val, lanes; + json 
phys, phy, interfaces, interface, val, lanes, txFir; std::vector attrs; @@ -285,6 +286,27 @@ bool GearboxParser::parse() SWSS_LOG_ERROR("missing 'line_lanes' field in 'interfaces' item %d in gearbox configuration", iter); return false; } + + for (std::string txFirKey: swss::tx_fir_strings) + { + if (interface.find(txFirKey) != interface.end()) + { + txFir = interface[txFirKey]; // vec + std::string txFirValuesStr(""); + for (uint32_t iter2 = 0; iter2 < txFir.size(); iter2++) + { + val = txFir[iter2]; + if (txFirValuesStr.length() > 0) + { + txFirValuesStr += ","; + } + txFirValuesStr += std::to_string(val.get()); + } + attr = std::make_pair(txFirKey, txFirValuesStr); + attrs.push_back(attr); + } + } + std::string key; key = "interface:" + std::to_string(index); if (getWriteToDb() == true) diff --git a/lgtm.yml b/lgtm.yml index 59f2e812af..b3da3bba3f 100644 --- a/lgtm.yml +++ b/lgtm.yml @@ -16,6 +16,7 @@ extraction: - libnl-genl-3-dev - libnl-route-3-dev - libnl-nf-3-dev + - libyang-dev - libzmq3-dev - libzmq5 - swig3.0 @@ -29,6 +30,7 @@ extraction: - flex - graphviz - autoconf-archive + - uuid-dev after_prepare: - git clone https://github.com/Azure/sonic-buildimage; pushd sonic-buildimage/src/libnl3 - git clone https://github.com/thom311/libnl libnl3-3.5.0; pushd libnl3-3.5.0; git checkout tags/libnl3_5_0 diff --git a/lib/asan.cpp b/lib/asan.cpp new file mode 100644 index 0000000000..1f7d074e68 --- /dev/null +++ b/lib/asan.cpp @@ -0,0 +1,57 @@ +#include +#include +#include + +#include + +extern "C" { + const char* __lsan_default_suppressions() { + return "leak:__static_initialization_and_destruction_0\n"; + } +} + +static void swss_asan_sigterm_handler(int signo) +{ + SWSS_LOG_ENTER(); + + __lsan_do_leak_check(); + + struct sigaction sigact; + if (sigaction(SIGTERM, NULL, &sigact)) + { + SWSS_LOG_ERROR("failed to get current SIGTERM action handler"); + _exit(EXIT_FAILURE); + } + + // Check the currently set signal handler. 
+ // If it is ASAN's signal handler this means that the application didn't set its own handler. + // To preserve default behavior set the default signal handler and raise the signal to trigger its execution. + // Otherwise, the application installed its own signal handler. + // In this case, just trigger a leak check and do nothing else. + if (sigact.sa_handler == swss_asan_sigterm_handler) { + sigemptyset(&sigact.sa_mask); + sigact.sa_flags = 0; + sigact.sa_handler = SIG_DFL; + if (sigaction(SIGTERM, &sigact, NULL)) + { + SWSS_LOG_ERROR("failed to setup SIGTERM action handler"); + _exit(EXIT_FAILURE); + } + + raise(signo); + } +} + +__attribute__((constructor)) +static void swss_asan_init() +{ + SWSS_LOG_ENTER(); + + struct sigaction sigact = {}; + sigact.sa_handler = swss_asan_sigterm_handler; + if (sigaction(SIGTERM, &sigact, NULL)) + { + SWSS_LOG_ERROR("failed to setup SIGTERM action handler"); + exit(EXIT_FAILURE); + } +} diff --git a/lib/gearboxutils.cpp b/lib/gearboxutils.cpp index f9b3228621..bc35ed3456 100644 --- a/lib/gearboxutils.cpp +++ b/lib/gearboxutils.cpp @@ -266,6 +266,11 @@ std::map GearboxUtils::loadInterfaceMap(Table *gearbox } } } + else if (tx_fir_strings.find(val.first) != tx_fir_strings.end()) + { + SWSS_LOG_DEBUG("Parsed key:%s, val:%s", val.first.c_str(), val.second.c_str()); + interface.tx_firs[val.first] = val.second; + } } gearboxInterfaceMap[interface.index] = interface; } diff --git a/lib/gearboxutils.h b/lib/gearboxutils.h index 28ab48761e..a239aa3a10 100644 --- a/lib/gearboxutils.h +++ b/lib/gearboxutils.h @@ -30,6 +30,24 @@ namespace swss { +static const std::set tx_fir_strings = +{ + "system_tx_fir_pre1", + "system_tx_fir_pre2", + "system_tx_fir_pre3", + "system_tx_fir_post1", + "system_tx_fir_post2", + "system_tx_fir_post3", + "system_tx_fir_main", + "line_tx_fir_pre1", + "line_tx_fir_pre2", + "line_tx_fir_pre3", + "line_tx_fir_post1", + "line_tx_fir_post2", + "line_tx_fir_post3", + "line_tx_fir_main" +}; + typedef struct { int 
phy_id; @@ -54,6 +72,7 @@ typedef struct int phy_id; std::set line_lanes; std::set system_lanes; + std::map tx_firs; } gearbox_interface_t; typedef struct diff --git a/lib/recorder.cpp b/lib/recorder.cpp new file mode 100644 index 0000000000..449039adff --- /dev/null +++ b/lib/recorder.cpp @@ -0,0 +1,121 @@ +#include "recorder.h" +#include "timestamp.h" +#include "logger.h" +#include + +using namespace swss; + +const std::string Recorder::DEFAULT_DIR = "."; +const std::string Recorder::REC_START = "|recording started"; +const std::string Recorder::SWSS_FNAME = "swss.rec"; +const std::string Recorder::SAIREDIS_FNAME = "sairedis.rec"; +const std::string Recorder::RESPPUB_FNAME = "responsepublisher.rec"; + + +Recorder& Recorder::Instance() +{ + static Recorder m_recorder; + return m_recorder; +} + + +SwSSRec::SwSSRec() +{ + /* Set Default values */ + setRecord(true); + setRotate(false); + setLocation(Recorder::DEFAULT_DIR); + setFileName(Recorder::SWSS_FNAME); + setName("SwSS"); +} + + +ResPubRec::ResPubRec() +{ + /* Set Default values */ + setRecord(false); + setRotate(false); + setLocation(Recorder::DEFAULT_DIR); + setFileName(Recorder::RESPPUB_FNAME); + setName("Response Publisher"); +} + + +SaiRedisRec::SaiRedisRec() +{ + /* Set Default values */ + setRecord(true); + setRotate(false); + setLocation(Recorder::DEFAULT_DIR); + setFileName(Recorder::SAIREDIS_FNAME); + setName("SaiRedis"); +} + + +void RecWriter::startRec(bool exit_if_failure) +{ + if (!isRecord()) + { + return ; + } + + fname = getLoc() + "/" + getFile(); + record_ofs.open(fname, std::ofstream::out | std::ofstream::app); + if (!record_ofs.is_open()) + { + SWSS_LOG_ERROR("%s Recorder: Failed to open recording file %s: error %s", getName().c_str(), fname.c_str(), strerror(errno)); + if (exit_if_failure) + { + exit(EXIT_FAILURE); + } + else + { + setRecord(false); + } + } + record_ofs << swss::getTimestamp() << Recorder::REC_START << std::endl; + SWSS_LOG_NOTICE("%s Recorder: Recording started at %s", 
getName().c_str(), fname.c_str()); +} + + +RecWriter::~RecWriter() +{ + if (record_ofs.is_open()) + { + record_ofs.close(); + } +} + + +void RecWriter::record(const std::string& val) +{ + if (!isRecord()) + { + return ; + } + record_ofs << swss::getTimestamp() << "|" << val << std::endl; + if (isRotate()) + { + setRotate(false); + logfileReopen(); + } +} + + +void RecWriter::logfileReopen() +{ + /* + * On log rotate we will use the same file name, we are assuming that + * logrotate daemon move filename to filename.1 and we will create new + * empty file here. + */ + record_ofs.close(); + record_ofs.open(fname, std::ofstream::out | std::ofstream::app); + + if (!record_ofs.is_open()) + { + SWSS_LOG_ERROR("%s Recorder: Failed to open file %s: %s", getName().c_str(), fname.c_str(), strerror(errno)); + return; + } + SWSS_LOG_INFO("%s Recorder: LogRotate request handled", getName().c_str()); +} diff --git a/lib/recorder.h b/lib/recorder.h new file mode 100644 index 0000000000..971c3a2bb7 --- /dev/null +++ b/lib/recorder.h @@ -0,0 +1,84 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace swss { + +class RecBase { +public: + RecBase() = default; + /* Setters */ + void setRecord(bool record) { m_recording = record; } + void setRotate(bool rotate) { m_rotate = rotate; } + void setLocation(const std::string& loc) { m_location = loc; } + void setFileName(const std::string& name) { m_filename = name; } + void setName(const std::string& name) { m_name = name; } + + /* getters */ + bool isRecord() { return m_recording; } + bool isRotate() { return m_rotate; } + std::string getLoc() { return m_location; } + std::string getFile() { return m_filename; } + std::string getName() { return m_name; } + +private: + bool m_recording; + bool m_rotate; + std::string m_location; + std::string m_filename; + std::string m_name; +}; + +class RecWriter : public RecBase { +public: + RecWriter() = default; + virtual ~RecWriter(); + void startRec(bool exit_if_failure); 
+ void record(const std::string& val); + +protected: + void logfileReopen(); + +private: + std::ofstream record_ofs; + std::string fname; +}; + +class SwSSRec : public RecWriter { +public: + SwSSRec(); +}; + +/* Record Handler for Response Publisher Class */ +class ResPubRec : public RecWriter { +public: + ResPubRec(); +}; + +class SaiRedisRec : public RecBase { +public: + SaiRedisRec(); +}; + +/* Interface to access recorder classes */ +class Recorder { +public: + static Recorder& Instance(); + static const std::string DEFAULT_DIR; + static const std::string REC_START; + static const std::string SWSS_FNAME; + static const std::string SAIREDIS_FNAME; + static const std::string RESPPUB_FNAME; + + Recorder() = default; + /* Individual Handlers */ + SwSSRec swss; + SaiRedisRec sairedis; + ResPubRec respub; +}; + +} diff --git a/lib/subintf.cpp b/lib/subintf.cpp index ab26ee62de..4f934ff659 100644 --- a/lib/subintf.cpp +++ b/lib/subintf.cpp @@ -2,6 +2,7 @@ #include #include #include +#include #include "subintf.h" using namespace swss; diff --git a/lib/subintf.h b/lib/subintf.h index ec5d0c4656..263faf9f3a 100644 --- a/lib/subintf.h +++ b/lib/subintf.h @@ -1,5 +1,7 @@ #pragma once +#include + #define VLAN_SUB_INTERFACE_SEPARATOR "." 
namespace swss { class subIntf diff --git a/mclagsyncd/Makefile.am b/mclagsyncd/Makefile.am index e7bed8de7d..d4b4b03c40 100644 --- a/mclagsyncd/Makefile.am +++ b/mclagsyncd/Makefile.am @@ -10,10 +10,15 @@ endif mclagsyncd_SOURCES = mclagsyncd.cpp mclaglink.cpp -mclagsyncd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) -mclagsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) -mclagsyncd_LDADD = -lnl-3 -lnl-route-3 -lswsscommon +mclagsyncd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) +mclagsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) +mclagsyncd_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lswsscommon if GCOV_ENABLED mclagsyncd_LDADD += -lgcovpreload endif + +if ASAN_ENABLED +mclagsyncd_SOURCES += $(top_srcdir)/lib/asan.cpp +endif + diff --git a/mclagsyncd/mclaglink.cpp b/mclagsyncd/mclaglink.cpp index b09660ee56..b8040c1646 100644 --- a/mclagsyncd/mclaglink.cpp +++ b/mclagsyncd/mclaglink.cpp @@ -191,7 +191,8 @@ void MclagLink::setPortIsolate(char *msg) { static const unordered_set supported { BRCM_PLATFORM_SUBSTRING, - BFN_PLATFORM_SUBSTRING + BFN_PLATFORM_SUBSTRING, + CTC_PLATFORM_SUBSTRING }; const char *platform = getenv("platform"); @@ -1744,7 +1745,7 @@ MclagLink::MclagLink(Select *select, int port) : m_server_up(false), m_select(select) { - struct sockaddr_in addr; + struct sockaddr_in addr = {}; int true_val = 1; m_server_socket = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP); @@ -1765,7 +1766,6 @@ MclagLink::MclagLink(Select *select, int port) : throw system_error(errno, system_category()); } - memset(&addr, 0, sizeof(addr)); addr.sin_family = AF_INET; addr.sin_port = htons((unsigned short int)port); addr.sin_addr.s_addr = htonl(MCLAG_DEFAULT_IP); diff --git a/mclagsyncd/mclaglink.h b/mclagsyncd/mclaglink.h index a811f8cb2e..09129fd88f 100644 --- a/mclagsyncd/mclaglink.h +++ b/mclagsyncd/mclaglink.h @@ -53,6 +53,7 @@ #define BRCM_PLATFORM_SUBSTRING "broadcom" #define BFN_PLATFORM_SUBSTRING "barefoot" 
+#define CTC_PLATFORM_SUBSTRING "centec" using namespace std; diff --git a/natsyncd/Makefile.am b/natsyncd/Makefile.am index d8212ee4b4..cdee9d52ae 100644 --- a/natsyncd/Makefile.am +++ b/natsyncd/Makefile.am @@ -10,11 +10,15 @@ endif natsyncd_SOURCES = natsyncd.cpp natsync.cpp $(top_srcdir)/warmrestart/warmRestartAssist.cpp -natsyncd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) -natsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) -natsyncd_LDADD = -lnl-3 -lnl-route-3 -lnl-nf-3 -lswsscommon +natsyncd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) +natsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) +natsyncd_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lnl-nf-3 -lswsscommon if GCOV_ENABLED natsyncd_LDADD += -lgcovpreload endif +if ASAN_ENABLED +natsyncd_SOURCES += $(top_srcdir)/lib/asan.cpp +endif + diff --git a/neighsyncd/Makefile.am b/neighsyncd/Makefile.am index 23e76b6cd2..cb61a83bbc 100644 --- a/neighsyncd/Makefile.am +++ b/neighsyncd/Makefile.am @@ -10,10 +10,15 @@ endif neighsyncd_SOURCES = neighsyncd.cpp neighsync.cpp $(top_srcdir)/warmrestart/warmRestartAssist.cpp -neighsyncd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) -neighsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) -neighsyncd_LDADD = -lnl-3 -lnl-route-3 -lswsscommon +neighsyncd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) +neighsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) +neighsyncd_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lswsscommon if GCOV_ENABLED neighsyncd_LDADD += -lgcovpreload endif + +if ASAN_ENABLED +neighsyncd_SOURCES += $(top_srcdir)/lib/asan.cpp +endif + diff --git a/neighsyncd/neighsync.cpp b/neighsyncd/neighsync.cpp index cb04371d41..46f51b9266 100644 --- a/neighsyncd/neighsync.cpp +++ b/neighsyncd/neighsync.cpp @@ -23,7 +23,8 @@ NeighSync::NeighSync(RedisPipeline *pipelineAppDB, DBConnector *stateDb, DBConne m_stateNeighRestoreTable(stateDb, 
STATE_NEIGH_RESTORE_TABLE_NAME), m_cfgInterfaceTable(cfgDb, CFG_INTF_TABLE_NAME), m_cfgLagInterfaceTable(cfgDb, CFG_LAG_INTF_TABLE_NAME), - m_cfgVlanInterfaceTable(cfgDb, CFG_VLAN_INTF_TABLE_NAME) + m_cfgVlanInterfaceTable(cfgDb, CFG_VLAN_INTF_TABLE_NAME), + m_cfgPeerSwitchTable(cfgDb, CFG_PEER_SWITCH_TABLE_NAME) { m_AppRestartAssist = new AppRestartAssist(pipelineAppDB, "neighsyncd", "swss", DEFAULT_NEIGHSYNC_WARMSTART_TIMER); if (m_AppRestartAssist) @@ -62,6 +63,9 @@ void NeighSync::onMsg(int nlmsg_type, struct nl_object *obj) string key; string family; string intfName; + std::vector peerSwitchKeys; + m_cfgPeerSwitchTable.getKeys(peerSwitchKeys); + bool is_dualtor = peerSwitchKeys.size() > 0; if ((nlmsg_type != RTM_NEWNEIGH) && (nlmsg_type != RTM_GETNEIGH) && (nlmsg_type != RTM_DELNEIGH)) @@ -79,6 +83,16 @@ void NeighSync::onMsg(int nlmsg_type, struct nl_object *obj) key+= ":"; nl_addr2str(rtnl_neigh_get_dst(neigh), ipStr, MAX_ADDR_SIZE); + + /* Ignore IPv4 link-local addresses as neighbors if subtype is dualtor */ + IpAddress ipAddress(ipStr); + if (family == IPV4_NAME && ipAddress.getAddrScope() == IpAddress::AddrScope::LINK_SCOPE && is_dualtor) + { + SWSS_LOG_INFO("Link Local address received on dualtor, ignoring for %s", ipStr); + return; + } + + /* Ignore IPv6 link-local addresses as neighbors, if ipv6 link local mode is disabled */ if (family == IPV6_NAME && IN6_IS_ADDR_LINKLOCAL(nl_addr_get_binary_addr(rtnl_neigh_get_dst(neigh)))) { @@ -99,13 +113,41 @@ void NeighSync::onMsg(int nlmsg_type, struct nl_object *obj) } bool delete_key = false; - if ((nlmsg_type == RTM_DELNEIGH) || (state == NUD_INCOMPLETE) || - (state == NUD_FAILED)) + bool use_zero_mac = false; + if (is_dualtor && (state == NUD_INCOMPLETE || state == NUD_FAILED)) + { + SWSS_LOG_INFO("Unable to resolve %s, setting zero MAC", key.c_str()); + use_zero_mac = true; + + // Unresolved neighbor deletion on dual ToR devices must be handled + // separately, otherwise delete_key is never set to true + 
// and neighorch is never able to remove the neighbor + if (nlmsg_type == RTM_DELNEIGH) + { + delete_key = true; + } + } + else if ((nlmsg_type == RTM_DELNEIGH) || + (state == NUD_INCOMPLETE) || (state == NUD_FAILED)) { delete_key = true; } - nl_addr2str(rtnl_neigh_get_lladdr(neigh), macStr, MAX_ADDR_SIZE); + if (use_zero_mac) + { + std::string zero_mac = "00:00:00:00:00:00"; + strncpy(macStr, zero_mac.c_str(), zero_mac.length()); + } + else + { + nl_addr2str(rtnl_neigh_get_lladdr(neigh), macStr, MAX_ADDR_SIZE); + } + + if (!delete_key && !strncmp(macStr, "none", MAX_ADDR_SIZE)) + { + SWSS_LOG_NOTICE("Mac address is 'none' for ADD op, ignoring for %s", ipStr); + return; + } /* Ignore neighbor entries with Broadcast Mac - Trigger for directed broadcast */ if (!delete_key && (MacAddress(macStr) == MacAddress("ff:ff:ff:ff:ff:ff"))) diff --git a/neighsyncd/neighsync.h b/neighsyncd/neighsync.h index 49a17ee6b6..8f25ee16c8 100644 --- a/neighsyncd/neighsync.h +++ b/neighsyncd/neighsync.h @@ -14,7 +14,7 @@ * service to finish, should be longer than the restore_neighbors timeout value (110) * This should not happen, if happens, system is in a unknown state, we should exit. 
*/ -#define RESTORE_NEIGH_WAIT_TIME_OUT 120 +#define RESTORE_NEIGH_WAIT_TIME_OUT 180 namespace swss { @@ -36,7 +36,7 @@ class NeighSync : public NetMsg } private: - Table m_stateNeighRestoreTable; + Table m_stateNeighRestoreTable, m_cfgPeerSwitchTable; ProducerStateTable m_neighTable; AppRestartAssist *m_AppRestartAssist; Table m_cfgVlanInterfaceTable, m_cfgLagInterfaceTable, m_cfgInterfaceTable; diff --git a/neighsyncd/restore_neighbors.py b/neighsyncd/restore_neighbors.py index fac7b1f2df..19be323b7e 100755 --- a/neighsyncd/restore_neighbors.py +++ b/neighsyncd/restore_neighbors.py @@ -13,7 +13,6 @@ """ import sys -import swsssdk import netifaces import time from pyroute2 import IPRoute, NetlinkError @@ -81,21 +80,29 @@ def is_intf_oper_state_up(intf): return True return False -def is_intf_up(intf, db): - if not is_intf_oper_state_up(intf): - return False +def check_state_db(intf, db): + table_name = '' if 'Vlan' in intf: table_name = 'VLAN_MEMBER_TABLE|{}|*'.format(intf) - key = db.keys(db.STATE_DB, table_name) - if key is None: - log_info ("Vlan member is not yet created") - return False - if is_intf_up.counter == 0: - time.sleep(3*CHECK_INTERVAL) - is_intf_up.counter = 1 - log_info ("intf {} is up".format(intf)) + elif 'PortChannel' in intf: + table_name = 'LAG_MEMBER_TABLE|{}|*'.format(intf) + else: + return True + key = db.keys(db.STATE_DB, table_name) + if key is None: + log_info ("members for {} are not yet created".format(intf)) + return False + if is_intf_up.counter == 0: + time.sleep(3*CHECK_INTERVAL) + is_intf_up.counter = 1 + log_info ("intf {} is up".format(intf)) return True +def is_intf_up(intf, db): + if not is_intf_oper_state_up(intf): + return False + return check_state_db(intf, db) + # read the neigh table from AppDB to memory, format as below # build map as below, this can efficiently access intf and family groups later # { intf1 -> { { family1 -> [[ip1, mac1], [ip2, mac2] ...] 
} @@ -117,7 +124,7 @@ def is_intf_up(intf, db): # 2, need check interface state twice due to the split map def read_neigh_table_to_maps(): - db = swsssdk.SonicV2Connector(host='127.0.0.1') + db = swsscommon.SonicV2Connector(host='127.0.0.1') db.connect(db.APPL_DB, False) intf_neigh_map = {} @@ -207,7 +214,7 @@ def build_arp_ns_pkt(family, smac, src_ip, dst_ip): # Set the statedb "NEIGH_RESTORE_TABLE|Flags", so neighsyncd can start reconciliation def set_statedb_neigh_restore_done(): - db = swsssdk.SonicV2Connector(host='127.0.0.1') + db = swsscommon.SonicV2Connector(host='127.0.0.1') db.connect(db.STATE_DB, False) db.set(db.STATE_DB, 'NEIGH_RESTORE_TABLE|Flags', 'restored', 'true') db.close(db.STATE_DB) @@ -228,7 +235,7 @@ def restore_update_kernel_neighbors(intf_neigh_map, timeout=DEF_TIME_OUT): ipclass = IPRoute() start_time = time.monotonic() is_intf_up.counter = 0 - db = swsssdk.SonicV2Connector(host='127.0.0.1') + db = swsscommon.SonicV2Connector(host='127.0.0.1') db.connect(db.STATE_DB, False) while (time.monotonic() - start_time) < timeout: for intf, family_neigh_map in list(intf_neigh_map.items()): diff --git a/orchagent/Makefile.am b/orchagent/Makefile.am index 68aa474552..ee313d3b5c 100644 --- a/orchagent/Makefile.am +++ b/orchagent/Makefile.am @@ -1,11 +1,17 @@ INCLUDES = -I $(top_srcdir)/lib \ -I $(top_srcdir) \ -I $(top_srcdir)/warmrestart \ + -I switch \ -I flex_counter \ -I debug_counter \ + -I port \ -I pbh \ -I nhg +if GCOV_ENABLED +SUBDIRS = p4orch/tests +endif + CFLAGS_SAI = -I /usr/include/sai swssdir = $(datadir)/swss @@ -18,7 +24,7 @@ dist_swss_DATA = \ pfc_detect_barefoot.lua \ pfc_detect_nephos.lua \ pfc_detect_cisco-8000.lua \ - pfc_detect_vs.lua \ + pfc_detect_vs.lua \ pfc_restore.lua \ pfc_restore_cisco-8000.lua \ port_rates.lua \ @@ -41,6 +47,7 @@ orchagent_SOURCES = \ main.cpp \ $(top_srcdir)/lib/gearboxutils.cpp \ $(top_srcdir)/lib/subintf.cpp \ + $(top_srcdir)/lib/recorder.cpp \ orchdaemon.cpp \ orch.cpp \ notifications.cpp \ @@ -52,6 
+59,7 @@ orchagent_SOURCES = \ mplsrouteorch.cpp \ neighorch.cpp \ intfsorch.cpp \ + port/porthlpr.cpp \ portsorch.cpp \ fabricportsorch.cpp \ fgnhgorch.cpp \ @@ -62,12 +70,15 @@ orchagent_SOURCES = \ mirrororch.cpp \ fdborch.cpp \ aclorch.cpp \ + pbh/pbhcap.cpp \ pbh/pbhcnt.cpp \ pbh/pbhmgr.cpp \ pbh/pbhrule.cpp \ pbhorch.cpp \ saihelper.cpp \ saiattr.cpp \ + switch/switch_capabilities.cpp \ + switch/switch_helper.cpp \ switchorch.cpp \ pfcwdorch.cpp \ pfcactionhandler.cpp \ @@ -92,14 +103,17 @@ orchagent_SOURCES = \ lagid.cpp \ bfdorch.cpp \ srv6orch.cpp \ - response_publisher.cpp + response_publisher.cpp \ + nvgreorch.cpp -orchagent_SOURCES += flex_counter/flex_counter_manager.cpp flex_counter/flex_counter_stat_manager.cpp flex_counter/flow_counter_handler.cpp +orchagent_SOURCES += flex_counter/flex_counter_manager.cpp flex_counter/flex_counter_stat_manager.cpp flex_counter/flow_counter_handler.cpp flex_counter/flowcounterrouteorch.cpp orchagent_SOURCES += debug_counter/debug_counter.cpp debug_counter/drop_counter.cpp orchagent_SOURCES += p4orch/p4orch.cpp \ p4orch/p4orch_util.cpp \ p4orch/p4oidmapper.cpp \ + p4orch/tables_definition_manager.cpp \ p4orch/router_interface_manager.cpp \ + p4orch/gre_tunnel_manager.cpp \ p4orch/neighbor_manager.cpp \ p4orch/next_hop_manager.cpp \ p4orch/route_manager.cpp \ @@ -107,23 +121,32 @@ orchagent_SOURCES += p4orch/p4orch.cpp \ p4orch/acl_table_manager.cpp \ p4orch/acl_rule_manager.cpp \ p4orch/wcmp_manager.cpp \ - p4orch/mirror_session_manager.cpp + p4orch/mirror_session_manager.cpp \ + p4orch/l3_admit_manager.cpp \ + p4orch/ext_tables_manager.cpp -orchagent_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) -orchagent_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) -orchagent_LDADD = -lnl-3 -lnl-route-3 -lpthread -lsairedis -lsaimeta -lsaimetadata -lswsscommon -lzmq +orchagent_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +orchagent_CPPFLAGS = $(DBGFLAGS) 
$(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +orchagent_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lpthread -lsairedis -lsaimeta -lsaimetadata -lswsscommon -lzmq routeresync_SOURCES = routeresync.cpp -routeresync_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) -routeresync_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) -routeresync_LDADD = -lswsscommon +routeresync_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) +routeresync_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) +routeresync_LDADD = $(LDFLAGS_ASAN) -lswsscommon orchagent_restart_check_SOURCES = orchagent_restart_check.cpp -orchagent_restart_check_CPPFLAGS = $(DBGFLAGS) $(AM_CPPFLAGS) $(CFLAGS_COMMON) -orchagent_restart_check_LDADD = -lhiredis -lswsscommon -lpthread +orchagent_restart_check_CPPFLAGS = $(DBGFLAGS) $(AM_CPPFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) +orchagent_restart_check_LDADD = $(LDFLAGS_ASAN) -lhiredis -lswsscommon -lpthread if GCOV_ENABLED orchagent_LDADD += -lgcovpreload routeresync_LDADD += -lgcovpreload orchagent_restart_check_LDADD += -lgcovpreload endif + +if ASAN_ENABLED +orchagent_SOURCES += $(top_srcdir)/lib/asan.cpp +routeresync_SOURCES += $(top_srcdir)/lib/asan.cpp +orchagent_restart_check_SOURCES += $(top_srcdir)/lib/asan.cpp +endif + diff --git a/orchagent/aclorch.cpp b/orchagent/aclorch.cpp index c3a9f23ec7..d12bdee7e9 100644 --- a/orchagent/aclorch.cpp +++ b/orchagent/aclorch.cpp @@ -34,6 +34,7 @@ extern CrmOrch *gCrmOrch; #define STATE_DB_ACL_ACTION_FIELD_IS_ACTION_LIST_MANDATORY "is_action_list_mandatory" #define STATE_DB_ACL_ACTION_FIELD_ACTION_LIST "action_list" +#define STATE_DB_ACL_L3V4V6_SUPPORTED "supported_L3V4V6" #define COUNTERS_ACL_COUNTER_RULE_MAP "ACL_COUNTER_RULE_MAP" #define ACL_COUNTER_DEFAULT_POLLING_INTERVAL_MS 10000 // ms @@ -69,7 +70,9 @@ acl_rule_attr_lookup_t aclMatchLookup = { MATCH_INNER_ETHER_TYPE, SAI_ACL_ENTRY_ATTR_FIELD_INNER_ETHER_TYPE }, { MATCH_INNER_IP_PROTOCOL, 
SAI_ACL_ENTRY_ATTR_FIELD_INNER_IP_PROTOCOL }, { MATCH_INNER_L4_SRC_PORT, SAI_ACL_ENTRY_ATTR_FIELD_INNER_L4_SRC_PORT }, - { MATCH_INNER_L4_DST_PORT, SAI_ACL_ENTRY_ATTR_FIELD_INNER_L4_DST_PORT } + { MATCH_INNER_L4_DST_PORT, SAI_ACL_ENTRY_ATTR_FIELD_INNER_L4_DST_PORT }, + { MATCH_BTH_OPCODE, SAI_ACL_ENTRY_ATTR_FIELD_BTH_OPCODE}, + { MATCH_AETH_SYNDROME, SAI_ACL_ENTRY_ATTR_FIELD_AETH_SYNDROME} }; static acl_range_type_lookup_t aclRangeTypeLookup = @@ -107,6 +110,11 @@ static acl_rule_attr_lookup_t aclDTelActionLookup = { ACTION_DTEL_REPORT_ALL_PACKETS, SAI_ACL_ENTRY_ATTR_ACTION_DTEL_REPORT_ALL_PACKETS } }; +static acl_rule_attr_lookup_t aclOtherActionLookup = +{ + { ACTION_COUNTER, SAI_ACL_ENTRY_ATTR_ACTION_COUNTER} +}; + static acl_packet_action_lookup_t aclPacketActionLookup = { { PACKET_ACTION_FORWARD, SAI_PACKET_ACTION_FORWARD }, @@ -153,6 +161,261 @@ static const acl_capabilities_t defaultAclActionsSupported = } }; +static acl_table_action_list_lookup_t defaultAclActionList = +{ + { + // L3 + TABLE_TYPE_L3, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_ACTION_TYPE_PACKET_ACTION, + SAI_ACL_ACTION_TYPE_REDIRECT + } + }, + { + ACL_STAGE_EGRESS, + { + SAI_ACL_ACTION_TYPE_PACKET_ACTION, + SAI_ACL_ACTION_TYPE_REDIRECT + } + } + } + }, + { + // L3V6 + TABLE_TYPE_L3V6, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_ACTION_TYPE_PACKET_ACTION, + SAI_ACL_ACTION_TYPE_REDIRECT + } + }, + { + ACL_STAGE_EGRESS, + { + SAI_ACL_ACTION_TYPE_PACKET_ACTION, + SAI_ACL_ACTION_TYPE_REDIRECT + } + } + } + }, + { + // L3V4V6 + TABLE_TYPE_L3V4V6, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_ACTION_TYPE_PACKET_ACTION, + SAI_ACL_ACTION_TYPE_REDIRECT + } + }, + { + ACL_STAGE_EGRESS, + { + SAI_ACL_ACTION_TYPE_PACKET_ACTION, + SAI_ACL_ACTION_TYPE_REDIRECT + } + } + } + }, + { + // MIRROR + TABLE_TYPE_MIRROR, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_ACTION_TYPE_MIRROR_INGRESS + } + }, + { + ACL_STAGE_EGRESS, + { + SAI_ACL_ACTION_TYPE_MIRROR_EGRESS + } + } + } + }, + { + // MIRRORV6 + 
TABLE_TYPE_MIRRORV6, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_ACTION_TYPE_MIRROR_INGRESS + } + }, + { + ACL_STAGE_EGRESS, + { + SAI_ACL_ACTION_TYPE_MIRROR_EGRESS + } + } + } + }, + { + // MIRROR_DSCP + TABLE_TYPE_MIRROR_DSCP, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_ACTION_TYPE_MIRROR_INGRESS + } + }, + { + ACL_STAGE_EGRESS, + { + SAI_ACL_ACTION_TYPE_MIRROR_EGRESS + } + } + } + }, + { + // TABLE_TYPE_PFCWD + TABLE_TYPE_PFCWD, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_ACTION_TYPE_PACKET_ACTION + } + }, + { + ACL_STAGE_EGRESS, + { + SAI_ACL_ACTION_TYPE_PACKET_ACTION + } + } + } + }, + { + // MCLAG + TABLE_TYPE_MCLAG, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_ACTION_TYPE_PACKET_ACTION + } + }, + { + ACL_STAGE_EGRESS, + { + SAI_ACL_ACTION_TYPE_PACKET_ACTION + } + } + } + }, + { + // MUX + TABLE_TYPE_MUX, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_ACTION_TYPE_PACKET_ACTION + } + }, + { + ACL_STAGE_EGRESS, + { + SAI_ACL_ACTION_TYPE_PACKET_ACTION + } + } + } + }, + { + // DROP + TABLE_TYPE_DROP, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_ACTION_TYPE_PACKET_ACTION + } + }, + { + ACL_STAGE_EGRESS, + { + SAI_ACL_ACTION_TYPE_PACKET_ACTION + } + } + } + } +}; + +// The match fields for certain ACL table type are not exactly the same between INGRESS and EGRESS. +// For example, we can only match IN_PORT for PFCWD table type at INGRESS. 
+// Hence we need to specify stage particular matching fields in stageMandatoryMatchFields +static acl_table_match_field_lookup_t stageMandatoryMatchFields = +{ + { + // TABLE_TYPE_PFCWD + TABLE_TYPE_PFCWD, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_TABLE_ATTR_FIELD_IN_PORTS + } + } + } + }, + { + // TABLE_TYPE_DROP + TABLE_TYPE_DROP, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_TABLE_ATTR_FIELD_IN_PORTS + } + } + } + }, + { + TABLE_TYPE_L3, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_TABLE_ATTR_FIELD_ACL_RANGE_TYPE + } + }, + { + ACL_STAGE_EGRESS, + { + SAI_ACL_TABLE_ATTR_FIELD_ACL_RANGE_TYPE + } + } + } + }, + { + TABLE_TYPE_L3V6, + { + { + ACL_STAGE_INGRESS, + { + SAI_ACL_TABLE_ATTR_FIELD_ACL_RANGE_TYPE + } + }, + { + ACL_STAGE_EGRESS, + { + SAI_ACL_TABLE_ATTR_FIELD_ACL_RANGE_TYPE + } + } + } + } +}; + static acl_ip_type_lookup_t aclIpTypeLookup = { { IP_TYPE_ANY, SAI_ACL_IP_TYPE_ANY }, @@ -173,6 +436,14 @@ static map aclCounterLookup = {SAI_ACL_COUNTER_ATTR_ENABLE_PACKET_COUNT, SAI_ACL_COUNTER_ATTR_PACKETS}, }; +static map aclObjectStatusLookup = +{ + {AclObjectStatus::ACTIVE, "Active"}, + {AclObjectStatus::INACTIVE, "Inactive"}, + {AclObjectStatus::PENDING_CREATION, "Pending creation"}, + {AclObjectStatus::PENDING_REMOVAL, "Pending removal"} +}; + static sai_acl_table_attr_t AclEntryFieldToAclTableField(sai_acl_entry_attr_t attr) { if (!IS_ATTR_ID_IN_RANGE(attr, ACL_ENTRY, FIELD)) @@ -301,6 +572,18 @@ const set& AclTableType::getActions() const return m_aclAcitons; } +bool AclTableType::addAction(sai_acl_action_type_t action) +{ + m_aclAcitons.insert(action); + return true; +} + +bool AclTableType::addMatch(shared_ptr match) +{ + m_matches.emplace(match->getId(), match); + return true; +} + AclTableTypeBuilder& AclTableTypeBuilder::withName(string name) { m_tableType.m_name = name; @@ -421,6 +704,7 @@ bool AclTableTypeParser::parseAclTableTypeActions(const std::string& value, AclT auto l3Action = aclL3ActionLookup.find(action); auto mirrorAction = 
aclMirrorStageLookup.find(action); auto dtelAction = aclDTelActionLookup.find(action); + auto otherAction = aclOtherActionLookup.find(action); if (l3Action != aclL3ActionLookup.end()) { @@ -434,11 +718,16 @@ bool AclTableTypeParser::parseAclTableTypeActions(const std::string& value, AclT { saiActionAttr = dtelAction->second; } + else if (otherAction != aclOtherActionLookup.end()) + { + saiActionAttr = otherAction->second; + } else { SWSS_LOG_ERROR("Unknown action %s", action.c_str()); return false; } + SWSS_LOG_INFO("Added action %s", action.c_str()); builder.withAction(AclEntryActionToAclAction(saiActionAttr)); } @@ -712,6 +1001,36 @@ bool AclRule::validateAddMatch(string attr_name, string attr_value) matchData.data.u8 = to_uint(attr_value); matchData.mask.u8 = 0xFF; } + else if (attr_name == MATCH_BTH_OPCODE) + { + auto opcode_data = tokenize(attr_value, '/'); + + if (opcode_data.size() == 2) + { + matchData.data.u8 = to_uint(opcode_data[0]); + matchData.mask.u8 = to_uint(opcode_data[1]); + } + else + { + SWSS_LOG_ERROR("Invalid BTH_OPCODE configuration: %s, expected format /", attr_value.c_str()); + return false; + } + } + else if (attr_name == MATCH_AETH_SYNDROME) + { + auto syndrome_data = tokenize(attr_value, '/'); + + if (syndrome_data.size() == 2) + { + matchData.data.u8 = to_uint(syndrome_data[0]); + matchData.mask.u8 = to_uint(syndrome_data[1]); + } + else + { + SWSS_LOG_ERROR("Invalid AETH_SYNDROME configuration: %s, expected format /", attr_value.c_str()); + return false; + } + } } catch (exception &e) { @@ -842,13 +1161,21 @@ bool AclRule::createRule() status = sai_acl_api->create_acl_entry(&m_ruleOid, gSwitchId, (uint32_t)rule_attrs.size(), rule_attrs.data()); if (status != SAI_STATUS_SUCCESS) { + if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) + { + SWSS_LOG_NOTICE("ACL rule %s already exists", m_id.c_str()); + return true; + } SWSS_LOG_ERROR("Failed to create ACL rule %s, rv:%d", m_id.c_str(), status); AclRange::remove(range_objects, 
range_object_list.count); decreaseNextHopRefCount(); } - gCrmOrch->incCrmAclTableUsedCounter(CrmResourceType::CRM_ACL_ENTRY, m_pTable->getOid()); + if (status == SAI_STATUS_SUCCESS) + { + gCrmOrch->incCrmAclTableUsedCounter(CrmResourceType::CRM_ACL_ENTRY, m_pTable->getOid()); + } return (status == SAI_STATUS_SUCCESS); } @@ -900,6 +1227,12 @@ bool AclRule::removeRule() auto status = sai_acl_api->remove_acl_entry(m_ruleOid); if (status != SAI_STATUS_SUCCESS) { + if (status == SAI_STATUS_ITEM_NOT_FOUND) + { + SWSS_LOG_NOTICE("ACL rule already deleted"); + m_ruleOid = SAI_NULL_OBJECT_ID; + return true; + } SWSS_LOG_ERROR("Failed to delete ACL rule, status %s", sai_serialize_status(status).c_str()); return false; } @@ -984,9 +1317,13 @@ bool AclRule::updateCounter(const AclRule& updatedRule) { return false; } + + m_pAclOrch->registerFlexCounter(*this); } else { + m_pAclOrch->deregisterFlexCounter(*this); + if (!disableCounter()) { return false; @@ -1256,6 +1593,11 @@ const vector& AclRule::getRangeConfig() const return m_rangeConfig; } +bool AclRule::getCreateCounter() const +{ + return m_createCounter; +} + shared_ptr AclRule::makeShared(AclOrch *acl, MirrorOrch *mirror, DTelOrch *dtel, const string& rule, const string& table, const KeyOpFieldsValuesTuple& data) { shared_ptr aclRule; @@ -1272,23 +1614,14 @@ shared_ptr AclRule::makeShared(AclOrch *acl, MirrorOrch *mirror, DTelOr { return make_shared(acl, rule, table); } - else if (aclDTelFlowOpTypeLookup.find(action) != aclDTelFlowOpTypeLookup.cend()) + else if (aclDTelActionLookup.find(action) != aclDTelActionLookup.cend()) { if (!dtel) { throw runtime_error("DTel feature is not enabled. 
Watchlists cannot be configured"); } - if (action == ACTION_DTEL_DROP_REPORT_ENABLE || - action == ACTION_DTEL_TAIL_DROP_REPORT_ENABLE || - action == ACTION_DTEL_REPORT_ALL_PACKETS) - { - return make_shared(acl, dtel, rule, table); - } - else - { - return make_shared(acl, dtel, rule, table); - } + return make_shared(acl, dtel, rule, table); } } @@ -1412,6 +1745,13 @@ bool AclRule::createCounter() bool AclRule::removeRanges() { SWSS_LOG_ENTER(); + if (!m_ranges.size()) + { + //The Acl Rules which have mirror action will not have ranges created till the mirror becomes active + SWSS_LOG_INFO("No Acl Range created for ACL Rule %s in table %s", m_id.c_str(), m_pTable->getId().c_str()); + return true; + } + for (const auto& rangeConfig: m_rangeConfig) { if (!AclRange::remove(rangeConfig.rangeType, rangeConfig.min, rangeConfig.max)) @@ -1712,6 +2052,16 @@ bool AclRuleMirror::activate() setAction(it.first, attr.value.aclaction); } + // If the rule with mirror action is removed and then mirror is activated, create the counter before rule is created + if (!hasCounter()) + { + if (getCreateCounter() && !createCounter()) + { + SWSS_LOG_ERROR("createCounter failed for Rule %s session %s", m_id.c_str(), m_sessionName.c_str()); + return false; + } + } + if (!AclRule::createRule()) { return false; @@ -1801,6 +2151,109 @@ AclTable::AclTable(AclOrch *pAclOrch) noexcept : m_pAclOrch(pAclOrch) } +bool AclTable::addMandatoryActions() +{ + SWSS_LOG_ENTER(); + + if (stage == ACL_STAGE_UNKNOWN) + { + return false; + } + + if (!m_pAclOrch->isAclActionListMandatoryOnTableCreation(stage)) + { + // No op if action list is not mandatory on table creation. 
+ return true; + } + if (!type.getActions().empty()) + { + // No change if action_list is provided + return true; + } + + sai_acl_action_type_t acl_action = SAI_ACL_ACTION_TYPE_COUNTER; + if (m_pAclOrch->isAclActionSupported(stage, acl_action)) + { + SWSS_LOG_INFO("Add counter acl action"); + type.addAction(acl_action); + } + + if (defaultAclActionList.count(type.getName()) != 0) + { + // Add the default action list + for (auto action : defaultAclActionList[type.getName()][stage]) + { + if (m_pAclOrch->isAclActionSupported(stage, action)) + { + SWSS_LOG_INFO("Added default action for table type %s stage %s", + type.getName().c_str(), + ((stage == ACL_STAGE_INGRESS)? "INGRESS":"EGRESS")); + type.addAction(action); + } + } + } + + return true; +} + +bool AclTable::addStageMandatoryRangeFields() +{ + SWSS_LOG_ENTER(); + + string platform = getenv("platform") ? getenv("platform") : ""; + auto match = SAI_ACL_TABLE_ATTR_FIELD_ACL_RANGE_TYPE; + + if ((platform == BRCM_PLATFORM_SUBSTRING) && + (stage == ACL_STAGE_EGRESS)) + { + return false; + } + + type.addMatch(make_shared(set{ + {SAI_ACL_RANGE_TYPE_L4_SRC_PORT_RANGE, SAI_ACL_RANGE_TYPE_L4_DST_PORT_RANGE}})); + SWSS_LOG_INFO("Added mandatory match field %s for table type %s stage %d", + sai_serialize_enum(match, &sai_metadata_enum_sai_acl_table_attr_t).c_str(), + type.getName().c_str(), stage); + + return true; +} + + +bool AclTable::addStageMandatoryMatchFields() +{ + SWSS_LOG_ENTER(); + + if (stage == ACL_STAGE_UNKNOWN) + { + return false; + } + + if (stageMandatoryMatchFields.count(type.getName()) != 0) + { + auto &fields_for_stage = stageMandatoryMatchFields[type.getName()]; + if (fields_for_stage.count(stage) != 0) + { + // Add the stage particular matching fields + for (auto match : fields_for_stage[stage]) + { + if (match != SAI_ACL_TABLE_ATTR_FIELD_ACL_RANGE_TYPE) + { + type.addMatch(make_shared(match)); + SWSS_LOG_INFO("Added mandatory match field %s for table type %s stage %d", + sai_serialize_enum(match, 
&sai_metadata_enum_sai_acl_table_attr_t).c_str(), + type.getName().c_str(), stage); + } + else + { + addStageMandatoryRangeFields(); + } + } + } + } + + return true; +} + bool AclTable::validateAddType(const AclTableType &tableType) { SWSS_LOG_ENTER(); @@ -1871,6 +2324,19 @@ bool AclTable::validate() return false; } + if (type.getName() == TABLE_TYPE_L3V4V6) + { + if (!m_pAclOrch->isAclL3V4V6TableSupported(stage)) + { + + SWSS_LOG_ERROR("Table %s: table type %s in stage %d not supported on this platform.", + id.c_str(), type.getName().c_str(), stage); + return false; + } + } + + + if (m_pAclOrch->isAclActionListMandatoryOnTableCreation(stage)) { if (type.getActions().empty()) @@ -2207,6 +2673,12 @@ bool AclTable::clear() for (auto& rulepair: rules) { auto& rule = *rulepair.second; + + if (rule.hasCounter()) + { + m_pAclOrch->deregisterFlexCounter(rule); + } + bool suc = rule.remove(); if (!suc) { @@ -2219,13 +2691,13 @@ bool AclTable::clear() return true; } -AclRuleDTelFlowWatchListEntry::AclRuleDTelFlowWatchListEntry(AclOrch *aclOrch, DTelOrch *dtel, string rule, string table) : +AclRuleDTelWatchListEntry::AclRuleDTelWatchListEntry(AclOrch *aclOrch, DTelOrch *dtel, string rule, string table) : AclRule(aclOrch, rule, table), m_pDTelOrch(dtel) { } -bool AclRuleDTelFlowWatchListEntry::validateAddAction(string attr_name, string attr_val) +bool AclRuleDTelWatchListEntry::validateAddAction(string attr_name, string attr_val) { SWSS_LOG_ENTER(); @@ -2307,7 +2779,7 @@ bool AclRuleDTelFlowWatchListEntry::validateAddAction(string attr_name, string a return setAction(aclDTelActionLookup[attr_name], actionData); } -bool AclRuleDTelFlowWatchListEntry::validate() +bool AclRuleDTelWatchListEntry::validate() { SWSS_LOG_ENTER(); @@ -2324,19 +2796,19 @@ bool AclRuleDTelFlowWatchListEntry::validate() return true; } -bool AclRuleDTelFlowWatchListEntry::createRule() +bool AclRuleDTelWatchListEntry::createRule() { SWSS_LOG_ENTER(); return activate(); } -bool 
AclRuleDTelFlowWatchListEntry::removeRule() +bool AclRuleDTelWatchListEntry::removeRule() { return deactivate(); } -bool AclRuleDTelFlowWatchListEntry::activate() +bool AclRuleDTelWatchListEntry::activate() { SWSS_LOG_ENTER(); @@ -2353,7 +2825,7 @@ bool AclRuleDTelFlowWatchListEntry::activate() return AclRule::createRule(); } -bool AclRuleDTelFlowWatchListEntry::deactivate() +bool AclRuleDTelWatchListEntry::deactivate() { SWSS_LOG_ENTER(); @@ -2384,7 +2856,7 @@ bool AclRuleDTelFlowWatchListEntry::deactivate() return true; } -void AclRuleDTelFlowWatchListEntry::onUpdate(SubjectType type, void *cntx) +void AclRuleDTelWatchListEntry::onUpdate(SubjectType type, void *cntx) { sai_acl_action_data_t actionData; sai_object_id_t session_oid = SAI_NULL_OBJECT_ID; @@ -2445,72 +2917,19 @@ void AclRuleDTelFlowWatchListEntry::onUpdate(SubjectType type, void *cntx) } } -bool AclRuleDTelFlowWatchListEntry::update(const AclRule& rule) +bool AclRuleDTelWatchListEntry::update(const AclRule& rule) { - auto dtelDropWathcListRule = dynamic_cast(&rule); - if (!dtelDropWathcListRule) + auto dtelWatchListRule = dynamic_cast(&rule); + if (!dtelWatchListRule) { - SWSS_LOG_ERROR("Cannot update DTEL flow watch list rule with a rule of a different type"); + SWSS_LOG_ERROR("Cannot update DTEL watch list rule with a rule of a different type"); return false; } - SWSS_LOG_ERROR("Updating DTEL flow watch list rule is currently not implemented"); + SWSS_LOG_ERROR("Updating DTEL watch list rule is currently not implemented"); return false; } -AclRuleDTelDropWatchListEntry::AclRuleDTelDropWatchListEntry(AclOrch *aclOrch, DTelOrch *dtel, string rule, string table) : - AclRule(aclOrch, rule, table), - m_pDTelOrch(dtel) -{ -} - -bool AclRuleDTelDropWatchListEntry::validateAddAction(string attr_name, string attr_val) -{ - SWSS_LOG_ENTER(); - - if (!m_pDTelOrch) - { - return false; - } - - sai_acl_action_data_t actionData; - string attr_value = to_upper(attr_val); - - if (attr_name != 
ACTION_DTEL_DROP_REPORT_ENABLE && - attr_name != ACTION_DTEL_TAIL_DROP_REPORT_ENABLE && - attr_name != ACTION_DTEL_REPORT_ALL_PACKETS) - { - return false; - } - - actionData.parameter.booldata = (attr_value == DTEL_ENABLED) ? true : false; - actionData.enable = (attr_value == DTEL_ENABLED) ? true : false; - - return setAction(aclDTelActionLookup[attr_name], actionData); -} - -bool AclRuleDTelDropWatchListEntry::validate() -{ - SWSS_LOG_ENTER(); - - if (!m_pDTelOrch) - { - return false; - } - - if ((m_rangeConfig.empty() && m_matches.empty()) || m_actions.size() == 0) - { - return false; - } - - return true; -} - -void AclRuleDTelDropWatchListEntry::onUpdate(SubjectType, void *) -{ - // Do nothing -} - AclRange::AclRange(sai_acl_range_type_t type, sai_object_id_t oid, int min, int max): m_oid(oid), m_refCnt(0), m_min(min), m_max(max), m_type(type) { @@ -2640,11 +3059,16 @@ void AclOrch::init(vector& connectors, PortsOrch *portOrch, Mirr { SWSS_LOG_ENTER(); + // Clear ACL_TABLE and ACL_RULE status from STATE_DB + removeAllAclTableStatus(); + removeAllAclRuleStatus(); + // TODO: Query SAI to get mirror table capabilities // Right now, verified platforms that support mirroring IPv6 packets are // Broadcom and Mellanox. Virtual switch is also supported for testing // purposes. string platform = getenv("platform") ? getenv("platform") : ""; + string sub_platform = getenv("sub_platform") ? 
getenv("sub_platform") : ""; if (platform == BRCM_PLATFORM_SUBSTRING || platform == CISCO_8000_PLATFORM_SUBSTRING || platform == MLNX_PLATFORM_SUBSTRING || @@ -2652,6 +3076,7 @@ void AclOrch::init(vector& connectors, PortsOrch *portOrch, Mirr platform == MRVL_PLATFORM_SUBSTRING || platform == INVM_PLATFORM_SUBSTRING || platform == NPS_PLATFORM_SUBSTRING || + platform == XS_PLATFORM_SUBSTRING || platform == VS_PLATFORM_SUBSTRING) { m_mirrorTableCapabilities = @@ -2669,16 +3094,44 @@ void AclOrch::init(vector& connectors, PortsOrch *portOrch, Mirr }; } + if ( platform == MRVL_PLATFORM_SUBSTRING || + platform == INVM_PLATFORM_SUBSTRING || + platform == VS_PLATFORM_SUBSTRING) + { + m_L3V4V6Capability = + { + {ACL_STAGE_INGRESS, true}, + {ACL_STAGE_EGRESS, true}, + }; + } + else + { + m_L3V4V6Capability = + { + {ACL_STAGE_INGRESS, false}, + {ACL_STAGE_EGRESS, false}, + }; + + } + + SWSS_LOG_NOTICE("%s switch capability:", platform.c_str()); SWSS_LOG_NOTICE(" TABLE_TYPE_MIRROR: %s", m_mirrorTableCapabilities[TABLE_TYPE_MIRROR] ? "yes" : "no"); SWSS_LOG_NOTICE(" TABLE_TYPE_MIRRORV6: %s", m_mirrorTableCapabilities[TABLE_TYPE_MIRRORV6] ? "yes" : "no"); + SWSS_LOG_NOTICE(" TABLE_TYPE_L3V4V6: Ingress [%s], Egress [%s]", + m_L3V4V6Capability[ACL_STAGE_INGRESS] ? "yes" : "no", + m_L3V4V6Capability[ACL_STAGE_EGRESS] ? 
"yes" : "no"); + // In Mellanox platform, V4 and V6 rules are stored in different tables + // In Broadcom DNX platform also, V4 and V6 rules are stored in different tables if (platform == MLNX_PLATFORM_SUBSTRING || platform == CISCO_8000_PLATFORM_SUBSTRING || - platform == MRVL_PLATFORM_SUBSTRING) + platform == MRVL_PLATFORM_SUBSTRING || + platform == XS_PLATFORM_SUBSTRING || + (platform == BRCM_PLATFORM_SUBSTRING && sub_platform == BRCM_DNX_PLATFORM_SUBSTRING)) { m_isCombinedMirrorV6Table = false; } @@ -2765,8 +3218,6 @@ void AclOrch::initDefaultTableTypes() .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_SRC_PORT)) .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_DST_PORT)) .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_TCP_FLAGS)) - .withMatch(make_shared(set{ - {SAI_ACL_RANGE_TYPE_L4_SRC_PORT_RANGE, SAI_ACL_RANGE_TYPE_L4_DST_PORT_RANGE}})) .build() ); @@ -2784,8 +3235,31 @@ void AclOrch::initDefaultTableTypes() .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_SRC_PORT)) .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_DST_PORT)) .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_TCP_FLAGS)) - .withMatch(make_shared(set{ - {SAI_ACL_RANGE_TYPE_L4_SRC_PORT_RANGE, SAI_ACL_RANGE_TYPE_L4_DST_PORT_RANGE}})) + .build() + ); + + + addAclTableType( + builder.withName(TABLE_TYPE_L3V4V6) + .withBindPointType(SAI_ACL_BIND_POINT_TYPE_PORT) + .withBindPointType(SAI_ACL_BIND_POINT_TYPE_LAG) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ETHER_TYPE)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_OUTER_VLAN_ID)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ACL_IP_TYPE)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_SRC_IP)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_DST_IP)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ICMP_TYPE)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ICMP_CODE)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_IP_PROTOCOL)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_SRC_IPV6)) + 
.withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_DST_IPV6)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ICMPV6_CODE)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ICMPV6_TYPE)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_IPV6_NEXT_HEADER)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_SRC_PORT)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_DST_PORT)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_TCP_FLAGS)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_IN_PORTS)) .build() ); @@ -2814,13 +3288,13 @@ void AclOrch::initDefaultTableTypes() builder.withName(TABLE_TYPE_PFCWD) .withBindPointType(SAI_ACL_BIND_POINT_TYPE_PORT) .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_TC)) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_IN_PORTS)) .build() ); addAclTableType( builder.withName(TABLE_TYPE_DROP) .withBindPointType(SAI_ACL_BIND_POINT_TYPE_PORT) + .withBindPointType(SAI_ACL_BIND_POINT_TYPE_LAG) .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_TC)) .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_IN_PORTS)) .build() @@ -3030,10 +3504,21 @@ void AclOrch::putAclActionCapabilityInDB(acl_stage_type_t stage) } } + is_action_list_mandatory_stream << boolalpha << capabilities.isActionListMandatoryOnTableCreation; fvVector.emplace_back(STATE_DB_ACL_ACTION_FIELD_IS_ACTION_LIST_MANDATORY, is_action_list_mandatory_stream.str()); fvVector.emplace_back(STATE_DB_ACL_ACTION_FIELD_ACTION_LIST, acl_action_value_stream.str()); + + for (auto const& it : m_L3V4V6Capability) + { + string value = it.second ? 
"true" : "false"; + if (it.first == stage) + { + fvVector.emplace_back(STATE_DB_ACL_L3V4V6_SUPPORTED, value); + } + } + m_aclStageCapabilityTable.set(stage_str, fvVector); } @@ -3084,8 +3569,6 @@ void AclOrch::queryAclActionAttrEnumValues(const string &action_name, SWSS_LOG_THROW("%s is not an enum", action_name.c_str()); } - // TODO: once sai object api is available make this code compile -#ifdef SAIREDIS_SUPPORT_OBJECT_API vector values_list(meta->enummetadata->valuescount); sai_s32_list_t values; values.count = static_cast(values_list.size()); @@ -3104,7 +3587,7 @@ void AclOrch::queryAclActionAttrEnumValues(const string &action_name, } else { - SWSS_LOG_WARN("Failed to query enum values supported for ACL action %s - ", + SWSS_LOG_WARN("Failed to query enum values supported for ACL action %s - " "API is not implemented, assuming all values are supported for this action", action_name.c_str()); /* assume all enum values are supported */ @@ -3113,13 +3596,6 @@ void AclOrch::queryAclActionAttrEnumValues(const string &action_name, m_aclEnumActionCapabilities[acl_action].insert(meta->enummetadata->values[i]); } } -#else - /* assume all enum values are supported until sai object api is available */ - for (size_t i = 0; i < meta->enummetadata->valuescount; i++) - { - m_aclEnumActionCapabilities[acl_action].insert(meta->enummetadata->values[i]); - } -#endif // put supported values in DB for (const auto& it: lookupMap) @@ -3143,6 +3619,8 @@ AclOrch::AclOrch(vector& connectors, DBConnector* stateDb, Switc PortsOrch *portOrch, MirrorOrch *mirrorOrch, NeighOrch *neighOrch, RouteOrch *routeOrch, DTelOrch *dtelOrch) : Orch(connectors), m_aclStageCapabilityTable(stateDb, STATE_ACL_STAGE_CAPABILITY_TABLE_NAME), + m_aclTableStateTable(stateDb, STATE_ACL_TABLE_TABLE_NAME), + m_aclRuleStateTable(stateDb, STATE_ACL_RULE_TABLE_NAME), m_switchOrch(switchOrch), m_mirrorOrch(mirrorOrch), m_neighOrch(neighOrch), @@ -3461,7 +3939,14 @@ bool AclOrch::addAclTable(AclTable &newTable) return 
true; } } + // Update matching field according to ACL stage + newTable.addStageMandatoryMatchFields(); + // Add mandatory ACL action if not present + // We need to call addMandatoryActions here because addAclTable is directly called in other orchs. + // The action_list is already added if the ACL table creation is triggered by CONFIGDD, but calling addMandatoryActions + // twice will make no effect + newTable.addMandatoryActions(); if (createBindAclTable(newTable, table_oid)) { m_AclTables[table_oid] = newTable; @@ -3617,7 +4102,9 @@ bool AclOrch::removeAclRule(string table_id, string rule_id) auto rule = getAclRule(table_id, rule_id); if (!rule) { - return false; + SWSS_LOG_NOTICE("ACL rule [%s] in table [%s] already deleted", + rule_id.c_str(), table_id.c_str()); + return true; } if (rule->hasCounter()) @@ -3830,6 +4317,16 @@ bool AclOrch::isAclActionListMandatoryOnTableCreation(acl_stage_type_t stage) co return it->second.isActionListMandatoryOnTableCreation; } +bool AclOrch::isAclL3V4V6TableSupported(acl_stage_type_t stage) const +{ + const auto& it = m_L3V4V6Capability.find(stage); + if (it == m_L3V4V6Capability.cend()) + { + return false; + } + return it->second; +} + bool AclOrch::isAclActionSupported(acl_stage_type_t stage, sai_acl_action_type_t action) const { const auto& it = m_aclCapabilities.find(stage); @@ -3936,7 +4433,8 @@ void AclOrch::doAclTableTask(Consumer &consumer) } newTable.validateAddType(*tableType); - + // Add mandatory ACL action if not present + newTable.addMandatoryActions(); // validate and create/update ACL Table if (bAllAttributesOk && newTable.validate()) { @@ -3956,6 +4454,8 @@ void AclOrch::doAclTableTask(Consumer &consumer) { SWSS_LOG_NOTICE("Successfully updated existing ACL table %s", table_id.c_str()); + // Mark ACL table as ACTIVE + setAclTableStatus(table_id, AclObjectStatus::ACTIVE); it = consumer.m_toSync.erase(it); } else @@ -3968,14 +4468,23 @@ void AclOrch::doAclTableTask(Consumer &consumer) else { if 
(addAclTable(newTable)) + { + // Mark ACL table as ACTIVE + setAclTableStatus(table_id, AclObjectStatus::ACTIVE); it = consumer.m_toSync.erase(it); + } else + { + setAclTableStatus(table_id, AclObjectStatus::PENDING_CREATION); it++; + } } } else { it = consumer.m_toSync.erase(it); + // Mark the ACL table as inactive if the configuration is invalid + setAclTableStatus(table_id, AclObjectStatus::INACTIVE); SWSS_LOG_ERROR("Failed to create ACL table %s, invalid configuration", table_id.c_str()); } @@ -3983,9 +4492,17 @@ void AclOrch::doAclTableTask(Consumer &consumer) else if (op == DEL_COMMAND) { if (removeAclTable(table_id)) + { + // Remove ACL table status from STATE_DB + removeAclTableStatus(table_id); it = consumer.m_toSync.erase(it); + } else + { + // Set the status of ACL_TABLE to pending removal if removeAclTable returns error + setAclTableStatus(table_id, AclObjectStatus::PENDING_REMOVAL); it++; + } } else { @@ -4063,6 +4580,8 @@ void AclOrch::doAclRuleTask(Consumer &consumer) } bool bHasTCPFlag = false; bool bHasIPProtocol = false; + bool bHasIPV4 = false; + bool bHasIPV6 = false; for (const auto& itr : kfvFieldsValues(t)) { string attr_name = to_upper(fvField(itr)); @@ -4073,6 +4592,14 @@ void AclOrch::doAclRuleTask(Consumer &consumer) { bHasTCPFlag = true; } + if (attr_name == MATCH_SRC_IP || attr_name == MATCH_DST_IP) + { + bHasIPV4 = true; + } + if (attr_name == MATCH_SRC_IPV6 || attr_name == MATCH_DST_IPV6) + { + bHasIPV6 = true; + } if (attr_name == MATCH_IP_PROTOCOL || attr_name == MATCH_NEXT_HEADER) { bHasIPProtocol = true; @@ -4121,26 +4648,50 @@ void AclOrch::doAclRuleTask(Consumer &consumer) } } + if (bHasIPV4 && bHasIPV6) + { + if (type == TABLE_TYPE_L3V4V6) + { + SWSS_LOG_ERROR("Rule '%s' is invalid since it has both v4 and v6 matchfields.", rule_id.c_str()); + bAllAttributesOk = false; + } + } + // validate and create ACL rule if (bAllAttributesOk && newRule->validate()) { if (addAclRule(newRule, table_id)) + { + setAclRuleStatus(table_id, 
rule_id, AclObjectStatus::ACTIVE); it = consumer.m_toSync.erase(it); + } else + { + setAclRuleStatus(table_id, rule_id, AclObjectStatus::PENDING_CREATION); it++; + } } else { it = consumer.m_toSync.erase(it); + // Mark the rule inactive if the configuration is invalid + setAclRuleStatus(table_id, rule_id, AclObjectStatus::INACTIVE); SWSS_LOG_ERROR("Failed to create ACL rule. Rule configuration is invalid"); } } else if (op == DEL_COMMAND) { if (removeAclRule(table_id, rule_id)) + { + removeAclRuleStatus(table_id, rule_id); it = consumer.m_toSync.erase(it); + } else + { + // Mark pending removal status if removeAclRule returns error + setAclRuleStatus(table_id, rule_id, AclObjectStatus::PENDING_REMOVAL); it++; + } } else { @@ -4172,10 +4723,12 @@ void AclOrch::doAclTableTypeTask(Consumer &consumer) } addAclTableType(builder.build()); + SWSS_LOG_NOTICE("Created ACL table type %s", key.c_str()); } else if (op == DEL_COMMAND) { removeAclTableType(key); + SWSS_LOG_NOTICE("Removed ACL table type %s", key.c_str()); } else { @@ -4384,11 +4937,10 @@ void AclOrch::createDTelWatchListTables() AclTableTypeBuilder builder; - AclTable flowWLTable(this, TABLE_TYPE_DTEL_FLOW_WATCHLIST); - AclTable dropWLTable(this, TABLE_TYPE_DTEL_DROP_WATCHLIST); + AclTable dtelWLTable(this, TABLE_TYPE_DTEL_FLOW_WATCHLIST); - flowWLTable.validateAddStage(ACL_STAGE_INGRESS); - flowWLTable.validateAddType(builder + dtelWLTable.validateAddStage(ACL_STAGE_INGRESS); + dtelWLTable.validateAddType(builder .withBindPointType(SAI_ACL_BIND_POINT_TYPE_SWITCH) .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ETHER_TYPE)) .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_SRC_IP)) @@ -4400,31 +4952,28 @@ void AclOrch::createDTelWatchListTables() .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_INNER_ETHER_TYPE)) .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_INNER_SRC_IP)) .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_INNER_DST_IP)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_OUTER_VLAN_ID)) + 
.withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ACL_IP_TYPE)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_TCP_FLAGS)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_DSCP)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_SRC_IPV6)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_DST_IPV6)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ICMP_TYPE)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ICMP_CODE)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ICMPV6_TYPE)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ICMPV6_CODE)) + .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_IPV6_NEXT_HEADER)) .withAction(SAI_ACL_ACTION_TYPE_ACL_DTEL_FLOW_OP) .withAction(SAI_ACL_ACTION_TYPE_DTEL_INT_SESSION) - .withAction(SAI_ACL_ACTION_TYPE_DTEL_REPORT_ALL_PACKETS) - .withAction(SAI_ACL_ACTION_TYPE_DTEL_FLOW_SAMPLE_PERCENT) - .build() - ); - flowWLTable.setDescription("Dataplane Telemetry Flow Watchlist table"); - - dropWLTable.validateAddStage(ACL_STAGE_INGRESS); - dropWLTable.validateAddType(builder - .withBindPointType(SAI_ACL_BIND_POINT_TYPE_SWITCH) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_ETHER_TYPE)) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_SRC_IP)) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_DST_IP)) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_SRC_PORT)) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_L4_DST_PORT)) - .withMatch(make_shared(SAI_ACL_TABLE_ATTR_FIELD_IP_PROTOCOL)) .withAction(SAI_ACL_ACTION_TYPE_DTEL_DROP_REPORT_ENABLE) .withAction(SAI_ACL_ACTION_TYPE_DTEL_TAIL_DROP_REPORT_ENABLE) + .withAction(SAI_ACL_ACTION_TYPE_DTEL_REPORT_ALL_PACKETS) + .withAction(SAI_ACL_ACTION_TYPE_DTEL_FLOW_SAMPLE_PERCENT) .build() ); - dropWLTable.setDescription("Dataplane Telemetry Drop Watchlist table"); + dtelWLTable.setDescription("Dataplane Telemetry Watchlist table"); - addAclTable(flowWLTable); - addAclTable(dropWLTable); + addAclTable(dtelWLTable); } void AclOrch::deleteDTelWatchListTables() @@ -4432,7 +4981,6 @@ void 
AclOrch::deleteDTelWatchListTables() SWSS_LOG_ENTER(); removeAclTable(TABLE_TYPE_DTEL_FLOW_WATCHLIST); - removeAclTable(TABLE_TYPE_DTEL_DROP_WATCHLIST); } void AclOrch::registerFlexCounter(const AclRule& rule) @@ -4462,7 +5010,7 @@ void AclOrch::registerFlexCounter(const AclRule& rule) void AclOrch::deregisterFlexCounter(const AclRule& rule) { auto ruleIdentifier = generateAclRuleIdentifierInCountersDb(rule); - m_countersDb.hdel(COUNTERS_ACL_COUNTER_RULE_MAP, rule.getId()); + m_countersDb.hdel(COUNTERS_ACL_COUNTER_RULE_MAP, ruleIdentifier); m_flex_counter_manager.clearCounterIdList(rule.getCounterOid()); } @@ -4501,3 +5049,55 @@ bool AclOrch::getAclBindPortId(Port &port, sai_object_id_t &port_id) return true; } + +// Set the status of ACL table in STATE_DB +void AclOrch::setAclTableStatus(string table_name, AclObjectStatus status) +{ + vector fvVector; + fvVector.emplace_back("status", aclObjectStatusLookup[status]); + m_aclTableStateTable.set(table_name, fvVector); +} + +// Remove the status record of given ACL table from STATE_DB +void AclOrch::removeAclTableStatus(string table_name) +{ + m_aclTableStateTable.del(table_name); +} + +// Set the status of ACL rule in STATE_DB +void AclOrch::setAclRuleStatus(string table_name, string rule_name, AclObjectStatus status) +{ + vector fvVector; + fvVector.emplace_back("status", aclObjectStatusLookup[status]); + m_aclRuleStateTable.set(table_name + string("|") + rule_name, fvVector); +} + +// Remove the status record of given ACL rule from STATE_DB +void AclOrch::removeAclRuleStatus(string table_name, string rule_name) +{ + m_aclRuleStateTable.del(table_name + string("|") + rule_name); +} + +// Remove all ACL table status from STATE_DB +void AclOrch::removeAllAclTableStatus() +{ + vector keys; + m_aclTableStateTable.getKeys(keys); + + for (auto key : keys) + { + m_aclTableStateTable.del(key); + } +} + +// Remove all ACL rule status from STATE_DB +void AclOrch::removeAllAclRuleStatus() +{ + vector keys; + 
m_aclRuleStateTable.getKeys(keys); + for (auto key : keys) + { + m_aclRuleStateTable.del(key); + } +} + diff --git a/orchagent/aclorch.h b/orchagent/aclorch.h index 9e6db3919c..f713aba3b3 100644 --- a/orchagent/aclorch.h +++ b/orchagent/aclorch.h @@ -49,6 +49,8 @@ #define MATCH_INNER_IP_PROTOCOL "INNER_IP_PROTOCOL" #define MATCH_INNER_L4_SRC_PORT "INNER_L4_SRC_PORT" #define MATCH_INNER_L4_DST_PORT "INNER_L4_DST_PORT" +#define MATCH_BTH_OPCODE "BTH_OPCODE" +#define MATCH_AETH_SYNDROME "AETH_SYNDROME" #define BIND_POINT_TYPE_PORT "PORT" #define BIND_POINT_TYPE_PORTCHANNEL "PORTCHANNEL" @@ -65,6 +67,7 @@ #define ACTION_DTEL_TAIL_DROP_REPORT_ENABLE "TAIL_DROP_REPORT_ENABLE" #define ACTION_DTEL_FLOW_SAMPLE_PERCENT "FLOW_SAMPLE_PERCENT" #define ACTION_DTEL_REPORT_ALL_PACKETS "REPORT_ALL_PACKETS" +#define ACTION_COUNTER "COUNTER" #define PACKET_ACTION_FORWARD "FORWARD" #define PACKET_ACTION_DROP "DROP" @@ -92,11 +95,20 @@ #define MLNX_MAX_RANGES_COUNT 16 #define INGRESS_TABLE_DROP "IngressTableDrop" +#define EGRESS_TABLE_DROP "EgressTableDrop" #define RULE_OPER_ADD 0 #define RULE_OPER_DELETE 1 #define ACL_COUNTER_FLEX_COUNTER_GROUP "ACL_STAT_COUNTER" +enum AclObjectStatus +{ + ACTIVE = 0, + INACTIVE, + PENDING_CREATION, + PENDING_REMOVAL +}; + struct AclActionCapabilities { set actionList; @@ -113,6 +125,12 @@ typedef tuple acl_range_properties_t; typedef map acl_capabilities_t; typedef map> acl_action_enum_values_capabilities_t; +typedef map > acl_stage_action_list_t; +typedef map acl_table_action_list_lookup_t; + +typedef map > acl_stage_match_field_t; +typedef map acl_table_match_field_lookup_t; + class AclRule; class AclTableMatchInterface @@ -156,6 +174,9 @@ class AclTableType const set& getRangeTypes() const; const set& getActions() const; + bool addAction(sai_acl_action_type_t action); + bool addMatch(shared_ptr match); + private: friend class AclTableTypeBuilder; @@ -254,6 +275,7 @@ class AclRule sai_object_id_t getCounterOid() const; bool hasCounter() const; 
vector getInPorts() const; + bool getCreateCounter() const; const vector& getRangeConfig() const; static shared_ptr makeShared(AclOrch *acl, MirrorOrch *mirror, DTelOrch *dtel, const string& rule, const string& table, const KeyOpFieldsValuesTuple&); @@ -334,10 +356,10 @@ class AclRuleMirror: public AclRule MirrorOrch *m_pMirrorOrch {nullptr}; }; -class AclRuleDTelFlowWatchListEntry: public AclRule +class AclRuleDTelWatchListEntry: public AclRule { public: - AclRuleDTelFlowWatchListEntry(AclOrch *m_pAclOrch, DTelOrch *m_pDTelOrch, string rule, string table); + AclRuleDTelWatchListEntry(AclOrch *m_pAclOrch, DTelOrch *m_pDTelOrch, string rule, string table); bool validateAddAction(string attr_name, string attr_value); bool validate(); bool createRule(); @@ -355,17 +377,6 @@ class AclRuleDTelFlowWatchListEntry: public AclRule bool INT_session_valid; }; -class AclRuleDTelDropWatchListEntry: public AclRule -{ -public: - AclRuleDTelDropWatchListEntry(AclOrch *m_pAclOrch, DTelOrch *m_pDTelOrch, string rule, string table); - bool validateAddAction(string attr_name, string attr_value); - bool validate(); - void onUpdate(SubjectType, void *) override; -protected: - DTelOrch *m_pDTelOrch; -}; - class AclTable { public: @@ -387,6 +398,15 @@ class AclTable bool validate(); bool create(); + // Add actions to ACL table if mandatory action list is required on table creation. 
+ bool addMandatoryActions(); + + // Add stage mandatory matching fields to ACL table + bool addStageMandatoryMatchFields(); + + // Add stage mandatory range fields to ACL table + bool addStageMandatoryRangeFields(); + // validate AclRule match attribute against rule and table configuration bool validateAclRuleMatch(sai_acl_entry_attr_t matchId, const AclRule& rule) const; // validate AclRule action attribute against rule and table configuration @@ -482,12 +502,17 @@ class AclOrch : public Orch, public Observer bool isAclMirrorV6Supported() const; bool isAclMirrorV4Supported() const; bool isAclMirrorTableSupported(string type) const; + bool isAclL3V4V6TableSupported(acl_stage_type_t stage) const; bool isAclActionListMandatoryOnTableCreation(acl_stage_type_t stage) const; bool isAclActionSupported(acl_stage_type_t stage, sai_acl_action_type_t action) const; bool isAclActionEnumValueSupported(sai_acl_action_type_t action, sai_acl_action_parameter_t param) const; bool m_isCombinedMirrorV6Table = true; map m_mirrorTableCapabilities; + map m_L3V4V6Capability; + + void registerFlexCounter(const AclRule& rule); + void deregisterFlexCounter(const AclRule& rule); // Get the OID for the ACL bind point for a given port static bool getAclBindPortId(Port& port, sai_object_id_t& port_id); @@ -537,10 +562,17 @@ class AclOrch : public Orch, public Observer void createDTelWatchListTables(); void deleteDTelWatchListTables(); - void registerFlexCounter(const AclRule& rule); - void deregisterFlexCounter(const AclRule& rule); string generateAclRuleIdentifierInCountersDb(const AclRule& rule) const; + void setAclTableStatus(string table_name, AclObjectStatus status); + void setAclRuleStatus(string table_name, string rule_name, AclObjectStatus status); + + void removeAclTableStatus(string table_name); + void removeAclRuleStatus(string table_name, string rule_name); + + void removeAllAclTableStatus(); + void removeAllAclRuleStatus(); + map m_AclTables; // TODO: Move all ACL tables into one 
map: name -> instance map m_ctrlAclTables; @@ -551,6 +583,9 @@ class AclOrch : public Orch, public Observer Table m_aclStageCapabilityTable; + Table m_aclTableStateTable; + Table m_aclRuleStateTable; + map m_mirrorTableId; map m_mirrorV6TableId; diff --git a/orchagent/acltable.h b/orchagent/acltable.h index 3ec7f1a757..1b1cdeb29a 100644 --- a/orchagent/acltable.h +++ b/orchagent/acltable.h @@ -25,13 +25,13 @@ extern "C" { #define TABLE_TYPE_L3 "L3" #define TABLE_TYPE_L3V6 "L3V6" +#define TABLE_TYPE_L3V4V6 "L3V4V6" #define TABLE_TYPE_MIRROR "MIRROR" #define TABLE_TYPE_MIRRORV6 "MIRRORV6" #define TABLE_TYPE_MIRROR_DSCP "MIRROR_DSCP" #define TABLE_TYPE_PFCWD "PFCWD" #define TABLE_TYPE_CTRLPLANE "CTRLPLANE" #define TABLE_TYPE_DTEL_FLOW_WATCHLIST "DTEL_FLOW_WATCHLIST" -#define TABLE_TYPE_DTEL_DROP_WATCHLIST "DTEL_DROP_WATCHLIST" #define TABLE_TYPE_MCLAG "MCLAG" #define TABLE_TYPE_MUX "MUX" #define TABLE_TYPE_DROP "DROP" diff --git a/orchagent/bfdorch.cpp b/orchagent/bfdorch.cpp index 68295842b3..25c6c20cf2 100644 --- a/orchagent/bfdorch.cpp +++ b/orchagent/bfdorch.cpp @@ -6,21 +6,26 @@ #include "notifier.h" #include "sai_serialize.h" #include "directory.h" +#include "notifications.h" using namespace std; using namespace swss; #define BFD_SESSION_DEFAULT_TX_INTERVAL 1000 #define BFD_SESSION_DEFAULT_RX_INTERVAL 1000 -#define BFD_SESSION_DEFAULT_DETECT_MULTIPLIER 3 +#define BFD_SESSION_DEFAULT_DETECT_MULTIPLIER 10 +// TOS: default 6-bit DSCP value 48, default 2-bit ecn value 0. 
48<<2 = 192 +#define BFD_SESSION_DEFAULT_TOS 192 #define BFD_SESSION_MILLISECOND_TO_MICROSECOND 1000 #define BFD_SRCPORTINIT 49152 #define BFD_SRCPORTMAX 65536 +#define NUM_BFD_SRCPORT_RETRIES 3 extern sai_bfd_api_t* sai_bfd_api; extern sai_object_id_t gSwitchId; extern sai_object_id_t gVirtualRouterId; extern PortsOrch* gPortsOrch; +extern sai_switch_api_t* sai_switch_api; extern Directory gDirectory; const map session_type_map = @@ -56,7 +61,19 @@ BfdOrch::BfdOrch(DBConnector *db, string tableName, TableConnector stateDbBfdSes DBConnector *notificationsDb = new DBConnector("ASIC_DB", 0); m_bfdStateNotificationConsumer = new swss::NotificationConsumer(notificationsDb, "NOTIFICATIONS"); auto bfdStateNotificatier = new Notifier(m_bfdStateNotificationConsumer, this, "BFD_STATE_NOTIFICATIONS"); + + // Clean up state database BFD entries + vector keys; + + m_stateBfdSessionTable.getKeys(keys); + + for (auto alias : keys) + { + m_stateBfdSessionTable.del(alias); + } + Orch::addExecutor(bfdStateNotificatier); + register_state_change_notif = false; } BfdOrch::~BfdOrch(void) @@ -152,8 +169,52 @@ void BfdOrch::doTask(NotificationConsumer &consumer) } } +bool BfdOrch::register_bfd_state_change_notification(void) +{ + sai_attribute_t attr; + sai_status_t status; + sai_attr_capability_t capability; + + status = sai_query_attribute_capability(gSwitchId, SAI_OBJECT_TYPE_SWITCH, + SAI_SWITCH_ATTR_BFD_SESSION_STATE_CHANGE_NOTIFY, + &capability); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Unable to query the BFD change notification capability"); + return false; + } + + if (!capability.set_implemented) + { + SWSS_LOG_ERROR("BFD register change notification not supported"); + return false; + } + + attr.id = SAI_SWITCH_ATTR_BFD_SESSION_STATE_CHANGE_NOTIFY; + attr.value.ptr = (void *)on_bfd_session_state_change; + + status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to register BFD 
notification handler"); + return false; + } + return true; +} + bool BfdOrch::create_bfd_session(const string& key, const vector& data) { + if (!register_state_change_notif) + { + if (!register_bfd_state_change_notification()) + { + SWSS_LOG_ERROR("BFD session for %s cannot be created", key.c_str()); + return false; + } + register_state_change_notif = true; + } if (bfd_session_map.find(key) != bfd_session_map.end()) { SWSS_LOG_ERROR("BFD session for %s already exists", key.c_str()); @@ -184,6 +245,7 @@ bool BfdOrch::create_bfd_session(const string& key, const vector(value); + } else SWSS_LOG_ERROR("Unsupported BFD attribute %s\n", fvField(i).c_str()); } @@ -247,9 +313,11 @@ bool BfdOrch::create_bfd_session(const string& key, const vectorcreate_bfd_session(&bfd_session_id, gSwitchId, (uint32_t)attrs.size(), attrs.data()); + + if (status != SAI_STATUS_SUCCESS) + { + status = retry_create_bfd_session(bfd_session_id, attrs); + } + if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to create bfd session %s, rv:%d", key.c_str(), status); @@ -392,6 +470,38 @@ bool BfdOrch::create_bfd_session(const string& key, const vector &attrs) +{ + for (uint32_t attr_idx = 0; attr_idx < (uint32_t)attrs.size(); attr_idx++) + { + if (attrs[attr_idx].id == SAI_BFD_SESSION_ATTR_UDP_SRC_PORT) + { + auto old_num = attrs[attr_idx].value.u32; + attrs[attr_idx].value.u32 = bfd_src_port(); + SWSS_LOG_WARN("BFD create using port number %d failed. 
Retrying with port number %d", + old_num, attrs[attr_idx].value.u32); + return; + } + } +} + +sai_status_t BfdOrch::retry_create_bfd_session(sai_object_id_t &bfd_session_id, vector attrs) +{ + sai_status_t status = SAI_STATUS_FAILURE; + + for (int retry = 0; retry < NUM_BFD_SRCPORT_RETRIES; retry++) + { + update_port_number(attrs); + status = sai_bfd_api->create_bfd_session(&bfd_session_id, gSwitchId, + (uint32_t)attrs.size(), attrs.data()); + if (status == SAI_STATUS_SUCCESS) + { + return status; + } + } + return status; +} + bool BfdOrch::remove_bfd_session(const string& key) { if (bfd_session_map.find(key) == bfd_session_map.end()) @@ -440,3 +550,4 @@ uint32_t BfdOrch::bfd_src_port(void) return (port++); } + diff --git a/orchagent/bfdorch.h b/orchagent/bfdorch.h index 6be1f8deae..4a0cb9edfb 100644 --- a/orchagent/bfdorch.h +++ b/orchagent/bfdorch.h @@ -26,12 +26,17 @@ class BfdOrch: public Orch, public Subject uint32_t bfd_gen_id(void); uint32_t bfd_src_port(void); + bool register_bfd_state_change_notification(void); + void update_port_number(std::vector &attrs); + sai_status_t retry_create_bfd_session(sai_object_id_t &bfd_session_id, vector attrs); + std::map bfd_session_map; std::map bfd_session_lookup; swss::Table m_stateBfdSessionTable; swss::NotificationConsumer* m_bfdStateNotificationConsumer; + bool register_state_change_notif; }; #endif /* SWSS_BFDORCH_H */ diff --git a/orchagent/bufferorch.cpp b/orchagent/bufferorch.cpp index e7204344d5..767eb8bc98 100644 --- a/orchagent/bufferorch.cpp +++ b/orchagent/bufferorch.cpp @@ -1,5 +1,6 @@ #include "tokenize.h" #include "bufferorch.h" +#include "directory.h" #include "logger.h" #include "sai_serialize.h" #include "warm_restart.h" @@ -16,7 +17,11 @@ extern sai_switch_api_t *sai_switch_api; extern sai_buffer_api_t *sai_buffer_api; extern PortsOrch *gPortsOrch; +extern Directory gDirectory; extern sai_object_id_t gSwitchId; +extern string gMySwitchType; +extern string gMyHostName; +extern string gMyAsicName; 
#define BUFFER_POOL_WATERMARK_FLEX_STAT_COUNTER_POLL_MSECS "60000" @@ -42,6 +47,9 @@ map buffer_to_ref_table_map = { {buffer_profile_list_field_name, APP_BUFFER_PROFILE_TABLE_NAME} }; +std::map> pg_port_flags; +std::map> queue_port_flags; + BufferOrch::BufferOrch(DBConnector *applDb, DBConnector *confDb, DBConnector *stateDb, vector &tableNames) : Orch(applDb, tableNames), m_flexCounterDb(new DBConnector("FLEX_COUNTER_DB", 0)), @@ -98,16 +106,32 @@ void BufferOrch::initBufferReadyLists(DBConnector *applDb, DBConnector *confDb) Table pg_table(applDb, APP_BUFFER_PG_TABLE_NAME); initBufferReadyList(pg_table, false); - Table queue_table(applDb, APP_BUFFER_QUEUE_TABLE_NAME); - initBufferReadyList(queue_table, false); + if(gMySwitchType == "voq") + { + Table queue_table(applDb, APP_BUFFER_QUEUE_TABLE_NAME); + initVoqBufferReadyList(queue_table, false); + } + else + { + Table queue_table(applDb, APP_BUFFER_QUEUE_TABLE_NAME); + initBufferReadyList(queue_table, false); + } } else { Table pg_table(confDb, CFG_BUFFER_PG_TABLE_NAME); initBufferReadyList(pg_table, true); - Table queue_table(confDb, CFG_BUFFER_QUEUE_TABLE_NAME); - initBufferReadyList(queue_table, true); + if(gMySwitchType == "voq") + { + Table queue_table(confDb, CFG_BUFFER_QUEUE_TABLE_NAME); + initVoqBufferReadyList(queue_table, true); + } + else + { + Table queue_table(confDb, CFG_BUFFER_QUEUE_TABLE_NAME); + initBufferReadyList(queue_table, true); + } } } @@ -144,6 +168,38 @@ void BufferOrch::initBufferReadyList(Table& table, bool isConfigDb) } } +void BufferOrch::initVoqBufferReadyList(Table& table, bool isConfigDb) +{ + SWSS_LOG_ENTER(); + + std::vector keys; + table.getKeys(keys); + + const char dbKeyDelimiter = (isConfigDb ? config_db_key_delimiter : delimiter); + + // populate the lists with buffer configuration information + for (const auto& key: keys) + { + auto &&tokens = tokenize(key, dbKeyDelimiter); + if (tokens.size() != 4) + { + SWSS_LOG_ERROR("Wrong format of a table '%s' key '%s'. 
Skip it", table.getTableName().c_str(), key.c_str()); + continue; + } + + // We need transform the key from config db format to appl db format + auto appldb_key = tokens[0] + config_db_key_delimiter + tokens[1] + config_db_key_delimiter + tokens[2] + delimiter + tokens[3]; + m_ready_list[appldb_key] = false; + + auto &&port_names = tokenize(tokens[0] + config_db_key_delimiter + tokens[1] + config_db_key_delimiter + tokens[2], list_item_delimiter); + for(const auto& port_name: port_names) + { + SWSS_LOG_INFO("Item %s has been inserted into ready list", appldb_key.c_str()); + m_port_ready_list_ref[port_name].push_back(appldb_key); + } + } +} + void BufferOrch::initBufferConstants() { sai_status_t status; @@ -318,14 +374,21 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) string map_type_name = APP_BUFFER_POOL_TABLE_NAME; string object_name = kfvKey(tuple); string op = kfvOp(tuple); + string xoff; SWSS_LOG_DEBUG("object name:%s", object_name.c_str()); if (m_buffer_type_maps[map_type_name]->find(object_name) != m_buffer_type_maps[map_type_name]->end()) { sai_object = (*(m_buffer_type_maps[map_type_name]))[object_name].m_saiObjectId; SWSS_LOG_DEBUG("found existing object:%s of type:%s", object_name.c_str(), map_type_name.c_str()); + if ((*(m_buffer_type_maps[map_type_name]))[object_name].m_pendingRemove && op == SET_COMMAND) + { + SWSS_LOG_NOTICE("Entry %s %s is pending remove, need retry", map_type_name.c_str(), object_name.c_str()); + return task_process_status::task_need_retry; + } } SWSS_LOG_DEBUG("processing command:%s", op.c_str()); + if (op == SET_COMMAND) { vector attribs; @@ -406,6 +469,7 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) attr.value.u64 = (uint64_t)stoul(value); attr.id = SAI_BUFFER_POOL_ATTR_XOFF_SIZE; attribs.push_back(attr); + xoff = value; } else { @@ -447,7 +511,9 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) return handle_status; } } 
+ (*(m_buffer_type_maps[map_type_name]))[object_name].m_saiObjectId = sai_object; + (*(m_buffer_type_maps[map_type_name]))[object_name].m_pendingRemove = false; SWSS_LOG_NOTICE("Created buffer pool %s with type %s", object_name.c_str(), map_type_name.c_str()); // Here we take the PFC watchdog approach to update the COUNTERS_DB metadata (e.g., PFC_WD_DETECTION_TIME per queue) // at initialization (creation and registration phase) @@ -456,6 +522,15 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) // "FLEX_COUNTER_STATUS" m_countersDb->hset(COUNTERS_BUFFER_POOL_NAME_MAP, object_name, sai_serialize_object_id(sai_object)); } + + // Only publish the result when shared headroom pool is enabled and it has been successfully applied to SAI + if (!xoff.empty()) + { + vector fvs; + fvs.emplace_back("xoff", xoff); + SWSS_LOG_INFO("Publishing the result after applying the shared headroom pool size %s to SAI", xoff.c_str()); + m_publisher.publish(APP_BUFFER_POOL_TABLE_NAME, object_name, fvs, ReturnCode(SAI_STATUS_SUCCESS), true); + } } else if (op == DEL_COMMAND) { @@ -463,6 +538,7 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) { auto hint = objectReferenceInfo(m_buffer_type_maps, map_type_name, object_name); SWSS_LOG_NOTICE("Can't remove object %s due to being referenced (%s)", object_name.c_str(), hint.c_str()); + (*(m_buffer_type_maps[map_type_name]))[object_name].m_pendingRemove = true; return task_process_status::task_need_retry; } @@ -480,11 +556,14 @@ task_process_status BufferOrch::processBufferPool(KeyOpFieldsValuesTuple &tuple) return handle_status; } } + SWSS_LOG_NOTICE("Removed buffer pool %s with type %s", object_name.c_str(), map_type_name.c_str()); } - SWSS_LOG_NOTICE("Removed buffer pool %s with type %s", object_name.c_str(), map_type_name.c_str()); auto it_to_delete = (m_buffer_type_maps[map_type_name])->find(object_name); (m_buffer_type_maps[map_type_name])->erase(it_to_delete); 
m_countersDb->hdel(COUNTERS_BUFFER_POOL_NAME_MAP, object_name); + + vector fvs; + m_publisher.publish(APP_BUFFER_POOL_TABLE_NAME, object_name, fvs, ReturnCode(SAI_STATUS_SUCCESS), true); } else { @@ -509,6 +588,11 @@ task_process_status BufferOrch::processBufferProfile(KeyOpFieldsValuesTuple &tup { sai_object = (*(m_buffer_type_maps[map_type_name]))[object_name].m_saiObjectId; SWSS_LOG_DEBUG("found existing object:%s of type:%s", object_name.c_str(), map_type_name.c_str()); + if ((*(m_buffer_type_maps[map_type_name]))[object_name].m_pendingRemove && op == SET_COMMAND) + { + SWSS_LOG_NOTICE("Entry %s %s is pending remove, need retry", map_type_name.c_str(), object_name.c_str()); + return task_process_status::task_need_retry; + } } SWSS_LOG_DEBUG("processing command:%s", op.c_str()); if (op == SET_COMMAND) @@ -523,13 +607,6 @@ task_process_status BufferOrch::processBufferProfile(KeyOpFieldsValuesTuple &tup sai_attribute_t attr; if (field == buffer_pool_field_name) { - if (SAI_NULL_OBJECT_ID != sai_object) - { - // We should skip the profile's pool name because it's create only when setting a profile's attribute. 
- SWSS_LOG_INFO("Skip setting buffer profile's pool %s for profile %s", value.c_str(), object_name.c_str()); - continue; - } - sai_object_id_t sai_pool; ref_resolve_status resolve_result = resolveFieldRefValue(m_buffer_type_maps, buffer_pool_field_name, buffer_to_ref_table_map.at(buffer_pool_field_name), @@ -656,6 +733,7 @@ task_process_status BufferOrch::processBufferProfile(KeyOpFieldsValuesTuple &tup } } (*(m_buffer_type_maps[map_type_name]))[object_name].m_saiObjectId = sai_object; + (*(m_buffer_type_maps[map_type_name]))[object_name].m_pendingRemove = false; SWSS_LOG_NOTICE("Created buffer profile %s with type %s", object_name.c_str(), map_type_name.c_str()); } @@ -668,6 +746,7 @@ task_process_status BufferOrch::processBufferProfile(KeyOpFieldsValuesTuple &tup { auto hint = objectReferenceInfo(m_buffer_type_maps, map_type_name, object_name); SWSS_LOG_NOTICE("Can't remove object %s due to being referenced (%s)", object_name.c_str(), hint.c_str()); + (*(m_buffer_type_maps[map_type_name]))[object_name].m_pendingRemove = true; return task_process_status::task_need_retry; } @@ -698,7 +777,8 @@ task_process_status BufferOrch::processBufferProfile(KeyOpFieldsValuesTuple &tup } /* -Input sample "BUFFER_QUEUE|Ethernet4,Ethernet45|10-15" + Input sample "BUFFER_QUEUE|Ethernet4,Ethernet45|10-15" or + "BUFFER_QUEUE|STG01-0101-0400-01T2-LC6|ASIC0|Ethernet4|10-15" */ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) { @@ -710,18 +790,47 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) vector tokens; sai_uint32_t range_low, range_high; bool need_update_sai = true; + bool local_port = false; + string local_port_name; SWSS_LOG_DEBUG("Processing:%s", key.c_str()); tokens = tokenize(key, delimiter); - if (tokens.size() != 2) + + vector port_names; + if (gMySwitchType == "voq") { - SWSS_LOG_ERROR("malformed key:%s. 
Must contain 2 tokens", key.c_str()); - return task_process_status::task_invalid_entry; + if (tokens.size() != 4) + { + SWSS_LOG_ERROR("malformed key:%s. Must contain 4 tokens", key.c_str()); + return task_process_status::task_invalid_entry; + } + + port_names = tokenize(tokens[0] + config_db_key_delimiter + tokens[1] + config_db_key_delimiter + tokens[2], list_item_delimiter); + if (!parseIndexRange(tokens[3], range_low, range_high)) + { + return task_process_status::task_invalid_entry; + } + + if(tokens[0] == gMyHostName) + { + local_port = true; + local_port_name = tokens[2]; + SWSS_LOG_INFO("System port %s is local port %d local port name %s", port_names[0].c_str(), local_port, local_port_name.c_str()); + } } - vector port_names = tokenize(tokens[0], list_item_delimiter); - if (!parseIndexRange(tokens[1], range_low, range_high)) + else { - return task_process_status::task_invalid_entry; + if (tokens.size() != 2) + { + SWSS_LOG_ERROR("malformed key:%s. Must contain 2 tokens", key.c_str()); + return task_process_status::task_invalid_entry; + } + + port_names = tokenize(tokens[0], list_item_delimiter); + if (!parseIndexRange(tokens[1], range_low, range_high)) + { + return task_process_status::task_invalid_entry; + } } if (op == SET_COMMAND) @@ -741,6 +850,21 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) return task_process_status::task_failed; } + string old_buffer_profile_name; + if (doesObjectExist(m_buffer_type_maps, APP_BUFFER_QUEUE_TABLE_NAME, key, buffer_profile_field_name, old_buffer_profile_name) + && (old_buffer_profile_name == buffer_profile_name)) + { + if (m_partiallyAppliedQueues.find(key) == m_partiallyAppliedQueues.end()) + { + SWSS_LOG_INFO("Skip setting buffer queue %s to %s since it is not changed", key.c_str(), buffer_profile_name.c_str()); + return task_process_status::task_success; + } + else + { + m_partiallyAppliedQueues.erase(key); + } + } + SWSS_LOG_NOTICE("Set buffer queue %s to %s", key.c_str(), 
buffer_profile_name.c_str()); setObjectReference(m_buffer_type_maps, APP_BUFFER_QUEUE_TABLE_NAME, key, buffer_profile_field_name, buffer_profile_name); @@ -756,6 +880,7 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) sai_buffer_profile = SAI_NULL_OBJECT_ID; SWSS_LOG_NOTICE("Remove buffer queue %s", key.c_str()); removeObject(m_buffer_type_maps, APP_BUFFER_QUEUE_TABLE_NAME, key); + m_partiallyAppliedQueues.erase(key); } else { @@ -770,6 +895,12 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) { Port port; SWSS_LOG_DEBUG("processing port:%s", port_name.c_str()); + + if(local_port == true) + { + port_name = local_port_name; + } + if (!gPortsOrch->getPort(port_name, port)) { SWSS_LOG_ERROR("Port with alias:%s not found", port_name.c_str()); @@ -778,20 +909,36 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) for (size_t ind = range_low; ind <= range_high; ind++) { SWSS_LOG_DEBUG("processing queue:%zd", ind); - if (port.m_queue_ids.size() <= ind) + sai_object_id_t queue_id; + + if (gMySwitchType == "voq") { - SWSS_LOG_ERROR("Invalid queue index specified:%zd", ind); - return task_process_status::task_invalid_entry; - } - if (port.m_queue_lock[ind]) + std :: vector queue_ids = gPortsOrch->getPortVoQIds(port); + if (queue_ids.size() <= ind) + { + SWSS_LOG_ERROR("Invalid voq index specified:%zd", ind); + return task_process_status::task_invalid_entry; + } + queue_id = queue_ids[ind]; + } + else { - SWSS_LOG_WARN("Queue %zd on port %s is locked, will retry", ind, port_name.c_str()); - return task_process_status::task_need_retry; + if (port.m_queue_ids.size() <= ind) + { + SWSS_LOG_ERROR("Invalid queue index specified:%zd", ind); + return task_process_status::task_invalid_entry; + } + if (port.m_queue_lock[ind]) + { + SWSS_LOG_WARN("Queue %zd on port %s is locked, will retry", ind, port_name.c_str()); + m_partiallyAppliedQueues.insert(key); + return task_process_status::task_need_retry; 
+ } + queue_id = port.m_queue_ids[ind]; } + if (need_update_sai) { - sai_object_id_t queue_id; - queue_id = port.m_queue_ids[ind]; SWSS_LOG_DEBUG("Applying buffer profile:0x%" PRIx64 " to queue index:%zd, queue sai_id:0x%" PRIx64, sai_buffer_profile, ind, queue_id); sai_status_t sai_status = sai_queue_api->set_queue_attribute(queue_id, &attr); if (sai_status != SAI_STATUS_SUCCESS) @@ -803,7 +950,56 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) return handle_status; } } + // create/remove a port queue counter for the queue buffer + else + { + auto flexCounterOrch = gDirectory.get(); + auto queues = tokens[1]; + if (op == SET_COMMAND && + (flexCounterOrch->getQueueCountersState() || flexCounterOrch->getQueueWatermarkCountersState())) + { + gPortsOrch->createPortBufferQueueCounters(port, queues); + } + else if (op == DEL_COMMAND && + (flexCounterOrch->getQueueCountersState() || flexCounterOrch->getQueueWatermarkCountersState())) + { + gPortsOrch->removePortBufferQueueCounters(port, queues); + } + } } + + /* when we apply buffer configuration we need to increase the ref counter of this port + * or decrease the ref counter for this port when we remove buffer cfg + * so for each priority cfg in each port we will increase/decrease the ref counter + * also we need to know when the set command is for creating a buffer cfg or modifying buffer cfg - + * we need to increase ref counter only on create flow. 
+ * so we added a map that will help us to know what was the last command for this port and priority - + * if the last command was set command then it is a modify command and we dont need to increase the buffer counter + * all other cases (no last command exist or del command was the last command) it means that we need to increase the ref counter */ + if (op == SET_COMMAND) + { + if (queue_port_flags[port_name][ind] != SET_COMMAND) + { + /* if the last operation was not "set" then it's create and not modify - need to increase ref counter */ + gPortsOrch->increasePortRefCount(port_name); + } + } + else if (op == DEL_COMMAND) + { + if (queue_port_flags[port_name][ind] == SET_COMMAND) + { + /* we need to decrease ref counter only if the last operation was "SET_COMMAND" */ + gPortsOrch->decreasePortRefCount(port_name); + } + } + else + { + SWSS_LOG_ERROR("operation value is not SET or DEL (op = %s)", op.c_str()); + return task_process_status::task_invalid_entry; + } + /* save the last command (set or delete) */ + queue_port_flags[port_name][ind] = op; + } } @@ -822,8 +1018,17 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) // set order is detected. 
for (const auto &port_name : port_names) { - if (gPortsOrch->isPortAdminUp(port_name)) { - SWSS_LOG_WARN("Queue profile '%s' applied after port %s is up", key.c_str(), port_name.c_str()); + if(local_port == true) + { + if (gPortsOrch->isPortAdminUp(local_port_name)) { + SWSS_LOG_WARN("Queue profile '%s' applied after port %s is up", key.c_str(), port_name.c_str()); + } + } + else + { + if (gPortsOrch->isPortAdminUp(port_name)) { + SWSS_LOG_WARN("Queue profile '%s' applied after port %s is up", key.c_str(), port_name.c_str()); + } } } } @@ -862,7 +1067,7 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup if (op == SET_COMMAND) { ref_resolve_status resolve_result = resolveFieldRefValue(m_buffer_type_maps, buffer_profile_field_name, - buffer_to_ref_table_map.at(buffer_profile_field_name), tuple, + buffer_to_ref_table_map.at(buffer_profile_field_name), tuple, sai_buffer_profile, buffer_profile_name); if (ref_resolve_status::success != resolve_result) { @@ -876,6 +1081,14 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup return task_process_status::task_failed; } + string old_buffer_profile_name; + if (doesObjectExist(m_buffer_type_maps, APP_BUFFER_PG_TABLE_NAME, key, buffer_profile_field_name, old_buffer_profile_name) + && (old_buffer_profile_name == buffer_profile_name)) + { + SWSS_LOG_INFO("Skip setting buffer priority group %s to %s since it is not changed", key.c_str(), buffer_profile_name.c_str()); + return task_process_status::task_success; + } + SWSS_LOG_NOTICE("Set buffer PG %s to %s", key.c_str(), buffer_profile_name.c_str()); setObjectReference(m_buffer_type_maps, APP_BUFFER_PG_TABLE_NAME, key, buffer_profile_field_name, buffer_profile_name); @@ -904,7 +1117,6 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup for (string port_name : port_names) { Port port; - bool portUpdated = false; SWSS_LOG_DEBUG("processing port:%s", port_name.c_str()); if 
(!gPortsOrch->getPort(port_name, port)) { @@ -919,12 +1131,6 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup SWSS_LOG_ERROR("Invalid pg index specified:%zd", ind); return task_process_status::task_invalid_entry; } - if (port.m_priority_group_lock[ind]) - { - SWSS_LOG_WARN("Priority group %zd on port %s is locked, pending profile 0x%" PRIx64 " until unlocked", ind, port_name.c_str(), sai_buffer_profile); - portUpdated = true; - port.m_priority_group_pending_profile[ind] = sai_buffer_profile; - } else { if (need_update_sai) @@ -942,12 +1148,57 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup return handle_status; } } + // create or remove a port PG counter for the PG buffer + else + { + auto flexCounterOrch = gDirectory.get(); + auto pgs = tokens[1]; + if (op == SET_COMMAND && + (flexCounterOrch->getPgCountersState() || flexCounterOrch->getPgWatermarkCountersState())) + { + gPortsOrch->createPortBufferPgCounters(port, pgs); + } + else if (op == DEL_COMMAND && + (flexCounterOrch->getPgCountersState() || flexCounterOrch->getPgWatermarkCountersState())) + { + gPortsOrch->removePortBufferPgCounters(port, pgs); + } + } } } - } - if (portUpdated) - { - gPortsOrch->setPort(port_name, port); + + /* when we apply buffer configuration we need to increase the ref counter of this port + * or decrease the ref counter for this port when we remove buffer cfg + * so for each priority cfg in each port we will increase/decrease the ref counter + * also we need to know when the set command is for creating a buffer cfg or modifying buffer cfg - + * we need to increase ref counter only on create flow. 
+ * so we added a map that will help us to know what was the last command for this port and priority - + * if the last command was set command then it is a modify command and we dont need to increase the buffer counter + * all other cases (no last command exist or del command was the last command) it means that we need to increase the ref counter */ + if (op == SET_COMMAND) + { + if (pg_port_flags[port_name][ind] != SET_COMMAND) + { + /* if the last operation was not "set" then it's create and not modify - need to increase ref counter */ + gPortsOrch->increasePortRefCount(port_name); + } + } + else if (op == DEL_COMMAND) + { + if (pg_port_flags[port_name][ind] == SET_COMMAND) + { + /* we need to decrease ref counter only if the last operation was "SET_COMMAND" */ + gPortsOrch->decreasePortRefCount(port_name); + } + } + else + { + SWSS_LOG_ERROR("operation value is not SET or DEL (op = %s)", op.c_str()); + return task_process_status::task_invalid_entry; + } + /* save the last command (set or delete) */ + pg_port_flags[port_name][ind] = op; + } } @@ -989,28 +1240,51 @@ task_process_status BufferOrch::processIngressBufferProfileList(KeyOpFieldsValue vector port_names = tokenize(key, list_item_delimiter); vector profile_list; + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_QOS_INGRESS_BUFFER_PROFILE_LIST; - string profile_name_list; - ref_resolve_status resolve_status = resolveFieldRefArray(m_buffer_type_maps, buffer_profile_list_field_name, - buffer_to_ref_table_map.at(buffer_profile_list_field_name), tuple, - profile_list, profile_name_list); - if (ref_resolve_status::success != resolve_status) + if (op == SET_COMMAND) { - if(ref_resolve_status::not_resolved == resolve_status) + string profile_name_list; + ref_resolve_status resolve_status = resolveFieldRefArray(m_buffer_type_maps, buffer_profile_list_field_name, + buffer_to_ref_table_map.at(buffer_profile_list_field_name), tuple, + profile_list, profile_name_list); + if (ref_resolve_status::success != 
resolve_status) { - SWSS_LOG_INFO("Missing or invalid ingress buffer profile reference specified for:%s", key.c_str()); - return task_process_status::task_need_retry; + if(ref_resolve_status::not_resolved == resolve_status) + { + SWSS_LOG_INFO("Missing or invalid ingress buffer profile reference specified for:%s", key.c_str()); + return task_process_status::task_need_retry; + } + SWSS_LOG_ERROR("Failed resolving ingress buffer profile reference specified for:%s", key.c_str()); + return task_process_status::task_failed; } - SWSS_LOG_ERROR("Failed resolving ingress buffer profile reference specified for:%s", key.c_str()); - return task_process_status::task_failed; - } - setObjectReference(m_buffer_type_maps, APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, key, buffer_profile_list_field_name, profile_name_list); + string old_profile_name_list; + if (doesObjectExist(m_buffer_type_maps, APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, key, buffer_profile_list_field_name, old_profile_name_list) + && (old_profile_name_list == profile_name_list)) + { + SWSS_LOG_INFO("Skip setting buffer ingress profile list %s to %s since it is not changed", key.c_str(), profile_name_list.c_str()); + return task_process_status::task_success; + } + + setObjectReference(m_buffer_type_maps, APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, key, buffer_profile_list_field_name, profile_name_list); + + attr.value.objlist.count = (uint32_t)profile_list.size(); + attr.value.objlist.list = profile_list.data(); + } + else if (op == DEL_COMMAND) + { + SWSS_LOG_NOTICE("%s has been removed from BUFFER_PORT_INGRESS_PROFILE_LIST_TABLE", key.c_str()); + removeObject(m_buffer_type_maps, APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, key); + attr.value.objlist.count = 0; + attr.value.objlist.list = profile_list.data(); + } + else + { + SWSS_LOG_ERROR("Unknown command %s when handling BUFFER_PORT_INGRESS_PROFILE_LIST_TABLE key %s", op.c_str(), key.c_str()); + } - sai_attribute_t attr; - attr.id = 
SAI_PORT_ATTR_QOS_INGRESS_BUFFER_PROFILE_LIST; - attr.value.objlist.count = (uint32_t)profile_list.size(); - attr.value.objlist.list = profile_list.data(); for (string port_name : port_names) { if (!gPortsOrch->getPort(port_name, port)) @@ -1045,28 +1319,51 @@ task_process_status BufferOrch::processEgressBufferProfileList(KeyOpFieldsValues SWSS_LOG_DEBUG("processing:%s", key.c_str()); vector port_names = tokenize(key, list_item_delimiter); vector profile_list; + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_QOS_EGRESS_BUFFER_PROFILE_LIST; - string profile_name_list; - ref_resolve_status resolve_status = resolveFieldRefArray(m_buffer_type_maps, buffer_profile_list_field_name, - buffer_to_ref_table_map.at(buffer_profile_list_field_name), tuple, - profile_list, profile_name_list); - if (ref_resolve_status::success != resolve_status) + if (op == SET_COMMAND) { - if(ref_resolve_status::not_resolved == resolve_status) + string profile_name_list; + ref_resolve_status resolve_status = resolveFieldRefArray(m_buffer_type_maps, buffer_profile_list_field_name, + buffer_to_ref_table_map.at(buffer_profile_list_field_name), tuple, + profile_list, profile_name_list); + if (ref_resolve_status::success != resolve_status) { - SWSS_LOG_INFO("Missing or invalid egress buffer profile reference specified for:%s", key.c_str()); - return task_process_status::task_need_retry; + if(ref_resolve_status::not_resolved == resolve_status) + { + SWSS_LOG_INFO("Missing or invalid egress buffer profile reference specified for:%s", key.c_str()); + return task_process_status::task_need_retry; + } + SWSS_LOG_ERROR("Failed resolving egress buffer profile reference specified for:%s", key.c_str()); + return task_process_status::task_failed; } - SWSS_LOG_ERROR("Failed resolving egress buffer profile reference specified for:%s", key.c_str()); - return task_process_status::task_failed; - } - setObjectReference(m_buffer_type_maps, APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME, key, buffer_profile_list_field_name, 
profile_name_list); + string old_profile_name_list; + if (doesObjectExist(m_buffer_type_maps, APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME, key, buffer_profile_list_field_name, old_profile_name_list) + && (old_profile_name_list == profile_name_list)) + { + SWSS_LOG_INFO("Skip setting buffer egress profile list %s to %s since it is not changed", key.c_str(), profile_name_list.c_str()); + return task_process_status::task_success; + } + + setObjectReference(m_buffer_type_maps, APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME, key, buffer_profile_list_field_name, profile_name_list); + + attr.value.objlist.count = (uint32_t)profile_list.size(); + attr.value.objlist.list = profile_list.data(); + } + else if (op == DEL_COMMAND) + { + SWSS_LOG_NOTICE("%s has been removed from BUFFER_PORT_EGRESS_PROFILE_LIST_TABLE", key.c_str()); + removeObject(m_buffer_type_maps, APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME, key); + attr.value.objlist.count = 0; + attr.value.objlist.list = profile_list.data(); + } + else + { + SWSS_LOG_ERROR("Unknown command %s when handling BUFFER_PORT_EGRESS_PROFILE_LIST_TABLE key %s", op.c_str(), key.c_str()); + } - sai_attribute_t attr; - attr.id = SAI_PORT_ATTR_QOS_EGRESS_BUFFER_PROFILE_LIST; - attr.value.objlist.count = (uint32_t)profile_list.size(); - attr.value.objlist.list = profile_list.data(); for (string port_name : port_names) { if (!gPortsOrch->getPort(port_name, port)) @@ -1127,7 +1424,15 @@ void BufferOrch::doTask(Consumer &consumer) { SWSS_LOG_ENTER(); - if (!gPortsOrch->isConfigDone()) + if (gMySwitchType == "voq") + { + if(!gPortsOrch->isInitDone()) + { + SWSS_LOG_INFO("Buffer task for %s can't be executed ahead of port config done", consumer.getTableName().c_str()); + return; + } + } + else if (!gPortsOrch->isConfigDone()) { SWSS_LOG_INFO("Buffer task for %s can't be executed ahead of port config done", consumer.getTableName().c_str()); return; diff --git a/orchagent/bufferorch.h b/orchagent/bufferorch.h index 05fdd7917f..de1e75c0a6 100644 --- 
a/orchagent/bufferorch.h +++ b/orchagent/bufferorch.h @@ -49,6 +49,7 @@ class BufferOrch : public Orch void initTableHandlers(); void initBufferReadyLists(DBConnector *confDb, DBConnector *applDb); void initBufferReadyList(Table& table, bool isConfigDb); + void initVoqBufferReadyList(Table& table, bool isConfigDb); void initFlexCounterGroupTable(void); void initBufferConstants(); task_process_status processBufferPool(KeyOpFieldsValuesTuple &tuple); @@ -71,6 +72,7 @@ class BufferOrch : public Orch unique_ptr m_countersDb; bool m_isBufferPoolWatermarkCounterIdListGenerated = false; + set m_partiallyAppliedQueues; }; #endif /* SWSS_BUFFORCH_H */ diff --git a/orchagent/bulker.h b/orchagent/bulker.h index 2ff86644ac..bb5ca496c9 100644 --- a/orchagent/bulker.h +++ b/orchagent/bulker.h @@ -224,7 +224,7 @@ class EntityBulker auto& attrs = it->second.first; attrs.insert(attrs.end(), attr_list, attr_list + attr_count); it->second.second = object_status; - SWSS_LOG_INFO("EntityBulker.create_entry %zu, %zu, %d, %d\n", creating_entries.size(), it->second.first.size(), (int)it->second.first[0].id, inserted); + SWSS_LOG_INFO("EntityBulker.create_entry %zu, %zu, %d\n", creating_entries.size(), it->second.first.size(), inserted); *object_status = SAI_STATUS_NOT_EXECUTED; return *object_status; } diff --git a/orchagent/cbf/cbfnhgorch.cpp b/orchagent/cbf/cbfnhgorch.cpp index 76435ad12d..fe396b207c 100644 --- a/orchagent/cbf/cbfnhgorch.cpp +++ b/orchagent/cbf/cbfnhgorch.cpp @@ -343,10 +343,10 @@ bool CbfNhg::sync() SWSS_LOG_ERROR("Failed to create CBF next hop group %s, rv %d", m_key.c_str(), status); - task_process_status handle_status = gCbfNhgOrch->handleSaiCreateStatus(SAI_API_NEXT_HOP_GROUP, status); + task_process_status handle_status = handleSaiCreateStatus(SAI_API_NEXT_HOP_GROUP, status); if (handle_status != task_success) { - return gCbfNhgOrch->parseHandleSaiStatusFailure(handle_status); + return parseHandleSaiStatusFailure(handle_status); } } diff --git 
a/orchagent/crmorch.cpp b/orchagent/crmorch.cpp index 7895bc38a4..7d4a78383e 100644 --- a/orchagent/crmorch.cpp +++ b/orchagent/crmorch.cpp @@ -4,6 +4,7 @@ #include "crmorch.h" #include "converter.h" #include "timer.h" +#include "saihelper.h" #define CRM_POLLING_INTERVAL "polling_interval" #define CRM_COUNTERS_TABLE_KEY "STATS" @@ -18,6 +19,7 @@ extern sai_object_id_t gSwitchId; extern sai_switch_api_t *sai_switch_api; extern sai_acl_api_t *sai_acl_api; +extern event_handle_t g_events_handle; using namespace std; using namespace swss; @@ -46,6 +48,7 @@ const map crmResTypeNameMap = { CrmResourceType::CRM_SRV6_MY_SID_ENTRY, "SRV6_MY_SID_ENTRY" }, { CrmResourceType::CRM_SRV6_NEXTHOP, "SRV6_NEXTHOP" }, { CrmResourceType::CRM_NEXTHOP_GROUP_MAP, "NEXTHOP_GROUP_MAP" }, + { CrmResourceType::CRM_EXT_TABLE, "EXTENSION_TABLE" }, }; const map crmResSaiAvailAttrMap = @@ -66,11 +69,48 @@ const map crmResSaiAvailAttrMap = { CrmResourceType::CRM_IPMC_ENTRY, SAI_SWITCH_ATTR_AVAILABLE_IPMC_ENTRY}, { CrmResourceType::CRM_SNAT_ENTRY, SAI_SWITCH_ATTR_AVAILABLE_SNAT_ENTRY }, { CrmResourceType::CRM_DNAT_ENTRY, SAI_SWITCH_ATTR_AVAILABLE_DNAT_ENTRY }, +}; + +const map crmResSaiObjAttrMap = +{ + { CrmResourceType::CRM_IPV4_ROUTE, SAI_OBJECT_TYPE_ROUTE_ENTRY }, + { CrmResourceType::CRM_IPV6_ROUTE, SAI_OBJECT_TYPE_ROUTE_ENTRY }, + { CrmResourceType::CRM_IPV4_NEXTHOP, SAI_OBJECT_TYPE_NULL }, + { CrmResourceType::CRM_IPV6_NEXTHOP, SAI_OBJECT_TYPE_NULL }, + { CrmResourceType::CRM_IPV4_NEIGHBOR, SAI_OBJECT_TYPE_NEIGHBOR_ENTRY }, + { CrmResourceType::CRM_IPV6_NEIGHBOR, SAI_OBJECT_TYPE_NEIGHBOR_ENTRY }, + { CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER, SAI_OBJECT_TYPE_NULL }, + { CrmResourceType::CRM_NEXTHOP_GROUP, SAI_OBJECT_TYPE_NEXT_HOP_GROUP }, + { CrmResourceType::CRM_ACL_TABLE, SAI_OBJECT_TYPE_NULL }, + { CrmResourceType::CRM_ACL_GROUP, SAI_OBJECT_TYPE_NULL }, + { CrmResourceType::CRM_ACL_ENTRY, SAI_OBJECT_TYPE_NULL }, + { CrmResourceType::CRM_ACL_COUNTER, SAI_OBJECT_TYPE_NULL }, + { 
CrmResourceType::CRM_FDB_ENTRY, SAI_OBJECT_TYPE_FDB_ENTRY }, + { CrmResourceType::CRM_IPMC_ENTRY, SAI_OBJECT_TYPE_NULL}, + { CrmResourceType::CRM_SNAT_ENTRY, SAI_OBJECT_TYPE_NULL }, + { CrmResourceType::CRM_DNAT_ENTRY, SAI_OBJECT_TYPE_NULL }, { CrmResourceType::CRM_MPLS_INSEG, SAI_OBJECT_TYPE_INSEG_ENTRY }, { CrmResourceType::CRM_MPLS_NEXTHOP, SAI_OBJECT_TYPE_NEXT_HOP }, { CrmResourceType::CRM_SRV6_MY_SID_ENTRY, SAI_OBJECT_TYPE_MY_SID_ENTRY }, { CrmResourceType::CRM_SRV6_NEXTHOP, SAI_OBJECT_TYPE_NEXT_HOP }, { CrmResourceType::CRM_NEXTHOP_GROUP_MAP, SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MAP }, + { CrmResourceType::CRM_EXT_TABLE, SAI_OBJECT_TYPE_GENERIC_PROGRAMMABLE }, +}; + +const map crmResAddrFamilyAttrMap = +{ + { CrmResourceType::CRM_IPV4_ROUTE, SAI_ROUTE_ENTRY_ATTR_IP_ADDR_FAMILY }, + { CrmResourceType::CRM_IPV6_ROUTE, SAI_ROUTE_ENTRY_ATTR_IP_ADDR_FAMILY }, + { CrmResourceType::CRM_IPV4_NEIGHBOR, SAI_NEIGHBOR_ENTRY_ATTR_IP_ADDR_FAMILY }, + { CrmResourceType::CRM_IPV6_NEIGHBOR, SAI_NEIGHBOR_ENTRY_ATTR_IP_ADDR_FAMILY }, +}; + +const map crmResAddrFamilyValMap = +{ + { CrmResourceType::CRM_IPV4_ROUTE, SAI_IP_ADDR_FAMILY_IPV4 }, + { CrmResourceType::CRM_IPV6_ROUTE, SAI_IP_ADDR_FAMILY_IPV6 }, + { CrmResourceType::CRM_IPV4_NEIGHBOR, SAI_IP_ADDR_FAMILY_IPV4 }, + { CrmResourceType::CRM_IPV6_NEIGHBOR, SAI_IP_ADDR_FAMILY_IPV6 }, }; const map crmThreshTypeResMap = @@ -96,6 +136,7 @@ const map crmThreshTypeResMap = { "srv6_my_sid_entry_threshold_type", CrmResourceType::CRM_SRV6_MY_SID_ENTRY }, { "srv6_nexthop_threshold_type", CrmResourceType::CRM_SRV6_NEXTHOP }, { "nexthop_group_map_threshold_type", CrmResourceType::CRM_NEXTHOP_GROUP_MAP }, + { "extension_table_threshold_type", CrmResourceType::CRM_EXT_TABLE }, }; const map crmThreshLowResMap = @@ -121,6 +162,7 @@ const map crmThreshLowResMap = {"srv6_my_sid_entry_low_threshold", CrmResourceType::CRM_SRV6_MY_SID_ENTRY }, {"srv6_nexthop_low_threshold", CrmResourceType::CRM_SRV6_NEXTHOP }, {"nexthop_group_map_low_threshold", 
CrmResourceType::CRM_NEXTHOP_GROUP_MAP }, + {"extension_table_low_threshold", CrmResourceType::CRM_EXT_TABLE }, }; const map crmThreshHighResMap = @@ -146,6 +188,7 @@ const map crmThreshHighResMap = {"srv6_my_sid_entry_high_threshold", CrmResourceType::CRM_SRV6_MY_SID_ENTRY }, {"srv6_nexthop_high_threshold", CrmResourceType::CRM_SRV6_NEXTHOP }, {"nexthop_group_map_high_threshold", CrmResourceType::CRM_NEXTHOP_GROUP_MAP }, + {"extension_table_high_threshold", CrmResourceType::CRM_EXT_TABLE }, }; const map crmThreshTypeMap = @@ -178,6 +221,7 @@ const map crmAvailCntsTableMap = { "crm_stats_srv6_my_sid_entry_available", CrmResourceType::CRM_SRV6_MY_SID_ENTRY }, { "crm_stats_srv6_nexthop_available", CrmResourceType::CRM_SRV6_NEXTHOP }, { "crm_stats_nexthop_group_map_available", CrmResourceType::CRM_NEXTHOP_GROUP_MAP }, + { "crm_stats_extension_table_available", CrmResourceType::CRM_EXT_TABLE }, }; const map crmUsedCntsTableMap = @@ -203,6 +247,7 @@ const map crmUsedCntsTableMap = { "crm_stats_srv6_my_sid_entry_used", CrmResourceType::CRM_SRV6_MY_SID_ENTRY }, { "crm_stats_srv6_nexthop_used", CrmResourceType::CRM_SRV6_NEXTHOP }, { "crm_stats_nexthop_group_map_used", CrmResourceType::CRM_NEXTHOP_GROUP_MAP }, + { "crm_stats_extension_table_used", CrmResourceType::CRM_EXT_TABLE }, }; CrmOrch::CrmOrch(DBConnector *db, string tableName): @@ -303,10 +348,19 @@ void CrmOrch::handleSetCommand(const string& key, const vector& } else if (crmThreshTypeResMap.find(field) != crmThreshTypeResMap.end()) { - auto resourceType = crmThreshTypeResMap.at(field); auto thresholdType = crmThreshTypeMap.at(value); + auto resourceType = crmThreshTypeResMap.at(field); + auto &resource = m_resourcesMap.at(resourceType); + + if (resource.thresholdType != thresholdType) + { + resource.thresholdType = thresholdType; - m_resourcesMap.at(resourceType).thresholdType = thresholdType; + for (auto &cnt : resource.countersMap) + { + cnt.second.exceededLogCounter = 0; + } + } } else if 
(crmThreshLowResMap.find(field) != crmThreshLowResMap.end()) { @@ -455,6 +509,36 @@ void CrmOrch::decCrmAclTableUsedCounter(CrmResourceType resource, sai_object_id_ } } +void CrmOrch::incCrmExtTableUsedCounter(CrmResourceType resource, std::string table_name) +{ + SWSS_LOG_ENTER(); + + try + { + m_resourcesMap.at(resource).countersMap[getCrmP4rtTableKey(table_name)].usedCounter++; + } + catch (...) + { + SWSS_LOG_ERROR("Failed to increment \"used\" counter for the EXT %s CRM resource.", table_name.c_str()); + return; + } +} + +void CrmOrch::decCrmExtTableUsedCounter(CrmResourceType resource, std::string table_name) +{ + SWSS_LOG_ENTER(); + + try + { + m_resourcesMap.at(resource).countersMap[getCrmP4rtTableKey(table_name)].usedCounter--; + } + catch (...) + { + SWSS_LOG_ERROR("Failed to decrement \"used\" counter for the EXT %s CRM resource.", table_name.c_str()); + return; + } +} + void CrmOrch::doTask(SelectableTimer &timer) { SWSS_LOG_ENTER(); @@ -464,6 +548,74 @@ void CrmOrch::doTask(SelectableTimer &timer) checkCrmThresholds(); } +bool CrmOrch::getResAvailability(CrmResourceType type, CrmResourceEntry &res) +{ + sai_attribute_t attr; + uint64_t availCount = 0; + sai_status_t status = SAI_STATUS_SUCCESS; + + sai_object_type_t objType = crmResSaiObjAttrMap.at(type); + + if (objType != SAI_OBJECT_TYPE_NULL) + { + uint32_t attrCount = 0; + + if ((type == CrmResourceType::CRM_IPV4_ROUTE) || (type == CrmResourceType::CRM_IPV6_ROUTE) || + (type == CrmResourceType::CRM_IPV4_NEIGHBOR) || (type == CrmResourceType::CRM_IPV6_NEIGHBOR)) + { + attr.id = crmResAddrFamilyAttrMap.at(type); + attr.value.s32 = crmResAddrFamilyValMap.at(type); + attrCount = 1; + } + else if (type == CrmResourceType::CRM_MPLS_NEXTHOP) + { + attr.id = SAI_NEXT_HOP_ATTR_TYPE; + attr.value.s32 = SAI_NEXT_HOP_TYPE_MPLS; + attrCount = 1; + } + else if (type == CrmResourceType::CRM_SRV6_NEXTHOP) + { + attr.id = SAI_NEXT_HOP_ATTR_TYPE; + attr.value.s32 = SAI_NEXT_HOP_TYPE_SRV6_SIDLIST; + attrCount = 1; + 
} + + status = sai_object_type_get_availability(gSwitchId, objType, attrCount, &attr, &availCount); + } + + if ((status != SAI_STATUS_SUCCESS) || (objType == SAI_OBJECT_TYPE_NULL)) + { + if (crmResSaiAvailAttrMap.find(type) != crmResSaiAvailAttrMap.end()) + { + attr.id = crmResSaiAvailAttrMap.at(type); + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + } + + if ((status == SAI_STATUS_NOT_SUPPORTED) || + (status == SAI_STATUS_NOT_IMPLEMENTED) || + SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || + SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status)) + { + // mark unsupported resources + res.resStatus = CrmResourceStatus::CRM_RES_NOT_SUPPORTED; + SWSS_LOG_NOTICE("CRM resource %s not supported", crmResTypeNameMap.at(type).c_str()); + return false; + } + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get availability counter for %s CRM resourse", crmResTypeNameMap.at(type).c_str()); + return false; + } + + availCount = attr.value.u32; + } + + res.countersMap[CRM_COUNTERS_TABLE_KEY].availableCounter = static_cast(availCount); + + return true; +} + void CrmOrch::getResAvailableCounters() { SWSS_LOG_ENTER(); @@ -490,33 +642,13 @@ void CrmOrch::getResAvailableCounters() case CrmResourceType::CRM_IPMC_ENTRY: case CrmResourceType::CRM_SNAT_ENTRY: case CrmResourceType::CRM_DNAT_ENTRY: + case CrmResourceType::CRM_MPLS_INSEG: + case CrmResourceType::CRM_NEXTHOP_GROUP_MAP: + case CrmResourceType::CRM_SRV6_MY_SID_ENTRY: + case CrmResourceType::CRM_MPLS_NEXTHOP: + case CrmResourceType::CRM_SRV6_NEXTHOP: { - sai_attribute_t attr; - attr.id = crmResSaiAvailAttrMap.at(res.first); - - sai_status_t status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); - if (status != SAI_STATUS_SUCCESS) - { - if ((status == SAI_STATUS_NOT_SUPPORTED) || - (status == SAI_STATUS_NOT_IMPLEMENTED) || - SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || - SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status)) - { - // mark unsupported resources - res.second.resStatus = 
CrmResourceStatus::CRM_RES_NOT_SUPPORTED; - SWSS_LOG_NOTICE("Switch attribute %u not supported", attr.id); - break; - } - SWSS_LOG_ERROR("Failed to get switch attribute %u , rv:%d", attr.id, status); - task_process_status handle_status = handleSaiGetStatus(SAI_API_SWITCH, status); - if (handle_status != task_process_status::task_success) - { - break; - } - } - - res.second.countersMap[CRM_COUNTERS_TABLE_KEY].availableCounter = attr.value.u32; - + getResAvailability(res.first, res.second); break; } @@ -578,116 +710,30 @@ void CrmOrch::getResAvailableCounters() break; } - case CrmResourceType::CRM_MPLS_INSEG: - case CrmResourceType::CRM_NEXTHOP_GROUP_MAP: + case CrmResourceType::CRM_EXT_TABLE: { - sai_object_type_t objType = static_cast(crmResSaiAvailAttrMap.at(res.first)); - uint64_t availCount = 0; - sai_status_t status = sai_object_type_get_availability(gSwitchId, objType, 0, nullptr, &availCount); - if (status != SAI_STATUS_SUCCESS) - { - if ((status == SAI_STATUS_NOT_SUPPORTED) || - (status == SAI_STATUS_NOT_IMPLEMENTED) || - SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || - SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status)) - { - // mark unsupported resources - res.second.resStatus = CrmResourceStatus::CRM_RES_NOT_SUPPORTED; - SWSS_LOG_NOTICE("CRM Resource %s not supported", crmResTypeNameMap.at(res.first).c_str()); - break; - } - SWSS_LOG_ERROR("Failed to get availability for object_type %u , rv:%d", objType, status); - break; - } - - res.second.countersMap[CRM_COUNTERS_TABLE_KEY].availableCounter = static_cast(availCount); - - break; - } - - case CrmResourceType::CRM_MPLS_NEXTHOP: - { - sai_object_type_t objType = static_cast(crmResSaiAvailAttrMap.at(res.first)); - sai_attribute_t attr; - uint64_t availCount = 0; - - attr.id = SAI_NEXT_HOP_ATTR_TYPE; - attr.value.s32 = SAI_NEXT_HOP_TYPE_MPLS; - sai_status_t status = sai_object_type_get_availability(gSwitchId, objType, 1, &attr, &availCount); - if (status != SAI_STATUS_SUCCESS) + for (auto &cnt : res.second.countersMap) 
{ - if ((status == SAI_STATUS_NOT_SUPPORTED) || - (status == SAI_STATUS_NOT_IMPLEMENTED) || - SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || - SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status)) - { - // mark unsupported resources - res.second.resStatus = CrmResourceStatus::CRM_RES_NOT_SUPPORTED; - SWSS_LOG_NOTICE("CRM Resource %s not supported", crmResTypeNameMap.at(res.first).c_str()); - break; - } - SWSS_LOG_ERROR("Failed to get availability for object_type %u , rv:%d", objType, status); - break; - } + std::string table_name = cnt.first; + sai_object_type_t objType = crmResSaiObjAttrMap.at(res.first); + sai_attribute_t attr; + uint64_t availCount = 0; - res.second.countersMap[CRM_COUNTERS_TABLE_KEY].availableCounter = static_cast(availCount); + attr.id = SAI_GENERIC_PROGRAMMABLE_ATTR_OBJECT_NAME; + attr.value.s8list.count = (uint32_t)table_name.size(); + attr.value.s8list.list = (int8_t *)const_cast(table_name.c_str()); - break; - } - - case CrmResourceType::CRM_SRV6_MY_SID_ENTRY: - { - sai_object_type_t objType = static_cast(crmResSaiAvailAttrMap.at(res.first)); - uint64_t availCount = 0; - sai_status_t status = sai_object_type_get_availability(gSwitchId, objType, 0, nullptr, &availCount); - if (status != SAI_STATUS_SUCCESS) - { - if ((status == SAI_STATUS_NOT_SUPPORTED) || - (status == SAI_STATUS_NOT_IMPLEMENTED) || - SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || - SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status)) + sai_status_t status = sai_object_type_get_availability( + gSwitchId, objType, 1, &attr, &availCount); + if (status != SAI_STATUS_SUCCESS) { - // mark unsupported resources - res.second.resStatus = CrmResourceStatus::CRM_RES_NOT_SUPPORTED; - SWSS_LOG_NOTICE("CRM Resource %s not supported", crmResTypeNameMap.at(res.first).c_str()); + SWSS_LOG_ERROR("Failed to get EXT table resource count %s , rv:%d", + table_name.c_str(), status); break; } - SWSS_LOG_ERROR("Failed to get availability for object_type %u , rv:%d", objType, status); - break; - } - - 
res.second.countersMap[CRM_COUNTERS_TABLE_KEY].availableCounter = static_cast(availCount); - - break; - } - case CrmResourceType::CRM_SRV6_NEXTHOP: - { - sai_object_type_t objType = static_cast(crmResSaiAvailAttrMap.at(res.first)); - sai_attribute_t attr; - uint64_t availCount = 0; - - attr.id = SAI_NEXT_HOP_ATTR_TYPE; - attr.value.s32 = SAI_NEXT_HOP_TYPE_SRV6_SIDLIST; - sai_status_t status = sai_object_type_get_availability(gSwitchId, objType, 1, &attr, &availCount); - if (status != SAI_STATUS_SUCCESS) - { - if ((status == SAI_STATUS_NOT_SUPPORTED) || - (status == SAI_STATUS_NOT_IMPLEMENTED) || - SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || - SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status)) - { - // mark unsupported resources - res.second.resStatus = CrmResourceStatus::CRM_RES_NOT_SUPPORTED; - SWSS_LOG_NOTICE("CRM Resource %s not supported", crmResTypeNameMap.at(res.first).c_str()); - break; - } - SWSS_LOG_ERROR("Failed to get availability for object_type %u , rv:%d", objType, status); - break; + cnt.second.availableCounter = static_cast(availCount); } - - res.second.countersMap[CRM_COUNTERS_TABLE_KEY].availableCounter = static_cast(availCount); - break; } @@ -747,7 +793,7 @@ void CrmOrch::checkCrmThresholds() { auto &res = i.second; - for (const auto &j : i.second.countersMap) + for (auto &j : i.second.countersMap) { auto &cnt = j.second; uint64_t utilization = 0; @@ -786,19 +832,25 @@ void CrmOrch::checkCrmThresholds() throw runtime_error("Unknown threshold type for CRM resource"); } - if ((utilization >= res.highThreshold) && (res.exceededLogCounter < CRM_EXCEEDED_MSG_MAX)) + if ((utilization >= res.highThreshold) && (cnt.exceededLogCounter < CRM_EXCEEDED_MSG_MAX)) { + event_params_t params = { + { "percent", to_string(percentageUtil) }, + { "used_cnt", to_string(cnt.usedCounter) }, + { "free_cnt", to_string(cnt.availableCounter) }}; + SWSS_LOG_WARN("%s THRESHOLD_EXCEEDED for %s %u%% Used count %u free count %u", res.name.c_str(), threshType.c_str(), percentageUtil, 
cnt.usedCounter, cnt.availableCounter); - res.exceededLogCounter++; + event_publish(g_events_handle, "chk_crm_threshold", ¶ms); + cnt.exceededLogCounter++; } - else if ((utilization <= res.lowThreshold) && (res.exceededLogCounter > 0)) + else if ((utilization <= res.lowThreshold) && (cnt.exceededLogCounter > 0) && (res.highThreshold != res.lowThreshold)) { SWSS_LOG_WARN("%s THRESHOLD_CLEAR for %s %u%% Used count %u free count %u", res.name.c_str(), threshType.c_str(), percentageUtil, cnt.usedCounter, cnt.availableCounter); - res.exceededLogCounter = 0; + cnt.exceededLogCounter = 0; } } // end of counters loop } // end of resources loop @@ -851,3 +903,10 @@ string CrmOrch::getCrmAclTableKey(sai_object_id_t id) ss << "ACL_TABLE_STATS:" << "0x" << std::hex << id; return ss.str(); } + +string CrmOrch::getCrmP4rtTableKey(std::string table_name) +{ + std::stringstream ss; + ss << "EXT_TABLE_STATS:" << table_name; + return ss.str(); +} diff --git a/orchagent/crmorch.h b/orchagent/crmorch.h index 345caa2cf6..0ca118ef43 100644 --- a/orchagent/crmorch.h +++ b/orchagent/crmorch.h @@ -5,6 +5,7 @@ #include #include "orch.h" #include "port.h" +#include "events.h" extern "C" { #include "sai.h" @@ -33,6 +34,7 @@ enum class CrmResourceType CRM_SRV6_MY_SID_ENTRY, CRM_SRV6_NEXTHOP, CRM_NEXTHOP_GROUP_MAP, + CRM_EXT_TABLE, }; enum class CrmThresholdType @@ -62,6 +64,10 @@ class CrmOrch : public Orch void incCrmAclTableUsedCounter(CrmResourceType resource, sai_object_id_t tableId); // Decrement "used" counter for the per ACL table CRM resources (ACL entry/counter) void decCrmAclTableUsedCounter(CrmResourceType resource, sai_object_id_t tableId); + // Increment "used" counter for the EXT table CRM resources + void incCrmExtTableUsedCounter(CrmResourceType resource, std::string table_name); + // Decrement "used" counter for the EXT table CRM resources + void decCrmExtTableUsedCounter(CrmResourceType resource, std::string table_name); private: std::shared_ptr m_countersDb = nullptr; @@ 
-73,6 +79,7 @@ class CrmOrch : public Orch sai_object_id_t id = 0; uint32_t availableCounter = 0; uint32_t usedCounter = 0; + uint32_t exceededLogCounter = 0; }; struct CrmResourceEntry @@ -87,7 +94,6 @@ class CrmOrch : public Orch std::map countersMap; - uint32_t exceededLogCounter = 0; CrmResourceStatus resStatus = CrmResourceStatus::CRM_RES_SUPPORTED; }; @@ -98,9 +104,11 @@ class CrmOrch : public Orch void doTask(Consumer &consumer); void handleSetCommand(const std::string& key, const std::vector& data); void doTask(swss::SelectableTimer &timer); + bool getResAvailability(CrmResourceType type, CrmResourceEntry &res); void getResAvailableCounters(); void updateCrmCountersTable(); void checkCrmThresholds(); std::string getCrmAclKey(sai_acl_stage_t stage, sai_acl_bind_point_type_t bindPoint); std::string getCrmAclTableKey(sai_object_id_t id); + std::string getCrmP4rtTableKey(std::string table_name); }; diff --git a/orchagent/debugcounterorch.cpp b/orchagent/debugcounterorch.cpp index 25d0b94589..ed27f400d4 100644 --- a/orchagent/debugcounterorch.cpp +++ b/orchagent/debugcounterorch.cpp @@ -5,6 +5,7 @@ #include "schema.h" #include "drop_counter.h" #include +#include "observer.h" using std::string; using std::unordered_map; @@ -34,6 +35,8 @@ DebugCounterOrch::DebugCounterOrch(DBConnector *db, const vector& table_ { SWSS_LOG_ENTER(); publishDropCounterCapabilities(); + + gPortsOrch->attach(this); } DebugCounterOrch::~DebugCounterOrch(void) @@ -41,6 +44,52 @@ DebugCounterOrch::~DebugCounterOrch(void) SWSS_LOG_ENTER(); } +void DebugCounterOrch::update(SubjectType type, void *cntx) +{ + SWSS_LOG_ENTER(); + + if (type == SUBJECT_TYPE_PORT_CHANGE) + { + if (!cntx) + { + SWSS_LOG_ERROR("cntx is NULL"); + return; + } + + PortUpdate *update = static_cast(cntx); + Port &port = update->port; + + if (update->add) + { + for (const auto& debug_counter: debug_counters) + { + DebugCounter *counter = debug_counter.second.get(); + auto counter_type = counter->getCounterType(); + auto 
counter_stat = counter->getDebugCounterSAIStat(); + auto flex_counter_type = getFlexCounterType(counter_type); + if (flex_counter_type == CounterType::PORT_DEBUG) + { + installDebugFlexCounters(counter_type, counter_stat, port.m_port_id); + } + } + } + else + { + for (const auto& debug_counter: debug_counters) + { + DebugCounter *counter = debug_counter.second.get(); + auto counter_type = counter->getCounterType(); + auto counter_stat = counter->getDebugCounterSAIStat(); + auto flex_counter_type = getFlexCounterType(counter_type); + if (flex_counter_type == CounterType::PORT_DEBUG) + { + uninstallDebugFlexCounters(counter_type, counter_stat, port.m_port_id); + } + } + } + } +} + // doTask processes updates from the consumer and modifies the state of the // following components: // 1) The ASIC, by creating, modifying, and deleting debug counters @@ -476,7 +525,8 @@ CounterType DebugCounterOrch::getFlexCounterType(const string& counter_type) } void DebugCounterOrch::installDebugFlexCounters(const string& counter_type, - const string& counter_stat) + const string& counter_stat, + sai_object_id_t port_id) { SWSS_LOG_ENTER(); CounterType flex_counter_type = getFlexCounterType(counter_type); @@ -489,6 +539,14 @@ void DebugCounterOrch::installDebugFlexCounters(const string& counter_type, { for (auto const &curr : gPortsOrch->getAllPorts()) { + if (port_id != SAI_NULL_OBJECT_ID) + { + if (curr.second.m_port_id != port_id) + { + continue; + } + } + if (curr.second.m_type != Port::Type::PHY) { continue; @@ -503,7 +561,8 @@ void DebugCounterOrch::installDebugFlexCounters(const string& counter_type, } void DebugCounterOrch::uninstallDebugFlexCounters(const string& counter_type, - const string& counter_stat) + const string& counter_stat, + sai_object_id_t port_id) { SWSS_LOG_ENTER(); CounterType flex_counter_type = getFlexCounterType(counter_type); @@ -516,6 +575,14 @@ void DebugCounterOrch::uninstallDebugFlexCounters(const string& counter_type, { for (auto const &curr : 
gPortsOrch->getAllPorts()) { + if (port_id != SAI_NULL_OBJECT_ID) + { + if (curr.second.m_port_id != port_id) + { + continue; + } + } + if (curr.second.m_type != Port::Type::PHY) { continue; @@ -616,3 +683,6 @@ bool DebugCounterOrch::isDropReasonValid(const string& drop_reason) const return true; } + + + diff --git a/orchagent/debugcounterorch.h b/orchagent/debugcounterorch.h index e5b512c8e4..edfb5d98e0 100644 --- a/orchagent/debugcounterorch.h +++ b/orchagent/debugcounterorch.h @@ -10,6 +10,7 @@ #include "flex_counter_stat_manager.h" #include "debug_counter.h" #include "drop_counter.h" +#include "observer.h" extern "C" { #include "sai.h" @@ -17,9 +18,11 @@ extern "C" { #define DEBUG_COUNTER_FLEX_COUNTER_GROUP "DEBUG_COUNTER" +using DebugCounterMap = std::unordered_map>; + // DebugCounterOrch is an orchestrator for managing debug counters. It handles // the creation, deletion, and modification of debug counters. -class DebugCounterOrch: public Orch +class DebugCounterOrch: public Orch, public Observer { public: DebugCounterOrch(swss::DBConnector *db, const std::vector& table_names, int poll_interval); @@ -27,6 +30,7 @@ class DebugCounterOrch: public Orch void doTask(Consumer& consumer); + void update(SubjectType, void *cntx); private: // Debug Capability Reporting Functions void publishDropCounterCapabilities(); @@ -48,10 +52,12 @@ class DebugCounterOrch: public Orch CounterType getFlexCounterType(const std::string& counter_type) noexcept(false); void installDebugFlexCounters( const std::string& counter_type, - const std::string& counter_stat); + const std::string& counter_stat, + sai_object_id_t port_id = SAI_NULL_OBJECT_ID); void uninstallDebugFlexCounters( const std::string& counter_type, - const std::string& counter_stat); + const std::string& counter_stat, + sai_object_id_t port_id = SAI_NULL_OBJECT_ID); // Debug Counter Initialization Helper Functions std::string getDebugCounterType( @@ -83,7 +89,7 @@ class DebugCounterOrch: public Orch 
FlexCounterStatManager flex_counter_manager; - std::unordered_map> debug_counters; + DebugCounterMap debug_counters; // free_drop_counters are drop counters that have been created by a user // that do not have any drop reasons associated with them yet. Because diff --git a/orchagent/directory.h b/orchagent/directory.h index ecae1564a2..4d5857b18e 100644 --- a/orchagent/directory.h +++ b/orchagent/directory.h @@ -6,6 +6,7 @@ #include #include +#include #include #include diff --git a/orchagent/dtelorch.cpp b/orchagent/dtelorch.cpp index 378a225e37..084d078452 100644 --- a/orchagent/dtelorch.cpp +++ b/orchagent/dtelorch.cpp @@ -5,6 +5,8 @@ #include "converter.h" #include "ipprefix.h" #include "swssnet.h" +#include "directory.h" +#include "vrforch.h" using namespace std; using namespace swss; @@ -13,6 +15,7 @@ extern sai_switch_api_t* sai_switch_api; extern sai_dtel_api_t* sai_dtel_api; extern sai_object_id_t gVirtualRouterId; extern sai_object_id_t gSwitchId; +extern Directory gDirectory; dtelEventLookup_t dTelEventLookup = { @@ -1152,9 +1155,14 @@ void DTelOrch::doDtelReportSessionTableTask(Consumer &consumer) } else if (fvField(i) == VRF) { - rs_attr.id = SAI_DTEL_REPORT_SESSION_ATTR_VIRTUAL_ROUTER_ID; - /* TODO: find a way to convert vrf to oid */ + string vrf_name = fvValue(i); rs_attr.value.oid = gVirtualRouterId; + if (vrf_name != "default") + { + VRFOrch* vrf_orch = gDirectory.get(); + rs_attr.value.oid = vrf_orch->getVRFid(vrf_name); + } + rs_attr.id = SAI_DTEL_REPORT_SESSION_ATTR_VIRTUAL_ROUTER_ID; report_session_attr.push_back(rs_attr); } else if (fvField(i) == TRUNCATE_SIZE) diff --git a/orchagent/fabricportsorch.cpp b/orchagent/fabricportsorch.cpp index 1adb84ec08..d521e02b1a 100644 --- a/orchagent/fabricportsorch.cpp +++ b/orchagent/fabricportsorch.cpp @@ -9,19 +9,21 @@ #include "schema.h" #include "sai_serialize.h" #include "timer.h" +#include "saihelper.h" #define FABRIC_POLLING_INTERVAL_DEFAULT (30) +#define FABRIC_PORT_PREFIX "PORT" #define 
FABRIC_PORT_ERROR 0 #define FABRIC_PORT_SUCCESS 1 #define FABRIC_PORT_STAT_COUNTER_FLEX_COUNTER_GROUP "FABRIC_PORT_STAT_COUNTER" #define FABRIC_PORT_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 10000 #define FABRIC_QUEUE_STAT_COUNTER_FLEX_COUNTER_GROUP "FABRIC_QUEUE_STAT_COUNTER" #define FABRIC_QUEUE_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 100000 -#define FABRIC_PORT_TABLE "FABRIC_PORT_TABLE" extern sai_object_id_t gSwitchId; extern sai_switch_api_t *sai_switch_api; extern sai_port_api_t *sai_port_api; +extern sai_queue_api_t *sai_queue_api; const vector port_stat_ids = { @@ -42,7 +44,8 @@ static const vector queue_stat_ids = SAI_QUEUE_STAT_CURR_OCCUPANCY_LEVEL, }; -FabricPortsOrch::FabricPortsOrch(DBConnector *appl_db, vector &tableNames) : +FabricPortsOrch::FabricPortsOrch(DBConnector *appl_db, vector &tableNames, + bool fabricPortStatEnabled, bool fabricQueueStatEnabled) : Orch(appl_db, tableNames), port_stat_manager(FABRIC_PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, FABRIC_PORT_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, true), @@ -55,14 +58,17 @@ FabricPortsOrch::FabricPortsOrch(DBConnector *appl_db, vector(new DBConnector("STATE_DB", 0)); - m_stateTable = unique_ptr(new Table(m_state_db.get(), FABRIC_PORT_TABLE)); + m_stateTable = unique_ptr
(new Table(m_state_db.get(), APP_FABRIC_PORT_TABLE_NAME)); m_counter_db = shared_ptr(new DBConnector("COUNTERS_DB", 0)); - m_laneQueueCounterTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_QUEUE_NAME_MAP)); - m_lanePortCounterTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_QUEUE_PORT_MAP)); + m_portNameQueueCounterTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_FABRIC_QUEUE_NAME_MAP)); + m_portNamePortCounterTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_FABRIC_PORT_NAME_MAP)); m_flex_db = shared_ptr(new DBConnector("FLEX_COUNTER_DB", 0)); - m_flexCounterTable = unique_ptr(new ProducerTable(m_flex_db.get(), FABRIC_PORT_TABLE)); + m_flexCounterTable = unique_ptr(new ProducerTable(m_flex_db.get(), APP_FABRIC_PORT_TABLE_NAME)); + + m_fabricPortStatEnabled = fabricPortStatEnabled; + m_fabricQueueStatEnabled = fabricQueueStatEnabled; getFabricPortList(); @@ -135,8 +141,6 @@ int FabricPortsOrch::getFabricPortList() m_getFabricPortListDone = true; - updateFabricPortState(); - return FABRIC_PORT_SUCCESS; } @@ -147,32 +151,96 @@ bool FabricPortsOrch::allPortsReady() void FabricPortsOrch::generatePortStats() { - // FIX_ME: This function installs flex counters for port stats - // on fabric ports for fabric asics and voq asics (that connect - // to fabric asics via fabric ports). These counters will be - // installed in FLEX_COUNTER_DB, and queried by syncd and updated - // to COUNTERS_DB. - // However, currently BCM SAI doesn't update its code to query - // port stats (metrics in list port_stat_ids) yet. - // Also, BCM sets too low value for "Max logical port count" (256), - // causing syncd to crash on voq asics that now include regular front - // panel ports, fabric ports, and multiple logical ports. - // So, this function will just do nothing for now, and we will readd - // code to install port stats counters when BCM completely supports. 
+ if (!m_fabricPortStatEnabled) return; + + SWSS_LOG_NOTICE("Generate fabric port stats"); + + vector portNamePortCounterMap; + for (auto p : m_fabricLanePortMap) + { + int lane = p.first; + sai_object_id_t port = p.second; + + std::ostringstream portName; + portName << FABRIC_PORT_PREFIX << lane; + portNamePortCounterMap.emplace_back(portName.str(), sai_serialize_object_id(port)); + + // Install flex counters for port stats + std::unordered_set counter_stats; + for (const auto& it: port_stat_ids) + { + counter_stats.emplace(sai_serialize_port_stat(it)); + } + port_stat_manager.setCounterIdList(port, CounterType::PORT, counter_stats); + } + m_portNamePortCounterTable->set("", portNamePortCounterMap); } void FabricPortsOrch::generateQueueStats() { + if (!m_fabricQueueStatEnabled) return; if (m_isQueueStatsGenerated) return; if (!m_getFabricPortListDone) return; - // FIX_ME: Similar to generatePortStats(), generateQueueStats() installs - // flex counters for queue stats on fabric ports for fabric asics and voq asics. - // However, currently BCM SAI doesn't fully support queue stats query. - // Query on queue type and index is not supported for fabric asics while - // voq asics are not completely supported. - // So, this function will just do nothing for now, and we will readd - // code to install queue stats counters when BCM completely supports. + SWSS_LOG_NOTICE("Generate queue map for fabric ports"); + + sai_status_t status; + sai_attribute_t attr; + + for (auto p : m_fabricLanePortMap) + { + int lane = p.first; + sai_object_id_t port = p.second; + + // Each serdes has some pipes (queues) for unicast and multicast. + // But normally fabric serdes uses only one pipe. 
+ attr.id = SAI_PORT_ATTR_QOS_NUMBER_OF_QUEUES; + status = sai_port_api->get_port_attribute(port, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + throw runtime_error("FabricPortsOrch get port queue number failure"); + } + int num_queues = attr.value.u32; + + if (num_queues > 0) + { + vector m_queue_ids; + m_queue_ids.resize(num_queues); + + attr.id = SAI_PORT_ATTR_QOS_QUEUE_LIST; + attr.value.objlist.count = (uint32_t) num_queues; + attr.value.objlist.list = m_queue_ids.data(); + + status = sai_port_api->get_port_attribute(port, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + throw runtime_error("FabricPortsOrch get port queue list failure"); + } + + // Maintain queue map and install flex counters for queue stats + vector portNameQueueMap; + + // Fabric serdes queue type is SAI_QUEUE_TYPE_FABRIC_TX. Since we always + // maintain only one queue for fabric serdes, m_queue_ids size is 1. + // And so, there is no need to query SAI_QUEUE_ATTR_TYPE and SAI_QUEUE_ATTR_INDEX + // for queue. Actually, SAI does not support query these attributes on fabric serdes. 
+ int queueIndex = 0; + std::ostringstream portName; + portName << FABRIC_PORT_PREFIX << lane << ":" << queueIndex; + const auto queue = sai_serialize_object_id(m_queue_ids[queueIndex]); + portNameQueueMap.emplace_back(portName.str(), queue); + + // We collect queue counters like occupancy level + std::unordered_set counter_stats; + for (const auto& it: queue_stat_ids) + { + counter_stats.emplace(sai_serialize_queue_stat(it)); + } + queue_stat_manager.setCounterIdList(m_queue_ids[queueIndex], CounterType::QUEUE, counter_stats); + + m_portNameQueueCounterTable->set("", portNameQueueMap); + } + } m_isQueueStatsGenerated = true; } @@ -199,7 +267,7 @@ void FabricPortsOrch::updateFabricPortState() int lane = p.first; sai_object_id_t port = p.second; - string key = "PORT" + to_string(lane); + string key = FABRIC_PORT_PREFIX + to_string(lane); std::vector values; uint32_t remote_peer; uint32_t remote_port; diff --git a/orchagent/fabricportsorch.h b/orchagent/fabricportsorch.h index c641ee566d..de7ee7a7b0 100644 --- a/orchagent/fabricportsorch.h +++ b/orchagent/fabricportsorch.h @@ -12,18 +12,22 @@ class FabricPortsOrch : public Orch, public Subject { public: - FabricPortsOrch(DBConnector *appl_db, vector &tableNames); + FabricPortsOrch(DBConnector *appl_db, vector &tableNames, + bool fabricPortStatEnabled=true, bool fabricQueueStatEnabled=true); bool allPortsReady(); void generateQueueStats(); private: + bool m_fabricPortStatEnabled; + bool m_fabricQueueStatEnabled; + shared_ptr m_state_db; shared_ptr m_counter_db; shared_ptr m_flex_db; unique_ptr
m_stateTable; - unique_ptr
m_laneQueueCounterTable; - unique_ptr
m_lanePortCounterTable; + unique_ptr
m_portNameQueueCounterTable; + unique_ptr
m_portNamePortCounterTable; unique_ptr m_flexCounterTable; swss::SelectableTimer *m_timer = nullptr; diff --git a/orchagent/fdborch.cpp b/orchagent/fdborch.cpp index daab3ad52e..48683d5ea6 100644 --- a/orchagent/fdborch.cpp +++ b/orchagent/fdborch.cpp @@ -18,7 +18,6 @@ extern sai_fdb_api_t *sai_fdb_api; extern sai_object_id_t gSwitchId; -extern PortsOrch* gPortsOrch; extern CrmOrch * gCrmOrch; extern MlagOrch* gMlagOrch; extern Directory gDirectory; @@ -76,6 +75,7 @@ bool FdbOrch::storeFdbEntryState(const FdbUpdate& update) string portName = port.m_alias; Port vlan; + oldFdbData.origin = FDB_ORIGIN_INVALID; if (!m_portsOrch->getPort(entry.bv_id, vlan)) { SWSS_LOG_NOTICE("FdbOrch notification: Failed to locate \ @@ -109,6 +109,7 @@ bool FdbOrch::storeFdbEntryState(const FdbUpdate& update) fdbdata.bridge_port_id = update.port.m_bridge_port_id; fdbdata.type = update.type; + fdbdata.sai_fdb_type = update.sai_fdb_type; fdbdata.origin = FDB_ORIGIN_LEARN; fdbdata.remote_ip = ""; fdbdata.esi = ""; @@ -174,9 +175,110 @@ bool FdbOrch::storeFdbEntryState(const FdbUpdate& update) } } +/* +clears stateDb and decrements corresponding internal fdb counters +*/ +void FdbOrch::clearFdbEntry(const FdbEntry& entry) +{ + FdbUpdate update; + update.entry = entry; + update.add = false; + + /* Fetch Vlan and decrement the counter */ + Port temp_vlan; + if (m_portsOrch->getPort(entry.bv_id, temp_vlan)) + { + m_portsOrch->decrFdbCount(temp_vlan.m_alias, 1); + } + + /* Decrement port fdb_counter */ + m_portsOrch->decrFdbCount(entry.port_name, 1); + + /* Remove the FdbEntry from the internal cache, update state DB and CRM counter */ + storeFdbEntryState(update); + notify(SUBJECT_TYPE_FDB_CHANGE, &update); + + SWSS_LOG_INFO("FdbEntry removed from internal cache, MAC: %s , port: %s, BVID: 0x%" PRIx64, + update.entry.mac.to_string().c_str(), update.entry.port_name.c_str(), update.entry.bv_id); +} + +/* +Handles the SAI_FDB_EVENT_FLUSHED notification recieved from syncd +*/ +void 
FdbOrch::handleSyncdFlushNotif(const sai_object_id_t& bv_id, + const sai_object_id_t& bridge_port_id, + const MacAddress& mac, + const sai_fdb_entry_type_t& sai_fdb_type) +{ + // Consolidated flush will have a zero mac + MacAddress flush_mac("00:00:00:00:00:00"); + + if (bridge_port_id == SAI_NULL_OBJECT_ID && bv_id == SAI_NULL_OBJECT_ID) + { + for (auto itr = m_entries.begin(); itr != m_entries.end();) + { + auto curr = itr++; + if (curr->second.sai_fdb_type == sai_fdb_type && + (curr->first.mac == mac || mac == flush_mac) && curr->second.is_flush_pending) + { + clearFdbEntry(curr->first); + } + } + } + else if (bv_id == SAI_NULL_OBJECT_ID) + { + /* FLUSH based on PORT */ + for (auto itr = m_entries.begin(); itr != m_entries.end();) + { + auto curr = itr++; + if (curr->second.bridge_port_id == bridge_port_id) + { + if (curr->second.sai_fdb_type == sai_fdb_type && + (curr->first.mac == mac || mac == flush_mac) && curr->second.is_flush_pending) + { + clearFdbEntry(curr->first); + } + } + } + } + else if (bridge_port_id == SAI_NULL_OBJECT_ID) + { + /* FLUSH based on BV_ID */ + for (auto itr = m_entries.begin(); itr != m_entries.end();) + { + auto curr = itr++; + if (curr->first.bv_id == bv_id) + { + if (curr->second.sai_fdb_type == sai_fdb_type && + (curr->first.mac == mac || mac == flush_mac) && curr->second.is_flush_pending) + { + clearFdbEntry(curr->first); + } + } + } + } + else + { + /* FLUSH based on port and VLAN */ + for (auto itr = m_entries.begin(); itr != m_entries.end();) + { + auto curr = itr++; + if (curr->first.bv_id == bv_id && curr->second.bridge_port_id == bridge_port_id) + { + if (curr->second.sai_fdb_type == sai_fdb_type && + (curr->first.mac == mac || mac == flush_mac) && curr->second.is_flush_pending) + { + clearFdbEntry(curr->first); + } + } + } + } +} + void FdbOrch::update(sai_fdb_event_t type, const sai_fdb_entry_t* entry, - sai_object_id_t bridge_port_id) + sai_object_id_t bridge_port_id, + const sai_fdb_entry_type_t &sai_fdb_type) { 
SWSS_LOG_ENTER(); @@ -191,24 +293,29 @@ void FdbOrch::update(sai_fdb_event_t type, type, update.entry.mac.to_string().c_str(), entry->bv_id, bridge_port_id); - if (bridge_port_id && !m_portsOrch->getPortByBridgePortId(bridge_port_id, update.port)) { if (type == SAI_FDB_EVENT_FLUSHED) { - /* In case of flush - can be ignored due to a race. - There are notifications about FDB FLUSH (syncd/sai_redis) on port, - which was already removed by orchagent as a result of - removeVlanMember action (removeBridgePort) */ + /* There are notifications about FDB FLUSH (syncd/sai_redis) on port, + which was already removed by orchagent as a result of removeVlanMember + action (removeBridgePort). But the internal cleanup of statedb and + internal counters is yet to be performed, thus continue + */ SWSS_LOG_INFO("Flush event: Failed to get port by bridge port ID 0x%" PRIx64 ".", bridge_port_id); - } else { SWSS_LOG_ERROR("Failed to get port by bridge port ID 0x%" PRIx64 ".", bridge_port_id); - + return; } + } + + if (entry->bv_id && + !m_portsOrch->getPort(entry->bv_id, vlan)) + { + SWSS_LOG_NOTICE("FdbOrch notification type %d: Failed to locate vlan port from bv_id 0x%" PRIx64, type, entry->bv_id); return; } @@ -218,12 +325,6 @@ void FdbOrch::update(sai_fdb_event_t type, { SWSS_LOG_INFO("Received LEARN event for bvid=0x%" PRIx64 "mac=%s port=0x%" PRIx64, entry->bv_id, update.entry.mac.to_string().c_str(), bridge_port_id); - if (!m_portsOrch->getPort(entry->bv_id, vlan)) - { - SWSS_LOG_ERROR("FdbOrch LEARN notification: Failed to locate vlan port from bv_id 0x%" PRIx64, entry->bv_id); - return; - } - // we already have such entries auto existing_entry = m_entries.find(update.entry); if (existing_entry != m_entries.end()) @@ -268,6 +369,7 @@ void FdbOrch::update(sai_fdb_event_t type, attr.id = SAI_FDB_ENTRY_ATTR_TYPE; attr.value.s32 = SAI_FDB_ENTRY_TYPE_DYNAMIC; + update.sai_fdb_type = SAI_FDB_ENTRY_TYPE_DYNAMIC; attrs.push_back(attr); attr.id = SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID; @@ 
-302,6 +404,7 @@ void FdbOrch::update(sai_fdb_event_t type, update.add = true; update.entry.port_name = update.port.m_alias; + update.sai_fdb_type = SAI_FDB_ENTRY_TYPE_DYNAMIC; update.type = "dynamic"; update.port.m_fdb_count++; m_portsOrch->setPort(update.port.m_alias, update.port); @@ -318,11 +421,6 @@ void FdbOrch::update(sai_fdb_event_t type, SWSS_LOG_INFO("Received AGE event for bvid=0x%" PRIx64 " mac=%s port=0x%" PRIx64, entry->bv_id, update.entry.mac.to_string().c_str(), bridge_port_id); - if (!m_portsOrch->getPort(entry->bv_id, vlan)) - { - SWSS_LOG_NOTICE("FdbOrch AGE notification: Failed to locate vlan port from bv_id 0x%" PRIx64, entry->bv_id); - } - auto existing_entry = m_entries.find(update.entry); // we don't have such entries if (existing_entry == m_entries.end()) @@ -456,17 +554,12 @@ void FdbOrch::update(sai_fdb_event_t type, SWSS_LOG_INFO("Received MOVE event for bvid=0x%" PRIx64 " mac=%s port=0x%" PRIx64, entry->bv_id, update.entry.mac.to_string().c_str(), bridge_port_id); - if (!m_portsOrch->getPort(entry->bv_id, vlan)) - { - SWSS_LOG_ERROR("FdbOrch MOVE notification: Failed to locate vlan port from bv_id 0x%" PRIx64, entry->bv_id); - return; - } - // We should already have such entry if (existing_entry == m_entries.end()) { SWSS_LOG_WARN("FdbOrch MOVE notification: mac %s is not found in bv_id 0x%" PRIx64, update.entry.mac.to_string().c_str(), entry->bv_id); + break; } else if (!m_portsOrch->getPortByBridgePortId(existing_entry->second.bridge_port_id, port_old)) { @@ -474,7 +567,45 @@ void FdbOrch::update(sai_fdb_event_t type, return; } + /* If the existing MAC is MCLAG remote, change its type to dynamic. 
*/ + if (existing_entry->second.origin == FDB_ORIGIN_MCLAG_ADVERTIZED) + { + if (existing_entry->second.bridge_port_id != bridge_port_id) + { + sai_status_t status; + sai_fdb_entry_t fdb_entry; + fdb_entry.switch_id = gSwitchId; + memcpy(fdb_entry.mac_address, entry->mac_address, sizeof(sai_mac_t)); + fdb_entry.bv_id = entry->bv_id; + sai_attribute_t attr; + vector attrs; + + attr.id = SAI_FDB_ENTRY_ATTR_ALLOW_MAC_MOVE; + attr.value.booldata = false; + attrs.push_back(attr); + + attr.id = SAI_FDB_ENTRY_ATTR_TYPE; + attr.value.s32 = SAI_FDB_ENTRY_TYPE_DYNAMIC; + attrs.push_back(attr); + + attr.id = SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID; + attr.value.oid = bridge_port_id; + attrs.push_back(attr); + + for(auto itr : attrs) + { + status = sai_fdb_api->set_fdb_entry_attribute(&fdb_entry, &itr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("macUpdate-Failed for MCLAG mac attr.id=0x%x for FDB %s in 0x%" PRIx64 "on %s, rv:%d", + itr.id, update.entry.mac.to_string().c_str(), entry->bv_id, update.port.m_alias.c_str(), status); + } + } + } + } + update.add = true; + update.entry.port_name = update.port.m_alias; if (!port_old.m_alias.empty()) { port_old.m_fdb_count--; @@ -482,6 +613,7 @@ void FdbOrch::update(sai_fdb_event_t type, } update.port.m_fdb_count++; m_portsOrch->setPort(update.port.m_alias, update.port); + update.sai_fdb_type = SAI_FDB_ENTRY_TYPE_DYNAMIC; storeFdbEntryState(update); notify(SUBJECT_TYPE_FDB_CHANGE, &update); @@ -498,80 +630,15 @@ void FdbOrch::update(sai_fdb_event_t type, bridge_port_id); string vlanName = "-"; - if (entry->bv_id) { - Port vlan; - - if (!m_portsOrch->getPort(entry->bv_id, vlan)) - { - SWSS_LOG_NOTICE("FdbOrch notification: Failed to locate vlan\ - port from bv_id 0x%" PRIx64, entry->bv_id); - return; - } + if (!vlan.m_alias.empty()) { vlanName = "Vlan" + to_string(vlan.m_vlan_info.vlan_id); } + SWSS_LOG_INFO("FDB Flush: [ %s , %s ] = { port: %s }", update.entry.mac.to_string().c_str(), + vlanName.c_str(), 
update.port.m_alias.c_str()); - if (bridge_port_id == SAI_NULL_OBJECT_ID && - entry->bv_id == SAI_NULL_OBJECT_ID) - { - SWSS_LOG_INFO("FDB Flush: [ %s , %s ] = { port: - }", - update.entry.mac.to_string().c_str(), vlanName.c_str()); - for (auto itr = m_entries.begin(); itr != m_entries.end();) - { - /* - TODO: here should only delete the dynamic fdb entries, - but unfortunately in structure FdbEntry currently have - no member to indicate the fdb entry type, - if there is static mac added, here will have issue. - */ - update.entry.mac = itr->first.mac; - update.entry.bv_id = itr->first.bv_id; - update.add = false; - itr++; - - storeFdbEntryState(update); - - notify(SUBJECT_TYPE_FDB_CHANGE, &update); - - } - } - else if (entry->bv_id == SAI_NULL_OBJECT_ID) - { - /* FLUSH based on port */ - SWSS_LOG_INFO("FDB Flush: [ %s , %s ] = { port: %s }", - update.entry.mac.to_string().c_str(), - vlanName.c_str(), update.port.m_alias.c_str()); + handleSyncdFlushNotif(entry->bv_id, bridge_port_id, update.entry.mac, sai_fdb_type); - for (auto itr = m_entries.begin(); itr != m_entries.end();) - { - auto next_item = std::next(itr); - if (itr->first.port_name == update.port.m_alias) - { - update.entry.mac = itr->first.mac; - update.entry.bv_id = itr->first.bv_id; - update.add = false; - - storeFdbEntryState(update); - notify(SUBJECT_TYPE_FDB_CHANGE, &update); - } - itr = next_item; - } - } - else if (bridge_port_id == SAI_NULL_OBJECT_ID) - { - /* FLUSH based on VLAN - unsupported */ - SWSS_LOG_ERROR("Unsupported FDB Flush: [ %s , %s ] = { port: - }", - update.entry.mac.to_string().c_str(), - vlanName.c_str()); - - } - else - { - /* FLUSH based on port and VLAN - unsupported */ - SWSS_LOG_ERROR("Unsupported FDB Flush: [ %s , %s ] = { port: %s }", - update.entry.mac.to_string().c_str(), - vlanName.c_str(), update.port.m_alias.c_str()); - } break; } @@ -614,29 +681,23 @@ bool FdbOrch::getPort(const MacAddress& mac, uint16_t vlan, Port& port) return false; } - sai_fdb_entry_t entry; - 
entry.switch_id = gSwitchId; - memcpy(entry.mac_address, mac.getMac(), sizeof(sai_mac_t)); + FdbEntry entry; + entry.mac = mac; entry.bv_id = port.m_vlan_info.vlan_oid; - sai_attribute_t attr; - attr.id = SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID; - - sai_status_t status = sai_fdb_api->get_fdb_entry_attribute(&entry, 1, &attr); - if (status != SAI_STATUS_SUCCESS) + auto it = m_entries.find(entry); + if (it == m_entries.end()) { - SWSS_LOG_ERROR("Failed to get bridge port ID for FDB entry %s, rv:%d", - mac.to_string().c_str(), status); - task_process_status handle_status = handleSaiGetStatus(SAI_API_FDB, status); - if (handle_status != task_process_status::task_success) - { - return false; - } + // This message is now expected in many cases since orchagent will process events such as + // learning new neighbor entries prior to updating the m_entries FDB cache. + SWSS_LOG_INFO("Failed to get cached bridge port ID for FDB entry %s", + mac.to_string().c_str()); + return false; } - if (!m_portsOrch->getPortByBridgePortId(attr.value.oid, port)) + if (!m_portsOrch->getPortByBridgePortId(it->second.bridge_port_id, port)) { - SWSS_LOG_ERROR("Failed to get port by bridge port ID 0x%" PRIx64, attr.value.oid); + SWSS_LOG_ERROR("Failed to get port by bridge port ID 0x%" PRIx64, it->second.bridge_port_id); return false; } @@ -647,7 +708,7 @@ void FdbOrch::doTask(Consumer& consumer) { SWSS_LOG_ENTER(); - if (!gPortsOrch->allPortsReady()) + if (!m_portsOrch->allPortsReady()) { return; } @@ -797,6 +858,7 @@ void FdbOrch::doTask(Consumer& consumer) fdbData.remote_ip = remote_ip; fdbData.esi = esi; fdbData.vni = vni; + fdbData.is_flush_pending = false; if (addFdbEntry(entry, port, fdbData)) { if (origin == FDB_ORIGIN_MCLAG_ADVERTIZED) @@ -854,7 +916,7 @@ void FdbOrch::doTask(NotificationConsumer& consumer) { SWSS_LOG_ENTER(); - if (!gPortsOrch->allPortsReady()) + if (!m_portsOrch->allPortsReady()) { return; } @@ -874,12 +936,25 @@ void FdbOrch::doTask(NotificationConsumer& consumer) { if (op 
== "ALL") { - status = sai_fdb_api->flush_fdb_entries(gSwitchId, 0, NULL); + vector attrs; + sai_attribute_t attr; + attr.id = SAI_FDB_FLUSH_ATTR_ENTRY_TYPE; + attr.value.s32 = SAI_FDB_FLUSH_ENTRY_TYPE_DYNAMIC; + attrs.push_back(attr); + status = sai_fdb_api->flush_fdb_entries(gSwitchId, (uint32_t)attrs.size(), attrs.data()); if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Flush fdb failed, return code %x", status); } + if (status == SAI_STATUS_SUCCESS) { + for (map::iterator it = m_entries.begin(); + it != m_entries.end(); it++) + { + it->second.is_flush_pending = true; + } + } + return; } else if (op == "PORT") @@ -890,7 +965,7 @@ void FdbOrch::doTask(NotificationConsumer& consumer) SWSS_LOG_ERROR("Receive wrong port to flush fdb!"); return; } - if (!gPortsOrch->getPort(alias, port)) + if (!m_portsOrch->getPort(alias, port)) { SWSS_LOG_ERROR("Get Port from port(%s) failed!", alias.c_str()); return; @@ -911,7 +986,7 @@ void FdbOrch::doTask(NotificationConsumer& consumer) SWSS_LOG_ERROR("Receive wrong vlan to flush fdb!"); return; } - if (!gPortsOrch->getPort(vlan, vlanPort)) + if (!m_portsOrch->getPort(vlan, vlanPort)) { SWSS_LOG_ERROR("Get Port from vlan(%s) failed!", vlan.c_str()); return; @@ -937,12 +1012,12 @@ void FdbOrch::doTask(NotificationConsumer& consumer) SWSS_LOG_ERROR("Receive wrong port or vlan to flush fdb!"); return; } - if (!gPortsOrch->getPort(alias, port)) + if (!m_portsOrch->getPort(alias, port)) { SWSS_LOG_ERROR("Get Port from port(%s) failed!", alias.c_str()); return; } - if (!gPortsOrch->getPort(vlan, vlanPort)) + if (!m_portsOrch->getPort(vlan, vlanPort)) { SWSS_LOG_ERROR("Get Port from vlan(%s) failed!", vlan.c_str()); return; @@ -966,6 +1041,7 @@ void FdbOrch::doTask(NotificationConsumer& consumer) { uint32_t count; sai_fdb_event_notification_data_t *fdbevent = nullptr; + sai_fdb_entry_type_t sai_fdb_type = SAI_FDB_ENTRY_TYPE_DYNAMIC; sai_deserialize_fdb_event_ntf(data, count, &fdbevent); @@ -978,11 +1054,14 @@ void 
FdbOrch::doTask(NotificationConsumer& consumer) if (fdbevent[i].attr[j].id == SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID) { oid = fdbevent[i].attr[j].value.oid; - break; + } + else if (fdbevent[i].attr[j].id == SAI_FDB_ENTRY_ATTR_TYPE) + { + sai_fdb_type = (sai_fdb_entry_type_t)fdbevent[i].attr[j].value.s32; } } - this->update(fdbevent[i].event_type, &fdbevent[i].fdb_entry, oid); + this->update(fdbevent[i].event_type, &fdbevent[i].fdb_entry, oid, sai_fdb_type); } sai_deserialize_free_fdb_event_ntf(count, fdbevent); @@ -1031,6 +1110,11 @@ void FdbOrch::flushFDBEntries(sai_object_id_t bridge_port_oid, attr.value.oid = vlan_oid; attrs.push_back(attr); } + + /* do not flush static mac */ + attr.id = SAI_FDB_FLUSH_ATTR_ENTRY_TYPE; + attr.value.s32 = SAI_FDB_FLUSH_ENTRY_TYPE_DYNAMIC; + attrs.push_back(attr); SWSS_LOG_INFO("Flushing FDB bridge_port_oid: 0x%" PRIx64 ", and bvid_oid:0x%" PRIx64 ".", bridge_port_oid, vlan_oid); @@ -1039,6 +1123,20 @@ void FdbOrch::flushFDBEntries(sai_object_id_t bridge_port_oid, { SWSS_LOG_ERROR("Flushing FDB failed. rv:%d", rv); } + + if (SAI_STATUS_SUCCESS == rv) { + for (map::iterator it = m_entries.begin(); + it != m_entries.end(); it++) + { + if ((bridge_port_oid != SAI_NULL_OBJECT_ID && + it->second.bridge_port_id == bridge_port_oid) || + (vlan_oid != SAI_NULL_OBJECT_ID && + it->first.bv_id == vlan_oid)) + { + it->second.is_flush_pending = true; + } + } + } } void FdbOrch::notifyObserversFDBFlush(Port &port, sai_object_id_t& bvid) @@ -1291,6 +1389,7 @@ bool FdbOrch::addFdbEntry(const FdbEntry& entry, const string& port_name, { attr.value.s32 = (fdbData.type == "dynamic") ? SAI_FDB_ENTRY_TYPE_DYNAMIC : SAI_FDB_ENTRY_TYPE_STATIC; } + fdbData.sai_fdb_type = (sai_fdb_entry_type_t)attr.value.s32; attrs.push_back(attr); @@ -1407,6 +1506,11 @@ bool FdbOrch::addFdbEntry(const FdbEntry& entry, const string& port_name, { //If the MAC is dynamic_local change the origin accordingly //MAC is added/updated as dynamic to allow aging. 
+ SWSS_LOG_INFO("MAC-Update Modify to dynamic FDB %s in %s on from-%s:to-%s from-%s:to-%s origin-%d-to-%d", + entry.mac.to_string().c_str(), vlan.m_alias.c_str(), oldPort.m_alias.c_str(), + port_name.c_str(), oldType.c_str(), fdbData.type.c_str(), + oldOrigin, fdbData.origin); + storeFdbData.origin = FDB_ORIGIN_LEARN; storeFdbData.type = "dynamic"; } @@ -1415,8 +1519,10 @@ bool FdbOrch::addFdbEntry(const FdbEntry& entry, const string& port_name, string key = "Vlan" + to_string(vlan.m_vlan_info.vlan_id) + ":" + entry.mac.to_string(); - if ((fdbData.origin != FDB_ORIGIN_MCLAG_ADVERTIZED) && - (fdbData.origin != FDB_ORIGIN_VXLAN_ADVERTIZED)) + if (((fdbData.origin != FDB_ORIGIN_MCLAG_ADVERTIZED) && + (fdbData.origin != FDB_ORIGIN_VXLAN_ADVERTIZED)) || + ((fdbData.origin == FDB_ORIGIN_MCLAG_ADVERTIZED) && + (fdbData.type == "dynamic_local"))) { /* State-DB is updated only for Local Mac addresses */ // Write to StateDb diff --git a/orchagent/fdborch.h b/orchagent/fdborch.h index 82611e686f..09bc6dcc69 100644 --- a/orchagent/fdborch.h +++ b/orchagent/fdborch.h @@ -36,6 +36,7 @@ struct FdbUpdate Port port; string type; bool add; + sai_fdb_entry_type_t sai_fdb_type; }; struct FdbFlushUpdate @@ -57,11 +58,13 @@ struct FdbData {"static", FDB_ORIGIN_PROVISIONED} => statically provisioned {"static", FDB_ORIGIN_ADVERTIZED} => sticky synced from remote device */ + bool is_flush_pending; /* Remote FDB related info */ string remote_ip; string esi; unsigned int vni; + sai_fdb_entry_type_t sai_fdb_type; }; struct SavedFdbEntry @@ -90,7 +93,7 @@ class FdbOrch: public Orch, public Subject, public Observer } bool bake() override; - void update(sai_fdb_event_t, const sai_fdb_entry_t *, sai_object_id_t); + void update(sai_fdb_event_t, const sai_fdb_entry_t *, sai_object_id_t, const sai_fdb_entry_type_t &); void update(SubjectType type, void *cntx); bool getPort(const MacAddress&, uint16_t, Port&); @@ -122,6 +125,10 @@ class FdbOrch: public Orch, public Subject, public Observer bool 
storeFdbEntryState(const FdbUpdate& update); void notifyTunnelOrch(Port& port); + + void clearFdbEntry(const FdbEntry&); + void handleSyncdFlushNotif(const sai_object_id_t&, const sai_object_id_t&, const MacAddress&, + const sai_fdb_entry_type_t&); }; #endif /* SWSS_FDBORCH_H */ diff --git a/orchagent/flex_counter/flex_counter_manager.cpp b/orchagent/flex_counter/flex_counter_manager.cpp index 71731e84d3..95fb28171d 100644 --- a/orchagent/flex_counter/flex_counter_manager.cpp +++ b/orchagent/flex_counter/flex_counter_manager.cpp @@ -44,6 +44,7 @@ const unordered_map FlexCounterManager::counter_id_field_lo { CounterType::ACL_COUNTER, ACL_COUNTER_ATTR_ID_LIST }, { CounterType::TUNNEL, TUNNEL_COUNTER_ID_LIST }, { CounterType::HOSTIF_TRAP, FLOW_COUNTER_ID_LIST }, + { CounterType::ROUTE, FLOW_COUNTER_ID_LIST }, }; FlexManagerDirectory g_FlexManagerDirectory; @@ -88,14 +89,28 @@ FlexCounterManager::FlexCounterManager( const uint polling_interval, const bool enabled, FieldValueTuple fv_plugin) : + FlexCounterManager("FLEX_COUNTER_DB", group_name, stats_mode, + polling_interval, enabled, fv_plugin) +{ +} + +FlexCounterManager::FlexCounterManager( + const string& db_name, + const string& group_name, + const StatsMode stats_mode, + const uint polling_interval, + const bool enabled, + FieldValueTuple fv_plugin) : group_name(group_name), stats_mode(stats_mode), polling_interval(polling_interval), enabled(enabled), fv_plugin(fv_plugin), - flex_counter_db(new DBConnector("FLEX_COUNTER_DB", 0)), - flex_counter_group_table(new ProducerTable(flex_counter_db.get(), FLEX_COUNTER_GROUP_TABLE)), - flex_counter_table(new ProducerTable(flex_counter_db.get(), FLEX_COUNTER_TABLE)) + flex_counter_db(new DBConnector(db_name, 0)), + flex_counter_group_table(new ProducerTable(flex_counter_db.get(), + FLEX_COUNTER_GROUP_TABLE)), + flex_counter_table(new ProducerTable(flex_counter_db.get(), + FLEX_COUNTER_TABLE)) { SWSS_LOG_ENTER(); @@ -113,7 +128,10 @@ FlexCounterManager::~FlexCounterManager() 
flex_counter_table->del(getFlexCounterTableKey(group_name, counter)); } - flex_counter_group_table->del(group_name); + if (flex_counter_group_table != nullptr) + { + flex_counter_group_table->del(group_name); + } SWSS_LOG_DEBUG("Deleted flex counter group '%s'.", group_name.c_str()); } diff --git a/orchagent/flex_counter/flex_counter_manager.h b/orchagent/flex_counter/flex_counter_manager.h index 6e80feb8fb..38bf829058 100644 --- a/orchagent/flex_counter/flex_counter_manager.h +++ b/orchagent/flex_counter/flex_counter_manager.h @@ -31,6 +31,7 @@ enum class CounterType ACL_COUNTER, TUNNEL, HOSTIF_TRAP, + ROUTE, }; // FlexCounterManager allows users to manage a group of flex counters. @@ -51,6 +52,14 @@ class FlexCounterManager FlexCounterManager() {} + FlexCounterManager( + const std::string& db_name, + const std::string& group_name, + const StatsMode stats_mode, + const uint polling_interval, + const bool enabled, + swss::FieldValueTuple fv_plugin = std::make_pair("","")); + FlexCounterManager(const FlexCounterManager&) = delete; FlexCounterManager& operator=(const FlexCounterManager&) = delete; virtual ~FlexCounterManager(); diff --git a/orchagent/flex_counter/flow_counter_handler.cpp b/orchagent/flex_counter/flow_counter_handler.cpp index 89f621fe7b..27ba357a8e 100644 --- a/orchagent/flex_counter/flow_counter_handler.cpp +++ b/orchagent/flex_counter/flow_counter_handler.cpp @@ -47,3 +47,16 @@ void FlowCounterHandler::getGenericCounterStatIdList(std::unordered_set& counter_stats); + static bool queryRouteFlowCounterCapability(); }; diff --git a/orchagent/flex_counter/flowcounterrouteorch.cpp b/orchagent/flex_counter/flowcounterrouteorch.cpp new file mode 100644 index 0000000000..9f5e6e2355 --- /dev/null +++ b/orchagent/flex_counter/flowcounterrouteorch.cpp @@ -0,0 +1,997 @@ +#include "dbconnector.h" +#include "directory.h" +#include "flow_counter_handler.h" +#include "logger.h" +#include "routeorch.h" +#include "flowcounterrouteorch.h" +#include "schema.h" 
+#include "swssnet.h" +#include "table.h" +#include "vnetorch.h" + +#include + +extern Directory gDirectory; +extern RouteOrch* gRouteOrch; +extern size_t gMaxBulkSize; +extern sai_route_api_t* sai_route_api; +extern sai_object_id_t gVirtualRouterId; +extern sai_object_id_t gSwitchId; + +#define FLEX_COUNTER_UPD_INTERVAL 1 +#define FLOW_COUNTER_ROUTE_KEY "route" +#define FLOW_COUNTER_SUPPORT_FIELD "support" +#define ROUTE_PATTERN_MAX_MATCH_COUNT_FIELD "max_match_count" +#define ROUTE_PATTERN_DEFAULT_MAX_MATCH_COUNT 30 +#define ROUTE_FLOW_COUNTER_POLLING_INTERVAL_MS 10000 + +FlowCounterRouteOrch::FlowCounterRouteOrch(swss::DBConnector *db, const std::vector &tableNames): +Orch(db, tableNames), +mAsicDb(std::shared_ptr(new DBConnector("ASIC_DB", 0))), +mCounterDb(std::shared_ptr(new DBConnector("COUNTERS_DB", 0))), +mVidToRidTable(std::unique_ptr
(new Table(mAsicDb.get(), "VIDTORID"))), +mPrefixToCounterTable(std::unique_ptr
(new Table(mCounterDb.get(), COUNTERS_ROUTE_NAME_MAP))), +mPrefixToPatternTable(std::unique_ptr
(new Table(mCounterDb.get(), COUNTERS_ROUTE_TO_PATTERN_MAP))), +mRouteFlowCounterMgr(ROUTE_FLOW_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, ROUTE_FLOW_COUNTER_POLLING_INTERVAL_MS, false), +gRouteBulker(sai_route_api, gMaxBulkSize) +{ + SWSS_LOG_ENTER(); + initRouteFlowCounterCapability(); + + if (mRouteFlowCounterSupported) + { + auto intervT = timespec { .tv_sec = FLEX_COUNTER_UPD_INTERVAL , .tv_nsec = 0 }; + mFlexCounterUpdTimer = new SelectableTimer(intervT); + auto executorT = new ExecutableTimer(mFlexCounterUpdTimer, this, "FLEX_COUNTER_UPD_TIMER"); + Orch::addExecutor(executorT); + } +} + +FlowCounterRouteOrch::~FlowCounterRouteOrch() +{ + SWSS_LOG_ENTER(); +} + +void FlowCounterRouteOrch::doTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + if (!gRouteOrch || !mRouteFlowCounterSupported) + { + return; + } + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple t = it->second; + + const auto &key = kfvKey(t); + const auto &op = kfvOp(t); + const auto &data = kfvFieldsValues(t); + if (op == SET_COMMAND) + { + size_t maxMatchCount = ROUTE_PATTERN_DEFAULT_MAX_MATCH_COUNT; + for (auto valuePair : data) + { + const auto &field = fvField(valuePair); + const auto &value = fvValue(valuePair); + if (field == ROUTE_PATTERN_MAX_MATCH_COUNT_FIELD) + { + maxMatchCount = (size_t)std::stoul(value); + if (maxMatchCount == 0) + { + SWSS_LOG_WARN("Max match count for route pattern cannot be 0, set it to default value 30"); + maxMatchCount = ROUTE_PATTERN_DEFAULT_MAX_MATCH_COUNT; + } + } + } + + addRoutePattern(key, maxMatchCount); + } + else if (op == DEL_COMMAND) + { + removeRoutePattern(key); + } + consumer.m_toSync.erase(it++); + } +} + +void FlowCounterRouteOrch::doTask(SelectableTimer &timer) +{ + SWSS_LOG_ENTER(); + SWSS_LOG_NOTICE("Add flex counters, pending in queue: %zu", mPendingAddToFlexCntr.size()); + string value; + std::string nameMapKey; + std::string pattern; + vector prefixToCounterMap; + vector 
prefixToPatternMap; + for (auto it = mPendingAddToFlexCntr.begin(); it != mPendingAddToFlexCntr.end(); ) + { + const auto& route_pattern = it->first; + auto vrf_id = route_pattern.vrf_id; + + for(auto inner_iter = it->second.begin(); inner_iter != it->second.end(); ) + { + const auto id = sai_serialize_object_id(inner_iter->second); + if (mVidToRidTable->hget("", id, value)) + { + auto ip_prefix = inner_iter->first; + SWSS_LOG_INFO("Registering %s, id %s", ip_prefix.to_string().c_str(), id.c_str()); + + std::unordered_set counter_stats; + FlowCounterHandler::getGenericCounterStatIdList(counter_stats); + mRouteFlowCounterMgr.setCounterIdList(inner_iter->second, CounterType::ROUTE, counter_stats); + + getRouteFlowCounterNameMapKey(vrf_id, ip_prefix, nameMapKey); + prefixToCounterMap.emplace_back(nameMapKey, id); + + getRouteFlowCounterNameMapKey(vrf_id, route_pattern.ip_prefix, pattern); + prefixToPatternMap.emplace_back(nameMapKey, pattern); + + updateRouterFlowCounterCache(route_pattern, ip_prefix, inner_iter->second, mBoundRouteCounters); + inner_iter = it->second.erase(inner_iter); + } + else + { + ++inner_iter; + } + } + + if (it->second.empty()) + { + it = mPendingAddToFlexCntr.erase(it); + } + else + { + ++it; + } + } + + if (!prefixToCounterMap.empty()) + { + mPrefixToCounterTable->set("", prefixToCounterMap); + } + + if (!prefixToPatternMap.empty()) + { + mPrefixToPatternTable->set("", prefixToPatternMap); + } + + if (mPendingAddToFlexCntr.empty()) + { + mFlexCounterUpdTimer->stop(); + } +} + +void FlowCounterRouteOrch::initRouteFlowCounterCapability() +{ + SWSS_LOG_ENTER(); + mRouteFlowCounterSupported = FlowCounterHandler::queryRouteFlowCounterCapability(); + if (!mRouteFlowCounterSupported) + { + SWSS_LOG_NOTICE("Route flow counter is not supported on this platform"); + } + swss::DBConnector state_db("STATE_DB", 0); + swss::Table capability_table(&state_db, STATE_FLOW_COUNTER_CAPABILITY_TABLE_NAME); + std::vector fvs; + 
fvs.emplace_back(FLOW_COUNTER_SUPPORT_FIELD, mRouteFlowCounterSupported ? "true" : "false"); + capability_table.set(FLOW_COUNTER_ROUTE_KEY, fvs); +} + +void FlowCounterRouteOrch::generateRouteFlowStats() +{ + SWSS_LOG_ENTER(); + if (!mRouteFlowCounterSupported) + { + return; + } + + for (const auto &route_pattern : mRoutePatternSet) + { + createRouteFlowCounterByPattern(route_pattern, 0); + } +} + +void FlowCounterRouteOrch::clearRouteFlowStats() +{ + SWSS_LOG_ENTER(); + if (!mBoundRouteCounters.empty() || !mPendingAddToFlexCntr.empty()) + { + for (auto &entry : mBoundRouteCounters) + { + const auto& route_pattern = entry.first; + for (auto &inner_entry : entry.second) + { + removeRouteFlowCounterFromDB(route_pattern.vrf_id, inner_entry.first, inner_entry.second); + unbindFlowCounter(route_pattern, route_pattern.vrf_id, inner_entry.first, inner_entry.second); + } + } + + for (auto &entry : mPendingAddToFlexCntr) + { + const auto& route_pattern = entry.first; + for (auto &inner_entry : entry.second) + { + unbindFlowCounter(route_pattern, route_pattern.vrf_id, inner_entry.first, inner_entry.second); + } + } + + mBoundRouteCounters.clear(); + mPendingAddToFlexCntr.clear(); + } +} + +void FlowCounterRouteOrch::addRoutePattern(const std::string &pattern, size_t max_match_count) +{ + SWSS_LOG_ENTER(); + sai_object_id_t vrf_id; + IpPrefix ip_prefix; + std::string vrf_name; + if (!parseRouteKeyForRoutePattern(pattern, '|', vrf_id, ip_prefix, vrf_name)) + { + vrf_id = SAI_NULL_OBJECT_ID; + } + + auto insert_result = mRoutePatternSet.emplace(vrf_name, vrf_id, ip_prefix, max_match_count); + if (insert_result.second) + { + SWSS_LOG_NOTICE("Inserting route pattern %s, max match count is %zu", pattern.c_str(), max_match_count); + if (!validateRoutePattern(*insert_result.first)) + { + mRoutePatternSet.erase(insert_result.first); + return; + } + + createRouteFlowCounterByPattern(*insert_result.first, 0); + } + else + { + SWSS_LOG_NOTICE("Updating route pattern %s max match count 
to %zu", pattern.c_str(), max_match_count); + RoutePattern &existing = const_cast(*insert_result.first); + onRoutePatternMaxMatchCountChange(existing, max_match_count); + } +} + +void FlowCounterRouteOrch::removeRoutePattern(const std::string& pattern) +{ + SWSS_LOG_ENTER(); + sai_object_id_t vrf_id; + IpPrefix ip_prefix; + std::string vrf_name; + if (!parseRouteKeyForRoutePattern(pattern, '|', vrf_id, ip_prefix, vrf_name)) + { + vrf_id = SAI_NULL_OBJECT_ID; + } + + SWSS_LOG_NOTICE("Removing route pattern %s", pattern.c_str()); + RoutePattern route_pattern(vrf_name, vrf_id, ip_prefix, 0); + auto iter = mRoutePatternSet.find(route_pattern); + if (iter == mRoutePatternSet.end()) + { + // Should not go to this branch, just in case + SWSS_LOG_ERROR("Trying to remove route pattern %s, but it does not exist", pattern.c_str()); + return; + } + mRoutePatternSet.erase(iter); + + removeRoutePattern(route_pattern); +} + +void FlowCounterRouteOrch::removeRoutePattern(const RoutePattern &route_pattern) +{ + SWSS_LOG_ENTER(); + auto cache_iter = mBoundRouteCounters.find(route_pattern); + if (cache_iter != mBoundRouteCounters.end()) + { + for (auto &entry : cache_iter->second) + { + removeRouteFlowCounterFromDB(route_pattern.vrf_id, entry.first, entry.second); + unbindFlowCounter(route_pattern, route_pattern.vrf_id, entry.first, entry.second); + } + mBoundRouteCounters.erase(cache_iter); + } + + auto pending_iter = mPendingAddToFlexCntr.find(route_pattern); + if (pending_iter != mPendingAddToFlexCntr.end()) + { + for (auto &entry : pending_iter->second) + { + unbindFlowCounter(route_pattern, route_pattern.vrf_id, entry.first, entry.second); + } + mPendingAddToFlexCntr.erase(pending_iter); + } +} + +void FlowCounterRouteOrch::onAddMiscRouteEntry(sai_object_id_t vrf_id, const sai_ip_prefix_t& ip_pfx, bool add_to_cache) +{ + SWSS_LOG_ENTER(); + if (!mRouteFlowCounterSupported) + { + return; + } + + IpPrefix ip_prefix = getIpPrefixFromSaiPrefix(ip_pfx); + onAddMiscRouteEntry(vrf_id, 
ip_prefix, add_to_cache); +} + +void FlowCounterRouteOrch::onAddMiscRouteEntry(sai_object_id_t vrf_id, const IpPrefix& ip_prefix, bool add_to_cache) +{ + SWSS_LOG_ENTER(); + if (!mRouteFlowCounterSupported) + { + return; + } + + if (add_to_cache) + { + auto iter = mMiscRoutes.find(vrf_id); + if (iter == mMiscRoutes.end()) + { + mMiscRoutes.emplace(vrf_id, std::set({ip_prefix})); + } + else + { + iter->second.insert(ip_prefix); + } + } + + if (!isRouteFlowCounterEnabled()) + { + return; + } + + if (mRoutePatternSet.empty()) + { + return; + } + + handleRouteAdd(vrf_id, ip_prefix); +} + +void FlowCounterRouteOrch::onRemoveMiscRouteEntry(sai_object_id_t vrf_id, const sai_ip_prefix_t& ip_pfx, bool remove_from_cache) +{ + SWSS_LOG_ENTER(); + if (!mRouteFlowCounterSupported) + { + return; + } + + IpPrefix ip_prefix = getIpPrefixFromSaiPrefix(ip_pfx); + onRemoveMiscRouteEntry(vrf_id, ip_prefix, remove_from_cache); +} + +void FlowCounterRouteOrch::onRemoveMiscRouteEntry(sai_object_id_t vrf_id, const IpPrefix& ip_prefix, bool remove_from_cache) +{ + SWSS_LOG_ENTER(); + if (!mRouteFlowCounterSupported) + { + return; + } + + if (remove_from_cache) + { + auto iter = mMiscRoutes.find(vrf_id); + if (iter != mMiscRoutes.end()) + { + auto prefix_iter = iter->second.find(ip_prefix); + if (prefix_iter != iter->second.end()) + { + iter->second.erase(prefix_iter); + if (iter->second.empty()) + { + mMiscRoutes.erase(iter); + } + } + } + } + + if (!isRouteFlowCounterEnabled()) + { + return; + } + + if (mRoutePatternSet.empty()) + { + return; + } + + handleRouteRemove(vrf_id, ip_prefix); +} + +void FlowCounterRouteOrch::onAddVR(sai_object_id_t vrf_id) +{ + SWSS_LOG_ENTER(); + if (!mRouteFlowCounterSupported) + { + return; + } + + assert(vrf_id != gVirtualRouterId); + auto *vrf_orch = gDirectory.get(); + std::string vrf_name = vrf_orch->getVRFname(vrf_id); + if (vrf_name == "") + { + getVnetNameByVrfId(vrf_id, vrf_name); + } + + if (vrf_name == "") + { + SWSS_LOG_WARN("Failed to get VRF 
name for vrf id %s", sai_serialize_object_id(vrf_id).c_str()); + } + + for (auto &route_pattern : mRoutePatternSet) + { + if (route_pattern.vrf_name == vrf_name) + { + RoutePattern &existing = const_cast(route_pattern); + existing.vrf_id = vrf_id; + createRouteFlowCounterByPattern(existing, 0); + break; + } + } +} + +void FlowCounterRouteOrch::onRemoveVR(sai_object_id_t vrf_id) +{ + SWSS_LOG_ENTER(); + if (!mRouteFlowCounterSupported) + { + return; + } + + for (auto &route_pattern : mRoutePatternSet) + { + if (route_pattern.vrf_id == vrf_id) + { + SWSS_LOG_NOTICE("Removing route pattern %s and all related counters due to VRF %s has been removed", route_pattern.to_string().c_str(), route_pattern.vrf_name.c_str()); + removeRoutePattern(route_pattern); + RoutePattern &existing = const_cast(route_pattern); + existing.vrf_id = SAI_NULL_OBJECT_ID; + } + } +} + +bool FlowCounterRouteOrch::bindFlowCounter(const RoutePattern &route_pattern, sai_object_id_t vrf_id, const IpPrefix& ip_prefix) +{ + SWSS_LOG_ENTER(); + + SWSS_LOG_NOTICE("Binding route entry vrf=%s prefix=%s to flow counter", route_pattern.vrf_name.c_str(), ip_prefix.to_string().c_str()); + + sai_object_id_t counter_oid; + if (!FlowCounterHandler::createGenericCounter(counter_oid)) + { + SWSS_LOG_ERROR("Failed to create generic counter"); + return false; + } + + sai_route_entry_t route_entry; + route_entry.switch_id = gSwitchId; + route_entry.vr_id = route_pattern.vrf_id; + copy(route_entry.destination, ip_prefix); + sai_attribute_t attr; + attr.id = SAI_ROUTE_ENTRY_ATTR_COUNTER_ID; + attr.value.oid = counter_oid; + + auto status = sai_route_api->set_route_entry_attribute(&route_entry, &attr); + if (status != SAI_STATUS_SUCCESS) + { + FlowCounterHandler::removeGenericCounter(counter_oid); + SWSS_LOG_WARN("Failed to bind route entry vrf=%s prefix=%s to flow counter", route_pattern.vrf_name.c_str(), ip_prefix.to_string().c_str()); + return false; + } + + pendingUpdateFlexDb(route_pattern, ip_prefix, counter_oid); 
+ return true; +} + +void FlowCounterRouteOrch::unbindFlowCounter(const RoutePattern &route_pattern, sai_object_id_t vrf_id, const IpPrefix& ip_prefix, sai_object_id_t counter_oid) +{ + SWSS_LOG_ENTER(); + + SWSS_LOG_NOTICE("Unbinding route entry vrf=%s prefix=%s to flow counter", route_pattern.vrf_name.c_str(), ip_prefix.to_string().c_str()); + + sai_route_entry_t route_entry; + route_entry.switch_id = gSwitchId; + route_entry.vr_id = route_pattern.vrf_id; + copy(route_entry.destination, ip_prefix); + sai_attribute_t attr; + attr.id = SAI_ROUTE_ENTRY_ATTR_COUNTER_ID; + attr.value.oid = SAI_NULL_OBJECT_ID; + + auto status = sai_route_api->set_route_entry_attribute(&route_entry, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_WARN("Failed to unbind route entry vrf=%s prefix=%s from flow counter", route_pattern.vrf_name.c_str(), ip_prefix.to_string().c_str()); + } + + FlowCounterHandler::removeGenericCounter(counter_oid); +} + +bool FlowCounterRouteOrch::removeRouteFlowCounter(const RoutePattern &route_pattern, sai_object_id_t vrf_id, const IpPrefix& ip_prefix) +{ + SWSS_LOG_ENTER(); + + SWSS_LOG_NOTICE("Removing route entry vrf=%s prefix=%s from flow counter", route_pattern.vrf_name.c_str(), ip_prefix.to_string().c_str()); + + // Check if the entry is in mPendingAddToFlexCntr + sai_object_id_t counter_oid = SAI_NULL_OBJECT_ID; + auto pending_iter = mPendingAddToFlexCntr.find(route_pattern); + if (pending_iter != mPendingAddToFlexCntr.end()) + { + auto iter_prefix = pending_iter->second.find(ip_prefix); + if (iter_prefix != pending_iter->second.end()) + { + counter_oid = iter_prefix->second; + pending_iter->second.erase(iter_prefix); + if (pending_iter->second.empty()) + { + mPendingAddToFlexCntr.erase(pending_iter); + } + } + } + + if (counter_oid == SAI_NULL_OBJECT_ID) + { + // Check if the entry is in mBoundRouteCounters + auto cache_iter = mBoundRouteCounters.find(route_pattern); + if (cache_iter != mBoundRouteCounters.end()) + { + auto iter_prefix = 
cache_iter->second.find(ip_prefix); + if (iter_prefix != cache_iter->second.end()) + { + counter_oid = iter_prefix->second; + removeRouteFlowCounterFromDB(vrf_id, ip_prefix, counter_oid); + cache_iter->second.erase(iter_prefix); + if (cache_iter->second.empty()) + { + mBoundRouteCounters.erase(cache_iter); + } + } + } + } + + // No need unbind because the route entry has been removed, just remove the generic counter + if (counter_oid != SAI_NULL_OBJECT_ID) + { + FlowCounterHandler::removeGenericCounter(counter_oid); + return true; + } + + return false; +} + +void FlowCounterRouteOrch::pendingUpdateFlexDb(const RoutePattern &route_pattern, const IpPrefix& ip_prefix, sai_object_id_t counter_oid) +{ + SWSS_LOG_ENTER(); + bool was_empty = mPendingAddToFlexCntr.empty(); + updateRouterFlowCounterCache(route_pattern, ip_prefix, counter_oid, mPendingAddToFlexCntr); + if (was_empty) + { + mFlexCounterUpdTimer->start(); + } +} + +bool FlowCounterRouteOrch::validateRoutePattern(const RoutePattern &route_pattern) const +{ + SWSS_LOG_ENTER(); + + for (const auto& existing : mRoutePatternSet) + { + if (existing.is_overlap_with(route_pattern)) + { + SWSS_LOG_ERROR("Configured route pattern %s is conflict with existing one %s", route_pattern.to_string().c_str(), existing.to_string().c_str()); + return false; + } + } + + return true; +} + +size_t FlowCounterRouteOrch::getRouteFlowCounterSizeByPattern(const RoutePattern &route_pattern) const +{ + SWSS_LOG_ENTER(); + + auto cache_iter = mBoundRouteCounters.find(route_pattern); + auto cache_count = cache_iter == mBoundRouteCounters.end() ? 0 : cache_iter->second.size(); + auto pending_iter = mPendingAddToFlexCntr.find(route_pattern); + auto pending_count = pending_iter == mPendingAddToFlexCntr.end() ? 
0 : pending_iter->second.size(); + return cache_count + pending_count; +} + +bool FlowCounterRouteOrch::isRouteAlreadyBound(const RoutePattern &route_pattern, const IpPrefix &ip_prefix) const +{ + SWSS_LOG_ENTER(); + + auto iter_bound = mBoundRouteCounters.find(route_pattern); + if (iter_bound != mBoundRouteCounters.end()) + { + if (iter_bound->second.find(ip_prefix) != iter_bound->second.end()) + { + return true; + } + } + + auto iter_pending = mPendingAddToFlexCntr.find(route_pattern); + if (iter_pending != mPendingAddToFlexCntr.end()) + { + if (iter_pending->second.find(ip_prefix) != iter_pending->second.end()) + { + return true; + } + } + + return false; +} + +void FlowCounterRouteOrch::createRouteFlowCounterByPattern(const RoutePattern &route_pattern, size_t current_bound_count) +{ + SWSS_LOG_ENTER(); + if (!isRouteFlowCounterEnabled()) + { + return; + } + + auto &syncdRoutes = gRouteOrch->getSyncdRoutes(); + auto iter = syncdRoutes.find(route_pattern.vrf_id); + if (iter != syncdRoutes.end()) + { + SWSS_LOG_NOTICE("Creating route flow counter for pattern %s", route_pattern.to_string().c_str()); + + for (auto &entry : iter->second) + { + if (current_bound_count == route_pattern.max_match_count) + { + return; + } + + if (route_pattern.is_match(route_pattern.vrf_id, entry.first)) + { + if (isRouteAlreadyBound(route_pattern, entry.first)) + { + continue; + } + + if (bindFlowCounter(route_pattern, route_pattern.vrf_id, entry.first)) + { + ++current_bound_count; + } + } + } + } + + createRouteFlowCounterFromVnetRoutes(route_pattern, current_bound_count); + + auto misc_iter = mMiscRoutes.find(route_pattern.vrf_id); + if (misc_iter != mMiscRoutes.end()) + { + SWSS_LOG_NOTICE("Creating route flow counter for pattern %s for other type route entries", route_pattern.to_string().c_str()); + + for (auto ip_prefix : misc_iter->second) + { + if (current_bound_count == route_pattern.max_match_count) + { + return; + } + + if (route_pattern.is_match(route_pattern.vrf_id, 
ip_prefix)) + { + if (isRouteAlreadyBound(route_pattern, ip_prefix)) + { + continue; + } + + if (bindFlowCounter(route_pattern, route_pattern.vrf_id, ip_prefix)) + { + ++current_bound_count; + } + } + } + } +} + +void FlowCounterRouteOrch::createRouteFlowCounterFromVnetRoutes(const RoutePattern &route_pattern, size_t& current_bound_count) +{ + SWSS_LOG_ENTER(); + + auto *vnet_orch = gDirectory.get(); + assert(vnet_orch); // VnetOrch instance is created before RouteOrch + + if (!vnet_orch->isVnetExists(route_pattern.vrf_name)) + { + return; + } + + SWSS_LOG_NOTICE("Creating route flow counter for pattern %s for VNET route entries", route_pattern.to_string().c_str()); + + auto *vrf_obj = vnet_orch->getTypePtr(route_pattern.vrf_name); + const auto &route_map = vrf_obj->getRouteMap(); + for (const auto &entry : route_map) + { + if (current_bound_count == route_pattern.max_match_count) + { + return; + } + + if (route_pattern.is_match(route_pattern.vrf_id, entry.first)) + { + if (isRouteAlreadyBound(route_pattern, entry.first)) + { + continue; + } + + if (bindFlowCounter(route_pattern, route_pattern.vrf_id, entry.first)) + { + ++current_bound_count; + } + } + } + + const auto &tunnel_routes = vrf_obj->getTunnelRoutes(); + for (const auto &entry : tunnel_routes) + { + if (current_bound_count == route_pattern.max_match_count) + { + return; + } + if (route_pattern.is_match(route_pattern.vrf_id, entry.first)) + { + if (isRouteAlreadyBound(route_pattern, entry.first)) + { + continue; + } + + if (bindFlowCounter(route_pattern, route_pattern.vrf_id, entry.first)) + { + ++current_bound_count; + } + } + } +} + +void FlowCounterRouteOrch::reapRouteFlowCounterByPattern(const RoutePattern &route_pattern, size_t current_bound_count) +{ + SWSS_LOG_ENTER(); + + auto pending_iter = mPendingAddToFlexCntr.find(route_pattern); + auto iter = mBoundRouteCounters.find(route_pattern); + if (iter == mBoundRouteCounters.end() && pending_iter == mPendingAddToFlexCntr.end()) + { + return; + } + + 
// Remove from pending cache first + if (pending_iter != mPendingAddToFlexCntr.end()) + { + while(current_bound_count > route_pattern.max_match_count) + { + auto bound_iter = pending_iter->second.begin(); + if (bound_iter == pending_iter->second.end()) + { + break; + } + unbindFlowCounter(route_pattern, route_pattern.vrf_id, bound_iter->first, bound_iter->second); + pending_iter->second.erase(bound_iter); + --current_bound_count; + } + } + + // Remove from bound cache + if (iter != mBoundRouteCounters.end()) + { + while(current_bound_count > route_pattern.max_match_count) + { + auto bound_iter = iter->second.begin(); + if (bound_iter == iter->second.end()) + { + break; + } + + removeRouteFlowCounterFromDB(route_pattern.vrf_id, bound_iter->first, bound_iter->second); + unbindFlowCounter(route_pattern, route_pattern.vrf_id, bound_iter->first, bound_iter->second); + iter->second.erase(bound_iter); + --current_bound_count; + } + } +} + +void FlowCounterRouteOrch::onRoutePatternMaxMatchCountChange(RoutePattern &route_pattern, size_t new_max_match_count) +{ + SWSS_LOG_ENTER(); + + if (route_pattern.max_match_count != new_max_match_count) + { + auto old_max_match_count = route_pattern.max_match_count; + route_pattern.max_match_count = new_max_match_count; + + if (!isRouteFlowCounterEnabled()) + { + return; + } + + auto current_bound_count = getRouteFlowCounterSizeByPattern(route_pattern); + SWSS_LOG_NOTICE("Current bound route flow counter count is %zu, new limit is %zu, old limit is %zu", current_bound_count, new_max_match_count, old_max_match_count); + if (new_max_match_count > old_max_match_count) + { + if (current_bound_count == old_max_match_count) + { + createRouteFlowCounterByPattern(route_pattern, current_bound_count); + } + } + else + { + if (current_bound_count > new_max_match_count) + { + reapRouteFlowCounterByPattern(route_pattern, current_bound_count); + } + } + } +} + +void FlowCounterRouteOrch::getRouteFlowCounterNameMapKey(sai_object_id_t vrf_id, const 
IpPrefix& ip_prefix, std::string &key) +{ + SWSS_LOG_ENTER(); + std::ostringstream oss; + if (gVirtualRouterId != vrf_id) + { + auto *vrf_orch = gDirectory.get(); + auto vrf_name = vrf_orch->getVRFname(vrf_id); + if (vrf_name == "") + { + getVnetNameByVrfId(vrf_id, vrf_name); + } + + if (vrf_name != "") + { + oss << vrf_name; + oss << "|"; + } + else + { + // Should not happen, just in case + SWSS_LOG_ERROR("Failed to get VRF/VNET name for vrf id %s", sai_serialize_object_id(vrf_id).c_str()); + } + } + oss << ip_prefix.to_string(); + key = oss.str(); +} + +void FlowCounterRouteOrch::handleRouteAdd(sai_object_id_t vrf_id, const IpPrefix& ip_prefix) +{ + if (!mRouteFlowCounterSupported) + { + return; + } + + if (!isRouteFlowCounterEnabled()) + { + return; + } + + for (const auto &route_pattern : mRoutePatternSet) + { + if (route_pattern.is_match(vrf_id, ip_prefix)) + { + auto current_bound_count = getRouteFlowCounterSizeByPattern(route_pattern); + if (current_bound_count < route_pattern.max_match_count) + { + bindFlowCounter(route_pattern, vrf_id, ip_prefix); + } + break; + } + } +} + +void FlowCounterRouteOrch::handleRouteRemove(sai_object_id_t vrf_id, const IpPrefix& ip_prefix) +{ + if (!mRouteFlowCounterSupported) + { + return; + } + + if (!isRouteFlowCounterEnabled()) + { + return; + } + + for (const auto &route_pattern : mRoutePatternSet) + { + if (route_pattern.is_match(vrf_id, ip_prefix)) + { + if (isRouteAlreadyBound(route_pattern, ip_prefix)) + { + if (removeRouteFlowCounter(route_pattern, vrf_id, ip_prefix)) + { + auto current_bound_count = getRouteFlowCounterSizeByPattern(route_pattern); + if (current_bound_count == route_pattern.max_match_count - 1) + { + createRouteFlowCounterByPattern(route_pattern, current_bound_count); + } + } + } + break; + } + } +} + +void FlowCounterRouteOrch::removeRouteFlowCounterFromDB(sai_object_id_t vrf_id, const IpPrefix& ip_prefix, sai_object_id_t counter_oid) +{ + SWSS_LOG_ENTER(); + std::string nameMapKey; + 
getRouteFlowCounterNameMapKey(vrf_id, ip_prefix, nameMapKey); + mPrefixToPatternTable->hdel("", nameMapKey); + mPrefixToCounterTable->hdel("", nameMapKey); + mRouteFlowCounterMgr.clearCounterIdList(counter_oid); +} + +void FlowCounterRouteOrch::updateRouterFlowCounterCache( + const RoutePattern &route_pattern, + const IpPrefix &ip_prefix, + sai_object_id_t counter_oid, + RouterFlowCounterCache &cache) +{ + SWSS_LOG_ENTER(); + auto iter = cache.find(route_pattern); + if (iter == cache.end()) + { + cache.emplace(route_pattern, std::map({{ip_prefix, counter_oid}})); + } + else + { + iter->second.emplace(ip_prefix, counter_oid); + } +} + +bool FlowCounterRouteOrch::isRouteFlowCounterEnabled() const +{ + SWSS_LOG_ENTER(); + FlexCounterOrch *flexCounterOrch = gDirectory.get(); + return flexCounterOrch && flexCounterOrch->getRouteFlowCountersState(); +} + +bool FlowCounterRouteOrch::parseRouteKeyForRoutePattern(const std::string &key, char sep, sai_object_id_t &vrf_id, IpPrefix &ip_prefix, std::string &vrf_name) +{ + size_t found = key.find(sep); + if (found == std::string::npos) + { + vrf_id = gVirtualRouterId; + ip_prefix = IpPrefix(key); + vrf_name = ""; + } + else + { + vrf_name = key.substr(0, found); + ip_prefix = IpPrefix(key.substr(found+1)); + auto *vrf_orch = gDirectory.get(); + if (!key.compare(0, strlen(VRF_PREFIX), VRF_PREFIX) && vrf_orch->isVRFexists(vrf_name)) + { + vrf_id = vrf_orch->getVRFid(vrf_name); + } + else + { + if (!getVrfIdByVnetName(vrf_name, vrf_id)) + { + SWSS_LOG_NOTICE("VRF/VNET name %s is not resolved", vrf_name.c_str()); + return false; + } + } + } + + return true; +} + +bool FlowCounterRouteOrch::getVrfIdByVnetName(const std::string& vnet_name, sai_object_id_t &vrf_id) +{ + auto *vnet_orch = gDirectory.get(); + assert(vnet_orch); // VnetOrch instance is created before RouteOrch + + return vnet_orch->getVrfIdByVnetName(vnet_name, vrf_id); +} + +bool FlowCounterRouteOrch::getVnetNameByVrfId(sai_object_id_t vrf_id, std::string& vnet_name) +{ 
+ auto *vnet_orch = gDirectory.get(); + assert(vnet_orch); // VnetOrch instance is created before RouteOrch + + return vnet_orch->getVnetNameByVrfId(vrf_id, vnet_name); +} + diff --git a/orchagent/flex_counter/flowcounterrouteorch.h b/orchagent/flex_counter/flowcounterrouteorch.h new file mode 100644 index 0000000000..38ac413ab9 --- /dev/null +++ b/orchagent/flex_counter/flowcounterrouteorch.h @@ -0,0 +1,178 @@ +#pragma once + +#include "bulker.h" +#include "dbconnector.h" +#include "ipprefix.h" +#include "orch.h" +#include +#include +#include +#include +#include + +#define ROUTE_FLOW_COUNTER_FLEX_COUNTER_GROUP "ROUTE_FLOW_COUNTER" + +struct RoutePattern +{ + RoutePattern(const std::string& input_vrf_name, sai_object_id_t vrf, IpPrefix prefix, size_t max_match_count) + :vrf_name(input_vrf_name), vrf_id(vrf), ip_prefix(prefix), max_match_count(max_match_count), exact_match(prefix.isDefaultRoute()) + { + } + + std::string vrf_name; + sai_object_id_t vrf_id; + IpPrefix ip_prefix; + size_t max_match_count; + bool exact_match; + + bool operator < (const RoutePattern &other) const + { + // We don't compare the vrf id here because: + // 1. vrf id could be SAI_NULL_OBJECT_ID if the VRF name is not resolved, two pattern with different VRF name and vrf_id=SAI_NULL_OBJECT_ID + // and same prefix will be treat as same route pattern, which is not expected + // 2. vrf name must be different + auto vrf_name_compare = vrf_name.compare(other.vrf_name); + if (vrf_name_compare < 0) + { + return true; + } + else if (vrf_name_compare == 0 && ip_prefix < other.ip_prefix) + { + return true; + } + else + { + return false; + } + } + + bool is_match(sai_object_id_t vrf, IpPrefix prefix) const + { + // No need compare VRF name here because: + // 1. If the VRF is not resolved, the vrf_id shall be SAI_NULL_OBJECT_ID, it cannot match any input vrf_id + // 2. 
If the VRF is resolved, different vrf must have different vrf id + if (vrf_id != vrf) + { + return false; + } + + if (!exact_match) + { + return (ip_prefix.getMaskLength() <= prefix.getMaskLength() && ip_prefix.isAddressInSubnet(prefix.getIp())); + } + else + { + return prefix == ip_prefix; + } + } + + bool is_overlap_with(const RoutePattern &other) const + { + if (this == &other) + { + return false; + } + + if (vrf_name != other.vrf_name) + { + return false; + } + + if (vrf_name != other.vrf_name) + { + return false; + } + + return is_match(other.vrf_id, other.ip_prefix) || other.is_match(vrf_id, ip_prefix); + } + + std::string to_string() const + { + std::ostringstream oss; + oss << "RoutePattern(vrf_id=" << vrf_id << ",ip_prefix=" << ip_prefix.to_string() << ")"; + return oss.str(); + } +}; + + + +typedef std::set RoutePatternSet; +/* RoutePattern to */ +typedef std::map> RouterFlowCounterCache; +/* IP2ME, MUX, VNET route entries */ +typedef std::map> MiscRouteEntryMap; + +class FlowCounterRouteOrch : public Orch +{ +public: + FlowCounterRouteOrch(swss::DBConnector *db, const std::vector &tableNames); + virtual ~FlowCounterRouteOrch(void); + + bool getRouteFlowCounterSupported() const { return mRouteFlowCounterSupported; } + void generateRouteFlowStats(); + void clearRouteFlowStats(); + void addRoutePattern(const std::string &pattern, size_t); + void removeRoutePattern(const std::string &pattern); + void onAddMiscRouteEntry(sai_object_id_t vrf_id, const IpPrefix& ip_prefix, bool add_to_cache = true); + void onAddMiscRouteEntry(sai_object_id_t vrf_id, const sai_ip_prefix_t& ip_pfx, bool add_to_cache = true); + void onRemoveMiscRouteEntry(sai_object_id_t vrf_id, const IpPrefix& ip_prefix, bool remove_from_cache = true); + void onRemoveMiscRouteEntry(sai_object_id_t vrf_id, const sai_ip_prefix_t& ip_pfx, bool remove_from_cache = true); + void onAddVR(sai_object_id_t vrf_id); + void onRemoveVR(sai_object_id_t vrf_id); + void handleRouteAdd(sai_object_id_t vrf_id, 
const IpPrefix& ip_prefix); + void handleRouteRemove(sai_object_id_t vrf_id, const IpPrefix& ip_prefix); + void processRouteFlowCounterBinding(); + +protected: + void doTask(Consumer &consumer) override; + void doTask(SelectableTimer &timer) override; + +private: + std::shared_ptr mAsicDb; + std::shared_ptr mCounterDb; + std::unique_ptr
mVidToRidTable; + std::unique_ptr
mPrefixToCounterTable; + std::unique_ptr
mPrefixToPatternTable; + + bool mRouteFlowCounterSupported = false; + /* Route pattern set, store configured route patterns */ + RoutePatternSet mRoutePatternSet; + /* Cache for those bound route flow counters*/ + RouterFlowCounterCache mBoundRouteCounters; + /* Cache for those route flow counters pending update to FLEX DB */ + RouterFlowCounterCache mPendingAddToFlexCntr; + /* IP2ME */ + MiscRouteEntryMap mMiscRoutes; // Save here for route flow counter + /* Flex counter manager for route flow counter */ + FlexCounterManager mRouteFlowCounterMgr; + /* Timer to create flex counter and update counters DB */ + SelectableTimer *mFlexCounterUpdTimer = nullptr; + + EntityBulker gRouteBulker; + + void initRouteFlowCounterCapability(); + void removeRoutePattern(const RoutePattern &route_pattern); + void removeRouteFlowCounterFromDB(sai_object_id_t vrf_id, const IpPrefix& ip_prefix, sai_object_id_t counter_oid); + bool bindFlowCounter(const RoutePattern &route_pattern, sai_object_id_t vrf_id, const IpPrefix& ip_prefix); + void unbindFlowCounter(const RoutePattern &route_pattern, sai_object_id_t vrf_id, const IpPrefix& ip_prefix, sai_object_id_t counter_oid); + void pendingUpdateFlexDb(const RoutePattern &route_pattern, const IpPrefix &ip_prefix, sai_object_id_t counter_oid); + void updateRouterFlowCounterCache( + const RoutePattern &route_pattern, + const IpPrefix& ip_prefix, + sai_object_id_t counter_oid, + RouterFlowCounterCache &cache); + bool validateRoutePattern(const RoutePattern &route_pattern) const; + void onRoutePatternMaxMatchCountChange(RoutePattern &route_pattern, size_t new_max_match_count); + bool isRouteAlreadyBound(const RoutePattern &route_pattern, const IpPrefix &ip_prefix) const; + void createRouteFlowCounterByPattern(const RoutePattern &route_pattern, size_t currentBoundCount); + /* Return true if it actaully removed a counter so that caller need to fill the hole if possible*/ + bool removeRouteFlowCounter(const RoutePattern &route_pattern, 
sai_object_id_t vrf_id, const IpPrefix& ip_prefix); + void createRouteFlowCounterFromVnetRoutes(const RoutePattern &route_pattern, size_t& current_bound_count); + void reapRouteFlowCounterByPattern(const RoutePattern &route_pattern, size_t currentBoundCount); + bool isRouteFlowCounterEnabled() const; + void getRouteFlowCounterNameMapKey(sai_object_id_t vrf_id, const IpPrefix &ip_prefix, std::string &key); + size_t getRouteFlowCounterSizeByPattern(const RoutePattern &route_pattern) const; + bool parseRouteKeyForRoutePattern(const std::string &key, char sep, sai_object_id_t &vrf_id, IpPrefix &ip_prefix, std::string& vrf_name); + bool getVrfIdByVnetName(const std::string& vnet_name, sai_object_id_t &vrf_id); + bool getVnetNameByVrfId(sai_object_id_t vrf_id, std::string& vnet_name); +}; diff --git a/orchagent/flexcounterorch.cpp b/orchagent/flexcounterorch.cpp index dc14998774..a48bd9ceb7 100644 --- a/orchagent/flexcounterorch.cpp +++ b/orchagent/flexcounterorch.cpp @@ -10,6 +10,10 @@ #include "debugcounterorch.h" #include "directory.h" #include "copporch.h" +#include +#include "routeorch.h" +#include "macsecorch.h" +#include "flowcounterrouteorch.h" extern sai_port_api_t *sai_port_api; @@ -19,16 +23,20 @@ extern IntfsOrch *gIntfsOrch; extern BufferOrch *gBufferOrch; extern Directory gDirectory; extern CoppOrch *gCoppOrch; +extern FlowCounterRouteOrch *gFlowCounterRouteOrch; #define BUFFER_POOL_WATERMARK_KEY "BUFFER_POOL_WATERMARK" #define PORT_KEY "PORT" #define PORT_BUFFER_DROP_KEY "PORT_BUFFER_DROP" #define QUEUE_KEY "QUEUE" +#define QUEUE_WATERMARK "QUEUE_WATERMARK" #define PG_WATERMARK_KEY "PG_WATERMARK" +#define PG_DROP_KEY "PG_DROP" #define RIF_KEY "RIF" #define ACL_KEY "ACL" #define TUNNEL_KEY "TUNNEL" #define FLOW_CNT_TRAP_KEY "FLOW_CNT_TRAP" +#define FLOW_CNT_ROUTE_KEY "FLOW_CNT_ROUTE" unordered_map flexCounterGroupMap = { @@ -47,14 +55,22 @@ unordered_map flexCounterGroupMap = {"ACL", ACL_COUNTER_FLEX_COUNTER_GROUP}, {"TUNNEL", 
TUNNEL_STAT_COUNTER_FLEX_COUNTER_GROUP}, {FLOW_CNT_TRAP_KEY, HOSTIF_TRAP_COUNTER_FLEX_COUNTER_GROUP}, + {FLOW_CNT_ROUTE_KEY, ROUTE_FLOW_COUNTER_FLEX_COUNTER_GROUP}, + {"MACSEC_SA", COUNTERS_MACSEC_SA_GROUP}, + {"MACSEC_SA_ATTR", COUNTERS_MACSEC_SA_ATTR_GROUP}, + {"MACSEC_FLOW", COUNTERS_MACSEC_FLOW_GROUP}, }; FlexCounterOrch::FlexCounterOrch(DBConnector *db, vector &tableNames): Orch(db, tableNames), m_flexCounterConfigTable(db, CFG_FLEX_COUNTER_TABLE_NAME), + m_bufferQueueConfigTable(db, CFG_BUFFER_QUEUE_TABLE_NAME), + m_bufferPgConfigTable(db, CFG_BUFFER_PG_TABLE_NAME), m_flexCounterDb(new DBConnector("FLEX_COUNTER_DB", 0)), - m_flexCounterGroupTable(new ProducerTable(m_flexCounterDb.get(), FLEX_COUNTER_GROUP_TABLE)) + m_flexCounterGroupTable(new ProducerTable(m_flexCounterDb.get(), FLEX_COUNTER_GROUP_TABLE)), + m_gbflexCounterDb(new DBConnector("GB_FLEX_COUNTER_DB", 0)), + m_gbflexCounterGroupTable(new ProducerTable(m_gbflexCounterDb.get(), FLEX_COUNTER_GROUP_TABLE)) { SWSS_LOG_ENTER(); } @@ -114,6 +130,13 @@ void FlexCounterOrch::doTask(Consumer &consumer) vector fieldValues; fieldValues.emplace_back(POLL_INTERVAL_FIELD, value); m_flexCounterGroupTable->set(flexCounterGroupMap[key], fieldValues); + if (gPortsOrch && gPortsOrch->isGearboxEnabled()) + { + if (key == PORT_KEY || key.rfind("MACSEC", 0) == 0) + { + m_gbflexCounterGroupTable->set(flexCounterGroupMap[key], fieldValues); + } + } } else if(field == FLEX_COUNTER_STATUS_FIELD) { @@ -139,11 +162,27 @@ void FlexCounterOrch::doTask(Consumer &consumer) } else if(key == QUEUE_KEY) { - gPortsOrch->generateQueueMap(); + gPortsOrch->generateQueueMap(getQueueConfigurations()); + m_queue_enabled = true; + gPortsOrch->addQueueFlexCounters(getQueueConfigurations()); + } + else if(key == QUEUE_WATERMARK) + { + gPortsOrch->generateQueueMap(getQueueConfigurations()); + m_queue_watermark_enabled = true; + gPortsOrch->addQueueWatermarkFlexCounters(getQueueConfigurations()); + } + else if(key == PG_DROP_KEY) + { + 
gPortsOrch->generatePriorityGroupMap(getPgConfigurations()); + m_pg_enabled = true; + gPortsOrch->addPriorityGroupFlexCounters(getPgConfigurations()); } else if(key == PG_WATERMARK_KEY) { - gPortsOrch->generatePriorityGroupMap(); + gPortsOrch->generatePriorityGroupMap(getPgConfigurations()); + m_pg_watermark_enabled = true; + gPortsOrch->addPriorityGroupWatermarkFlexCounters(getPgConfigurations()); } } if(gIntfsOrch && (key == RIF_KEY) && (value == "enable")) @@ -175,9 +214,30 @@ void FlexCounterOrch::doTask(Consumer &consumer) m_hostif_trap_counter_enabled = false; } } + if (gFlowCounterRouteOrch && gFlowCounterRouteOrch->getRouteFlowCounterSupported() && key == FLOW_CNT_ROUTE_KEY) + { + if (value == "enable" && !m_route_flow_counter_enabled) + { + m_route_flow_counter_enabled = true; + gFlowCounterRouteOrch->generateRouteFlowStats(); + } + else if (value == "disable" && m_route_flow_counter_enabled) + { + gFlowCounterRouteOrch->clearRouteFlowStats(); + m_route_flow_counter_enabled = false; + } + } vector fieldValues; fieldValues.emplace_back(FLEX_COUNTER_STATUS_FIELD, value); m_flexCounterGroupTable->set(flexCounterGroupMap[key], fieldValues); + + if (gPortsOrch && gPortsOrch->isGearboxEnabled()) + { + if (key == PORT_KEY || key.rfind("MACSEC", 0) == 0) + { + m_gbflexCounterGroupTable->set(flexCounterGroupMap[key], fieldValues); + } + } } else if(field == FLEX_COUNTER_DELAY_STATUS_FIELD) { @@ -206,6 +266,26 @@ bool FlexCounterOrch::getPortBufferDropCountersState() const return m_port_buffer_drop_counter_enabled; } +bool FlexCounterOrch::getQueueCountersState() const +{ + return m_queue_enabled; +} + +bool FlexCounterOrch::getQueueWatermarkCountersState() const +{ + return m_queue_watermark_enabled; +} + +bool FlexCounterOrch::getPgCountersState() const +{ + return m_pg_enabled; +} + +bool FlexCounterOrch::getPgWatermarkCountersState() const +{ + return m_pg_watermark_enabled; +} + bool FlexCounterOrch::bake() { /* @@ -247,3 +327,165 @@ bool 
FlexCounterOrch::bake() Consumer* consumer = dynamic_cast(getExecutor(CFG_FLEX_COUNTER_TABLE_NAME)); return consumer->addToSync(entries); } + +map FlexCounterOrch::getQueueConfigurations() +{ + SWSS_LOG_ENTER(); + + map queuesStateVector; + std::vector portQueueKeys; + m_bufferQueueConfigTable.getKeys(portQueueKeys); + + for (const auto& portQueueKey : portQueueKeys) + { + auto toks = tokenize(portQueueKey, '|'); + if (toks.size() != 2) + { + SWSS_LOG_ERROR("Invalid BUFFER_QUEUE key: [%s]", portQueueKey.c_str()); + continue; + } + + auto configPortNames = tokenize(toks[0], ','); + auto configPortQueues = toks[1]; + toks = tokenize(configPortQueues, '-'); + + for (const auto& configPortName : configPortNames) + { + uint32_t maxQueueNumber = gPortsOrch->getNumberOfPortSupportedQueueCounters(configPortName); + uint32_t maxQueueIndex = maxQueueNumber - 1; + uint32_t minQueueIndex = 0; + + if (!queuesStateVector.count(configPortName)) + { + FlexCounterQueueStates flexCounterQueueState(maxQueueNumber); + queuesStateVector.insert(make_pair(configPortName, flexCounterQueueState)); + } + + try { + auto startIndex = to_uint(toks[0], minQueueIndex, maxQueueIndex); + if (toks.size() > 1) + { + auto endIndex = to_uint(toks[1], minQueueIndex, maxQueueIndex); + queuesStateVector.at(configPortName).enableQueueCounters(startIndex, endIndex); + } + else + { + queuesStateVector.at(configPortName).enableQueueCounter(startIndex); + } + } catch (std::invalid_argument const& e) { + SWSS_LOG_ERROR("Invalid queue index [%s] for port [%s]", configPortQueues.c_str(), configPortName.c_str()); + continue; + } + } + } + + return queuesStateVector; +} + +map FlexCounterOrch::getPgConfigurations() +{ + SWSS_LOG_ENTER(); + + map pgsStateVector; + std::vector portPgKeys; + m_bufferPgConfigTable.getKeys(portPgKeys); + + for (const auto& portPgKey : portPgKeys) + { + auto toks = tokenize(portPgKey, '|'); + if (toks.size() != 2) + { + SWSS_LOG_ERROR("Invalid BUFFER_PG key: [%s]", portPgKey.c_str()); + 
continue; + } + + auto configPortNames = tokenize(toks[0], ','); + auto configPortPgs = toks[1]; + toks = tokenize(configPortPgs, '-'); + + for (const auto& configPortName : configPortNames) + { + uint32_t maxPgNumber = gPortsOrch->getNumberOfPortSupportedPgCounters(configPortName); + uint32_t maxPgIndex = maxPgNumber - 1; + uint32_t minPgIndex = 0; + + if (!pgsStateVector.count(configPortName)) + { + FlexCounterPgStates flexCounterPgState(maxPgNumber); + pgsStateVector.insert(make_pair(configPortName, flexCounterPgState)); + } + + try { + auto startIndex = to_uint(toks[0], minPgIndex, maxPgIndex); + if (toks.size() > 1) + { + auto endIndex = to_uint(toks[1], minPgIndex, maxPgIndex); + pgsStateVector.at(configPortName).enablePgCounters(startIndex, endIndex); + } + else + { + pgsStateVector.at(configPortName).enablePgCounter(startIndex); + } + } catch (std::invalid_argument const& e) { + SWSS_LOG_ERROR("Invalid pg index [%s] for port [%s]", configPortPgs.c_str(), configPortName.c_str()); + continue; + } + } + } + + return pgsStateVector; +} + +FlexCounterQueueStates::FlexCounterQueueStates(uint32_t maxQueueNumber) +{ + SWSS_LOG_ENTER(); + m_queueStates.resize(maxQueueNumber, false); +} + +bool FlexCounterQueueStates::isQueueCounterEnabled(uint32_t index) const +{ + SWSS_LOG_ENTER(); + return m_queueStates[index]; +} + +void FlexCounterQueueStates::enableQueueCounters(uint32_t startIndex, uint32_t endIndex) +{ + SWSS_LOG_ENTER(); + for (uint32_t queueIndex = startIndex; queueIndex <= endIndex; queueIndex++) + { + enableQueueCounter(queueIndex); + } +} + +void FlexCounterQueueStates::enableQueueCounter(uint32_t queueIndex) +{ + SWSS_LOG_ENTER(); + m_queueStates[queueIndex] = true; +} + +FlexCounterPgStates::FlexCounterPgStates(uint32_t maxPgNumber) +{ + SWSS_LOG_ENTER(); + m_pgStates.resize(maxPgNumber, false); +} + +bool FlexCounterPgStates::isPgCounterEnabled(uint32_t index) const +{ + SWSS_LOG_ENTER(); + return m_pgStates[index]; +} + +void 
FlexCounterPgStates::enablePgCounters(uint32_t startIndex, uint32_t endIndex) +{ + SWSS_LOG_ENTER(); + for (uint32_t pgIndex = startIndex; pgIndex <= endIndex; pgIndex++) + { + enablePgCounter(pgIndex); + } +} + +void FlexCounterPgStates::enablePgCounter(uint32_t pgIndex) +{ + SWSS_LOG_ENTER(); + m_pgStates[pgIndex] = true; +} diff --git a/orchagent/flexcounterorch.h b/orchagent/flexcounterorch.h index ceb8187506..6126588261 100644 --- a/orchagent/flexcounterorch.h +++ b/orchagent/flexcounterorch.h @@ -10,6 +10,30 @@ extern "C" { #include "sai.h" } +class FlexCounterQueueStates +{ +public: + FlexCounterQueueStates(uint32_t maxQueueNumber); + bool isQueueCounterEnabled(uint32_t index) const; + void enableQueueCounters(uint32_t startIndex, uint32_t endIndex); + void enableQueueCounter(uint32_t queueIndex); + +private: + std::vector m_queueStates{}; +}; + +class FlexCounterPgStates +{ +public: + FlexCounterPgStates(uint32_t maxPgNumber); + bool isPgCounterEnabled(uint32_t index) const; + void enablePgCounters(uint32_t startIndex, uint32_t endIndex); + void enablePgCounter(uint32_t pgIndex); + +private: + std::vector m_pgStates{}; +}; + class FlexCounterOrch: public Orch { public: @@ -18,17 +42,32 @@ class FlexCounterOrch: public Orch virtual ~FlexCounterOrch(void); bool getPortCountersState() const; bool getPortBufferDropCountersState() const; + bool getQueueCountersState() const; + bool getQueueWatermarkCountersState() const; + bool getPgCountersState() const; + bool getPgWatermarkCountersState() const; + std::map getQueueConfigurations(); + std::map getPgConfigurations(); bool getHostIfTrapCounterState() const {return m_hostif_trap_counter_enabled;} + bool getRouteFlowCountersState() const {return m_route_flow_counter_enabled;} bool bake() override; - private: std::shared_ptr m_flexCounterDb = nullptr; std::shared_ptr m_flexCounterGroupTable = nullptr; + std::shared_ptr m_gbflexCounterDb = nullptr; + std::shared_ptr m_gbflexCounterGroupTable = nullptr; bool 
m_port_counter_enabled = false; bool m_port_buffer_drop_counter_enabled = false; + bool m_queue_enabled = false; + bool m_queue_watermark_enabled = false; + bool m_pg_enabled = false; + bool m_pg_watermark_enabled = false; bool m_hostif_trap_counter_enabled = false; + bool m_route_flow_counter_enabled = false; Table m_flexCounterConfigTable; + Table m_bufferQueueConfigTable; + Table m_bufferPgConfigTable; }; #endif diff --git a/orchagent/intfsorch.cpp b/orchagent/intfsorch.cpp index 1feebb4d75..4363beb9ea 100644 --- a/orchagent/intfsorch.cpp +++ b/orchagent/intfsorch.cpp @@ -12,6 +12,7 @@ #include "swssnet.h" #include "tokenize.h" #include "routeorch.h" +#include "flowcounterrouteorch.h" #include "crmorch.h" #include "bufferorch.h" #include "directory.h" @@ -29,7 +30,7 @@ extern sai_vlan_api_t* sai_vlan_api; extern sai_object_id_t gSwitchId; extern PortsOrch *gPortsOrch; -extern RouteOrch *gRouteOrch; +extern FlowCounterRouteOrch *gFlowCounterRouteOrch; extern CrmOrch *gCrmOrch; extern BufferOrch *gBufferOrch; extern bool gIsNatSupported; @@ -182,7 +183,7 @@ void IntfsOrch::increaseRouterIntfsRefCount(const string &alias) SWSS_LOG_ENTER(); m_syncdIntfses[alias].ref_count++; - SWSS_LOG_DEBUG("Router interface %s ref count is increased to %d", + SWSS_LOG_INFO("Router interface %s ref count is increased to %d", alias.c_str(), m_syncdIntfses[alias].ref_count); } @@ -191,7 +192,7 @@ void IntfsOrch::decreaseRouterIntfsRefCount(const string &alias) SWSS_LOG_ENTER(); m_syncdIntfses[alias].ref_count--; - SWSS_LOG_DEBUG("Router interface %s ref count is decreased to %d", + SWSS_LOG_INFO("Router interface %s ref count is decreased to %d", alias.c_str(), m_syncdIntfses[alias].ref_count); } @@ -415,6 +416,37 @@ bool IntfsOrch::setIntfProxyArp(const string &alias, const string &proxy_arp) return true; } +bool IntfsOrch::setIntfLoopbackAction(const Port &port, string actionStr) +{ + sai_attribute_t attr; + sai_packet_action_t action; + + if (!getSaiLoopbackAction(actionStr, 
action)) + { + return false; + } + + attr.id = SAI_ROUTER_INTERFACE_ATTR_LOOPBACK_PACKET_ACTION; + attr.value.s32 = action; + + sai_status_t status = sai_router_intfs_api->set_router_interface_attribute(port.m_rif_id, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Loopback action [%s] set failed, interface [%s], rc [%d]", + actionStr.c_str(), port.m_alias.c_str(), status); + + task_process_status handle_status = handleSaiSetStatus(SAI_API_ROUTER_INTERFACE, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + SWSS_LOG_NOTICE("Loopback action [%s] set success, interface [%s]", + actionStr.c_str(), port.m_alias.c_str()); + return true; +} + set IntfsOrch:: getSubnetRoutes() { SWSS_LOG_ENTER(); @@ -432,17 +464,24 @@ set IntfsOrch:: getSubnetRoutes() return subnet_routes; } -bool IntfsOrch::setIntf(const string& alias, sai_object_id_t vrf_id, const IpPrefix *ip_prefix, const bool adminUp, const uint32_t mtu) +bool IntfsOrch::setIntf(const string& alias, sai_object_id_t vrf_id, const IpPrefix *ip_prefix, + const bool adminUp, const uint32_t mtu, string loopbackAction) + { SWSS_LOG_ENTER(); + if (m_removingIntfses.find(alias) != m_removingIntfses.end()) + { + return false; + } + Port port; gPortsOrch->getPort(alias, port); auto it_intfs = m_syncdIntfses.find(alias); if (it_intfs == m_syncdIntfses.end()) { - if (!ip_prefix && addRouterIntfs(vrf_id, port)) + if (!ip_prefix && addRouterIntfs(vrf_id, port, loopbackAction)) { gPortsOrch->increasePortRefCount(alias); IntfsEntry intfs_entry; @@ -644,7 +683,7 @@ void IntfsOrch::doTask(Consumer &consumer) if(table_name == CHASSIS_APP_SYSTEM_INTERFACE_TABLE_NAME) { - if(!isRemoteSystemPortIntf(alias)) + if(isLocalSystemPortIntf(alias)) { //Synced local interface. 
Skip it = consumer.m_toSync.erase(it); @@ -657,13 +696,14 @@ void IntfsOrch::doTask(Consumer &consumer) MacAddress mac; uint32_t mtu = 0; - bool adminUp; + bool adminUp = false; bool adminStateChanged = false; uint32_t nat_zone_id = 0; string proxy_arp = ""; string inband_type = ""; bool mpls = false; string vlan = ""; + string loopbackAction = ""; for (auto idx : data) { @@ -740,10 +780,6 @@ void IntfsOrch::doTask(Consumer &consumer) } adminStateChanged = true; } - else if (field == "nat_zone") - { - nat_zone = value; - } else if (field == "proxy_arp") { proxy_arp = value; @@ -756,6 +792,10 @@ void IntfsOrch::doTask(Consumer &consumer) { vlan = value; } + else if (field == "loopback_action") + { + loopbackAction = value; + } } if (alias == "eth0" || alias == "docker0") @@ -825,10 +865,11 @@ void IntfsOrch::doTask(Consumer &consumer) { if (!ip_prefix_in_key && isSubIntf) { - if (adminStateChanged == false) + if (!adminStateChanged) { adminUp = port.m_admin_state_up; } + if (!gPortsOrch->addSubPort(port, alias, vlan, adminUp, mtu)) { it++; @@ -856,6 +897,12 @@ void IntfsOrch::doTask(Consumer &consumer) it++; continue; } + + if (!adminStateChanged) + { + adminUp = port.m_admin_state_up; + } + if (!vnet_orch->setIntf(alias, vnet_name, ip_prefix_in_key ? &ip_prefix : nullptr, adminUp, mtu)) { it++; @@ -869,11 +916,12 @@ void IntfsOrch::doTask(Consumer &consumer) } else { - if (adminStateChanged == false) + if (!adminStateChanged) { adminUp = port.m_admin_state_up; } - if (!setIntf(alias, vrf_id, ip_prefix_in_key ? &ip_prefix : nullptr, adminUp, mtu)) + + if (!setIntf(alias, vrf_id, ip_prefix_in_key ? 
&ip_prefix : nullptr, adminUp, mtu, loopbackAction)) { it++; continue; @@ -905,6 +953,12 @@ void IntfsOrch::doTask(Consumer &consumer) setRouterIntfsMpls(port); gPortsOrch->setPort(alias, port); } + + /* Set loopback action */ + if (!loopbackAction.empty()) + { + setIntfLoopbackAction(port, loopbackAction); + } } } @@ -1034,10 +1088,12 @@ void IntfsOrch::doTask(Consumer &consumer) { if (removeIntf(alias, port.m_vr_id, ip_prefix_in_key ? &ip_prefix : nullptr)) { + m_removingIntfses.erase(alias); it = consumer.m_toSync.erase(it); } else { + m_removingIntfses.insert(alias); it++; continue; } @@ -1046,7 +1102,28 @@ void IntfsOrch::doTask(Consumer &consumer) } } -bool IntfsOrch::addRouterIntfs(sai_object_id_t vrf_id, Port &port) +bool IntfsOrch::getSaiLoopbackAction(const string &actionStr, sai_packet_action_t &action) +{ + const unordered_map loopbackActionMap = + { + {"drop", SAI_PACKET_ACTION_DROP}, + {"forward", SAI_PACKET_ACTION_FORWARD}, + }; + + auto it = loopbackActionMap.find(actionStr); + if (it != loopbackActionMap.end()) + { + action = loopbackActionMap.at(actionStr); + return true; + } + else + { + SWSS_LOG_WARN("Unsupported loopback action [%s]", actionStr.c_str()); + return false; + } +} + +bool IntfsOrch::addRouterIntfs(sai_object_id_t vrf_id, Port &port, string loopbackActionStr) { SWSS_LOG_ENTER(); @@ -1066,6 +1143,17 @@ bool IntfsOrch::addRouterIntfs(sai_object_id_t vrf_id, Port &port) attr.value.oid = vrf_id; attrs.push_back(attr); + if (!loopbackActionStr.empty()) + { + sai_packet_action_t loopbackAction; + if (getSaiLoopbackAction(loopbackActionStr, loopbackAction)) + { + attr.id = SAI_ROUTER_INTERFACE_ATTR_LOOPBACK_PACKET_ACTION; + attr.value.s32 = loopbackAction; + attrs.push_back(attr); + } + } + attr.id = SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS; if (port.m_mac) { @@ -1197,7 +1285,7 @@ bool IntfsOrch::removeRouterIntfs(Port &port) if (m_syncdIntfses[port.m_alias].ref_count > 0) { - SWSS_LOG_NOTICE("Router interface is still referenced"); + 
SWSS_LOG_NOTICE("Router interface %s is still referenced with ref count %d", port.m_alias.c_str(), m_syncdIntfses[port.m_alias].ref_count); return false; } @@ -1272,6 +1360,8 @@ void IntfsOrch::addIp2MeRoute(sai_object_id_t vrf_id, const IpPrefix &ip_prefix) { gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV6_ROUTE); } + + gFlowCounterRouteOrch->onAddMiscRouteEntry(vrf_id, IpPrefix(ip_prefix.getIp().to_string())); } void IntfsOrch::removeIp2MeRoute(sai_object_id_t vrf_id, const IpPrefix &ip_prefix) @@ -1301,6 +1391,8 @@ void IntfsOrch::removeIp2MeRoute(sai_object_id_t vrf_id, const IpPrefix &ip_pref { gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV6_ROUTE); } + + gFlowCounterRouteOrch->onRemoveMiscRouteEntry(vrf_id, IpPrefix(ip_prefix.getIp().to_string())); } void IntfsOrch::addDirectedBroadcast(const Port &port, const IpPrefix &ip_prefix) @@ -1506,6 +1598,22 @@ bool IntfsOrch::isRemoteSystemPortIntf(string alias) return false; } +bool IntfsOrch::isLocalSystemPortIntf(string alias) +{ + Port port; + if(gPortsOrch->getPort(alias, port)) + { + if (port.m_type == Port::LAG) + { + return(port.m_system_lag_info.switch_id == gVoqMySwitchId); + } + + return(port.m_system_port_info.type != SAI_SYSTEM_PORT_TYPE_REMOTE); + } + //Given alias is system port alias of the local port/LAG + return false; +} + void IntfsOrch::voqSyncAddIntf(string &alias) { //Sync only local interface. 
Confirm for the local interface and diff --git a/orchagent/intfsorch.h b/orchagent/intfsorch.h index 5605abf133..ea15ada14b 100644 --- a/orchagent/intfsorch.h +++ b/orchagent/intfsorch.h @@ -54,7 +54,9 @@ class IntfsOrch : public Orch void addRifToFlexCounter(const string&, const string&, const string&); void removeRifFromFlexCounter(const string&, const string&); - bool setIntf(const string& alias, sai_object_id_t vrf_id = gVirtualRouterId, const IpPrefix *ip_prefix = nullptr, const bool adminUp = true, const uint32_t mtu = 0); + bool setIntfLoopbackAction(const Port &port, string actionStr); + bool getSaiLoopbackAction(const string &actionStr, sai_packet_action_t &action); + bool setIntf(const string& alias, sai_object_id_t vrf_id = gVirtualRouterId, const IpPrefix *ip_prefix = nullptr, const bool adminUp = true, const uint32_t mtu = 0, string loopbackAction = ""); bool removeIntf(const string& alias, sai_object_id_t vrf_id = gVirtualRouterId, const IpPrefix *ip_prefix = nullptr); void addIp2MeRoute(sai_object_id_t vrf_id, const IpPrefix &ip_prefix); @@ -68,6 +70,7 @@ class IntfsOrch : public Orch bool updateSyncdIntfPfx(const string &alias, const IpPrefix &ip_prefix, bool add = true); bool isRemoteSystemPortIntf(string alias); + bool isLocalSystemPortIntf(string alias); private: @@ -89,9 +92,11 @@ class IntfsOrch : public Orch unique_ptr m_flexCounterTable; unique_ptr m_flexCounterGroupTable; + std::set m_removingIntfses; + std::string getRifFlexCounterTableKey(std::string s); - bool addRouterIntfs(sai_object_id_t vrf_id, Port &port); + bool addRouterIntfs(sai_object_id_t vrf_id, Port &port, string loopbackAction); bool removeRouterIntfs(Port &port); void addDirectedBroadcast(const Port &port, const IpPrefix &ip_prefix); diff --git a/orchagent/macsecorch.cpp b/orchagent/macsecorch.cpp index 12c5b1ddb9..9a5e48f883 100644 --- a/orchagent/macsecorch.cpp +++ b/orchagent/macsecorch.cpp @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -13,15 
+14,20 @@ #include #include #include +#include +#include /* Global Variables*/ #define AVAILABLE_ACL_PRIORITIES_LIMITATION (32) #define EAPOL_ETHER_TYPE (0x888e) -#define MACSEC_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS (1000) -#define COUNTERS_MACSEC_SA_ATTR_GROUP "COUNTERS_MACSEC_SA_ATTR" -#define COUNTERS_MACSEC_SA_GROUP "COUNTERS_MACSEC_SA" -#define COUNTERS_MACSEC_FLOW_GROUP "COUNTERS_MACSEC_FLOW" +#define PAUSE_ETHER_TYPE (0x8808) +#define MACSEC_STAT_XPN_POLLING_INTERVAL_MS (1000) +#define MACSEC_STAT_POLLING_INTERVAL_MS (10000) +#define PFC_MODE_BYPASS "bypass" +#define PFC_MODE_ENCRYPT "encrypt" +#define PFC_MODE_STRICT_ENCRYPT "strict_encrypt" +#define PFC_MODE_DEFAULT PFC_MODE_BYPASS extern sai_object_id_t gSwitchId; extern sai_macsec_api_t *sai_macsec_api; @@ -213,6 +219,68 @@ static void lexical_convert(const std::string &buffer, MACsecAuthKey &auth_key) } } +class MACsecSCI +{ +public: + operator sai_uint64_t () const + { + SWSS_LOG_ENTER(); + + return m_sci; + } + + std::string str() const + { + SWSS_LOG_ENTER(); + + return boost::algorithm::to_lower_copy(swss::binary_to_hex(&m_sci, sizeof(m_sci))); + } + + MACsecSCI& operator= (const std::string &buffer) + { + SWSS_LOG_ENTER(); + + if (!swss::hex_to_binary(buffer, reinterpret_cast(&m_sci), sizeof(m_sci))) + { + SWSS_LOG_THROW("Invalid SCI %s", buffer.c_str()); + } + + return *this; + } + + MACsecSCI() = default; + + MACsecSCI(const sai_uint64_t sci) + { + SWSS_LOG_ENTER(); + + this->m_sci = sci; + } + +private: + sai_uint64_t m_sci; +}; + +namespace swss { + +template<> +inline void lexical_convert(const std::string &buffer, MACsecSCI &sci) +{ + SWSS_LOG_ENTER(); + + sci = buffer; +} + +} + +std::ostream& operator<<(std::ostream& stream, const MACsecSCI& sci) +{ + SWSS_LOG_ENTER(); + + stream << sci.str(); + return stream; +} + /* Recover from a fail action by a serial of pre-defined recover actions */ class RecoverStack { @@ -535,24 +603,38 @@ MACsecOrch::MACsecOrch( m_state_macsec_ingress_sc(state_db, 
STATE_MACSEC_INGRESS_SC_TABLE_NAME), m_state_macsec_egress_sa(state_db, STATE_MACSEC_EGRESS_SA_TABLE_NAME), m_state_macsec_ingress_sa(state_db, STATE_MACSEC_INGRESS_SA_TABLE_NAME), + m_applPortTable(app_db, APP_PORT_TABLE_NAME), m_counter_db("COUNTERS_DB", 0), m_macsec_counters_map(&m_counter_db, COUNTERS_MACSEC_NAME_MAP), - m_macsec_flow_tx_counters_map(&m_counter_db, COUNTERS_MACSEC_FLOW_TX_NAME_MAP), - m_macsec_flow_rx_counters_map(&m_counter_db, COUNTERS_MACSEC_FLOW_RX_NAME_MAP), - m_macsec_sa_tx_counters_map(&m_counter_db, COUNTERS_MACSEC_SA_TX_NAME_MAP), - m_macsec_sa_rx_counters_map(&m_counter_db, COUNTERS_MACSEC_SA_RX_NAME_MAP), + m_gb_counter_db("GB_COUNTERS_DB", 0), + m_gb_macsec_counters_map(&m_gb_counter_db, COUNTERS_MACSEC_NAME_MAP), m_macsec_sa_attr_manager( COUNTERS_MACSEC_SA_ATTR_GROUP, StatsMode::READ, - MACSEC_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, true), + MACSEC_STAT_XPN_POLLING_INTERVAL_MS, true), m_macsec_sa_stat_manager( COUNTERS_MACSEC_SA_GROUP, StatsMode::READ, - MACSEC_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, true), + MACSEC_STAT_POLLING_INTERVAL_MS, true), m_macsec_flow_stat_manager( COUNTERS_MACSEC_FLOW_GROUP, StatsMode::READ, - MACSEC_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, true) + MACSEC_STAT_POLLING_INTERVAL_MS, true), + m_gb_macsec_sa_attr_manager( + "GB_FLEX_COUNTER_DB", + COUNTERS_MACSEC_SA_ATTR_GROUP, + StatsMode::READ, + MACSEC_STAT_XPN_POLLING_INTERVAL_MS, true), + m_gb_macsec_sa_stat_manager( + "GB_FLEX_COUNTER_DB", + COUNTERS_MACSEC_SA_GROUP, + StatsMode::READ, + MACSEC_STAT_POLLING_INTERVAL_MS, true), + m_gb_macsec_flow_stat_manager( + "GB_FLEX_COUNTER_DB", + COUNTERS_MACSEC_FLOW_GROUP, + StatsMode::READ, + MACSEC_STAT_POLLING_INTERVAL_MS, true) { SWSS_LOG_ENTER(); } @@ -799,7 +881,7 @@ task_process_status MACsecOrch::taskUpdateEgressSA( { SWSS_LOG_ENTER(); std::string port_name; - sai_uint64_t sci = 0; + MACsecSCI sci; macsec_an_t an = 0; if (!extract_variables(port_sci_an, ':', port_name, sci, an) || an > MAX_SA_NUMBER) { @@ 
-810,12 +892,35 @@ task_process_status MACsecOrch::taskUpdateEgressSA( MACsecOrchContext ctx(this, port_name, SAI_MACSEC_DIRECTION_EGRESS, sci, an); if (ctx.get_macsec_sc() == nullptr) { - SWSS_LOG_INFO("The MACsec SC 0x%" PRIx64 " hasn't been created at the port %s.", sci, port_name.c_str()); + SWSS_LOG_INFO("The MACsec SC %s hasn't been created at the port %s.", sci.str().c_str(), port_name.c_str()); return task_need_retry; } if (ctx.get_macsec_sc()->m_encoding_an == an) { - return createMACsecSA(port_sci_an, sa_attr, SAI_MACSEC_DIRECTION_EGRESS); + if (ctx.get_macsec_sa() == nullptr) + { + // The MACsec SA hasn't been created + return createMACsecSA(port_sci_an, sa_attr, SAI_MACSEC_DIRECTION_EGRESS); + } + else + { + // The MACsec SA has enabled, update SA's attributes + sai_uint64_t pn; + + if (get_value(sa_attr, "next_pn", pn)) + { + sai_attribute_t attr; + attr.id = SAI_MACSEC_SA_ATTR_CONFIGURED_EGRESS_XPN; + attr.value.u64 = pn; + if (!this->updateMACsecAttr(SAI_OBJECT_TYPE_MACSEC_SA, *(ctx.get_macsec_sa()), attr)) + { + SWSS_LOG_WARN("Fail to update next pn (%" PRIu64 ") of egress MACsec SA %s", pn, port_sci_an.c_str()); + return task_failed; + } + } + + return task_success; + } } return task_need_retry; } @@ -835,7 +940,7 @@ task_process_status MACsecOrch::taskUpdateIngressSA( SWSS_LOG_ENTER(); swss::AlphaBoolean alpha_boolean = false; - get_value(sa_attr, "active", alpha_boolean); + bool has_active_field = get_value(sa_attr, "active", alpha_boolean); bool active = alpha_boolean.operator bool(); if (active) { @@ -845,7 +950,7 @@ task_process_status MACsecOrch::taskUpdateIngressSA( { std::string port_name; - sai_uint64_t sci = 0; + MACsecSCI sci; macsec_an_t an = 0; if (!extract_variables(port_sci_an, ':', port_name, sci, an) || an > MAX_SA_NUMBER) { @@ -857,7 +962,29 @@ task_process_status MACsecOrch::taskUpdateIngressSA( if (ctx.get_macsec_sa() != nullptr) { - return deleteMACsecSA(port_sci_an, SAI_MACSEC_DIRECTION_INGRESS); + if (has_active_field) + { + 
// Delete MACsec SA explicitly by set active to false + return deleteMACsecSA(port_sci_an, SAI_MACSEC_DIRECTION_INGRESS); + } + else + { + sai_uint64_t pn; + + if (get_value(sa_attr, "lowest_acceptable_pn", pn)) + { + sai_attribute_t attr; + attr.id = SAI_MACSEC_SA_ATTR_MINIMUM_INGRESS_XPN; + attr.value.u64 = pn; + if (!this->updateMACsecAttr(SAI_OBJECT_TYPE_MACSEC_SA, *(ctx.get_macsec_sa()), attr)) + { + SWSS_LOG_WARN("Fail to update lowest acceptable PN (%" PRIu64 ") of ingress MACsec SA %s", pn, port_sci_an.c_str()); + return task_failed; + } + } + + return task_success; + } } else { @@ -868,6 +995,8 @@ task_process_status MACsecOrch::taskUpdateIngressSA( return task_need_retry; } } + + return task_success; } task_process_status MACsecOrch::taskDeleteIngressSA( @@ -964,6 +1093,32 @@ bool MACsecOrch::initMACsecObject(sai_object_id_t switch_id) } macsec_obj.first->second.m_sci_in_ingress_macsec_acl = attrs.front().value.booldata; + attrs.clear(); + attr.id = SAI_MACSEC_ATTR_MAX_SECURE_ASSOCIATIONS_PER_SC; + attrs.push_back(attr); + status = sai_macsec_api->get_macsec_attribute( + macsec_obj.first->second.m_ingress_id, + static_cast(attrs.size()), + attrs.data()); + if (status != SAI_STATUS_SUCCESS) + { + // Default to 4 if SAI_MACSEC_ATTR_MAX_SECURE_ASSOCIATION_PER_SC isn't supported + macsec_obj.first->second.m_max_sa_per_sc = 4; + } else { + switch (attrs.front().value.s32) + { + case SAI_MACSEC_MAX_SECURE_ASSOCIATIONS_PER_SC_TWO: + macsec_obj.first->second.m_max_sa_per_sc = 2; + break; + case SAI_MACSEC_MAX_SECURE_ASSOCIATIONS_PER_SC_FOUR: + macsec_obj.first->second.m_max_sa_per_sc = 4; + break; + default: + SWSS_LOG_WARN( "Unsupported value returned from SAI_MACSEC_ATTR_MAX_SECURE_ASSOCIATION_PER_SC" ); + return false; + } + } + recover.clear(); return true; } @@ -1085,16 +1240,19 @@ bool MACsecOrch::createMACsecPort( port_id, switch_id, SAI_MACSEC_DIRECTION_EGRESS, - macsec_port.m_sci_in_sectag)) + macsec_port.m_sci_in_sectag, + port_name, + phy)) { 
SWSS_LOG_WARN("Cannot init the ACL Table at the port %s.", port_name.c_str()); return false; } - recover.add_action([this, &macsec_port, port_id]() { + recover.add_action([this, &macsec_port, port_id, phy]() { this->deinitMACsecACLTable( macsec_port.m_egress_acl_table, port_id, - SAI_MACSEC_DIRECTION_EGRESS); + SAI_MACSEC_DIRECTION_EGRESS, + phy); }); if (!initMACsecACLTable( @@ -1102,35 +1260,52 @@ bool MACsecOrch::createMACsecPort( port_id, switch_id, SAI_MACSEC_DIRECTION_INGRESS, - macsec_port.m_sci_in_sectag)) + macsec_port.m_sci_in_sectag, + port_name, + phy)) { SWSS_LOG_WARN("Cannot init the ACL Table at the port %s.", port_name.c_str()); return false; } - recover.add_action([this, &macsec_port, port_id]() { + recover.add_action([this, &macsec_port, port_id, phy]() { this->deinitMACsecACLTable( macsec_port.m_ingress_acl_table, port_id, - SAI_MACSEC_DIRECTION_INGRESS); + SAI_MACSEC_DIRECTION_INGRESS, + phy); }); - if (phy && phy->macsec_ipg != 0) + m_port_orch->setMACsecEnabledState(port_id, true); + + if (phy) { - if (!m_port_orch->getPortIPG(port.m_port_id, macsec_port.m_original_ipg)) + if (!setPFCForward(port_id, true)) { - SWSS_LOG_WARN("Cannot get Port IPG at the port %s", port_name.c_str()); + SWSS_LOG_WARN("Cannot enable PFC forward at the port %s.", port_name.c_str()); return false; } - if (!m_port_orch->setPortIPG(port.m_port_id, phy->macsec_ipg)) + recover.add_action([this, port_id]() + { this->setPFCForward(port_id, false); }); + + if (phy->macsec_ipg != 0) { - SWSS_LOG_WARN("Cannot set MACsec IPG to %u at the port %s", phy->macsec_ipg, port_name.c_str()); - return false; + if (!m_port_orch->getPortIPG(port.m_port_id, macsec_port.m_original_ipg)) + { + SWSS_LOG_WARN("Cannot get Port IPG at the port %s", port_name.c_str()); + return false; + } + if (!m_port_orch->setPortIPG(port.m_port_id, phy->macsec_ipg)) + { + SWSS_LOG_WARN("Cannot set MACsec IPG to %u at the port %s", phy->macsec_ipg, port_name.c_str()); + return false; + } } } 
SWSS_LOG_NOTICE("MACsec port %s is created.", port_name.c_str()); std::vector fvVector; + fvVector.emplace_back("max_sa_per_sc", std::to_string(macsec_obj.m_max_sa_per_sc)); fvVector.emplace_back("state", "ok"); m_state_macsec_port.set(port_name, fvVector); @@ -1277,16 +1452,18 @@ bool MACsecOrch::updateMACsecSCs(MACsecPort &macsec_port, std::functionsecond)) { return false; } } - for (auto &sc : macsec_port.m_ingress_scs) + sc = macsec_port.m_ingress_scs.begin(); + while (sc != macsec_port.m_ingress_scs.end()) { - if (!action(sc.second)) + if (!action((sc++)->second)) { return false; } @@ -1307,18 +1484,22 @@ bool MACsecOrch::deleteMACsecPort( bool result = true; - for (auto &sc : macsec_port.m_egress_scs) + auto sc = macsec_port.m_ingress_scs.begin(); + while (sc != macsec_port.m_ingress_scs.end()) { - const std::string port_sci = swss::join(':', port_name, sc.first); - if (deleteMACsecSC(port_sci, SAI_MACSEC_DIRECTION_EGRESS) != task_success) + const std::string port_sci = swss::join(':', port_name, MACsecSCI(sc->first)); + sc ++; + if (deleteMACsecSC(port_sci, SAI_MACSEC_DIRECTION_INGRESS) != task_success) { result &= false; } } - for (auto &sc : macsec_port.m_ingress_scs) + sc = macsec_port.m_egress_scs.begin(); + while (sc != macsec_port.m_egress_scs.end()) { - const std::string port_sci = swss::join(':', port_name, sc.first); - if (deleteMACsecSC(port_sci, SAI_MACSEC_DIRECTION_INGRESS) != task_success) + const std::string port_sci = swss::join(':', port_name, MACsecSCI(sc->first)); + sc ++; + if (deleteMACsecSC(port_sci, SAI_MACSEC_DIRECTION_EGRESS) != task_success) { result &= false; } @@ -1339,13 +1520,13 @@ bool MACsecOrch::deleteMACsecPort( } } - if (!deinitMACsecACLTable(macsec_port.m_ingress_acl_table, port_id, SAI_MACSEC_DIRECTION_INGRESS)) + if (!deinitMACsecACLTable(macsec_port.m_ingress_acl_table, port_id, SAI_MACSEC_DIRECTION_INGRESS, phy)) { SWSS_LOG_WARN("Cannot deinit ingress ACL table at the port %s.", port_name.c_str()); result &= false; } - 
if (!deinitMACsecACLTable(macsec_port.m_egress_acl_table, port_id, SAI_MACSEC_DIRECTION_EGRESS)) + if (!deinitMACsecACLTable(macsec_port.m_egress_acl_table, port_id, SAI_MACSEC_DIRECTION_EGRESS, phy)) { SWSS_LOG_WARN("Cannot deinit egress ACL table at the port %s.", port_name.c_str()); result &= false; @@ -1363,13 +1544,24 @@ bool MACsecOrch::deleteMACsecPort( result &= false; } - if (phy && phy->macsec_ipg != 0) + m_port_orch->setMACsecEnabledState(port_id, false); + + if (phy) { - if (!m_port_orch->setPortIPG(port.m_port_id, macsec_port.m_original_ipg)) + if (!setPFCForward(port_id, false)) { - SWSS_LOG_WARN("Cannot set MACsec IPG to %u at the port %s", macsec_port.m_original_ipg, port_name.c_str()); + SWSS_LOG_WARN("Cannot disable PFC forward at the port %s.", port_name.c_str()); result &= false; } + + if (phy->macsec_ipg != 0) + { + if (!m_port_orch->setPortIPG(port.m_port_id, macsec_port.m_original_ipg)) + { + SWSS_LOG_WARN("Cannot set MACsec IPG to %u at the port %s", macsec_port.m_original_ipg, port_name.c_str()); + result &= false; + } + } } m_state_macsec_port.del(port_name); @@ -1447,7 +1639,7 @@ task_process_status MACsecOrch::updateMACsecSC( SWSS_LOG_ENTER(); std::string port_name; - sai_uint64_t sci = {0}; + MACsecSCI sci; if (!extract_variables(port_sci, ':', port_name, sci)) { SWSS_LOG_WARN("The key %s isn't correct.", port_sci.c_str()); @@ -1526,7 +1718,7 @@ bool MACsecOrch::createMACsecSC( RecoverStack recover; - const std::string port_sci = swss::join(':', port_name, sci); + const std::string port_sci = swss::join(':', port_name, MACsecSCI(sci)); auto scs = (direction == SAI_MACSEC_DIRECTION_EGRESS) @@ -1619,11 +1811,11 @@ bool MACsecOrch::createMACsecSC( fvVector.emplace_back("state", "ok"); if (direction == SAI_MACSEC_DIRECTION_EGRESS) { - m_state_macsec_egress_sc.set(swss::join('|', port_name, sci), fvVector); + m_state_macsec_egress_sc.set(swss::join('|', port_name, MACsecSCI(sci)), fvVector); } else { - 
m_state_macsec_ingress_sc.set(swss::join('|', port_name, sci), fvVector); + m_state_macsec_ingress_sc.set(swss::join('|', port_name, MACsecSCI(sci)), fvVector); } recover.clear(); @@ -1671,7 +1863,7 @@ bool MACsecOrch::createMACsecSC( attrs.data()); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_WARN("Cannot create MACsec egress SC 0x%" PRIx64, sci); + SWSS_LOG_WARN("Cannot create MACsec egress SC %s", MACsecSCI(sci).str().c_str()); task_process_status handle_status = handleSaiCreateStatus(SAI_API_MACSEC, status); if (handle_status != task_success) { @@ -1688,7 +1880,7 @@ task_process_status MACsecOrch::deleteMACsecSC( SWSS_LOG_ENTER(); std::string port_name; - sai_uint64_t sci = 0; + MACsecSCI sci; if (!extract_variables(port_sci, ':', port_name, sci)) { SWSS_LOG_WARN("The key %s isn't correct.", port_sci.c_str()); @@ -1705,9 +1897,11 @@ task_process_status MACsecOrch::deleteMACsecSC( auto result = task_success; - for (auto &sa : ctx.get_macsec_sc()->m_sa_ids) + auto sa = ctx.get_macsec_sc()->m_sa_ids.begin(); + while (sa != ctx.get_macsec_sc()->m_sa_ids.end()) { - const std::string port_sci_an = swss::join(':', port_sci, sa.first); + const std::string port_sci_an = swss::join(':', port_sci, sa->first); + sa ++; deleteMACsecSA(port_sci_an, direction); } @@ -1739,11 +1933,11 @@ task_process_status MACsecOrch::deleteMACsecSC( if (direction == SAI_MACSEC_DIRECTION_EGRESS) { - m_state_macsec_egress_sc.del(swss::join('|', port_name, sci)); + m_state_macsec_egress_sc.del(swss::join('|', port_name, MACsecSCI(sci))); } else { - m_state_macsec_ingress_sc.del(swss::join('|', port_name, sci)); + m_state_macsec_ingress_sc.del(swss::join('|', port_name, MACsecSCI(sci))); } return result; @@ -1809,7 +2003,7 @@ task_process_status MACsecOrch::createMACsecSA( SWSS_LOG_ENTER(); std::string port_name; - sai_uint64_t sci = 0; + MACsecSCI sci; macsec_an_t an = 0; if (!extract_variables(port_sci_an, ':', port_name, sci, an) || an > MAX_SA_NUMBER) { @@ -1827,7 +2021,7 @@ 
task_process_status MACsecOrch::createMACsecSA( if (ctx.get_macsec_sc() == nullptr) { - SWSS_LOG_INFO("The MACsec SC 0x%" PRIx64 " hasn't been created at the port %s.", sci, port_name.c_str()); + SWSS_LOG_INFO("The MACsec SC %s hasn't been created at the port %s.", sci.str().c_str(), port_name.c_str()); return task_need_retry; } auto sc = ctx.get_macsec_sc(); @@ -1943,17 +2137,17 @@ task_process_status MACsecOrch::createMACsecSA( sc->m_sa_ids.erase(an); }); - installCounter(CounterType::MACSEC_SA_ATTR, direction, port_sci_an, sc->m_sa_ids[an], macsec_sa_attrs); + installCounter(ctx, CounterType::MACSEC_SA_ATTR, direction, port_sci_an, sc->m_sa_ids[an], macsec_sa_attrs); std::vector fvVector; fvVector.emplace_back("state", "ok"); if (direction == SAI_MACSEC_DIRECTION_EGRESS) { - installCounter(CounterType::MACSEC_SA, direction, port_sci_an, sc->m_sa_ids[an], macsec_sa_egress_stats); + installCounter(ctx, CounterType::MACSEC_SA, direction, port_sci_an, sc->m_sa_ids[an], macsec_sa_egress_stats); m_state_macsec_egress_sa.set(swss::join('|', port_name, sci, an), fvVector); } else { - installCounter(CounterType::MACSEC_SA, direction, port_sci_an, sc->m_sa_ids[an], macsec_sa_ingress_stats); + installCounter(ctx, CounterType::MACSEC_SA, direction, port_sci_an, sc->m_sa_ids[an], macsec_sa_ingress_stats); m_state_macsec_ingress_sa.set(swss::join('|', port_name, sci, an), fvVector); } @@ -1970,7 +2164,7 @@ task_process_status MACsecOrch::deleteMACsecSA( SWSS_LOG_ENTER(); std::string port_name = ""; - sai_uint64_t sci = 0; + MACsecSCI sci; macsec_an_t an = 0; if (!extract_variables(port_sci_an, ':', port_name, sci, an) || an > MAX_SA_NUMBER) { @@ -1988,8 +2182,8 @@ task_process_status MACsecOrch::deleteMACsecSA( auto result = task_success; - uninstallCounter(CounterType::MACSEC_SA_ATTR, direction, port_sci_an, ctx.get_macsec_sc()->m_sa_ids[an]); - uninstallCounter(CounterType::MACSEC_SA, direction, port_sci_an, ctx.get_macsec_sc()->m_sa_ids[an]); + uninstallCounter(ctx, 
CounterType::MACSEC_SA_ATTR, direction, port_sci_an, ctx.get_macsec_sc()->m_sa_ids[an]); + uninstallCounter(ctx, CounterType::MACSEC_SA, direction, port_sci_an, ctx.get_macsec_sc()->m_sa_ids[an]); if (!deleteMACsecSA(ctx.get_macsec_sc()->m_sa_ids[an])) { SWSS_LOG_WARN("Cannot delete the MACsec SA %s.", port_sci_an.c_str()); @@ -2114,17 +2308,42 @@ bool MACsecOrch::deleteMACsecSA(sai_object_id_t sa_id) return true; } +FlexCounterManager& MACsecOrch::MACsecSaStatManager(MACsecOrchContext &ctx) +{ + if (ctx.get_gearbox_phy() != nullptr) + return m_gb_macsec_sa_stat_manager; + return m_macsec_sa_stat_manager; +} + +FlexCounterManager& MACsecOrch::MACsecSaAttrStatManager(MACsecOrchContext &ctx) +{ + if (ctx.get_gearbox_phy() != nullptr) + return m_gb_macsec_sa_attr_manager; + return m_macsec_sa_attr_manager; +} + +FlexCounterManager& MACsecOrch::MACsecFlowStatManager(MACsecOrchContext &ctx) +{ + if (ctx.get_gearbox_phy() != nullptr) + return m_gb_macsec_flow_stat_manager; + return m_macsec_flow_stat_manager; +} + +Table& MACsecOrch::MACsecCountersMap(MACsecOrchContext &ctx) +{ + if (ctx.get_gearbox_phy() != nullptr) + return m_gb_macsec_counters_map; + return m_macsec_counters_map; +} + void MACsecOrch::installCounter( + MACsecOrchContext &ctx, CounterType counter_type, sai_macsec_direction_t direction, const std::string &obj_name, sai_object_id_t obj_id, const std::vector &stats) { - FieldValueTuple tuple(obj_name, sai_serialize_object_id(obj_id)); - vector fields; - fields.push_back(tuple); - std::unordered_set counter_stats; for (const auto &stat : stats) { @@ -2133,24 +2352,16 @@ void MACsecOrch::installCounter( switch(counter_type) { case CounterType::MACSEC_SA_ATTR: - m_macsec_sa_attr_manager.setCounterIdList(obj_id, counter_type, counter_stats); - m_macsec_counters_map.set("", fields); + MACsecSaAttrStatManager(ctx).setCounterIdList(obj_id, counter_type, counter_stats); break; case CounterType::MACSEC_SA: - m_macsec_sa_stat_manager.setCounterIdList(obj_id, 
counter_type, counter_stats); - if (direction == SAI_MACSEC_DIRECTION_EGRESS) - { - m_macsec_sa_tx_counters_map.set("", fields); - } - else - { - m_macsec_sa_rx_counters_map.set("", fields); - } + MACsecSaStatManager(ctx).setCounterIdList(obj_id, counter_type, counter_stats); + MACsecCountersMap(ctx).hset("", obj_name, sai_serialize_object_id(obj_id)); break; case CounterType::MACSEC_FLOW: - m_macsec_flow_stat_manager.setCounterIdList(obj_id, counter_type, counter_stats); + MACsecFlowStatManager(ctx).setCounterIdList(obj_id, counter_type, counter_stats); break; default: @@ -2161,6 +2372,7 @@ void MACsecOrch::installCounter( } void MACsecOrch::uninstallCounter( + MACsecOrchContext &ctx, CounterType counter_type, sai_macsec_direction_t direction, const std::string &obj_name, @@ -2169,24 +2381,16 @@ void MACsecOrch::uninstallCounter( switch(counter_type) { case CounterType::MACSEC_SA_ATTR: - m_macsec_sa_attr_manager.clearCounterIdList(obj_id); - m_counter_db.hdel(COUNTERS_MACSEC_NAME_MAP, obj_name); + MACsecSaAttrStatManager(ctx).clearCounterIdList(obj_id); break; case CounterType::MACSEC_SA: - m_macsec_sa_stat_manager.clearCounterIdList(obj_id); - if (direction == SAI_MACSEC_DIRECTION_EGRESS) - { - m_counter_db.hdel(COUNTERS_MACSEC_SA_TX_NAME_MAP, obj_name); - } - else - { - m_counter_db.hdel(COUNTERS_MACSEC_SA_RX_NAME_MAP, obj_name); - } + MACsecSaStatManager(ctx).clearCounterIdList(obj_id); + MACsecCountersMap(ctx).hdel("", obj_name); break; case CounterType::MACSEC_FLOW: - m_macsec_flow_stat_manager.clearCounterIdList(obj_id); + MACsecFlowStatManager(ctx).clearCounterIdList(obj_id); break; default: @@ -2202,7 +2406,9 @@ bool MACsecOrch::initMACsecACLTable( sai_object_id_t port_id, sai_object_id_t switch_id, sai_macsec_direction_t direction, - bool sci_in_sectag) + bool sci_in_sectag, + const std::string &port_name, + const gearbox_phy_t* phy) { SWSS_LOG_ENTER(); @@ -2260,6 +2466,36 @@ bool MACsecOrch::initMACsecACLTable( } recover.add_action([&acl_table]() { 
acl_table.m_available_acl_priorities.clear(); }); + if (phy) + { + if (acl_table.m_available_acl_priorities.empty()) + { + SWSS_LOG_WARN("Available ACL priorities have been exhausted."); + return false; + } + priority = *(acl_table.m_available_acl_priorities.rbegin()); + acl_table.m_available_acl_priorities.erase(std::prev(acl_table.m_available_acl_priorities.end())); + + TaskArgs values; + if (!m_applPortTable.get(port_name, values)) + { + SWSS_LOG_ERROR("Port %s isn't existing", port_name.c_str()); + return false; + } + std::string pfc_mode = PFC_MODE_DEFAULT; + get_value(values, "pfc_encryption_mode", pfc_mode); + + if (!createPFCEntry(acl_table.m_pfc_entry_id, acl_table.m_table_id, switch_id, direction, priority, pfc_mode)) + { + return false; + } + recover.add_action([this, &acl_table, priority]() { + this->deleteMACsecACLEntry(acl_table.m_pfc_entry_id); + acl_table.m_pfc_entry_id = SAI_NULL_OBJECT_ID; + acl_table.m_available_acl_priorities.insert(priority); + }); + } + recover.clear(); return true; } @@ -2267,7 +2503,8 @@ bool MACsecOrch::initMACsecACLTable( bool MACsecOrch::deinitMACsecACLTable( const MACsecACLTable &acl_table, sai_object_id_t port_id, - sai_macsec_direction_t direction) + sai_macsec_direction_t direction, + const gearbox_phy_t* phy) { bool result = true; @@ -2278,9 +2515,17 @@ bool MACsecOrch::deinitMACsecACLTable( } if (!deleteMACsecACLEntry(acl_table.m_eapol_packet_forward_entry_id)) { - SWSS_LOG_WARN("Cannot delete ACL entry"); + SWSS_LOG_WARN("Cannot delete EAPOL ACL entry"); result &= false; } + if (phy) + { + if (!deleteMACsecACLEntry(acl_table.m_pfc_entry_id)) + { + SWSS_LOG_WARN("Cannot delete PFC ACL entry"); + result &= false; + } + } if (!deleteMACsecACLTable(acl_table.m_table_id)) { SWSS_LOG_WARN("Cannot delete ACL table"); @@ -2554,6 +2799,11 @@ bool MACsecOrch::setMACsecFlowActive(sai_object_id_t entry_id, sai_object_id_t f bool MACsecOrch::deleteMACsecACLEntry(sai_object_id_t entry_id) { + if (entry_id == SAI_NULL_OBJECT_ID) 
+ { + return true; + } + sai_status_t status = sai_acl_api->remove_acl_entry(entry_id); if (status != SAI_STATUS_SUCCESS) { @@ -2585,3 +2835,151 @@ bool MACsecOrch::getAclPriority(sai_object_id_t switch_id, sai_attr_id_t priorit return true; } + +bool MACsecOrch::setPFCForward(sai_object_id_t port_id, bool enable) +{ + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + sai_status_t status; + + // Enable/Disable Forward pause frame + attr.id = SAI_PORT_ATTR_GLOBAL_FLOW_CONTROL_FORWARD; + attr.value.booldata = enable; + status = sai_port_api->set_port_attribute(port_id, &attr); + if (status != SAI_STATUS_SUCCESS) + { + task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + // Enable/Disable Forward PFC frame + attr.id = SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL_FORWARD; + attr.value.booldata = enable; + status = sai_port_api->set_port_attribute(port_id, &attr); + if (status != SAI_STATUS_SUCCESS) + { + task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + return true; +} + +bool MACsecOrch::createPFCEntry( + sai_object_id_t &entry_id, + sai_object_id_t table_id, + sai_object_id_t switch_id, + sai_macsec_direction_t direction, + sai_uint32_t priority, + const std::string &pfc_mode) +{ + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + std::vector attrs; + + if (pfc_mode == PFC_MODE_BYPASS) + { + attrs.push_back(identifyPFC()); + attrs.push_back(bypassPFC()); + } + else if (pfc_mode == PFC_MODE_ENCRYPT) + { + if (direction == SAI_MACSEC_DIRECTION_EGRESS) + { + entry_id = SAI_NULL_OBJECT_ID; + return true; + } + else + { + attrs.push_back(identifyPFC()); + attrs.push_back(bypassPFC()); + } + } + else if (pfc_mode == PFC_MODE_STRICT_ENCRYPT) + { + if (direction == SAI_MACSEC_DIRECTION_EGRESS) + { + entry_id = SAI_NULL_OBJECT_ID; + 
return true; + } + else + { + attrs.push_back(identifyPFC()); + attrs.push_back(dropPFC()); + } + } + + attr.id = SAI_ACL_ENTRY_ATTR_TABLE_ID; + attr.value.oid = table_id; + attrs.push_back(attr); + attr.id = SAI_ACL_ENTRY_ATTR_PRIORITY; + attr.value.u32 = priority; + attrs.push_back(attr); + attr.id = SAI_ACL_ENTRY_ATTR_ADMIN_STATE; + attr.value.booldata = true; + attrs.push_back(attr); + + sai_status_t status = sai_acl_api->create_acl_entry( + &entry_id, + switch_id, + static_cast(attrs.size()), + attrs.data()); + if (status != SAI_STATUS_SUCCESS) + { + task_process_status handle_status = handleSaiCreateStatus(SAI_API_ACL, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + + return true; +} + +sai_attribute_t MACsecOrch::identifyPFC() const +{ + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + + attr.id = SAI_ACL_ENTRY_ATTR_FIELD_ETHER_TYPE; + attr.value.aclfield.data.u16 = PAUSE_ETHER_TYPE; + attr.value.aclfield.mask.u16 = 0xFFFF; + attr.value.aclfield.enable = true; + + return attr; +} + +sai_attribute_t MACsecOrch::bypassPFC() const +{ + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + + attr.id = SAI_ACL_ENTRY_ATTR_ACTION_PACKET_ACTION; + attr.value.aclaction.parameter.s32 = SAI_PACKET_ACTION_FORWARD; + attr.value.aclaction.enable = true; + + return attr; +} + +sai_attribute_t MACsecOrch::dropPFC() const +{ + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + + attr.id = SAI_ACL_ENTRY_ATTR_ACTION_PACKET_ACTION; + attr.value.aclaction.parameter.s32 = SAI_PACKET_ACTION_DROP; + attr.value.aclaction.enable = true; + + return attr; +} diff --git a/orchagent/macsecorch.h b/orchagent/macsecorch.h index 6702c75cf6..9c6e2be636 100644 --- a/orchagent/macsecorch.h +++ b/orchagent/macsecorch.h @@ -16,6 +16,10 @@ using namespace swss; +#define COUNTERS_MACSEC_SA_ATTR_GROUP "COUNTERS_MACSEC_SA_ATTR" +#define COUNTERS_MACSEC_SA_GROUP "COUNTERS_MACSEC_SA" +#define COUNTERS_MACSEC_FLOW_GROUP "COUNTERS_MACSEC_FLOW" + // AN is 
a 2 bit number, it can only be 0, 1, 2 or 3 #define MAX_SA_NUMBER (3) @@ -63,18 +67,22 @@ class MACsecOrch : public Orch DBConnector m_counter_db; Table m_macsec_counters_map; - Table m_macsec_flow_tx_counters_map; - Table m_macsec_flow_rx_counters_map; - Table m_macsec_sa_tx_counters_map; - Table m_macsec_sa_rx_counters_map; + DBConnector m_gb_counter_db; + Table m_gb_macsec_counters_map; + Table m_applPortTable; FlexCounterManager m_macsec_sa_attr_manager; FlexCounterManager m_macsec_sa_stat_manager; FlexCounterManager m_macsec_flow_stat_manager; + FlexCounterManager m_gb_macsec_sa_attr_manager; + FlexCounterManager m_gb_macsec_sa_stat_manager; + FlexCounterManager m_gb_macsec_flow_stat_manager; + struct MACsecACLTable { sai_object_id_t m_table_id; sai_object_id_t m_eapol_packet_forward_entry_id; + sai_object_id_t m_pfc_entry_id; std::set m_available_acl_priorities; }; struct MACsecSC @@ -108,6 +116,7 @@ class MACsecOrch : public Orch sai_object_id_t m_ingress_id; map > m_macsec_ports; bool m_sci_in_ingress_macsec_acl; + sai_uint8_t m_max_sa_per_sc; }; map m_macsec_objs; map > m_macsec_ports; @@ -179,6 +188,7 @@ class MACsecOrch : public Orch const std::string &port_sci, sai_macsec_direction_t direction); bool deleteMACsecSC(sai_object_id_t sc_id); + bool setMACsecSC(sai_object_id_t sc_id, const sai_attribute_t &attr); bool updateMACsecAttr(sai_object_type_t object_type, sai_object_id_t object_id, const sai_attribute_t &attr); @@ -205,28 +215,40 @@ class MACsecOrch : public Orch /* Counter */ void installCounter( + MACsecOrchContext &ctx, CounterType counter_type, sai_macsec_direction_t direction, const std::string &obj_name, sai_object_id_t obj_id, const std::vector &stats); void uninstallCounter( + MACsecOrchContext &ctx, CounterType counter_type, sai_macsec_direction_t direction, const std::string &obj_name, sai_object_id_t obj_id); + Table& MACsecCountersMap(MACsecOrchContext &ctx); + + /* Flex Counter Manager */ + FlexCounterManager& 
MACsecSaStatManager(MACsecOrchContext &ctx); + FlexCounterManager& MACsecSaAttrStatManager(MACsecOrchContext &ctx); + FlexCounterManager& MACsecFlowStatManager(MACsecOrchContext &ctx); + /* MACsec ACL */ bool initMACsecACLTable( MACsecACLTable &acl_table, sai_object_id_t port_id, sai_object_id_t switch_id, sai_macsec_direction_t direction, - bool sci_in_sectag); + bool sci_in_sectag, + const std::string &port_name, + const gearbox_phy_t* phy); bool deinitMACsecACLTable( const MACsecACLTable &acl_table, sai_object_id_t port_id, - sai_macsec_direction_t direction); + sai_macsec_direction_t direction, + const gearbox_phy_t* phy); bool createMACsecACLTable( sai_object_id_t &table_id, sai_object_id_t switch_id, @@ -255,6 +277,19 @@ class MACsecOrch : public Orch sai_object_id_t switch_id, sai_attr_id_t priority_id, sai_uint32_t &priority) const; + + /* PFC */ + bool setPFCForward(sai_object_id_t port_id, bool enable); + bool createPFCEntry(sai_object_id_t &entry_id, + sai_object_id_t table_id, + sai_object_id_t switch_id, + sai_macsec_direction_t direction, + sai_uint32_t priority, + const std::string &pfc_mode); + sai_attribute_t identifyPFC() const; + sai_attribute_t bypassPFC() const; + sai_attribute_t dropPFC() const; + }; #endif // ORCHAGENT_MACSECORCH_H_ diff --git a/orchagent/main.cpp b/orchagent/main.cpp index de96234a2d..eab2369913 100644 --- a/orchagent/main.cpp +++ b/orchagent/main.cpp @@ -19,8 +19,6 @@ extern "C" { #include #include -#include "timestamp.h" - #include #include @@ -52,25 +50,14 @@ MacAddress gVxlanMacAddress; extern size_t gMaxBulkSize; #define DEFAULT_BATCH_SIZE 128 -int gBatchSize = DEFAULT_BATCH_SIZE; - -bool gSairedisRecord = true; -bool gSwssRecord = true; -bool gResponsePublisherRecord = false; -bool gLogRotate = false; -bool gSaiRedisLogRotate = false; -bool gResponsePublisherLogRotate = false; +extern int gBatchSize; + bool gSyncMode = false; sai_redis_communication_mode_t gRedisCommunicationMode = 
SAI_REDIS_COMMUNICATION_MODE_REDIS_ASYNC; string gAsicInstance; extern bool gIsNatSupported; -ofstream gRecordOfs; -string gRecordFile; -ofstream gResponsePublisherRecordOfs; -string gResponsePublisherRecordFile; - #define SAIREDIS_RECORD_ENABLE 0x1 #define SWSS_RECORD_ENABLE (0x1 << 1) #define RESPONSE_PUBLISHER_RECORD_ENABLE (0x1 << 2) @@ -109,9 +96,9 @@ void sighup_handler(int signo) /* * Don't do any logging since they are using mutexes. */ - gLogRotate = true; - gSaiRedisLogRotate = true; - gResponsePublisherLogRotate = true; + Recorder::Instance().swss.setRotate(true); + Recorder::Instance().sairedis.setRotate(true); + Recorder::Instance().respub.setRotate(true); } void syncd_apply_view() @@ -127,7 +114,7 @@ void syncd_apply_view() if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to notify syncd APPLY_VIEW %d", status); - exit(EXIT_FAILURE); + handleSaiFailure(true); } } @@ -171,13 +158,21 @@ void getCfgSwitchType(DBConnector *cfgDb, string &switch_type) { Table cfgDeviceMetaDataTable(cfgDb, CFG_DEVICE_METADATA_TABLE_NAME); - if (!cfgDeviceMetaDataTable.hget("localhost", "switch_type", switch_type)) + try { - //Switch type is not configured. Consider it default = "switch" (regular switch) + if (!cfgDeviceMetaDataTable.hget("localhost", "switch_type", switch_type)) + { + //Switch type is not configured. 
Consider it default = "switch" (regular switch) + switch_type = "switch"; + } + } + catch(const std::system_error& e) + { + SWSS_LOG_ERROR("System error: %s", e.what()); switch_type = "switch"; } - if (switch_type != "voq" && switch_type != "fabric" && switch_type != "switch") + if (switch_type != "voq" && switch_type != "fabric" && switch_type != "chassis-packet" && switch_type != "switch") { SWSS_LOG_ERROR("Invalid switch type %s configured", switch_type.c_str()); //If configured switch type is none of the supported, assume regular switch @@ -197,64 +192,72 @@ bool getSystemPortConfigList(DBConnector *cfgDb, DBConnector *appDb, vector attrs; @@ -435,57 +463,18 @@ int main(int argc, char **argv) attr.value.ptr = (void *)on_fdb_event; attrs.push_back(attr); - // Initialize recording parameters. - gSairedisRecord = - (record_type & SAIREDIS_RECORD_ENABLE) == SAIREDIS_RECORD_ENABLE; - gSwssRecord = (record_type & SWSS_RECORD_ENABLE) == SWSS_RECORD_ENABLE; - gResponsePublisherRecord = - (record_type & RESPONSE_PUBLISHER_RECORD_ENABLE) == - RESPONSE_PUBLISHER_RECORD_ENABLE; - - /* Disable/enable SwSS recording */ - if (gSwssRecord) - { - gRecordFile = record_location + "/" + swss_rec_filename; - gRecordOfs.open(gRecordFile, std::ofstream::out | std::ofstream::app); - if (!gRecordOfs.is_open()) - { - SWSS_LOG_ERROR("Failed to open SwSS recording file %s", gRecordFile.c_str()); - exit(EXIT_FAILURE); - } - gRecordOfs << getTimestamp() << "|recording started" << endl; - } - - // Disable/Enable response publisher recording. 
- if (gResponsePublisherRecord) - { - gResponsePublisherRecordFile = record_location + "/" + responsepublisher_rec_filename; - gResponsePublisherRecordOfs.open(gResponsePublisherRecordFile, std::ofstream::out | std::ofstream::app); - if (!gResponsePublisherRecordOfs.is_open()) - { - SWSS_LOG_ERROR("Failed to open Response Publisher recording file %s", - gResponsePublisherRecordFile.c_str()); - gResponsePublisherRecord = false; - } - else - { - gResponsePublisherRecordOfs << getTimestamp() << "|recording started" - << endl; - } - } - attr.id = SAI_SWITCH_ATTR_PORT_STATE_CHANGE_NOTIFY; attr.value.ptr = (void *)on_port_state_change; attrs.push_back(attr); - attr.id = SAI_SWITCH_ATTR_BFD_SESSION_STATE_CHANGE_NOTIFY; - attr.value.ptr = (void *)on_bfd_session_state_change; - attrs.push_back(attr); - attr.id = SAI_SWITCH_ATTR_SHUTDOWN_REQUEST_NOTIFY; attr.value.ptr = (void *)on_switch_shutdown_request; attrs.push_back(attr); + attr.id = SAI_SWITCH_ATTR_PORT_HOST_TX_READY_NOTIFY; + attr.value.ptr = (void *)on_port_host_tx_ready; + attrs.push_back(attr); + // Instantiate database connectors DBConnector appl_db("APPL_DB", 0); DBConnector config_db("CONFIG_DB", 0); @@ -574,14 +563,77 @@ int main(int argc, char **argv) attr.value.u64 = gSwitchId; attrs.push_back(attr); + auto delay_factor = 1; + bool asan_enabled = false; + + if (getenv("ASAN_OPTIONS")) + { + asan_enabled = true; + delay_factor = 2; + } + + if (gMySwitchType == "voq" || gMySwitchType == "fabric" || gMySwitchType == "chassis-packet" || asan_enabled) + { + /* We set this long timeout in order for orchagent to wait enough time for + * response from syncd. 
It is needed since switch create takes more time + * than default time to create switch if there are lots of front panel ports + * and systems ports to initialize + */ + + if (gMySwitchType == "voq" || gMySwitchType == "chassis-packet") + { + attr.value.u64 = (5 * SAI_REDIS_DEFAULT_SYNC_OPERATION_RESPONSE_TIMEOUT); + } + else if (gMySwitchType == "fabric") + { + attr.value.u64 = (10 * SAI_REDIS_DEFAULT_SYNC_OPERATION_RESPONSE_TIMEOUT); + } + else + { + attr.value.u64 = SAI_REDIS_DEFAULT_SYNC_OPERATION_RESPONSE_TIMEOUT; + } + + attr.value.u64 = attr.value.u64*delay_factor; + attr.id = SAI_REDIS_SWITCH_ATTR_SYNC_OPERATION_RESPONSE_TIMEOUT; + status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_WARN("Failed to set SAI REDIS response timeout"); + } + else + { + SWSS_LOG_NOTICE("SAI REDIS response timeout set successfully to %" PRIu64 " ", attr.value.u64); + } + } + + SWSS_LOG_ERROR("NOA - before create switch"); + status = sai_switch_api->create_switch(&gSwitchId, (uint32_t)attrs.size(), attrs.data()); if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to create a switch, rv:%d", status); - exit(EXIT_FAILURE); + handleSaiFailure(true); } + SWSS_LOG_ERROR("NOA - after create switch"); SWSS_LOG_NOTICE("Create a switch, id:%" PRIu64, gSwitchId); + if (gMySwitchType == "voq" || gMySwitchType == "fabric" || gMySwitchType == "chassis-packet") + { + /* Set syncd response timeout back to the default value */ + attr.id = SAI_REDIS_SWITCH_ATTR_SYNC_OPERATION_RESPONSE_TIMEOUT; + attr.value.u64 = SAI_REDIS_DEFAULT_SYNC_OPERATION_RESPONSE_TIMEOUT; + status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_WARN("Failed to set SAI REDIS response timeout to default"); + } + else + { + SWSS_LOG_NOTICE("SAI REDIS response timeout set successfully to default: %" PRIu64 " ", attr.value.u64); + } + } if (gMySwitchType != "fabric") { @@ -593,7 +645,7 @@ int 
main(int argc, char **argv) if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to get MAC address from switch, rv:%d", status); - exit(EXIT_FAILURE); + handleSaiFailure(true); } else { @@ -608,7 +660,7 @@ int main(int argc, char **argv) if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Fail to get switch virtual router ID %d", status); - exit(EXIT_FAILURE); + handleSaiFailure(true); } gVirtualRouterId = attr.value.oid; @@ -650,7 +702,7 @@ int main(int argc, char **argv) if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to create underlay router interface %d", status); - exit(EXIT_FAILURE); + handleSaiFailure(true); } SWSS_LOG_NOTICE("Created underlay router interface ID %" PRIx64, gUnderlayIfId); @@ -667,6 +719,8 @@ int main(int argc, char **argv) if (gMySwitchType == "voq") { orchDaemon->setFabricEnabled(true); + orchDaemon->setFabricPortStatEnabled(true); + orchDaemon->setFabricQueueStatEnabled(true); } } else diff --git a/orchagent/mirrororch.cpp b/orchagent/mirrororch.cpp index 0a73030f40..5647d48879 100644 --- a/orchagent/mirrororch.cpp +++ b/orchagent/mirrororch.cpp @@ -327,6 +327,7 @@ bool MirrorOrch::validateSrcPortList(const string& srcPortList) if (port.m_type == Port::LAG) { vector portv; + int portCount = 0; m_portsOrch->getLagMember(port, portv); for (const auto p : portv) { @@ -336,6 +337,13 @@ bool MirrorOrch::validateSrcPortList(const string& srcPortList) p.m_alias.c_str(), port.m_alias.c_str(), srcPortList.c_str()); return false; } + portCount++; + } + if (!portCount) + { + SWSS_LOG_ERROR("Source LAG %s is empty. 
set mirror session to inactive", + port.m_alias.c_str());; + return false; } } } @@ -344,6 +352,30 @@ bool MirrorOrch::validateSrcPortList(const string& srcPortList) return true; } +bool MirrorOrch::isHwResourcesAvailable() +{ + uint64_t availCount = 0; + + sai_status_t status = sai_object_type_get_availability( + gSwitchId, SAI_OBJECT_TYPE_MIRROR_SESSION, 0, nullptr, &availCount + ); + if (status != SAI_STATUS_SUCCESS) + { + if ((status == SAI_STATUS_NOT_SUPPORTED) || + (status == SAI_STATUS_NOT_IMPLEMENTED) || + SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || + SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status)) + { + SWSS_LOG_WARN("Mirror session resource availability monitoring is not supported. Skipping ..."); + return true; + } + + return parseHandleSaiStatusFailure(handleSaiGetStatus(SAI_API_MIRROR, status)); + } + + return availCount > 0; +} + task_process_status MirrorOrch::createEntry(const string& key, const vector& data) { SWSS_LOG_ENTER(); @@ -351,8 +383,7 @@ task_process_status MirrorOrch::createEntry(const string& key, const vectorattach(this, entry.dstIp); } + SWSS_LOG_NOTICE("Created mirror session %s", key.c_str()); + return task_process_status::task_success; } @@ -550,13 +586,30 @@ void MirrorOrch::setSessionState(const string& name, const MirrorEntry& session, if (attr.empty() || attr == MIRROR_SESSION_MONITOR_PORT) { Port port; - m_portsOrch->getPort(session.neighborInfo.portId, port); + if ((gMySwitchType == "voq") && (session.type == MIRROR_SESSION_ERSPAN)) + { + if (!m_portsOrch->getRecircPort(port, Port::Role::Rec)) + { + SWSS_LOG_ERROR("Failed to get recirc port for mirror session %s", name.c_str()); + return; + } + } + else + { + m_portsOrch->getPort(session.neighborInfo.portId, port); + } fvVector.emplace_back(MIRROR_SESSION_MONITOR_PORT, port.m_alias); } if (attr.empty() || attr == MIRROR_SESSION_DST_MAC_ADDRESS) { - value = session.neighborInfo.mac.to_string(); + if ((gMySwitchType == "voq") && (session.type == MIRROR_SESSION_ERSPAN)) + { + value 
= gMacAddress.to_string(); + } else + { + value = session.neighborInfo.mac.to_string(); + } fvVector.emplace_back(MIRROR_SESSION_DST_MAC_ADDRESS, value); } @@ -764,8 +817,7 @@ bool MirrorOrch::setUnsetPortMirror(Port port, if (set) { port_attr.value.objlist.count = 1; - port_attr.value.objlist.list = reinterpret_cast(calloc(port_attr.value.objlist.count, sizeof(sai_object_id_t))); - port_attr.value.objlist.list[0] = sessionId; + port_attr.value.objlist.list = &sessionId; } else { @@ -894,9 +946,9 @@ bool MirrorOrch::activateSession(const string& name, MirrorEntry& session) if (gMySwitchType == "voq") { Port recirc_port; - if (!m_portsOrch->getRecircPort(recirc_port, "Rec")) + if (!m_portsOrch->getRecircPort(recirc_port, Port::Role::Rec)) { - SWSS_LOG_ERROR("Failed to get recirc prot"); + SWSS_LOG_ERROR("Failed to get recirc port"); return false; } attr.value.oid = recirc_port.m_port_id; @@ -967,9 +1019,9 @@ bool MirrorOrch::activateSession(const string& name, MirrorEntry& session) attr.id = SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS; // Use router mac as mirror dst mac in voq switch. 
- if (gMySwitchType == "voq") + if ((gMySwitchType == "voq") && (session.type == MIRROR_SESSION_ERSPAN)) { - memcpy(attr.value.mac, gMacAddress.getMac(), sizeof(sai_mac_t)); + memcpy(attr.value.mac, gMacAddress.getMac(), sizeof(sai_mac_t)); } else { @@ -1083,13 +1135,19 @@ bool MirrorOrch::updateSessionDstMac(const string& name, MirrorEntry& session) sai_attribute_t attr; attr.id = SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS; - memcpy(attr.value.mac, session.neighborInfo.mac.getMac(), sizeof(sai_mac_t)); + if ((gMySwitchType == "voq") && (session.type == MIRROR_SESSION_ERSPAN)) + { + memcpy(attr.value.mac, gMacAddress.getMac(), sizeof(sai_mac_t)); + } else + { + memcpy(attr.value.mac, session.neighborInfo.mac.getMac(), sizeof(sai_mac_t)); + } sai_status_t status = sai_mirror_api->set_mirror_session_attribute(session.sessionId, &attr); if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to update mirror session %s destination MAC to %s, rv:%d", - name.c_str(), session.neighborInfo.mac.to_string().c_str(), status); + name.c_str(), sai_serialize_mac(attr.value.mac).c_str(), status); task_process_status handle_status = handleSaiSetStatus(SAI_API_MIRROR, status); if (handle_status != task_success) { @@ -1098,7 +1156,7 @@ bool MirrorOrch::updateSessionDstMac(const string& name, MirrorEntry& session) } SWSS_LOG_NOTICE("Update mirror session %s destination MAC to %s", - name.c_str(), session.neighborInfo.mac.to_string().c_str()); + name.c_str(), sai_serialize_mac(attr.value.mac).c_str()); setSessionState(name, session, MIRROR_SESSION_DST_MAC_ADDRESS); @@ -1116,7 +1174,20 @@ bool MirrorOrch::updateSessionDstPort(const string& name, MirrorEntry& session) sai_attribute_t attr; attr.id = SAI_MIRROR_SESSION_ATTR_MONITOR_PORT; - attr.value.oid = session.neighborInfo.portId; + // Set monitor port to recirc port in voq switch. 
+ if ((gMySwitchType == "voq") && (session.type == MIRROR_SESSION_ERSPAN)) + { + if (!m_portsOrch->getRecircPort(port, Port::Role::Rec)) + { + SWSS_LOG_ERROR("Failed to get recirc port for mirror session %s", name.c_str()); + return false; + } + attr.value.oid = port.m_port_id; + } + else + { + attr.value.oid = session.neighborInfo.portId; + } sai_status_t status = sai_mirror_api-> set_mirror_session_attribute(session.sessionId, &attr); diff --git a/orchagent/mirrororch.h b/orchagent/mirrororch.h index b31d58bff3..d498a7ef6c 100644 --- a/orchagent/mirrororch.h +++ b/orchagent/mirrororch.h @@ -104,6 +104,8 @@ class MirrorOrch : public Orch, public Observer, public Subject // session_name -> VLAN | monitor_port_alias | next_hop_ip map m_recoverySessionMap; + bool isHwResourcesAvailable(); + task_process_status createEntry(const string&, const vector&); task_process_status deleteEntry(const string&); diff --git a/orchagent/mplsrouteorch.cpp b/orchagent/mplsrouteorch.cpp index ef40987a19..73dbbdb194 100644 --- a/orchagent/mplsrouteorch.cpp +++ b/orchagent/mplsrouteorch.cpp @@ -465,7 +465,7 @@ bool RouteOrch::addLabelRoute(LabelRouteBulkContext& ctx, const NextHopGroupKey Label& label = ctx.label; /* next_hop_id indicates the next hop id or next hop group id of this route */ - sai_object_id_t next_hop_id; + sai_object_id_t next_hop_id = SAI_NULL_OBJECT_ID; bool blackhole = false; if (m_syncdLabelRoutes.find(vrf_id) == m_syncdLabelRoutes.end()) diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index 5b7b0570a5..fc39e8aa28 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -23,6 +23,7 @@ #include "aclorch.h" #include "routeorch.h" #include "fdborch.h" +#include "qosorch.h" /* Global variables */ extern Directory gDirectory; @@ -32,6 +33,7 @@ extern RouteOrch *gRouteOrch; extern AclOrch *gAclOrch; extern PortsOrch *gPortsOrch; extern FdbOrch *gFdbOrch; +extern QosOrch *gQosOrch; extern sai_object_id_t gVirtualRouterId; extern sai_object_id_t 
gUnderlayIfId; @@ -42,7 +44,6 @@ extern sai_next_hop_api_t* sai_next_hop_api; extern sai_router_interface_api_t* sai_router_intfs_api; /* Constants */ -#define MUX_TUNNEL "MuxTunnel0" #define MUX_ACL_TABLE_NAME INGRESS_TABLE_DROP #define MUX_ACL_RULE_NAME "mux_acl_rule" #define MUX_HW_STATE_UNKNOWN "unknown" @@ -115,6 +116,10 @@ static sai_status_t create_route(IpPrefix &pfx, sai_object_id_t nh) sai_status_t status = sai_route_api->create_route_entry(&route_entry, (uint32_t)attrs.size(), attrs.data()); if (status != SAI_STATUS_SUCCESS) { + if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) { + SWSS_LOG_NOTICE("Tunnel route to %s already exists", pfx.to_string().c_str()); + return SAI_STATUS_SUCCESS; + } SWSS_LOG_ERROR("Failed to create tunnel route %s,nh %" PRIx64 " rv:%d", pfx.getIp().to_string().c_str(), nh, status); return status; @@ -144,6 +149,10 @@ static sai_status_t remove_route(IpPrefix &pfx) sai_status_t status = sai_route_api->remove_route_entry(&route_entry); if (status != SAI_STATUS_SUCCESS) { + if (status == SAI_STATUS_ITEM_NOT_FOUND) { + SWSS_LOG_NOTICE("Tunnel route to %s already removed", pfx.to_string().c_str()); + return SAI_STATUS_SUCCESS; + } SWSS_LOG_ERROR("Failed to remove tunnel route %s, rv:%d", pfx.getIp().to_string().c_str(), status); return status; @@ -162,7 +171,40 @@ static sai_status_t remove_route(IpPrefix &pfx) return status; } -static sai_object_id_t create_tunnel(const IpAddress* p_dst_ip, const IpAddress* p_src_ip) +/** + * @brief sets the given route to point to the given nexthop + * @param pfx IpPrefix of the route + * @param nexthop NextHopKey of the nexthop + * @return SAI_STATUS_SUCCESS on success + */ +static sai_status_t set_route(const IpPrefix& pfx, sai_object_id_t next_hop_id) +{ + /* set route entry to point to nh */ + sai_route_entry_t route_entry; + sai_attribute_t route_attr; + + route_entry.vr_id = gVirtualRouterId; + route_entry.switch_id = gSwitchId; + copy(route_entry.destination, pfx); + + route_attr.id = 
SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + route_attr.value.oid = next_hop_id; + + sai_status_t status = sai_route_api->set_route_entry_attribute(&route_entry, &route_attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set route entry %s nh %" PRIx64 " rv:%d", + pfx.to_string().c_str(), next_hop_id, status); + } + return status; +} + +static sai_object_id_t create_tunnel( + const IpAddress* p_dst_ip, + const IpAddress* p_src_ip, + sai_object_id_t tc_to_dscp_map_id, + sai_object_id_t tc_to_queue_map_id, + string dscp_mode_name) { sai_status_t status; @@ -206,6 +248,30 @@ static sai_object_id_t create_tunnel(const IpAddress* p_dst_ip, const IpAddress* attr.value.s32 = SAI_TUNNEL_TTL_MODE_PIPE_MODEL; tunnel_attrs.push_back(attr); + attr.id = SAI_TUNNEL_ATTR_DECAP_TTL_MODE; + attr.value.s32 = SAI_TUNNEL_TTL_MODE_PIPE_MODEL; + tunnel_attrs.push_back(attr); + + if (dscp_mode_name == "uniform" || dscp_mode_name == "pipe") + { + sai_tunnel_dscp_mode_t dscp_mode; + if (dscp_mode_name == "uniform") + { + dscp_mode = SAI_TUNNEL_DSCP_MODE_UNIFORM_MODEL; + } + else + { + dscp_mode = SAI_TUNNEL_DSCP_MODE_PIPE_MODEL; + } + attr.id = SAI_TUNNEL_ATTR_ENCAP_DSCP_MODE; + attr.value.s32 = dscp_mode; + tunnel_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_ATTR_DECAP_DSCP_MODE; + attr.value.s32 = dscp_mode; + tunnel_attrs.push_back(attr); + } + attr.id = SAI_TUNNEL_ATTR_LOOPBACK_PACKET_ACTION; attr.value.s32 = SAI_PACKET_ACTION_DROP; tunnel_attrs.push_back(attr); @@ -224,6 +290,22 @@ static sai_object_id_t create_tunnel(const IpAddress* p_dst_ip, const IpAddress* tunnel_attrs.push_back(attr); } + // DSCP rewriting + if (tc_to_dscp_map_id != SAI_NULL_OBJECT_ID) + { + attr.id = SAI_TUNNEL_ATTR_ENCAP_QOS_TC_AND_COLOR_TO_DSCP_MAP; + attr.value.oid = tc_to_dscp_map_id; + tunnel_attrs.push_back(attr); + } + + // TC remapping + if (tc_to_queue_map_id != SAI_NULL_OBJECT_ID) + { + attr.id = SAI_TUNNEL_ATTR_ENCAP_QOS_TC_TO_QUEUE_MAP; + attr.value.oid = tc_to_queue_map_id; + 
tunnel_attrs.push_back(attr); + } + sai_object_id_t tunnel_id; status = sai_tunnel_api->create_tunnel(&tunnel_id, gSwitchId, (uint32_t)tunnel_attrs.size(), tunnel_attrs.data()); if (status != SAI_STATUS_SUCCESS) @@ -313,8 +395,8 @@ static bool remove_nh_tunnel(sai_object_id_t nh_id, IpAddress& ipAddr) return true; } -MuxCable::MuxCable(string name, IpPrefix& srv_ip4, IpPrefix& srv_ip6, IpAddress peer_ip) - :mux_name_(name), srv_ip4_(srv_ip4), srv_ip6_(srv_ip6), peer_ip4_(peer_ip) +MuxCable::MuxCable(string name, IpPrefix& srv_ip4, IpPrefix& srv_ip6, IpAddress peer_ip, MuxCableType cable_type) + :mux_name_(name), srv_ip4_(srv_ip4), srv_ip6_(srv_ip6), peer_ip4_(peer_ip), cable_type_(cable_type) { mux_orch_ = gDirectory.get(); mux_cb_orch_ = gDirectory.get(); @@ -329,6 +411,7 @@ MuxCable::MuxCable(string name, IpPrefix& srv_ip4, IpPrefix& srv_ip6, IpAddress /* Set initial state to "standby" */ stateStandby(); + state_ = MuxState::MUX_STATE_STANDBY; } bool MuxCable::stateInitActive() @@ -404,19 +487,25 @@ void MuxCable::setState(string new_state) new_state = muxStateValToString.at(ns); auto it = muxStateTransition.find(make_pair(state_, ns)); - if (it == muxStateTransition.end()) { // Update HW Mux cable state anyways mux_cb_orch_->updateMuxState(mux_name_, new_state); - SWSS_LOG_ERROR("State transition from %s to %s is not-handled ", - muxStateValToString.at(state_).c_str(), new_state.c_str()); + if (strcmp(new_state.c_str(), muxStateValToString.at(state_).c_str()) == 0) + { + SWSS_LOG_NOTICE("[%s] Maintaining current MUX state", mux_name_.c_str()); + } + else + { + SWSS_LOG_ERROR("State transition from %s to %s is not-handled ", + muxStateValToString.at(state_).c_str(), new_state.c_str()); + } return; } mux_cb_orch_->updateMuxMetricState(mux_name_, new_state, true); - MuxState state = state_; + prev_state_ = state_; state_ = ns; st_chg_in_progress_ = true; @@ -424,7 +513,7 @@ void MuxCable::setState(string new_state) if 
(!(this->*(state_machine_handlers_[it->second]))()) { //Reset back to original state - state_ = state; + state_ = prev_state_; st_chg_in_progress_ = false; st_chg_failed_ = true; throw std::runtime_error("Failed to handle state transition"); @@ -440,6 +529,51 @@ void MuxCable::setState(string new_state) return; } +void MuxCable::rollbackStateChange() +{ + if (prev_state_ == MuxState::MUX_STATE_FAILED || prev_state_ == MuxState::MUX_STATE_PENDING) + { + SWSS_LOG_ERROR("[%s] Rollback to %s not supported", mux_name_.c_str(), + muxStateValToString.at(prev_state_).c_str()); + return; + } + SWSS_LOG_WARN("[%s] Rolling back state change to %s", mux_name_.c_str(), + muxStateValToString.at(prev_state_).c_str()); + mux_cb_orch_->updateMuxMetricState(mux_name_, muxStateValToString.at(prev_state_), true); + st_chg_in_progress_ = true; + state_ = prev_state_; + bool success = false; + switch (prev_state_) + { + case MuxState::MUX_STATE_ACTIVE: + success = stateActive(); + break; + case MuxState::MUX_STATE_INIT: + case MuxState::MUX_STATE_STANDBY: + success = stateStandby(); + break; + case MuxState::MUX_STATE_FAILED: + case MuxState::MUX_STATE_PENDING: + // Check at the start of the function means we will never reach here + SWSS_LOG_ERROR("[%s] Rollback to %s not supported", mux_name_.c_str(), + muxStateValToString.at(prev_state_).c_str()); + return; + } + st_chg_in_progress_ = false; + if (success) + { + st_chg_failed_ = false; + } + else + { + st_chg_failed_ = true; + SWSS_LOG_ERROR("[%s] Rollback to %s failed", + mux_name_.c_str(), muxStateValToString.at(prev_state_).c_str()); + } + mux_cb_orch_->updateMuxMetricState(mux_name_, muxStateValToString.at(state_), false); + mux_cb_orch_->updateMuxState(mux_name_, muxStateValToString.at(state_)); +} + string MuxCable::getState() { SWSS_LOG_INFO("Get state request for %s, state %s", @@ -450,6 +584,11 @@ string MuxCable::getState() bool MuxCable::aclHandler(sai_object_id_t port, string alias, bool add) { + if (cable_type_ == 
MuxCableType::ACTIVE_ACTIVE) + { + SWSS_LOG_INFO("Skip programming ACL for mux port %s, cable type %d, add %d", alias.c_str(), cable_type_, add); + return true; + } if (add) { acl_handler_ = make_shared(port, alias); @@ -476,9 +615,13 @@ bool MuxCable::isIpInSubnet(IpAddress ip) bool MuxCable::nbrHandler(bool enable, bool update_rt) { + bool ret; + SWSS_LOG_NOTICE("Processing neighbors for mux %s, enable %d, state %d", + mux_name_.c_str(), enable, state_); if (enable) { - return nbr_handler_->enable(update_rt); + ret = nbr_handler_->enable(update_rt); + updateRoutes(); } else { @@ -488,13 +631,16 @@ bool MuxCable::nbrHandler(bool enable, bool update_rt) SWSS_LOG_INFO("Null NH object id, retry for %s", peer_ip4_.to_string().c_str()); return false; } - - return nbr_handler_->disable(tnh); + updateRoutes(); + ret = nbr_handler_->disable(tnh); } + return ret; } void MuxCable::updateNeighbor(NextHopKey nh, bool add) { + SWSS_LOG_NOTICE("Processing update on neighbor %s for mux %s, add %d, state %d", + nh.ip_address.to_string().c_str(), mux_name_.c_str(), add, state_); sai_object_id_t tnh = mux_orch_->getNextHopTunnelId(MUX_TUNNEL, peer_ip4_); nbr_handler_->update(nh, tnh, add, state_); if (add) @@ -505,14 +651,37 @@ void MuxCable::updateNeighbor(NextHopKey nh, bool add) { mux_orch_->removeNexthop(nh); } + updateRoutes(); +} + +/** + * @brief updates all routes pointing to the cables neighbor list + */ +void MuxCable::updateRoutes() +{ + MuxNeighbor neighbors = nbr_handler_->getNeighbors(); + string alias = nbr_handler_->getAlias(); + for (auto nh = neighbors.begin(); nh != neighbors.end(); nh ++) + { + std::set routes; + NextHopKey nhkey(nh->first, alias); + if (gRouteOrch->getRoutesForNexthop(routes, nhkey)) + { + for (auto rt = routes.begin(); rt != routes.end(); rt++) + { + mux_orch_->updateRoute(rt->prefix, true); + } + } + } } void MuxNbrHandler::update(NextHopKey nh, sai_object_id_t tunnelId, bool add, MuxState state) { + uint32_t num_routes = 0; + 
SWSS_LOG_INFO("Neigh %s on %s, add %d, state %d", nh.ip_address.to_string().c_str(), nh.alias.c_str(), add, state); - MuxCableOrch* mux_cb_orch = gDirectory.get(); IpPrefix pfx = nh.ip_address.to_string(); if (add) @@ -535,11 +704,12 @@ void MuxNbrHandler::update(NextHopKey nh, sai_object_id_t tunnelId, bool add, Mu case MuxState::MUX_STATE_ACTIVE: neighbors_[nh.ip_address] = gNeighOrch->getLocalNextHopId(nh); gNeighOrch->enableNeighbor(nh); + gRouteOrch->updateNextHopRoutes(nh, num_routes); break; case MuxState::MUX_STATE_STANDBY: neighbors_[nh.ip_address] = tunnelId; gNeighOrch->disableNeighbor(nh); - mux_cb_orch->addTunnelRoute(nh); + updateTunnelRoute(nh, true); create_route(pfx, tunnelId); break; default: @@ -554,7 +724,7 @@ void MuxNbrHandler::update(NextHopKey nh, sai_object_id_t tunnelId, bool add, Mu if (state == MuxState::MUX_STATE_STANDBY) { remove_route(pfx); - mux_cb_orch->removeTunnelRoute(nh); + updateTunnelRoute(nh, false); } neighbors_.erase(nh.ip_address); } @@ -563,7 +733,6 @@ void MuxNbrHandler::update(NextHopKey nh, sai_object_id_t tunnelId, bool add, Mu bool MuxNbrHandler::enable(bool update_rt) { NeighborEntry neigh; - MuxCableOrch* mux_cb_orch = gDirectory.get(); auto it = neighbors_.begin(); while (it != neighbors_.end()) @@ -619,7 +788,7 @@ bool MuxNbrHandler::enable(bool update_rt) { return false; } - mux_cb_orch->removeTunnelRoute(nh_key); + updateTunnelRoute(nh_key, false); } it++; @@ -631,7 +800,6 @@ bool MuxNbrHandler::enable(bool update_rt) bool MuxNbrHandler::disable(sai_object_id_t tnh) { NeighborEntry neigh; - MuxCableOrch* mux_cb_orch = gDirectory.get(); auto it = neighbors_.begin(); while (it != neighbors_.end()) @@ -677,7 +845,7 @@ bool MuxNbrHandler::disable(sai_object_id_t tnh) return false; } - mux_cb_orch->addTunnelRoute(nh_key); + updateTunnelRoute(nh_key, true); IpPrefix pfx = it->first.to_string(); if (create_route(pfx, it->second) != SAI_STATUS_SUCCESS) @@ -702,52 +870,66 @@ sai_object_id_t 
MuxNbrHandler::getNextHopId(const NextHopKey nhKey) return SAI_NULL_OBJECT_ID; } -std::map MuxAclHandler::acl_table_; +void MuxNbrHandler::updateTunnelRoute(NextHopKey nh, bool add) +{ + MuxOrch* mux_orch = gDirectory.get(); + MuxCableOrch* mux_cb_orch = gDirectory.get(); + + if (mux_orch->isSkipNeighbor(nh.ip_address)) + { + SWSS_LOG_INFO("Skip updating neighbor %s, add %d", nh.ip_address.to_string().c_str(), add); + return; + } + + if (add) + { + mux_cb_orch->addTunnelRoute(nh); + } + else + { + mux_cb_orch->removeTunnelRoute(nh); + } +} MuxAclHandler::MuxAclHandler(sai_object_id_t port, string alias) { SWSS_LOG_ENTER(); + string value; + shared_ptr m_config_db = shared_ptr(new DBConnector("CONFIG_DB", 0)); + unique_ptr
m_systemDefaultsTable = unique_ptr
(new Table(m_config_db.get(), "SYSTEM_DEFAULTS")); + m_systemDefaultsTable->hget("mux_tunnel_egress_acl", "status", value); + is_ingress_acl_ = value != "enabled"; + // There is one handler instance per MUX port - string table_name = MUX_ACL_TABLE_NAME; + string table_name = is_ingress_acl_ ? MUX_ACL_TABLE_NAME : EGRESS_TABLE_DROP; string rule_name = MUX_ACL_RULE_NAME; port_ = port; alias_ = alias; - auto found = acl_table_.find(table_name); - if (found == acl_table_.end()) - { - SWSS_LOG_NOTICE("First time create for port %" PRIx64 "", port); + // Always try to create the table first. If it already exists, function will return early. + createMuxAclTable(port, table_name); + + SWSS_LOG_NOTICE("Binding port %" PRIx64 "", port); - // First time handling of Mux Table, create ACL table, and bind - createMuxAclTable(port, table_name); + AclRule* rule = gAclOrch->getAclRule(table_name, rule_name); + if (rule == nullptr) + { shared_ptr newRule = - make_shared(gAclOrch, rule_name, table_name); + make_shared(gAclOrch, rule_name, table_name, false /*no counters*/); createMuxAclRule(newRule, table_name); } else { - SWSS_LOG_NOTICE("Binding port %" PRIx64 "", port); - - AclRule* rule = gAclOrch->getAclRule(table_name, rule_name); - if (rule == nullptr) - { - shared_ptr newRule = - make_shared(gAclOrch, rule_name, table_name); - createMuxAclRule(newRule, table_name); - } - else - { - gAclOrch->updateAclRule(table_name, rule_name, MATCH_IN_PORTS, &port, RULE_OPER_ADD); - } + gAclOrch->updateAclRule(table_name, rule_name, MATCH_IN_PORTS, &port, RULE_OPER_ADD); } } MuxAclHandler::~MuxAclHandler(void) { SWSS_LOG_ENTER(); - string table_name = MUX_ACL_TABLE_NAME; + string table_name = is_ingress_acl_ ? 
MUX_ACL_TABLE_NAME : EGRESS_TABLE_DROP; string rule_name = MUX_ACL_RULE_NAME; SWSS_LOG_NOTICE("Un-Binding port %" PRIx64 "", port_); @@ -773,27 +955,20 @@ void MuxAclHandler::createMuxAclTable(sai_object_id_t port, string strTable) { SWSS_LOG_ENTER(); - auto inserted = acl_table_.emplace(piecewise_construct, - std::forward_as_tuple(strTable), - std::forward_as_tuple(gAclOrch, strTable)); - - assert(inserted.second); - - AclTable& acl_table = inserted.first->second; - sai_object_id_t table_oid = gAclOrch->getTableById(strTable); if (table_oid != SAI_NULL_OBJECT_ID) { // DROP ACL table is already created - SWSS_LOG_NOTICE("ACL table %s exists, reuse the same", strTable.c_str()); - acl_table = *(gAclOrch->getTableByOid(table_oid)); + SWSS_LOG_INFO("ACL table %s exists, reuse the same", strTable.c_str()); return; } + SWSS_LOG_NOTICE("First time create for port %" PRIx64 "", port); + AclTable acl_table(gAclOrch, strTable); auto dropType = gAclOrch->getAclTableType(TABLE_TYPE_DROP); assert(dropType); acl_table.validateAddType(*dropType); - acl_table.stage = ACL_STAGE_INGRESS; + acl_table.stage = is_ingress_acl_ ? 
ACL_STAGE_INGRESS : ACL_STAGE_EGRESS; gAclOrch->addAclTable(acl_table); bindAllPorts(acl_table); } @@ -835,6 +1010,13 @@ void MuxAclHandler::bindAllPorts(AclTable &acl_table) acl_table.link(port.m_port_id); acl_table.bind(port.m_port_id); } + else if (port.m_type == Port::LAG && !is_ingress_acl_) + { + SWSS_LOG_INFO("Binding LAG %" PRIx64 " to ACL table %s", port.m_lag_id, acl_table.id.c_str()); + + acl_table.link(port.m_lag_id); + acl_table.bind(port.m_lag_id); + } } } @@ -894,6 +1076,111 @@ sai_object_id_t MuxOrch::getNextHopTunnelId(std::string tunnelKey, IpAddress& ip return it->second.nh_id; } +/** + * @brief updates the given route to point to a single active NH or tunnel + * @param pfx IpPrefix of route to update + * @param remove bool only true when route is getting removed + */ +void MuxOrch::updateRoute(const IpPrefix &pfx, bool add) +{ + NextHopGroupKey nhg_key; + NextHopGroupEntry nhg_entry; + + if (!add) + { + mux_multi_active_nh_table.erase(pfx); + return; + } + + /* get nexthop group key from syncd */ + nhg_key = gRouteOrch->getSyncdRouteNhgKey(gVirtualRouterId, pfx); + + /* check for multi-nh neighbors. 
+ * if none are present, ignore + */ + if (nhg_key.getSize() <= 1) + { + return; + } + + std::set nextHops; + sai_object_id_t next_hop_id; + sai_status_t status; + bool active_found = false; + + /* get nexthops from nexthop group */ + nextHops = nhg_key.getNextHops(); + + auto it = mux_multi_active_nh_table.find(pfx); + if (it != mux_multi_active_nh_table.end()) + { + /* This will only work for configured MUX neighbors (most cases) + * TODO: add way to find MUX from neighbor + */ + MuxCable* cable = findMuxCableInSubnet(it->second.ip_address); + auto standalone = standalone_tunnel_neighbors_.find(it->second.ip_address); + + if ((cable == nullptr && standalone == standalone_tunnel_neighbors_.end()) || + cable->isActive()) + { + SWSS_LOG_INFO("Route %s pointing to active neighbor %s", + pfx.to_string().c_str(), it->second.to_string().c_str()); + return; + } + } + + SWSS_LOG_NOTICE("Updating route %s pointing to Mux nexthops %s", + pfx.to_string().c_str(), nhg_key.to_string().c_str()); + + for (auto it = nextHops.begin(); it != nextHops.end(); it++) + { + NextHopKey nexthop = *it; + /* This will only work for configured MUX neighbors (most cases) + * TODO: add way to find MUX from neighbor + */ + MuxCable* cable = findMuxCableInSubnet(nexthop.ip_address); + auto standalone = standalone_tunnel_neighbors_.find(nexthop.ip_address); + + if ((cable == nullptr && standalone == standalone_tunnel_neighbors_.end()) || + cable->isActive()) + { + /* Here we pull from local nexthop ID because neighbor update occurs during state change + * before nexthopID is updated in neighorch. 
This ensures that if a neighbor is Active + * only that neighbor's nexthop ID is added, and not the tunnel nexthop + */ + next_hop_id = gNeighOrch->getLocalNextHopId(nexthop); + /* set route entry to point to nh */ + status = set_route(pfx, next_hop_id); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set route entry %s to nexthop %s", + pfx.to_string().c_str(), nexthop.to_string().c_str()); + continue; + } + SWSS_LOG_INFO("setting route %s with nexthop %s %" PRIx64 "", + pfx.to_string().c_str(), nexthop.to_string().c_str(), next_hop_id); + mux_multi_active_nh_table[pfx] = nexthop; + active_found = true; + break; + } + } + + if (!active_found) + { + next_hop_id = getNextHopTunnelId(MUX_TUNNEL, mux_peer_switch_); + /* no active nexthop found, point to first */ + SWSS_LOG_INFO("No Active neighbors found, setting route %s to point to tun", + pfx.getIp().to_string().c_str()); + status = set_route(pfx, next_hop_id); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set route entry %s to tunnel", + pfx.getIp().to_string().c_str()); + } + mux_multi_active_nh_table.erase(pfx); + } +} + MuxCable* MuxOrch::findMuxCableInSubnet(IpAddress ip) { for (auto it = mux_cable_tb_.begin(); it != mux_cable_tb_.end(); it++) @@ -1025,6 +1312,37 @@ void MuxOrch::updateNeighbor(const NeighborUpdate& update) return; } + bool is_tunnel_route_installed = isStandaloneTunnelRouteInstalled(update.entry.ip_address); + // Handling zero MAC neighbor updates + if (!update.mac) + { + /* For neighbors that were previously resolvable but are now unresolvable, + * we expect such neighbor entries to be deleted prior to a zero MAC update + * arriving for that same neighbor. 
+ */ + + if (update.add) + { + if (!is_tunnel_route_installed) + { + createStandaloneTunnelRoute(update.entry.ip_address); + } + /* If the MAC address in the neighbor entry is zero but the neighbor IP + * is already present in standalone_tunnel_neighbors_, assume we have already + * added a tunnel route for it and exit early + */ + return; + } + } + /* If the update operation for a neighbor contains a non-zero MAC, we must + * make sure to remove any existing tunnel routes to prevent conflicts. + * This block also covers the case of neighbor deletion. + */ + if (is_tunnel_route_installed) + { + removeStandaloneTunnelRoute(update.entry.ip_address); + } + for (auto it = mux_cable_tb_.begin(); it != mux_cable_tb_.end(); it++) { MuxCable* ptr = it->second.get(); @@ -1091,6 +1409,35 @@ void MuxOrch::removeNexthop(NextHopKey nh) mux_nexthop_tb_.erase(nh); } +/** + * @brief checks if mux nexthop tb contains nexthop + * @param nexthop NextHopKey + * @return true if a mux contains the nexthop + */ +bool MuxOrch::containsNextHop(const NextHopKey& nexthop) +{ + return mux_nexthop_tb_.find(nexthop) != mux_nexthop_tb_.end(); +} + +/** + * @brief checks if a given nexthop group belongs to a mux + * @param nextHops NextHopGroupKey + * @return true if a mux contains any of the nexthops in the group + * false if none of the nexthops belong to a mux + */ +bool MuxOrch::isMuxNexthops(const NextHopGroupKey& nextHops) +{ + const std::set s_nexthops = nextHops.getNextHops(); + for (auto it = s_nexthops.begin(); it != s_nexthops.end(); it ++) + { + if (this->containsNextHop(*it)) + { + return true; + } + } + return false; +} + string MuxOrch::getNexthopMuxName(NextHopKey nh) { if (mux_nexthop_tb_.find(nh) == mux_nexthop_tb_.end()) @@ -1170,9 +1517,36 @@ bool MuxOrch::handleMuxCfg(const Request& request) auto srv_ip = request.getAttrIpPrefix("server_ipv4"); auto srv_ip6 = request.getAttrIpPrefix("server_ipv6"); + MuxCableType cable_type = MuxCableType::ACTIVE_STANDBY; + std::set 
skip_neighbors; + const auto& port_name = request.getKeyString(0); auto op = request.getOperation(); + for (const auto &name : request.getAttrFieldNames()) + { + if (name == "soc_ipv4") + { + auto soc_ip = request.getAttrIpPrefix("soc_ipv4"); + SWSS_LOG_NOTICE("%s: %s was added to ignored neighbor list", port_name.c_str(), soc_ip.getIp().to_string().c_str()); + skip_neighbors.insert(soc_ip.getIp()); + } + else if (name == "soc_ipv6") + { + auto soc_ip6 = request.getAttrIpPrefix("soc_ipv6"); + SWSS_LOG_NOTICE("%s: %s was added to ignored neighbor list", port_name.c_str(), soc_ip6.getIp().to_string().c_str()); + skip_neighbors.insert(soc_ip6.getIp()); + } + else if (name == "cable_type") + { + auto cable_type_str = request.getAttrString("cable_type"); + if (cable_type_str == "active-active") + { + cable_type = MuxCableType::ACTIVE_ACTIVE; + } + } + } + if (op == SET_COMMAND) { if(isMuxExists(port_name)) @@ -1188,9 +1562,10 @@ bool MuxOrch::handleMuxCfg(const Request& request) } mux_cable_tb_[port_name] = std::make_unique - (MuxCable(port_name, srv_ip, srv_ip6, mux_peer_switch_)); + (MuxCable(port_name, srv_ip, srv_ip6, mux_peer_switch_, cable_type)); + addSkipNeighbors(skip_neighbors); - SWSS_LOG_NOTICE("Mux entry for port '%s' was added", port_name.c_str()); + SWSS_LOG_NOTICE("Mux entry for port '%s' was added, cable type %d", port_name.c_str(), cable_type); } else { @@ -1200,6 +1575,7 @@ bool MuxOrch::handleMuxCfg(const Request& request) return true; } + removeSkipNeighbors(skip_neighbors); mux_cable_tb_.erase(port_name); SWSS_LOG_NOTICE("Mux cable for port '%s' was removed", port_name.c_str()); @@ -1219,8 +1595,6 @@ bool MuxOrch::handlePeerSwitch(const Request& request) if (op == SET_COMMAND) { - mux_peer_switch_ = peer_ip; - // Create P2P tunnel when peer_ip is available. 
IpAddresses dst_ips = decap_orch_->getDstIpAddresses(MUX_TUNNEL); if (!dst_ips.getSize()) @@ -1229,10 +1603,33 @@ bool MuxOrch::handlePeerSwitch(const Request& request) MUX_TUNNEL, peer_ip.to_string().c_str()); return false; } - auto it = dst_ips.getIpAddresses().begin(); const IpAddress& dst_ip = *it; - mux_tunnel_id_ = create_tunnel(&peer_ip, &dst_ip); + + // Read dscp_mode of MuxTunnel0 from decap_orch + string dscp_mode_name = decap_orch_->getDscpMode(MUX_TUNNEL); + if (dscp_mode_name == "") + { + SWSS_LOG_NOTICE("dscp_mode for tunnel %s is not available. Will not be applied", MUX_TUNNEL); + } + + // Read tc_to_dscp_map_id of MuxTunnel0 from decap_orch + sai_object_id_t tc_to_dscp_map_id = SAI_NULL_OBJECT_ID; + decap_orch_->getQosMapId(MUX_TUNNEL, encap_tc_to_dscp_field_name, tc_to_dscp_map_id); + if (tc_to_dscp_map_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_NOTICE("tc_to_dscp_map_id for tunnel %s is not available. Will not be applied", MUX_TUNNEL); + } + // Read tc_to_queue_map_id of MuxTunnel0 from decap_orch + sai_object_id_t tc_to_queue_map_id = SAI_NULL_OBJECT_ID; + decap_orch_->getQosMapId(MUX_TUNNEL, encap_tc_to_queue_field_name, tc_to_queue_map_id); + if (tc_to_queue_map_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_NOTICE("tc_to_queue_map_id for tunnel %s is not available. 
Will not be applied", MUX_TUNNEL); + } + + mux_tunnel_id_ = create_tunnel(&peer_ip, &dst_ip, tc_to_dscp_map_id, tc_to_queue_map_id, dscp_mode_name); + mux_peer_switch_ = peer_ip; SWSS_LOG_NOTICE("Mux peer ip '%s' was added, peer name '%s'", peer_ip.to_string().c_str(), peer_name.c_str()); } @@ -1293,6 +1690,32 @@ bool MuxOrch::delOperation(const Request& request) return true; } +void MuxOrch::createStandaloneTunnelRoute(IpAddress neighborIp) +{ + SWSS_LOG_INFO("Creating standalone tunnel route for neighbor %s", neighborIp.to_string().c_str()); + sai_object_id_t tunnel_nexthop = getNextHopTunnelId(MUX_TUNNEL, mux_peer_switch_); + if (tunnel_nexthop == SAI_NULL_OBJECT_ID) { + SWSS_LOG_NOTICE("%s nexthop not created yet, ignoring tunnel route creation for %s", MUX_TUNNEL, neighborIp.to_string().c_str()); + return; + } + IpPrefix pfx = neighborIp.to_string(); + create_route(pfx, tunnel_nexthop); + standalone_tunnel_neighbors_.insert(neighborIp); +} + +void MuxOrch::removeStandaloneTunnelRoute(IpAddress neighborIp) +{ + SWSS_LOG_INFO("Removing standalone tunnel route for neighbor %s", neighborIp.to_string().c_str()); + IpPrefix pfx = neighborIp.to_string(); + remove_route(pfx); + standalone_tunnel_neighbors_.erase(neighborIp); +} + +bool MuxOrch::isStandaloneTunnelRouteInstalled(const IpAddress& neighborIp) +{ + return standalone_tunnel_neighbors_.find(neighborIp) != standalone_tunnel_neighbors_.end(); +} + MuxCableOrch::MuxCableOrch(DBConnector *db, DBConnector *sdb, const std::string& tableName): Orch2(db, tableName, request_), app_tunnel_route_table_(db, APP_TUNNEL_ROUTE_TABLE_NAME), @@ -1386,10 +1809,25 @@ bool MuxCableOrch::addOperation(const Request& request) { mux_obj->setState(state); } - catch(const std::runtime_error& error) + catch(const std::runtime_error& e) { SWSS_LOG_ERROR("Mux Error setting state %s for port %s. 
Error: %s", - state.c_str(), port_name.c_str(), error.what()); + state.c_str(), port_name.c_str(), e.what()); + mux_obj->rollbackStateChange(); + return true; + } + catch (const std::logic_error& e) + { + SWSS_LOG_ERROR("Logic error while setting state %s for port %s. Error: %s", + state.c_str(), port_name.c_str(), e.what()); + mux_obj->rollbackStateChange(); + return true; + } + catch (const std::exception& e) + { + SWSS_LOG_ERROR("Exception caught while setting state %s for port %s. Error: %s", + state.c_str(), port_name.c_str(), e.what()); + mux_obj->rollbackStateChange(); return true; } diff --git a/orchagent/muxorch.h b/orchagent/muxorch.h index 6e4f70408c..ce6a4d9b3f 100644 --- a/orchagent/muxorch.h +++ b/orchagent/muxorch.h @@ -29,6 +29,12 @@ enum MuxStateChange MUX_STATE_UNKNOWN_STATE }; +enum MuxCableType +{ + ACTIVE_STANDBY, + ACTIVE_ACTIVE +}; + // Forward Declarations class MuxOrch; class MuxCableOrch; @@ -46,9 +52,8 @@ class MuxAclHandler void createMuxAclRule(shared_ptr rule, string strTable); void bindAllPorts(AclTable &acl_table); - // class shared dict: ACL table name -> ACL table - static std::map acl_table_; sai_object_id_t port_ = SAI_NULL_OBJECT_ID; + bool is_ingress_acl_ = true; string alias_; }; @@ -66,6 +71,11 @@ class MuxNbrHandler void update(NextHopKey nh, sai_object_id_t, bool = true, MuxState = MuxState::MUX_STATE_INIT); sai_object_id_t getNextHopId(const NextHopKey); + MuxNeighbor getNeighbors() const { return neighbors_; }; + string getAlias() const { return alias_; }; + +private: + inline void updateTunnelRoute(NextHopKey, bool = true); private: MuxNeighbor neighbors_; @@ -76,7 +86,7 @@ class MuxNbrHandler class MuxCable { public: - MuxCable(string name, IpPrefix& srv_ip4, IpPrefix& srv_ip6, IpAddress peer_ip); + MuxCable(string name, IpPrefix& srv_ip4, IpPrefix& srv_ip6, IpAddress peer_ip, MuxCableType cable_type); bool isActive() const { @@ -87,12 +97,14 @@ class MuxCable using state_machine_handlers = map; void setState(string 
state); + void rollbackStateChange(); string getState(); bool isStateChangeInProgress() { return st_chg_in_progress_; } bool isStateChangeFailed() { return st_chg_failed_; } bool isIpInSubnet(IpAddress ip); void updateNeighbor(NextHopKey nh, bool add); + void updateRoutes(); sai_object_id_t getNextHopId(const NextHopKey nh) { return nbr_handler_->getNextHopId(nh); @@ -107,8 +119,10 @@ class MuxCable bool nbrHandler(bool enable, bool update_routes = true); string mux_name_; + MuxCableType cable_type_; MuxState state_ = MuxState::MUX_STATE_INIT; + MuxState prev_state_; bool st_chg_in_progress_ = false; bool st_chg_failed_ = false; @@ -131,6 +145,9 @@ const request_description_t mux_cfg_request_description = { { "server_ipv4", REQ_T_IP_PREFIX }, { "server_ipv6", REQ_T_IP_PREFIX }, { "address_ipv4", REQ_T_IP }, + { "soc_ipv4", REQ_T_IP_PREFIX }, + { "soc_ipv6", REQ_T_IP_PREFIX }, + { "cable_type", REQ_T_STRING }, }, { } }; @@ -145,6 +162,7 @@ typedef std::unique_ptr MuxCable_T; typedef std::map MuxCableTb; typedef std::map MuxTunnelNHs; typedef std::map NextHopTb; +typedef std::map MuxRouteTb; class MuxCfgRequest : public Request { @@ -171,12 +189,24 @@ class MuxOrch : public Orch2, public Observer, public Subject return mux_cable_tb_.at(portName).get(); } + bool isSkipNeighbor(const IpAddress& nbr) + { + return (skip_neighbors_.find(nbr) != skip_neighbors_.end()); + } + + bool isMultiNexthopRoute(const IpPrefix& pfx) + { + return (mux_multi_active_nh_table.find(pfx) != mux_multi_active_nh_table.end()); + } + MuxCable* findMuxCableInSubnet(IpAddress); bool isNeighborActive(const IpAddress&, const MacAddress&, string&); void update(SubjectType, void *); void addNexthop(NextHopKey, string = ""); void removeNexthop(NextHopKey); + bool containsNextHop(const NextHopKey&); + bool isMuxNexthops(const NextHopGroupKey&); string getNexthopMuxName(NextHopKey); sai_object_id_t getNextHopId(const NextHopKey&); @@ -184,6 +214,9 @@ class MuxOrch : public Orch2, public Observer, 
public Subject bool removeNextHopTunnel(std::string tunnelKey, IpAddress& ipAddr); sai_object_id_t getNextHopTunnelId(std::string tunnelKey, IpAddress& ipAddr); + void updateRoute(const IpPrefix &pfx, bool add); + bool isStandaloneTunnelRouteInstalled(const IpAddress& neighborIp); + private: virtual bool addOperation(const Request& request); virtual bool delOperation(const Request& request); @@ -196,6 +229,26 @@ class MuxOrch : public Orch2, public Observer, public Subject bool getMuxPort(const MacAddress&, const string&, string&); + /*** + * Methods for managing tunnel routes for neighbor IPs not associated + * with a specific mux cable + ***/ + void createStandaloneTunnelRoute(IpAddress neighborIp); + void removeStandaloneTunnelRoute(IpAddress neighborIp); + + void addSkipNeighbors(const std::set &neighbors) + { + skip_neighbors_.insert(neighbors.begin(), neighbors.end()); + } + + void removeSkipNeighbors(const std::set &neighbors) + { + for (const IpAddress &neighbor : neighbors) + { + skip_neighbors_.erase(neighbor); + } + } + IpAddress mux_peer_switch_ = 0x0; sai_object_id_t mux_tunnel_id_ = SAI_NULL_OBJECT_ID; @@ -203,6 +256,9 @@ class MuxOrch : public Orch2, public Observer, public Subject MuxTunnelNHs mux_tunnel_nh_; NextHopTb mux_nexthop_tb_; + /* contains reference of programmed routes by updateRoute */ + MuxRouteTb mux_multi_active_nh_table; + handler_map handler_map_; TunnelDecapOrch *decap_orch_; @@ -210,6 +266,8 @@ class MuxOrch : public Orch2, public Observer, public Subject FdbOrch *fdb_orch_; MuxCfgRequest request_; + std::set standalone_tunnel_neighbors_; + std::set skip_neighbors_; }; const request_description_t mux_cable_request_description = { diff --git a/orchagent/natorch.cpp b/orchagent/natorch.cpp index d7f124a28e..c19f2d7823 100644 --- a/orchagent/natorch.cpp +++ b/orchagent/natorch.cpp @@ -106,8 +106,7 @@ NatOrch::NatOrch(DBConnector *appDb, DBConnector *stateDb, vectorfirst; NatEntryValue &entry = iter->second; uint32_t attr_count; - 
sai_attribute_t nat_entry_attr[4]; - sai_nat_entry_t nat_entry; + sai_attribute_t nat_entry_attr[4] = {}; + sai_nat_entry_t nat_entry = {}; sai_status_t status; uint64_t nat_translations_pkts = 0, nat_translations_bytes = 0; @@ -3564,13 +3519,11 @@ bool NatOrch::getNatCounters(const NatEntry::iterator &iter) return 0; } - memset(nat_entry_attr, 0, sizeof(nat_entry_attr)); nat_entry_attr[0].id = SAI_NAT_ENTRY_ATTR_BYTE_COUNT; nat_entry_attr[1].id = SAI_NAT_ENTRY_ATTR_PACKET_COUNT; attr_count = 2; - memset(&nat_entry, 0, sizeof(nat_entry)); nat_entry.vr_id = gVirtualRouterId; nat_entry.switch_id = gSwitchId; @@ -3627,8 +3580,8 @@ bool NatOrch::getTwiceNatCounters(const TwiceNatEntry::iterator &iter) const TwiceNatEntryKey &key = iter->first; TwiceNatEntryValue &entry = iter->second; uint32_t attr_count; - sai_attribute_t nat_entry_attr[4]; - sai_nat_entry_t dbl_nat_entry; + sai_attribute_t nat_entry_attr[4] = {}; + sai_nat_entry_t dbl_nat_entry = {}; sai_status_t status; uint64_t nat_translations_pkts = 0, nat_translations_bytes = 0; @@ -3639,14 +3592,11 @@ bool NatOrch::getTwiceNatCounters(const TwiceNatEntry::iterator &iter) return 0; } - memset(nat_entry_attr, 0, sizeof(nat_entry_attr)); nat_entry_attr[0].id = SAI_NAT_ENTRY_ATTR_BYTE_COUNT; nat_entry_attr[1].id = SAI_NAT_ENTRY_ATTR_PACKET_COUNT; attr_count = 2; - memset(&dbl_nat_entry, 0, sizeof(dbl_nat_entry)); - dbl_nat_entry.vr_id = gVirtualRouterId; dbl_nat_entry.switch_id = gSwitchId; dbl_nat_entry.nat_type = SAI_NAT_TYPE_DOUBLE_NAT; @@ -3678,9 +3628,9 @@ bool NatOrch::setNatCounters(const NatEntry::iterator &iter) { const IpAddress &ipAddr = iter->first; NatEntryValue &entry = iter->second; - sai_attribute_t nat_entry_attr_packet; - sai_attribute_t nat_entry_attr_byte; - sai_nat_entry_t nat_entry; + sai_attribute_t nat_entry_attr_packet = {}; + sai_attribute_t nat_entry_attr_byte = {}; + sai_nat_entry_t nat_entry = {}; sai_status_t status; uint64_t nat_translations_pkts = 0, nat_translations_bytes = 0; @@ 
-3690,12 +3640,8 @@ bool NatOrch::setNatCounters(const NatEntry::iterator &iter) return 0; } - memset(&nat_entry_attr_packet, 0, sizeof(nat_entry_attr_packet)); - memset(&nat_entry_attr_byte, 0, sizeof(nat_entry_attr_byte)); nat_entry_attr_byte.id = SAI_NAT_ENTRY_ATTR_BYTE_COUNT; nat_entry_attr_packet.id = SAI_NAT_ENTRY_ATTR_PACKET_COUNT; - - memset(&nat_entry, 0, sizeof(nat_entry)); nat_entry.vr_id = gVirtualRouterId; nat_entry.switch_id = gSwitchId; @@ -3762,8 +3708,8 @@ bool NatOrch::getNaptCounters(const NaptEntry::iterator &iter) NaptEntryValue &entry = iter->second; uint8_t protoType = ((naptKey.prototype == "TCP") ? IPPROTO_TCP : IPPROTO_UDP); uint32_t attr_count; - sai_attribute_t nat_entry_attr[4]; - sai_nat_entry_t nat_entry; + sai_attribute_t nat_entry_attr[4] = {}; + sai_nat_entry_t nat_entry = {}; sai_status_t status; uint64_t nat_translations_pkts = 0, nat_translations_bytes = 0; @@ -3774,14 +3720,11 @@ bool NatOrch::getNaptCounters(const NaptEntry::iterator &iter) return 0; } - memset(nat_entry_attr, 0, sizeof(nat_entry_attr)); nat_entry_attr[0].id = SAI_NAT_ENTRY_ATTR_BYTE_COUNT; nat_entry_attr[1].id = SAI_NAT_ENTRY_ATTR_PACKET_COUNT; attr_count = 2; - memset(&nat_entry, 0, sizeof(nat_entry)); - nat_entry.vr_id = gVirtualRouterId; nat_entry.switch_id = gSwitchId; @@ -3848,8 +3791,8 @@ bool NatOrch::getTwiceNaptCounters(const TwiceNaptEntry::iterator &iter) TwiceNaptEntryValue &entry = iter->second; uint8_t protoType = ((key.prototype == "TCP") ? 
IPPROTO_TCP : IPPROTO_UDP); uint32_t attr_count; - sai_attribute_t nat_entry_attr[4]; - sai_nat_entry_t dbl_nat_entry; + sai_attribute_t nat_entry_attr[4] = {}; + sai_nat_entry_t dbl_nat_entry = {}; sai_status_t status; uint64_t nat_translations_pkts = 0, nat_translations_bytes = 0; @@ -3861,14 +3804,11 @@ bool NatOrch::getTwiceNaptCounters(const TwiceNaptEntry::iterator &iter) return 0; } - memset(nat_entry_attr, 0, sizeof(nat_entry_attr)); nat_entry_attr[0].id = SAI_NAT_ENTRY_ATTR_BYTE_COUNT; nat_entry_attr[1].id = SAI_NAT_ENTRY_ATTR_PACKET_COUNT; attr_count = 2; - memset(&dbl_nat_entry, 0, sizeof(dbl_nat_entry)); - dbl_nat_entry.vr_id = gVirtualRouterId; dbl_nat_entry.switch_id = gSwitchId; dbl_nat_entry.nat_type = SAI_NAT_TYPE_DOUBLE_NAT; @@ -3907,9 +3847,9 @@ bool NatOrch::setNaptCounters(const NaptEntry::iterator &iter) const NaptEntryKey &naptKey = iter->first; NaptEntryValue &entry = iter->second; uint8_t protoType = ((naptKey.prototype == "TCP") ? IPPROTO_TCP : IPPROTO_UDP); - sai_attribute_t nat_entry_attr_packet; - sai_attribute_t nat_entry_attr_byte; - sai_nat_entry_t nat_entry; + sai_attribute_t nat_entry_attr_packet = {}; + sai_attribute_t nat_entry_attr_byte = {}; + sai_nat_entry_t nat_entry = {}; sai_status_t status; uint64_t nat_translations_pkts = 0, nat_translations_bytes = 0; @@ -3920,13 +3860,9 @@ bool NatOrch::setNaptCounters(const NaptEntry::iterator &iter) return 0; } - memset(&nat_entry_attr_packet, 0, sizeof(nat_entry_attr_packet)); - memset(&nat_entry_attr_byte, 0, sizeof(nat_entry_attr_byte)); nat_entry_attr_packet.id = SAI_NAT_ENTRY_ATTR_PACKET_COUNT; nat_entry_attr_byte.id = SAI_NAT_ENTRY_ATTR_BYTE_COUNT; - memset(&nat_entry, 0, sizeof(nat_entry)); - nat_entry.vr_id = gVirtualRouterId; nat_entry.switch_id = gSwitchId; @@ -4002,9 +3938,9 @@ bool NatOrch::setTwiceNatCounters(const TwiceNatEntry::iterator &iter) { const TwiceNatEntryKey &key = iter->first; TwiceNatEntryValue &entry = iter->second; - sai_attribute_t nat_entry_attr_packet; 
- sai_attribute_t nat_entry_attr_byte; - sai_nat_entry_t dbl_nat_entry; + sai_attribute_t nat_entry_attr_packet = {}; + sai_attribute_t nat_entry_attr_byte = {}; + sai_nat_entry_t dbl_nat_entry = {}; sai_status_t status; uint64_t nat_translations_pkts = 0, nat_translations_bytes = 0; @@ -4015,13 +3951,9 @@ bool NatOrch::setTwiceNatCounters(const TwiceNatEntry::iterator &iter) return 0; } - memset(&nat_entry_attr_packet, 0, sizeof(nat_entry_attr_packet)); - memset(&nat_entry_attr_byte, 0, sizeof(nat_entry_attr_byte)); nat_entry_attr_packet.id = SAI_NAT_ENTRY_ATTR_PACKET_COUNT; nat_entry_attr_byte.id = SAI_NAT_ENTRY_ATTR_BYTE_COUNT; - memset(&dbl_nat_entry, 0, sizeof(dbl_nat_entry)); - dbl_nat_entry.vr_id = gVirtualRouterId; dbl_nat_entry.switch_id = gSwitchId; dbl_nat_entry.nat_type = SAI_NAT_TYPE_DOUBLE_NAT; @@ -4059,9 +3991,9 @@ bool NatOrch::setTwiceNaptCounters(const TwiceNaptEntry::iterator &iter) const TwiceNaptEntryKey &key = iter->first; TwiceNaptEntryValue &entry = iter->second; uint8_t protoType = ((key.prototype == "TCP") ? 
IPPROTO_TCP : IPPROTO_UDP); - sai_attribute_t nat_entry_attr_packet; - sai_attribute_t nat_entry_attr_byte; - sai_nat_entry_t dbl_nat_entry; + sai_attribute_t nat_entry_attr_packet = {}; + sai_attribute_t nat_entry_attr_byte = {}; + sai_nat_entry_t dbl_nat_entry = {}; sai_status_t status; uint64_t nat_translations_pkts = 0, nat_translations_bytes = 0; @@ -4072,13 +4004,9 @@ bool NatOrch::setTwiceNaptCounters(const TwiceNaptEntry::iterator &iter) return 0; } - memset(&nat_entry_attr_packet, 0, sizeof(nat_entry_attr_packet)); - memset(&nat_entry_attr_byte, 0, sizeof(nat_entry_attr_byte)); nat_entry_attr_packet.id = SAI_NAT_ENTRY_ATTR_PACKET_COUNT; nat_entry_attr_byte.id = SAI_NAT_ENTRY_ATTR_BYTE_COUNT; - memset(&dbl_nat_entry, 0, sizeof(dbl_nat_entry)); - dbl_nat_entry.vr_id = gVirtualRouterId; dbl_nat_entry.switch_id = gSwitchId; dbl_nat_entry.nat_type = SAI_NAT_TYPE_DOUBLE_NAT; @@ -4211,8 +4139,9 @@ bool NatOrch::checkIfNatEntryIsActive(const NatEntry::iterator &iter, time_t now NatEntryValue &entry = iter->second; uint32_t attr_count; IpAddress srcIp; - sai_attribute_t nat_entry_attr[4]; - sai_nat_entry_t snat_entry, dnat_entry; + sai_attribute_t nat_entry_attr[4] = {}; + sai_nat_entry_t snat_entry = {}; + sai_nat_entry_t dnat_entry; sai_status_t status; if (entry.nat_type == "dnat") @@ -4233,7 +4162,6 @@ bool NatOrch::checkIfNatEntryIsActive(const NatEntry::iterator &iter, time_t now return 1; } - memset(nat_entry_attr, 0, sizeof(nat_entry_attr)); nat_entry_attr[0].id = SAI_NAT_ENTRY_ATTR_HIT_BIT; /* Get the Hit bit */ nat_entry_attr[0].value.booldata = 0; nat_entry_attr[1].id = SAI_NAT_ENTRY_ATTR_HIT_BIT_COR; /* clear the hit bit after returning the value */ @@ -4241,8 +4169,6 @@ bool NatOrch::checkIfNatEntryIsActive(const NatEntry::iterator &iter, time_t now attr_count = 2; - memset(&snat_entry, 0, sizeof(snat_entry)); - snat_entry.vr_id = gVirtualRouterId; snat_entry.switch_id = gSwitchId; snat_entry.nat_type = SAI_NAT_TYPE_SOURCE_NAT; @@ -4306,8 +4232,9 @@ 
bool NatOrch::checkIfNaptEntryIsActive(const NaptEntry::iterator &iter, time_t n uint32_t attr_count; IpAddress srcIp; uint16_t srcPort; - sai_attribute_t nat_entry_attr[4]; - sai_nat_entry_t snat_entry, dnat_entry; + sai_attribute_t nat_entry_attr[4] = {}; + sai_nat_entry_t snat_entry = {}; + sai_nat_entry_t dnat_entry; sai_status_t status; if (entry.nat_type == "dnat") @@ -4329,7 +4256,6 @@ bool NatOrch::checkIfNaptEntryIsActive(const NaptEntry::iterator &iter, time_t n return 1; } - memset(nat_entry_attr, 0, sizeof(nat_entry_attr)); nat_entry_attr[0].id = SAI_NAT_ENTRY_ATTR_HIT_BIT; /* Get the Hit bit */ nat_entry_attr[0].value.booldata = 0; nat_entry_attr[1].id = SAI_NAT_ENTRY_ATTR_HIT_BIT_COR; /* clear the hit bit after returning the value */ @@ -4337,8 +4263,6 @@ bool NatOrch::checkIfNaptEntryIsActive(const NaptEntry::iterator &iter, time_t n attr_count = 2; - memset(&snat_entry, 0, sizeof(snat_entry)); - snat_entry.vr_id = gVirtualRouterId; snat_entry.switch_id = gSwitchId; snat_entry.nat_type = SAI_NAT_TYPE_SOURCE_NAT; @@ -4417,8 +4341,8 @@ bool NatOrch::checkIfTwiceNatEntryIsActive(const TwiceNatEntry::iterator &iter, const TwiceNatEntryKey &key = iter->first; TwiceNatEntryValue &entry = iter->second; uint32_t attr_count; - sai_attribute_t nat_entry_attr[4]; - sai_nat_entry_t dbl_nat_entry; + sai_attribute_t nat_entry_attr[4] = {}; + sai_nat_entry_t dbl_nat_entry = {}; sai_status_t status; if (entry.entry_type == "static") @@ -4434,7 +4358,6 @@ bool NatOrch::checkIfTwiceNatEntryIsActive(const TwiceNatEntry::iterator &iter, return 0; } - memset(nat_entry_attr, 0, sizeof(nat_entry_attr)); nat_entry_attr[0].id = SAI_NAT_ENTRY_ATTR_HIT_BIT; /* Get the Hit bit */ nat_entry_attr[0].value.booldata = 0; nat_entry_attr[1].id = SAI_NAT_ENTRY_ATTR_HIT_BIT_COR; /* clear the hit bit after returning the value */ @@ -4442,8 +4365,6 @@ bool NatOrch::checkIfTwiceNatEntryIsActive(const TwiceNatEntry::iterator &iter, attr_count = 2; - memset(&dbl_nat_entry, 0, 
sizeof(dbl_nat_entry)); - dbl_nat_entry.vr_id = gVirtualRouterId; dbl_nat_entry.switch_id = gSwitchId; dbl_nat_entry.nat_type = SAI_NAT_TYPE_DOUBLE_NAT; @@ -4472,8 +4393,8 @@ bool NatOrch::checkIfTwiceNaptEntryIsActive(const TwiceNaptEntry::iterator &iter TwiceNaptEntryValue &entry = iter->second; uint8_t protoType = ((key.prototype == "TCP") ? IPPROTO_TCP : IPPROTO_UDP); uint32_t attr_count; - sai_attribute_t nat_entry_attr[4]; - sai_nat_entry_t dbl_nat_entry; + sai_attribute_t nat_entry_attr[4] = {}; + sai_nat_entry_t dbl_nat_entry = {}; sai_status_t status; if (entry.addedToHw == false) @@ -4489,7 +4410,6 @@ bool NatOrch::checkIfTwiceNaptEntryIsActive(const TwiceNaptEntry::iterator &iter return 1; } - memset(nat_entry_attr, 0, sizeof(nat_entry_attr)); nat_entry_attr[0].id = SAI_NAT_ENTRY_ATTR_HIT_BIT; /* Get the Hit bit */ nat_entry_attr[0].value.booldata = 0; nat_entry_attr[1].id = SAI_NAT_ENTRY_ATTR_HIT_BIT_COR; /* clear the hit bit after returning the value */ @@ -4497,8 +4417,6 @@ bool NatOrch::checkIfTwiceNaptEntryIsActive(const TwiceNaptEntry::iterator &iter attr_count = 2; - memset(&dbl_nat_entry, 0, sizeof(dbl_nat_entry)); - dbl_nat_entry.vr_id = gVirtualRouterId; dbl_nat_entry.switch_id = gSwitchId; dbl_nat_entry.nat_type = SAI_NAT_TYPE_DOUBLE_NAT; diff --git a/orchagent/neighorch.cpp b/orchagent/neighorch.cpp index 0ade29e4b4..47bcca3c32 100644 --- a/orchagent/neighorch.cpp +++ b/orchagent/neighorch.cpp @@ -21,6 +21,7 @@ extern FgNhgOrch *gFgNhgOrch; extern Directory gDirectory; extern string gMySwitchType; extern int32_t gVoqMySwitchId; +extern BfdOrch *gBfdOrch; const int neighorch_pri = 30; @@ -35,6 +36,12 @@ NeighOrch::NeighOrch(DBConnector *appDb, string tableName, IntfsOrch *intfsOrch, m_fdbOrch->attach(this); + // Some UTs instantiate NeighOrch but gBfdOrch is null, it is not null in orchagent + if (gBfdOrch) + { + gBfdOrch->attach(this); + } + if(gMySwitchType == "voq") { //Add subscriber to process VOQ system neigh @@ -151,6 +158,12 @@ void 
NeighOrch::update(SubjectType type, void *cntx) processFDBFlushUpdate(*update); break; } + case SUBJECT_TYPE_BFD_SESSION_STATE_CHANGE: + { + BfdUpdate *update = static_cast(cntx); + updateNextHop (*update); + break; + } default: break; } @@ -255,6 +268,12 @@ bool NeighOrch::addNextHop(const NextHopKey &nh) sai_status_t status = sai_next_hop_api->create_next_hop(&next_hop_id, gSwitchId, (uint32_t)next_hop_attrs.size(), next_hop_attrs.data()); if (status != SAI_STATUS_SUCCESS) { + if (status == SAI_STATUS_ITEM_ALREADY_EXISTS) + { + SWSS_LOG_NOTICE("Next hop %s on %s already exists", + nexthop.ip_address.to_string().c_str(), nexthop.alias.c_str()); + return true; + } SWSS_LOG_ERROR("Failed to create next hop %s on %s, rv:%d", nexthop.ip_address.to_string().c_str(), nexthop.alias.c_str(), status); task_process_status handle_status = handleSaiCreateStatus(SAI_API_NEXT_HOP, status); @@ -427,6 +446,62 @@ bool NeighOrch::ifChangeInformNextHop(const string &alias, bool if_up) return rc; } +void NeighOrch::updateNextHop(const BfdUpdate& update) +{ + SWSS_LOG_ENTER(); + bool rc = true; + + auto key = update.peer; + sai_bfd_session_state_t state = update.state; + + size_t found_vrf = key.find(state_db_key_delimiter); + if (found_vrf == string::npos) + { + SWSS_LOG_INFO("Failed to parse key %s, no vrf is given", key.c_str()); + return; + } + + size_t found_ifname = key.find(state_db_key_delimiter, found_vrf + 1); + if (found_ifname == string::npos) + { + SWSS_LOG_INFO("Failed to parse key %s, no ifname is given", key.c_str()); + return; + } + + string vrf_name = key.substr(0, found_vrf); + string alias = key.substr(found_vrf + 1, found_ifname - found_vrf - 1); + IpAddress peer_address(key.substr(found_ifname + 1)); + + if (alias != "default" || vrf_name != "default") + { + return; + } + + for (auto nhop = m_syncdNextHops.begin(); nhop != m_syncdNextHops.end(); ++nhop) + { + if (nhop->first.ip_address != peer_address) + { + continue; + } + + if (state == 
SAI_BFD_SESSION_STATE_UP) + { + SWSS_LOG_INFO("updateNextHop get BFD session UP event, key %s", key.c_str()); + rc = clearNextHopFlag(nhop->first, NHFLAGS_IFDOWN); + } + else + { + SWSS_LOG_INFO("updateNextHop get BFD session DOWN event, key %s", key.c_str()); + rc = setNextHopFlag(nhop->first, NHFLAGS_IFDOWN); + } + + if (!rc) + { + break; + } + } +} + bool NeighOrch::removeNextHop(const IpAddress &ipAddress, const string &alias) { SWSS_LOG_ENTER(); @@ -591,24 +666,51 @@ void NeighOrch::decreaseNextHopRefCount(const NextHopKey &nexthop, uint32_t coun assert(hasNextHop(nexthop)); if (m_syncdNextHops.find(nexthop) != m_syncdNextHops.end()) { + if ((m_syncdNextHops[nexthop].ref_count - (int)count) < 0) + { + SWSS_LOG_ERROR("Ref count cannot be negative for next_hop_id: 0x%" PRIx64 " with ip: %s and alias: %s", + m_syncdNextHops[nexthop].next_hop_id, nexthop.ip_address.to_string().c_str(), nexthop.alias.c_str()); + // Reset refcount to 0 to match expected value + m_syncdNextHops[nexthop].ref_count = 0; + return; + } m_syncdNextHops[nexthop].ref_count -= count; } } bool NeighOrch::getNeighborEntry(const NextHopKey &nexthop, NeighborEntry &neighborEntry, MacAddress &macAddress) { + Port inbp; + string nbr_alias; if (!hasNextHop(nexthop)) { return false; } + if (gMySwitchType == "voq") + { + gPortsOrch->getInbandPort(inbp); + assert(inbp.m_alias.length()); + } for (const auto &entry : m_syncdNeighbors) { - if (entry.first.ip_address == nexthop.ip_address && entry.first.alias == nexthop.alias) + if (entry.first.ip_address == nexthop.ip_address) { - neighborEntry = entry.first; - macAddress = entry.second.mac; - return true; + if (m_intfsOrch->isRemoteSystemPortIntf(entry.first.alias)) + { + //For remote system ports, nexthops are always on inband. 
+ nbr_alias = inbp.m_alias; + } + else + { + nbr_alias = entry.first.alias; + } + if (nbr_alias == nexthop.alias) + { + neighborEntry = entry.first; + macAddress = entry.second.mac; + return true; + } } } @@ -685,19 +787,6 @@ void NeighOrch::doTask(Consumer &consumer) IpAddress ip_address(key.substr(found+1)); - /* Verify Ipv4 LinkLocal and skip neighbor entry added for RFC5549 */ - if ((ip_address.getAddrScope() == IpAddress::LINK_SCOPE) && (ip_address.isV4())) - { - /* Check if this prefix is not a configured ip, if so allow */ - IpPrefix ipll_prefix(ip_address.getV4Addr(), 16); - if (!m_intfsOrch->isPrefixSubnet (ipll_prefix, alias)) - { - SWSS_LOG_NOTICE("Skip IPv4LL neighbor %s, Intf:%s op: %s ", ip_address.to_string().c_str(), alias.c_str(), op.c_str()); - it = consumer.m_toSync.erase(it); - continue; - } - } - NeighborEntry neighbor_entry = { ip_address, alias }; if (op == SET_COMMAND) @@ -725,10 +814,35 @@ void NeighOrch::doTask(Consumer &consumer) mac_address = MacAddress(fvValue(*i)); } - if (m_syncdNeighbors.find(neighbor_entry) == m_syncdNeighbors.end() - || m_syncdNeighbors[neighbor_entry].mac != mac_address) + bool nbr_not_found = (m_syncdNeighbors.find(neighbor_entry) == m_syncdNeighbors.end()); + if (nbr_not_found || m_syncdNeighbors[neighbor_entry].mac != mac_address) { - if (addNeighbor(neighbor_entry, mac_address)) + if (!mac_address) + { + if (nbr_not_found) + { + // only for unresolvable neighbors that are new + if (addZeroMacTunnelRoute(neighbor_entry, mac_address)) + { + it = consumer.m_toSync.erase(it); + } + else + { + it++; + continue; + } + } + else + { + /* + * For neighbors that were previously resolvable but are now unresolvable, + * we expect such neighbor entries to be deleted prior to a zero MAC update + * arriving for that same neighbor. 
+ */ + it = consumer.m_toSync.erase(it); + } + } + else if (addNeighbor(neighbor_entry, mac_address)) { it = consumer.m_toSync.erase(it); } @@ -807,6 +921,18 @@ bool NeighOrch::addNeighbor(const NeighborEntry &neighborEntry, const MacAddress memcpy(neighbor_attr.value.mac, macAddress.getMac(), 6); neighbor_attrs.push_back(neighbor_attr); + if ((ip_address.getAddrScope() == IpAddress::LINK_SCOPE) && (ip_address.isV4())) + { + /* Check if this prefix is a configured ip, if not allow */ + IpPrefix ipll_prefix(ip_address.getV4Addr(), 16); + if (!m_intfsOrch->isPrefixSubnet (ipll_prefix, alias)) + { + neighbor_attr.id = SAI_NEIGHBOR_ENTRY_ATTR_NO_HOST_ROUTE; + neighbor_attr.value.booldata = 1; + neighbor_attrs.push_back(neighbor_attr); + } + } + MuxOrch* mux_orch = gDirectory.get(); bool hw_config = isHwConfigured(neighborEntry); @@ -885,15 +1011,18 @@ bool NeighOrch::addNeighbor(const NeighborEntry &neighborEntry, const MacAddress } else if (isHwConfigured(neighborEntry)) { - status = sai_neighbor_api->set_neighbor_entry_attribute(&neighbor_entry, &neighbor_attr); - if (status != SAI_STATUS_SUCCESS) + for (auto itr : neighbor_attrs) { - SWSS_LOG_ERROR("Failed to update neighbor %s on %s, rv:%d", - macAddress.to_string().c_str(), alias.c_str(), status); - task_process_status handle_status = handleSaiSetStatus(SAI_API_NEIGHBOR, status); - if (handle_status != task_success) + status = sai_neighbor_api->set_neighbor_entry_attribute(&neighbor_entry, &itr); + if (status != SAI_STATUS_SUCCESS) { - return parseHandleSaiStatusFailure(handle_status); + SWSS_LOG_ERROR("Failed to update neighbor %s on %s, attr.id=0x%x, rv:%d", + macAddress.to_string().c_str(), alias.c_str(), itr.id, status); + task_process_status handle_status = handleSaiSetStatus(SAI_API_NEIGHBOR, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } } } SWSS_LOG_NOTICE("Updated neighbor %s on %s", macAddress.to_string().c_str(), alias.c_str()); @@ -960,7 
+1089,7 @@ bool NeighOrch::removeNeighbor(const NeighborEntry &neighborEntry, bool disable) /* When next hop is not found, we continue to remove neighbor entry. */ if (status == SAI_STATUS_ITEM_NOT_FOUND) { - SWSS_LOG_ERROR("Failed to locate next hop %s on %s, rv:%d", + SWSS_LOG_NOTICE("Next hop %s on %s doesn't exist, rv:%d", ip_address.to_string().c_str(), alias.c_str(), status); } else @@ -995,9 +1124,8 @@ bool NeighOrch::removeNeighbor(const NeighborEntry &neighborEntry, bool disable) { if (status == SAI_STATUS_ITEM_NOT_FOUND) { - SWSS_LOG_ERROR("Failed to locate neighbor %s on %s, rv:%d", + SWSS_LOG_NOTICE("Neighbor %s on %s already removed, rv:%d", m_syncdNeighbors[neighborEntry].mac.to_string().c_str(), alias.c_str(), status); - return true; } else { @@ -1010,22 +1138,24 @@ bool NeighOrch::removeNeighbor(const NeighborEntry &neighborEntry, bool disable) } } } - - if (neighbor_entry.ip_address.addr_family == SAI_IP_ADDR_FAMILY_IPV4) - { - gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV4_NEIGHBOR); - } else { - gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV6_NEIGHBOR); - } + if (neighbor_entry.ip_address.addr_family == SAI_IP_ADDR_FAMILY_IPV4) + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV4_NEIGHBOR); + } + else + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV6_NEIGHBOR); + } - removeNextHop(ip_address, alias); - m_intfsOrch->decreaseRouterIntfsRefCount(alias); + removeNextHop(ip_address, alias); + m_intfsOrch->decreaseRouterIntfsRefCount(alias); + SWSS_LOG_NOTICE("Removed neighbor %s on %s", + m_syncdNeighbors[neighborEntry].mac.to_string().c_str(), alias.c_str()); + } } - SWSS_LOG_NOTICE("Removed neighbor %s on %s", - m_syncdNeighbors[neighborEntry].mac.to_string().c_str(), alias.c_str()); /* Do not delete entry from cache if its disable request */ if (disable) @@ -1204,7 +1334,7 @@ void NeighOrch::doVoqSystemNeighTask(Consumer &consumer) string alias = key.substr(0, found); - 
if(!gIntfsOrch->isRemoteSystemPortIntf(alias)) + if(gIntfsOrch->isLocalSystemPortIntf(alias)) { //Synced local neighbor. Skip it = consumer.m_toSync.erase(it); @@ -1290,6 +1420,17 @@ void NeighOrch::doVoqSystemNeighTask(Consumer &consumer) { SWSS_LOG_NOTICE("VOQ encap index updated for neighbor %s", kfvKey(t).c_str()); it = consumer.m_toSync.erase(it); + + /* Remove remaining DEL operation in m_toSync for the same neighbor. + * Since DEL operation is supposed to be executed before SET for the same neighbor + * A remaining DEL after the SET operation means the DEL operation failed previously and should not be executed anymore + */ + auto rit = make_reverse_iterator(it); + while (rit != consumer.m_toSync.rend() && rit->first == key && kfvOp(rit->second) == DEL_COMMAND) + { + consumer.m_toSync.erase(next(rit).base()); + SWSS_LOG_NOTICE("Removed pending system neighbor DEL operation for %s after SET operation", key.c_str()); + } } continue; } @@ -1351,6 +1492,7 @@ void NeighOrch::doVoqSystemNeighTask(Consumer &consumer) else { it++; + continue; } } else @@ -1359,6 +1501,17 @@ void NeighOrch::doVoqSystemNeighTask(Consumer &consumer) SWSS_LOG_INFO("System neighbor %s already exists", kfvKey(t).c_str()); it = consumer.m_toSync.erase(it); } + + /* Remove remaining DEL operation in m_toSync for the same neighbor. 
+ * Since DEL operation is supposed to be executed before SET for the same neighbor + * A remaining DEL after the SET operation means the DEL operation failed previously and should not be executed anymore + */ + auto rit = make_reverse_iterator(it); + while (rit != consumer.m_toSync.rend() && rit->first == key && kfvOp(rit->second) == DEL_COMMAND) + { + consumer.m_toSync.erase(next(rit).base()); + SWSS_LOG_NOTICE("Removed pending system neighbor DEL operation for %s after SET operation", key.c_str()); + } } else if (op == DEL_COMMAND) { @@ -1717,3 +1870,18 @@ void NeighOrch::updateSrv6Nexthop(const NextHopKey &nh, const sai_object_id_t &n m_syncdNextHops.erase(nh); } } + +bool NeighOrch::addZeroMacTunnelRoute(const NeighborEntry& entry, const MacAddress& mac) +{ + SWSS_LOG_INFO("Creating tunnel route for neighbor %s", entry.ip_address.to_string().c_str()); + MuxOrch* mux_orch = gDirectory.get(); + NeighborUpdate update = {entry, mac, true}; + mux_orch->update(SUBJECT_TYPE_NEIGH_CHANGE, static_cast(&update)); + if (mux_orch->isStandaloneTunnelRouteInstalled(entry.ip_address)) + { + m_syncdNeighbors[entry] = { mac, false }; + return true; + } + + return false; +} diff --git a/orchagent/neighorch.h b/orchagent/neighorch.h index 6587606168..e72979ad07 100644 --- a/orchagent/neighorch.h +++ b/orchagent/neighorch.h @@ -11,6 +11,7 @@ #include "nexthopkey.h" #include "producerstatetable.h" #include "schema.h" +#include "bfdorch.h" #define NHFLAGS_IFDOWN 0x1 // nexthop's outbound i/f is down @@ -112,9 +113,12 @@ class NeighOrch : public Orch, public Subject, public Observer void voqSyncAddNeigh(string &alias, IpAddress &ip_address, const MacAddress &mac, sai_neighbor_entry_t &neighbor_entry); void voqSyncDelNeigh(string &alias, IpAddress &ip_address); bool updateVoqNeighborEncapIndex(const NeighborEntry &neighborEntry, uint32_t encap_index); + void updateNextHop(const BfdUpdate&); bool resolveNeighborEntry(const NeighborEntry &, const MacAddress &); void 
clearResolvedNeighborEntry(const NeighborEntry &); + + bool addZeroMacTunnelRoute(const NeighborEntry &, const MacAddress &); }; #endif /* SWSS_NEIGHORCH_H */ diff --git a/orchagent/nhgbase.h b/orchagent/nhgbase.h index 65f0690555..1dbf2f7762 100644 --- a/orchagent/nhgbase.h +++ b/orchagent/nhgbase.h @@ -451,11 +451,6 @@ class NhgOrchCommon : public Orch } inline void decSyncedNhgCount() { NhgBase::decSyncedCount(); } - /* Handling SAI status*/ - using Orch::handleSaiCreateStatus; - using Orch::handleSaiRemoveStatus; - using Orch::parseHandleSaiStatusFailure; - protected: /* * Map of synced next hop groups. diff --git a/orchagent/nhgorch.cpp b/orchagent/nhgorch.cpp index 32ddb27eb5..cefc2efbb1 100644 --- a/orchagent/nhgorch.cpp +++ b/orchagent/nhgorch.cpp @@ -576,10 +576,10 @@ bool NextHopGroup::sync() SWSS_LOG_ERROR("Failed to create next hop group %s, rv:%d", m_key.to_string().c_str(), status); - task_process_status handle_status = gNhgOrch->handleSaiCreateStatus(SAI_API_NEXT_HOP_GROUP, status); + task_process_status handle_status = handleSaiCreateStatus(SAI_API_NEXT_HOP_GROUP, status); if (handle_status != task_success) { - return gNhgOrch->parseHandleSaiStatusFailure(handle_status); + return parseHandleSaiStatusFailure(handle_status); } } diff --git a/orchagent/notifications.cpp b/orchagent/notifications.cpp index 1a49526370..39ce5aa1d4 100644 --- a/orchagent/notifications.cpp +++ b/orchagent/notifications.cpp @@ -23,7 +23,7 @@ void on_bfd_session_state_change(uint32_t count, sai_bfd_session_state_notificat // which causes concurrency access to the DB } -void on_switch_shutdown_request() +void on_switch_shutdown_request(sai_object_id_t switch_id) { SWSS_LOG_ENTER(); @@ -32,3 +32,9 @@ void on_switch_shutdown_request() exit(EXIT_FAILURE); } + +void on_port_host_tx_ready(sai_object_id_t port_id, sai_object_id_t switch_id, sai_port_host_tx_ready_status_t m_portHostTxReadyStatus) +{ + // don't use this event handler, because it runs by libsairedis in a separate 
thread + // which causes concurrency access to the DB +} \ No newline at end of file diff --git a/orchagent/notifications.h b/orchagent/notifications.h index ea22593a1f..0b4b675987 100644 --- a/orchagent/notifications.h +++ b/orchagent/notifications.h @@ -7,4 +7,8 @@ extern "C" { void on_fdb_event(uint32_t count, sai_fdb_event_notification_data_t *data); void on_port_state_change(uint32_t count, sai_port_oper_status_notification_t *data); void on_bfd_session_state_change(uint32_t count, sai_bfd_session_state_notification_t *data); -void on_switch_shutdown_request(); + +// The function prototype information can be found here: +// https://github.com/sonic-net/sonic-sairedis/blob/master/meta/NotificationSwitchShutdownRequest.cpp#L49 +void on_switch_shutdown_request(sai_object_id_t switch_id); +void on_port_host_tx_ready(sai_object_id_t port_id, sai_object_id_t switch_id, sai_port_host_tx_ready_status_t m_portHostTxReadyStatus); diff --git a/orchagent/nvgreorch.cpp b/orchagent/nvgreorch.cpp new file mode 100644 index 0000000000..38f8c19874 --- /dev/null +++ b/orchagent/nvgreorch.cpp @@ -0,0 +1,582 @@ +#include "orch.h" +#include "nvgreorch.h" +#include "request_parser.h" +#include "swssnet.h" +#include "directory.h" + +#define NVGRE_VSID_MAX_VALUE 16777214 + +extern Directory gDirectory; +extern PortsOrch* gPortsOrch; +extern sai_object_id_t gSwitchId; +extern sai_object_id_t gUnderlayIfId; +extern sai_object_id_t gVirtualRouterId; +extern sai_tunnel_api_t *sai_tunnel_api; + +static const std::vector nvgreMapTypes = { + MAP_T_VLAN, + MAP_T_BRIDGE +}; + +static const std::map nvgreEncapTunnelMap = { + { MAP_T_VLAN, SAI_TUNNEL_MAP_TYPE_VLAN_ID_TO_VSID }, + { MAP_T_BRIDGE, SAI_TUNNEL_MAP_TYPE_BRIDGE_IF_TO_VSID } +}; + +static inline sai_tunnel_map_type_t get_encap_nvgre_mapper(map_type_t map) +{ + return nvgreEncapTunnelMap.at(map); +} + +static const std::map nvgreDecapTunnelMap = { + { MAP_T_VLAN, SAI_TUNNEL_MAP_TYPE_VSID_TO_VLAN_ID }, + { MAP_T_BRIDGE, 
SAI_TUNNEL_MAP_TYPE_VSID_TO_BRIDGE_IF } +}; + +static inline sai_tunnel_map_type_t get_decap_nvgre_mapper(map_type_t map) +{ + return nvgreDecapTunnelMap.at(map); +} + +static const map> nvgreEncapTunnelMapKeyVal = +{ + { MAP_T_VLAN, + { SAI_TUNNEL_MAP_ENTRY_ATTR_VLAN_ID_KEY, SAI_TUNNEL_MAP_ENTRY_ATTR_VSID_ID_VALUE } + }, + { MAP_T_BRIDGE, + { SAI_TUNNEL_MAP_ENTRY_ATTR_BRIDGE_ID_KEY, SAI_TUNNEL_MAP_ENTRY_ATTR_VSID_ID_VALUE } + } +}; + +static inline sai_tunnel_map_entry_attr_t get_encap_nvgre_map_key(map_type_t map) +{ + return nvgreEncapTunnelMapKeyVal.at(map).first; +} + +static inline sai_tunnel_map_entry_attr_t get_encap_nvgre_map_val(map_type_t map) +{ + return nvgreEncapTunnelMapKeyVal.at(map).second; +} + +static const map> nvgreDecapTunnelMapKeyVal = +{ + { MAP_T_VLAN, + { SAI_TUNNEL_MAP_ENTRY_ATTR_VSID_ID_KEY, SAI_TUNNEL_MAP_ENTRY_ATTR_VLAN_ID_VALUE } + }, + { MAP_T_BRIDGE, + { SAI_TUNNEL_MAP_ENTRY_ATTR_VSID_ID_KEY, SAI_TUNNEL_MAP_ENTRY_ATTR_BRIDGE_ID_VALUE } + } +}; + +static inline sai_tunnel_map_entry_attr_t get_decap_nvgre_map_key(map_type_t map) +{ + return nvgreDecapTunnelMapKeyVal.at(map).first; +} + +static inline sai_tunnel_map_entry_attr_t get_decap_nvgre_map_val(map_type_t map) +{ + return nvgreDecapTunnelMapKeyVal.at(map).second; +} + +/** @brief Creates tunnel mapper in SAI. + * + * @param sai_tunnel_map_type SAI tunnel map type e.g. VSID_TO_VLAN + * + * @return Tunnel map SAI identifier. 
+ */ +sai_object_id_t NvgreTunnel::sai_create_tunnel_map(sai_tunnel_map_type_t sai_tunnel_map_type) +{ + sai_attribute_t attr; + std::vector tunnel_map_attrs; + + attr.id = SAI_TUNNEL_MAP_ATTR_TYPE; + attr.value.u32 = sai_tunnel_map_type; + + tunnel_map_attrs.push_back(attr); + + sai_object_id_t tunnel_map_id; + sai_status_t status = sai_tunnel_api->create_tunnel_map( + &tunnel_map_id, + gSwitchId, + static_cast(tunnel_map_attrs.size()), + tunnel_map_attrs.data() + ); + if (status != SAI_STATUS_SUCCESS) + { + throw std::runtime_error("Can't create the NVGRE tunnel map object"); + } + + return tunnel_map_id; +} + +/** @brief Removes tunnel mapper in SAI. + * + * @param sai_tunnel_map_type SAI tunnel map identifier. + * + * @return void. + */ +void NvgreTunnel::sai_remove_tunnel_map(sai_object_id_t tunnel_map_id) +{ + sai_status_t status = sai_tunnel_api->remove_tunnel_map(tunnel_map_id); + + if (status != SAI_STATUS_SUCCESS) + { + throw std::runtime_error("Can't remove the NVGRE tunnel map object"); + } +} + + +/** @brief Creates tunnel in SAI. + * + * @param ids Pointer to structure where stored tunnel and tunnel mappers identifiers. + * @param src_ip Pointer to source IP address. + * + * @return SAI tunnel identifier. 
+ */ +sai_object_id_t NvgreTunnel::sai_create_tunnel(struct tunnel_sai_ids_t &ids, const sai_ip_address_t &src_ip, sai_object_id_t underlay_rif) +{ + sai_attribute_t attr; + std::vector tunnel_attrs; + + attr.id = SAI_TUNNEL_ATTR_TYPE; + attr.value.s32 = SAI_TUNNEL_TYPE_NVGRE; + tunnel_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE; + attr.value.oid = underlay_rif; + tunnel_attrs.push_back(attr); + + sai_object_id_t decap_map_list[MAP_T_MAX]; + uint8_t num_decap_map = 0; + + for (auto map_type : nvgreMapTypes) + { + decap_map_list[num_decap_map] = ids.tunnel_decap_id.at(map_type); + num_decap_map++; + } + + attr.id = SAI_TUNNEL_ATTR_DECAP_MAPPERS; + attr.value.objlist.count = num_decap_map; + attr.value.objlist.list = decap_map_list; + tunnel_attrs.push_back(attr); + + sai_object_id_t encap_map_list[MAP_T_MAX]; + uint8_t num_encap_map = 0; + + for (auto map_type : nvgreMapTypes) + { + encap_map_list[num_encap_map] = ids.tunnel_encap_id.at(map_type); + num_encap_map++; + } + + attr.id = SAI_TUNNEL_ATTR_ENCAP_MAPPERS; + attr.value.objlist.count = num_encap_map; + attr.value.objlist.list = encap_map_list; + tunnel_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_ATTR_ENCAP_SRC_IP; + attr.value.ipaddr = src_ip; + tunnel_attrs.push_back(attr); + + sai_object_id_t tunnel_id; + sai_status_t status = sai_tunnel_api->create_tunnel( + &tunnel_id, + gSwitchId, + static_cast(tunnel_attrs.size()), + tunnel_attrs.data() + ); + if (status != SAI_STATUS_SUCCESS) + { + throw std::runtime_error("Can't create the NVGRE tunnel object"); + } + + return tunnel_id; +} + +/** @brief Removes tunnel in SAI. + * + * @param tunnel_id Pointer to tunnel identifier. + * + * @return void. 
+ */ +void NvgreTunnel::sai_remove_tunnel(sai_object_id_t tunnel_id) +{ + sai_status_t status = sai_tunnel_api->remove_tunnel(tunnel_id); + if (status != SAI_STATUS_SUCCESS) + { + throw std::runtime_error("Can't remove the NVGRE tunnel object"); + } +} + +/** @brief Creates tunnel termination in SAI. + * + * @param tunnel_id Tunnel identifier. + * @param src_ip Pointer to source IP address. + * @param default_vrid Virtual router identifier. + * + * @return SAI tunnel termination identifier. + */ +sai_object_id_t NvgreTunnel::sai_create_tunnel_termination(sai_object_id_t tunnel_id, const sai_ip_address_t &src_ip, sai_object_id_t default_vrid) +{ + sai_attribute_t attr; + std::vector tunnel_attrs; + + attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TYPE; + attr.value.s32 = SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP; + tunnel_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_VR_ID; + attr.value.oid = default_vrid; + tunnel_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP; + attr.value.ipaddr = src_ip; + tunnel_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TUNNEL_TYPE; + attr.value.s32 = SAI_TUNNEL_TYPE_NVGRE; + tunnel_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_ACTION_TUNNEL_ID; + attr.value.oid = tunnel_id; + tunnel_attrs.push_back(attr); + + sai_object_id_t term_table_id; + sai_status_t status = sai_tunnel_api->create_tunnel_term_table_entry( + &term_table_id, + gSwitchId, + static_cast(tunnel_attrs.size()), + tunnel_attrs.data() + ); + if (status != SAI_STATUS_SUCCESS) + { + throw std::runtime_error("Can't create a tunnel term table object"); + } + + return term_table_id; +} + +/** @brief Removes tunnel termination in SAI. + * + * @param tunnel_id Pointer to tunnel termination identifier. + * + * @return void. 
+ */ +void NvgreTunnel::sai_remove_tunnel_termination(sai_object_id_t tunnel_term_id) +{ + sai_status_t status = sai_tunnel_api->remove_tunnel_term_table_entry(tunnel_term_id); + if (status != SAI_STATUS_SUCCESS) + { + throw std::runtime_error("Can't remove a tunnel term object"); + } +} + +void NvgreTunnel::createNvgreMappers() +{ + for (auto map_type : nvgreMapTypes) + { + tunnel_ids_.tunnel_encap_id.insert( + make_pair(map_type, sai_create_tunnel_map(get_encap_nvgre_mapper(map_type))) + ); + } + + for (auto map_type : nvgreMapTypes) + { + tunnel_ids_.tunnel_decap_id.insert( + make_pair(map_type, sai_create_tunnel_map(get_decap_nvgre_mapper(map_type))) + ); + } +} + +void NvgreTunnel::removeNvgreMappers() +{ + for (auto map_type : nvgreMapTypes) + { + sai_remove_tunnel_map(getEncapMapId(map_type)); + } + + for (auto map_type : nvgreMapTypes) + { + sai_remove_tunnel_map(getDecapMapId(map_type)); + } + + tunnel_ids_.tunnel_encap_id.clear(); + tunnel_ids_.tunnel_decap_id.clear(); +} + +void NvgreTunnel::createNvgreTunnel() +{ + sai_ip_address_t ip_addr; + swss::copy(ip_addr, src_ip_); + + tunnel_ids_.tunnel_id = sai_create_tunnel(tunnel_ids_, ip_addr, gUnderlayIfId); + tunnel_ids_.tunnel_term_id = sai_create_tunnel_termination(tunnel_ids_.tunnel_id, ip_addr, gVirtualRouterId); + + SWSS_LOG_INFO("NVGRE tunnel '%s' was created", tunnel_name_.c_str()); +} + +void NvgreTunnel::removeNvgreTunnel() +{ + try + { + sai_remove_tunnel_termination(tunnel_ids_.tunnel_term_id); + sai_remove_tunnel(tunnel_ids_.tunnel_id); + } + catch(const std::runtime_error& error) + { + SWSS_LOG_ERROR("Error while removing tunnel entry. Tunnel: %s. 
Error: %s", tunnel_name_.c_str(), error.what()); + } + + SWSS_LOG_INFO("NVGRE tunnel '%s' was removed", tunnel_name_.c_str()); + + tunnel_ids_.tunnel_id = SAI_NULL_OBJECT_ID; + tunnel_ids_.tunnel_term_id = SAI_NULL_OBJECT_ID; +} + +NvgreTunnel::NvgreTunnel(std::string tunnelName, IpAddress srcIp) : + tunnel_name_(tunnelName), + src_ip_(srcIp) +{ + createNvgreMappers(); + createNvgreTunnel(); +} + +NvgreTunnel::~NvgreTunnel() +{ + removeNvgreTunnel(); + removeNvgreMappers(); +} + +bool NvgreTunnelOrch::addOperation(const Request& request) +{ + SWSS_LOG_ENTER(); + + auto src_ip = request.getAttrIP("src_ip"); + const auto& tunnel_name = request.getKeyString(0); + + if (isTunnelExists(tunnel_name)) + { + SWSS_LOG_WARN("NVGRE tunnel '%s' already exists", tunnel_name.c_str()); + return true; + } + + nvgre_tunnel_table_[tunnel_name] = std::unique_ptr(new NvgreTunnel(tunnel_name, src_ip)); + + return true; +} + +bool NvgreTunnelOrch::delOperation(const Request& request) +{ + SWSS_LOG_ENTER(); + + const auto& tunnel_name = request.getKeyString(0); + + if (!isTunnelExists(tunnel_name)) + { + SWSS_LOG_ERROR("NVGRE tunnel '%s' doesn't exist", tunnel_name.c_str()); + return true; + } + + nvgre_tunnel_table_.erase(tunnel_name); + + SWSS_LOG_INFO("NVGRE tunnel '%s' was removed", tunnel_name.c_str()); + + return true; +} + +/** @brief Creates tunnel map entry in SAI. + * + * @param map_type map type - VLAN or BRIDGE. + * @param vsid Virtual Subnet ID value. + * @param vlan_id VLAN ID value. + * @param bridge_obj_id SAI bridge object. + * @param encap encapsulation flag. + * + * @return SAI tunnel map entry ID. + */ +sai_object_id_t NvgreTunnel::sai_create_tunnel_map_entry( + map_type_t map_type, + sai_uint32_t vsid, + sai_vlan_id_t vlan_id, + sai_object_id_t bridge_obj_id, + bool encap) +{ + sai_attribute_t attr; + sai_object_id_t tunnel_map_entry_id; + std::vector tunnel_map_entry_attrs; + + attr.id = SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP_TYPE; + attr.value.u32 = (encap) ? 
get_encap_nvgre_mapper(map_type) : get_decap_nvgre_mapper(map_type); + tunnel_map_entry_attrs.push_back(attr); + + attr.id = SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP; + attr.value.oid = (encap) ? getEncapMapId(map_type) : getDecapMapId(map_type); + tunnel_map_entry_attrs.push_back(attr); + + attr.id = (encap) ? get_encap_nvgre_map_key(map_type) : get_decap_nvgre_map_val(map_type); + if (bridge_obj_id != SAI_NULL_OBJECT_ID) + { + attr.value.oid = bridge_obj_id; + } + else + { + attr.value.u16 = vlan_id; + } + + tunnel_map_entry_attrs.push_back(attr); + + attr.id = (encap) ? get_encap_nvgre_map_val(map_type) : get_decap_nvgre_map_key(map_type); + attr.value.u32 = vsid; + tunnel_map_entry_attrs.push_back(attr); + + sai_status_t status = sai_tunnel_api->create_tunnel_map_entry(&tunnel_map_entry_id, gSwitchId, + static_cast (tunnel_map_entry_attrs.size()), + tunnel_map_entry_attrs.data()); + + if (status != SAI_STATUS_SUCCESS) + { + throw std::runtime_error("Can't create the NVGRE tunnel map entry object"); + } + + return tunnel_map_entry_id; +} + + +bool NvgreTunnel::addDecapMapperEntry( + map_type_t map_type, + uint32_t vsid, + sai_vlan_id_t vlan_id, + std::string tunnel_map_entry_name, + sai_object_id_t bridge_obj) +{ + auto tunnel_map_entry_id = sai_create_tunnel_map_entry(map_type, vsid, vlan_id, bridge_obj); + + nvgre_tunnel_map_table_[tunnel_map_entry_name].map_entry_id = tunnel_map_entry_id; + nvgre_tunnel_map_table_[tunnel_map_entry_name].vlan_id = vlan_id; + nvgre_tunnel_map_table_[tunnel_map_entry_name].vsid = vsid; + + SWSS_LOG_INFO("NVGRE decap tunnel map entry '%s' for tunnel '%s' was created", + tunnel_map_entry_name.c_str(), tunnel_name_.c_str()); + + return true; +} + +bool NvgreTunnelMapOrch::addOperation(const Request& request) +{ + SWSS_LOG_ENTER(); + + auto tunnel_name = request.getKeyString(0); + NvgreTunnelOrch* tunnel_orch = gDirectory.get(); + + if (!tunnel_orch->isTunnelExists(tunnel_name)) + { + SWSS_LOG_WARN("NVGRE tunnel '%s' doesn't exist", 
tunnel_name.c_str()); + return true; + } + + auto tunnel_obj = tunnel_orch->getNvgreTunnel(tunnel_name); + const auto full_tunnel_map_entry_name = request.getFullKey(); + + if (tunnel_obj->isTunnelMapExists(full_tunnel_map_entry_name)) + { + SWSS_LOG_WARN("NVGRE tunnel map '%s' already exist", full_tunnel_map_entry_name.c_str()); + return true; + } + + sai_vlan_id_t vlan_id = (sai_vlan_id_t) request.getAttrVlan("vlan_id"); + Port port; + + if (!gPortsOrch->getVlanByVlanId(vlan_id, port)) + { + SWSS_LOG_WARN("VLAN ID doesn't exist: %d", vlan_id); + return true; + } + + auto vsid = static_cast(request.getAttrUint("vsid")); + if (vsid > NVGRE_VSID_MAX_VALUE) + { + SWSS_LOG_WARN("VSID is invalid: %d", vsid); + return true; + } + + if (!tunnel_obj->addDecapMapperEntry(MAP_T_VLAN, vsid, vlan_id, full_tunnel_map_entry_name)) + { + return true; + } + + return true; +} + +/** @brief Removes tunnel map entry in SAI. + * + * @param obj_id SAI tunnel map identifier. + * + * @return void. + */ +void NvgreTunnel::sai_remove_tunnel_map_entry(sai_object_id_t obj_id) +{ + sai_status_t status = SAI_STATUS_SUCCESS; + + if (obj_id != SAI_NULL_OBJECT_ID) + { + status = sai_tunnel_api->remove_tunnel_map_entry(obj_id); + } + + if (status != SAI_STATUS_SUCCESS) + { + throw std::runtime_error("Can't delete the NVGRE tunnel map entry object"); + } +} + +bool NvgreTunnel::delMapperEntry(std::string tunnel_map_entry_name) +{ + auto tunnel_map_entry_id = getMapEntryId(tunnel_map_entry_name); + + try + { + sai_remove_tunnel_map_entry(tunnel_map_entry_id); + } + catch (const std::runtime_error& error) + { + SWSS_LOG_ERROR("Error while removing decap tunnel map %s: %s", + tunnel_map_entry_name.c_str(), error.what()); + return false; + } + + nvgre_tunnel_map_table_.erase(tunnel_map_entry_name); + + SWSS_LOG_INFO("NVGRE tunnel map entry '%s' for tunnel '%s' was removed", + tunnel_map_entry_name.c_str(), tunnel_name_.c_str()); + + return true; +} + +bool NvgreTunnelMapOrch::delOperation(const 
Request& request) +{ + SWSS_LOG_ENTER(); + + const auto& tunnel_name = request.getKeyString(0); + NvgreTunnelOrch* tunnel_orch = gDirectory.get(); + auto tunnel_obj = tunnel_orch->getNvgreTunnel(tunnel_name); + const auto& full_tunnel_map_entry_name = request.getFullKey(); + + if (!tunnel_orch->isTunnelExists(tunnel_name)) + { + SWSS_LOG_WARN("NVGRE tunnel '%s' does not exist", tunnel_name.c_str()); + return true; + } + + if (!tunnel_obj->isTunnelMapExists(full_tunnel_map_entry_name)) + { + SWSS_LOG_WARN("NVGRE tunnel map '%s' does not exist", + full_tunnel_map_entry_name.c_str()); + return true; + } + + if (!tunnel_obj->delMapperEntry(full_tunnel_map_entry_name)) + { + return true; + } + + return true; +} diff --git a/orchagent/nvgreorch.h b/orchagent/nvgreorch.h new file mode 100644 index 0000000000..82092565ac --- /dev/null +++ b/orchagent/nvgreorch.h @@ -0,0 +1,167 @@ +#pragma once + +#include + +#include "sai.h" +#include "orch.h" +#include "request_parser.h" +#include "portsorch.h" + +typedef enum { + MAP_T_VLAN = 0, + MAP_T_BRIDGE = 1, + MAP_T_MAX = 2 +} map_type_t; + +struct tunnel_sai_ids_t +{ + std::map tunnel_encap_id; + std::map tunnel_decap_id; + sai_object_id_t tunnel_id; + sai_object_id_t tunnel_term_id; +}; + +typedef struct nvgre_tunnel_map_entry_s +{ + sai_object_id_t map_entry_id; + sai_vlan_id_t vlan_id; + uint32_t vsid; +} nvgre_tunnel_map_entry_t; + +const request_description_t nvgre_tunnel_request_description = { + { REQ_T_STRING }, + { + { "src_ip", REQ_T_IP }, + }, + { "src_ip" } +}; + +typedef std::map NvgreTunnelMapTable; + +class NvgreTunnel +{ +public: + NvgreTunnel(std::string tunnelName, IpAddress srcIp); + ~NvgreTunnel(); + + bool isTunnelMapExists(const std::string& name) const + { + return nvgre_tunnel_map_table_.find(name) != std::end(nvgre_tunnel_map_table_); + } + + sai_object_id_t getDecapMapId(map_type_t type) const + { + return tunnel_ids_.tunnel_decap_id.at(type); + } + + sai_object_id_t getEncapMapId(map_type_t type) const 
+ { + return tunnel_ids_.tunnel_encap_id.at(type); + } + + sai_object_id_t getMapEntryId(std::string tunnel_map_entry_name) + { + return nvgre_tunnel_map_table_.at(tunnel_map_entry_name).map_entry_id; + } + + sai_object_id_t getMapEntryVlanId(std::string tunnel_map_entry_name) + { + return nvgre_tunnel_map_table_.at(tunnel_map_entry_name).vlan_id; + } + + sai_object_id_t getMapEntryVsid(std::string tunnel_map_entry_name) + { + return nvgre_tunnel_map_table_.at(tunnel_map_entry_name).vsid; + } + + bool addDecapMapperEntry(map_type_t map_type, uint32_t vsid, sai_vlan_id_t vlan_id, std::string tunnel_map_entry_name, sai_object_id_t bridge_obj=SAI_NULL_OBJECT_ID); + + bool delMapperEntry(std::string tunnel_map_entry_name); + +private: + void createNvgreMappers(); + void removeNvgreMappers(); + + void createNvgreTunnel(); + void removeNvgreTunnel(); + + sai_object_id_t sai_create_tunnel_map(sai_tunnel_map_type_t sai_tunnel_map_type); + void sai_remove_tunnel_map(sai_object_id_t tunnel_map_id); + + sai_object_id_t sai_create_tunnel(struct tunnel_sai_ids_t &ids, const sai_ip_address_t &src_ip, sai_object_id_t underlay_rif); + void sai_remove_tunnel(sai_object_id_t tunnel_id); + + sai_object_id_t sai_create_tunnel_termination(sai_object_id_t tunnel_id, const sai_ip_address_t &src_ip, sai_object_id_t default_vrid); + void sai_remove_tunnel_termination(sai_object_id_t tunnel_term_id); + + sai_object_id_t sai_create_tunnel_map_entry(map_type_t map_type, sai_uint32_t vsid, sai_vlan_id_t vlan_id, sai_object_id_t bridge_obj_id, bool encap=false); + void sai_remove_tunnel_map_entry(sai_object_id_t obj_id); + + std::string tunnel_name_; + IpAddress src_ip_; + tunnel_sai_ids_t tunnel_ids_; + + NvgreTunnelMapTable nvgre_tunnel_map_table_; +}; + +typedef std::map> NvgreTunnelTable; + +class NvgreTunnelRequest : public Request +{ +public: + NvgreTunnelRequest() : Request(nvgre_tunnel_request_description, '|') { } +}; + +class NvgreTunnelOrch : public Orch2 +{ +public: + 
NvgreTunnelOrch(DBConnector *db, const std::string& tableName) : + Orch2(db, tableName, request_) + { } + + bool isTunnelExists(const std::string& tunnelName) const + { + return nvgre_tunnel_table_.find(tunnelName) != std::end(nvgre_tunnel_table_); + } + + NvgreTunnel* getNvgreTunnel(const std::string& tunnelName) + { + return nvgre_tunnel_table_.at(tunnelName).get(); + } + +private: + virtual bool addOperation(const Request& request); + virtual bool delOperation(const Request& request); + + NvgreTunnelRequest request_; + NvgreTunnelTable nvgre_tunnel_table_; +}; + +const request_description_t nvgre_tunnel_map_request_description = { + { REQ_T_STRING, REQ_T_STRING }, + { + { "vsid", REQ_T_UINT }, + { "vlan_id", REQ_T_VLAN }, + }, + { "vsid", "vlan_id" } +}; + +class NvgreTunnelMapRequest : public Request +{ +public: + NvgreTunnelMapRequest() : Request(nvgre_tunnel_map_request_description, '|') { } +}; + +class NvgreTunnelMapOrch : public Orch2 +{ +public: + NvgreTunnelMapOrch(DBConnector *db, const std::string& tableName) : + Orch2(db, tableName, request_) + {} + +private: + virtual bool addOperation(const Request& request); + virtual bool delOperation(const Request& request); + + NvgreTunnelMapRequest request_; +}; \ No newline at end of file diff --git a/orchagent/orch.cpp b/orchagent/orch.cpp index 0992e329a4..fd9e4c2884 100644 --- a/orchagent/orch.cpp +++ b/orchagent/orch.cpp @@ -1,7 +1,4 @@ -#include -#include #include -#include #include #include #include "timestamp.h" @@ -16,12 +13,7 @@ using namespace swss; -extern int gBatchSize; - -extern bool gSwssRecord; -extern ofstream gRecordOfs; -extern bool gLogRotate; -extern string gRecordFile; +int gBatchSize = 0; Orch::Orch(DBConnector *db, const string tableName, int pri) { @@ -52,11 +44,17 @@ Orch::Orch(const vector& tables) } } -Orch::~Orch() +Orch::Orch(const vector &dbsTablesWithPri) { - if (gRecordOfs.is_open()) + for (auto it : dbsTablesWithPri) { - gRecordOfs.close(); + auto db = it.first; + auto 
tablesWithPri = it.second; + + for (const auto& table : tablesWithPri) + { + addConsumer(db, table.first, table.second); + } } } @@ -70,19 +68,15 @@ vector Orch::getSelectables() return selectables; } -void Consumer::addToSync(const KeyOpFieldsValuesTuple &entry) +void ConsumerBase::addToSync(const KeyOpFieldsValuesTuple &entry) { SWSS_LOG_ENTER(); - string key = kfvKey(entry); string op = kfvOp(entry); /* Record incoming tasks */ - if (gSwssRecord) - { - Orch::recordTuple(*this, entry); - } + Recorder::Instance().swss.record(dumpTuple(entry)); /* * m_toSync is a multimap which will allow one key with multiple values, @@ -157,7 +151,7 @@ void Consumer::addToSync(const KeyOpFieldsValuesTuple &entry) } -size_t Consumer::addToSync(const std::deque &entries) +size_t ConsumerBase::addToSync(const std::deque &entries) { SWSS_LOG_ENTER(); @@ -194,9 +188,7 @@ size_t Consumer::refillToSync(Table* table) size_t Consumer::refillToSync() { - ConsumerTableBase *consumerTable = getConsumerTable(); - - auto subTable = dynamic_cast(consumerTable); + auto subTable = dynamic_cast(getSelectable()); if (subTable != NULL) { size_t update_size = 0; @@ -213,35 +205,14 @@ size_t Consumer::refillToSync() else { // consumerTable is either ConsumerStateTable or ConsumerTable - auto db = consumerTable->getDbConnector(); - string tableName = consumerTable->getTableName(); + auto db = getDbConnector(); + string tableName = getTableName(); auto table = Table(db, tableName); return refillToSync(&table); } } -void Consumer::execute() -{ - SWSS_LOG_ENTER(); - - size_t update_size = 0; - do - { - std::deque entries; - getConsumerTable()->pops(entries); - update_size = addToSync(entries); - } while (update_size != 0); - - drain(); -} - -void Consumer::drain() -{ - if (!m_toSync.empty()) - m_orch->doTask(*this); -} - -string Consumer::dumpTuple(const KeyOpFieldsValuesTuple &tuple) +string ConsumerBase::dumpTuple(const KeyOpFieldsValuesTuple &tuple) { string s = getTableName() + 
getConsumerTable()->getTableNameSeparator() + kfvKey(tuple) + "|" + kfvOp(tuple); @@ -253,7 +224,7 @@ string Consumer::dumpTuple(const KeyOpFieldsValuesTuple &tuple) return s; } -void Consumer::dumpPendingTasks(vector &ts) +void ConsumerBase::dumpPendingTasks(vector &ts) { for (auto &tm : m_toSync) { @@ -265,6 +236,28 @@ void Consumer::dumpPendingTasks(vector &ts) } } +void Consumer::execute() +{ + SWSS_LOG_ENTER(); + + size_t update_size = 0; + auto table = static_cast(getSelectable()); + do + { + std::deque entries; + table->pops(entries); + update_size = addToSync(entries); + } while (update_size != 0); + + drain(); +} + +void Consumer::drain() +{ + if (!m_toSync.empty()) + m_orch->doTask(*this); +} + size_t Orch::addExistingData(const string& tableName) { auto consumer = dynamic_cast(getExecutor(tableName)); @@ -357,6 +350,11 @@ bool Orch::parseReference(type_map &type_maps, string &ref_in, const string &typ SWSS_LOG_INFO("map:%s does not contain object with name:%s\n", type_name.c_str(), ref_in.c_str()); return false; } + if (obj_it->second.m_pendingRemove) + { + SWSS_LOG_NOTICE("map:%s contains a pending removed object %s, skip\n", type_name.c_str(), ref_in.c_str()); + return false; + } object_name = ref_in; SWSS_LOG_DEBUG("parsed: type_name:%s, object_name:%s", type_name.c_str(), object_name.c_str()); return true; @@ -388,7 +386,7 @@ ref_resolve_status Orch::resolveFieldRefValue( { return ref_resolve_status::not_resolved; } - else if (ref_type_name.empty() && object_name.empty()) + else if (object_name.empty()) { return ref_resolve_status::empty; } @@ -410,7 +408,8 @@ void Orch::removeMeFromObjsReferencedByMe( const string &table, const string &obj_name, const string &field, - const string &old_referenced_obj_name) + const string &old_referenced_obj_name, + bool remove_field) { vector objects = tokenize(old_referenced_obj_name, list_item_delimiter); for (auto &obj : objects) @@ -426,6 +425,12 @@ void Orch::removeMeFromObjsReferencedByMe( 
referenced_table.c_str(), ref_obj_name.c_str(), to_string(old_referenced_obj.m_objsDependingOnMe.size()).c_str()); } + + if (remove_field) + { + auto &referencing_object = (*type_maps[table])[obj_name]; + referencing_object.m_objsReferencingByMe.erase(field); + } } void Orch::setObjectReference( @@ -439,7 +444,7 @@ void Orch::setObjectReference( auto field_ref = obj.m_objsReferencingByMe.find(field); if (field_ref != obj.m_objsReferencingByMe.end()) - removeMeFromObjsReferencedByMe(type_maps, table, obj_name, field, field_ref->second); + removeMeFromObjsReferencedByMe(type_maps, table, obj_name, field, field_ref->second, false); obj.m_objsReferencingByMe[field] = referenced_obj; @@ -459,16 +464,44 @@ void Orch::setObjectReference( } } +bool Orch::doesObjectExist( + type_map &type_maps, + const string &table, + const string &obj_name, + const string &field, + string &referenced_obj) +{ + auto &&searchRef = (*type_maps[table]).find(obj_name); + if (searchRef != (*type_maps[table]).end()) + { + auto &obj = searchRef->second; + auto &&searchReferencingObjectRef = obj.m_objsReferencingByMe.find(field); + if (searchReferencingObjectRef != obj.m_objsReferencingByMe.end()) + { + referenced_obj = searchReferencingObjectRef->second; + return true; + } + } + + return false; +} + void Orch::removeObject( type_map &type_maps, const string &table, const string &obj_name) { - auto &obj = (*type_maps[table])[obj_name]; + auto &&searchRef = (*type_maps[table]).find(obj_name); + if (searchRef == (*type_maps[table]).end()) + { + return; + } + + auto &obj = searchRef->second; for (auto field_ref : obj.m_objsReferencingByMe) { - removeMeFromObjsReferencedByMe(type_maps, table, obj_name, field_ref.first, field_ref.second); + removeMeFromObjsReferencedByMe(type_maps, table, obj_name, field_ref.first, field_ref.second, false); } // Update the field store @@ -522,43 +555,9 @@ void Orch::dumpPendingTasks(vector &ts) } } -void Orch::logfileReopen() -{ - gRecordOfs.close(); - - /* - * On log 
rotate we will use the same file name, we are assuming that - * logrotate daemon move filename to filename.1 and we will create new - * empty file here. - */ - - gRecordOfs.open(gRecordFile, std::ofstream::out | std::ofstream::app); - - if (!gRecordOfs.is_open()) - { - SWSS_LOG_ERROR("failed to open gRecordOfs file %s: %s", gRecordFile.c_str(), strerror(errno)); - return; - } -} - -void Orch::recordTuple(Consumer &consumer, const KeyOpFieldsValuesTuple &tuple) -{ - string s = consumer.dumpTuple(tuple); - - gRecordOfs << getTimestamp() << "|" << s << endl; - - if (gLogRotate) - { - gLogRotate = false; - - logfileReopen(); - } -} - -string Orch::dumpTuple(Consumer &consumer, const KeyOpFieldsValuesTuple &tuple) +void Orch::flushResponses() { - string s = consumer.dumpTuple(tuple); - return s; + m_publisher.flush(); } ref_resolve_status Orch::resolveFieldRefArray( @@ -816,193 +815,6 @@ Executor *Orch::getExecutor(string executorName) return NULL; } -task_process_status Orch::handleSaiCreateStatus(sai_api_t api, sai_status_t status, void *context) -{ - /* - * This function aims to provide coarse handling of failures in sairedis create - * operation (i.e., notify users by throwing excepions when failures happen). - * Return value: task_success - Handled the status successfully. No need to retry this SAI operation. - * task_need_retry - Cannot handle the status. Need to retry the SAI operation. - * task_failed - Failed to handle the status but another attempt is unlikely to resolve the failure. - * TODO: 1. Add general handling logic for specific statuses (e.g., SAI_STATUS_ITEM_ALREADY_EXISTS) - * 2. Develop fine-grain failure handling mechanisms and replace this coarse handling - * in each orch. - * 3. Take the type of sai api into consideration. 
- */ - switch (api) - { - case SAI_API_FDB: - switch (status) - { - case SAI_STATUS_SUCCESS: - SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiCreateStatus"); - return task_success; - case SAI_STATUS_ITEM_ALREADY_EXISTS: - /* - * In FDB creation, there are scenarios where the hardware learns an FDB entry before orchagent. - * In such cases, the FDB SAI creation would report the status of SAI_STATUS_ITEM_ALREADY_EXISTS, - * and orchagent should ignore the error and treat it as entry was explicitly created. - */ - return task_success; - default: - SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - exit(EXIT_FAILURE); - } - break; - case SAI_API_HOSTIF: - switch (status) - { - case SAI_STATUS_SUCCESS: - return task_success; - case SAI_STATUS_FAILURE: - /* - * Host interface maybe failed due to lane not available. - * In some scenarios, like SONiC virtual machine, the invalid lane may be not enabled by VM configuration, - * So just ignore the failure and report an error log. 
- */ - return task_ignore; - default: - SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - exit(EXIT_FAILURE); - } - default: - switch (status) - { - case SAI_STATUS_SUCCESS: - SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiCreateStatus"); - return task_success; - default: - SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - exit(EXIT_FAILURE); - } - } - return task_need_retry; -} - -task_process_status Orch::handleSaiSetStatus(sai_api_t api, sai_status_t status, void *context) -{ - /* - * This function aims to provide coarse handling of failures in sairedis set - * operation (i.e., notify users by throwing excepions when failures happen). - * Return value: task_success - Handled the status successfully. No need to retry this SAI operation. - * task_need_retry - Cannot handle the status. Need to retry the SAI operation. - * task_failed - Failed to handle the status but another attempt is unlikely to resolve the failure. - * TODO: 1. Add general handling logic for specific statuses - * 2. Develop fine-grain failure handling mechanisms and replace this coarse handling - * in each orch. - * 3. Take the type of sai api into consideration. - */ - if (status == SAI_STATUS_SUCCESS) - { - SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiSetStatus"); - return task_success; - } - - switch (api) - { - case SAI_API_PORT: - switch (status) - { - case SAI_STATUS_INVALID_ATTR_VALUE_0: - /* - * If user gives an invalid attribute value, no need to retry or exit orchagent, just fail the current task - * and let user correct the configuration. 
- */ - SWSS_LOG_ERROR("Encountered SAI_STATUS_INVALID_ATTR_VALUE_0 in set operation, task failed, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - return task_failed; - default: - SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - exit(EXIT_FAILURE); - } - default: - SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - exit(EXIT_FAILURE); - } - - return task_need_retry; -} - -task_process_status Orch::handleSaiRemoveStatus(sai_api_t api, sai_status_t status, void *context) -{ - /* - * This function aims to provide coarse handling of failures in sairedis remove - * operation (i.e., notify users by throwing excepions when failures happen). - * Return value: task_success - Handled the status successfully. No need to retry this SAI operation. - * task_need_retry - Cannot handle the status. Need to retry the SAI operation. - * task_failed - Failed to handle the status but another attempt is unlikely to resolve the failure. - * TODO: 1. Add general handling logic for specific statuses (e.g., SAI_STATUS_OBJECT_IN_USE, - * SAI_STATUS_ITEM_NOT_FOUND) - * 2. Develop fine-grain failure handling mechanisms and replace this coarse handling - * in each orch. - * 3. Take the type of sai api into consideration. 
- */ - switch (status) - { - case SAI_STATUS_SUCCESS: - SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiRemoveStatus"); - return task_success; - default: - SWSS_LOG_ERROR("Encountered failure in remove operation, exiting orchagent, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - exit(EXIT_FAILURE); - } - return task_need_retry; -} - -task_process_status Orch::handleSaiGetStatus(sai_api_t api, sai_status_t status, void *context) -{ - /* - * This function aims to provide coarse handling of failures in sairedis get - * operation (i.e., notify users by throwing excepions when failures happen). - * Return value: task_success - Handled the status successfully. No need to retry this SAI operation. - * task_need_retry - Cannot handle the status. Need to retry the SAI operation. - * task_failed - Failed to handle the status but another attempt is unlikely to resolve the failure. - * TODO: 1. Add general handling logic for specific statuses - * 2. Develop fine-grain failure handling mechanisms and replace this coarse handling - * in each orch. - * 3. Take the type of sai api into consideration. - */ - switch (status) - { - case SAI_STATUS_SUCCESS: - SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiGetStatus"); - return task_success; - case SAI_STATUS_NOT_IMPLEMENTED: - SWSS_LOG_ERROR("Encountered failure in get operation due to the function is not implemented, exiting orchagent, SAI API: %s", - sai_serialize_api(api).c_str()); - throw std::logic_error("SAI get function not implemented"); - default: - SWSS_LOG_ERROR("Encountered failure in get operation, SAI API: %s, status: %s", - sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); - } - return task_failed; -} - -bool Orch::parseHandleSaiStatusFailure(task_process_status status) -{ - /* - * This function parses task process status from SAI failure handling function to whether a retry is needed. 
- * Return value: true - no retry is needed. - * false - retry is needed. - */ - switch (status) - { - case task_need_retry: - return false; - case task_failed: - return true; - default: - SWSS_LOG_WARN("task_process_status %d is not expected in parseHandleSaiStatusFailure", status); - } - return true; -} - void Orch2::doTask(Consumer &consumer) { SWSS_LOG_ENTER(); diff --git a/orchagent/orch.h b/orchagent/orch.h index 46a5d446ce..a0570f2411 100644 --- a/orchagent/orch.h +++ b/orchagent/orch.h @@ -9,8 +9,8 @@ #include extern "C" { -#include "sai.h" -#include "saistatus.h" +#include +#include } #include "dbconnector.h" @@ -21,6 +21,7 @@ extern "C" { #include "selectabletimer.h" #include "macaddress.h" #include "response_publisher.h" +#include "recorder.h" const char delimiter = ':'; const char list_item_delimiter = ','; @@ -34,11 +35,13 @@ const char state_db_key_delimiter = '|'; #define INVM_PLATFORM_SUBSTRING "innovium" #define MLNX_PLATFORM_SUBSTRING "mellanox" #define BRCM_PLATFORM_SUBSTRING "broadcom" +#define BRCM_DNX_PLATFORM_SUBSTRING "broadcom-dnx" #define BFN_PLATFORM_SUBSTRING "barefoot" #define VS_PLATFORM_SUBSTRING "vs" #define NPS_PLATFORM_SUBSTRING "nephos" #define MRVL_PLATFORM_SUBSTRING "marvell" #define CISCO_8000_PLATFORM_SUBSTRING "cisco-8000" +#define XS_PLATFORM_SUBSTRING "xsight" #define CONFIGDB_KEY_SEPARATOR "|" #define DEFAULT_KEY_SEPARATOR ":" @@ -65,6 +68,7 @@ typedef struct // multiple objects being referenced are separated by ',' std::map m_objsReferencingByMe; sai_object_id_t m_saiObjectId; + bool m_pendingRemove; } referenced_object; typedef std::map object_reference_map; @@ -129,49 +133,69 @@ class Executor : public swss::Selectable swss::Selectable *getSelectable() const { return m_selectable; } }; -class Consumer : public Executor { +class ConsumerBase : public Executor { +public: + ConsumerBase(swss::Selectable *selectable, Orch *orch, const std::string &name) + : Executor(selectable, orch, name) + { + } + + virtual 
swss::TableBase *getConsumerTable() const = 0; + + std::string getTableName() const + { + return getConsumerTable()->getTableName(); + } + + std::string dumpTuple(const swss::KeyOpFieldsValuesTuple &tuple); + void dumpPendingTasks(std::vector &ts); + + /* Store the latest 'golden' status */ + // TODO: hide? + SyncMap m_toSync; + + /* record the tuple */ + void recordTuple(const swss::KeyOpFieldsValuesTuple &tuple); + + void addToSync(const swss::KeyOpFieldsValuesTuple &entry); + + // Returns: the number of entries added to m_toSync + size_t addToSync(const std::deque &entries); +}; + +class Consumer : public ConsumerBase { public: Consumer(swss::ConsumerTableBase *select, Orch *orch, const std::string &name) - : Executor(select, orch, name) + : ConsumerBase(select, orch, name) { } - swss::ConsumerTableBase *getConsumerTable() const + swss::TableBase *getConsumerTable() const override { + // ConsumerTableBase is a subclass of TableBase return static_cast(getSelectable()); } - std::string getTableName() const + const swss::DBConnector* getDbConnector() const { - return getConsumerTable()->getTableName(); + auto table = static_cast(getSelectable()); + return table->getDbConnector(); } int getDbId() const { - return getConsumerTable()->getDbConnector()->getDbId(); + return getDbConnector()->getDbId(); } std::string getDbName() const { - return getConsumerTable()->getDbConnector()->getDbName(); + return getDbConnector()->getDbName(); } - std::string dumpTuple(const swss::KeyOpFieldsValuesTuple &tuple); - void dumpPendingTasks(std::vector &ts); - size_t refillToSync(); size_t refillToSync(swss::Table* table); - void execute(); - void drain(); - - /* Store the latest 'golden' status */ - // TODO: hide? 
- SyncMap m_toSync; - - void addToSync(const swss::KeyOpFieldsValuesTuple &entry); - - // Returns: the number of entries added to m_toSync - size_t addToSync(const std::deque &entries); + void execute() override; + void drain() override; }; typedef std::map> ConsumerMap; @@ -188,6 +212,7 @@ typedef enum typedef std::pair TableConnector; typedef std::pair> TablesConnector; +typedef std::pair &> PriTablesConnector; class Orch { @@ -196,7 +221,9 @@ class Orch Orch(swss::DBConnector *db, const std::vector &tableNames); Orch(swss::DBConnector *db, const std::vector &tableNameWithPri); Orch(const std::vector& tables); - virtual ~Orch(); + Orch(const std::vector &dbsTablesWithPri); + + virtual ~Orch() = default; std::vector getSelectables(); @@ -212,19 +239,19 @@ class Orch virtual void doTask(); /* Run doTask against a specific executor */ - virtual void doTask(Consumer &consumer) = 0; + virtual void doTask(Consumer &consumer) { }; virtual void doTask(swss::NotificationConsumer &consumer) { } virtual void doTask(swss::SelectableTimer &timer) { } - /* TODO: refactor recording */ - static void recordTuple(Consumer &consumer, const swss::KeyOpFieldsValuesTuple &tuple); - void dumpPendingTasks(std::vector &ts); + + /** + * @brief Flush pending responses + */ + void flushResponses(); protected: ConsumerMap m_consumerMap; - static void logfileReopen(); - std::string dumpTuple(Consumer &consumer, const swss::KeyOpFieldsValuesTuple &tuple); ref_resolve_status resolveFieldRefValue(type_map&, const std::string&, const std::string&, swss::KeyOpFieldsValuesTuple&, sai_object_id_t&, std::string&); std::set generateIdListFromMap(unsigned long idsMap, sai_uint32_t maxId); unsigned long generateBitMapFromIdsStr(const std::string &idsStr); @@ -233,24 +260,18 @@ class Orch bool parseReference(type_map &type_maps, std::string &ref, const std::string &table_name, std::string &object_name); ref_resolve_status resolveFieldRefArray(type_map&, const std::string&, const std::string&, 
swss::KeyOpFieldsValuesTuple&, std::vector&, std::string&); void setObjectReference(type_map&, const std::string&, const std::string&, const std::string&, const std::string&); + bool doesObjectExist(type_map&, const std::string&, const std::string&, const std::string&, std::string&); void removeObject(type_map&, const std::string&, const std::string&); bool isObjectBeingReferenced(type_map&, const std::string&, const std::string&); std::string objectReferenceInfo(type_map&, const std::string&, const std::string&); + void removeMeFromObjsReferencedByMe(type_map &type_maps, const std::string &table, const std::string &obj_name, const std::string &field, const std::string &old_referenced_obj_name, bool remove_field=true); /* Note: consumer will be owned by this class */ void addExecutor(Executor* executor); Executor *getExecutor(std::string executorName); - /* Handling SAI status*/ - virtual task_process_status handleSaiCreateStatus(sai_api_t api, sai_status_t status, void *context = nullptr); - virtual task_process_status handleSaiSetStatus(sai_api_t api, sai_status_t status, void *context = nullptr); - virtual task_process_status handleSaiRemoveStatus(sai_api_t api, sai_status_t status, void *context = nullptr); - virtual task_process_status handleSaiGetStatus(sai_api_t api, sai_status_t status, void *context = nullptr); - bool parseHandleSaiStatusFailure(task_process_status status); - ResponsePublisher m_publisher; private: - void removeMeFromObjsReferencedByMe(type_map &type_maps, const std::string &table, const std::string &obj_name, const std::string &field, const std::string &old_referenced_obj_name); void addConsumer(swss::DBConnector *db, std::string tableName, int pri = default_orch_pri); }; diff --git a/orchagent/orchdaemon.cpp b/orchagent/orchdaemon.cpp index 0341f69ea9..98ea7c3f91 100644 --- a/orchagent/orchdaemon.cpp +++ b/orchagent/orchdaemon.cpp @@ -6,6 +6,7 @@ #include "logger.h" #include #include "warm_restart.h" +#include #define 
SAI_SWITCH_ATTR_CUSTOM_RANGE_BASE SAI_SWITCH_ATTR_CUSTOM_RANGE_START #include "sairedis.h" @@ -18,9 +19,11 @@ using namespace swss; #define SELECT_TIMEOUT 1000 #define PFC_WD_POLL_MSECS 100 +/* orchagent heart beat message interval */ +#define HEART_BEAT_INTERVAL_MSECS 10 * 1000 + extern sai_switch_api_t* sai_switch_api; extern sai_object_id_t gSwitchId; -extern bool gSaiRedisLogRotate; extern void syncd_apply_view(); /* @@ -41,9 +44,11 @@ PbhOrch *gPbhOrch; MirrorOrch *gMirrorOrch; CrmOrch *gCrmOrch; BufferOrch *gBufferOrch; +QosOrch *gQosOrch; SwitchOrch *gSwitchOrch; Directory gDirectory; NatOrch *gNatOrch; +PolicerOrch *gPolicerOrch; MlagOrch *gMlagOrch; IsoGrpOrch *gIsoGrpOrch; MACsecOrch *gMacsecOrch; @@ -51,8 +56,12 @@ CoppOrch *gCoppOrch; P4Orch *gP4Orch; BfdOrch *gBfdOrch; Srv6Orch *gSrv6Orch; +FlowCounterRouteOrch *gFlowCounterRouteOrch; +DebugCounterOrch *gDebugCounterOrch; +MonitorOrch *gMonitorOrch; bool gIsNatSupported = false; +event_handle_t g_events_handle; #define DEFAULT_MAX_BULK_SIZE 1000 size_t gMaxBulkSize = DEFAULT_MAX_BULK_SIZE; @@ -65,6 +74,7 @@ OrchDaemon::OrchDaemon(DBConnector *applDb, DBConnector *configDb, DBConnector * { SWSS_LOG_ENTER(); m_select = new Select(); + m_lastHeartBeat = std::chrono::high_resolution_clock::now(); } OrchDaemon::~OrchDaemon() @@ -85,6 +95,8 @@ OrchDaemon::~OrchDaemon() delete(*it); } delete m_select; + + events_deinit_publisher(g_events_handle); } bool OrchDaemon::init() @@ -93,13 +105,17 @@ bool OrchDaemon::init() string platform = getenv("platform") ? 
getenv("platform") : ""; + g_events_handle = events_init_publisher("sonic-events-swss"); + gCrmOrch = new CrmOrch(m_configDb, CFG_CRM_TABLE_NAME); - TableConnector stateDbSwitchTable(m_stateDb, "SWITCH_CAPABILITY"); + TableConnector stateDbSwitchTable(m_stateDb, STATE_SWITCH_CAPABILITY_TABLE_NAME); TableConnector app_switch_table(m_applDb, APP_SWITCH_TABLE_NAME); TableConnector conf_asic_sensors(m_configDb, CFG_ASIC_SENSORS_TABLE_NAME); + TableConnector conf_switch_hash(m_configDb, CFG_SWITCH_HASH_TABLE_NAME); vector switch_tables = { + conf_switch_hash, conf_asic_sensors, app_switch_table }; @@ -116,19 +132,39 @@ bool OrchDaemon::init() { APP_LAG_MEMBER_TABLE_NAME, portsorch_base_pri } }; + vector state_transceiver_tables = { + {STATE_TRANSCEIVER_INFO_TABLE_NAME, portsorch_base_pri} + }; + + // PriTablesConnector appPriTables(m_applDb, ports_tables); + // PriTablesConnector statePriTables(m_stateDb, state_transceiver_tables); + + // vector portsOrchTables = { + // appPriTables, + // statePriTables + // }; + + gPortsOrch = new PortsOrch(m_applDb, m_stateDb, ports_tables, m_chassisAppDb); + // gPortsOrch = new PortsOrch(m_applDb, m_stateDb, portsOrchTables, m_chassisAppDb); + vector app_fdb_tables = { { APP_FDB_TABLE_NAME, FdbOrch::fdborch_pri}, { APP_VXLAN_FDB_TABLE_NAME, FdbOrch::fdborch_pri}, { APP_MCLAG_FDB_TABLE_NAME, FdbOrch::fdborch_pri} }; - gPortsOrch = new PortsOrch(m_applDb, m_stateDb, ports_tables, m_chassisAppDb); TableConnector stateDbFdb(m_stateDb, STATE_FDB_TABLE_NAME); TableConnector stateMclagDbFdb(m_stateDb, STATE_MCLAG_REMOTE_FDB_TABLE_NAME); gFdbOrch = new FdbOrch(m_applDb, app_fdb_tables, stateDbFdb, stateMclagDbFdb, gPortsOrch); TableConnector stateDbBfdSessionTable(m_stateDb, STATE_BFD_SESSION_TABLE_NAME); gBfdOrch = new BfdOrch(m_applDb, APP_BFD_SESSION_TABLE_NAME, stateDbBfdSessionTable); + static const vector route_pattern_tables = { + CFG_FLOW_COUNTER_ROUTE_PATTERN_TABLE_NAME, + }; + gFlowCounterRouteOrch = new 
FlowCounterRouteOrch(m_configDb, route_pattern_tables); + gDirectory.set(gFlowCounterRouteOrch); + vector vnet_tables = { APP_VNET_RT_TABLE_NAME, APP_VNET_RT_TUNNEL_TABLE_NAME @@ -149,6 +185,8 @@ bool OrchDaemon::init() gDirectory.set(vnet_rt_orch); VRFOrch *vrf_orch = new VRFOrch(m_applDb, APP_VRF_TABLE_NAME, m_stateDb, STATE_VRF_OBJECT_TABLE_NAME); gDirectory.set(vrf_orch); + gMonitorOrch = new MonitorOrch(m_stateDb, STATE_VNET_MONITOR_TABLE_NAME); + gDirectory.set(gMonitorOrch); const vector chassis_frontend_tables = { CFG_PASS_THROUGH_ROUTE_TABLE_NAME, @@ -200,6 +238,10 @@ bool OrchDaemon::init() EvpnNvoOrch* evpn_nvo_orch = new EvpnNvoOrch(m_applDb, APP_VXLAN_EVPN_NVO_TABLE_NAME); gDirectory.set(evpn_nvo_orch); + NvgreTunnelOrch *nvgre_tunnel_orch = new NvgreTunnelOrch(m_configDb, CFG_NVGRE_TUNNEL_TABLE_NAME); + gDirectory.set(nvgre_tunnel_orch); + NvgreTunnelMapOrch *nvgre_tunnel_map_orch = new NvgreTunnelMapOrch(m_configDb, CFG_NVGRE_TUNNEL_MAP_TABLE_NAME); + gDirectory.set(nvgre_tunnel_map_orch); vector qos_tables = { CFG_TC_TO_QUEUE_MAP_TABLE_NAME, @@ -214,9 +256,11 @@ bool OrchDaemon::init() CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, CFG_DSCP_TO_FC_MAP_TABLE_NAME, - CFG_EXP_TO_FC_MAP_TABLE_NAME + CFG_EXP_TO_FC_MAP_TABLE_NAME, + CFG_TC_TO_DOT1P_MAP_TABLE_NAME, + CFG_TC_TO_DSCP_MAP_TABLE_NAME }; - QosOrch *qos_orch = new QosOrch(m_configDb, qos_tables); + gQosOrch = new QosOrch(m_configDb, qos_tables); vector buffer_tables = { APP_BUFFER_POOL_TABLE_NAME, @@ -228,11 +272,17 @@ bool OrchDaemon::init() }; gBufferOrch = new BufferOrch(m_applDb, m_configDb, m_stateDb, buffer_tables); - PolicerOrch *policer_orch = new PolicerOrch(m_configDb, "POLICER"); + vector policer_tables = { + TableConnector(m_configDb, CFG_POLICER_TABLE_NAME), + TableConnector(m_configDb, CFG_PORT_STORM_CONTROL_TABLE_NAME) + }; + + TableConnector stateDbStorm(m_stateDb, "BUM_STORM_CAPABILITY"); + gPolicerOrch = new 
PolicerOrch(policer_tables, gPortsOrch); TableConnector stateDbMirrorSession(m_stateDb, STATE_MIRROR_SESSION_TABLE_NAME); TableConnector confDbMirrorSession(m_configDb, CFG_MIRROR_SESSION_TABLE_NAME); - gMirrorOrch = new MirrorOrch(stateDbMirrorSession, confDbMirrorSession, gPortsOrch, gRouteOrch, gNeighOrch, gFdbOrch, policer_orch); + gMirrorOrch = new MirrorOrch(stateDbMirrorSession, confDbMirrorSession, gPortsOrch, gRouteOrch, gNeighOrch, gFdbOrch, gPolicerOrch); TableConnector confDbAclTable(m_configDb, CFG_ACL_TABLE_TABLE_NAME); TableConnector confDbAclTableType(m_configDb, CFG_ACL_TABLE_TYPE_TABLE_NAME); @@ -277,7 +327,7 @@ bool OrchDaemon::init() CFG_DEBUG_COUNTER_DROP_REASON_TABLE_NAME }; - DebugCounterOrch *debug_counter_orch = new DebugCounterOrch(m_configDb, debug_counter_tables, 1000); + gDebugCounterOrch = new DebugCounterOrch(m_configDb, debug_counter_tables, 1000); const int natorch_base_pri = 50; @@ -325,7 +375,7 @@ bool OrchDaemon::init() * when iterating ConsumerMap. This is ensured implicitly by the order of keys in ordered map. 
* For cases when Orch has to process tables in specific order, like PortsOrch during warm start, it has to override Orch::doTask() */ - m_orchList = { gSwitchOrch, gCrmOrch, gPortsOrch, gBufferOrch, mux_orch, mux_cb_orch, gIntfsOrch, gNeighOrch, gNhgMapOrch, gNhgOrch, gCbfNhgOrch, gRouteOrch, gCoppOrch, qos_orch, wm_orch, policer_orch, tunnel_decap_orch, sflow_orch, debug_counter_orch, gMacsecOrch, gBfdOrch, gSrv6Orch}; + m_orchList = { gSwitchOrch, gCrmOrch, gPortsOrch, gBufferOrch, gFlowCounterRouteOrch, gIntfsOrch, gNeighOrch, gNhgMapOrch, gNhgOrch, gCbfNhgOrch, gRouteOrch, gCoppOrch, gQosOrch, wm_orch, gPolicerOrch, tunnel_decap_orch, sflow_orch, gDebugCounterOrch, gMacsecOrch, gBfdOrch, gSrv6Orch, mux_orch, mux_cb_orch, gMonitorOrch}; bool initialize_dtel = false; if (platform == BFN_PLATFORM_SUBSTRING || platform == VS_PLATFORM_SUBSTRING) @@ -424,13 +474,15 @@ bool OrchDaemon::init() m_orchList.push_back(gIsoGrpOrch); m_orchList.push_back(gFgNhgOrch); m_orchList.push_back(mux_st_orch); + m_orchList.push_back(nvgre_tunnel_orch); + m_orchList.push_back(nvgre_tunnel_map_orch); if (m_fabricEnabled) { vector fabric_port_tables = { // empty for now }; - gFabricPortsOrch = new FabricPortsOrch(m_applDb, fabric_port_tables); + gFabricPortsOrch = new FabricPortsOrch(m_applDb, fabric_port_tables, m_fabricPortStatEnabled, m_fabricQueueStatEnabled); m_orchList.push_back(gFabricPortsOrch); } @@ -574,13 +626,26 @@ bool OrchDaemon::init() SAI_QUEUE_ATTR_PAUSE_STATUS, }; - m_orchList.push_back(new PfcWdSwOrch( - m_configDb, - pfc_wd_tables, - portStatIds, - queueStatIds, - queueAttrIds, - PFC_WD_POLL_MSECS)); + if(gSwitchOrch->checkPfcDlrInitEnable()) + { + m_orchList.push_back(new PfcWdSwOrch( + m_configDb, + pfc_wd_tables, + portStatIds, + queueStatIds, + queueAttrIds, + PFC_WD_POLL_MSECS)); + } + else + { + m_orchList.push_back(new PfcWdSwOrch( + m_configDb, + pfc_wd_tables, + portStatIds, + queueStatIds, + queueAttrIds, + PFC_WD_POLL_MSECS)); + } } else if (platform == 
CISCO_8000_PLATFORM_SUBSTRING) { static const vector portStatIds; @@ -633,27 +698,35 @@ void OrchDaemon::flush() if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to flush redis pipeline %d", status); - exit(EXIT_FAILURE); + handleSaiFailure(true); } - // check if logroate is requested - if (gSaiRedisLogRotate) + for (auto* orch: m_orchList) { - SWSS_LOG_NOTICE("performing log rotate"); - - gSaiRedisLogRotate = false; - - attr.id = SAI_REDIS_SWITCH_ATTR_PERFORM_LOG_ROTATE; - attr.value.booldata = true; + orch->flushResponses(); + } +} - sai_switch_api->set_switch_attribute(gSwitchId, &attr); +/* Release the file handle so the log can be rotated */ +void OrchDaemon::logRotate() { + SWSS_LOG_ENTER(); + sai_attribute_t attr; + attr.id = SAI_REDIS_SWITCH_ATTR_PERFORM_LOG_ROTATE; + attr.value.booldata = true; + sai_status_t status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to release the file handle on sairedis log %d", status); } } + void OrchDaemon::start() { SWSS_LOG_ENTER(); + Recorder::Instance().sairedis.setRotate(false); + for (Orch *o : m_orchList) { m_select->addSelectables(o->getSelectables()); @@ -669,6 +742,7 @@ void OrchDaemon::start() ret = m_select->select(&s, SELECT_TIMEOUT); auto tend = std::chrono::high_resolution_clock::now(); + heartBeat(tend); auto diff = std::chrono::duration_cast(tend - tstart); @@ -696,6 +770,14 @@ void OrchDaemon::start() continue; } + // check if logroate is requested + if (Recorder::Instance().sairedis.isRotate()) + { + SWSS_LOG_NOTICE("Performing %s log rotate", Recorder::Instance().sairedis.getName().c_str()); + Recorder::Instance().sairedis.setRotate(false); + logRotate(); + } + auto *c = (Executor *)s; c->execute(); @@ -894,6 +976,18 @@ void OrchDaemon::addOrchList(Orch *o) m_orchList.push_back(o); } +void OrchDaemon::heartBeat(std::chrono::time_point tcurrent) +{ + // output heart beat message to SYSLOG + auto diff = 
std::chrono::duration_cast(tcurrent - m_lastHeartBeat); + if (diff.count() >= HEART_BEAT_INTERVAL_MSECS) + { + m_lastHeartBeat = tcurrent; + // output heart beat message to supervisord with 'PROCESS_COMMUNICATION_STDOUT' event: http://supervisord.org/events.html + cout << "heartbeat" << endl; + } +} + FabricOrchDaemon::FabricOrchDaemon(DBConnector *applDb, DBConnector *configDb, DBConnector *stateDb, DBConnector *chassisAppDb) : OrchDaemon(applDb, configDb, stateDb, chassisAppDb), m_applDb(applDb), diff --git a/orchagent/orchdaemon.h b/orchagent/orchdaemon.h index ea49affbfc..32fab2b9b0 100644 --- a/orchagent/orchdaemon.h +++ b/orchagent/orchdaemon.h @@ -11,6 +11,7 @@ #include "intfsorch.h" #include "neighorch.h" #include "routeorch.h" +#include "flowcounterrouteorch.h" #include "nhgorch.h" #include "cbf/cbfnhgorch.h" #include "cbf/nhgmaporch.h" @@ -43,6 +44,8 @@ #include "p4orch/p4orch.h" #include "bfdorch.h" #include "srv6orch.h" +#include "nvgreorch.h" +#include using namespace swss; @@ -65,6 +68,15 @@ class OrchDaemon { m_fabricEnabled = enabled; } + void setFabricPortStatEnabled(bool enabled) + { + m_fabricPortStatEnabled = enabled; + } + void setFabricQueueStatEnabled(bool enabled) + { + m_fabricQueueStatEnabled = enabled; + } + void logRotate(); private: DBConnector *m_applDb; DBConnector *m_configDb; @@ -72,11 +84,17 @@ class OrchDaemon DBConnector *m_chassisAppDb; bool m_fabricEnabled = false; + bool m_fabricPortStatEnabled = true; + bool m_fabricQueueStatEnabled = true; std::vector m_orchList; Select *m_select; + + std::chrono::time_point m_lastHeartBeat; void flush(); + + void heartBeat(std::chrono::time_point tcurrent); }; class FabricOrchDaemon : public OrchDaemon diff --git a/orchagent/p4orch/acl_rule_manager.cpp b/orchagent/p4orch/acl_rule_manager.cpp index 3984fd956f..985ea4b1c7 100644 --- a/orchagent/p4orch/acl_rule_manager.cpp +++ b/orchagent/p4orch/acl_rule_manager.cpp @@ -4,8 +4,11 @@ #include #include +#include "SaiAttributeList.h" #include 
"converter.h" #include "crmorch.h" +#include "dbconnector.h" +#include "intfsorch.h" #include "json.hpp" #include "logger.h" #include "orch.h" @@ -13,12 +16,15 @@ #include "p4orch/p4orch_util.h" #include "portsorch.h" #include "sai_serialize.h" +#include "table.h" #include "tokenize.h" extern "C" { #include "sai.h" } +using ::p4orch::kTableKeyDelimiter; + extern sai_object_id_t gSwitchId; extern sai_acl_api_t *sai_acl_api; extern sai_policer_api_t *sai_policer_api; @@ -37,9 +43,134 @@ const std::string concatTableNameAndRuleKey(const std::string &table_name, const return table_name + kTableKeyDelimiter + rule_key; } +std::vector getRuleSaiAttrs(const P4AclRule &acl_rule) +{ + std::vector acl_entry_attrs; + sai_attribute_t acl_entry_attr; + acl_entry_attr.id = SAI_ACL_ENTRY_ATTR_TABLE_ID; + acl_entry_attr.value.oid = acl_rule.acl_table_oid; + acl_entry_attrs.push_back(acl_entry_attr); + + acl_entry_attr.id = SAI_ACL_ENTRY_ATTR_PRIORITY; + acl_entry_attr.value.u32 = acl_rule.priority; + acl_entry_attrs.push_back(acl_entry_attr); + + acl_entry_attr.id = SAI_ACL_ENTRY_ATTR_ADMIN_STATE; + acl_entry_attr.value.booldata = true; + acl_entry_attrs.push_back(acl_entry_attr); + + // Add matches + for (const auto &match_fv : acl_rule.match_fvs) + { + acl_entry_attr.id = fvField(match_fv); + acl_entry_attr.value = fvValue(match_fv); + acl_entry_attrs.push_back(acl_entry_attr); + } + + // Add actions + for (const auto &action_fv : acl_rule.action_fvs) + { + acl_entry_attr.id = fvField(action_fv); + acl_entry_attr.value = fvValue(action_fv); + acl_entry_attrs.push_back(acl_entry_attr); + } + + // Add meter + if (acl_rule.meter.enabled) + { + acl_entry_attr.id = SAI_ACL_ENTRY_ATTR_ACTION_SET_POLICER; + acl_entry_attr.value.aclaction.parameter.oid = acl_rule.meter.meter_oid; + acl_entry_attr.value.aclaction.enable = true; + acl_entry_attrs.push_back(acl_entry_attr); + } + + // Add counter + if (acl_rule.counter.packets_enabled || acl_rule.counter.bytes_enabled) + { + 
acl_entry_attr.id = SAI_ACL_ENTRY_ATTR_ACTION_COUNTER; + acl_entry_attr.value.aclaction.enable = true; + acl_entry_attr.value.aclaction.parameter.oid = acl_rule.counter.counter_oid; + acl_entry_attrs.push_back(acl_entry_attr); + } + + return acl_entry_attrs; +} + +std::vector getCounterSaiAttrs(const P4AclRule &acl_rule) +{ + sai_attribute_t attr; + std::vector counter_attrs; + attr.id = SAI_ACL_COUNTER_ATTR_TABLE_ID; + attr.value.oid = acl_rule.acl_table_oid; + counter_attrs.push_back(attr); + + if (acl_rule.counter.bytes_enabled) + { + attr.id = SAI_ACL_COUNTER_ATTR_ENABLE_BYTE_COUNT; + attr.value.booldata = true; + counter_attrs.push_back(attr); + } + + if (acl_rule.counter.packets_enabled) + { + attr.id = SAI_ACL_COUNTER_ATTR_ENABLE_PACKET_COUNT; + attr.value.booldata = true; + counter_attrs.push_back(attr); + } + + return counter_attrs; +} + +std::vector getMeterSaiAttrs(const P4AclMeter &p4_acl_meter) +{ + std::vector meter_attrs; + sai_attribute_t meter_attr; + + meter_attr.id = SAI_POLICER_ATTR_MODE; + meter_attr.value.s32 = p4_acl_meter.mode; + meter_attrs.push_back(meter_attr); + + if (p4_acl_meter.enabled) + { + meter_attr.id = SAI_POLICER_ATTR_METER_TYPE; + meter_attr.value.s32 = p4_acl_meter.type; + meter_attrs.push_back(meter_attr); + + meter_attr.id = SAI_POLICER_ATTR_CBS; + meter_attr.value.u64 = p4_acl_meter.cburst; + meter_attrs.push_back(meter_attr); + + meter_attr.id = SAI_POLICER_ATTR_CIR; + meter_attr.value.u64 = p4_acl_meter.cir; + meter_attrs.push_back(meter_attr); + + meter_attr.id = SAI_POLICER_ATTR_PIR; + meter_attr.value.u64 = p4_acl_meter.pir; + meter_attrs.push_back(meter_attr); + + meter_attr.id = SAI_POLICER_ATTR_PBS; + meter_attr.value.u64 = p4_acl_meter.pburst; + meter_attrs.push_back(meter_attr); + } + + for (const auto &packet_color_action : p4_acl_meter.packet_color_actions) + { + meter_attr.id = fvField(packet_color_action); + meter_attr.value.s32 = fvValue(packet_color_action); + meter_attrs.push_back(meter_attr); + } + + 
return meter_attrs; +} + } // namespace -void AclRuleManager::enqueue(const swss::KeyOpFieldsValuesTuple &entry) +ReturnCode AclRuleManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) +{ + return StatusCode::SWSS_RC_UNIMPLEMENTED; +} + +void AclRuleManager::enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) { m_entries.push_back(entry); } @@ -232,43 +363,18 @@ void AclRuleManager::doAclCounterStatsTask() } ReturnCode AclRuleManager::createAclCounter(const std::string &acl_table_name, const std::string &counter_key, - const P4AclCounter &p4_acl_counter, sai_object_id_t *counter_oid) + const P4AclRule &acl_rule, sai_object_id_t *counter_oid) { SWSS_LOG_ENTER(); - sai_attribute_t attr; - std::vector counter_attrs; - sai_object_id_t acl_table_oid; - if (!m_p4OidMapper->getOID(SAI_OBJECT_TYPE_ACL_TABLE, acl_table_name, &acl_table_oid)) - { - LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) - << "Invalid ACL counter to create: ACL table key " << QuotedVar(acl_table_name) - << " not found."); - } - attr.id = SAI_ACL_COUNTER_ATTR_TABLE_ID; - attr.value.oid = acl_table_oid; - counter_attrs.push_back(attr); - - if (p4_acl_counter.bytes_enabled) - { - attr.id = SAI_ACL_COUNTER_ATTR_ENABLE_BYTE_COUNT; - attr.value.booldata = true; - counter_attrs.push_back(attr); - } - - if (p4_acl_counter.packets_enabled) - { - attr.id = SAI_ACL_COUNTER_ATTR_ENABLE_PACKET_COUNT; - attr.value.booldata = true; - counter_attrs.push_back(attr); - } + auto attrs = getCounterSaiAttrs(acl_rule); CHECK_ERROR_AND_LOG_AND_RETURN( - sai_acl_api->create_acl_counter(counter_oid, gSwitchId, (uint32_t)counter_attrs.size(), counter_attrs.data()), - "Faied to create counter for the rule in table " << sai_serialize_object_id(acl_table_oid)); + sai_acl_api->create_acl_counter(counter_oid, gSwitchId, (uint32_t)attrs.size(), attrs.data()), + "Faied to create counter for the rule in table " << 
sai_serialize_object_id(acl_rule.acl_table_oid)); SWSS_LOG_NOTICE("Suceeded to create ACL counter %s ", sai_serialize_object_id(*counter_oid).c_str()); m_p4OidMapper->setOID(SAI_OBJECT_TYPE_ACL_COUNTER, counter_key, *counter_oid); - gCrmOrch->incCrmAclTableUsedCounter(CrmResourceType::CRM_ACL_COUNTER, acl_table_oid); + gCrmOrch->incCrmAclTableUsedCounter(CrmResourceType::CRM_ACL_COUNTER, acl_rule.acl_table_oid); m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_ACL_TABLE, acl_table_name); return ReturnCode(); } @@ -305,44 +411,10 @@ ReturnCode AclRuleManager::createAclMeter(const P4AclMeter &p4_acl_meter, const { SWSS_LOG_ENTER(); - std::vector meter_attrs; - sai_attribute_t meter_attr; - meter_attr.id = SAI_POLICER_ATTR_METER_TYPE; - meter_attr.value.s32 = p4_acl_meter.type; - meter_attrs.push_back(meter_attr); - - meter_attr.id = SAI_POLICER_ATTR_MODE; - meter_attr.value.s32 = p4_acl_meter.mode; - meter_attrs.push_back(meter_attr); - - if (p4_acl_meter.enabled) - { - meter_attr.id = SAI_POLICER_ATTR_CBS; - meter_attr.value.u64 = p4_acl_meter.cburst; - meter_attrs.push_back(meter_attr); - - meter_attr.id = SAI_POLICER_ATTR_CIR; - meter_attr.value.u64 = p4_acl_meter.cir; - meter_attrs.push_back(meter_attr); - - meter_attr.id = SAI_POLICER_ATTR_PIR; - meter_attr.value.u64 = p4_acl_meter.pir; - meter_attrs.push_back(meter_attr); - - meter_attr.id = SAI_POLICER_ATTR_PBS; - meter_attr.value.u64 = p4_acl_meter.pburst; - meter_attrs.push_back(meter_attr); - } - - for (const auto &packet_color_action : p4_acl_meter.packet_color_actions) - { - meter_attr.id = fvField(packet_color_action); - meter_attr.value.s32 = fvValue(packet_color_action); - meter_attrs.push_back(meter_attr); - } + auto attrs = getMeterSaiAttrs(p4_acl_meter); CHECK_ERROR_AND_LOG_AND_RETURN( - sai_policer_api->create_policer(meter_oid, gSwitchId, (uint32_t)meter_attrs.size(), meter_attrs.data()), + sai_policer_api->create_policer(meter_oid, gSwitchId, (uint32_t)attrs.size(), attrs.data()), "Failed to 
create ACL meter"); m_p4OidMapper->setOID(SAI_OBJECT_TYPE_POLICER, meter_key, *meter_oid); SWSS_LOG_NOTICE("Suceeded to create ACL meter %s ", sai_serialize_object_id(*meter_oid).c_str()); @@ -645,39 +717,37 @@ ReturnCode AclRuleManager::setAclRuleCounterStats(const P4AclRule &acl_rule) std::to_string(meter_stats[i])}); } } - else + // Query general packets/bytes stats by ACL counter object id. + std::vector counter_attrs; + sai_attribute_t counter_attr; + if (acl_rule.counter.packets_enabled) { - // Query general packets/bytes stats by ACL counter object id. - std::vector counter_attrs; - sai_attribute_t counter_attr; - if (acl_rule.counter.packets_enabled) - { - counter_attr.id = SAI_ACL_COUNTER_ATTR_PACKETS; - counter_attrs.push_back(counter_attr); - } - if (acl_rule.counter.bytes_enabled) + counter_attr.id = SAI_ACL_COUNTER_ATTR_PACKETS; + counter_attrs.push_back(counter_attr); + } + if (acl_rule.counter.bytes_enabled) + { + counter_attr.id = SAI_ACL_COUNTER_ATTR_BYTES; + counter_attrs.push_back(counter_attr); + } + CHECK_ERROR_AND_LOG_AND_RETURN(sai_acl_api->get_acl_counter_attribute(acl_rule.counter.counter_oid, + static_cast(counter_attrs.size()), + counter_attrs.data()), + "Failed to get counters stats for " << QuotedVar(acl_rule.acl_table_name)); + for (const auto &counter_attr : counter_attrs) + { + if (counter_attr.id == SAI_ACL_COUNTER_ATTR_PACKETS) { - counter_attr.id = SAI_ACL_COUNTER_ATTR_BYTES; - counter_attrs.push_back(counter_attr); + counter_stats_values.push_back( + swss::FieldValueTuple{P4_COUNTER_STATS_PACKETS, std::to_string(counter_attr.value.u64)}); } - CHECK_ERROR_AND_LOG_AND_RETURN( - sai_acl_api->get_acl_counter_attribute(acl_rule.counter.counter_oid, - static_cast(counter_attrs.size()), counter_attrs.data()), - "Failed to get counters stats for " << QuotedVar(acl_rule.acl_table_name)); - for (const auto &counter_attr : counter_attrs) + if (counter_attr.id == SAI_ACL_COUNTER_ATTR_BYTES) { - if (counter_attr.id == 
SAI_ACL_COUNTER_ATTR_PACKETS) - { - counter_stats_values.push_back( - swss::FieldValueTuple{P4_COUNTER_STATS_PACKETS, std::to_string(counter_attr.value.u64)}); - } - if (counter_attr.id == SAI_ACL_COUNTER_ATTR_BYTES) - { - counter_stats_values.push_back( - swss::FieldValueTuple{P4_COUNTER_STATS_BYTES, std::to_string(counter_attr.value.u64)}); - } + counter_stats_values.push_back( + swss::FieldValueTuple{P4_COUNTER_STATS_BYTES, std::to_string(counter_attr.value.u64)}); } } + // Set field value tuples for counters stats in COUNTERS_DB m_countersTable->set(acl_rule.db_key, counter_stats_values); return ReturnCode(); @@ -916,6 +986,7 @@ ReturnCode AclRuleManager::setMatchValue(const acl_entry_attr_union_t attr_name, break; } case SAI_ACL_ENTRY_ATTR_FIELD_TUNNEL_VNI: + case SAI_ACL_ENTRY_ATTR_FIELD_ROUTE_DST_USER_META: case SAI_ACL_ENTRY_ATTR_FIELD_IPV6_FLOW_LABEL: { const std::vector &value_and_mask = tokenize(attr_value, kDataMaskDelimiter); value->aclfield.data.u32 = to_uint(trim(value_and_mask[0])); @@ -1402,41 +1473,26 @@ ReturnCode AclRuleManager::setMeterValue(const P4AclTableDefinition *acl_table, { acl_meter.packet_color_actions = action_color_it->second; } + + // SAI_POLICER_MODE_TR_TCM mode is used by default. + // Meter rate limit config is not present for the ACL rule + // Mark the packet as GREEN by setting rate limit to max. 
+ if (!acl_meter.packet_color_actions.empty() && !acl_meter.enabled) + { + acl_meter.enabled = true; + acl_meter.type = SAI_METER_TYPE_PACKETS; + acl_meter.cburst = 0x7fffffff; + acl_meter.cir = 0x7fffffff; + acl_meter.pir = 0x7fffffff; + acl_meter.pburst = 0x7fffffff; + } + return ReturnCode(); } ReturnCode AclRuleManager::createAclRule(P4AclRule &acl_rule) { SWSS_LOG_ENTER(); - std::vector acl_entry_attrs; - sai_attribute_t acl_entry_attr; - acl_entry_attr.id = SAI_ACL_ENTRY_ATTR_TABLE_ID; - acl_entry_attr.value.oid = acl_rule.acl_table_oid; - acl_entry_attrs.push_back(acl_entry_attr); - - acl_entry_attr.id = SAI_ACL_ENTRY_ATTR_PRIORITY; - acl_entry_attr.value.u32 = acl_rule.priority; - acl_entry_attrs.push_back(acl_entry_attr); - - acl_entry_attr.id = SAI_ACL_ENTRY_ATTR_ADMIN_STATE; - acl_entry_attr.value.booldata = true; - acl_entry_attrs.push_back(acl_entry_attr); - - // Add matches - for (const auto &match_fv : acl_rule.match_fvs) - { - acl_entry_attr.id = fvField(match_fv); - acl_entry_attr.value = fvValue(match_fv); - acl_entry_attrs.push_back(acl_entry_attr); - } - - // Add actions - for (const auto &action_fv : acl_rule.action_fvs) - { - acl_entry_attr.id = fvField(action_fv); - acl_entry_attr.value = fvValue(action_fv); - acl_entry_attrs.push_back(acl_entry_attr); - } // Track if the entry creats a new counter or meter bool created_meter = false; @@ -1444,7 +1500,7 @@ ReturnCode AclRuleManager::createAclRule(P4AclRule &acl_rule) const auto &table_name_and_rule_key = concatTableNameAndRuleKey(acl_rule.acl_table_name, acl_rule.acl_rule_key); // Add meter - if (acl_rule.meter.enabled || !acl_rule.meter.packet_color_actions.empty()) + if (acl_rule.meter.enabled) { if (acl_rule.meter.meter_oid == SAI_NULL_OBJECT_ID) { @@ -1456,10 +1512,6 @@ ReturnCode AclRuleManager::createAclRule(P4AclRule &acl_rule) } created_meter = true; } - acl_entry_attr.id = SAI_ACL_ENTRY_ATTR_ACTION_SET_POLICER; - acl_entry_attr.value.aclaction.parameter.oid = 
acl_rule.meter.meter_oid; - acl_entry_attr.value.aclaction.enable = true; - acl_entry_attrs.push_back(acl_entry_attr); } // Add counter @@ -1467,7 +1519,7 @@ ReturnCode AclRuleManager::createAclRule(P4AclRule &acl_rule) { if (acl_rule.counter.counter_oid == SAI_NULL_OBJECT_ID) { - auto status = createAclCounter(acl_rule.acl_table_name, table_name_and_rule_key, acl_rule.counter, + auto status = createAclCounter(acl_rule.acl_table_name, table_name_and_rule_key, acl_rule, &acl_rule.counter.counter_oid); if (!status.ok()) { @@ -1484,14 +1536,12 @@ ReturnCode AclRuleManager::createAclRule(P4AclRule &acl_rule) } created_counter = true; } - acl_entry_attr.id = SAI_ACL_ENTRY_ATTR_ACTION_COUNTER; - acl_entry_attr.value.aclaction.enable = true; - acl_entry_attr.value.aclaction.parameter.oid = acl_rule.counter.counter_oid; - acl_entry_attrs.push_back(acl_entry_attr); } - auto sai_status = sai_acl_api->create_acl_entry(&acl_rule.acl_entry_oid, gSwitchId, - (uint32_t)acl_entry_attrs.size(), acl_entry_attrs.data()); + auto attrs = getRuleSaiAttrs(acl_rule); + + auto sai_status = + sai_acl_api->create_acl_entry(&acl_rule.acl_entry_oid, gSwitchId, (uint32_t)attrs.size(), attrs.data()); if (sai_status != SAI_STATUS_SUCCESS) { ReturnCode status = ReturnCode(sai_status) @@ -1665,7 +1715,7 @@ ReturnCode AclRuleManager::removeAclRule(const std::string &acl_table_name, cons << sai_serialize_object_id(acl_rule->acl_entry_oid) << " in table " << QuotedVar(acl_table_name)); bool deleted_meter = false; - if (acl_rule->meter.enabled || !acl_rule->meter.packet_color_actions.empty()) + if (acl_rule->meter.enabled) { m_p4OidMapper->decreaseRefCount(SAI_OBJECT_TYPE_POLICER, table_name_and_rule_key); auto status = removeAclMeter(table_name_and_rule_key); @@ -1750,7 +1800,7 @@ ReturnCode AclRuleManager::removeAclRule(const std::string &acl_table_name, cons ReturnCode AclRuleManager::processAddRuleRequest(const std::string &acl_rule_key, const P4AclRuleAppDbEntry &app_db_entry) { - P4AclRule 
acl_rule; + P4AclRule acl_rule{}; acl_rule.priority = app_db_entry.priority; acl_rule.acl_rule_key = acl_rule_key; acl_rule.p4_action = app_db_entry.action; @@ -1832,7 +1882,6 @@ ReturnCode AclRuleManager::processAddRuleRequest(const std::string &acl_rule_key gPortsOrch->increasePortRefCount(port_alias); } gCrmOrch->incCrmAclTableUsedCounter(CrmResourceType::CRM_ACL_ENTRY, acl_rule.acl_table_oid); - m_aclRuleTables[acl_rule.acl_table_name][acl_rule.acl_rule_key] = acl_rule; m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_ACL_TABLE, acl_rule.acl_table_name); const auto &table_name_and_rule_key = concatTableNameAndRuleKey(acl_rule.acl_table_name, acl_rule.acl_rule_key); m_p4OidMapper->setOID(SAI_OBJECT_TYPE_ACL_ENTRY, table_name_and_rule_key, acl_rule.acl_entry_oid); @@ -1841,13 +1890,15 @@ ReturnCode AclRuleManager::processAddRuleRequest(const std::string &acl_rule_key // Counter was created, increase ACL rule ref count m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_ACL_COUNTER, table_name_and_rule_key); } - if (acl_rule.meter.enabled || !acl_rule.meter.packet_color_actions.empty()) + if (acl_rule.meter.enabled) { // Meter was created, increase ACL rule ref count m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_POLICER, table_name_and_rule_key); } - SWSS_LOG_NOTICE("Suceeded to create ACL rule %s : %s", QuotedVar(acl_rule.acl_rule_key).c_str(), - sai_serialize_object_id(acl_rule.acl_entry_oid).c_str()); + m_aclRuleTables[acl_table->acl_table_name][acl_rule_key] = std::move(acl_rule); + SWSS_LOG_NOTICE( + "Suceeded to create ACL rule %s : %s", QuotedVar(acl_rule_key).c_str(), + sai_serialize_object_id(m_aclRuleTables[acl_table->acl_table_name][acl_rule_key].acl_entry_oid).c_str()); return status; } @@ -1868,7 +1919,7 @@ ReturnCode AclRuleManager::processUpdateRuleRequest(const P4AclRuleAppDbEntry &a { SWSS_LOG_ENTER(); - P4AclRule acl_rule; + P4AclRule acl_rule{}; const auto *acl_table = gP4Orch->getAclTableManager()->getAclTable(app_db_entry.acl_table_name); 
acl_rule.acl_table_oid = acl_table->table_oid; acl_rule.acl_table_name = acl_table->acl_table_name; @@ -1876,9 +1927,6 @@ ReturnCode AclRuleManager::processUpdateRuleRequest(const P4AclRuleAppDbEntry &a // Skip match field comparison because the acl_rule_key including match // field value and priority should be the same with old one. - acl_rule.match_fvs = old_acl_rule.match_fvs; - acl_rule.in_ports = old_acl_rule.in_ports; - acl_rule.out_ports = old_acl_rule.out_ports; acl_rule.priority = app_db_entry.priority; acl_rule.acl_rule_key = old_acl_rule.acl_rule_key; // Skip Counter comparison since the counter unit is defined in table @@ -1906,8 +1954,7 @@ ReturnCode AclRuleManager::processUpdateRuleRequest(const P4AclRuleAppDbEntry &a bool created_meter = false; bool updated_meter = false; LOG_AND_RETURN_IF_ERROR(setMeterValue(acl_table, app_db_entry, acl_rule.meter)); - if (old_acl_rule.meter.meter_oid == SAI_NULL_OBJECT_ID && - (acl_rule.meter.enabled || !acl_rule.meter.packet_color_actions.empty())) + if (old_acl_rule.meter.meter_oid == SAI_NULL_OBJECT_ID && acl_rule.meter.enabled) { // Create new meter auto status = createAclMeter(acl_rule.meter, table_name_and_rule_key, &acl_rule.meter.meter_oid); @@ -1926,8 +1973,7 @@ ReturnCode AclRuleManager::processUpdateRuleRequest(const P4AclRuleAppDbEntry &a acl_entry_attr.value.aclaction.parameter.oid = SAI_NULL_OBJECT_ID; rollback_attrs.push_back(acl_entry_attr); } - else if (old_acl_rule.meter.meter_oid != SAI_NULL_OBJECT_ID && !acl_rule.meter.enabled && - acl_rule.meter.packet_color_actions.empty()) + else if (old_acl_rule.meter.meter_oid != SAI_NULL_OBJECT_ID && !acl_rule.meter.enabled) { // Remove old meter remove_meter = true; @@ -2001,9 +2047,468 @@ ReturnCode AclRuleManager::processUpdateRuleRequest(const P4AclRuleAppDbEntry &a } return status; } - - m_aclRuleTables[acl_rule.acl_table_name][acl_rule.acl_rule_key] = acl_rule; + // Move match_fvs and referred pointers from old rule to new rule + acl_rule.in_ports = 
std::move(old_acl_rule.in_ports); + acl_rule.out_ports = std::move(old_acl_rule.out_ports); + acl_rule.in_ports_oids = std::move(old_acl_rule.in_ports_oids); + acl_rule.out_ports_oids = std::move(old_acl_rule.out_ports_oids); + acl_rule.udf_data_masks = std::move(old_acl_rule.udf_data_masks); + acl_rule.match_fvs = std::move(old_acl_rule.match_fvs); + m_aclRuleTables[acl_rule.acl_table_name][acl_rule.acl_rule_key] = std::move(acl_rule); return ReturnCode(); } +std::string AclRuleManager::verifyState(const std::string &key, const std::vector &tuple) +{ + SWSS_LOG_ENTER(); + + auto pos = key.find_first_of(kTableKeyDelimiter); + if (pos == std::string::npos) + { + return std::string("Invalid key: ") + key; + } + std::string p4rt_table = key.substr(0, pos); + std::string p4rt_key = key.substr(pos + 1); + if (p4rt_table != APP_P4RT_TABLE_NAME) + { + return std::string("Invalid key: ") + key; + } + std::string table_name; + std::string key_content; + parseP4RTKey(p4rt_key, &table_name, &key_content); + + ReturnCode status; + auto app_db_entry_or = deserializeAclRuleAppDbEntry(table_name, key_content, tuple); + if (!app_db_entry_or.ok()) + { + status = app_db_entry_or.status(); + std::stringstream msg; + msg << "Unable to deserialize key " << QuotedVar(key) << ": " << status.message(); + return msg.str(); + } + auto &app_db_entry = *app_db_entry_or; + + const auto &acl_table_name = app_db_entry.acl_table_name; + const auto &acl_rule_key = + KeyGenerator::generateAclRuleKey(app_db_entry.match_fvs, std::to_string(app_db_entry.priority)); + auto *acl_rule = getAclRule(acl_table_name, acl_rule_key); + if (acl_rule == nullptr) + { + std::stringstream msg; + msg << "No entry found with key " << QuotedVar(key); + return msg.str(); + } + + std::string cache_result = verifyStateCache(app_db_entry, acl_rule); + std::string asic_db_result = verifyStateAsicDb(acl_rule); + if (cache_result.empty()) + { + return asic_db_result; + } + if (asic_db_result.empty()) + { + return 
cache_result; + } + return cache_result + "; " + asic_db_result; +} + +std::string AclRuleManager::verifyStateCache(const P4AclRuleAppDbEntry &app_db_entry, const P4AclRule *acl_rule) +{ + ReturnCode status = validateAclRuleAppDbEntry(app_db_entry); + if (!status.ok()) + { + std::stringstream msg; + msg << "Validation failed for ACL rule DB entry with key " << QuotedVar(acl_rule->acl_rule_key) << ": " + << status.message(); + return msg.str(); + } + + const auto &acl_rule_key = + KeyGenerator::generateAclRuleKey(app_db_entry.match_fvs, std::to_string(app_db_entry.priority)); + if (acl_rule->acl_rule_key != acl_rule_key) + { + std::stringstream msg; + msg << "ACL rule " << QuotedVar(acl_rule_key) << " does not match internal cache " + << QuotedVar(acl_rule->acl_rule_key) << " in ACL rule manager."; + return msg.str(); + } + if (acl_rule->acl_table_name != app_db_entry.acl_table_name) + { + std::stringstream msg; + msg << "ACL rule " << QuotedVar(acl_rule_key) << " with table name " << QuotedVar(app_db_entry.acl_table_name) + << " does not match internal cache " << QuotedVar(acl_rule->acl_table_name) << " in ACL rule manager."; + return msg.str(); + } + if (acl_rule->db_key != app_db_entry.db_key) + { + std::stringstream msg; + msg << "ACL rule " << QuotedVar(acl_rule_key) << " with DB key " << QuotedVar(app_db_entry.db_key) + << " does not match internal cache " << QuotedVar(acl_rule->db_key) << " in ACL rule manager."; + return msg.str(); + } + if (acl_rule->p4_action != app_db_entry.action) + { + std::stringstream msg; + msg << "ACL rule " << QuotedVar(acl_rule_key) << " with action " << QuotedVar(app_db_entry.action) + << " does not match internal cache " << QuotedVar(acl_rule->p4_action) << " in ACL rule manager."; + return msg.str(); + } + if (acl_rule->priority != app_db_entry.priority) + { + std::stringstream msg; + msg << "ACL rule " << QuotedVar(acl_rule_key) << " with priority " << app_db_entry.priority + << " does not match internal cache " << 
acl_rule->priority << " in ACL rule manager."; + return msg.str(); + } + + auto *acl_table = gP4Orch->getAclTableManager()->getAclTable(app_db_entry.acl_table_name); + if (acl_table == nullptr) + { + std::stringstream msg; + msg << "ACL table " << QuotedVar(app_db_entry.acl_table_name) << " not found in ACL rule " + << QuotedVar(acl_rule_key); + return msg.str(); + } + if (acl_rule->acl_table_name != acl_table->acl_table_name) + { + std::stringstream msg; + msg << "ACL rule " << QuotedVar(acl_rule_key) << " with ACL table name " << QuotedVar(acl_rule->acl_table_name) + << " mismatch with ACl table " << QuotedVar(acl_table->acl_table_name) << " in ACl rule manager."; + return msg.str(); + } + if (acl_rule->acl_table_oid != acl_table->table_oid) + { + std::stringstream msg; + msg << "ACL rule " << QuotedVar(acl_rule_key) << " with ACL table OID " << acl_rule->acl_table_oid + << " mismatch with ACl table " << acl_table->table_oid << " in ACl rule manager."; + return msg.str(); + } + + P4AclRule acl_rule_entry{}; + acl_rule_entry.priority = app_db_entry.priority; + acl_rule_entry.acl_rule_key = acl_rule_key; + acl_rule_entry.p4_action = app_db_entry.action; + acl_rule_entry.db_key = app_db_entry.db_key; + status = setAllMatchFieldValues(app_db_entry, acl_table, acl_rule_entry); + if (!status.ok()) + { + std::stringstream msg; + msg << "Failed to set match field values for ACL rule " << QuotedVar(acl_rule_key); + return msg.str(); + } + status = setAllActionFieldValues(app_db_entry, acl_table, acl_rule_entry); + if (!status.ok()) + { + std::stringstream msg; + msg << "Failed to set action field values for ACL rule " << QuotedVar(acl_rule_key); + return msg.str(); + } + status = setMeterValue(acl_table, app_db_entry, acl_rule_entry.meter); + if (!status.ok()) + { + std::stringstream msg; + msg << "Failed to set meter value for ACL rule " << QuotedVar(acl_rule_key); + return msg.str(); + } + if (!acl_table->counter_unit.empty()) + { + if (acl_table->counter_unit == 
P4_COUNTER_UNIT_PACKETS) + { + acl_rule_entry.counter.packets_enabled = true; + } + else if (acl_table->counter_unit == P4_COUNTER_UNIT_BYTES) + { + acl_rule_entry.counter.bytes_enabled = true; + } + else if (acl_table->counter_unit == P4_COUNTER_UNIT_BOTH) + { + acl_rule_entry.counter.bytes_enabled = true; + acl_rule_entry.counter.packets_enabled = true; + } + } + + if (acl_rule->match_fvs.size() != acl_rule_entry.match_fvs.size()) + { + std::stringstream msg; + msg << "ACL rule " << QuotedVar(acl_rule_key) << " with match fvs size " << acl_rule_entry.match_fvs.size() + << " does not match internal cache " << acl_rule->match_fvs.size() << " in ACl rule manager."; + return msg.str(); + } + for (const auto &match_fv : acl_rule_entry.match_fvs) + { + const auto &it = acl_rule->match_fvs.find(fvField(match_fv)); + if (it == acl_rule->match_fvs.end()) + { + std::stringstream msg; + msg << "ACL match field " << fvField(match_fv) << " not found in internal cache in ACL rule " + << QuotedVar(acl_rule_key); + return msg.str(); + } + else if (isDiffMatchFieldValue(fvField(match_fv), fvValue(match_fv), it->second, acl_rule_entry, *acl_rule)) + { + std::stringstream msg; + msg << "ACL match field " << fvField(match_fv) << " mismatch in internal cache in ACL rule " + << QuotedVar(acl_rule_key); + return msg.str(); + } + } + + if (acl_rule->action_fvs.size() != acl_rule_entry.action_fvs.size()) + { + std::stringstream msg; + msg << "ACL rule " << QuotedVar(acl_rule_key) << " with action fvs size " << acl_rule_entry.action_fvs.size() + << " does not match internal cache " << acl_rule->action_fvs.size() << " in ACl rule manager."; + return msg.str(); + } + for (const auto &action_fv : acl_rule_entry.action_fvs) + { + const auto &it = acl_rule->action_fvs.find(fvField(action_fv)); + if (it == acl_rule->action_fvs.end()) + { + std::stringstream msg; + msg << "ACL action field " << fvField(action_fv) << " not found in internal cache in ACL rule " + << QuotedVar(acl_rule_key); + 
return msg.str(); + } + else if (isDiffActionFieldValue(fvField(action_fv), fvValue(action_fv), it->second, acl_rule_entry, *acl_rule)) + { + std::stringstream msg; + msg << "ACL action field " << fvField(action_fv) << " mismatch in internal cache in ACL rule " + << QuotedVar(acl_rule_key); + return msg.str(); + } + } + + if (acl_rule->meter != acl_rule_entry.meter) + { + std::stringstream msg; + msg << "Meter mismatch on ACL rule " << QuotedVar(acl_rule_key); + return msg.str(); + } + if (acl_rule->counter != acl_rule_entry.counter) + { + std::stringstream msg; + msg << "Counter mismatch on ACL rule " << QuotedVar(acl_rule_key); + return msg.str(); + } + if (acl_rule->action_qos_queue_num != acl_rule_entry.action_qos_queue_num) + { + std::stringstream msg; + msg << "ACL rule " << QuotedVar(acl_rule_key) << " with qos queue number " + << acl_rule_entry.action_qos_queue_num << " mismatch with internal cache " << acl_rule->action_qos_queue_num + << " in ACl rule manager."; + return msg.str(); + } + if (acl_rule->action_redirect_nexthop_key != acl_rule_entry.action_redirect_nexthop_key) + { + std::stringstream msg; + msg << "ACL rule " << QuotedVar(acl_rule_key) << " with redirect nexthop key " + << QuotedVar(acl_rule_entry.action_redirect_nexthop_key) << " mismatch with internal cache " + << QuotedVar(acl_rule->action_redirect_nexthop_key) << " in ACl rule manager."; + return msg.str(); + } + if (acl_rule->action_mirror_sessions != acl_rule_entry.action_mirror_sessions) + { + std::stringstream msg; + msg << "Mirror sessions mismatch on ACL rule " << QuotedVar(acl_rule_key); + return msg.str(); + } + if (!acl_rule->action_mirror_sessions.empty()) + { + for (const auto &fv : acl_rule->action_mirror_sessions) + { + if (acl_rule->action_fvs.find(fvField(fv)) == acl_rule->action_fvs.end() || + acl_rule->action_fvs.at(fvField(fv)).aclaction.parameter.objlist.list != &fvValue(fv).oid) + { + std::stringstream msg; + msg << "Mirror session " << 
QuotedVar(std::to_string(fvField(fv))) + << " mismatch on internal cache ACL rule " << QuotedVar(acl_rule_key); + return msg.str(); + } + } + } + + if (acl_rule->udf_data_masks != acl_rule_entry.udf_data_masks) + { + std::stringstream msg; + msg << "UDF data masks mismatch on ACL rule " << QuotedVar(acl_rule_key); + return msg.str(); + } + if (!acl_rule->udf_data_masks.empty()) + { + std::stringstream msg; + for (const auto &fv : acl_rule->udf_data_masks) + { + if (acl_rule->match_fvs.find(fvField(fv)) == acl_rule->match_fvs.end()) + { + msg << "UDF group " << QuotedVar(std::to_string(fvField(fv))) + << " are missing in in internal cache in ACL rule match_fvs" << QuotedVar(acl_rule_key); + return msg.str(); + } + if (acl_rule->match_fvs.at(fvField(fv)).aclfield.data.u8list.list != fvValue(fv).data.data()) + { + msg << "UDF data for field " << QuotedVar(std::to_string(fvField(fv))) + << " mismatches between match_fvs and " + "udf_data_masks in internal cache in ACL rule" + << QuotedVar(acl_rule_key); + return msg.str(); + } + if (acl_rule->match_fvs.at(fvField(fv)).aclfield.mask.u8list.list != fvValue(fv).mask.data()) + { + msg << "UDF mask for field " << QuotedVar(std::to_string(fvField(fv))) + << " mismatches between match_fvs and " + "udf_data_masks in internal cache in ACL rule" + << QuotedVar(acl_rule_key); + return msg.str(); + } + } + } + if (acl_rule->in_ports != acl_rule_entry.in_ports) + { + std::stringstream msg; + msg << "In ports mismatch on ACL rule " << QuotedVar(acl_rule_key); + return msg.str(); + } + if (acl_rule->out_ports != acl_rule_entry.out_ports) + { + std::stringstream msg; + msg << "Out ports mismatch on ACL rule " << QuotedVar(acl_rule_key); + return msg.str(); + } + if (acl_rule->in_ports_oids != acl_rule_entry.in_ports_oids) + { + std::stringstream msg; + msg << "In port OIDs mismatch on ACL rule " << QuotedVar(acl_rule_key); + return msg.str(); + } + if (!acl_rule->in_ports_oids.empty() && + 
(acl_rule->match_fvs.find(SAI_ACL_ENTRY_ATTR_FIELD_IN_PORTS) == acl_rule->match_fvs.end() || + acl_rule->match_fvs.at(SAI_ACL_ENTRY_ATTR_FIELD_IN_PORTS).aclfield.data.objlist.list != + acl_rule->in_ports_oids.data())) + { + std::stringstream msg; + msg << "In port OIDs mismatch between match_fvs and " + "in_ports_oids in internal cache in ACL rule" + << QuotedVar(acl_rule_key); + return msg.str(); + } + + if (acl_rule->out_ports_oids != acl_rule_entry.out_ports_oids) + { + std::stringstream msg; + msg << "Out port OIDs mismatch on ACL rule " << QuotedVar(acl_rule_key); + return msg.str(); + } + if (!acl_rule->out_ports_oids.empty() && + (acl_rule->match_fvs.find(SAI_ACL_ENTRY_ATTR_FIELD_OUT_PORTS) == acl_rule->match_fvs.end() || + acl_rule->match_fvs.at(SAI_ACL_ENTRY_ATTR_FIELD_OUT_PORTS).aclfield.data.objlist.list != + acl_rule->out_ports_oids.data())) + { + std::stringstream msg; + msg << "Out port OIDs mismatch between match_fvs and " + "out_ports_oids in internal cache in ACL rule" + << QuotedVar(acl_rule_key); + return msg.str(); + } + + const auto &table_name_and_rule_key = concatTableNameAndRuleKey(acl_rule->acl_table_name, acl_rule->acl_rule_key); + std::string err_msg = + m_p4OidMapper->verifyOIDMapping(SAI_OBJECT_TYPE_ACL_ENTRY, table_name_and_rule_key, acl_rule->acl_entry_oid); + if (!err_msg.empty()) + { + return err_msg; + } + if (!acl_table->counter_unit.empty()) + { + err_msg = m_p4OidMapper->verifyOIDMapping(SAI_OBJECT_TYPE_ACL_COUNTER, table_name_and_rule_key, + acl_rule->counter.counter_oid); + if (!err_msg.empty()) + { + return err_msg; + } + } + if (acl_rule_entry.meter.enabled) + { + err_msg = m_p4OidMapper->verifyOIDMapping(SAI_OBJECT_TYPE_POLICER, table_name_and_rule_key, + acl_rule->meter.meter_oid); + if (!err_msg.empty()) + { + return err_msg; + } + } + + return ""; +} + +std::string AclRuleManager::verifyStateAsicDb(const P4AclRule *acl_rule) +{ + swss::DBConnector db("ASIC_DB", 0); + swss::Table table(&db, "ASIC_STATE"); + + // Verify 
rule. + auto attrs = getRuleSaiAttrs(*acl_rule); + std::vector exp = + saimeta::SaiAttributeList::serialize_attr_list(SAI_OBJECT_TYPE_ACL_ENTRY, (uint32_t)attrs.size(), attrs.data(), + /*countOnly=*/false); + std::string key = + sai_serialize_object_type(SAI_OBJECT_TYPE_ACL_ENTRY) + ":" + sai_serialize_object_id(acl_rule->acl_entry_oid); + std::vector values; + if (!table.get(key, values)) + { + return std::string("ASIC DB key not found ") + key; + } + std::string err_msg = verifyAttrs(values, exp, std::vector{}, + /*allow_unknown=*/true); + if (!err_msg.empty()) + { + return err_msg; + } + + // Verify counter. + if (acl_rule->counter.packets_enabled || acl_rule->counter.bytes_enabled) + { + attrs = getCounterSaiAttrs(*acl_rule); + exp = saimeta::SaiAttributeList::serialize_attr_list(SAI_OBJECT_TYPE_ACL_COUNTER, (uint32_t)attrs.size(), + attrs.data(), + /*countOnly=*/false); + key = sai_serialize_object_type(SAI_OBJECT_TYPE_ACL_COUNTER) + ":" + + sai_serialize_object_id(acl_rule->counter.counter_oid); + values.clear(); + if (!table.get(key, values)) + { + return std::string("ASIC DB key not found ") + key; + } + err_msg = verifyAttrs(values, exp, std::vector{}, + /*allow_unknown=*/true); + if (!err_msg.empty()) + { + return err_msg; + } + } + + // Verify meter. 
+ if (acl_rule->meter.enabled) + { + attrs = getMeterSaiAttrs(acl_rule->meter); + exp = saimeta::SaiAttributeList::serialize_attr_list(SAI_OBJECT_TYPE_POLICER, (uint32_t)attrs.size(), + attrs.data(), + /*countOnly=*/false); + key = sai_serialize_object_type(SAI_OBJECT_TYPE_POLICER) + ":" + + sai_serialize_object_id(acl_rule->meter.meter_oid); + values.clear(); + if (!table.get(key, values)) + { + return std::string("ASIC DB key not found ") + key; + } + err_msg = verifyAttrs(values, exp, std::vector{}, + /*allow_unknown=*/true); + if (!err_msg.empty()) + { + return err_msg; + } + } + + return ""; +} + } // namespace p4orch diff --git a/orchagent/p4orch/acl_rule_manager.h b/orchagent/p4orch/acl_rule_manager.h index cc00735d84..230f226f98 100644 --- a/orchagent/p4orch/acl_rule_manager.h +++ b/orchagent/p4orch/acl_rule_manager.h @@ -41,8 +41,10 @@ class AclRuleManager : public ObjectManagerInterface } virtual ~AclRuleManager() = default; - void enqueue(const swss::KeyOpFieldsValuesTuple &entry) override; + void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; void drain() override; + std::string verifyState(const std::string &key, const std::vector &tuple) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) override; // Update counters stats for every rule in each ACL table in COUNTERS_DB, if // counters are enabled in rules. @@ -77,7 +79,7 @@ class AclRuleManager : public ObjectManagerInterface // Create an ACL counter. ReturnCode createAclCounter(const std::string &acl_table_name, const std::string &counter_key, - const P4AclCounter &p4_acl_counter, sai_object_id_t *counter_oid); + const P4AclRule &acl_rule, sai_object_id_t *counter_oid); // Create an ACL meter. 
ReturnCode createAclMeter(const P4AclMeter &p4_acl_meter, const std::string &meter_key, sai_object_id_t *meter_oid); @@ -136,6 +138,12 @@ class AclRuleManager : public ObjectManagerInterface // clean up. ReturnCode cleanUpUserDefinedTraps(); + // Verifies internal cache for an entry. + std::string verifyStateCache(const P4AclRuleAppDbEntry &app_db_entry, const P4AclRule *acl_rule); + + // Verifies ASIC DB for an entry. + std::string verifyStateAsicDb(const P4AclRule *acl_rule); + P4OidMapper *m_p4OidMapper; ResponsePublisherInterface *m_publisher; P4AclRuleTables m_aclRuleTables; diff --git a/orchagent/p4orch/acl_table_manager.cpp b/orchagent/p4orch/acl_table_manager.cpp index 456c2f04d2..c1ad1c1c05 100644 --- a/orchagent/p4orch/acl_table_manager.cpp +++ b/orchagent/p4orch/acl_table_manager.cpp @@ -4,7 +4,9 @@ #include #include +#include "SaiAttributeList.h" #include "crmorch.h" +#include "dbconnector.h" #include "json.hpp" #include "logger.h" #include "orch.h" @@ -12,12 +14,15 @@ #include "p4orch/p4orch_util.h" #include "sai_serialize.h" #include "switchorch.h" +#include "table.h" #include "tokenize.h" extern "C" { #include "sai.h" } +using ::p4orch::kTableKeyDelimiter; + extern sai_object_id_t gSwitchId; extern sai_acl_api_t *sai_acl_api; extern sai_udf_api_t *sai_udf_api; @@ -29,6 +34,44 @@ extern int gBatchSize; namespace p4orch { +namespace +{ + +std::vector getGroupMemSaiAttrs(const P4AclTableDefinition &acl_table) +{ + std::vector acl_mem_attrs; + sai_attribute_t acl_mem_attr; + acl_mem_attr.id = SAI_ACL_TABLE_GROUP_MEMBER_ATTR_ACL_TABLE_GROUP_ID; + acl_mem_attr.value.oid = acl_table.group_oid; + acl_mem_attrs.push_back(acl_mem_attr); + + acl_mem_attr.id = SAI_ACL_TABLE_GROUP_MEMBER_ATTR_ACL_TABLE_ID; + acl_mem_attr.value.oid = acl_table.table_oid; + acl_mem_attrs.push_back(acl_mem_attr); + + acl_mem_attr.id = SAI_ACL_TABLE_GROUP_MEMBER_ATTR_PRIORITY; + acl_mem_attr.value.u32 = acl_table.priority; + acl_mem_attrs.push_back(acl_mem_attr); + + return 
acl_mem_attrs; +} + +std::vector getUdfGroupSaiAttrs(const P4UdfField &udf_field) +{ + std::vector udf_group_attrs; + sai_attribute_t udf_group_attr; + udf_group_attr.id = SAI_UDF_GROUP_ATTR_TYPE; + udf_group_attr.value.s32 = SAI_UDF_GROUP_TYPE_GENERIC; + udf_group_attrs.push_back(udf_group_attr); + + udf_group_attr.id = SAI_UDF_GROUP_ATTR_LENGTH; + udf_group_attr.value.u16 = udf_field.length; + udf_group_attrs.push_back(udf_group_attr); + + return udf_group_attrs; +} + +} // namespace AclTableManager::AclTableManager(P4OidMapper *p4oidMapper, ResponsePublisherInterface *publisher) : m_p4OidMapper(p4oidMapper), m_publisher(publisher) @@ -36,16 +79,15 @@ AclTableManager::AclTableManager(P4OidMapper *p4oidMapper, ResponsePublisherInte SWSS_LOG_ENTER(); assert(p4oidMapper != nullptr); - // Create the default UDF match - auto status = createDefaultUdfMatch(); - if (!status.ok()) - { - SWSS_LOG_ERROR("Failed to create ACL UDF default match : %s", status.message().c_str()); - } } AclTableManager::~AclTableManager() { + sai_object_id_t udf_match_oid; + if (!m_p4OidMapper->getOID(SAI_OBJECT_TYPE_UDF_MATCH, P4_UDF_MATCH_DEFAULT, &udf_match_oid)) + { + return; + } auto status = removeDefaultUdfMatch(); if (!status.ok()) { @@ -54,7 +96,121 @@ AclTableManager::~AclTableManager() } } -void AclTableManager::enqueue(const swss::KeyOpFieldsValuesTuple &entry) +ReturnCodeOr> AclTableManager::getTableSaiAttrs(const P4AclTableDefinition &acl_table) +{ + std::vector acl_attr_list; + sai_attribute_t acl_attr; + acl_attr.id = SAI_ACL_TABLE_ATTR_ACL_STAGE; + acl_attr.value.s32 = acl_table.stage; + acl_attr_list.push_back(acl_attr); + + if (acl_table.size > 0) + { + acl_attr.id = SAI_ACL_TABLE_ATTR_SIZE; + acl_attr.value.u32 = acl_table.size; + acl_attr_list.push_back(acl_attr); + } + + std::set table_match_fields_to_add; + if (!acl_table.ip_type_bit_type_lookup.empty()) + { + acl_attr.id = SAI_ACL_TABLE_ATTR_FIELD_ACL_IP_TYPE; + acl_attr.value.booldata = true; + 
acl_attr_list.push_back(acl_attr); + table_match_fields_to_add.insert(SAI_ACL_TABLE_ATTR_FIELD_ACL_IP_TYPE); + } + + for (const auto &match_field : acl_table.sai_match_field_lookup) + { + const auto &sai_match_field = fvValue(match_field); + // Avoid duplicate match attribute to add + if (table_match_fields_to_add.find(sai_match_field.table_attr) != table_match_fields_to_add.end()) + continue; + acl_attr.id = sai_match_field.table_attr; + acl_attr.value.booldata = true; + acl_attr_list.push_back(acl_attr); + table_match_fields_to_add.insert(sai_match_field.table_attr); + } + + for (const auto &match_fields : acl_table.composite_sai_match_fields_lookup) + { + const auto &sai_match_fields = fvValue(match_fields); + for (const auto &sai_match_field : sai_match_fields) + { + // Avoid duplicate match attribute to add + if (table_match_fields_to_add.find(sai_match_field.table_attr) != table_match_fields_to_add.end()) + continue; + acl_attr.id = sai_match_field.table_attr; + acl_attr.value.booldata = true; + acl_attr_list.push_back(acl_attr); + table_match_fields_to_add.insert(sai_match_field.table_attr); + } + } + + // Add UDF group attributes + for (const auto &udf_group_idx : acl_table.udf_group_attr_index_lookup) + { + acl_attr.id = SAI_ACL_TABLE_ATTR_USER_DEFINED_FIELD_GROUP_MIN + fvValue(udf_group_idx); + if (!m_p4OidMapper->getOID(SAI_OBJECT_TYPE_UDF_GROUP, fvField(udf_group_idx), &acl_attr.value.oid)) + { + LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) + << "The UDF group with id " << QuotedVar(fvField(udf_group_idx)) << " was not found."); + } + acl_attr_list.push_back(acl_attr); + } + + m_acl_action_list[0] = SAI_ACL_ACTION_TYPE_COUNTER; + acl_attr.id = SAI_ACL_TABLE_ATTR_ACL_ACTION_TYPE_LIST; + acl_attr.value.s32list.count = 1; + acl_attr.value.s32list.list = m_acl_action_list; + acl_attr_list.push_back(acl_attr); + + return acl_attr_list; +} + +ReturnCodeOr> AclTableManager::getUdfSaiAttrs(const P4UdfField &udf_field) +{ + sai_object_id_t
udf_group_oid; + if (!m_p4OidMapper->getOID(SAI_OBJECT_TYPE_UDF_GROUP, udf_field.group_id, &udf_group_oid)) + { + return ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) + << "UDF group " << QuotedVar(udf_field.group_id) << " does not exist"; + } + sai_object_id_t udf_match_oid; + if (!m_p4OidMapper->getOID(SAI_OBJECT_TYPE_UDF_MATCH, P4_UDF_MATCH_DEFAULT, &udf_match_oid)) + { + // Create the default UDF match + LOG_AND_RETURN_IF_ERROR(createDefaultUdfMatch() + << "Failed to create ACL UDF default match " << QuotedVar(P4_UDF_MATCH_DEFAULT)); + m_p4OidMapper->getOID(SAI_OBJECT_TYPE_UDF_MATCH, P4_UDF_MATCH_DEFAULT, &udf_match_oid); + } + std::vector udf_attrs; + sai_attribute_t udf_attr; + udf_attr.id = SAI_UDF_ATTR_GROUP_ID; + udf_attr.value.oid = udf_group_oid; + udf_attrs.push_back(udf_attr); + + udf_attr.id = SAI_UDF_ATTR_MATCH_ID; + udf_attr.value.oid = udf_match_oid; + udf_attrs.push_back(udf_attr); + + udf_attr.id = SAI_UDF_ATTR_BASE; + udf_attr.value.s32 = udf_field.base; + udf_attrs.push_back(udf_attr); + + udf_attr.id = SAI_UDF_ATTR_OFFSET; + udf_attr.value.u16 = udf_field.offset; + udf_attrs.push_back(udf_attr); + + return udf_attrs; +} + +ReturnCode AclTableManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) +{ + return StatusCode::SWSS_RC_UNIMPLEMENTED; +} + +void AclTableManager::enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) { m_entries.push_back(entry); } @@ -262,7 +418,7 @@ ReturnCode AclTableManager::processAddTableRequest(const P4AclTableDefinitionApp << "ACL table stage " << QuotedVar(app_db_entry.stage) << " is invalid"); } - if (gSwitchOrch->getAclGroupOidsBindingToSwitch().empty()) + if (gSwitchOrch->getAclGroupsBindingToSwitch().empty()) { // Create default ACL groups binding to switch gSwitchOrch->initAclGroupsBindToSwitch(); @@ -271,13 +427,14 @@ ReturnCode AclTableManager::processAddTableRequest(const P4AclTableDefinitionApp P4AclTableDefinition 
acl_table_definition(app_db_entry.acl_table_name, stage, app_db_entry.priority, app_db_entry.size, app_db_entry.meter_unit, app_db_entry.counter_unit); - auto group_it = gSwitchOrch->getAclGroupOidsBindingToSwitch().find(acl_table_definition.stage); - if (group_it == gSwitchOrch->getAclGroupOidsBindingToSwitch().end()) + auto &group_map = gSwitchOrch->getAclGroupsBindingToSwitch(); + auto group_it = group_map.find(acl_table_definition.stage); + if (group_it == group_map.end()) { RETURN_INTERNAL_ERROR_AND_RAISE_CRITICAL("Failed to find ACL group binding to switch at stage " << acl_table_definition.stage); } - acl_table_definition.group_oid = group_it->second; + acl_table_definition.group_oid = group_it->second.m_saiObjectId; auto build_match_rc = buildAclTableDefinitionMatchFieldValues(app_db_entry.match_field_lookup, &acl_table_definition); @@ -399,21 +556,12 @@ ReturnCode AclTableManager::createUdfGroup(const P4UdfField &udf_field) { SWSS_LOG_ENTER(); sai_object_id_t udf_group_oid; - std::vector udf_group_attrs; - sai_attribute_t udf_group_attr; - udf_group_attr.id = SAI_UDF_GROUP_ATTR_TYPE; - udf_group_attr.value.s32 = SAI_UDF_GROUP_TYPE_GENERIC; - udf_group_attrs.push_back(udf_group_attr); - - udf_group_attr.id = SAI_UDF_GROUP_ATTR_LENGTH; - udf_group_attr.value.u16 = udf_field.length; - udf_group_attrs.push_back(udf_group_attr); + auto attrs = getUdfGroupSaiAttrs(udf_field); - CHECK_ERROR_AND_LOG_AND_RETURN(sai_udf_api->create_udf_group(&udf_group_oid, gSwitchId, - (uint32_t)udf_group_attrs.size(), - udf_group_attrs.data()), - "Failed to create UDF group " << QuotedVar(udf_field.group_id) - << " from SAI call sai_udf_api->create_udf_group"); + CHECK_ERROR_AND_LOG_AND_RETURN( + sai_udf_api->create_udf_group(&udf_group_oid, gSwitchId, (uint32_t)attrs.size(), attrs.data()), + "Failed to create UDF group " << QuotedVar(udf_field.group_id) + << " from SAI call sai_udf_api->create_udf_group"); m_p4OidMapper->setOID(SAI_OBJECT_TYPE_UDF_GROUP, udf_field.group_id, 
udf_group_oid); SWSS_LOG_INFO("Suceeded to create UDF group %s with object ID %s ", QuotedVar(udf_field.group_id).c_str(), sai_serialize_object_id(udf_group_oid).c_str()); @@ -456,40 +604,13 @@ ReturnCode AclTableManager::createUdf(const P4UdfField &udf_field) { SWSS_LOG_ENTER(); const auto &udf_id = udf_field.udf_id; - sai_object_id_t udf_group_oid; - if (!m_p4OidMapper->getOID(SAI_OBJECT_TYPE_UDF_GROUP, udf_field.group_id, &udf_group_oid)) - { - return ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) - << "UDF group " << QuotedVar(udf_field.group_id) << " does not exist"; - } - sai_object_id_t udf_match_oid; - if (!m_p4OidMapper->getOID(SAI_OBJECT_TYPE_UDF_MATCH, P4_UDF_MATCH_DEFAULT, &udf_match_oid)) - { - return ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) - << "UDF default match " << QuotedVar(P4_UDF_MATCH_DEFAULT) << " does not exist"; - } - std::vector udf_attrs; - sai_attribute_t udf_attr; - udf_attr.id = SAI_UDF_ATTR_GROUP_ID; - udf_attr.value.oid = udf_group_oid; - udf_attrs.push_back(udf_attr); - udf_attr.id = SAI_UDF_ATTR_MATCH_ID; - udf_attr.value.oid = udf_match_oid; - udf_attrs.push_back(udf_attr); - - udf_attr.id = SAI_UDF_ATTR_BASE; - udf_attr.value.s32 = udf_field.base; - udf_attrs.push_back(udf_attr); - - udf_attr.id = SAI_UDF_ATTR_OFFSET; - udf_attr.value.u16 = udf_field.offset; - udf_attrs.push_back(udf_attr); + ASSIGN_OR_RETURN(auto attrs, getUdfSaiAttrs(udf_field)); sai_object_id_t udf_oid; - CHECK_ERROR_AND_LOG_AND_RETURN( - sai_udf_api->create_udf(&udf_oid, gSwitchId, (uint32_t)udf_attrs.size(), udf_attrs.data()), - "Failed to create UDF " << QuotedVar(udf_id) << " from SAI call sai_udf_api->create_udf"); + CHECK_ERROR_AND_LOG_AND_RETURN(sai_udf_api->create_udf(&udf_oid, gSwitchId, (uint32_t)attrs.size(), attrs.data()), + "Failed to create UDF " << QuotedVar(udf_id) + << " from SAI call sai_udf_api->create_udf"); m_p4OidMapper->setOID(SAI_OBJECT_TYPE_UDF, udf_id, udf_oid); // Increase UDF group and match reference count 
m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_UDF_MATCH, P4_UDF_MATCH_DEFAULT); @@ -594,78 +715,10 @@ ReturnCode AclTableManager::createAclTable(P4AclTableDefinition &acl_table, sai_ sai_object_id_t *acl_group_member_oid) { // Prepare SAI ACL attributes list to create ACL table - std::vector acl_attr_list; - sai_attribute_t acl_attr; - acl_attr.id = SAI_ACL_TABLE_ATTR_ACL_STAGE; - acl_attr.value.s32 = acl_table.stage; - acl_attr_list.push_back(acl_attr); - - if (acl_table.size > 0) - { - acl_attr.id = SAI_ACL_TABLE_ATTR_SIZE; - acl_attr.value.u32 = acl_table.size; - acl_attr_list.push_back(acl_attr); - } - - std::set table_match_fields_to_add; - if (!acl_table.ip_type_bit_type_lookup.empty()) - { - acl_attr.id = SAI_ACL_TABLE_ATTR_FIELD_ACL_IP_TYPE; - acl_attr.value.booldata = true; - acl_attr_list.push_back(acl_attr); - table_match_fields_to_add.insert(SAI_ACL_TABLE_ATTR_FIELD_ACL_IP_TYPE); - } - - for (const auto &match_field : acl_table.sai_match_field_lookup) - { - const auto &sai_match_field = fvValue(match_field); - // Avoid duplicate match attribute to add - if (table_match_fields_to_add.find(sai_match_field.table_attr) != table_match_fields_to_add.end()) - continue; - acl_attr.id = sai_match_field.table_attr; - acl_attr.value.booldata = true; - acl_attr_list.push_back(acl_attr); - table_match_fields_to_add.insert(sai_match_field.table_attr); - } - - for (const auto &match_fields : acl_table.composite_sai_match_fields_lookup) - { - const auto &sai_match_fields = fvValue(match_fields); - for (const auto &sai_match_field : sai_match_fields) - { - // Avoid duplicate match attribute to add - if (table_match_fields_to_add.find(sai_match_field.table_attr) != table_match_fields_to_add.end()) - continue; - acl_attr.id = sai_match_field.table_attr; - acl_attr.value.booldata = true; - acl_attr_list.push_back(acl_attr); - table_match_fields_to_add.insert(sai_match_field.table_attr); - } - } - - // Add UDF group attributes - for (const auto &udf_group_idx : 
acl_table.udf_group_attr_index_lookup) - { - acl_attr.id = SAI_ACL_TABLE_ATTR_USER_DEFINED_FIELD_GROUP_MIN + fvValue(udf_group_idx); - if (!m_p4OidMapper->getOID(SAI_OBJECT_TYPE_UDF_GROUP, fvField(udf_group_idx), &acl_attr.value.oid)) - { - LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) - << "THe UDF group with id " << QuotedVar(fvField(udf_group_idx)) << " was not found."); - } - acl_attr_list.push_back(acl_attr); - } - - // OA workaround to fix b/191114070: always add counter action in ACL table - // action list during creation - int32_t acl_action_list[1]; - acl_action_list[0] = SAI_ACL_ACTION_TYPE_COUNTER; - acl_attr.id = SAI_ACL_TABLE_ATTR_ACL_ACTION_TYPE_LIST; - acl_attr.value.s32list.count = 1; - acl_attr.value.s32list.list = acl_action_list; - acl_attr_list.push_back(acl_attr); + ASSIGN_OR_RETURN(auto attrs, getTableSaiAttrs(acl_table)); CHECK_ERROR_AND_LOG_AND_RETURN( - sai_acl_api->create_acl_table(acl_table_oid, gSwitchId, (uint32_t)acl_attr_list.size(), acl_attr_list.data()), + sai_acl_api->create_acl_table(acl_table_oid, gSwitchId, (uint32_t)attrs.size(), attrs.data()), "Failed to create ACL table " << QuotedVar(acl_table.acl_table_name)); SWSS_LOG_NOTICE("Called SAI API to create ACL table %s ", sai_serialize_object_id(*acl_table_oid).c_str()); auto status = createAclGroupMember(acl_table, acl_group_member_oid); @@ -706,7 +759,7 @@ ReturnCode AclTableManager::removeAclTable(P4AclTableDefinition &acl_table) { SWSS_LOG_ENTER(); - auto status = removeAclGroupMember(acl_table.acl_table_name); + auto status = removeAclGroupMember(acl_table); if (!status.ok()) { SWSS_LOG_ERROR("Failed to remove ACL table with key %s : failed to delete group " @@ -851,51 +904,377 @@ ReturnCode AclTableManager::createAclGroupMember(const P4AclTableDefinition &acl sai_object_id_t *acl_grp_mem_oid) { SWSS_LOG_ENTER(); - std::vector acl_mem_attrs; - sai_attribute_t acl_mem_attr; - acl_mem_attr.id = SAI_ACL_TABLE_GROUP_MEMBER_ATTR_ACL_TABLE_GROUP_ID; - 
acl_mem_attr.value.oid = acl_table.group_oid; - acl_mem_attrs.push_back(acl_mem_attr); - - acl_mem_attr.id = SAI_ACL_TABLE_GROUP_MEMBER_ATTR_ACL_TABLE_ID; - acl_mem_attr.value.oid = acl_table.table_oid; - acl_mem_attrs.push_back(acl_mem_attr); - - acl_mem_attr.id = SAI_ACL_TABLE_GROUP_MEMBER_ATTR_PRIORITY; - acl_mem_attr.value.u32 = acl_table.priority; - acl_mem_attrs.push_back(acl_mem_attr); + auto attrs = getGroupMemSaiAttrs(acl_table); CHECK_ERROR_AND_LOG_AND_RETURN( - sai_acl_api->create_acl_table_group_member(acl_grp_mem_oid, gSwitchId, (uint32_t)acl_mem_attrs.size(), - acl_mem_attrs.data()), + sai_acl_api->create_acl_table_group_member(acl_grp_mem_oid, gSwitchId, (uint32_t)attrs.size(), attrs.data()), "Failed to create ACL group member in group " << sai_serialize_object_id(acl_table.group_oid)); m_p4OidMapper->setOID(SAI_OBJECT_TYPE_ACL_TABLE_GROUP_MEMBER, acl_table.acl_table_name, *acl_grp_mem_oid); - m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_ACL_TABLE_GROUP, std::to_string(acl_table.stage)); + // Add reference on the ACL group + auto &group_map = gSwitchOrch->getAclGroupsBindingToSwitch(); + auto group_it = group_map.find(acl_table.stage); + if (group_it == group_map.end()) + { + RETURN_INTERNAL_ERROR_AND_RAISE_CRITICAL("Failed to find ACL group binding to switch at stage " + << acl_table.stage); + } + auto *referenced_group = &group_it->second; + referenced_group->m_objsDependingOnMe.insert(sai_serialize_object_id(*acl_grp_mem_oid)); SWSS_LOG_NOTICE("ACL group member for table %s was created successfully: %s", QuotedVar(acl_table.acl_table_name).c_str(), sai_serialize_object_id(*acl_grp_mem_oid).c_str()); return ReturnCode(); } -ReturnCode AclTableManager::removeAclGroupMember(const std::string &acl_table_name) +ReturnCode AclTableManager::removeAclGroupMember(P4AclTableDefinition &acl_table) { SWSS_LOG_ENTER(); sai_object_id_t grp_mem_oid; - if (!m_p4OidMapper->getOID(SAI_OBJECT_TYPE_ACL_TABLE_GROUP_MEMBER, acl_table_name, &grp_mem_oid)) + if 
(!m_p4OidMapper->getOID(SAI_OBJECT_TYPE_ACL_TABLE_GROUP_MEMBER, acl_table.acl_table_name, &grp_mem_oid)) { LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Failed to remove ACL group member " << sai_serialize_object_id(grp_mem_oid) - << " for table " << QuotedVar(acl_table_name) << ": invalid table key."); + << " for table " << QuotedVar(acl_table.acl_table_name) << ": invalid table key."); } CHECK_ERROR_AND_LOG_AND_RETURN(sai_acl_api->remove_acl_table_group_member(grp_mem_oid), "Failed to remove ACL group member " << sai_serialize_object_id(grp_mem_oid) - << " for table " << QuotedVar(acl_table_name)); - m_p4OidMapper->eraseOID(SAI_OBJECT_TYPE_ACL_TABLE_GROUP_MEMBER, acl_table_name); - m_p4OidMapper->decreaseRefCount(SAI_OBJECT_TYPE_ACL_TABLE_GROUP, - std::to_string(m_aclTableDefinitions[acl_table_name].stage).c_str()); + << " for table " + << QuotedVar(acl_table.acl_table_name)); + m_p4OidMapper->eraseOID(SAI_OBJECT_TYPE_ACL_TABLE_GROUP_MEMBER, acl_table.acl_table_name); + // Remove reference on the ACL group + auto &group_map = gSwitchOrch->getAclGroupsBindingToSwitch(); + auto group_it = group_map.find(acl_table.stage); + if (group_it == group_map.end()) + { + RETURN_INTERNAL_ERROR_AND_RAISE_CRITICAL("Failed to find ACL group binding to switch at stage " + << acl_table.stage); + } + auto *referenced_group = &group_it->second; + referenced_group->m_objsDependingOnMe.erase(sai_serialize_object_id(grp_mem_oid)); SWSS_LOG_NOTICE("ACL table member %s for table %s was removed successfully.", - sai_serialize_object_id(grp_mem_oid).c_str(), QuotedVar(acl_table_name).c_str()); + sai_serialize_object_id(grp_mem_oid).c_str(), QuotedVar(acl_table.acl_table_name).c_str()); return ReturnCode(); } +std::string AclTableManager::verifyState(const std::string &key, const std::vector &tuple) +{ + SWSS_LOG_ENTER(); + + auto pos = key.find_first_of(kTableKeyDelimiter); + if (pos == std::string::npos) + { + return std::string("Invalid key: ") + key; + } + 
std::string p4rt_table = key.substr(0, pos); + std::string p4rt_key = key.substr(pos + 1); + if (p4rt_table != APP_P4RT_TABLE_NAME) + { + return std::string("Invalid key: ") + key; + } + std::string table_name; + std::string key_content; + parseP4RTKey(p4rt_key, &table_name, &key_content); + if (table_name != APP_P4RT_ACL_TABLE_DEFINITION_NAME) + { + return std::string("Invalid key: ") + key; + } + + ReturnCode status; + auto app_db_entry_or = deserializeAclTableDefinitionAppDbEntry(key_content, tuple); + if (!app_db_entry_or.ok()) + { + status = app_db_entry_or.status(); + std::stringstream msg; + msg << "Unable to deserialize key " << QuotedVar(key) << ": " << status.message(); + return msg.str(); + } + auto &app_db_entry = *app_db_entry_or; + + auto *acl_table_definition = getAclTable(app_db_entry.acl_table_name); + if (acl_table_definition == nullptr) + { + std::stringstream msg; + msg << "No entry found with key " << QuotedVar(key); + return msg.str(); + } + + std::string cache_result = verifyStateCache(app_db_entry, acl_table_definition); + std::string asic_db_result = verifyStateAsicDb(acl_table_definition); + if (cache_result.empty()) + { + return asic_db_result; + } + if (asic_db_result.empty()) + { + return cache_result; + } + return cache_result + "; " + asic_db_result; +} + +std::string AclTableManager::verifyStateCache(const P4AclTableDefinitionAppDbEntry &app_db_entry, + const P4AclTableDefinition *acl_table) +{ + ReturnCode status = validateAclTableDefinitionAppDbEntry(app_db_entry); + if (!status.ok()) + { + std::stringstream msg; + msg << "Validation failed for ACL table DB entry " << QuotedVar(app_db_entry.acl_table_name) << ": " + << status.message(); + return msg.str(); + } + + auto stage_it = aclStageLookup.find(app_db_entry.stage); + sai_acl_stage_t stage; + if (stage_it != aclStageLookup.end()) + { + stage = stage_it->second; + } + else + { + std::stringstream msg; + msg << "Invalid stage " << QuotedVar(app_db_entry.stage) << " in ACL table 
manager."; + return msg.str(); + } + P4AclTableDefinition acl_table_definition_entry(app_db_entry.acl_table_name, stage, app_db_entry.priority, + app_db_entry.size, app_db_entry.meter_unit, + app_db_entry.counter_unit); + + if (acl_table->acl_table_name != app_db_entry.acl_table_name) + { + std::stringstream msg; + msg << "ACL table " << QuotedVar(app_db_entry.acl_table_name) << " does not match internal cache " + << QuotedVar(acl_table->acl_table_name) << " in ACL table manager."; + return msg.str(); + } + if (acl_table->stage != stage) + { + std::stringstream msg; + msg << "ACL table " << QuotedVar(app_db_entry.acl_table_name) << " with stage " << stage + << " does not match internal cache " << acl_table->stage << " in ACL table manager."; + return msg.str(); + } + if (acl_table->size != app_db_entry.size) + { + std::stringstream msg; + msg << "ACL table " << QuotedVar(app_db_entry.acl_table_name) << " with size " << app_db_entry.size + << " does not match internal cache " << acl_table->size << " in ACL table manager."; + return msg.str(); + } + if (acl_table->priority != app_db_entry.priority) + { + std::stringstream msg; + msg << "ACL table " << QuotedVar(app_db_entry.acl_table_name) << " with priority " << app_db_entry.priority + << " does not match internal cache " << acl_table->priority << " in ACL table manager."; + return msg.str(); + } + if (acl_table->meter_unit != app_db_entry.meter_unit) + { + std::stringstream msg; + msg << "ACL table " << QuotedVar(app_db_entry.acl_table_name) << " with meter unit " + << QuotedVar(app_db_entry.meter_unit) << " does not match internal cache " + << QuotedVar(acl_table->meter_unit) << " in ACL table manager."; + return msg.str(); + } + if (acl_table->counter_unit != app_db_entry.counter_unit) + { + std::stringstream msg; + msg << "ACL table " << QuotedVar(app_db_entry.acl_table_name) << " with counter unit " + << QuotedVar(app_db_entry.counter_unit) << " does not match internal cache " + << 
QuotedVar(acl_table->counter_unit) << " in ACL table manager."; + return msg.str(); + } + + status = buildAclTableDefinitionMatchFieldValues(app_db_entry.match_field_lookup, &acl_table_definition_entry); + if (!status.ok()) + { + std::stringstream msg; + msg << "Failed to build ACL table match field values for table " << QuotedVar(app_db_entry.acl_table_name); + return msg.str(); + } + status = buildAclTableDefinitionActionFieldValues(app_db_entry.action_field_lookup, + &acl_table_definition_entry.rule_action_field_lookup); + if (!status.ok()) + { + std::stringstream msg; + msg << "Failed to build ACL table action field values for table " << QuotedVar(app_db_entry.acl_table_name); + return msg.str(); + } + status = buildAclTableDefinitionActionColorFieldValues(app_db_entry.packet_action_color_lookup, + &acl_table_definition_entry.rule_action_field_lookup, + &acl_table_definition_entry.rule_packet_action_color_lookup); + if (!status.ok()) + { + std::stringstream msg; + msg << "Failed to build ACL table action color field values for table " + << QuotedVar(app_db_entry.acl_table_name); + return msg.str(); + } + + if (acl_table->composite_sai_match_fields_lookup != acl_table_definition_entry.composite_sai_match_fields_lookup) + { + std::stringstream msg; + msg << "Composite SAI match fields mismatch on ACL table " << QuotedVar(app_db_entry.acl_table_name); + return msg.str(); + } + if (acl_table->udf_fields_lookup != acl_table_definition_entry.udf_fields_lookup) + { + std::stringstream msg; + msg << "UDF fields lookup mismatch on ACL table " << QuotedVar(app_db_entry.acl_table_name); + return msg.str(); + } + if (acl_table->udf_group_attr_index_lookup != acl_table_definition_entry.udf_group_attr_index_lookup) + { + std::stringstream msg; + msg << "UDF group attr index lookup mismatch on ACL table " << QuotedVar(app_db_entry.acl_table_name); + return msg.str(); + } + if (acl_table->sai_match_field_lookup != acl_table_definition_entry.sai_match_field_lookup) + { + 
std::stringstream msg; + msg << "SAI match field lookup mismatch on ACL table " << QuotedVar(app_db_entry.acl_table_name); + return msg.str(); + } + if (acl_table->ip_type_bit_type_lookup != acl_table_definition_entry.ip_type_bit_type_lookup) + { + std::stringstream msg; + msg << "IP type bit type lookup mismatch on ACL table " << QuotedVar(app_db_entry.acl_table_name); + return msg.str(); + } + if (acl_table->rule_action_field_lookup != acl_table_definition_entry.rule_action_field_lookup) + { + std::stringstream msg; + msg << "Rule action field lookup mismatch on ACL table " << QuotedVar(app_db_entry.acl_table_name); + return msg.str(); + } + if (acl_table->rule_packet_action_color_lookup != acl_table_definition_entry.rule_packet_action_color_lookup) + { + std::stringstream msg; + msg << "Rule packet action color lookup mismatch on ACL table " << QuotedVar(app_db_entry.acl_table_name); + return msg.str(); + } + + std::string err_msg = m_p4OidMapper->verifyOIDMapping(SAI_OBJECT_TYPE_ACL_TABLE_GROUP_MEMBER, + app_db_entry.acl_table_name, acl_table->group_member_oid); + if (!err_msg.empty()) + { + return err_msg; + } + err_msg = + m_p4OidMapper->verifyOIDMapping(SAI_OBJECT_TYPE_ACL_TABLE, app_db_entry.acl_table_name, acl_table->table_oid); + if (!err_msg.empty()) + { + return err_msg; + } + + return ""; +} + +std::string AclTableManager::verifyStateAsicDb(const P4AclTableDefinition *acl_table) +{ + swss::DBConnector db("ASIC_DB", 0); + swss::Table table(&db, "ASIC_STATE"); + + // Verify table. 
+ auto attrs_or = getTableSaiAttrs(*acl_table); + if (!attrs_or.ok()) + { + return std::string("Failed to get SAI attrs: ") + attrs_or.status().message(); + } + std::vector attrs = *attrs_or; + std::vector exp = + saimeta::SaiAttributeList::serialize_attr_list(SAI_OBJECT_TYPE_ACL_TABLE, (uint32_t)attrs.size(), attrs.data(), + /*countOnly=*/false); + std::string key = + sai_serialize_object_type(SAI_OBJECT_TYPE_ACL_TABLE) + ":" + sai_serialize_object_id(acl_table->table_oid); + std::vector values; + if (!table.get(key, values)) + { + return std::string("ASIC DB key not found ") + key; + } + std::string err_msg = verifyAttrs(values, exp, std::vector{}, + /*allow_unknown=*/false); + if (!err_msg.empty()) + { + return err_msg; + } + + // Verify group member. + attrs = getGroupMemSaiAttrs(*acl_table); + exp = saimeta::SaiAttributeList::serialize_attr_list(SAI_OBJECT_TYPE_ACL_TABLE_GROUP_MEMBER, (uint32_t)attrs.size(), + attrs.data(), /*countOnly=*/false); + key = sai_serialize_object_type(SAI_OBJECT_TYPE_ACL_TABLE_GROUP_MEMBER) + ":" + + sai_serialize_object_id(acl_table->group_member_oid); + values.clear(); + if (!table.get(key, values)) + { + return std::string("ASIC DB key not found ") + key; + } + err_msg = verifyAttrs(values, exp, std::vector{}, + /*allow_unknown=*/false); + if (!err_msg.empty()) + { + return err_msg; + } + + for (auto &udf_fields : acl_table->udf_fields_lookup) + { + for (auto &udf_field : fvValue(udf_fields)) + { + sai_object_id_t udf_group_oid; + if (!m_p4OidMapper->getOID(SAI_OBJECT_TYPE_UDF_GROUP, udf_field.group_id, &udf_group_oid)) + { + return std::string("UDF group ") + udf_field.group_id + " does not exist"; + } + sai_object_id_t udf_oid; + if (!m_p4OidMapper->getOID(SAI_OBJECT_TYPE_UDF, udf_field.udf_id, &udf_oid)) + { + return std::string("UDF ") + udf_field.udf_id + " does not exist"; + } + + // Verify UDF group. 
+ attrs = getUdfGroupSaiAttrs(udf_field); + exp = saimeta::SaiAttributeList::serialize_attr_list(SAI_OBJECT_TYPE_UDF_GROUP, (uint32_t)attrs.size(), + attrs.data(), + /*countOnly=*/false); + key = sai_serialize_object_type(SAI_OBJECT_TYPE_UDF_GROUP) + ":" + sai_serialize_object_id(udf_group_oid); + values.clear(); + if (!table.get(key, values)) + { + return std::string("ASIC DB key not found ") + key; + } + err_msg = verifyAttrs(values, exp, std::vector{}, + /*allow_unknown=*/false); + if (!err_msg.empty()) + { + return err_msg; + } + + // Verify UDF. + attrs_or = getUdfSaiAttrs(udf_field); + if (!attrs_or.ok()) + { + return std::string("Failed to get SAI attrs: ") + attrs_or.status().message(); + } + attrs = *attrs_or; + exp = saimeta::SaiAttributeList::serialize_attr_list(SAI_OBJECT_TYPE_UDF, (uint32_t)attrs.size(), + attrs.data(), + /*countOnly=*/false); + key = sai_serialize_object_type(SAI_OBJECT_TYPE_UDF) + ":" + sai_serialize_object_id(udf_oid); + values.clear(); + if (!table.get(key, values)) + { + return std::string("ASIC DB key not found ") + key; + } + err_msg = verifyAttrs(values, exp, std::vector{}, + /*allow_unknown=*/false); + if (!err_msg.empty()) + { + return err_msg; + } + } + } + + return ""; +} + } // namespace p4orch diff --git a/orchagent/p4orch/acl_table_manager.h b/orchagent/p4orch/acl_table_manager.h index 6243c08cb4..5ebaf459e9 100644 --- a/orchagent/p4orch/acl_table_manager.h +++ b/orchagent/p4orch/acl_table_manager.h @@ -31,8 +31,10 @@ class AclTableManager : public ObjectManagerInterface explicit AclTableManager(P4OidMapper *p4oidMapper, ResponsePublisherInterface *publisher); virtual ~AclTableManager(); - void enqueue(const swss::KeyOpFieldsValuesTuple &entry) override; + void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; void drain() override; + std::string verifyState(const std::string &key, const std::vector &tuple) override; + ReturnCode getSaiObject(const std::string &json_key, 
sai_object_type_t &object_type, std::string &object_key) override; // Get ACL table definition by table name in cache. Return nullptr if not // found. @@ -92,7 +94,20 @@ class AclTableManager : public ObjectManagerInterface ReturnCode createAclGroupMember(const P4AclTableDefinition &acl_table, sai_object_id_t *acl_grp_mem_oid); // Remove ACL group member for given ACL table. - ReturnCode removeAclGroupMember(const std::string &acl_table_name); + ReturnCode removeAclGroupMember(P4AclTableDefinition &acl_table); + + // Verifies internal cache for an entry. + std::string verifyStateCache(const P4AclTableDefinitionAppDbEntry &app_db_entry, + const P4AclTableDefinition *acl_table); + + // Verifies ASIC DB for an entry. + std::string verifyStateAsicDb(const P4AclTableDefinition *acl_table); + + // Returns ACl table SAI attributes. + ReturnCodeOr> getTableSaiAttrs(const P4AclTableDefinition &acl_table); + + // Returns UDF SAI attributes. + ReturnCodeOr> getUdfSaiAttrs(const P4UdfField &udf_field); P4OidMapper *m_p4OidMapper; ResponsePublisherInterface *m_publisher; @@ -100,6 +115,9 @@ class AclTableManager : public ObjectManagerInterface std::deque m_entries; std::map> m_aclTablesByStage; + // Always add counter action in ACL table action list during creation + int32_t m_acl_action_list[1]; + friend class p4orch::test::AclManagerTest; }; diff --git a/orchagent/p4orch/acl_util.cpp b/orchagent/p4orch/acl_util.cpp index 6caf67cade..92905ec622 100644 --- a/orchagent/p4orch/acl_util.cpp +++ b/orchagent/p4orch/acl_util.cpp @@ -10,13 +10,6 @@ namespace p4orch { -std::string trim(const std::string &s) -{ - size_t end = s.find_last_not_of(WHITESPACE); - size_t start = s.find_first_not_of(WHITESPACE); - return (end == std::string::npos) ? 
EMPTY_STRING : s.substr(start, end - start + 1); -} - bool parseAclTableAppDbActionField(const std::string &aggr_actions_str, std::vector *action_list, std::vector *action_color_list) { @@ -327,8 +320,8 @@ ReturnCode validateAndSetCompositeMatchFieldJson( uint32_t composite_bitwidth = bitwidth_it.value(); auto elements_it = aggr_match_json.find(kAclMatchFieldElements); - // b/175596733: temp disable verification on composite elements field until - // p4rt implementation is added. + // TODO: temp disable verification on composite elements field until p4rt + // implementation is added. if (elements_it == aggr_match_json.end()) { (*udf_fields_lookup)[p4_match]; @@ -871,4 +864,98 @@ bool isDiffActionFieldValue(const acl_entry_attr_union_t attr_name, const sai_at } } +bool isDiffMatchFieldValue(const acl_entry_attr_union_t attr_name, const sai_attribute_value_t &value, + const sai_attribute_value_t &old_value, const P4AclRule &acl_rule, + const P4AclRule &old_acl_rule) +{ + if (attr_name >= SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN && + attr_name <= SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MAX) + { + // We compare the size here only. The list is explicitly verified in the + // ACL rule. + return value.aclfield.data.u8list.count != old_value.aclfield.data.u8list.count; + } + switch (attr_name) + { + case SAI_ACL_ENTRY_ATTR_FIELD_IN_PORTS: { + // We compare the size here only. The list is explicitly verified in the + // ACL rule. + return value.aclfield.data.objlist.count != old_value.aclfield.data.objlist.count; + } + case SAI_ACL_ENTRY_ATTR_FIELD_OUT_PORTS: { + // We compare the size here only. The list is explicitly verified in the + // ACL rule. 
+ return value.aclfield.data.objlist.count != old_value.aclfield.data.objlist.count; + } + case SAI_ACL_ENTRY_ATTR_FIELD_IN_PORT: + case SAI_ACL_ENTRY_ATTR_FIELD_OUT_PORT: { + return value.aclfield.data.oid != old_value.aclfield.data.oid; + } + case SAI_ACL_ENTRY_ATTR_FIELD_ACL_IP_TYPE: + case SAI_ACL_ENTRY_ATTR_FIELD_TUNNEL_VNI: + case SAI_ACL_ENTRY_ATTR_FIELD_ROUTE_DST_USER_META: + case SAI_ACL_ENTRY_ATTR_FIELD_IPV6_FLOW_LABEL: + case SAI_ACL_ENTRY_ATTR_FIELD_ACL_IP_FRAG: + case SAI_ACL_ENTRY_ATTR_FIELD_PACKET_VLAN: { + return value.aclfield.data.u32 != old_value.aclfield.data.u32 || + value.aclfield.mask.u32 != old_value.aclfield.mask.u32; + } + case SAI_ACL_ENTRY_ATTR_FIELD_TCP_FLAGS: + case SAI_ACL_ENTRY_ATTR_FIELD_IP_FLAGS: + case SAI_ACL_ENTRY_ATTR_FIELD_DSCP: + case SAI_ACL_ENTRY_ATTR_FIELD_TC: + case SAI_ACL_ENTRY_ATTR_FIELD_ICMP_TYPE: + case SAI_ACL_ENTRY_ATTR_FIELD_ICMP_CODE: + case SAI_ACL_ENTRY_ATTR_FIELD_ICMPV6_TYPE: + case SAI_ACL_ENTRY_ATTR_FIELD_ICMPV6_CODE: + case SAI_ACL_ENTRY_ATTR_FIELD_OUTER_VLAN_PRI: + case SAI_ACL_ENTRY_ATTR_FIELD_OUTER_VLAN_CFI: + case SAI_ACL_ENTRY_ATTR_FIELD_INNER_VLAN_PRI: + case SAI_ACL_ENTRY_ATTR_FIELD_INNER_VLAN_CFI: + case SAI_ACL_ENTRY_ATTR_FIELD_INNER_IP_PROTOCOL: + case SAI_ACL_ENTRY_ATTR_FIELD_IP_PROTOCOL: + case SAI_ACL_ENTRY_ATTR_FIELD_ECN: + case SAI_ACL_ENTRY_ATTR_FIELD_TTL: + case SAI_ACL_ENTRY_ATTR_FIELD_TOS: + case SAI_ACL_ENTRY_ATTR_FIELD_IPV6_NEXT_HEADER: { + return value.aclfield.data.u8 != old_value.aclfield.data.u8 || + value.aclfield.mask.u8 != old_value.aclfield.mask.u8; + } + case SAI_ACL_ENTRY_ATTR_FIELD_ETHER_TYPE: + case SAI_ACL_ENTRY_ATTR_FIELD_L4_SRC_PORT: + case SAI_ACL_ENTRY_ATTR_FIELD_L4_DST_PORT: + case SAI_ACL_ENTRY_ATTR_FIELD_IP_IDENTIFICATION: + case SAI_ACL_ENTRY_ATTR_FIELD_OUTER_VLAN_ID: + case SAI_ACL_ENTRY_ATTR_FIELD_INNER_VLAN_ID: + case SAI_ACL_ENTRY_ATTR_FIELD_INNER_ETHER_TYPE: + case SAI_ACL_ENTRY_ATTR_FIELD_INNER_L4_SRC_PORT: + case SAI_ACL_ENTRY_ATTR_FIELD_INNER_L4_DST_PORT: { 
+ return value.aclfield.data.u16 != old_value.aclfield.data.u16 || + value.aclfield.mask.u16 != old_value.aclfield.mask.u16; + } + case SAI_ACL_ENTRY_ATTR_FIELD_INNER_SRC_IP: + case SAI_ACL_ENTRY_ATTR_FIELD_INNER_DST_IP: + case SAI_ACL_ENTRY_ATTR_FIELD_SRC_IP: + case SAI_ACL_ENTRY_ATTR_FIELD_DST_IP: { + return value.aclfield.data.ip4 != old_value.aclfield.data.ip4 || + value.aclfield.mask.ip4 != old_value.aclfield.mask.ip4; + } + case SAI_ACL_ENTRY_ATTR_FIELD_INNER_SRC_IPV6: + case SAI_ACL_ENTRY_ATTR_FIELD_INNER_DST_IPV6: + case SAI_ACL_ENTRY_ATTR_FIELD_SRC_IPV6: + case SAI_ACL_ENTRY_ATTR_FIELD_DST_IPV6: { + return memcmp(value.aclfield.data.ip6, old_value.aclfield.data.ip6, sizeof(sai_ip6_t)) || + memcmp(value.aclfield.mask.ip6, old_value.aclfield.mask.ip6, sizeof(sai_ip6_t)); + } + case SAI_ACL_ENTRY_ATTR_FIELD_SRC_MAC: + case SAI_ACL_ENTRY_ATTR_FIELD_DST_MAC: { + return memcmp(value.aclfield.data.mac, old_value.aclfield.data.mac, sizeof(sai_mac_t)) || + memcmp(value.aclfield.mask.mac, old_value.aclfield.mask.mac, sizeof(sai_mac_t)); + } + default: { + return false; + } + } +} + } // namespace p4orch diff --git a/orchagent/p4orch/acl_util.h b/orchagent/p4orch/acl_util.h index c06849506b..74de14d2a5 100644 --- a/orchagent/p4orch/acl_util.h +++ b/orchagent/p4orch/acl_util.h @@ -51,6 +51,16 @@ struct P4AclCounter P4AclCounter() : bytes_enabled(false), packets_enabled(false), counter_oid(SAI_NULL_OBJECT_ID) { } + + bool operator==(const P4AclCounter &entry) const + { + return bytes_enabled == entry.bytes_enabled && packets_enabled == entry.packets_enabled; + } + + bool operator!=(const P4AclCounter &entry) const + { + return !(*this == entry); + } }; struct P4AclMeter @@ -71,6 +81,18 @@ struct P4AclMeter type(SAI_METER_TYPE_PACKETS), mode(SAI_POLICER_MODE_TR_TCM) { } + + bool operator==(const P4AclMeter &entry) const + { + return enabled == entry.enabled && type == entry.type && mode == entry.mode && cir == entry.cir && + cburst == entry.cburst && pir == entry.pir && 
pburst == entry.pburst && + packet_color_actions == entry.packet_color_actions; + } + + bool operator!=(const P4AclMeter &entry) const + { + return !(*this == entry); + } }; struct P4AclMirrorSession @@ -78,12 +100,32 @@ struct P4AclMirrorSession std::string name; std::string key; // KeyGenerator::generateMirrorSessionKey(name) sai_object_id_t oid; + + bool operator==(const P4AclMirrorSession &entry) const + { + return name == entry.name && key == entry.key && oid == entry.oid; + } + + bool operator!=(const P4AclMirrorSession &entry) const + { + return !(*this == entry); + } }; struct P4UdfDataMask { std::vector data; std::vector mask; + + bool operator==(const P4UdfDataMask &entry) const + { + return data == entry.data && mask == entry.mask; + } + + bool operator!=(const P4UdfDataMask &entry) const + { + return !(*this == entry); + } }; struct P4AclRule @@ -120,6 +162,16 @@ struct SaiActionWithParam acl_entry_attr_union_t action; std::string param_name; std::string param_value; + + bool operator==(const SaiActionWithParam &entry) const + { + return action == entry.action && param_name == entry.param_name && param_value == entry.param_value; + } + + bool operator!=(const SaiActionWithParam &entry) const + { + return !(*this == entry); + } }; struct SaiMatchField @@ -128,6 +180,17 @@ struct SaiMatchField acl_table_attr_union_t table_attr; uint32_t bitwidth; Format format; + + bool operator==(const SaiMatchField &entry) const + { + return entry_attr == entry.entry_attr && table_attr == entry.table_attr && bitwidth == entry.bitwidth && + format == entry.format; + } + + bool operator!=(const SaiMatchField &entry) const + { + return !(*this == entry); + } }; struct P4UdfField @@ -137,6 +200,17 @@ struct P4UdfField std::string udf_id; // {group_id}-base{base}-offset{offset} uint16_t offset; // in Bytes sai_udf_base_t base; + + bool operator==(const P4UdfField &entry) const + { + return length == entry.length && group_id == entry.group_id && udf_id == entry.udf_id && + 
offset == entry.offset && base == entry.base; + } + + bool operator!=(const P4UdfField &entry) const + { + return !(*this == entry); + } }; struct P4AclTableDefinition @@ -152,8 +226,8 @@ struct P4AclTableDefinition std::string meter_unit; std::string counter_unit; // go/p4-composite-fields - // Only SAI attributes for IPv6-64bit(IPV6_WORDn) are supported as sai_field - // elements in composite field + // Only SAI attributes for IPv6-64bit(IPV6_WORDn) are supported as + // sai_field elements in composite field std::map> composite_sai_match_fields_lookup; // go/gpins-acl-udf // p4_match string to a list of P4UdfFields mapping @@ -199,8 +273,6 @@ using P4AclRuleTables = std::map>; #define P4_FORMAT_IPV6 "IPV6" #define P4_FORMAT_STRING "STRING" -// complete p4 match fields and action list: -// https://docs.google.com/document/d/1gtxJe7aPIJgM2hTLo5gm62DuPJHB31eAyRAsV9zjwW0/edit#heading=h.dzb8jjrtxv49 #define P4_MATCH_IN_PORT "SAI_ACL_TABLE_ATTR_FIELD_IN_PORT" #define P4_MATCH_OUT_PORT "SAI_ACL_TABLE_ATTR_FIELD_OUT_PORT" #define P4_MATCH_IN_PORTS "SAI_ACL_TABLE_ATTR_FIELD_IN_PORTS" @@ -251,6 +323,7 @@ using P4AclRuleTables = std::map>; #define P4_MATCH_DST_IPV6_WORD2 "SAI_ACL_TABLE_ATTR_FIELD_DST_IPV6_WORD2" #define P4_MATCH_SRC_IPV6_WORD3 "SAI_ACL_TABLE_ATTR_FIELD_SRC_IPV6_WORD3" #define P4_MATCH_SRC_IPV6_WORD2 "SAI_ACL_TABLE_ATTR_FIELD_SRC_IPV6_WORD2" +#define P4_MATCH_ROUTE_DST_USER_META "SAI_ACL_TABLE_ATTR_FIELD_ROUTE_DST_USER_META" #define P4_ACTION_PACKET_ACTION "SAI_ACL_ENTRY_ATTR_ACTION_PACKET_ACTION" #define P4_ACTION_REDIRECT "SAI_ACL_ENTRY_ATTR_ACTION_REDIRECT" @@ -351,7 +424,6 @@ using P4AclRuleTables = std::map>; #define GENL_PACKET_TRAP_GROUP_NAME_PREFIX "trap.group.cpu.queue." 
-#define WHITESPACE " " #define EMPTY_STRING "" #define P4_CPU_QUEUE_MAX_NUM 8 #define IPV6_SINGLE_WORD_BYTES_LENGTH 4 @@ -411,6 +483,7 @@ static const acl_table_attr_lookup_t aclMatchTableAttrLookup = { {P4_MATCH_PACKET_VLAN, SAI_ACL_TABLE_ATTR_FIELD_PACKET_VLAN}, {P4_MATCH_TUNNEL_VNI, SAI_ACL_TABLE_ATTR_FIELD_TUNNEL_VNI}, {P4_MATCH_IPV6_NEXT_HEADER, SAI_ACL_TABLE_ATTR_FIELD_IPV6_NEXT_HEADER}, + {P4_MATCH_ROUTE_DST_USER_META, SAI_ACL_TABLE_ATTR_FIELD_ROUTE_DST_USER_META}, }; static const acl_table_attr_format_lookup_t aclMatchTableAttrFormatLookup = { @@ -459,6 +532,7 @@ static const acl_table_attr_format_lookup_t aclMatchTableAttrFormatLookup = { {SAI_ACL_TABLE_ATTR_FIELD_PACKET_VLAN, Format::STRING}, {SAI_ACL_TABLE_ATTR_FIELD_TUNNEL_VNI, Format::HEX_STRING}, {SAI_ACL_TABLE_ATTR_FIELD_IPV6_NEXT_HEADER, Format::HEX_STRING}, + {SAI_ACL_TABLE_ATTR_FIELD_ROUTE_DST_USER_META, Format::HEX_STRING}, }; static const acl_table_attr_lookup_t aclCompositeMatchTableAttrLookup = { @@ -514,6 +588,7 @@ static const acl_rule_attr_lookup_t aclMatchEntryAttrLookup = { {P4_MATCH_PACKET_VLAN, SAI_ACL_ENTRY_ATTR_FIELD_PACKET_VLAN}, {P4_MATCH_TUNNEL_VNI, SAI_ACL_ENTRY_ATTR_FIELD_TUNNEL_VNI}, {P4_MATCH_IPV6_NEXT_HEADER, SAI_ACL_ENTRY_ATTR_FIELD_IPV6_NEXT_HEADER}, + {P4_MATCH_ROUTE_DST_USER_META, SAI_ACL_ENTRY_ATTR_FIELD_ROUTE_DST_USER_META}, }; static const acl_rule_attr_lookup_t aclCompositeMatchEntryAttrLookup = { @@ -629,9 +704,6 @@ static std::map aclCounterStatsIdNameMap = { {SAI_POLICER_STAT_RED_BYTES, P4_COUNTER_STATS_RED_BYTES}, }; -// Trim tailing and leading whitespace -std::string trim(const std::string &s); - // Parse ACL table definition APP DB entry action field to P4ActionParamName // action_list and P4PacketActionWithColor action_color_list bool parseAclTableAppDbActionField(const std::string &aggr_actions_str, std::vector *action_list, @@ -644,8 +716,8 @@ ReturnCode validateAndSetSaiMatchFieldJson(const nlohmann::json &match_json, con std::map *sai_match_field_lookup, 
std::map *ip_type_bit_type_lookup); -// Validate and set composite match field element with kind:sai_field. Composite -// SAI field only support IPv6-64bit now (IPV6_WORDn) +// Validate and set composite match field element with kind:sai_field. +// Composite SAI field only support IPv6-64bit now (IPV6_WORDn) ReturnCode validateAndSetCompositeElementSaiFieldJson( const nlohmann::json &element_match_json, const std::string &p4_match, std::map> *composite_sai_match_fields_lookup, @@ -681,9 +753,10 @@ ReturnCode buildAclTableDefinitionActionFieldValues( bool isSetUserTrapActionInAclTableDefinition( const std::map> &aggr_sai_actions_lookup); -// Build packet color(sai_policer_attr_t) to packet action(sai_packet_action_t) -// map for ACL table definition by P4PacketActionWithColor action map. If packet -// color is empty, then the packet action should add as a SaiActionWithParam +// Build packet color(sai_policer_attr_t) to packet +// action(sai_packet_action_t) map for ACL table definition by +// P4PacketActionWithColor action map. If packet color is empty, then the +// packet action should add as a SaiActionWithParam ReturnCode buildAclTableDefinitionActionColorFieldValues( const std::map> &action_color_lookup, std::map> *aggr_sai_actions_lookup, @@ -702,9 +775,17 @@ ReturnCode setCompositeSaiMatchValue(const acl_entry_attr_union_t attr_name, con ReturnCode setUdfMatchValue(const P4UdfField &udf_field, const std::string &attr_value, sai_attribute_value_t *value, P4UdfDataMask *udf_data_mask, uint16_t bytes_offset); -// Compares the action value difference if the action field is present in both -// new and old ACL rules. Returns true if action values are different. +// Compares the action value difference if the action field is present in +// both new and old ACL rules. Returns true if action values are different. 
bool isDiffActionFieldValue(const acl_entry_attr_union_t attr_name, const sai_attribute_value_t &value, const sai_attribute_value_t &old_value, const P4AclRule &acl_rule, const P4AclRule &old_acl_rule); + +// Compares the match value difference if the match field is present in +// both new and old ACL rules. Returns true if match values are different. +// This method is used in state verification only. +bool isDiffMatchFieldValue(const acl_entry_attr_union_t attr_name, const sai_attribute_value_t &value, + const sai_attribute_value_t &old_value, const P4AclRule &acl_rule, + const P4AclRule &old_acl_rule); + } // namespace p4orch diff --git a/orchagent/p4orch/ext_tables_manager.cpp b/orchagent/p4orch/ext_tables_manager.cpp new file mode 100644 index 0000000000..9cec36f8b8 --- /dev/null +++ b/orchagent/p4orch/ext_tables_manager.cpp @@ -0,0 +1,881 @@ +#include "p4orch/ext_tables_manager.h" + +#include +#include +#include +#include +#include +#include + +#include "directory.h" +#include "json.hpp" +#include "logger.h" +#include "tokenize.h" +#include "orch.h" +#include "crmorch.h" +#include "p4orch/p4orch.h" +#include "p4orch/p4orch_util.h" + +extern sai_counter_api_t* sai_counter_api; +extern sai_generic_programmable_api_t *sai_generic_programmable_api; + +extern Directory gDirectory; +extern sai_object_id_t gSwitchId; +extern P4Orch *gP4Orch; +extern CrmOrch *gCrmOrch; + +P4ExtTableEntry *ExtTablesManager::getP4ExtTableEntry(const std::string &table_name, const std::string &key) +{ + SWSS_LOG_ENTER(); + + auto it = m_extTables.find(table_name); + if (it == m_extTables.end()) + return nullptr; + + if (it->second.find(key) == it->second.end()) + return nullptr; + + return &it->second[key]; +} + +std::string getCrossRefTableName(const std::string table_name) +{ + auto it = FixedTablesMap.find(table_name); + if (it != FixedTablesMap.end()) + { + return(it->second); + } + + return(table_name); +} + +ReturnCode 
ExtTablesManager::validateActionParamsCrossRef(P4ExtTableAppDbEntry &app_db_entry, ActionInfo *action) +{ + const std::string action_name = action->name; + std::unordered_map cross_ref_key_j; + ReturnCode status; + + for (auto param_defn_it = action->params.begin(); + param_defn_it != action->params.end(); param_defn_it++) + { + ActionParamInfo action_param_defn = param_defn_it->second; + if (action_param_defn.table_reference_map.empty()) + { + continue; + } + + std::string param_name = param_defn_it->first; + + auto app_db_param_it = app_db_entry.action_params[action_name].find(param_name); + if (app_db_param_it == app_db_entry.action_params[action_name].end()) + { + SWSS_LOG_ERROR("Required param not specified for action %s\n", action_name.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Required param not specified for action %s " << action_name.c_str(); + } + + for (auto cross_ref_it = action_param_defn.table_reference_map.begin(); + cross_ref_it != action_param_defn.table_reference_map.end(); cross_ref_it++) + { + cross_ref_key_j[cross_ref_it->first].push_back(nlohmann::json::object_t::value_type(prependMatchField(cross_ref_it->second), app_db_param_it->second)); + } + } + + + for (auto it = cross_ref_key_j.begin(); it != cross_ref_key_j.end(); it++) + { + const std::string table_name = getCrossRefTableName(it->first); + const std::string table_key = it->second.dump(); + std::string key; + sai_object_type_t object_type; + sai_object_id_t oid; + DepObject dep_object = {}; + + if (gP4Orch->m_p4TableToManagerMap.find(table_name) != gP4Orch->m_p4TableToManagerMap.end()) + { + status = gP4Orch->m_p4TableToManagerMap[table_name]->getSaiObject(table_key, object_type, key); + if (!status.ok()) + { + SWSS_LOG_ERROR("Cross-table reference validation failed from fixed-table %s", table_name.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Cross-table reference valdiation failed from fixed-table"; + } + } + else + { + if 
(getTableInfo(table_name)) + { + auto ext_table_key = KeyGenerator::generateExtTableKey(table_name, table_key); + status = getSaiObject(ext_table_key, object_type, key); + if (!status.ok()) + { + SWSS_LOG_ERROR("Cross-table reference validation failed from extension-table %s", table_name.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Cross-table reference valdiation failed from extension table"; + } + } + else + { + SWSS_LOG_ERROR("Cross-table reference validation failed due to non-existent table %s", table_name.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Cross-table reference valdiation failed due to non-existent table"; + } + } + + if (!m_p4OidMapper->getOID(object_type, key, &oid)) + { + SWSS_LOG_ERROR("Cross-table reference validation failed, no OID found from table %s", table_name.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Cross-table reference valdiation failed, no OID found"; + } + + if (oid == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_ERROR("Cross-table reference validation failed, null OID expected from table %s", table_name.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Cross-table reference valdiation failed, null OID"; + } + + dep_object.sai_object = object_type; + dep_object.key = key; + dep_object.oid = oid; + app_db_entry.action_dep_objects[action_name] = dep_object; + } + + return ReturnCode(); +} + +ReturnCode ExtTablesManager::validateP4ExtTableAppDbEntry(P4ExtTableAppDbEntry &app_db_entry) +{ + // Perform generic APP DB entry validations. Operation specific validations + // will be done by the respective request process methods. 
+ ReturnCode status; + + TableInfo *table; + table = getTableInfo(app_db_entry.table_name); + if (table == nullptr) + { + SWSS_LOG_ERROR("Not a valid extension table %s", app_db_entry.table_name.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Not a valid extension table " << app_db_entry.table_name.c_str(); + } + + if (table->action_ref_tables.empty()) + { + return ReturnCode(); + } + + ActionInfo *action; + for (auto app_db_action_it = app_db_entry.action_params.begin(); + app_db_action_it != app_db_entry.action_params.end(); app_db_action_it++) + { + auto action_name = app_db_action_it->first; + action = getTableActionInfo(table, action_name); + if (action == nullptr) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Not a valid action " << action_name.c_str() + << " in extension table " << app_db_entry.table_name.c_str(); + } + + if (!action->refers_to) + { + continue; + } + + status = validateActionParamsCrossRef(app_db_entry, action); + if (!status.ok()) + { + return status; + } + } + + return ReturnCode(); +} + + +ReturnCodeOr ExtTablesManager::deserializeP4ExtTableEntry( + const std::string &table_name, + const std::string &key, const std::vector &attributes) +{ + std::string action_name; + + SWSS_LOG_ENTER(); + + P4ExtTableAppDbEntry app_db_entry_or = {}; + app_db_entry_or.table_name = table_name; + app_db_entry_or.table_key = key; + + action_name = ""; + for (const auto &it : attributes) + { + auto field = fvField(it); + auto value = fvValue(it); + + if (field == p4orch::kAction) + { + action_name = value; + continue; + } + + const auto &tokenized_fields = tokenize(field, p4orch::kFieldDelimiter); + if (tokenized_fields.size() <= 1) + { + SWSS_LOG_ERROR("Unknown extension entry field"); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Unknown extension entry field " << QuotedVar(field); + } + + const auto &prefix = tokenized_fields[0]; + if (prefix == p4orch::kActionParamPrefix) + { + const auto ¶m_name = 
tokenized_fields[1]; + app_db_entry_or.action_params[action_name][param_name] = value; + continue; + } + else + { + SWSS_LOG_ERROR("Unexpected extension entry field"); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Unexpected extension entry field " << QuotedVar(field); + } + } + + return app_db_entry_or; +} + + +ReturnCode ExtTablesManager::prepareP4SaiExtAPIParams(const P4ExtTableAppDbEntry &app_db_entry, + std::string &ext_table_entry_attr) +{ + nlohmann::json sai_j, sai_metadata_j, sai_array_j = {}, sai_entry_j; + + SWSS_LOG_ENTER(); + + try + { + TableInfo *table; + table = getTableInfo(app_db_entry.table_name); + if (!table) + { + SWSS_LOG_ERROR("extension entry for invalid table %s", app_db_entry.table_name.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "extension entry for invalid table " << app_db_entry.table_name.c_str(); + } + + nlohmann::json j = nlohmann::json::parse(app_db_entry.table_key); + for (auto it = j.begin(); it != j.end(); ++it) + { + std::string match, value, prefix; + std::size_t pos; + + match = it.key(); + value = it.value(); + + prefix = p4orch::kMatchPrefix; + pos = match.rfind(prefix); + if (pos != std::string::npos) + { + match.erase(0, prefix.length()); + } + else + { + SWSS_LOG_ERROR("Failed to encode match fields for sai call"); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Failed to encode match fields for sai call"; + } + + prefix = p4orch::kFieldDelimiter; + pos = match.rfind(prefix); + if (pos != std::string::npos) + { + match.erase(0, prefix.length()); + } + else + { + SWSS_LOG_ERROR("Failed to encode match fields for sai call"); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Failed to encode match fields for sai call"; + } + + auto match_defn_it = table->match_fields.find(match); + if (match_defn_it == table->match_fields.end()) + { + SWSS_LOG_ERROR("extension entry for invalid match field %s", match.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) 
+ << "extension entry for invalid match field " << match.c_str(); + } + + sai_metadata_j = nlohmann::json::object({}); + sai_metadata_j["sai_attr_value_type"] = match_defn_it->second.datatype; + + sai_j = nlohmann::json::object({}); + sai_j[match]["value"] = value; + sai_j[match]["sai_metadata"] = sai_metadata_j; + + sai_array_j.push_back(sai_j); + } + + for (auto app_db_action_it = app_db_entry.action_params.begin(); + app_db_action_it != app_db_entry.action_params.end(); app_db_action_it++) + { + sai_j = nlohmann::json::object({}); + auto action_dep_object_it = app_db_entry.action_dep_objects.find(app_db_action_it->first); + if (action_dep_object_it == app_db_entry.action_dep_objects.end()) + { + auto action_defn_it = table->action_fields.find(app_db_action_it->first); + for (auto app_db_param_it = app_db_action_it->second.begin(); + app_db_param_it != app_db_action_it->second.end(); app_db_param_it++) + { + nlohmann::json params_j = nlohmann::json::object({}); + if (action_defn_it != table->action_fields.end()) + { + auto param_defn_it = action_defn_it->second.params.find(app_db_param_it->first); + if (param_defn_it != action_defn_it->second.params.end()) + { + sai_metadata_j = nlohmann::json::object({}); + sai_metadata_j["sai_attr_value_type"] = param_defn_it->second.datatype; + + params_j[app_db_param_it->first]["sai_metadata"] = sai_metadata_j; + } + } + params_j[app_db_param_it->first]["value"] = app_db_param_it->second; + sai_j[app_db_action_it->first].push_back(params_j); + } + } + else + { + auto action_dep_object = action_dep_object_it->second; + + sai_metadata_j = nlohmann::json::object({}); + sai_metadata_j["sai_attr_value_type"] = "SAI_ATTR_VALUE_TYPE_OBJECT_ID"; + + sai_j[app_db_action_it->first]["sai_metadata"] = sai_metadata_j; + sai_j[app_db_action_it->first]["value"] = action_dep_object.oid; + } + + sai_array_j.push_back(sai_j); + } + } + catch (std::exception &ex) + { + SWSS_LOG_ERROR("Failed to encode table %s entry for sai call", 
app_db_entry.table_name.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Failed to encode table entry for sai call"; + } + + sai_entry_j = nlohmann::json::object({}); + sai_entry_j.push_back(nlohmann::json::object_t::value_type("attributes", sai_array_j)); + SWSS_LOG_ERROR("table: %s, sai entry: %s", app_db_entry.table_name.c_str(), sai_entry_j.dump().c_str()); + ext_table_entry_attr = sai_entry_j.dump(); + + return ReturnCode(); +} + +bool removeGenericCounter(sai_object_id_t counter_id) +{ + sai_status_t sai_status = sai_counter_api->remove_counter(counter_id); + if (sai_status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove generic counter: %" PRId64 "", counter_id); + return false; + } + + return true; +} + +bool createGenericCounter(sai_object_id_t &counter_id) +{ + sai_attribute_t counter_attr; + counter_attr.id = SAI_COUNTER_ATTR_TYPE; + counter_attr.value.s32 = SAI_COUNTER_TYPE_REGULAR; + sai_status_t sai_status = sai_counter_api->create_counter(&counter_id, gSwitchId, 1, &counter_attr); + if (sai_status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_WARN("Failed to create generic counter"); + return false; + } + + return true; +} + + +ReturnCode ExtTablesManager::createP4ExtTableEntry(const P4ExtTableAppDbEntry &app_db_entry, + P4ExtTableEntry &ext_table_entry) +{ + ReturnCode status; + sai_object_type_t object_type; + std::string key; + std::string ext_table_entry_attr; + sai_object_id_t counter_id; + + SWSS_LOG_ENTER(); + + status = prepareP4SaiExtAPIParams(app_db_entry, ext_table_entry_attr); + if (!status.ok()) + { + return status; + } + + // Prepare attributes for the SAI create call. 
+ std::vector generic_programmable_attrs; + sai_attribute_t generic_programmable_attr; + + generic_programmable_attr.id = SAI_GENERIC_PROGRAMMABLE_ATTR_OBJECT_NAME; + generic_programmable_attr.value.s8list.count = (uint32_t)app_db_entry.table_name.size(); + generic_programmable_attr.value.s8list.list = (int8_t *)const_cast(app_db_entry.table_name.c_str()); + generic_programmable_attrs.push_back(generic_programmable_attr); + + generic_programmable_attr.id = SAI_GENERIC_PROGRAMMABLE_ATTR_ENTRY; + generic_programmable_attr.value.json.json.count = (uint32_t)ext_table_entry_attr.size(); + generic_programmable_attr.value.json.json.list = (int8_t *)const_cast(ext_table_entry_attr.c_str()); + generic_programmable_attrs.push_back(generic_programmable_attr); + + + auto *table = getTableInfo(app_db_entry.table_name); + if (!table) + { + SWSS_LOG_ERROR("extension entry for invalid table %s", app_db_entry.table_name.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "extension entry for invalid table " << app_db_entry.table_name.c_str(); + } + + if (table->counter_bytes_enabled || table->counter_packets_enabled) + { + if (!createGenericCounter(counter_id)) + { + SWSS_LOG_WARN("Failed to create counter for table %s, key %s\n", + app_db_entry.table_name.c_str(), + app_db_entry.table_key.c_str()); + } + else + { + ext_table_entry.sai_counter_oid = counter_id; + } + + generic_programmable_attr.id = SAI_GENERIC_PROGRAMMABLE_ATTR_COUNTER_ID; + generic_programmable_attr.value.oid = counter_id; + generic_programmable_attrs.push_back(generic_programmable_attr); + } + + sai_object_id_t sai_generic_programmable_oid = SAI_NULL_OBJECT_ID; + sai_status_t sai_status = sai_generic_programmable_api->create_generic_programmable( + &sai_generic_programmable_oid, gSwitchId, + (uint32_t)generic_programmable_attrs.size(), + generic_programmable_attrs.data()); + if (sai_status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("create sai api call failed for extension entry table %s, 
entry %s", + app_db_entry.table_name.c_str(), app_db_entry.table_key.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "create sai api call failed for extension entry table " + << app_db_entry.table_name.c_str() + << " , entry " << app_db_entry.table_key.c_str(); + } + std::string crm_table_name = "EXT_" + app_db_entry.table_name; + boost::algorithm::to_upper(crm_table_name); + gCrmOrch->incCrmExtTableUsedCounter(CrmResourceType::CRM_EXT_TABLE, crm_table_name); + + + ext_table_entry.sai_entry_oid = sai_generic_programmable_oid; + for (auto action_dep_object_it = app_db_entry.action_dep_objects.begin(); + action_dep_object_it != app_db_entry.action_dep_objects.end(); action_dep_object_it++) + { + auto action_dep_object = action_dep_object_it->second; + m_p4OidMapper->increaseRefCount(action_dep_object.sai_object, action_dep_object.key); + ext_table_entry.action_dep_objects[action_dep_object_it->first] = action_dep_object; + } + + + auto ext_table_key = KeyGenerator::generateExtTableKey(app_db_entry.table_name, app_db_entry.table_key); + status = getSaiObject(ext_table_key, object_type, key); + if (!status.ok()) + { + SWSS_LOG_ERROR("Invalid formation of a key %s", ext_table_key.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Invalid formation of a key"; + } + + m_p4OidMapper->setOID(object_type, key, ext_table_entry.sai_entry_oid); + m_extTables[app_db_entry.table_name][app_db_entry.table_key] = ext_table_entry; + return ReturnCode(); +} + + +ReturnCode ExtTablesManager::updateP4ExtTableEntry(const P4ExtTableAppDbEntry &app_db_entry, + P4ExtTableEntry *ext_table_entry) +{ + ReturnCode status; + std::string ext_table_entry_attr; + std::unordered_map old_action_dep_objects; + + SWSS_LOG_ENTER(); + + if (ext_table_entry->sai_entry_oid == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_ERROR("update sai api call for NULL extension entry table %s, entry %s", + app_db_entry.table_name.c_str(), ext_table_entry->table_key.c_str()); + return 
ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "update sai api call for NULL extension entry table " + << app_db_entry.table_name.c_str() + << " , entry " << ext_table_entry->table_key.c_str(); + } + + status = prepareP4SaiExtAPIParams(app_db_entry, ext_table_entry_attr); + if (!status.ok()) + { + return status; + } + + // Prepare attribute for the SAI update call. + sai_attribute_t generic_programmable_attr; + + generic_programmable_attr.id = SAI_GENERIC_PROGRAMMABLE_ATTR_ENTRY; + generic_programmable_attr.value.json.json.count = (uint32_t)ext_table_entry_attr.length(); + generic_programmable_attr.value.json.json.list = (int8_t *)const_cast(ext_table_entry_attr.c_str()); + + sai_status_t sai_status = sai_generic_programmable_api->set_generic_programmable_attribute( + ext_table_entry->sai_entry_oid, + &generic_programmable_attr); + if (sai_status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("update sai api call failed for extension entry table %s, entry %s", + app_db_entry.table_name.c_str(), ext_table_entry->table_key.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "update sai api call failed for extension entry table " + << app_db_entry.table_name.c_str() + << " , entry " << ext_table_entry->table_key.c_str(); + } + + + old_action_dep_objects = ext_table_entry->action_dep_objects; + ext_table_entry->action_dep_objects.clear(); + + for (auto action_dep_object_it = app_db_entry.action_dep_objects.begin(); + action_dep_object_it != app_db_entry.action_dep_objects.end(); action_dep_object_it++) + { + auto action_dep_object = action_dep_object_it->second; + m_p4OidMapper->increaseRefCount(action_dep_object.sai_object, action_dep_object.key); + ext_table_entry->action_dep_objects[action_dep_object_it->first] = action_dep_object; + } + + for (auto old_action_dep_object_it = old_action_dep_objects.begin(); + old_action_dep_object_it != old_action_dep_objects.end(); old_action_dep_object_it++) + { + auto old_action_dep_object = 
old_action_dep_object_it->second; + m_p4OidMapper->decreaseRefCount(old_action_dep_object.sai_object, old_action_dep_object.key); + } + + return ReturnCode(); +} + +ReturnCode ExtTablesManager::removeP4ExtTableEntry(const std::string &table_name, + const std::string &table_key) +{ + ReturnCode status; + sai_object_type_t object_type; + std::string key; + + SWSS_LOG_ENTER(); + + auto *ext_table_entry = getP4ExtTableEntry(table_name, table_key); + if (!ext_table_entry) + { + LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) + << "extension entry with key " << QuotedVar(table_key) + << " does not exist for table " << QuotedVar(table_name)); + } + + if (ext_table_entry->sai_entry_oid == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_ERROR("remove sai api call for NULL extension entry table %s, entry %s", + table_name.c_str(), table_key.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "remove sai api call for NULL extension entry table " + << table_name.c_str() << " , entry " << table_key.c_str(); + } + + SWSS_LOG_ERROR("table: %s, key: %s", ext_table_entry->table_name.c_str(), + ext_table_entry->table_key.c_str()); + sai_status_t sai_status = sai_generic_programmable_api->remove_generic_programmable( + ext_table_entry->sai_entry_oid); + if (sai_status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("remove sai api call failed for extension entry table %s, entry %s", + table_name.c_str(), table_key.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "remove sai api call failed for extension entry table " + << table_name.c_str() << " , entry " << table_key.c_str(); + } + std::string crm_table_name = "EXT_" + table_name; + boost::algorithm::to_upper(crm_table_name); + gCrmOrch->decCrmExtTableUsedCounter(CrmResourceType::CRM_EXT_TABLE, crm_table_name); + + + auto ext_table_key = KeyGenerator::generateExtTableKey(table_name, table_key); + status = getSaiObject(ext_table_key, object_type, key); + if (!status.ok()) + { + 
SWSS_LOG_ERROR("Invalid formation of a key %s", ext_table_key.c_str()); + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Invalid formation of a key"; + } + + uint32_t ref_count; + if (!m_p4OidMapper->getRefCount(object_type, key, &ref_count)) + { + RETURN_INTERNAL_ERROR_AND_RAISE_CRITICAL("Failed to get reference count for " << QuotedVar(key)); + } + if (ref_count > 0) + { + LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "extension entry " << QuotedVar(key) + << " referenced by other objects (ref_count = " << ref_count); + } + m_p4OidMapper->eraseOID(object_type, key); + + for (auto action_dep_object_it = ext_table_entry->action_dep_objects.begin(); + action_dep_object_it != ext_table_entry->action_dep_objects.end(); action_dep_object_it++) + { + auto action_dep_object = action_dep_object_it->second; + m_p4OidMapper->decreaseRefCount(action_dep_object.sai_object, action_dep_object.key); + } + + if (ext_table_entry->sai_counter_oid != SAI_NULL_OBJECT_ID) + { + m_countersTable->del(ext_table_entry->db_key); + removeGenericCounter(ext_table_entry->sai_counter_oid); + } + + m_extTables[table_name].erase(table_key); + + return ReturnCode(); +} + + +ReturnCode ExtTablesManager::processAddRequest(const P4ExtTableAppDbEntry &app_db_entry) +{ + SWSS_LOG_ENTER(); + + P4ExtTableEntry ext_table_entry(app_db_entry.db_key, app_db_entry.table_name, app_db_entry.table_key); + auto status = createP4ExtTableEntry(app_db_entry, ext_table_entry); + if (!status.ok()) + { + return status; + } + return ReturnCode(); +} + +ReturnCode ExtTablesManager::processUpdateRequest(const P4ExtTableAppDbEntry &app_db_entry, + P4ExtTableEntry *ext_table_entry) +{ + SWSS_LOG_ENTER(); + + auto status = updateP4ExtTableEntry(app_db_entry, ext_table_entry); + if (!status.ok()) + { + SWSS_LOG_ERROR("Failed to update extension entry with key %s", + app_db_entry.table_key.c_str()); + } + return ReturnCode(); +} + +ReturnCode ExtTablesManager::processDeleteRequest(const 
P4ExtTableAppDbEntry &app_db_entry) +{ + SWSS_LOG_ENTER(); + + auto status = removeP4ExtTableEntry(app_db_entry.table_name, app_db_entry.table_key); + if (!status.ok()) + { + SWSS_LOG_ERROR("Failed to remove extension entry with key %s", + app_db_entry.table_key.c_str()); + } + return ReturnCode(); +} + + +ReturnCode ExtTablesManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) +{ + object_type = SAI_OBJECT_TYPE_GENERIC_PROGRAMMABLE; + object_key = json_key; + + return ReturnCode(); +} + +void ExtTablesManager::enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) +{ + m_entriesTables[table_name].push_back(entry); +} + +void ExtTablesManager::drain() +{ + SWSS_LOG_ENTER(); + std::string table_prefix = "EXT_"; + + if (gP4Orch->tablesinfo) { + for (auto table_it = gP4Orch->tablesinfo->m_tablePrecedenceMap.begin(); + table_it != gP4Orch->tablesinfo->m_tablePrecedenceMap.end(); ++table_it) + { + auto table_name = table_prefix + table_it->second; + boost::algorithm::to_upper(table_name); + auto it_m = m_entriesTables.find(table_name); + if (it_m == m_entriesTables.end()) + { + continue; + } + + for (const auto &key_op_fvs_tuple : it_m->second) + { + std::string table_name; + std::string table_key; + + parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &table_key); + const std::vector &attributes = kfvFieldsValues(key_op_fvs_tuple); + + if (table_name.rfind(table_prefix, 0) == std::string::npos) + { + SWSS_LOG_ERROR("Table %s is without prefix %s", table_name.c_str(), table_prefix.c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), + StatusCode::SWSS_RC_INVALID_PARAM, /*replace=*/true); + continue; + } + table_name = table_name.substr(table_prefix.length()); + boost::algorithm::to_lower(table_name); + + ReturnCode status; + auto app_db_entry_or = deserializeP4ExtTableEntry(table_name, table_key, attributes); + if 
(!app_db_entry_or.ok()) + { + status = app_db_entry_or.status(); + SWSS_LOG_ERROR("Unable to deserialize APP DB entry with key %s: %s", + QuotedVar(kfvKey(key_op_fvs_tuple)).c_str(), status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), + status, /*replace=*/true); + continue; + } + + auto &app_db_entry = *app_db_entry_or; + status = validateP4ExtTableAppDbEntry(app_db_entry); + if (!status.ok()) + { + SWSS_LOG_ERROR("Validation failed for extension APP DB entry with key %s: %s", + QuotedVar(kfvKey(key_op_fvs_tuple)).c_str(), status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), + status, /*replace=*/true); + continue; + } + + const std::string &operation = kfvOp(key_op_fvs_tuple); + if (operation == SET_COMMAND) + { + auto *ext_table_entry = getP4ExtTableEntry(app_db_entry.table_name, app_db_entry.table_key); + if (ext_table_entry == nullptr) + { + // Create extension entry + app_db_entry.db_key = kfvKey(key_op_fvs_tuple); + status = processAddRequest(app_db_entry); + } + else + { + // Modify existing extension entry + status = processUpdateRequest(app_db_entry, ext_table_entry); + } + } + else if (operation == DEL_COMMAND) + { + // Delete extension entry + status = processDeleteRequest(app_db_entry); + } + else + { + status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Unknown operation type " << QuotedVar(operation); + SWSS_LOG_ERROR("%s", status.message().c_str()); + } + if (!status.ok()) + { + SWSS_LOG_ERROR("Processing failed for extension APP_DB entry with key %s: %s", + QuotedVar(kfvKey(key_op_fvs_tuple)).c_str(), status.message().c_str()); + } + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), + status, /*replace=*/true); + } + + it_m->second.clear(); + } + } + + // Now report error for all remaining un-processed entries + for (auto it_m = 
m_entriesTables.begin(); it_m != m_entriesTables.end(); it_m++) + { + for (const auto &key_op_fvs_tuple : it_m->second) + { + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), + StatusCode::SWSS_RC_INVALID_PARAM, /*replace=*/true); + } + + it_m->second.clear(); + } +} + + +void ExtTablesManager::doExtCounterStatsTask() +{ + SWSS_LOG_ENTER(); + + if (!gP4Orch->tablesinfo) + { + return; + } + + sai_stat_id_t stat_ids[] = { SAI_COUNTER_STAT_PACKETS, SAI_COUNTER_STAT_BYTES }; + uint64_t stats[2]; + std::vector counter_stats_values; + + for (auto table_it = gP4Orch->tablesinfo->m_tableInfoMap.begin(); + table_it != gP4Orch->tablesinfo->m_tableInfoMap.end(); ++table_it) + { + if (!table_it->second.counter_bytes_enabled && !table_it->second.counter_packets_enabled) + { + continue; + } + + auto table_name = table_it->second.name; + auto ext_table_it = m_extTables.find(table_name); + if (ext_table_it == m_extTables.end()) + { + continue; + } + + for (auto ext_table_entry_it = ext_table_it->second.begin(); + ext_table_entry_it != ext_table_it->second.end(); ++ext_table_entry_it) + { + auto *ext_table_entry = &ext_table_entry_it->second; + if (ext_table_entry->sai_counter_oid == SAI_NULL_OBJECT_ID) + { + continue; + } + + sai_status_t sai_status = + sai_counter_api->get_counter_stats(ext_table_entry->sai_counter_oid, 2, stat_ids, stats); + if (sai_status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_WARN("Failed to set counters stats for extension entry %s:%s in COUNTERS_DB: ", + table_name.c_str(), ext_table_entry->table_key.c_str()); + continue; + } + + counter_stats_values.push_back( + swss::FieldValueTuple{P4_COUNTER_STATS_PACKETS, std::to_string(stats[0])}); + counter_stats_values.push_back( + swss::FieldValueTuple{P4_COUNTER_STATS_BYTES, std::to_string(stats[1])}); + + // Set field value tuples for counters stats in COUNTERS_DB + m_countersTable->set(ext_table_entry->db_key, counter_stats_values); + } + } +} + +std::string 
ExtTablesManager::verifyState(const std::string &key, const std::vector &tuple) +{ + std::string result = ""; + SWSS_LOG_ENTER(); + + return result; +} + diff --git a/orchagent/p4orch/ext_tables_manager.h b/orchagent/p4orch/ext_tables_manager.h new file mode 100644 index 0000000000..d9ac44858f --- /dev/null +++ b/orchagent/p4orch/ext_tables_manager.h @@ -0,0 +1,96 @@ +#pragma once + +#include +#include +#include +#include + +#include "macaddress.h" +#include "json.hpp" +#include "orch.h" +#include "p4orch/object_manager_interface.h" +#include "p4orch/p4oidmapper.h" +#include "p4orch/p4orch_util.h" +#include "p4orch/tables_definition_manager.h" +#include "response_publisher_interface.h" +#include "return_code.h" +#include "vrforch.h" +extern "C" +{ +#include "sai.h" +} + +struct P4ExtTableEntry +{ + std::string db_key; + std::string table_name; + std::string table_key; + sai_object_id_t sai_entry_oid = SAI_NULL_OBJECT_ID; + sai_object_id_t sai_counter_oid = SAI_NULL_OBJECT_ID; + std::unordered_map action_dep_objects; + + P4ExtTableEntry() {}; + P4ExtTableEntry(const std::string &db_key, const std::string &table_name, const std::string &table_key) + : db_key(db_key), table_name(table_name), table_key(table_key) + { + } +}; + +typedef std::unordered_map P4ExtTableEntryMap; +typedef std::unordered_map P4ExtTableMap; +typedef std::unordered_map> m_entriesTableMap; + +class ExtTablesManager : public ObjectManagerInterface +{ + public: + ExtTablesManager(P4OidMapper *p4oidMapper, VRFOrch *vrfOrch, ResponsePublisherInterface *publisher) + : m_vrfOrch(vrfOrch), + m_countersDb(std::make_unique("COUNTERS_DB", 0)), + m_countersTable(std::make_unique( + m_countersDb.get(), std::string(COUNTERS_TABLE) + DEFAULT_KEY_SEPARATOR + APP_P4RT_TABLE_NAME)) + { + SWSS_LOG_ENTER(); + + assert(p4oidMapper != nullptr); + m_p4OidMapper = p4oidMapper; + assert(publisher != nullptr); + m_publisher = publisher; + } + virtual ~ExtTablesManager() = default; + + void enqueue(const std::string 
&table_name, const swss::KeyOpFieldsValuesTuple &entry) override; + void drain() override; + std::string verifyState(const std::string &key, const std::vector &tuple) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) override; + + // For every extension entry, update counters stats in COUNTERS_DB, if + // counters are enabled for those entries + void doExtCounterStatsTask(); + + private: + ReturnCodeOr deserializeP4ExtTableEntry( + const std::string &table_name, + const std::string &key, const std::vector &attributes); + ReturnCode validateActionParamsCrossRef(P4ExtTableAppDbEntry &app_db_entry, ActionInfo *action); + ReturnCode validateP4ExtTableAppDbEntry(P4ExtTableAppDbEntry &app_db_entry); + P4ExtTableEntry *getP4ExtTableEntry(const std::string &table_name, const std::string &table_key); + ReturnCode prepareP4SaiExtAPIParams(const P4ExtTableAppDbEntry &app_db_entry, + std::string &ext_table_entry_attr); + ReturnCode createP4ExtTableEntry(const P4ExtTableAppDbEntry &app_db_entry, P4ExtTableEntry &ext_table_entry); + ReturnCode updateP4ExtTableEntry(const P4ExtTableAppDbEntry &app_db_entry, P4ExtTableEntry *ext_table_entry); + ReturnCode removeP4ExtTableEntry(const std::string &table_name, const std::string &table_key); + ReturnCode processAddRequest(const P4ExtTableAppDbEntry &app_db_entry); + ReturnCode processUpdateRequest(const P4ExtTableAppDbEntry &app_db_entry, P4ExtTableEntry *ext_table_entry); + ReturnCode processDeleteRequest(const P4ExtTableAppDbEntry &app_db_entry); + + ReturnCode setExtTableCounterStats(P4ExtTableEntry *ext_table_entry); + + P4ExtTableMap m_extTables; + P4OidMapper *m_p4OidMapper; + VRFOrch *m_vrfOrch; + ResponsePublisherInterface *m_publisher; + m_entriesTableMap m_entriesTables; + + std::unique_ptr m_countersDb; + std::unique_ptr m_countersTable; +}; diff --git a/orchagent/p4orch/gre_tunnel_manager.cpp b/orchagent/p4orch/gre_tunnel_manager.cpp new file mode 
100644 index 0000000000..ddadf8ddcd --- /dev/null +++ b/orchagent/p4orch/gre_tunnel_manager.cpp @@ -0,0 +1,634 @@ +#include "p4orch/gre_tunnel_manager.h" + +#include +#include +#include +#include + +#include "SaiAttributeList.h" +#include "crmorch.h" +#include "dbconnector.h" +#include "ipaddress.h" +#include "json.hpp" +#include "logger.h" +#include "p4orch/p4orch_util.h" +#include "sai_serialize.h" +#include "swssnet.h" +#include "table.h" +extern "C" +{ +#include "sai.h" +} + +using ::p4orch::kTableKeyDelimiter; + +extern sai_object_id_t gSwitchId; +extern sai_tunnel_api_t *sai_tunnel_api; +extern sai_router_interface_api_t *sai_router_intfs_api; +extern CrmOrch *gCrmOrch; +extern sai_object_id_t gVirtualRouterId; + +namespace +{ + +ReturnCode validateGreTunnelAppDbEntry(const P4GreTunnelAppDbEntry &app_db_entry) +{ + if (app_db_entry.action_str != p4orch::kTunnelAction) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Invalid action " << QuotedVar(app_db_entry.action_str) << " of GRE Tunnel App DB entry"; + } + if (app_db_entry.router_interface_id.empty()) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << QuotedVar(prependParamField(p4orch::kTunnelId)) << " field is missing in table entry"; + } + if (app_db_entry.encap_src_ip.isZero()) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << QuotedVar(prependParamField(p4orch::kEncapSrcIp)) << " field is missing in table entry"; + } + if (app_db_entry.encap_dst_ip.isZero()) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << QuotedVar(prependParamField(p4orch::kEncapDstIp)) << " field is missing in table entry"; + } + return ReturnCode(); +} + +std::vector getSaiAttrs(const P4GreTunnelEntry &gre_tunnel_entry) +{ + std::vector tunnel_attrs; + sai_attribute_t tunnel_attr; + tunnel_attr.id = SAI_TUNNEL_ATTR_TYPE; + tunnel_attr.value.s32 = SAI_TUNNEL_TYPE_IPINIP_GRE; + tunnel_attrs.push_back(tunnel_attr); + + tunnel_attr.id = SAI_TUNNEL_ATTR_PEER_MODE; + 
tunnel_attr.value.s32 = SAI_TUNNEL_PEER_MODE_P2P; + tunnel_attrs.push_back(tunnel_attr); + + tunnel_attr.id = SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE; + tunnel_attr.value.oid = gre_tunnel_entry.underlay_if_oid; + tunnel_attrs.push_back(tunnel_attr); + + tunnel_attr.id = SAI_TUNNEL_ATTR_OVERLAY_INTERFACE; + tunnel_attr.value.oid = gre_tunnel_entry.overlay_if_oid; + tunnel_attrs.push_back(tunnel_attr); + + tunnel_attr.id = SAI_TUNNEL_ATTR_ENCAP_SRC_IP; + swss::copy(tunnel_attr.value.ipaddr, gre_tunnel_entry.encap_src_ip); + tunnel_attrs.push_back(tunnel_attr); + + tunnel_attr.id = SAI_TUNNEL_ATTR_ENCAP_DST_IP; + swss::copy(tunnel_attr.value.ipaddr, gre_tunnel_entry.encap_dst_ip); + tunnel_attrs.push_back(tunnel_attr); + return tunnel_attrs; +} + +} // namespace + +P4GreTunnelEntry::P4GreTunnelEntry(const std::string &tunnel_id, const std::string &router_interface_id, + const swss::IpAddress &encap_src_ip, const swss::IpAddress &encap_dst_ip, + const swss::IpAddress &neighbor_id) + : tunnel_id(tunnel_id), router_interface_id(router_interface_id), encap_src_ip(encap_src_ip), + encap_dst_ip(encap_dst_ip), neighbor_id(neighbor_id) +{ + SWSS_LOG_ENTER(); + tunnel_key = KeyGenerator::generateTunnelKey(tunnel_id); +} + +ReturnCode GreTunnelManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) +{ + return StatusCode::SWSS_RC_UNIMPLEMENTED; +} + +void GreTunnelManager::enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) +{ + m_entries.push_back(entry); +} + +void GreTunnelManager::drain() +{ + SWSS_LOG_ENTER(); + + for (const auto &key_op_fvs_tuple : m_entries) + { + std::string table_name; + std::string key; + parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &key); + const std::vector &attributes = kfvFieldsValues(key_op_fvs_tuple); + + const std::string &operation = kfvOp(key_op_fvs_tuple); + + ReturnCode status; + auto app_db_entry_or = deserializeP4GreTunnelAppDbEntry(key, attributes); + if 
(!app_db_entry_or.ok()) + { + status = app_db_entry_or.status(); + SWSS_LOG_ERROR("Unable to deserialize GRE Tunnel APP DB entry with key %s: %s", + QuotedVar(kfvKey(key_op_fvs_tuple)).c_str(), status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), + status, + /*replace=*/true); + continue; + } + auto &app_db_entry = *app_db_entry_or; + + const std::string tunnel_key = KeyGenerator::generateTunnelKey(app_db_entry.tunnel_id); + + // Fulfill the operation. + if (operation == SET_COMMAND) + { + status = validateGreTunnelAppDbEntry(app_db_entry); + if (!status.ok()) + { + SWSS_LOG_ERROR("Validation failed for GRE Tunnel APP DB entry with key %s: %s", + QuotedVar(kfvKey(key_op_fvs_tuple)).c_str(), status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), + status, + /*replace=*/true); + continue; + } + auto *gre_tunnel_entry = getGreTunnelEntry(tunnel_key); + if (gre_tunnel_entry == nullptr) + { + // Create new GRE tunnel. + status = processAddRequest(app_db_entry); + } + else + { + // Modify existing GRE tunnel. + status = processUpdateRequest(app_db_entry, gre_tunnel_entry); + } + } + else if (operation == DEL_COMMAND) + { + // Delete GRE tunnel. 
+ status = processDeleteRequest(tunnel_key); + } + else + { + status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Unknown operation type " << QuotedVar(operation); + SWSS_LOG_ERROR("%s", status.message().c_str()); + } + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + } + m_entries.clear(); +} + +P4GreTunnelEntry *GreTunnelManager::getGreTunnelEntry(const std::string &tunnel_key) +{ + SWSS_LOG_ENTER(); + + auto it = m_greTunnelTable.find(tunnel_key); + + if (it == m_greTunnelTable.end()) + { + return nullptr; + } + else + { + return &it->second; + } +}; + +ReturnCodeOr GreTunnelManager::getConstGreTunnelEntry(const std::string &tunnel_key) +{ + SWSS_LOG_ENTER(); + + auto *tunnel = getGreTunnelEntry(tunnel_key); + if (tunnel == nullptr) + { + return ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) + << "GRE Tunnel with key " << QuotedVar(tunnel_key) << " was not found."; + } + else + { + return *tunnel; + } +} + +ReturnCodeOr GreTunnelManager::deserializeP4GreTunnelAppDbEntry( + const std::string &key, const std::vector &attributes) +{ + SWSS_LOG_ENTER(); + + P4GreTunnelAppDbEntry app_db_entry = {}; + app_db_entry.encap_src_ip = swss::IpAddress("0.0.0.0"); + app_db_entry.encap_dst_ip = swss::IpAddress("0.0.0.0"); + + try + { + nlohmann::json j = nlohmann::json::parse(key); + app_db_entry.tunnel_id = j[prependMatchField(p4orch::kTunnelId)]; + } + catch (std::exception &ex) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Failed to deserialize GRE tunnel id"; + } + + for (const auto &it : attributes) + { + const auto &field = fvField(it); + const auto &value = fvValue(it); + if (field == prependParamField(p4orch::kRouterInterfaceId)) + { + app_db_entry.router_interface_id = value; + } + else if (field == prependParamField(p4orch::kEncapSrcIp)) + { + try + { + app_db_entry.encap_src_ip = swss::IpAddress(value); + } + catch (std::exception &ex) + { + return 
ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Invalid IP address " << QuotedVar(value) << " of field " << QuotedVar(field); + } + } + else if (field == prependParamField(p4orch::kEncapDstIp)) + { + try + { + app_db_entry.encap_dst_ip = swss::IpAddress(value); + } + catch (std::exception &ex) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Invalid IP address " << QuotedVar(value) << " of field " << QuotedVar(field); + } + } + else if (field == p4orch::kAction) + { + app_db_entry.action_str = value; + } + else if (field != p4orch::kControllerMetadata) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Unexpected field " << QuotedVar(field) << " in table entry"; + } + } + + return app_db_entry; +} + +ReturnCode GreTunnelManager::processAddRequest(const P4GreTunnelAppDbEntry &app_db_entry) +{ + SWSS_LOG_ENTER(); + + P4GreTunnelEntry gre_tunnel_entry(app_db_entry.tunnel_id, app_db_entry.router_interface_id, + app_db_entry.encap_src_ip, app_db_entry.encap_dst_ip, app_db_entry.encap_dst_ip); + auto status = createGreTunnel(gre_tunnel_entry); + if (!status.ok()) + { + SWSS_LOG_ERROR("Failed to create GRE tunnel with key %s", QuotedVar(gre_tunnel_entry.tunnel_key).c_str()); + } + return status; +} + +ReturnCode GreTunnelManager::createGreTunnel(P4GreTunnelEntry &gre_tunnel_entry) +{ + SWSS_LOG_ENTER(); + + // Check the existence of the GRE tunnel in GRE tunnel manager and centralized + // mapper. 
+ if (getGreTunnelEntry(gre_tunnel_entry.tunnel_key) != nullptr) + { + LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_EXISTS) + << "GRE tunnel with key " << QuotedVar(gre_tunnel_entry.tunnel_key) + << " already exists in GRE tunnel manager"); + } + if (m_p4OidMapper->existsOID(SAI_OBJECT_TYPE_TUNNEL, gre_tunnel_entry.tunnel_key)) + { + RETURN_INTERNAL_ERROR_AND_RAISE_CRITICAL("GRE tunnel with key " << QuotedVar(gre_tunnel_entry.tunnel_key) + << " already exists in centralized mapper"); + } + + // From centralized mapper, get OID of router interface that GRE tunnel + // depends on. + const auto router_interface_key = KeyGenerator::generateRouterInterfaceKey(gre_tunnel_entry.router_interface_id); + if (!m_p4OidMapper->getOID(SAI_OBJECT_TYPE_ROUTER_INTERFACE, router_interface_key, + &gre_tunnel_entry.underlay_if_oid)) + { + LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) + << "Router intf " << QuotedVar(gre_tunnel_entry.router_interface_id) << " does not exist"); + } + + std::vector overlay_intf_attrs; + + sai_attribute_t overlay_intf_attr; + overlay_intf_attr.id = SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID; + overlay_intf_attr.value.oid = gVirtualRouterId; + overlay_intf_attrs.push_back(overlay_intf_attr); + + overlay_intf_attr.id = SAI_ROUTER_INTERFACE_ATTR_TYPE; + overlay_intf_attr.value.s32 = SAI_ROUTER_INTERFACE_TYPE_LOOPBACK; + overlay_intf_attrs.push_back(overlay_intf_attr); + + // Call SAI API. + CHECK_ERROR_AND_LOG_AND_RETURN( + sai_router_intfs_api->create_router_interface(&gre_tunnel_entry.overlay_if_oid, gSwitchId, + (uint32_t)overlay_intf_attrs.size(), overlay_intf_attrs.data()), + "Failed to create the Loopback router interface for GRE tunnel " + "SAI_TUNNEL_ATTR_OVERLAY_INTERFACE attribute" + << QuotedVar(gre_tunnel_entry.tunnel_key)); + + // Prepare attributes for the SAI creation call. + std::vector tunnel_attrs = getSaiAttrs(gre_tunnel_entry); + + // Call SAI API. 
+ auto sai_status = sai_tunnel_api->create_tunnel(&gre_tunnel_entry.tunnel_oid, gSwitchId, + (uint32_t)tunnel_attrs.size(), tunnel_attrs.data()); + if (sai_status != SAI_STATUS_SUCCESS) + { + auto status = ReturnCode(sai_status) << "Failed to create GRE tunnel " << QuotedVar(gre_tunnel_entry.tunnel_key) + << " on rif " << QuotedVar(gre_tunnel_entry.router_interface_id); + SWSS_LOG_ERROR("%s", status.message().c_str()); + auto recovery_status = sai_router_intfs_api->remove_router_interface(gre_tunnel_entry.overlay_if_oid); + if (recovery_status != SAI_STATUS_SUCCESS) + { + auto rc = ReturnCode(recovery_status) << "Failed to recover overlay router interface due to SAI call " + "failure: Failed to remove loopback router interface " + << QuotedVar(sai_serialize_object_id(gre_tunnel_entry.overlay_if_oid)) + << " while clean up dependencies."; + SWSS_LOG_ERROR("%s", rc.message().c_str()); + SWSS_RAISE_CRITICAL_STATE(rc.message()); + } + return status; + } + + // On successful creation, increment ref count. + m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_ROUTER_INTERFACE, router_interface_key); + + // Add created entry to internal table. + m_greTunnelTable.emplace(gre_tunnel_entry.tunnel_key, gre_tunnel_entry); + + // Add the key to OID map to centralized mapper. + m_p4OidMapper->setOID(SAI_OBJECT_TYPE_TUNNEL, gre_tunnel_entry.tunnel_key, gre_tunnel_entry.tunnel_oid); + + return ReturnCode(); +} + +ReturnCode GreTunnelManager::processUpdateRequest(const P4GreTunnelAppDbEntry &app_db_entry, + P4GreTunnelEntry *gre_tunnel_entry) +{ + SWSS_LOG_ENTER(); + + ReturnCode status = ReturnCode(StatusCode::SWSS_RC_UNIMPLEMENTED) + << "Currently GRE tunnel doesn't support update by SAI. 
GRE tunnel key " + << QuotedVar(gre_tunnel_entry->tunnel_key); + SWSS_LOG_ERROR("%s", status.message().c_str()); + return status; +} + +ReturnCode GreTunnelManager::processDeleteRequest(const std::string &tunnel_key) +{ + SWSS_LOG_ENTER(); + + auto status = removeGreTunnel(tunnel_key); + if (!status.ok()) + { + SWSS_LOG_ERROR("Failed to remove GRE tunnel with key %s", QuotedVar(tunnel_key).c_str()); + } + + return status; +} + +ReturnCode GreTunnelManager::removeGreTunnel(const std::string &tunnel_key) +{ + SWSS_LOG_ENTER(); + + // Check the existence of the GRE tunnel in GRE tunnel manager and centralized + // mapper. + auto *gre_tunnel_entry = getGreTunnelEntry(tunnel_key); + if (gre_tunnel_entry == nullptr) + { + LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) + << "GRE tunnel with key " << QuotedVar(tunnel_key) + << " does not exist in GRE tunnel manager"); + } + + // Check if there is anything referring to the GRE tunnel before deletion. + uint32_t ref_count; + if (!m_p4OidMapper->getRefCount(SAI_OBJECT_TYPE_TUNNEL, tunnel_key, &ref_count)) + { + RETURN_INTERNAL_ERROR_AND_RAISE_CRITICAL("Failed to get reference count for GRE tunnel " + << QuotedVar(tunnel_key)); + } + if (ref_count > 0) + { + LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "GRE tunnel " << QuotedVar(gre_tunnel_entry->tunnel_key) + << " referenced by other objects (ref_count = " << ref_count); + } + + // Call SAI API. 
+ CHECK_ERROR_AND_LOG_AND_RETURN(sai_tunnel_api->remove_tunnel(gre_tunnel_entry->tunnel_oid), + "Failed to remove GRE tunnel " << QuotedVar(gre_tunnel_entry->tunnel_key)); + + auto sai_status = sai_router_intfs_api->remove_router_interface(gre_tunnel_entry->overlay_if_oid); + if (sai_status != SAI_STATUS_SUCCESS) + { + auto status = ReturnCode(sai_status) << "Failed to remove loopback router interface " + << QuotedVar(sai_serialize_object_id(gre_tunnel_entry->overlay_if_oid)) + << " when removing GRE tunnel " << QuotedVar(gre_tunnel_entry->tunnel_key); + SWSS_LOG_ERROR("%s", status.message().c_str()); + + // Try to recreate the GRE tunnel + std::vector tunnel_attrs = getSaiAttrs(*gre_tunnel_entry); + + // Call SAI API. + auto recovery_status = sai_tunnel_api->create_tunnel(&gre_tunnel_entry->tunnel_oid, gSwitchId, + (uint32_t)tunnel_attrs.size(), tunnel_attrs.data()); + if (recovery_status != SAI_STATUS_SUCCESS) + { + auto rc = ReturnCode(recovery_status) << "Failed to recover the GRE tunnel due to SAI call failure : " + "Failed to create GRE tunnel " + << QuotedVar(gre_tunnel_entry->tunnel_key) << " on rif " + << QuotedVar(gre_tunnel_entry->router_interface_id); + SWSS_LOG_ERROR("%s", rc.message().c_str()); + SWSS_RAISE_CRITICAL_STATE(rc.message()); + } + return status; + } + + // On successful deletion, decrement ref count. + m_p4OidMapper->decreaseRefCount(SAI_OBJECT_TYPE_ROUTER_INTERFACE, + KeyGenerator::generateRouterInterfaceKey(gre_tunnel_entry->router_interface_id)); + + // Remove the key to OID map to centralized mapper. + m_p4OidMapper->eraseOID(SAI_OBJECT_TYPE_TUNNEL, tunnel_key); + + // Remove the entry from internal table. 
+ m_greTunnelTable.erase(tunnel_key); + + return ReturnCode(); +} + +std::string GreTunnelManager::verifyState(const std::string &key, const std::vector &tuple) +{ + SWSS_LOG_ENTER(); + + auto pos = key.find_first_of(kTableKeyDelimiter); + if (pos == std::string::npos) + { + return std::string("Invalid key: ") + key; + } + std::string p4rt_table = key.substr(0, pos); + std::string p4rt_key = key.substr(pos + 1); + if (p4rt_table != APP_P4RT_TABLE_NAME) + { + return std::string("Invalid key: ") + key; + } + std::string table_name; + std::string key_content; + parseP4RTKey(p4rt_key, &table_name, &key_content); + if (table_name != APP_P4RT_TUNNEL_TABLE_NAME) + { + return std::string("Invalid key: ") + key; + } + + ReturnCode status; + auto app_db_entry_or = deserializeP4GreTunnelAppDbEntry(key_content, tuple); + if (!app_db_entry_or.ok()) + { + status = app_db_entry_or.status(); + std::stringstream msg; + msg << "Unable to deserialize key " << QuotedVar(key) << ": " << status.message(); + return msg.str(); + } + auto &app_db_entry = *app_db_entry_or; + const std::string tunnel_key = KeyGenerator::generateTunnelKey(app_db_entry.tunnel_id); + auto *gre_tunnel_entry = getGreTunnelEntry(tunnel_key); + if (gre_tunnel_entry == nullptr) + { + std::stringstream msg; + msg << "No entry found with key " << QuotedVar(key); + return msg.str(); + } + + std::string cache_result = verifyStateCache(app_db_entry, gre_tunnel_entry); + std::string asic_db_result = verifyStateAsicDb(gre_tunnel_entry); + if (cache_result.empty()) + { + return asic_db_result; + } + if (asic_db_result.empty()) + { + return cache_result; + } + return cache_result + "; " + asic_db_result; +} + +std::string GreTunnelManager::verifyStateCache(const P4GreTunnelAppDbEntry &app_db_entry, + const P4GreTunnelEntry *gre_tunnel_entry) +{ + const std::string tunnel_key = KeyGenerator::generateTunnelKey(app_db_entry.tunnel_id); + ReturnCode status = validateGreTunnelAppDbEntry(app_db_entry); + if (!status.ok()) + { + 
std::stringstream msg; + msg << "Validation failed for GRE Tunnel DB entry with key " << QuotedVar(tunnel_key) << ": " + << status.message(); + return msg.str(); + } + + if (gre_tunnel_entry->tunnel_key != tunnel_key) + { + std::stringstream msg; + msg << "GreTunnel with key " << QuotedVar(tunnel_key) << " does not match internal cache " + << QuotedVar(gre_tunnel_entry->tunnel_key) << " in Gre Tunnel manager."; + return msg.str(); + } + if (gre_tunnel_entry->tunnel_id != app_db_entry.tunnel_id) + { + std::stringstream msg; + msg << "GreTunnel " << QuotedVar(app_db_entry.tunnel_id) << " does not match internal cache " + << QuotedVar(gre_tunnel_entry->tunnel_id) << " in GreTunnel manager."; + return msg.str(); + } + if (gre_tunnel_entry->router_interface_id != app_db_entry.router_interface_id) + { + std::stringstream msg; + msg << "GreTunnel " << QuotedVar(app_db_entry.tunnel_id) << " with ritf ID " + << QuotedVar(app_db_entry.router_interface_id) << " does not match internal cache " + << QuotedVar(gre_tunnel_entry->router_interface_id) << " in GreTunnel manager."; + return msg.str(); + } + if (gre_tunnel_entry->encap_src_ip.to_string() != app_db_entry.encap_src_ip.to_string()) + { + std::stringstream msg; + msg << "GreTunnel " << QuotedVar(app_db_entry.tunnel_id) << " with source IP " + << QuotedVar(app_db_entry.encap_src_ip.to_string()) << " does not match internal cache " + << QuotedVar(gre_tunnel_entry->encap_src_ip.to_string()) << " in GreTunnel manager."; + return msg.str(); + } + + if (gre_tunnel_entry->encap_dst_ip.to_string() != app_db_entry.encap_dst_ip.to_string()) + { + std::stringstream msg; + msg << "GreTunnel " << QuotedVar(app_db_entry.tunnel_id) << " with destination IP " + << QuotedVar(app_db_entry.encap_dst_ip.to_string()) << " does not match internal cache " + << QuotedVar(gre_tunnel_entry->encap_dst_ip.to_string()) << " in GreTunnel manager."; + return msg.str(); + } + + if (gre_tunnel_entry->neighbor_id.to_string() != 
app_db_entry.encap_dst_ip.to_string()) + { + std::stringstream msg; + msg << "GreTunnel " << QuotedVar(app_db_entry.tunnel_id) << " with destination IP " + << QuotedVar(app_db_entry.encap_dst_ip.to_string()) << " does not match internal cache " + << QuotedVar(gre_tunnel_entry->neighbor_id.to_string()) << " fo neighbor_id in GreTunnel manager."; + return msg.str(); + } + + return m_p4OidMapper->verifyOIDMapping(SAI_OBJECT_TYPE_TUNNEL, gre_tunnel_entry->tunnel_key, + gre_tunnel_entry->tunnel_oid); +} + +std::string GreTunnelManager::verifyStateAsicDb(const P4GreTunnelEntry *gre_tunnel_entry) +{ + swss::DBConnector db("ASIC_DB", 0); + swss::Table table(&db, "ASIC_STATE"); + + // Verify Overlay router interface ASIC DB attributes + std::string key = sai_serialize_object_type(SAI_OBJECT_TYPE_ROUTER_INTERFACE) + ":" + + sai_serialize_object_id(gre_tunnel_entry->overlay_if_oid); + std::vector values; + if (!table.get(key, values)) + { + return std::string("ASIC DB key not found ") + key; + } + + std::vector overlay_intf_attrs; + sai_attribute_t overlay_intf_attr; + overlay_intf_attr.id = SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID; + overlay_intf_attr.value.oid = gVirtualRouterId; + overlay_intf_attrs.push_back(overlay_intf_attr); + overlay_intf_attr.id = SAI_ROUTER_INTERFACE_ATTR_TYPE; + overlay_intf_attr.value.s32 = SAI_ROUTER_INTERFACE_TYPE_LOOPBACK; + overlay_intf_attrs.push_back(overlay_intf_attr); + std::vector exp = saimeta::SaiAttributeList::serialize_attr_list( + SAI_OBJECT_TYPE_ROUTER_INTERFACE, (uint32_t)overlay_intf_attrs.size(), overlay_intf_attrs.data(), + /*countOnly=*/false); + verifyAttrs(values, exp, std::vector{}, + /*allow_unknown=*/false); + + // Verify Tunnel ASIC DB attributes + std::vector attrs = getSaiAttrs(*gre_tunnel_entry); + exp = saimeta::SaiAttributeList::serialize_attr_list(SAI_OBJECT_TYPE_TUNNEL, (uint32_t)attrs.size(), attrs.data(), + /*countOnly=*/false); + key = + sai_serialize_object_type(SAI_OBJECT_TYPE_TUNNEL) + ":" + 
sai_serialize_object_id(gre_tunnel_entry->tunnel_oid); + values.clear(); + if (!table.get(key, values)) + { + return std::string("ASIC DB key not found ") + key; + } + + return verifyAttrs(values, exp, std::vector{}, + /*allow_unknown=*/false); +} diff --git a/orchagent/p4orch/gre_tunnel_manager.h b/orchagent/p4orch/gre_tunnel_manager.h new file mode 100644 index 0000000000..d5cb32e9bf --- /dev/null +++ b/orchagent/p4orch/gre_tunnel_manager.h @@ -0,0 +1,116 @@ +#pragma once + +#include +#include +#include + +#include "ipaddress.h" +#include "orch.h" +#include "p4orch/object_manager_interface.h" +#include "p4orch/p4oidmapper.h" +#include "p4orch/p4orch_util.h" +#include "p4orch/router_interface_manager.h" +#include "response_publisher_interface.h" +#include "return_code.h" +extern "C" +{ +#include "sai.h" +} + +// P4GreTunnelEntry holds GreTunnelManager's internal cache of P4 GRE tunnel +// entry. Example: P4RT_TABLE:FIXED_TUNNEL_TABLE:{"match/tunnel_id":"tunnel-1"} +// "action" = "mark_for_tunnel_encap", +// "param/router_interface_id" = "intf-eth-1/2/3", +// "param/encap_src_ip" = "2607:f8b0:8096:3110::1", +// "param/encap_dst_ip" = "2607:f8b0:8096:311a::2", +// "controller_metadata" = "..." +struct P4GreTunnelEntry +{ + // Key of this entry, built from tunnel_id. + std::string tunnel_key; + + // Fields from P4 table. + // Match + std::string tunnel_id; + // Action + std::string router_interface_id; + swss::IpAddress encap_src_ip; + swss::IpAddress encap_dst_ip; + // neighbor_id is required to be equal to encap_dst_ip by BRCM. And the + // neighbor entry needs to be created before GRE tunnel object + swss::IpAddress neighbor_id; + + // SAI OID associated with this entry. 
+ sai_object_id_t tunnel_oid = SAI_NULL_OBJECT_ID; + // SAI OID of a loopback rif for SAI_TUNNEL_ATTR_OVERLAY_INTERFACE + sai_object_id_t overlay_if_oid = SAI_NULL_OBJECT_ID; + // SAI OID of the router_interface_id for SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE + sai_object_id_t underlay_if_oid = SAI_NULL_OBJECT_ID; + + P4GreTunnelEntry(const std::string &tunnel_id, const std::string &router_interface_id, + const swss::IpAddress &encap_src_ip, const swss::IpAddress &encap_dst_ip, + const swss::IpAddress &neighbor_id); +}; + +// GreTunnelManager listens to changes in table APP_P4RT_TUNNEL_TABLE_NAME and +// creates/updates/deletes tunnel SAI object accordingly. +class GreTunnelManager : public ObjectManagerInterface +{ + public: + GreTunnelManager(P4OidMapper *p4oidMapper, ResponsePublisherInterface *publisher) + { + SWSS_LOG_ENTER(); + + assert(p4oidMapper != nullptr); + m_p4OidMapper = p4oidMapper; + assert(publisher != nullptr); + m_publisher = publisher; + } + + virtual ~GreTunnelManager() = default; + + void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; + void drain() override; + std::string verifyState(const std::string &key, const std::vector &tuple) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) override; + + ReturnCodeOr getConstGreTunnelEntry(const std::string &gre_tunnel_key); + + private: + // Gets the internal cached GRE tunnel entry by its key. + // Return nullptr if corresponding GRE tunnel entry is not cached. + P4GreTunnelEntry *getGreTunnelEntry(const std::string &gre_tunnel_key); + + // Deserializes an entry from table APP_P4RT_TUNNEL_TABLE_NAME. + ReturnCodeOr deserializeP4GreTunnelAppDbEntry( + const std::string &key, const std::vector &attributes); + + // Processes add operation for an entry. + ReturnCode processAddRequest(const P4GreTunnelAppDbEntry &app_db_entry); + + // Creates an GRE tunnel in the GRE tunnel table. 
Return true on success. + ReturnCode createGreTunnel(P4GreTunnelEntry &gre_tunnel_entry); + + // Processes update operation for an entry. + ReturnCode processUpdateRequest(const P4GreTunnelAppDbEntry &app_db_entry, P4GreTunnelEntry *gre_tunnel_entry); + + // Processes delete operation for an entry. + ReturnCode processDeleteRequest(const std::string &gre_tunnel_key); + + // Deletes a GRE tunnel in the GRE tunnel table. Return true on success. + ReturnCode removeGreTunnel(const std::string &gre_tunnel_key); + + std::string verifyStateCache(const P4GreTunnelAppDbEntry &app_db_entry, const P4GreTunnelEntry *gre_tunnel_entry); + std::string verifyStateAsicDb(const P4GreTunnelEntry *gre_tunnel_entry); + + // m_greTunnelTable: gre_tunnel_key, P4GreTunnelEntry + std::unordered_map m_greTunnelTable; + + // Owners of pointers below must outlive this class's instance. + P4OidMapper *m_p4OidMapper; + ResponsePublisherInterface *m_publisher; + std::deque m_entries; + + friend class GreTunnelManagerTest; + friend class NextHopManagerTest; +}; diff --git a/orchagent/p4orch/l3_admit_manager.cpp b/orchagent/p4orch/l3_admit_manager.cpp new file mode 100644 index 0000000000..8f17165df2 --- /dev/null +++ b/orchagent/p4orch/l3_admit_manager.cpp @@ -0,0 +1,465 @@ +#include "p4orch/l3_admit_manager.h" + +#include +#include +#include +#include + +#include "SaiAttributeList.h" +#include "dbconnector.h" +#include "json.hpp" +#include "logger.h" +#include "p4orch/p4orch_util.h" +#include "portsorch.h" +#include "return_code.h" +#include "sai_serialize.h" +#include "table.h" +#include "tokenize.h" +extern "C" +{ +#include "sai.h" +} + +using ::p4orch::kTableKeyDelimiter; + +extern PortsOrch *gPortsOrch; +extern sai_object_id_t gSwitchId; +extern sai_my_mac_api_t *sai_my_mac_api; + +namespace +{ + +ReturnCodeOr> getSaiAttrs(const P4L3AdmitEntry &l3_admit_entry) +{ + std::vector l3_admit_attrs; + sai_attribute_t l3_admit_attr; + + l3_admit_attr.id = SAI_MY_MAC_ATTR_MAC_ADDRESS; + 
memcpy(l3_admit_attr.value.mac, l3_admit_entry.mac_address_data.getMac(), sizeof(sai_mac_t)); + l3_admit_attrs.push_back(l3_admit_attr); + + l3_admit_attr.id = SAI_MY_MAC_ATTR_MAC_ADDRESS_MASK; + memcpy(l3_admit_attr.value.mac, l3_admit_entry.mac_address_mask.getMac(), sizeof(sai_mac_t)); + l3_admit_attrs.push_back(l3_admit_attr); + + l3_admit_attr.id = SAI_MY_MAC_ATTR_PRIORITY; + l3_admit_attr.value.u32 = l3_admit_entry.priority; + l3_admit_attrs.push_back(l3_admit_attr); + + if (!l3_admit_entry.port_name.empty()) + { + Port port; + if (!gPortsOrch->getPort(l3_admit_entry.port_name, port)) + { + LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) + << "Failed to get port info for port " << QuotedVar(l3_admit_entry.port_name)); + } + l3_admit_attr.id = SAI_MY_MAC_ATTR_PORT_ID; + l3_admit_attr.value.oid = port.m_port_id; + l3_admit_attrs.push_back(l3_admit_attr); + } + + return l3_admit_attrs; +} + +} // namespace + +ReturnCode L3AdmitManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) +{ + return StatusCode::SWSS_RC_UNIMPLEMENTED; +} + +void L3AdmitManager::enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) +{ + m_entries.push_back(entry); +} + +void L3AdmitManager::drain() +{ + SWSS_LOG_ENTER(); + + for (const auto &key_op_fvs_tuple : m_entries) + { + std::string table_name; + std::string key; + parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &key); + const std::vector &attributes = kfvFieldsValues(key_op_fvs_tuple); + + ReturnCode status; + auto app_db_entry_or = deserializeP4L3AdmitAppDbEntry(key, attributes); + if (!app_db_entry_or.ok()) + { + status = app_db_entry_or.status(); + SWSS_LOG_ERROR("Unable to deserialize APP DB entry with key %s: %s", + QuotedVar(table_name + ":" + key).c_str(), status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), + status, + /*replace=*/true); + 
continue; + } + auto &app_db_entry = *app_db_entry_or; + + const std::string l3_admit_key = + KeyGenerator::generateL3AdmitKey(app_db_entry.mac_address_data, app_db_entry.mac_address_mask, + app_db_entry.port_name, app_db_entry.priority); + + // Fulfill the operation. + const std::string &operation = kfvOp(key_op_fvs_tuple); + if (operation == SET_COMMAND) + { + auto *l3_admit_entry = getL3AdmitEntry(l3_admit_key); + if (l3_admit_entry == nullptr) + { + // Create new l3 admit. + status = processAddRequest(app_db_entry, l3_admit_key); + } + else + { + // Duplicate l3 admit entry, no-op + status = ReturnCode(StatusCode::SWSS_RC_SUCCESS) + << "L3 Admit entry with the same key received: " << QuotedVar(l3_admit_key); + } + } + else if (operation == DEL_COMMAND) + { + // Delete l3 admit. + status = processDeleteRequest(l3_admit_key); + } + else + { + status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Unknown operation type " << QuotedVar(operation); + SWSS_LOG_ERROR("%s", status.message().c_str()); + } + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), status, + /*replace=*/true); + } + m_entries.clear(); +} + +P4L3AdmitEntry *L3AdmitManager::getL3AdmitEntry(const std::string &l3_admit_key) +{ + SWSS_LOG_ENTER(); + + auto it = m_l3AdmitTable.find(l3_admit_key); + + if (it == m_l3AdmitTable.end()) + { + return nullptr; + } + else + { + return &it->second; + } +} + +ReturnCodeOr L3AdmitManager::deserializeP4L3AdmitAppDbEntry( + const std::string &key, const std::vector &attributes) +{ + SWSS_LOG_ENTER(); + + P4L3AdmitAppDbEntry app_db_entry = {}; + + try + { + nlohmann::json j = nlohmann::json::parse(key); + // "match/dst_mac":"00:02:03:04:00:00&ff:ff:ff:ff:00:00" + if (j.find(prependMatchField(p4orch::kDstMac)) != j.end()) + { + std::string dst_mac_data_and_mask = j[prependMatchField(p4orch::kDstMac)]; + const auto &data_and_mask = swss::tokenize(dst_mac_data_and_mask, p4orch::kDataMaskDelimiter); + 
app_db_entry.mac_address_data = swss::MacAddress(trim(data_and_mask[0])); + if (data_and_mask.size() > 1) + { + app_db_entry.mac_address_mask = swss::MacAddress(trim(data_and_mask[1])); + } + else + { + app_db_entry.mac_address_mask = swss::MacAddress("ff:ff:ff:ff:ff:ff"); + } + } + else + { + // P4RT set "don't care" value for dst_mac - mask should be all 0 + app_db_entry.mac_address_data = swss::MacAddress("00:00:00:00:00:00"); + app_db_entry.mac_address_mask = swss::MacAddress("00:00:00:00:00:00"); + } + + // "priority":2030 + auto priority_j = j[p4orch::kPriority]; + if (!priority_j.is_number_unsigned()) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Invalid l3 admit entry priority type: should be uint32_t"; + } + app_db_entry.priority = static_cast(priority_j); + + // "match/in_port":"Ethernet0" + if (j.find(prependMatchField(p4orch::kInPort)) != j.end()) + { + app_db_entry.port_name = j[prependMatchField(p4orch::kInPort)]; + } + } + catch (std::exception &ex) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Failed to deserialize l3 admit key"; + } + + for (const auto &it : attributes) + { + const auto &field = fvField(it); + const auto &value = fvValue(it); + // "action": "admit_to_l3" + if (field == p4orch::kAction) + { + if (value != p4orch::kL3AdmitAction) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Unexpected action " << QuotedVar(value) << " in L3 Admit table entry"; + } + } + else if (field != p4orch::kControllerMetadata) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Unexpected field " << QuotedVar(field) << " in L3 Admit table entry"; + } + } + + return app_db_entry; +} + +ReturnCode L3AdmitManager::processAddRequest(const P4L3AdmitAppDbEntry &app_db_entry, const std::string &l3_admit_key) +{ + SWSS_LOG_ENTER(); + + // Check the existence of the l3 admit in l3 admit manager and centralized + // mapper. 
+ if (getL3AdmitEntry(l3_admit_key) != nullptr) + { + LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_EXISTS) << "l3 admit with key " << QuotedVar(l3_admit_key) + << " already exists in l3 admit manager"); + } + if (m_p4OidMapper->existsOID(SAI_OBJECT_TYPE_MY_MAC, l3_admit_key)) + { + RETURN_INTERNAL_ERROR_AND_RAISE_CRITICAL("l3 admit with key " << QuotedVar(l3_admit_key) + << " already exists in centralized mapper"); + } + // Create L3 admit entry + P4L3AdmitEntry l3_admit_entry(app_db_entry.mac_address_data, app_db_entry.mac_address_mask, app_db_entry.priority, + app_db_entry.port_name); + auto status = createL3Admit(l3_admit_entry); + if (!status.ok()) + { + SWSS_LOG_ERROR("Failed to create l3 admit with key %s", QuotedVar(l3_admit_key).c_str()); + return status; + } + // Increase reference count to port + if (!l3_admit_entry.port_name.empty()) + { + gPortsOrch->increasePortRefCount(l3_admit_entry.port_name); + } + // Add created entry to internal table. + m_l3AdmitTable.emplace(l3_admit_key, l3_admit_entry); + + // Add the key to OID map to centralized mapper. + m_p4OidMapper->setOID(SAI_OBJECT_TYPE_MY_MAC, l3_admit_key, l3_admit_entry.l3_admit_oid); + return status; +} + +ReturnCode L3AdmitManager::createL3Admit(P4L3AdmitEntry &l3_admit_entry) +{ + SWSS_LOG_ENTER(); + + ASSIGN_OR_RETURN(std::vector l3_admit_attrs, getSaiAttrs(l3_admit_entry)); + // Call SAI API. 
+ CHECK_ERROR_AND_LOG_AND_RETURN( + sai_my_mac_api->create_my_mac(&l3_admit_entry.l3_admit_oid, gSwitchId, (uint32_t)l3_admit_attrs.size(), + l3_admit_attrs.data()), + "Failed to create l3 admit with mac:" << QuotedVar(l3_admit_entry.mac_address_data.to_string()) + << "; mac_mask:" << QuotedVar(l3_admit_entry.mac_address_mask.to_string()) + << "; priority:" << QuotedVar(std::to_string(l3_admit_entry.priority)) + << "; in_port:" << QuotedVar(l3_admit_entry.port_name)); + + return ReturnCode(); +} + +ReturnCode L3AdmitManager::processDeleteRequest(const std::string &l3_admit_key) +{ + SWSS_LOG_ENTER(); + + // Check the existence of the l3 admit in l3 admit manager and centralized + // mapper. + auto *l3_admit_entry = getL3AdmitEntry(l3_admit_key); + if (l3_admit_entry == nullptr) + { + LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) + << "l3 admit with key " << QuotedVar(l3_admit_key) + << " does not exist in l3 admit manager"); + } + + // Check if there is anything referring to the l3 admit before deletion. + uint32_t ref_count; + if (!m_p4OidMapper->getRefCount(SAI_OBJECT_TYPE_MY_MAC, l3_admit_key, &ref_count)) + { + RETURN_INTERNAL_ERROR_AND_RAISE_CRITICAL("Failed to get reference count for l3 admit " + << QuotedVar(l3_admit_key)); + } + if (ref_count > 0) + { + LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "l3 admit " << QuotedVar(l3_admit_key) + << " referenced by other objects (ref_count = " << ref_count); + } + + // Call SAI API + auto status = removeL3Admit(l3_admit_key); + if (!status.ok()) + { + SWSS_LOG_ERROR("Failed to remove l3 admit with key %s", QuotedVar(l3_admit_key).c_str()); + return status; + } + + // Decrease reference count to port + if (!l3_admit_entry->port_name.empty()) + { + gPortsOrch->decreasePortRefCount(l3_admit_entry->port_name); + } + // Remove the key to OID map to centralized mapper. + m_p4OidMapper->eraseOID(SAI_OBJECT_TYPE_MY_MAC, l3_admit_key); + + // Remove the entry from internal table. 
+ m_l3AdmitTable.erase(l3_admit_key); + return status; +} + +ReturnCode L3AdmitManager::removeL3Admit(const std::string &l3_admit_key) +{ + SWSS_LOG_ENTER(); + + auto *l3_admit_entry = getL3AdmitEntry(l3_admit_key); + CHECK_ERROR_AND_LOG_AND_RETURN(sai_my_mac_api->remove_my_mac(l3_admit_entry->l3_admit_oid), + "Failed to remove l3 admit " << QuotedVar(l3_admit_key)); + + return ReturnCode(); +} + +std::string L3AdmitManager::verifyState(const std::string &key, const std::vector &tuple) +{ + SWSS_LOG_ENTER(); + + auto pos = key.find_first_of(kTableKeyDelimiter); + if (pos == std::string::npos) + { + return std::string("Invalid key: ") + key; + } + std::string p4rt_table = key.substr(0, pos); + std::string p4rt_key = key.substr(pos + 1); + if (p4rt_table != APP_P4RT_TABLE_NAME) + { + return std::string("Invalid key: ") + key; + } + std::string table_name; + std::string key_content; + parseP4RTKey(p4rt_key, &table_name, &key_content); + if (table_name != APP_P4RT_L3_ADMIT_TABLE_NAME) + { + return std::string("Invalid key: ") + key; + } + + ReturnCode status; + auto app_db_entry_or = deserializeP4L3AdmitAppDbEntry(key_content, tuple); + if (!app_db_entry_or.ok()) + { + status = app_db_entry_or.status(); + std::stringstream msg; + msg << "Unable to deserialize key " << QuotedVar(key) << ": " << status.message(); + return msg.str(); + } + auto &app_db_entry = *app_db_entry_or; + const std::string l3_admit_key = KeyGenerator::generateL3AdmitKey( + app_db_entry.mac_address_data, app_db_entry.mac_address_mask, app_db_entry.port_name, app_db_entry.priority); + auto *l3_admit_entry = getL3AdmitEntry(l3_admit_key); + if (l3_admit_entry == nullptr) + { + std::stringstream msg; + msg << "No entry found with key " << QuotedVar(key); + return msg.str(); + } + + std::string cache_result = verifyStateCache(app_db_entry, l3_admit_entry); + std::string asic_db_result = verifyStateAsicDb(l3_admit_entry); + if (cache_result.empty()) + { + return asic_db_result; + } + if 
(asic_db_result.empty()) + { + return cache_result; + } + return cache_result + "; " + asic_db_result; +} + +std::string L3AdmitManager::verifyStateCache(const P4L3AdmitAppDbEntry &app_db_entry, + const P4L3AdmitEntry *l3_admit_entry) +{ + const std::string l3_admit_key = KeyGenerator::generateL3AdmitKey( + app_db_entry.mac_address_data, app_db_entry.mac_address_mask, app_db_entry.port_name, app_db_entry.priority); + + if (l3_admit_entry->port_name != app_db_entry.port_name) + { + std::stringstream msg; + msg << "L3 admit " << QuotedVar(l3_admit_key) << " with port " << QuotedVar(app_db_entry.port_name) + << " does not match internal cache " << QuotedVar(l3_admit_entry->port_name) << " in L3 admit manager."; + return msg.str(); + } + if (l3_admit_entry->mac_address_data.to_string() != app_db_entry.mac_address_data.to_string()) + { + std::stringstream msg; + msg << "L3 admit " << QuotedVar(l3_admit_key) << " with MAC addr " << app_db_entry.mac_address_data.to_string() + << " does not match internal cache " << l3_admit_entry->mac_address_data.to_string() + << " in L3 admit manager."; + return msg.str(); + } + if (l3_admit_entry->mac_address_mask.to_string() != app_db_entry.mac_address_mask.to_string()) + { + std::stringstream msg; + msg << "L3 admit " << QuotedVar(l3_admit_key) << " with MAC mask " << app_db_entry.mac_address_mask.to_string() + << " does not match internal cache " << l3_admit_entry->mac_address_mask.to_string() + << " in L3 admit manager."; + return msg.str(); + } + if (l3_admit_entry->priority != app_db_entry.priority) + { + std::stringstream msg; + msg << "L3 admit " << QuotedVar(l3_admit_key) << " with priority " << app_db_entry.priority + << " does not match internal cache " << l3_admit_entry->priority << " in L3 admit manager."; + return msg.str(); + } + + return m_p4OidMapper->verifyOIDMapping(SAI_OBJECT_TYPE_MY_MAC, l3_admit_key, l3_admit_entry->l3_admit_oid); +} + +std::string L3AdmitManager::verifyStateAsicDb(const P4L3AdmitEntry 
*l3_admit_entry) +{ + auto attrs_or = getSaiAttrs(*l3_admit_entry); + if (!attrs_or.ok()) + { + return std::string("Failed to get SAI attrs: ") + attrs_or.status().message(); + } + std::vector attrs = *attrs_or; + std::vector exp = + saimeta::SaiAttributeList::serialize_attr_list(SAI_OBJECT_TYPE_MY_MAC, (uint32_t)attrs.size(), attrs.data(), + /*countOnly=*/false); + + swss::DBConnector db("ASIC_DB", 0); + swss::Table table(&db, "ASIC_STATE"); + std::string key = + sai_serialize_object_type(SAI_OBJECT_TYPE_MY_MAC) + ":" + sai_serialize_object_id(l3_admit_entry->l3_admit_oid); + std::vector values; + if (!table.get(key, values)) + { + return std::string("ASIC DB key not found ") + key; + } + + return verifyAttrs(values, exp, std::vector{}, + /*allow_unknown=*/false); +} diff --git a/orchagent/p4orch/l3_admit_manager.h b/orchagent/p4orch/l3_admit_manager.h new file mode 100644 index 0000000000..d378775c4f --- /dev/null +++ b/orchagent/p4orch/l3_admit_manager.h @@ -0,0 +1,99 @@ +#pragma once + +#include + +#include "orch.h" +#include "p4orch/object_manager_interface.h" +#include "p4orch/p4oidmapper.h" +#include "p4orch/p4orch_util.h" +#include "response_publisher_interface.h" +#include "return_code.h" + +#define EMPTY_STRING "" + +struct P4L3AdmitEntry +{ + std::string port_name; // Optional + swss::MacAddress mac_address_data; + swss::MacAddress mac_address_mask; + sai_uint32_t priority; + sai_object_id_t l3_admit_oid = SAI_NULL_OBJECT_ID; + + P4L3AdmitEntry() = default; + P4L3AdmitEntry(const swss::MacAddress &mac_address_data, const swss::MacAddress &mac_address_mask, + const sai_uint32_t &priority, const std::string &port_name) + : port_name(port_name), mac_address_data(mac_address_data), mac_address_mask(mac_address_mask), + priority(priority) + { + } +}; + +// L3Admit manager is responsible for subscribing to APPL_DB FIXED_L3_ADMIT +// table. 
+// +// Example without optional port +// P4RT_TABLE:FIXED_L3_ADMIT_TABLE:{\"match/dst_mac\":\"00:02:03:04:00:00&ff:ff:ff:ff:00:00\",\"priority\":2030} +// "action": "admit_to_l3" +// "controller_metadata": "..." +// +// Example with optional port +// P4RT_TABLE:FIXED_L3_ADMIT_TABLE:{\"match/dst_mac\":\"00:02:03:04:00:00&ff:ff:ff:ff:00:00\",\"match/in_port\":\"Ethernet0\",\"priority\":2030} +// "action": "admit_to_l3" +// "controller_metadata": "..." +// +// Example without optional port/dst_mac +// P4RT:FIXED_L3_ADMIT_TABLE:{\"priority\":2030} +// "action": "admit_to_l3" +// "controller_metadata": "..." +class L3AdmitManager : public ObjectManagerInterface +{ + public: + L3AdmitManager(P4OidMapper *p4oidMapper, ResponsePublisherInterface *publisher) + { + SWSS_LOG_ENTER(); + + assert(p4oidMapper != nullptr); + m_p4OidMapper = p4oidMapper; + assert(publisher != nullptr); + m_publisher = publisher; + } + + virtual ~L3AdmitManager() = default; + + void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; + void drain() override; + std::string verifyState(const std::string &key, const std::vector &tuple) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) override; + + private: + // Gets the internal cached next hop entry by its key. + // Return nullptr if corresponding next hop entry is not cached. + P4L3AdmitEntry *getL3AdmitEntry(const std::string &l3_admit_key); + + // Deserializes an entry from table APP_P4RT_L3_ADMIT_TABLE_NAME. + ReturnCodeOr deserializeP4L3AdmitAppDbEntry( + const std::string &key, const std::vector &attributes); + + ReturnCode processAddRequest(const P4L3AdmitAppDbEntry &app_db_entry, const std::string &l3_admit_key); + + // Creates a L3 Admit entry. Return true on success. + ReturnCode createL3Admit(P4L3AdmitEntry &l3_admit_entry); + + ReturnCode processDeleteRequest(const std::string &l3_admit_key); + + // Deletes a L3 Admit entry. 
Return true on success. + ReturnCode removeL3Admit(const std::string &l3_admit_key); + + // state verification DB helper functions. Return err string or empty string. + std::string verifyStateCache(const P4L3AdmitAppDbEntry &app_db_entry, const P4L3AdmitEntry *l3_admit_entry); + std::string verifyStateAsicDb(const P4L3AdmitEntry *l3_admit_entry); + + // m_l3AdmitTable: l3_admit_key, P4L3AdmitEntry + std::unordered_map m_l3AdmitTable; + + ResponsePublisherInterface *m_publisher; + std::deque m_entries; + P4OidMapper *m_p4OidMapper; + + friend class L3AdmitManagerTest; +}; diff --git a/orchagent/p4orch/mirror_session_manager.cpp b/orchagent/p4orch/mirror_session_manager.cpp index 067bc5aa1a..f61b3d4be5 100644 --- a/orchagent/p4orch/mirror_session_manager.cpp +++ b/orchagent/p4orch/mirror_session_manager.cpp @@ -1,10 +1,18 @@ #include "p4orch/mirror_session_manager.h" +#include + +#include "SaiAttributeList.h" +#include "dbconnector.h" #include "json.hpp" #include "p4orch/p4orch_util.h" #include "portsorch.h" +#include "sai_serialize.h" #include "swss/logger.h" #include "swssnet.h" +#include "table.h" + +using ::p4orch::kTableKeyDelimiter; extern PortsOrch *gPortsOrch; extern sai_mirror_api_t *sai_mirror_api; @@ -13,7 +21,34 @@ extern sai_object_id_t gSwitchId; namespace p4orch { -void MirrorSessionManager::enqueue(const swss::KeyOpFieldsValuesTuple &entry) +ReturnCode MirrorSessionManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) +{ + std::string value; + + try + { + nlohmann::json j = nlohmann::json::parse(json_key); + if (j.find(prependMatchField(p4orch::kMirrorSessionId)) != j.end()) + { + value = j.at(prependMatchField(p4orch::kMirrorSessionId)).get(); + object_key = KeyGenerator::generateMirrorSessionKey(value); + object_type = SAI_OBJECT_TYPE_MIRROR_SESSION; + return ReturnCode(); + } + else + { + SWSS_LOG_ERROR("%s match parameter absent: required for dependent object query", p4orch::kMirrorSessionId); 
+ } + } + catch (std::exception &ex) + { + SWSS_LOG_ERROR("json_key parse error"); + } + + return StatusCode::SWSS_RC_INVALID_PARAM; +} + +void MirrorSessionManager::enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) { SWSS_LOG_ENTER(); m_entries.push_back(entry); @@ -78,6 +113,72 @@ void MirrorSessionManager::drain() m_entries.clear(); } +ReturnCodeOr> getSaiAttrs(const P4MirrorSessionEntry &mirror_session_entry) +{ + swss::Port port; + if (!gPortsOrch->getPort(mirror_session_entry.port, port)) + { + LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) + << "Failed to get port info for port " << QuotedVar(mirror_session_entry.port)); + } + if (port.m_type != Port::Type::PHY) + { + LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Port " << QuotedVar(mirror_session_entry.port) << "'s type " << port.m_type + << " is not physical and is invalid as destination " + "port for mirror packet."); + } + + std::vector attrs; + sai_attribute_t attr; + + attr.id = SAI_MIRROR_SESSION_ATTR_MONITOR_PORT; + attr.value.oid = port.m_port_id; + attrs.push_back(attr); + + attr.id = SAI_MIRROR_SESSION_ATTR_TYPE; + attr.value.s32 = SAI_MIRROR_SESSION_TYPE_ENHANCED_REMOTE; + attrs.push_back(attr); + + attr.id = SAI_MIRROR_SESSION_ATTR_ERSPAN_ENCAPSULATION_TYPE; + attr.value.s32 = SAI_ERSPAN_ENCAPSULATION_TYPE_MIRROR_L3_GRE_TUNNEL; + attrs.push_back(attr); + + attr.id = SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION; + attr.value.u8 = MIRROR_SESSION_DEFAULT_IP_HDR_VER; + attrs.push_back(attr); + + attr.id = SAI_MIRROR_SESSION_ATTR_TOS; + attr.value.u8 = mirror_session_entry.tos; + attrs.push_back(attr); + + attr.id = SAI_MIRROR_SESSION_ATTR_TTL; + attr.value.u8 = mirror_session_entry.ttl; + attrs.push_back(attr); + + attr.id = SAI_MIRROR_SESSION_ATTR_SRC_IP_ADDRESS; + swss::copy(attr.value.ipaddr, mirror_session_entry.src_ip); + attrs.push_back(attr); + + attr.id = SAI_MIRROR_SESSION_ATTR_DST_IP_ADDRESS; + 
swss::copy(attr.value.ipaddr, mirror_session_entry.dst_ip); + attrs.push_back(attr); + + attr.id = SAI_MIRROR_SESSION_ATTR_SRC_MAC_ADDRESS; + memcpy(attr.value.mac, mirror_session_entry.src_mac.getMac(), sizeof(sai_mac_t)); + attrs.push_back(attr); + + attr.id = SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS; + memcpy(attr.value.mac, mirror_session_entry.dst_mac.getMac(), sizeof(sai_mac_t)); + attrs.push_back(attr); + + attr.id = SAI_MIRROR_SESSION_ATTR_GRE_PROTOCOL_TYPE; + attr.value.u16 = GRE_PROTOCOL_ERSPAN; + attrs.push_back(attr); + + return attrs; +} + ReturnCodeOr MirrorSessionManager::deserializeP4MirrorSessionAppDbEntry( const std::string &key, const std::vector &attributes) { @@ -264,68 +365,8 @@ ReturnCode MirrorSessionManager::createMirrorSession(P4MirrorSessionEntry mirror << QuotedVar(mirror_session_entry.mirror_session_key) << " already exists in centralized mapper"); } - - swss::Port port; - if (!gPortsOrch->getPort(mirror_session_entry.port, port)) - { - LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) - << "Failed to get port info for port " << QuotedVar(mirror_session_entry.port)); - } - if (port.m_type != Port::Type::PHY) - { - LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "Port " << QuotedVar(mirror_session_entry.port) << "'s type " << port.m_type - << " is not physical and is invalid as destination " - "port for mirror packet."); - } - // Prepare attributes for the SAI creation call. 
- std::vector attrs; - sai_attribute_t attr; - - attr.id = SAI_MIRROR_SESSION_ATTR_MONITOR_PORT; - attr.value.oid = port.m_port_id; - attrs.push_back(attr); - - attr.id = SAI_MIRROR_SESSION_ATTR_TYPE; - attr.value.s32 = SAI_MIRROR_SESSION_TYPE_ENHANCED_REMOTE; - attrs.push_back(attr); - - attr.id = SAI_MIRROR_SESSION_ATTR_ERSPAN_ENCAPSULATION_TYPE; - attr.value.s32 = SAI_ERSPAN_ENCAPSULATION_TYPE_MIRROR_L3_GRE_TUNNEL; - attrs.push_back(attr); - - attr.id = SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION; - attr.value.u8 = MIRROR_SESSION_DEFAULT_IP_HDR_VER; - attrs.push_back(attr); - - attr.id = SAI_MIRROR_SESSION_ATTR_TOS; - attr.value.u8 = mirror_session_entry.tos; - attrs.push_back(attr); - - attr.id = SAI_MIRROR_SESSION_ATTR_TTL; - attr.value.u8 = mirror_session_entry.ttl; - attrs.push_back(attr); - - attr.id = SAI_MIRROR_SESSION_ATTR_SRC_IP_ADDRESS; - swss::copy(attr.value.ipaddr, mirror_session_entry.src_ip); - attrs.push_back(attr); - - attr.id = SAI_MIRROR_SESSION_ATTR_DST_IP_ADDRESS; - swss::copy(attr.value.ipaddr, mirror_session_entry.dst_ip); - attrs.push_back(attr); - - attr.id = SAI_MIRROR_SESSION_ATTR_SRC_MAC_ADDRESS; - memcpy(attr.value.mac, mirror_session_entry.src_mac.getMac(), sizeof(sai_mac_t)); - attrs.push_back(attr); - - attr.id = SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS; - memcpy(attr.value.mac, mirror_session_entry.dst_mac.getMac(), sizeof(sai_mac_t)); - attrs.push_back(attr); - - attr.id = SAI_MIRROR_SESSION_ATTR_GRE_PROTOCOL_TYPE; - attr.value.u16 = GRE_PROTOCOL_ERSPAN; - attrs.push_back(attr); + ASSIGN_OR_RETURN(std::vector attrs, getSaiAttrs(mirror_session_entry)); // Call SAI API. 
CHECK_ERROR_AND_LOG_AND_RETURN( @@ -723,4 +764,163 @@ ReturnCode MirrorSessionManager::processDeleteRequest(const std::string &mirror_ return ReturnCode(); } +std::string MirrorSessionManager::verifyState(const std::string &key, const std::vector &tuple) +{ + SWSS_LOG_ENTER(); + + auto pos = key.find_first_of(kTableKeyDelimiter); + if (pos == std::string::npos) + { + return std::string("Invalid key: ") + key; + } + std::string p4rt_table = key.substr(0, pos); + std::string p4rt_key = key.substr(pos + 1); + if (p4rt_table != APP_P4RT_TABLE_NAME) + { + return std::string("Invalid key: ") + key; + } + std::string table_name; + std::string key_content; + parseP4RTKey(p4rt_key, &table_name, &key_content); + if (table_name != APP_P4RT_MIRROR_SESSION_TABLE_NAME) + { + return std::string("Invalid key: ") + key; + } + + ReturnCode status; + auto app_db_entry_or = deserializeP4MirrorSessionAppDbEntry(key_content, tuple); + if (!app_db_entry_or.ok()) + { + status = app_db_entry_or.status(); + std::stringstream msg; + msg << "Unable to deserialize key " << QuotedVar(key) << ": " << status.message(); + return msg.str(); + } + auto &app_db_entry = *app_db_entry_or; + const std::string mirror_session_key = KeyGenerator::generateMirrorSessionKey(app_db_entry.mirror_session_id); + auto *mirror_session_entry = getMirrorSessionEntry(mirror_session_key); + if (mirror_session_entry == nullptr) + { + std::stringstream msg; + msg << "No entry found with key " << QuotedVar(key); + return msg.str(); + } + + std::string cache_result = verifyStateCache(app_db_entry, mirror_session_entry); + std::string asic_db_result = verifyStateAsicDb(mirror_session_entry); + if (cache_result.empty()) + { + return asic_db_result; + } + if (asic_db_result.empty()) + { + return cache_result; + } + return cache_result + "; " + asic_db_result; +} + +std::string MirrorSessionManager::verifyStateCache(const P4MirrorSessionAppDbEntry &app_db_entry, + const P4MirrorSessionEntry *mirror_session_entry) +{ + const 
std::string mirror_session_key = KeyGenerator::generateMirrorSessionKey(app_db_entry.mirror_session_id); + + if (mirror_session_entry->mirror_session_key != mirror_session_key) + { + std::stringstream msg; + msg << "Mirror section with key " << QuotedVar(mirror_session_key) << " does not match internal cache " + << QuotedVar(mirror_session_entry->mirror_session_key) << " in mirror section manager."; + return msg.str(); + } + if (mirror_session_entry->mirror_session_id != app_db_entry.mirror_session_id) + { + std::stringstream msg; + msg << "Mirror section " << QuotedVar(app_db_entry.mirror_session_id) << " does not match internal cache " + << QuotedVar(mirror_session_entry->mirror_session_id) << " in mirror section manager."; + return msg.str(); + } + if (mirror_session_entry->port != app_db_entry.port) + { + std::stringstream msg; + msg << "Mirror section " << QuotedVar(app_db_entry.mirror_session_id) << " with port " + << QuotedVar(app_db_entry.port) << " does not match internal cache " + << QuotedVar(mirror_session_entry->port) << " in mirror section manager."; + return msg.str(); + } + if (mirror_session_entry->src_ip.to_string() != app_db_entry.src_ip.to_string()) + { + std::stringstream msg; + msg << "Mirror section " << QuotedVar(app_db_entry.mirror_session_id) << " with source IP " + << app_db_entry.src_ip.to_string() << " does not match internal cache " + << mirror_session_entry->src_ip.to_string() << " in mirror section manager."; + return msg.str(); + } + if (mirror_session_entry->dst_ip.to_string() != app_db_entry.dst_ip.to_string()) + { + std::stringstream msg; + msg << "Mirror section " << QuotedVar(app_db_entry.mirror_session_id) << " with dest IP " + << app_db_entry.dst_ip.to_string() << " does not match internal cache " + << mirror_session_entry->dst_ip.to_string() << " in mirror section manager."; + return msg.str(); + } + if (mirror_session_entry->src_mac.to_string() != app_db_entry.src_mac.to_string()) + { + std::stringstream msg; + msg << 
"Mirror section " << QuotedVar(app_db_entry.mirror_session_id) << " with source MAC " + << app_db_entry.src_mac.to_string() << " does not match internal cache " + << mirror_session_entry->src_mac.to_string() << " in mirror section manager."; + return msg.str(); + } + if (mirror_session_entry->dst_mac.to_string() != app_db_entry.dst_mac.to_string()) + { + std::stringstream msg; + msg << "Mirror section " << QuotedVar(app_db_entry.mirror_session_id) << " with dest MAC " + << app_db_entry.dst_mac.to_string() << " does not match internal cache " + << mirror_session_entry->dst_mac.to_string() << " in mirror section manager."; + return msg.str(); + } + if (mirror_session_entry->ttl != app_db_entry.ttl) + { + std::stringstream msg; + msg << "Mirror section " << QuotedVar(app_db_entry.mirror_session_id) << " with ttl " << app_db_entry.ttl + << " does not match internal cache " << mirror_session_entry->ttl << " in mirror section manager."; + return msg.str(); + } + if (mirror_session_entry->tos != app_db_entry.tos) + { + std::stringstream msg; + msg << "Mirror section " << QuotedVar(app_db_entry.mirror_session_id) << " with tos " << app_db_entry.tos + << " does not match internal cache " << mirror_session_entry->tos << " in mirror section manager."; + return msg.str(); + } + + return m_p4OidMapper->verifyOIDMapping(SAI_OBJECT_TYPE_MIRROR_SESSION, mirror_session_entry->mirror_session_key, + mirror_session_entry->mirror_session_oid); +} + +std::string MirrorSessionManager::verifyStateAsicDb(const P4MirrorSessionEntry *mirror_session_entry) +{ + auto attrs_or = getSaiAttrs(*mirror_session_entry); + if (!attrs_or.ok()) + { + return std::string("Failed to get SAI attrs: ") + attrs_or.status().message(); + } + std::vector attrs = *attrs_or; + std::vector exp = saimeta::SaiAttributeList::serialize_attr_list( + SAI_OBJECT_TYPE_MIRROR_SESSION, (uint32_t)attrs.size(), attrs.data(), + /*countOnly=*/false); + + swss::DBConnector db("ASIC_DB", 0); + swss::Table table(&db, "ASIC_STATE"); 
+ std::string key = sai_serialize_object_type(SAI_OBJECT_TYPE_MIRROR_SESSION) + ":" + + sai_serialize_object_id(mirror_session_entry->mirror_session_oid); + std::vector values; + if (!table.get(key, values)) + { + return std::string("ASIC DB key not found ") + key; + } + + return verifyAttrs(values, exp, std::vector{}, + /*allow_unknown=*/false); +} + } // namespace p4orch diff --git a/orchagent/p4orch/mirror_session_manager.h b/orchagent/p4orch/mirror_session_manager.h index c41dc07eb3..5f1c26e10a 100644 --- a/orchagent/p4orch/mirror_session_manager.h +++ b/orchagent/p4orch/mirror_session_manager.h @@ -81,10 +81,14 @@ class MirrorSessionManager : public ObjectManagerInterface m_publisher = publisher; } - void enqueue(const swss::KeyOpFieldsValuesTuple &entry) override; + void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; void drain() override; + std::string verifyState(const std::string &key, const std::vector &tuple) override; + + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) override; + private: ReturnCodeOr deserializeP4MirrorSessionAppDbEntry( const std::string &key, const std::vector &attributes); @@ -108,6 +112,11 @@ class MirrorSessionManager : public ObjectManagerInterface ReturnCode processDeleteRequest(const std::string &mirror_session_key); + // state verification DB helper functions. Return err string or empty string. + std::string verifyStateCache(const P4MirrorSessionAppDbEntry &app_db_entry, + const P4MirrorSessionEntry *mirror_session_entry); + std::string verifyStateAsicDb(const P4MirrorSessionEntry *mirror_session_entry); + std::unordered_map m_mirrorSessionTable; // Owners of pointers below must outlive this class's instance. 
diff --git a/orchagent/p4orch/neighbor_manager.cpp b/orchagent/p4orch/neighbor_manager.cpp index 059aa76698..06c1ee9eb1 100644 --- a/orchagent/p4orch/neighbor_manager.cpp +++ b/orchagent/p4orch/neighbor_manager.cpp @@ -4,23 +4,51 @@ #include #include +#include "SaiAttributeList.h" #include "crmorch.h" +#include "dbconnector.h" #include "json.hpp" #include "logger.h" #include "orch.h" #include "p4orch/p4orch_util.h" +#include "sai_serialize.h" #include "swssnet.h" +#include "table.h" extern "C" { #include "sai.h" } +using ::p4orch::kTableKeyDelimiter; + extern sai_object_id_t gSwitchId; extern sai_neighbor_api_t *sai_neighbor_api; extern CrmOrch *gCrmOrch; +namespace +{ + +std::vector getSaiAttrs(const P4NeighborEntry &neighbor_entry) +{ + std::vector attrs; + sai_attribute_t attr; + attr.id = SAI_NEIGHBOR_ENTRY_ATTR_DST_MAC_ADDRESS; + memcpy(attr.value.mac, neighbor_entry.dst_mac_address.getMac(), sizeof(sai_mac_t)); + attrs.push_back(attr); + + // Do not program host route. + // This is mainly for neighbor with IPv6 link-local addresses. 
+ attr.id = SAI_NEIGHBOR_ENTRY_ATTR_NO_HOST_ROUTE; + attr.value.booldata = true; + attrs.push_back(attr); + + return attrs; +} + +} // namespace + P4NeighborEntry::P4NeighborEntry(const std::string &router_interface_id, const swss::IpAddress &ip_address, const swss::MacAddress &mac_address) { @@ -34,6 +62,25 @@ P4NeighborEntry::P4NeighborEntry(const std::string &router_interface_id, const s neighbor_key = KeyGenerator::generateNeighborKey(router_intf_id, neighbor_id); } +ReturnCodeOr NeighborManager::getSaiEntry(const P4NeighborEntry &neighbor_entry) +{ + const std::string &router_intf_key = neighbor_entry.router_intf_key; + sai_object_id_t router_intf_oid; + if (!m_p4OidMapper->getOID(SAI_OBJECT_TYPE_ROUTER_INTERFACE, router_intf_key, &router_intf_oid)) + { + LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) + << "Router intf key " << QuotedVar(router_intf_key) + << " does not exist in certralized map"); + } + + sai_neighbor_entry_t neigh_entry; + neigh_entry.switch_id = gSwitchId; + copy(neigh_entry.ip_address, neighbor_entry.neighbor_id); + neigh_entry.rif_id = router_intf_oid; + + return neigh_entry; +} + ReturnCodeOr NeighborManager::deserializeNeighborEntry( const std::string &key, const std::vector &attributes) { @@ -138,37 +185,14 @@ ReturnCode NeighborManager::createNeighbor(P4NeighborEntry &neighbor_entry) << " already exists in centralized map"); } - const std::string &router_intf_key = neighbor_entry.router_intf_key; - sai_object_id_t router_intf_oid; - if (!m_p4OidMapper->getOID(SAI_OBJECT_TYPE_ROUTER_INTERFACE, router_intf_key, &router_intf_oid)) - { - LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) - << "Router intf key " << QuotedVar(router_intf_key) - << " does not exist in certralized map"); - } - - neighbor_entry.neigh_entry.switch_id = gSwitchId; - copy(neighbor_entry.neigh_entry.ip_address, neighbor_entry.neighbor_id); - neighbor_entry.neigh_entry.rif_id = router_intf_oid; - - std::vector neigh_attrs; - 
sai_attribute_t neigh_attr; - neigh_attr.id = SAI_NEIGHBOR_ENTRY_ATTR_DST_MAC_ADDRESS; - memcpy(neigh_attr.value.mac, neighbor_entry.dst_mac_address.getMac(), sizeof(sai_mac_t)); - neigh_attrs.push_back(neigh_attr); - - // Do not program host route. - // This is mainly for neighbor with IPv6 link-local addresses. - neigh_attr.id = SAI_NEIGHBOR_ENTRY_ATTR_NO_HOST_ROUTE; - neigh_attr.value.booldata = true; - neigh_attrs.push_back(neigh_attr); + ASSIGN_OR_RETURN(neighbor_entry.neigh_entry, getSaiEntry(neighbor_entry)); + auto attrs = getSaiAttrs(neighbor_entry); - CHECK_ERROR_AND_LOG_AND_RETURN(sai_neighbor_api->create_neighbor_entry(&neighbor_entry.neigh_entry, - static_cast(neigh_attrs.size()), - neigh_attrs.data()), + CHECK_ERROR_AND_LOG_AND_RETURN(sai_neighbor_api->create_neighbor_entry( + &neighbor_entry.neigh_entry, static_cast(attrs.size()), attrs.data()), "Failed to create neighbor with key " << QuotedVar(neighbor_key)); - m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_ROUTER_INTERFACE, router_intf_key); + m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_ROUTER_INTERFACE, neighbor_entry.router_intf_key); if (neighbor_entry.neighbor_id.isV4()) { gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV4_NEIGHBOR); @@ -300,7 +324,44 @@ ReturnCode NeighborManager::processDeleteRequest(const std::string &neighbor_key return status; } -void NeighborManager::enqueue(const swss::KeyOpFieldsValuesTuple &entry) +ReturnCode NeighborManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) +{ + std::string router_intf_id, neighbor_id; + swss::IpAddress neighbor; + + try + { + nlohmann::json j = nlohmann::json::parse(json_key); + if (j.find(prependMatchField(p4orch::kRouterInterfaceId)) != j.end()) + { + router_intf_id = j.at(prependMatchField(p4orch::kRouterInterfaceId)).get(); + if (j.find(prependMatchField(p4orch::kNeighborId)) != j.end()) + { + neighbor_id = j.at(prependMatchField(p4orch::kNeighborId)).get(); + neighbor 
= swss::IpAddress(neighbor_id); + object_key = KeyGenerator::generateNeighborKey(router_intf_id, neighbor); + object_type = SAI_OBJECT_TYPE_NEIGHBOR_ENTRY; + return ReturnCode(); + } + else + { + SWSS_LOG_ERROR("%s match parameter absent: required for dependent object query", p4orch::kNeighborId); + } + } + else + { + SWSS_LOG_ERROR("%s match parameter absent: required for dependent object query", p4orch::kRouterInterfaceId); + } + } + catch (std::exception &ex) + { + SWSS_LOG_ERROR("json_key parse error"); + } + + return StatusCode::SWSS_RC_INVALID_PARAM; +} + +void NeighborManager::enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) { m_entries.push_back(entry); } @@ -374,3 +435,141 @@ void NeighborManager::drain() } m_entries.clear(); } + +std::string NeighborManager::verifyState(const std::string &key, const std::vector &tuple) +{ + SWSS_LOG_ENTER(); + + auto pos = key.find_first_of(kTableKeyDelimiter); + if (pos == std::string::npos) + { + return std::string("Invalid key: ") + key; + } + std::string p4rt_table = key.substr(0, pos); + std::string p4rt_key = key.substr(pos + 1); + if (p4rt_table != APP_P4RT_TABLE_NAME) + { + return std::string("Invalid key: ") + key; + } + std::string table_name; + std::string key_content; + parseP4RTKey(p4rt_key, &table_name, &key_content); + if (table_name != APP_P4RT_NEIGHBOR_TABLE_NAME) + { + return std::string("Invalid key: ") + key; + } + + ReturnCode status; + auto app_db_entry_or = deserializeNeighborEntry(key_content, tuple); + if (!app_db_entry_or.ok()) + { + status = app_db_entry_or.status(); + std::stringstream msg; + msg << "Unable to deserialize key " << QuotedVar(key) << ": " << status.message(); + return msg.str(); + } + auto &app_db_entry = *app_db_entry_or; + + const std::string neighbor_key = + KeyGenerator::generateNeighborKey(app_db_entry.router_intf_id, app_db_entry.neighbor_id); + auto *neighbor_entry = getNeighborEntry(neighbor_key); + if (neighbor_entry == nullptr) + { + 
std::stringstream msg; + msg << "No entry found with key " << QuotedVar(key); + return msg.str(); + } + + std::string cache_result = verifyStateCache(app_db_entry, neighbor_entry); + std::string asic_db_result = verifyStateAsicDb(neighbor_entry); + if (cache_result.empty()) + { + return asic_db_result; + } + if (asic_db_result.empty()) + { + return cache_result; + } + return cache_result + "; " + asic_db_result; +} + +std::string NeighborManager::verifyStateCache(const P4NeighborAppDbEntry &app_db_entry, + const P4NeighborEntry *neighbor_entry) +{ + const std::string neighbor_key = + KeyGenerator::generateNeighborKey(app_db_entry.router_intf_id, app_db_entry.neighbor_id); + ReturnCode status = validateNeighborAppDbEntry(app_db_entry); + if (!status.ok()) + { + std::stringstream msg; + msg << "Validation failed for neighbor DB entry with key " << QuotedVar(neighbor_key) << ": " + << status.message(); + return msg.str(); + } + + if (neighbor_entry->router_intf_id != app_db_entry.router_intf_id) + { + std::stringstream msg; + msg << "Neighbor " << QuotedVar(neighbor_key) << " with ritf ID " << QuotedVar(app_db_entry.router_intf_id) + << " does not match internal cache " << QuotedVar(neighbor_entry->router_intf_id) + << " in neighbor manager."; + return msg.str(); + } + if (neighbor_entry->neighbor_id.to_string() != app_db_entry.neighbor_id.to_string()) + { + std::stringstream msg; + msg << "Neighbor " << QuotedVar(neighbor_key) << " with neighbor ID " << app_db_entry.neighbor_id.to_string() + << " does not match internal cache " << neighbor_entry->neighbor_id.to_string() << " in neighbor manager."; + return msg.str(); + } + if (neighbor_entry->dst_mac_address.to_string() != app_db_entry.dst_mac_address.to_string()) + { + std::stringstream msg; + msg << "Neighbor " << QuotedVar(neighbor_key) << " with dest MAC " << app_db_entry.dst_mac_address.to_string() + << " does not match internal cache " << neighbor_entry->dst_mac_address.to_string() + << " in neighbor manager."; 
+ return msg.str(); + } + if (neighbor_entry->router_intf_key != KeyGenerator::generateRouterInterfaceKey(app_db_entry.router_intf_id)) + { + std::stringstream msg; + msg << "Neighbor " << QuotedVar(neighbor_key) << " does not match internal cache on ritf key " + << QuotedVar(neighbor_entry->router_intf_key) << " in neighbor manager."; + return msg.str(); + } + if (neighbor_entry->neighbor_key != neighbor_key) + { + std::stringstream msg; + msg << "Neighbor " << QuotedVar(neighbor_key) << " does not match internal cache on neighbor key " + << QuotedVar(neighbor_entry->neighbor_key) << " in neighbor manager."; + return msg.str(); + } + return ""; +} + +std::string NeighborManager::verifyStateAsicDb(const P4NeighborEntry *neighbor_entry) +{ + auto sai_entry_or = getSaiEntry(*neighbor_entry); + if (!sai_entry_or.ok()) + { + return std::string("Failed to get SAI entry: ") + sai_entry_or.status().message(); + } + sai_neighbor_entry_t sai_entry = *sai_entry_or; + auto attrs = getSaiAttrs(*neighbor_entry); + std::vector exp = saimeta::SaiAttributeList::serialize_attr_list( + SAI_OBJECT_TYPE_NEIGHBOR_ENTRY, (uint32_t)attrs.size(), attrs.data(), + /*countOnly=*/false); + + swss::DBConnector db("ASIC_DB", 0); + swss::Table table(&db, "ASIC_STATE"); + std::string key = + sai_serialize_object_type(SAI_OBJECT_TYPE_NEIGHBOR_ENTRY) + ":" + sai_serialize_neighbor_entry(sai_entry); + std::vector values; + if (!table.get(key, values)) + { + return std::string("ASIC DB key not found ") + key; + } + + return verifyAttrs(values, exp, std::vector{}, + /*allow_unknown=*/false); +} diff --git a/orchagent/p4orch/neighbor_manager.h b/orchagent/p4orch/neighbor_manager.h index 2ede9de763..0022d3a8cc 100644 --- a/orchagent/p4orch/neighbor_manager.h +++ b/orchagent/p4orch/neighbor_manager.h @@ -49,8 +49,10 @@ class NeighborManager : public ObjectManagerInterface } virtual ~NeighborManager() = default; - void enqueue(const swss::KeyOpFieldsValuesTuple &entry) override; + void enqueue(const 
std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; void drain() override; + std::string verifyState(const std::string &key, const std::vector &tuple) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) override; private: ReturnCodeOr deserializeNeighborEntry(const std::string &key, @@ -63,6 +65,9 @@ class NeighborManager : public ObjectManagerInterface ReturnCode processAddRequest(const P4NeighborAppDbEntry &app_db_entry, const std::string &neighbor_key); ReturnCode processUpdateRequest(const P4NeighborAppDbEntry &app_db_entry, P4NeighborEntry *neighbor_entry); ReturnCode processDeleteRequest(const std::string &neighbor_key); + std::string verifyStateCache(const P4NeighborAppDbEntry &app_db_entry, const P4NeighborEntry *neighbor_entry); + std::string verifyStateAsicDb(const P4NeighborEntry *neighbor_entry); + ReturnCodeOr getSaiEntry(const P4NeighborEntry &neighbor_entry); P4OidMapper *m_p4OidMapper; P4NeighborTable m_neighborTable; diff --git a/orchagent/p4orch/next_hop_manager.cpp b/orchagent/p4orch/next_hop_manager.cpp index 3e2d9ff548..5718fbb72b 100644 --- a/orchagent/p4orch/next_hop_manager.cpp +++ b/orchagent/p4orch/next_hop_manager.cpp @@ -4,30 +4,177 @@ #include #include +#include "SaiAttributeList.h" #include "crmorch.h" +#include "dbconnector.h" #include "ipaddress.h" #include "json.hpp" #include "logger.h" +#include "p4orch/p4orch.h" #include "p4orch/p4orch_util.h" +#include "sai_serialize.h" #include "swssnet.h" +#include "table.h" extern "C" { #include "sai.h" } +using ::p4orch::kTableKeyDelimiter; + extern sai_object_id_t gSwitchId; extern sai_next_hop_api_t *sai_next_hop_api; extern CrmOrch *gCrmOrch; +extern P4Orch *gP4Orch; P4NextHopEntry::P4NextHopEntry(const std::string &next_hop_id, const std::string &router_interface_id, - const swss::IpAddress &neighbor_id) - : next_hop_id(next_hop_id), router_interface_id(router_interface_id), 
neighbor_id(neighbor_id) + const std::string &gre_tunnel_id, const swss::IpAddress &neighbor_id) + : next_hop_id(next_hop_id), router_interface_id(router_interface_id), gre_tunnel_id(gre_tunnel_id), + neighbor_id(neighbor_id) { SWSS_LOG_ENTER(); next_hop_key = KeyGenerator::generateNextHopKey(next_hop_id); } -void NextHopManager::enqueue(const swss::KeyOpFieldsValuesTuple &entry) +namespace +{ + +ReturnCode validateAppDbEntry(const P4NextHopAppDbEntry &app_db_entry) +{ + // TODO(b/225242372): remove kSetNexthop action after P4RT and Orion update + // naming + if (app_db_entry.action_str != p4orch::kSetIpNexthop && app_db_entry.action_str != p4orch::kSetNexthop && + app_db_entry.action_str != p4orch::kSetTunnelNexthop) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Invalid action " << QuotedVar(app_db_entry.action_str) << " of Nexthop App DB entry"; + } + if (app_db_entry.action_str == p4orch::kSetIpNexthop && app_db_entry.neighbor_id.isZero()) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Missing field " << QuotedVar(prependParamField(p4orch::kNeighborId)) << " for action " + << QuotedVar(p4orch::kSetIpNexthop) << " in table entry"; + } + // TODO(b/225242372): remove kSetNexthop action after P4RT and Orion update + // naming + if (app_db_entry.action_str == p4orch::kSetIpNexthop || app_db_entry.action_str == p4orch::kSetNexthop) + { + if (!app_db_entry.gre_tunnel_id.empty()) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Unexpected field " << QuotedVar(prependParamField(p4orch::kTunnelId)) << " for action " + << QuotedVar(p4orch::kSetIpNexthop) << " in table entry"; + } + if (app_db_entry.router_interface_id.empty()) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Missing field " << QuotedVar(prependParamField(p4orch::kRouterInterfaceId)) << " for action " + << QuotedVar(p4orch::kSetIpNexthop) << " in table entry"; + } + } + + if (app_db_entry.action_str == p4orch::kSetTunnelNexthop) + { + if 
(!app_db_entry.router_interface_id.empty()) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Unexpected field " << QuotedVar(prependParamField(p4orch::kRouterInterfaceId)) << " for action " + << QuotedVar(p4orch::kSetTunnelNexthop) << " in table entry"; + } + if (app_db_entry.gre_tunnel_id.empty()) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Missing field " << QuotedVar(prependParamField(p4orch::kTunnelId)) << " for action " + << QuotedVar(p4orch::kSetTunnelNexthop) << " in table entry"; + } + } + return ReturnCode(); +} + +} // namespace + +ReturnCodeOr> NextHopManager::getSaiAttrs(const P4NextHopEntry &next_hop_entry) +{ + std::vector next_hop_attrs; + sai_attribute_t next_hop_attr; + + if (!next_hop_entry.gre_tunnel_id.empty()) + { + // From centralized mapper and, get gre tunnel that next hop depends on. Get + // underlay router interface from gre tunnel manager, + sai_object_id_t tunnel_oid; + if (!m_p4OidMapper->getOID(SAI_OBJECT_TYPE_TUNNEL, + KeyGenerator::generateTunnelKey(next_hop_entry.gre_tunnel_id), &tunnel_oid)) + { + LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) + << "GRE Tunnel " << QuotedVar(next_hop_entry.gre_tunnel_id) << " does not exist"); + } + + next_hop_attr.id = SAI_NEXT_HOP_ATTR_TYPE; + next_hop_attr.value.s32 = SAI_NEXT_HOP_TYPE_TUNNEL_ENCAP; + next_hop_attrs.push_back(next_hop_attr); + + next_hop_attr.id = SAI_NEXT_HOP_ATTR_TUNNEL_ID; + next_hop_attr.value.oid = tunnel_oid; + next_hop_attrs.push_back(next_hop_attr); + } + else + { + // From centralized mapper, get OID of router interface that next hop + // depends on. 
+ sai_object_id_t rif_oid; + if (!m_p4OidMapper->getOID(SAI_OBJECT_TYPE_ROUTER_INTERFACE, + KeyGenerator::generateRouterInterfaceKey(next_hop_entry.router_interface_id), + &rif_oid)) + { + LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) + << "Router intf " << QuotedVar(next_hop_entry.router_interface_id) + << " does not exist"); + } + next_hop_attr.id = SAI_NEXT_HOP_ATTR_TYPE; + next_hop_attr.value.s32 = SAI_NEXT_HOP_TYPE_IP; + next_hop_attrs.push_back(next_hop_attr); + + next_hop_attr.id = SAI_NEXT_HOP_ATTR_ROUTER_INTERFACE_ID; + next_hop_attr.value.oid = rif_oid; + next_hop_attrs.push_back(next_hop_attr); + } + + next_hop_attr.id = SAI_NEXT_HOP_ATTR_IP; + swss::copy(next_hop_attr.value.ipaddr, next_hop_entry.neighbor_id); + next_hop_attrs.push_back(next_hop_attr); + + return next_hop_attrs; +} + +ReturnCode NextHopManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) +{ + std::string value; + + try + { + nlohmann::json j = nlohmann::json::parse(json_key); + if (j.find(prependMatchField(p4orch::kNexthopId)) != j.end()) + { + value = j.at(prependMatchField(p4orch::kNexthopId)).get(); + object_key = KeyGenerator::generateNextHopKey(value); + object_type = SAI_OBJECT_TYPE_NEXT_HOP; + return ReturnCode(); + } + else + { + SWSS_LOG_ERROR("%s match parameter absent: required for dependent object query", p4orch::kNexthopId); + } + } + catch (std::exception &ex) + { + SWSS_LOG_ERROR("json_key parse error"); + } + + return StatusCode::SWSS_RC_INVALID_PARAM; +} + +void NextHopManager::enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) { m_entries.push_back(entry); } @@ -63,6 +210,16 @@ void NextHopManager::drain() const std::string &operation = kfvOp(key_op_fvs_tuple); if (operation == SET_COMMAND) { + status = validateAppDbEntry(app_db_entry); + if (!status.ok()) + { + SWSS_LOG_ERROR("Validation failed for Nexthop APP DB entry with key %s: %s", + 
QuotedVar(kfvKey(key_op_fvs_tuple)).c_str(), status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), + status, + /*replace=*/true); + continue; + } auto *next_hop_entry = getNextHopEntry(next_hop_key); if (next_hop_entry == nullptr) { @@ -113,6 +270,7 @@ ReturnCodeOr NextHopManager::deserializeP4NextHopAppDbEntry SWSS_LOG_ENTER(); P4NextHopAppDbEntry app_db_entry = {}; + app_db_entry.neighbor_id = swss::IpAddress("0.0.0.0"); try { @@ -131,7 +289,6 @@ ReturnCodeOr NextHopManager::deserializeP4NextHopAppDbEntry if (field == prependParamField(p4orch::kRouterInterfaceId)) { app_db_entry.router_interface_id = value; - app_db_entry.is_set_router_interface_id = true; } else if (field == prependParamField(p4orch::kNeighborId)) { @@ -144,9 +301,16 @@ ReturnCodeOr NextHopManager::deserializeP4NextHopAppDbEntry return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Invalid IP address " << QuotedVar(value) << " of field " << QuotedVar(field); } - app_db_entry.is_set_neighbor_id = true; } - else if (field != p4orch::kAction && field != p4orch::kControllerMetadata) + else if (field == prependParamField(p4orch::kTunnelId)) + { + app_db_entry.gre_tunnel_id = value; + } + else if (field == p4orch::kAction) + { + app_db_entry.action_str = value; + } + else if (field != p4orch::kControllerMetadata) { return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Unexpected field " << QuotedVar(field) << " in table entry"; @@ -160,7 +324,8 @@ ReturnCode NextHopManager::processAddRequest(const P4NextHopAppDbEntry &app_db_e { SWSS_LOG_ENTER(); - P4NextHopEntry next_hop_entry(app_db_entry.next_hop_id, app_db_entry.router_interface_id, app_db_entry.neighbor_id); + P4NextHopEntry next_hop_entry(app_db_entry.next_hop_id, app_db_entry.router_interface_id, + app_db_entry.gre_tunnel_id, app_db_entry.neighbor_id); auto status = createNextHop(next_hop_entry); if (!status.ok()) { @@ -187,14 +352,21 @@ ReturnCode 
NextHopManager::createNextHop(P4NextHopEntry &next_hop_entry) << " already exists in centralized mapper"); } - // From centralized mapper, get OID of router interface that next hop depends - // on. - const auto router_interface_key = KeyGenerator::generateRouterInterfaceKey(next_hop_entry.router_interface_id); - sai_object_id_t rif_oid; - if (!m_p4OidMapper->getOID(SAI_OBJECT_TYPE_ROUTER_INTERFACE, router_interface_key, &rif_oid)) + if (!next_hop_entry.gre_tunnel_id.empty()) { - LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) - << "Router intf " << QuotedVar(next_hop_entry.router_interface_id) << " does not exist"); + auto gre_tunnel_or = gP4Orch->getGreTunnelManager()->getConstGreTunnelEntry( + KeyGenerator::generateTunnelKey(next_hop_entry.gre_tunnel_id)); + if (!gre_tunnel_or.ok()) + { + LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) + << "GRE Tunnel " << QuotedVar(next_hop_entry.gre_tunnel_id) + << " does not exist in GRE Tunnel Manager"); + } + next_hop_entry.router_interface_id = (*gre_tunnel_or).router_interface_id; + // BRCM requires neighbor object to be created before GRE tunnel, referring + // to the one in GRE tunnel object when creating next_hop_entry_with + // setTunnelAction + next_hop_entry.neighbor_id = (*gre_tunnel_or).neighbor_id; } // Neighbor doesn't have OID and the IP addr needed in next hop creation is @@ -208,31 +380,26 @@ ReturnCode NextHopManager::createNextHop(P4NextHopEntry &next_hop_entry) << " does not exist in centralized mapper"); } - // Prepare attributes for the SAI creation call. 
- std::vector next_hop_attrs; - sai_attribute_t next_hop_attr; - - next_hop_attr.id = SAI_NEXT_HOP_ATTR_TYPE; - next_hop_attr.value.s32 = SAI_NEXT_HOP_TYPE_IP; - next_hop_attrs.push_back(next_hop_attr); - - next_hop_attr.id = SAI_NEXT_HOP_ATTR_IP; - swss::copy(next_hop_attr.value.ipaddr, next_hop_entry.neighbor_id); - next_hop_attrs.push_back(next_hop_attr); - - next_hop_attr.id = SAI_NEXT_HOP_ATTR_ROUTER_INTERFACE_ID; - next_hop_attr.value.oid = rif_oid; - next_hop_attrs.push_back(next_hop_attr); + ASSIGN_OR_RETURN(std::vector attrs, getSaiAttrs(next_hop_entry)); // Call SAI API. CHECK_ERROR_AND_LOG_AND_RETURN(sai_next_hop_api->create_next_hop(&next_hop_entry.next_hop_oid, gSwitchId, - (uint32_t)next_hop_attrs.size(), - next_hop_attrs.data()), - "Failed to create next hop " << QuotedVar(next_hop_entry.next_hop_key) << " on rif " - << QuotedVar(next_hop_entry.router_interface_id)); + (uint32_t)attrs.size(), attrs.data()), + "Failed to create next hop " << QuotedVar(next_hop_entry.next_hop_key)); + + if (!next_hop_entry.gre_tunnel_id.empty()) + { + // On successful creation, increment ref count for tunnel object + m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_TUNNEL, + KeyGenerator::generateTunnelKey(next_hop_entry.gre_tunnel_id)); + } + else + { + // On successful creation, increment ref count for router intf object + m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_ROUTER_INTERFACE, + KeyGenerator::generateRouterInterfaceKey(next_hop_entry.router_interface_id)); + } - // On successful creation, increment ref count. 
- m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_ROUTER_INTERFACE, router_interface_key); m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_NEIGHBOR_ENTRY, neighbor_key); if (next_hop_entry.neighbor_id.isV4()) { @@ -308,12 +475,35 @@ ReturnCode NextHopManager::removeNextHop(const std::string &next_hop_key) CHECK_ERROR_AND_LOG_AND_RETURN(sai_next_hop_api->remove_next_hop(next_hop_entry->next_hop_oid), "Failed to remove next hop " << QuotedVar(next_hop_entry->next_hop_key)); - // On successful deletion, decrement ref count. - m_p4OidMapper->decreaseRefCount(SAI_OBJECT_TYPE_ROUTER_INTERFACE, - KeyGenerator::generateRouterInterfaceKey(next_hop_entry->router_interface_id)); + if (!next_hop_entry->gre_tunnel_id.empty()) + { + // On successful deletion, decrement ref count for tunnel object + m_p4OidMapper->decreaseRefCount(SAI_OBJECT_TYPE_TUNNEL, + KeyGenerator::generateTunnelKey(next_hop_entry->gre_tunnel_id)); + } + else + { + // On successful deletion, decrement ref count for router intf object + m_p4OidMapper->decreaseRefCount(SAI_OBJECT_TYPE_ROUTER_INTERFACE, + KeyGenerator::generateRouterInterfaceKey(next_hop_entry->router_interface_id)); + } + + std::string router_interface_id = next_hop_entry->router_interface_id; + if (!next_hop_entry->gre_tunnel_id.empty()) + { + auto gre_tunnel_or = gP4Orch->getGreTunnelManager()->getConstGreTunnelEntry( + KeyGenerator::generateTunnelKey(next_hop_entry->gre_tunnel_id)); + if (!gre_tunnel_or.ok()) + { + LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) + << "GRE Tunnel " << QuotedVar(next_hop_entry->gre_tunnel_id) + << " does not exist in GRE Tunnel Manager"); + } + router_interface_id = (*gre_tunnel_or).router_interface_id; + } m_p4OidMapper->decreaseRefCount( SAI_OBJECT_TYPE_NEIGHBOR_ENTRY, - KeyGenerator::generateNeighborKey(next_hop_entry->router_interface_id, next_hop_entry->neighbor_id)); + KeyGenerator::generateNeighborKey(router_interface_id, next_hop_entry->neighbor_id)); if 
(next_hop_entry->neighbor_id.isV4()) { gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV4_NEXTHOP); @@ -331,3 +521,165 @@ ReturnCode NextHopManager::removeNextHop(const std::string &next_hop_key) return ReturnCode(); } + +std::string NextHopManager::verifyState(const std::string &key, const std::vector &tuple) +{ + SWSS_LOG_ENTER(); + + auto pos = key.find_first_of(kTableKeyDelimiter); + if (pos == std::string::npos) + { + return std::string("Invalid key: ") + key; + } + std::string p4rt_table = key.substr(0, pos); + std::string p4rt_key = key.substr(pos + 1); + if (p4rt_table != APP_P4RT_TABLE_NAME) + { + return std::string("Invalid key: ") + key; + } + std::string table_name; + std::string key_content; + parseP4RTKey(p4rt_key, &table_name, &key_content); + if (table_name != APP_P4RT_NEXTHOP_TABLE_NAME) + { + return std::string("Invalid key: ") + key; + } + + ReturnCode status; + auto app_db_entry_or = deserializeP4NextHopAppDbEntry(key_content, tuple); + if (!app_db_entry_or.ok()) + { + status = app_db_entry_or.status(); + std::stringstream msg; + msg << "Unable to deserialize key " << QuotedVar(key) << ": " << status.message(); + return msg.str(); + } + auto &app_db_entry = *app_db_entry_or; + const std::string next_hop_key = KeyGenerator::generateNextHopKey(app_db_entry.next_hop_id); + auto *next_hop_entry = getNextHopEntry(next_hop_key); + if (next_hop_entry == nullptr) + { + std::stringstream msg; + msg << "No entry found with key " << QuotedVar(key); + return msg.str(); + } + + std::string cache_result = verifyStateCache(app_db_entry, next_hop_entry); + std::string asic_db_result = verifyStateAsicDb(next_hop_entry); + if (cache_result.empty()) + { + return asic_db_result; + } + if (asic_db_result.empty()) + { + return cache_result; + } + return cache_result + "; " + asic_db_result; +} + +std::string NextHopManager::verifyStateCache(const P4NextHopAppDbEntry &app_db_entry, + const P4NextHopEntry *next_hop_entry) +{ + const std::string next_hop_key = 
KeyGenerator::generateNextHopKey(app_db_entry.next_hop_id); + if (next_hop_entry->next_hop_key != next_hop_key) + { + std::stringstream msg; + msg << "Nexthop with key " << QuotedVar(next_hop_key) << " does not match internal cache " + << QuotedVar(next_hop_entry->next_hop_key) << " in nexthop manager."; + return msg.str(); + } + if (next_hop_entry->next_hop_id != app_db_entry.next_hop_id) + { + std::stringstream msg; + msg << "Nexthop " << QuotedVar(app_db_entry.next_hop_id) << " does not match internal cache " + << QuotedVar(next_hop_entry->next_hop_id) << " in nexthop manager."; + return msg.str(); + } + if (app_db_entry.action_str == p4orch::kSetIpNexthop && + next_hop_entry->router_interface_id != app_db_entry.router_interface_id) + { + std::stringstream msg; + msg << "Nexthop " << QuotedVar(app_db_entry.next_hop_id) << " with ritf ID " + << QuotedVar(app_db_entry.router_interface_id) << " does not match internal cache " + << QuotedVar(next_hop_entry->router_interface_id) << " in nexthop manager."; + return msg.str(); + } + if (app_db_entry.action_str == p4orch::kSetIpNexthop && + next_hop_entry->neighbor_id.to_string() != app_db_entry.neighbor_id.to_string()) + { + std::stringstream msg; + msg << "Nexthop " << QuotedVar(app_db_entry.next_hop_id) << " with neighbor ID " + << app_db_entry.neighbor_id.to_string() << " does not match internal cache " + << next_hop_entry->neighbor_id.to_string() << " in nexthop manager."; + return msg.str(); + } + + if (app_db_entry.action_str == p4orch::kSetTunnelNexthop && + next_hop_entry->gre_tunnel_id != app_db_entry.gre_tunnel_id) + { + std::stringstream msg; + msg << "Nexthop " << QuotedVar(app_db_entry.next_hop_id) << " with GRE tunnel ID " + << QuotedVar(app_db_entry.gre_tunnel_id) << " does not match internal cache " + << QuotedVar(next_hop_entry->gre_tunnel_id) << " in nexthop manager."; + return msg.str(); + } + if (!next_hop_entry->gre_tunnel_id.empty()) + { + auto gre_tunnel_or = 
gP4Orch->getGreTunnelManager()->getConstGreTunnelEntry( + KeyGenerator::generateTunnelKey(next_hop_entry->gre_tunnel_id)); + if (!gre_tunnel_or.ok()) + { + std::stringstream msg; + msg << "GRE Tunnel " << QuotedVar(next_hop_entry->gre_tunnel_id) << " does not exist in GRE Tunnel Manager"; + return msg.str(); + } + P4GreTunnelEntry gre_tunnel = *gre_tunnel_or; + if (gre_tunnel.neighbor_id.to_string() != next_hop_entry->neighbor_id.to_string()) + { + std::stringstream msg; + msg << "Nexthop " << QuotedVar(next_hop_entry->next_hop_id) << " with neighbor ID " + << QuotedVar(next_hop_entry->neighbor_id.to_string()) + << " in nexthop manager does not match internal cache " << QuotedVar(gre_tunnel.neighbor_id.to_string()) + << " with tunnel ID " << QuotedVar(gre_tunnel.tunnel_id) << " in GRE tunnel manager."; + return msg.str(); + } + if (gre_tunnel.router_interface_id != next_hop_entry->router_interface_id) + { + std::stringstream msg; + msg << "Nexthop " << QuotedVar(next_hop_entry->next_hop_id) << " with rif ID " + << QuotedVar(next_hop_entry->router_interface_id) + << " in nexthop manager does not match internal cache " << QuotedVar(gre_tunnel.router_interface_id) + << " with tunnel ID " << QuotedVar(gre_tunnel.tunnel_id) << " in GRE tunnel manager."; + return msg.str(); + } + } + + return m_p4OidMapper->verifyOIDMapping(SAI_OBJECT_TYPE_NEXT_HOP, next_hop_entry->next_hop_key, + next_hop_entry->next_hop_oid); +} + +std::string NextHopManager::verifyStateAsicDb(const P4NextHopEntry *next_hop_entry) +{ + auto attrs_or = getSaiAttrs(*next_hop_entry); + if (!attrs_or.ok()) + { + return std::string("Failed to get SAI attrs: ") + attrs_or.status().message(); + } + std::vector attrs = *attrs_or; + std::vector exp = + saimeta::SaiAttributeList::serialize_attr_list(SAI_OBJECT_TYPE_NEXT_HOP, (uint32_t)attrs.size(), attrs.data(), + /*countOnly=*/false); + + swss::DBConnector db("ASIC_DB", 0); + swss::Table table(&db, "ASIC_STATE"); + std::string key = 
sai_serialize_object_type(SAI_OBJECT_TYPE_NEXT_HOP) + ":" + + sai_serialize_object_id(next_hop_entry->next_hop_oid); + std::vector values; + if (!table.get(key, values)) + { + return std::string("ASIC DB key not found ") + key; + } + + return verifyAttrs(values, exp, std::vector{}, + /*allow_unknown=*/false); +} diff --git a/orchagent/p4orch/next_hop_manager.h b/orchagent/p4orch/next_hop_manager.h index 7b4a318f87..7bacdad534 100644 --- a/orchagent/p4orch/next_hop_manager.h +++ b/orchagent/p4orch/next_hop_manager.h @@ -6,6 +6,7 @@ #include "ipaddress.h" #include "orch.h" +#include "p4orch/gre_tunnel_manager.h" #include "p4orch/neighbor_manager.h" #include "p4orch/object_manager_interface.h" #include "p4orch/p4oidmapper.h" @@ -29,13 +30,14 @@ struct P4NextHopEntry std::string next_hop_id; // Action std::string router_interface_id; + std::string gre_tunnel_id; swss::IpAddress neighbor_id; // SAI OID associated with this entry. - sai_object_id_t next_hop_oid = 0; + sai_object_id_t next_hop_oid = SAI_NULL_OBJECT_ID; P4NextHopEntry(const std::string &next_hop_id, const std::string &router_interface_id, - const swss::IpAddress &neighbor_id); + const std::string &gre_tunnel_id, const swss::IpAddress &neighbor_id); }; // NextHopManager listens to changes in table APP_P4RT_NEXTHOP_TABLE_NAME and @@ -55,8 +57,10 @@ class NextHopManager : public ObjectManagerInterface virtual ~NextHopManager() = default; - void enqueue(const swss::KeyOpFieldsValuesTuple &entry) override; + void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; void drain() override; + std::string verifyState(const std::string &key, const std::vector &tuple) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) override; private: // Gets the internal cached next hop entry by its key. @@ -82,6 +86,15 @@ class NextHopManager : public ObjectManagerInterface // Deletes an next hop in the next hop table. 
Return true on success. ReturnCode removeNextHop(const std::string &next_hop_key); + // Verifies internal cache for an entry. + std::string verifyStateCache(const P4NextHopAppDbEntry &app_db_entry, const P4NextHopEntry *next_hop_entry); + + // Verifies ASIC DB for an entry. + std::string verifyStateAsicDb(const P4NextHopEntry *next_hop_entry); + + // Returns the SAI attributes for an entry. + ReturnCodeOr> getSaiAttrs(const P4NextHopEntry &next_hop_entry); + // m_nextHopTable: next_hop_key, P4NextHopEntry std::unordered_map m_nextHopTable; diff --git a/orchagent/p4orch/object_manager_interface.h b/orchagent/p4orch/object_manager_interface.h index ec9775f8e4..966288a156 100644 --- a/orchagent/p4orch/object_manager_interface.h +++ b/orchagent/p4orch/object_manager_interface.h @@ -8,8 +8,15 @@ class ObjectManagerInterface virtual ~ObjectManagerInterface() = default; // Enqueues an entry into the manager - virtual void enqueue(const swss::KeyOpFieldsValuesTuple &entry) = 0; + virtual void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) = 0; // Processes all entries in the queue virtual void drain() = 0; + + // StateVerification helper function for the manager + virtual std::string verifyState(const std::string &key, const std::vector &tuple) = 0; + + // For sai extension objects depending on a sai object + // return sai object id for a given table with a given key + virtual ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) = 0; }; diff --git a/orchagent/p4orch/p4oidmapper.cpp b/orchagent/p4orch/p4oidmapper.cpp index f4ff6e3433..63215846a6 100644 --- a/orchagent/p4orch/p4oidmapper.cpp +++ b/orchagent/p4orch/p4oidmapper.cpp @@ -1,6 +1,7 @@ #include "p4oidmapper.h" #include +#include #include #include "logger.h" @@ -41,7 +42,8 @@ bool P4OidMapper::setOID(_In_ sai_object_type_t object_type, _In_ const std::str return true; } -bool P4OidMapper::getOID(_In_ sai_object_type_t 
object_type, _In_ const std::string &key, _Out_ sai_object_id_t *oid) +bool P4OidMapper::getOID(_In_ sai_object_type_t object_type, _In_ const std::string &key, + _Out_ sai_object_id_t *oid) const { SWSS_LOG_ENTER(); @@ -57,12 +59,12 @@ bool P4OidMapper::getOID(_In_ sai_object_type_t object_type, _In_ const std::str return false; } - *oid = m_oidTables[object_type][key].sai_oid; + *oid = m_oidTables[object_type].at(key).sai_oid; return true; } bool P4OidMapper::getRefCount(_In_ sai_object_type_t object_type, _In_ const std::string &key, - _Out_ uint32_t *ref_count) + _Out_ uint32_t *ref_count) const { SWSS_LOG_ENTER(); @@ -80,7 +82,7 @@ bool P4OidMapper::getRefCount(_In_ sai_object_type_t object_type, _In_ const std return false; } - *ref_count = m_oidTables[object_type][key].ref_count; + *ref_count = m_oidTables[object_type].at(key).ref_count; return true; } @@ -117,14 +119,14 @@ void P4OidMapper::eraseAllOIDs(_In_ sai_object_type_t object_type) m_table.del(""); } -size_t P4OidMapper::getNumEntries(_In_ sai_object_type_t object_type) +size_t P4OidMapper::getNumEntries(_In_ sai_object_type_t object_type) const { SWSS_LOG_ENTER(); return (m_oidTables[object_type].size()); } -bool P4OidMapper::existsOID(_In_ sai_object_type_t object_type, _In_ const std::string &key) +bool P4OidMapper::existsOID(_In_ sai_object_type_t object_type, _In_ const std::string &key) const { SWSS_LOG_ENTER(); @@ -178,3 +180,40 @@ bool P4OidMapper::decreaseRefCount(_In_ sai_object_type_t object_type, _In_ cons m_oidTables[object_type][key].ref_count--; return true; } + +std::string P4OidMapper::verifyOIDMapping(_In_ sai_object_type_t object_type, _In_ const std::string &key, + _In_ sai_object_id_t oid) +{ + SWSS_LOG_ENTER(); + + sai_object_id_t mapper_oid; + if (!getOID(object_type, key, &mapper_oid)) + { + std::stringstream msg; + msg << "OID not found in mapper for key " << key; + return msg.str(); + } + if (mapper_oid != oid) + { + std::stringstream msg; + msg << "OID mismatched in mapper 
for key " << key << ": " << sai_serialize_object_id(oid) << " vs " + << sai_serialize_object_id(mapper_oid); + return msg.str(); + } + std::string db_oid; + if (!m_table.hget("", convertToDBField(object_type, key), db_oid)) + { + std::stringstream msg; + msg << "OID not found in mapper DB for key " << key; + return msg.str(); + } + if (db_oid != sai_serialize_object_id(oid)) + { + std::stringstream msg; + msg << "OID mismatched in mapper DB for key " << key << ": " << db_oid << " vs " + << sai_serialize_object_id(oid); + return msg.str(); + } + + return ""; +} diff --git a/orchagent/p4orch/p4oidmapper.h b/orchagent/p4orch/p4oidmapper.h index 6f7b86ab8f..325acf9503 100644 --- a/orchagent/p4orch/p4oidmapper.h +++ b/orchagent/p4orch/p4oidmapper.h @@ -37,11 +37,11 @@ class P4OidMapper // Gets oid for the given key for the SAI object_type. // Returns true on success. - bool getOID(_In_ sai_object_type_t object_type, _In_ const std::string &key, _Out_ sai_object_id_t *oid); + bool getOID(_In_ sai_object_type_t object_type, _In_ const std::string &key, _Out_ sai_object_id_t *oid) const; // Gets the reference count for the given key for the SAI object_type. // Returns true on success. - bool getRefCount(_In_ sai_object_type_t object_type, _In_ const std::string &key, _Out_ uint32_t *ref_count); + bool getRefCount(_In_ sai_object_type_t object_type, _In_ const std::string &key, _Out_ uint32_t *ref_count) const; // Erases oid for the given key for the SAI object_type. // This function checks if the reference count is zero or not before the @@ -54,11 +54,11 @@ class P4OidMapper void eraseAllOIDs(_In_ sai_object_type_t object_type); // Gets the number of oids for the SAI object_type. - size_t getNumEntries(_In_ sai_object_type_t object_type); + size_t getNumEntries(_In_ sai_object_type_t object_type) const; // Checks whether OID mapping exists for the given key for the specific // object type. 
- bool existsOID(_In_ sai_object_type_t object_type, _In_ const std::string &key); + bool existsOID(_In_ sai_object_type_t object_type, _In_ const std::string &key) const; // Increases the reference count for the given object. // Returns true on success. @@ -68,6 +68,12 @@ class P4OidMapper // Returns true on success. bool decreaseRefCount(_In_ sai_object_type_t object_type, _In_ const std::string &key); + // Verifies the OID mapping. + // Returns an empty string if the input has the correct mapping. Returns a + // non-empty error string otherwise. + std::string verifyOIDMapping(_In_ sai_object_type_t object_type, _In_ const std::string &key, + _In_ sai_object_id_t oid); + private: struct MapperEntry { diff --git a/orchagent/p4orch/p4orch.cpp b/orchagent/p4orch/p4orch.cpp index 57d50aa5ce..eca0918171 100644 --- a/orchagent/p4orch/p4orch.cpp +++ b/orchagent/p4orch/p4orch.cpp @@ -8,12 +8,17 @@ #include "copporch.h" #include "logger.h" #include "orch.h" +#include "p4orch/p4orch_util.h" +#include "p4orch/tables_definition_manager.h" #include "p4orch/acl_rule_manager.h" #include "p4orch/acl_table_manager.h" +#include "p4orch/gre_tunnel_manager.h" +#include "p4orch/l3_admit_manager.h" #include "p4orch/neighbor_manager.h" #include "p4orch/next_hop_manager.h" #include "p4orch/route_manager.h" #include "p4orch/router_interface_manager.h" +#include "p4orch/ext_tables_manager.h" #include "portsorch.h" #include "return_code.h" #include "sai_serialize.h" @@ -21,46 +26,68 @@ extern PortsOrch *gPortsOrch; #define P4_ACL_COUNTERS_STATS_POLL_TIMER_NAME "P4_ACL_COUNTERS_STATS_POLL_TIMER" +#define P4_EXT_COUNTERS_STATS_POLL_TIMER_NAME "P4_EXT_COUNTERS_STATS_POLL_TIMER" +#define APP_P4RT_EXT_TABLES_MANAGER "EXT_TABLES_MANAGER" P4Orch::P4Orch(swss::DBConnector *db, std::vector tableNames, VRFOrch *vrfOrch, CoppOrch *coppOrch) : Orch(db, tableNames) { SWSS_LOG_ENTER(); + m_tablesDefnManager = std::make_unique(&m_p4OidMapper, &m_publisher); m_routerIntfManager = 
std::make_unique(&m_p4OidMapper, &m_publisher); m_neighborManager = std::make_unique(&m_p4OidMapper, &m_publisher); + m_greTunnelManager = std::make_unique(&m_p4OidMapper, &m_publisher); m_nextHopManager = std::make_unique(&m_p4OidMapper, &m_publisher); m_routeManager = std::make_unique(&m_p4OidMapper, vrfOrch, &m_publisher); m_mirrorSessionManager = std::make_unique(&m_p4OidMapper, &m_publisher); m_aclTableManager = std::make_unique(&m_p4OidMapper, &m_publisher); m_aclRuleManager = std::make_unique(&m_p4OidMapper, vrfOrch, coppOrch, &m_publisher); m_wcmpManager = std::make_unique(&m_p4OidMapper, &m_publisher); + m_l3AdmitManager = std::make_unique(&m_p4OidMapper, &m_publisher); + m_extTablesManager = std::make_unique(&m_p4OidMapper, vrfOrch, &m_publisher); + m_p4TableToManagerMap[APP_P4RT_TABLES_DEFINITION_TABLE_NAME] = m_tablesDefnManager.get(); m_p4TableToManagerMap[APP_P4RT_ROUTER_INTERFACE_TABLE_NAME] = m_routerIntfManager.get(); m_p4TableToManagerMap[APP_P4RT_NEIGHBOR_TABLE_NAME] = m_neighborManager.get(); + m_p4TableToManagerMap[APP_P4RT_TUNNEL_TABLE_NAME] = m_greTunnelManager.get(); m_p4TableToManagerMap[APP_P4RT_NEXTHOP_TABLE_NAME] = m_nextHopManager.get(); m_p4TableToManagerMap[APP_P4RT_IPV4_TABLE_NAME] = m_routeManager.get(); m_p4TableToManagerMap[APP_P4RT_IPV6_TABLE_NAME] = m_routeManager.get(); m_p4TableToManagerMap[APP_P4RT_MIRROR_SESSION_TABLE_NAME] = m_mirrorSessionManager.get(); m_p4TableToManagerMap[APP_P4RT_ACL_TABLE_DEFINITION_NAME] = m_aclTableManager.get(); m_p4TableToManagerMap[APP_P4RT_WCMP_GROUP_TABLE_NAME] = m_wcmpManager.get(); + m_p4TableToManagerMap[APP_P4RT_L3_ADMIT_TABLE_NAME] = m_l3AdmitManager.get(); + m_p4TableToManagerMap[APP_P4RT_EXT_TABLES_MANAGER] = m_extTablesManager.get(); + m_p4ManagerPrecedence.push_back(m_tablesDefnManager.get()); m_p4ManagerPrecedence.push_back(m_routerIntfManager.get()); m_p4ManagerPrecedence.push_back(m_neighborManager.get()); + m_p4ManagerPrecedence.push_back(m_greTunnelManager.get()); 
m_p4ManagerPrecedence.push_back(m_nextHopManager.get()); m_p4ManagerPrecedence.push_back(m_wcmpManager.get()); m_p4ManagerPrecedence.push_back(m_routeManager.get()); m_p4ManagerPrecedence.push_back(m_mirrorSessionManager.get()); m_p4ManagerPrecedence.push_back(m_aclTableManager.get()); m_p4ManagerPrecedence.push_back(m_aclRuleManager.get()); + m_p4ManagerPrecedence.push_back(m_l3AdmitManager.get()); + m_p4ManagerPrecedence.push_back(m_extTablesManager.get()); + tablesinfo = nullptr; // Add timer executor to update ACL counters stats in COUNTERS_DB - auto interv = timespec{.tv_sec = P4_COUNTERS_READ_INTERVAL, .tv_nsec = 0}; - m_aclCounterStatsTimer = new swss::SelectableTimer(interv); - auto executor = new swss::ExecutableTimer(m_aclCounterStatsTimer, this, P4_ACL_COUNTERS_STATS_POLL_TIMER_NAME); - Orch::addExecutor(executor); + auto acl_interv = timespec{.tv_sec = P4_COUNTERS_READ_INTERVAL, .tv_nsec = 0}; + m_aclCounterStatsTimer = new swss::SelectableTimer(acl_interv); + auto acl_executor = new swss::ExecutableTimer(m_aclCounterStatsTimer, this, P4_ACL_COUNTERS_STATS_POLL_TIMER_NAME); + Orch::addExecutor(acl_executor); m_aclCounterStatsTimer->start(); + // Add timer executor to update EXT counters stats in COUNTERS_DB + auto ext_interv = timespec{.tv_sec = P4_COUNTERS_READ_INTERVAL, .tv_nsec = 0}; + m_extCounterStatsTimer = new swss::SelectableTimer(ext_interv); + auto ext_executor = new swss::ExecutableTimer(m_extCounterStatsTimer, this, P4_EXT_COUNTERS_STATS_POLL_TIMER_NAME); + Orch::addExecutor(ext_executor); + m_extCounterStatsTimer->start(); + // Add port state change notification handling support swss::DBConnector notificationsDb("ASIC_DB", 0); m_portStatusNotificationConsumer = new swss::NotificationConsumer(¬ificationsDb, "NOTIFICATIONS"); @@ -102,16 +129,25 @@ void P4Orch::doTask(Consumer &consumer) status); continue; } - if (m_p4TableToManagerMap.find(table_name) == m_p4TableToManagerMap.end()) + if (m_p4TableToManagerMap.find(table_name) != 
m_p4TableToManagerMap.end()) { - auto status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "Failed to find P4Orch Manager for " << table_name << " P4RT DB table"; - SWSS_LOG_ERROR("%s", status.message().c_str()); - m_publisher.publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), - status); - continue; + m_p4TableToManagerMap[table_name]->enqueue(table_name, key_op_fvs_tuple); + } + else + { + if (table_name.rfind(p4orch::kTablePrefixEXT, 0) != std::string::npos) + { + m_p4TableToManagerMap[APP_P4RT_EXT_TABLES_MANAGER]->enqueue(table_name, key_op_fvs_tuple); + } + else + { + auto status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Failed to find P4Orch Manager for " << table_name << " P4RT DB table"; + SWSS_LOG_ERROR("%s", status.message().c_str()); + m_publisher.publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), + status); + } } - m_p4TableToManagerMap[table_name]->enqueue(key_op_fvs_tuple); } for (const auto &manager : m_p4ManagerPrecedence) @@ -133,6 +169,10 @@ void P4Orch::doTask(swss::SelectableTimer &timer) { m_aclRuleManager->doAclCounterStatsTask(); } + else if (&timer == m_extCounterStatsTimer) + { + m_extTablesManager->doExtCounterStatsTask(); + } else { SWSS_LOG_NOTICE("Unrecognized timer passed in P4Orch::doTask(swss::SelectableTimer& " @@ -235,3 +275,8 @@ p4orch::WcmpManager *P4Orch::getWcmpManager() { return m_wcmpManager.get(); } + +GreTunnelManager *P4Orch::getGreTunnelManager() +{ + return m_greTunnelManager.get(); +} diff --git a/orchagent/p4orch/p4orch.h b/orchagent/p4orch/p4orch.h index 42159f3981..9385346d20 100644 --- a/orchagent/p4orch/p4orch.h +++ b/orchagent/p4orch/p4orch.h @@ -10,8 +10,11 @@ #include "notificationconsumer.h" #include "notifier.h" #include "orch.h" +#include "p4orch/tables_definition_manager.h" #include "p4orch/acl_rule_manager.h" #include "p4orch/acl_table_manager.h" +#include "p4orch/gre_tunnel_manager.h" +#include 
"p4orch/l3_admit_manager.h" #include "p4orch/mirror_session_manager.h" #include "p4orch/neighbor_manager.h" #include "p4orch/next_hop_manager.h" @@ -20,9 +23,22 @@ #include "p4orch/route_manager.h" #include "p4orch/router_interface_manager.h" #include "p4orch/wcmp_manager.h" +#include "p4orch/ext_tables_manager.h" #include "response_publisher.h" #include "vrforch.h" +static const std::map FixedTablesMap = { + {"router_interface_table", APP_P4RT_ROUTER_INTERFACE_TABLE_NAME }, + {"neighbor_table", APP_P4RT_NEIGHBOR_TABLE_NAME}, + {"nexthop_table", APP_P4RT_NEXTHOP_TABLE_NAME}, + {"wcmp_group_table", APP_P4RT_WCMP_GROUP_TABLE_NAME}, + {"ipv4_table", APP_P4RT_IPV4_TABLE_NAME}, + {"ipv6_table", APP_P4RT_IPV6_TABLE_NAME}, + {"mirror_session_table", APP_P4RT_MIRROR_SESSION_TABLE_NAME}, + {"l3_admit_table", APP_P4RT_L3_ADMIT_TABLE_NAME}, + {"tunnel_table", APP_P4RT_TUNNEL_TABLE_NAME} +}; + class P4Orch : public Orch { public: @@ -34,6 +50,12 @@ class P4Orch : public Orch p4orch::AclTableManager *getAclTableManager(); p4orch::AclRuleManager *getAclRuleManager(); p4orch::WcmpManager *getWcmpManager(); + GreTunnelManager *getGreTunnelManager(); + TablesInfo *tablesinfo = NULL; + + // m_p4TableToManagerMap: P4 APP DB table name, P4 Object Manager + std::unordered_map m_p4TableToManagerMap; + private: void doTask(Consumer &consumer); @@ -41,14 +63,15 @@ class P4Orch : public Orch void doTask(swss::NotificationConsumer &consumer); void handlePortStatusChangeNotification(const std::string &op, const std::string &data); - // m_p4TableToManagerMap: P4 APP DB table name, P4 Object Manager - std::unordered_map m_p4TableToManagerMap; // P4 object manager request processing order. 
std::vector m_p4ManagerPrecedence; swss::SelectableTimer *m_aclCounterStatsTimer; + swss::SelectableTimer *m_extCounterStatsTimer; P4OidMapper m_p4OidMapper; + std::unique_ptr m_tablesDefnManager; std::unique_ptr m_routerIntfManager; + std::unique_ptr m_greTunnelManager; std::unique_ptr m_neighborManager; std::unique_ptr m_nextHopManager; std::unique_ptr m_routeManager; @@ -56,6 +79,8 @@ class P4Orch : public Orch std::unique_ptr m_aclTableManager; std::unique_ptr m_aclRuleManager; std::unique_ptr m_wcmpManager; + std::unique_ptr m_l3AdmitManager; + std::unique_ptr m_extTablesManager; // Notification consumer for port state change swss::NotificationConsumer *m_portStatusNotificationConsumer; diff --git a/orchagent/p4orch/p4orch_util.cpp b/orchagent/p4orch/p4orch_util.cpp index e5d4479436..b2ea0a762b 100644 --- a/orchagent/p4orch/p4orch_util.cpp +++ b/orchagent/p4orch/p4orch_util.cpp @@ -1,8 +1,10 @@ +#include "p4orch/p4orch.h" #include "p4orch/p4orch_util.h" #include "schema.h" using ::p4orch::kTableKeyDelimiter; +extern P4Orch *gP4Orch; // Prepends "match/" to the input string str to construct a new string. 
std::string prependMatchField(const std::string &str) @@ -29,6 +31,97 @@ void parseP4RTKey(const std::string &key, std::string *table_name, std::string * *key_content = key.substr(pos + 1); } +std::string verifyAttrs(const std::vector &targets, + const std::vector &exp, const std::vector &opt, + bool allow_unknown) +{ + std::map exp_map; + for (const auto &fv : exp) + { + exp_map[fvField(fv)] = fvValue(fv); + } + std::map opt_map; + for (const auto &fv : opt) + { + opt_map[fvField(fv)] = fvValue(fv); + } + + std::set fields; + for (const auto &fv : targets) + { + fields.insert(fvField(fv)); + bool found = false; + if (exp_map.count(fvField(fv))) + { + found = true; + if (fvValue(fv) != exp_map.at(fvField(fv))) + { + return fvField(fv) + " value mismatch, exp " + exp_map.at(fvField(fv)) + " got " + fvValue(fv); + } + } + if (opt_map.count(fvField(fv))) + { + found = true; + if (fvValue(fv) != opt_map.at(fvField(fv))) + { + return fvField(fv) + " value mismatch, exp " + opt_map.at(fvField(fv)) + " got " + fvValue(fv); + } + } + if (!found && !allow_unknown) + { + return std::string("Unexpected field ") + fvField(fv); + } + } + for (const auto &it : exp_map) + { + if (!fields.count(it.first)) + { + return std::string("Missing field ") + it.first; + } + } + return ""; +} + +TableInfo *getTableInfo(const std::string &table_name) +{ + if (!gP4Orch->tablesinfo) + { + return nullptr; + } + + auto it = gP4Orch->tablesinfo->m_tableInfoMap.find(table_name); + if (it == gP4Orch->tablesinfo->m_tableInfoMap.end()) + { + return nullptr; + } + + return &it->second; +} + +ActionInfo *getTableActionInfo(TableInfo *table, const std::string &action_name) +{ + if (!table) + { + return nullptr; + } + + auto it = table->action_fields.find(action_name); + if (it == table->action_fields.end()) + { + return nullptr; + } + + return &it->second; +} + +std::string KeyGenerator::generateTablesInfoKey(const std::string &context) +{ + std::map fv_map = { + {"context", context} + }; + return 
generateKey(fv_map); +} + std::string KeyGenerator::generateRouteKey(const std::string &vrf_id, const swss::IpPrefix &ip_prefix) { std::map fv_map = { @@ -80,6 +173,38 @@ std::string KeyGenerator::generateAclRuleKey(const std::map fv_map = {}; + fv_map.emplace(std::string(p4orch::kMatchPrefix) + p4orch::kFieldDelimiter + p4orch::kDstMac, + mac_address_data.to_string() + p4orch::kDataMaskDelimiter + mac_address_mask.to_string()); + if (!port_name.empty()) + { + fv_map.emplace(std::string(p4orch::kMatchPrefix) + p4orch::kFieldDelimiter + p4orch::kInPort, port_name); + } + fv_map.emplace(p4orch::kPriority, std::to_string(priority)); + return generateKey(fv_map); +} + +std::string KeyGenerator::generateTunnelKey(const std::string &tunnel_id) +{ + std::map fv_map = {{p4orch::kTunnelId, tunnel_id}}; + return generateKey(fv_map); +} + +std::string KeyGenerator::generateExtTableKey(const std::string &table_name, const std::string &table_key) +{ + std::string key; + + key.append(table_name); + key.append(":"); + key.append(table_key); + + return key; +} + std::string KeyGenerator::generateKey(const std::map &fv_map) { std::string key; @@ -101,3 +226,10 @@ std::string KeyGenerator::generateKey(const std::map & return key; } + +std::string trim(const std::string &s) +{ + size_t end = s.find_last_not_of(" "); + size_t start = s.find_first_not_of(" "); + return (end == std::string::npos) ? "" : s.substr(start, end - start + 1); +} diff --git a/orchagent/p4orch/p4orch_util.h b/orchagent/p4orch/p4orch_util.h index a3684a5fb8..f95a9fd8eb 100644 --- a/orchagent/p4orch/p4orch_util.h +++ b/orchagent/p4orch/p4orch_util.h @@ -2,20 +2,30 @@ #include #include +#include #include #include #include +#include #include "ipaddress.h" #include "ipprefix.h" #include "macaddress.h" +#include "table.h" +extern "C" +{ +#include "saitypes.h" +} + namespace p4orch { // Field names in P4RT APP DB entry. 
+constexpr char *kTablePrefixEXT = "EXT_"; constexpr char *kRouterInterfaceId = "router_interface_id"; constexpr char *kPort = "port"; +constexpr char *kInPort = "in_port"; constexpr char *kSrcMac = "src_mac"; constexpr char *kAction = "action"; constexpr char *kActions = "actions"; @@ -24,13 +34,22 @@ constexpr char *kWatchPort = "watch_port"; constexpr char *kNeighborId = "neighbor_id"; constexpr char *kDstMac = "dst_mac"; constexpr char *kNexthopId = "nexthop_id"; +constexpr char *kTunnelId = "tunnel_id"; constexpr char *kVrfId = "vrf_id"; constexpr char *kIpv4Dst = "ipv4_dst"; constexpr char *kIpv6Dst = "ipv6_dst"; constexpr char *kWcmpGroupId = "wcmp_group_id"; +constexpr char *kRouteMetadata = "route_metadata"; constexpr char *kSetNexthopId = "set_nexthop_id"; constexpr char *kSetWcmpGroupId = "set_wcmp_group_id"; +constexpr char *kSetNexthopIdAndMetadata = "set_nexthop_id_and_metadata"; +constexpr char *kSetWcmpGroupIdAndMetadata = "set_wcmp_group_id_and_metadata"; +constexpr char *kSetMetadataAndDrop = "set_metadata_and_drop"; +constexpr char *kSetNexthop = "set_nexthop"; +constexpr char *kSetIpNexthop = "set_ip_nexthop"; +constexpr char *kSetTunnelNexthop = "set_p2p_tunnel_encap_nexthop"; constexpr char *kDrop = "drop"; +constexpr char *kTrap = "trap"; constexpr char *kStage = "stage"; constexpr char *kSize = "size"; constexpr char *kPriority = "priority"; @@ -61,9 +80,26 @@ constexpr char *kAclUdfOffset = "offset"; constexpr char *kMirrorSessionId = "mirror_session_id"; constexpr char *kSrcIp = "src_ip"; constexpr char *kDstIp = "dst_ip"; +constexpr char *kEncapSrcIp = "encap_src_ip"; +constexpr char *kEncapDstIp = "encap_dst_ip"; constexpr char *kTtl = "ttl"; constexpr char *kTos = "tos"; constexpr char *kMirrorAsIpv4Erspan = "mirror_as_ipv4_erspan"; +constexpr char *kL3AdmitAction = "admit_to_l3"; +constexpr char *kTunnelAction = "mark_for_p2p_tunnel_encap"; + +// Field names in P4RT TABLE DEFINITION APP DB entry. 
+constexpr char *kTables = "tables"; +constexpr char *kId = "id"; +constexpr char *kName = "name"; +constexpr char *kAlias = "alias"; +constexpr char *kBitwidth = "bitwidth"; +constexpr char *kFormat = "format"; +constexpr char *kmatchFields = "matchFields"; +constexpr char *kActionParams = "params"; +constexpr char *kReferences = "references"; +constexpr char *kTableRef = "table"; +constexpr char *kMatchRef = "match"; } // namespace p4orch // Prepends "match/" to the input string str to construct a new string. @@ -72,6 +108,58 @@ std::string prependMatchField(const std::string &str); // Prepends "param/" to the input string str to construct a new string. std::string prependParamField(const std::string &str); +struct ActionParamInfo +{ + std::string name; + std::string fieldtype; + std::string datatype; + std::unordered_map table_reference_map; +}; + +struct ActionInfo +{ + std::string name; + std::unordered_map params; + bool refers_to; +}; + +struct TableMatchInfo +{ + std::string name; + std::string fieldtype; + std::string datatype; + std::unordered_map table_reference_map; +}; + +/** + * Dervied table definition + * This is a derived state out of table definition provided by P4RT-APP + */ +struct TableInfo +{ + std::string name; + int id; + int precedence; + std::unordered_map match_fields; + std::unordered_map action_fields; + bool counter_bytes_enabled; + bool counter_packets_enabled; + std::vector action_ref_tables; + // list of tables across all actions, of current table, refer to +}; + +/** + * table-name to table-definition map + */ +typedef std::unordered_map TableInfoMap; + +struct TablesInfoAppDbEntry +{ + std::string context; + std::string info; +}; + + struct P4RouterInterfaceAppDbEntry { std::string router_interface_id; @@ -89,6 +177,17 @@ struct P4NeighborAppDbEntry bool is_set_dst_mac = false; }; +struct P4GreTunnelAppDbEntry +{ + // Match + std::string tunnel_id; + // Action + std::string router_interface_id; + swss::IpAddress encap_src_ip; + 
swss::IpAddress encap_dst_ip; + std::string action_str; +}; + // P4NextHopAppDbEntry holds entry deserialized from table // APP_P4RT_NEXTHOP_TABLE_NAME. struct P4NextHopAppDbEntry @@ -97,9 +196,20 @@ struct P4NextHopAppDbEntry std::string next_hop_id; // Fields std::string router_interface_id; + std::string gre_tunnel_id; swss::IpAddress neighbor_id; - bool is_set_router_interface_id = false; - bool is_set_neighbor_id = false; + std::string action_str; +}; + +// P4L3AdmitAppDbEntry holds entry deserialized from table +// APP_P4RT_L3_ADMIT_TABLE_NAME. +struct P4L3AdmitAppDbEntry +{ + // Key (match parameters) + std::string port_name; // Optional + swss::MacAddress mac_address_data; + swss::MacAddress mac_address_mask; + uint32_t priority; }; struct P4MirrorSessionAppDbEntry @@ -183,6 +293,26 @@ struct P4AclRuleAppDbEntry P4AclMeterAppDb meter; }; +struct DepObject +{ + sai_object_type_t sai_object; + std::string key; + sai_object_id_t oid; +}; + +struct P4ExtTableAppDbEntry +{ + std::string db_key; + std::string table_name; + std::string table_key; + std::unordered_map> action_params; + std::unordered_map action_dep_objects; +}; + + +TableInfo *getTableInfo(const std::string &table_name); +ActionInfo *getTableActionInfo(TableInfo *table, const std::string &action_name); + // Get the table name and key content from the given P4RT key. // Outputs will be empty strings in case of error. // Example: FIXED_NEIGHBOR_TABLE:{content} @@ -190,11 +320,26 @@ struct P4AclRuleAppDbEntry // Key content: {content} void parseP4RTKey(const std::string &key, std::string *table_name, std::string *key_content); +// State verification function that verifies the table attributes. +// Returns a non-empty string if verification fails. +// +// targets: the table attributes that we need to verify. +// exp: the attributes that must be included and have correct value. +// opt: the attributes that can be excluded, but must have correct value if +// included. 
+// allow_unknown: if set to false, verification will fail if there is an +// attribute that is not in exp or opt. +std::string verifyAttrs(const std::vector &targets, + const std::vector &exp, const std::vector &opt, + bool allow_unknown); + // class KeyGenerator includes member functions to generate keys for entries // stored in P4 Orch managers. class KeyGenerator { public: + static std::string generateTablesInfoKey(const std::string &context); + static std::string generateRouteKey(const std::string &vrf_id, const swss::IpPrefix &ip_prefix); static std::string generateRouterInterfaceKey(const std::string &router_intf_id); @@ -210,6 +355,14 @@ class KeyGenerator static std::string generateAclRuleKey(const std::map &match_fields, const std::string &priority); + static std::string generateL3AdmitKey(const swss::MacAddress &mac_address_data, + const swss::MacAddress &mac_address_mask, const std::string &port_name, + const uint32_t &priority); + + static std::string generateTunnelKey(const std::string &tunnel_id); + + static std::string generateExtTableKey(const std::string &table_name, const std::string &table_key); + // Generates key used by object managers and centralized mapper. // Takes map of as input and returns a concatenated string // of the form id1=value1:id2=value2... 
@@ -224,3 +377,6 @@ template std::string QuotedVar(T name) ss << std::quoted(name, '\''); return ss.str(); } + +// Trim tailing and leading whitespace +std::string trim(const std::string &s); diff --git a/orchagent/p4orch/route_manager.cpp b/orchagent/p4orch/route_manager.cpp index 7732a143e5..3b1886179f 100644 --- a/orchagent/p4orch/route_manager.cpp +++ b/orchagent/p4orch/route_manager.cpp @@ -3,14 +3,22 @@ #include #include #include +#include #include #include +#include "SaiAttributeList.h" +#include "converter.h" #include "crmorch.h" +#include "dbconnector.h" #include "json.hpp" #include "logger.h" #include "p4orch/p4orch_util.h" +#include "sai_serialize.h" #include "swssnet.h" +#include "table.h" + +using ::p4orch::kTableKeyDelimiter; extern sai_object_id_t gSwitchId; extern sai_object_id_t gVirtualRouterId; @@ -19,86 +27,284 @@ extern sai_route_api_t *sai_route_api; extern CrmOrch *gCrmOrch; +extern size_t gMaxBulkSize; + namespace { -// This function will perform a route update. A route update will have two -// attribute update. If the second attribut update fails, the function will try -// to revert the first attribute. If the revert fails, the function will raise -// critical state. -ReturnCode UpdateRouteAttrs(sai_packet_action_t old_action, sai_packet_action_t new_action, sai_object_id_t old_nexthop, - sai_object_id_t new_nexthop, const std::string &route_entry_key, - sai_route_entry_t *rotue_entry) +ReturnCode checkNextHopAndWcmpGroupAndRouteMetadataExistence(bool expected_next_hop_existence, + bool expected_wcmp_group_existence, + bool expected_route_metadata_existence, + const P4RouteEntry &route_entry) { - SWSS_LOG_ENTER(); - // For drop action, we will update the action attribute first. 
- bool action_first = (new_action == SAI_PACKET_ACTION_DROP); + if (route_entry.nexthop_id.empty() && expected_next_hop_existence) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Empty nexthop_id for route with " << route_entry.action << " action"; + } + if (!route_entry.nexthop_id.empty() && !expected_next_hop_existence) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Non-empty nexthop_id for route with " << route_entry.action << " action"; + } + if (route_entry.wcmp_group.empty() && expected_wcmp_group_existence) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Empty wcmp_group_id for route with " << route_entry.action << " action"; + } + if (!route_entry.wcmp_group.empty() && !expected_wcmp_group_existence) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Non-empty wcmp_group_id for route with " << route_entry.action << " action"; + } + if (route_entry.route_metadata.empty() && expected_route_metadata_existence) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Empty route_metadata for route with " << route_entry.action << " action"; + } + if (!route_entry.route_metadata.empty() && !expected_route_metadata_existence) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Non-empty route_metadata for route with " << route_entry.action << " action"; + } + return ReturnCode(); +} - // First attribute - sai_attribute_t route_attr; - route_attr.id = (action_first) ? SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION : SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; - if (action_first) +// Returns the nexthop OID of the given entry. +// Raise critical state if OID cannot be found. 
+sai_object_id_t getNexthopOid(const P4RouteEntry &route_entry, const P4OidMapper &mapper) +{ + sai_object_id_t oid = SAI_NULL_OBJECT_ID; + if (route_entry.action == p4orch::kSetNexthopId || route_entry.action == p4orch::kSetNexthopIdAndMetadata) { - route_attr.value.s32 = new_action; + auto nexthop_key = KeyGenerator::generateNextHopKey(route_entry.nexthop_id); + if (!mapper.getOID(SAI_OBJECT_TYPE_NEXT_HOP, nexthop_key, &oid)) + { + std::stringstream msg; + msg << "Nexthop " << QuotedVar(route_entry.nexthop_id) << " does not exist"; + SWSS_LOG_ERROR("%s", msg.str().c_str()); + SWSS_RAISE_CRITICAL_STATE(msg.str()); + return oid; + } } - else + else if (route_entry.action == p4orch::kSetWcmpGroupId || route_entry.action == p4orch::kSetWcmpGroupIdAndMetadata) { - route_attr.value.oid = new_nexthop; + auto wcmp_group_key = KeyGenerator::generateWcmpGroupKey(route_entry.wcmp_group); + if (!mapper.getOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, wcmp_group_key, &oid)) + { + std::stringstream msg; + msg << "WCMP group " << QuotedVar(route_entry.nexthop_id) << " does not exist"; + SWSS_LOG_ERROR("%s", msg.str().c_str()); + SWSS_RAISE_CRITICAL_STATE(msg.str()); + return oid; + } } - CHECK_ERROR_AND_LOG_AND_RETURN(sai_route_api->set_route_entry_attribute(rotue_entry, &route_attr), - "Failed to set SAI attribute " - << (action_first ? "SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION" - : "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID") - << " when updating route " << QuotedVar(route_entry_key)); + return oid; +} - // Second attribute - route_attr.id = (action_first) ? SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID : SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; - if (action_first) +// Returns the SAI action of the given entry. 
+sai_packet_action_t getSaiAction(const P4RouteEntry &route_entry) +{ + if (route_entry.action == p4orch::kDrop || route_entry.action == p4orch::kSetMetadataAndDrop) { - route_attr.value.oid = new_nexthop; + return SAI_PACKET_ACTION_DROP; } - else + else if (route_entry.action == p4orch::kTrap) { - route_attr.value.s32 = new_action; + return SAI_PACKET_ACTION_TRAP; } - ReturnCode status; - auto sai_status = sai_route_api->set_route_entry_attribute(rotue_entry, &route_attr); - if (sai_status == SAI_STATUS_SUCCESS) + return SAI_PACKET_ACTION_FORWARD; +} + +// Returns the metadata of the given entry. +uint32_t getMetadata(const P4RouteEntry &route_entry) +{ + if (route_entry.route_metadata.empty()) { - return ReturnCode(); + return 0; } - status = ReturnCode(sai_status) << "Failed to set SAI attribute " - << (action_first ? "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID" - : "SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION") - << " when updating route " << QuotedVar(route_entry_key); - SWSS_LOG_ERROR("%s SAI_STATUS: %s", status.message().c_str(), sai_serialize_status(sai_status).c_str()); + return swss::to_uint(route_entry.route_metadata); +} - // Revert the first attribute - route_attr.id = (action_first) ? SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION : SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; - if (action_first) +// Returns a list of SAI actions for route update. 
+std::vector getSaiActions(const std::string action) +{ + static const auto *const kRouteActionToSaiActions = + new std::unordered_map>({ + {p4orch::kSetNexthopId, + std::vector{SAI_ROUTE_ENTRY_ATTR_META_DATA, SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID, + SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION}}, + {p4orch::kSetWcmpGroupId, + std::vector{SAI_ROUTE_ENTRY_ATTR_META_DATA, SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID, + SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION}}, + {p4orch::kSetNexthopIdAndMetadata, + std::vector{SAI_ROUTE_ENTRY_ATTR_META_DATA, SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID, + SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION}}, + {p4orch::kSetWcmpGroupIdAndMetadata, + std::vector{SAI_ROUTE_ENTRY_ATTR_META_DATA, SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID, + SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION}}, + {p4orch::kDrop, + std::vector{SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION, SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID, + SAI_ROUTE_ENTRY_ATTR_META_DATA}}, + {p4orch::kTrap, + std::vector{SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION, SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID, + SAI_ROUTE_ENTRY_ATTR_META_DATA}}, + {p4orch::kSetMetadataAndDrop, + std::vector{SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION, SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID, + SAI_ROUTE_ENTRY_ATTR_META_DATA}}, + }); + + if (kRouteActionToSaiActions->count(action) == 0) { - route_attr.value.s32 = old_action; + return std::vector{}; } - else + return kRouteActionToSaiActions->at(action); +} + +} // namespace + +RouteUpdater::RouteUpdater(const P4RouteEntry &old_route, const P4RouteEntry &new_route, P4OidMapper *mapper) + : m_oldRoute(old_route), m_newRoute(new_route), m_p4OidMapper(mapper), m_actions(getSaiActions(new_route.action)) +{ + updateIdx(); +} + +P4RouteEntry RouteUpdater::getOldEntry() const +{ + return m_oldRoute; +} + +P4RouteEntry RouteUpdater::getNewEntry() const +{ + return m_newRoute; +} + +sai_route_entry_t RouteUpdater::getSaiEntry() const +{ + return m_newRoute.sai_route_entry; +} + +sai_attribute_t RouteUpdater::getSaiAttr() const +{ + sai_attribute_t route_attr = {}; + if (m_idx < 0 || m_idx >= 
static_cast(m_actions.size())) + { + return route_attr; + } + route_attr.id = m_actions[m_idx]; + switch (m_actions[m_idx]) { - route_attr.value.oid = old_nexthop; + case SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID: + route_attr.value.oid = + (m_revert) ? getNexthopOid(m_oldRoute, *m_p4OidMapper) : getNexthopOid(m_newRoute, *m_p4OidMapper); + break; + case SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION: + route_attr.value.s32 = (m_revert) ? getSaiAction(m_oldRoute) : getSaiAction(m_newRoute); + break; + default: + route_attr.value.u32 = (m_revert) ? getMetadata(m_oldRoute) : getMetadata(m_newRoute); } - sai_status = sai_route_api->set_route_entry_attribute(rotue_entry, &route_attr); + return route_attr; +} + +bool RouteUpdater::updateResult(sai_status_t sai_status) +{ if (sai_status != SAI_STATUS_SUCCESS) { - // Raise critical state if we fail to recover. - std::stringstream msg; - msg << "Failed to revert route attribute " - << (action_first ? "SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION" : "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID") - << " for route " << QuotedVar(route_entry_key); - SWSS_LOG_ERROR("%s SAI_STATUS: %s", msg.str().c_str(), sai_serialize_status(sai_status).c_str()); - SWSS_RAISE_CRITICAL_STATE(msg.str()); + if (m_revert) + { + std::stringstream msg; + msg << "Failed to revert SAI attribute for route entry " << QuotedVar(m_newRoute.route_entry_key); + SWSS_LOG_ERROR("%s SAI_STATUS: %s", msg.str().c_str(), sai_serialize_status(sai_status).c_str()); + SWSS_RAISE_CRITICAL_STATE(msg.str()); + } + else + { + m_status = ReturnCode(sai_status) + << "Failed to update route entry " << QuotedVar(m_newRoute.route_entry_key); + m_revert = true; + } } + return updateIdx(); +} - return status; +ReturnCode RouteUpdater::getStatus() const +{ + return m_status; } -} // namespace +bool RouteUpdater::updateIdx() +{ + if (m_revert) + { + for (--m_idx; m_idx >= 0; --m_idx) + { + if (checkAction()) + { + return false; + } + } + return true; + } + for (++m_idx; m_idx < static_cast(m_actions.size()); ++m_idx) + { 
+ if (checkAction()) + { + return false; + } + } + return true; +} + +bool RouteUpdater::checkAction() const +{ + if (m_idx < 0 || m_idx >= static_cast(m_actions.size())) + { + return false; + } + switch (m_actions[m_idx]) + { + case SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID: + if (getNexthopOid(m_oldRoute, *m_p4OidMapper) == getNexthopOid(m_newRoute, *m_p4OidMapper)) + { + return false; + } + return true; + case SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION: + if (getSaiAction(m_oldRoute) == getSaiAction(m_newRoute)) + { + return false; + } + return true; + default: + if (getMetadata(m_oldRoute) == getMetadata(m_newRoute)) + { + return false; + } + return true; + } + return false; +} + +RouteManager::RouteManager(P4OidMapper *p4oidMapper, VRFOrch *vrfOrch, ResponsePublisherInterface *publisher) + : m_vrfOrch(vrfOrch), m_routerBulker(sai_route_api, gMaxBulkSize) +{ + SWSS_LOG_ENTER(); + + assert(p4oidMapper != nullptr); + m_p4OidMapper = p4oidMapper; + assert(publisher != nullptr); + m_publisher = publisher; +} + +sai_route_entry_t RouteManager::getSaiEntry(const P4RouteEntry &route_entry) +{ + sai_route_entry_t sai_entry; + sai_entry.vr_id = m_vrfOrch->getVRFid(route_entry.vrf_id); + sai_entry.switch_id = gSwitchId; + copy(sai_entry.destination, route_entry.route_prefix); + return sai_entry; +} bool RouteManager::mergeRouteEntry(const P4RouteEntry &dest, const P4RouteEntry &src, P4RouteEntry *ret) { @@ -106,11 +312,8 @@ bool RouteManager::mergeRouteEntry(const P4RouteEntry &dest, const P4RouteEntry *ret = src; ret->sai_route_entry = dest.sai_route_entry; - if (ret->action.empty()) - { - ret->action = dest.action; - } - if (ret->action != dest.action || ret->nexthop_id != dest.nexthop_id || ret->wcmp_group != dest.wcmp_group) + if (ret->action != dest.action || ret->nexthop_id != dest.nexthop_id || ret->wcmp_group != dest.wcmp_group || + ret->route_metadata != dest.route_metadata) { return true; } @@ -183,6 +386,10 @@ ReturnCodeOr RouteManager::deserializeRouteEntry(const std::string 
{ route_entry.wcmp_group = value; } + else if (field == prependParamField(p4orch::kRouteMetadata)) + { + route_entry.route_metadata = value; + } else if (field != p4orch::kControllerMetadata) { return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) @@ -203,7 +410,7 @@ P4RouteEntry *RouteManager::getRouteEntry(const std::string &route_entry_key) return &m_routeTable[route_entry_key]; } -ReturnCode RouteManager::validateRouteEntry(const P4RouteEntry &route_entry) +ReturnCode RouteManager::validateRouteEntry(const P4RouteEntry &route_entry, const std::string &operation) { SWSS_LOG_ENTER(); @@ -229,7 +436,16 @@ ReturnCode RouteManager::validateRouteEntry(const P4RouteEntry &route_entry) { return ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) << "No VRF found with name " << QuotedVar(route_entry.vrf_id); } - return ReturnCode(); + + if (operation == SET_COMMAND) + { + return validateSetRouteEntry(route_entry); + } + else if (operation == DEL_COMMAND) + { + return validateDelRouteEntry(route_entry); + } + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Unknown operation type " << QuotedVar(operation); } ReturnCode RouteManager::validateSetRouteEntry(const P4RouteEntry &route_entry) @@ -258,45 +474,65 @@ ReturnCode RouteManager::validateSetRouteEntry(const P4RouteEntry &route_entry) } if (action == p4orch::kSetNexthopId) { - if (route_entry.nexthop_id.empty()) - { - return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Empty nexthop_id for route with nexthop_id action"; - } - if (!route_entry.wcmp_group.empty()) - { - return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "Non-empty wcmp_group_id for route with nexthop_id action"; - } + RETURN_IF_ERROR(checkNextHopAndWcmpGroupAndRouteMetadataExistence( + /*expected_next_hop_existence=*/true, + /*expected_wcmp_group_existence=*/false, + /*expected_route_metadata_existence=*/false, route_entry)); } else if (action == p4orch::kSetWcmpGroupId) { - if (!route_entry.nexthop_id.empty()) - { - return 
ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "Non-empty nexthop_id for route with wcmp_group action"; - } - if (route_entry.wcmp_group.empty()) - { - return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "Empty wcmp_group_id for route with wcmp_group action"; - } + RETURN_IF_ERROR(checkNextHopAndWcmpGroupAndRouteMetadataExistence( + /*expected_next_hop_existence=*/false, + /*expected_wcmp_group_existence=*/true, + /*expected_route_metadata_existence=*/false, route_entry)); } - else if (action == p4orch::kDrop) + else if (action == p4orch::kSetNexthopIdAndMetadata) { - if (!route_entry.nexthop_id.empty()) + RETURN_IF_ERROR(checkNextHopAndWcmpGroupAndRouteMetadataExistence( + /*expected_next_hop_existence=*/true, + /*expected_wcmp_group_existence=*/false, + /*expected_route_metadata_existence=*/true, route_entry)); + } + else if (action == p4orch::kSetWcmpGroupIdAndMetadata) + { + RETURN_IF_ERROR(checkNextHopAndWcmpGroupAndRouteMetadataExistence( + /*expected_next_hop_existence=*/false, + /*expected_wcmp_group_existence=*/true, + /*expected_route_metadata_existence=*/true, route_entry)); + } + else if (action == p4orch::kDrop || action == p4orch::kTrap) + { + RETURN_IF_ERROR(checkNextHopAndWcmpGroupAndRouteMetadataExistence( + /*expected_next_hop_existence=*/false, + /*expected_wcmp_group_existence=*/false, + /*expected_route_metadata_existence=*/false, route_entry)); + } + else if (action == p4orch::kSetMetadataAndDrop) + { + RETURN_IF_ERROR(checkNextHopAndWcmpGroupAndRouteMetadataExistence( + /*expected_next_hop_existence=*/false, + /*expected_wcmp_group_existence=*/false, + /*expected_route_metadata_existence=*/true, route_entry)); + } + else + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Invalid action " << QuotedVar(action); + } + + if (!route_entry.route_metadata.empty()) + { + try { - return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Non-empty nexthop_id for route with drop action"; + swss::to_uint(route_entry.route_metadata); 
} - if (!route_entry.wcmp_group.empty()) + catch (std::exception &e) { return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "Non-empty wcmp_group_id for route with drop action"; + << "Action attribute " << QuotedVar(p4orch::kRouteMetadata) << " is invalid for " + << QuotedVar(route_entry.route_entry_key) << ": Expect integer but got " + << QuotedVar(route_entry.route_metadata); } } - else - { - return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Invalid action " << QuotedVar(action); - } return ReturnCode(); } @@ -322,180 +558,291 @@ ReturnCode RouteManager::validateDelRouteEntry(const P4RouteEntry &route_entry) { return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Non-empty wcmp_group for Del route"; } + if (!route_entry.route_metadata.empty()) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Non-empty route_metadata for Del route"; + } return ReturnCode(); } -ReturnCode RouteManager::createRouteEntry(const P4RouteEntry &route_entry) +std::vector RouteManager::createRouteEntries(const std::vector &route_entries) { SWSS_LOG_ENTER(); - sai_route_entry_t sai_route_entry; - sai_route_entry.vr_id = m_vrfOrch->getVRFid(route_entry.vrf_id); - sai_route_entry.switch_id = gSwitchId; - copy(sai_route_entry.destination, route_entry.route_prefix); - if (route_entry.action == p4orch::kSetNexthopId) - { - auto nexthop_key = KeyGenerator::generateNextHopKey(route_entry.nexthop_id); - sai_object_id_t next_hop_oid; - m_p4OidMapper->getOID(SAI_OBJECT_TYPE_NEXT_HOP, nexthop_key, &next_hop_oid); - sai_attribute_t route_attr; - route_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; - route_attr.value.oid = next_hop_oid; - // Default SAI_ROUTE_ATTR_PACKET_ACTION is SAI_PACKET_ACTION_FORWARD. 
- CHECK_ERROR_AND_LOG_AND_RETURN(sai_route_api->create_route_entry(&sai_route_entry, /*size=*/1, &route_attr), - "Failed to create route " << QuotedVar(route_entry.route_entry_key) - << " with next hop " - << QuotedVar(route_entry.nexthop_id)); - m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP, nexthop_key); - } - else if (route_entry.action == p4orch::kSetWcmpGroupId) - { - auto wcmp_group_key = KeyGenerator::generateWcmpGroupKey(route_entry.wcmp_group); - sai_object_id_t wcmp_group_oid; - m_p4OidMapper->getOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, wcmp_group_key, &wcmp_group_oid); - sai_attribute_t route_attr; - route_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; - route_attr.value.oid = wcmp_group_oid; - // Default SAI_ROUTE_ATTR_PACKET_ACTION is SAI_PACKET_ACTION_FORWARD. - CHECK_ERROR_AND_LOG_AND_RETURN(sai_route_api->create_route_entry(&sai_route_entry, /*size=*/1, &route_attr), - "Failed to create route " << QuotedVar(route_entry.route_entry_key) - << " with wcmp group " - << QuotedVar(route_entry.wcmp_group)); - m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, wcmp_group_key); - } - else - { - sai_attribute_t route_attr; - route_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; - route_attr.value.s32 = SAI_PACKET_ACTION_DROP; - CHECK_ERROR_AND_LOG_AND_RETURN(sai_route_api->create_route_entry(&sai_route_entry, /*size=*/1, &route_attr), - "Failed to create route " << QuotedVar(route_entry.route_entry_key) - << " with action drop"); - } + std::vector sai_route_entries(route_entries.size()); + // Currently, there are maximum of 2 SAI attributes for route creation. + // For drop and trap routes, there is one SAI attribute: + // SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION. + // For forwarding routes, the default SAI_ROUTE_ATTR_PACKET_ACTION is already + // SAI_PACKET_ACTION_FORWARD, so we don't need SAI_ROUTE_ATTR_PACKET_ACTION. + // But we need SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID and optionally + // SAI_ROUTE_ENTRY_ATTR_META_DATA. 
+ std::vector sai_attrs(2 * route_entries.size()); + std::vector object_statuses(route_entries.size()); + std::vector statuses(route_entries.size()); - m_routeTable[route_entry.route_entry_key] = route_entry; - m_routeTable[route_entry.route_entry_key].sai_route_entry = sai_route_entry; - m_p4OidMapper->setDummyOID(SAI_OBJECT_TYPE_ROUTE_ENTRY, route_entry.route_entry_key); - if (route_entry.route_prefix.isV4()) + for (size_t i = 0; i < route_entries.size(); ++i) { - gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV4_ROUTE); + const auto &route_entry = route_entries[i]; + sai_route_entries[i] = getSaiEntry(route_entry); + uint32_t num_attrs = 1; + if (route_entry.action == p4orch::kDrop) + { + sai_attrs[2 * i].id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + sai_attrs[2 * i].value.s32 = SAI_PACKET_ACTION_DROP; + } + else if (route_entry.action == p4orch::kTrap) + { + sai_attrs[2 * i].id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + sai_attrs[2 * i].value.s32 = SAI_PACKET_ACTION_TRAP; + } + else if (route_entry.action == p4orch::kSetMetadataAndDrop) + { + sai_attrs[2 * i].id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + sai_attrs[2 * i].value.s32 = SAI_PACKET_ACTION_DROP; + sai_attrs[2 * i + 1].id = SAI_ROUTE_ENTRY_ATTR_META_DATA; + sai_attrs[2 * i + 1].value.u32 = swss::to_uint(route_entry.route_metadata); + num_attrs++; + } + else + { + // Default SAI_ROUTE_ATTR_PACKET_ACTION is SAI_PACKET_ACTION_FORWARD. 
+ sai_attrs[2 * i].id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + sai_attrs[2 * i].value.oid = getNexthopOid(route_entry, *m_p4OidMapper); + if (route_entry.action == p4orch::kSetNexthopIdAndMetadata || + route_entry.action == p4orch::kSetWcmpGroupIdAndMetadata) + { + sai_attrs[2 * i + 1].id = SAI_ROUTE_ENTRY_ATTR_META_DATA; + sai_attrs[2 * i + 1].value.u32 = swss::to_uint(route_entry.route_metadata); + num_attrs++; + } + } + object_statuses[i] = + m_routerBulker.create_entry(&object_statuses[i], &sai_route_entries[i], num_attrs, &sai_attrs[2 * i]); } - else + + m_routerBulker.flush(); + + for (size_t i = 0; i < route_entries.size(); ++i) { - gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV6_ROUTE); + const auto &route_entry = route_entries[i]; + CHECK_ERROR_AND_LOG(object_statuses[i], + "Failed to create route entry " << QuotedVar(route_entry.route_entry_key)); + if (object_statuses[i] == SAI_STATUS_SUCCESS) + { + if (route_entry.action == p4orch::kSetNexthopId || route_entry.action == p4orch::kSetNexthopIdAndMetadata) + { + m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP, + KeyGenerator::generateNextHopKey(route_entry.nexthop_id)); + } + else if (route_entry.action == p4orch::kSetWcmpGroupId || + route_entry.action == p4orch::kSetWcmpGroupIdAndMetadata) + { + m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, + KeyGenerator::generateWcmpGroupKey(route_entry.wcmp_group)); + } + m_routeTable[route_entry.route_entry_key] = route_entry; + m_routeTable[route_entry.route_entry_key].sai_route_entry = sai_route_entries[i]; + m_p4OidMapper->setDummyOID(SAI_OBJECT_TYPE_ROUTE_ENTRY, route_entry.route_entry_key); + if (route_entry.route_prefix.isV4()) + { + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV4_ROUTE); + } + else + { + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV6_ROUTE); + } + m_vrfOrch->increaseVrfRefCount(route_entry.vrf_id); + statuses[i] = ReturnCode(); + } + else + { + statuses[i] = ReturnCode(object_statuses[i]) + 
<< "Failed to create route entry " << QuotedVar(route_entry.route_entry_key); + } } - m_vrfOrch->increaseVrfRefCount(route_entry.vrf_id); - return ReturnCode(); + + return statuses; } -ReturnCodeOr RouteManager::getNexthopOid(const P4RouteEntry &route_entry) +void RouteManager::updateRouteEntriesMeta(const P4RouteEntry &old_entry, const P4RouteEntry &new_entry) { - sai_object_id_t oid = SAI_NULL_OBJECT_ID; - if (route_entry.action == p4orch::kSetNexthopId) + if (getNexthopOid(old_entry, *m_p4OidMapper) != getNexthopOid(new_entry, *m_p4OidMapper)) { - auto nexthop_key = KeyGenerator::generateNextHopKey(route_entry.nexthop_id); - if (!m_p4OidMapper->getOID(SAI_OBJECT_TYPE_NEXT_HOP, nexthop_key, &oid)) + if (new_entry.action == p4orch::kSetNexthopId || new_entry.action == p4orch::kSetNexthopIdAndMetadata) { - RETURN_INTERNAL_ERROR_AND_RAISE_CRITICAL("Nexthop " << QuotedVar(route_entry.nexthop_id) - << " does not exist"); + m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP, + KeyGenerator::generateNextHopKey(new_entry.nexthop_id)); } - } - else if (route_entry.action == p4orch::kSetWcmpGroupId) - { - auto wcmp_group_key = KeyGenerator::generateWcmpGroupKey(route_entry.wcmp_group); - if (!m_p4OidMapper->getOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, wcmp_group_key, &oid)) + else if (new_entry.action == p4orch::kSetWcmpGroupId || new_entry.action == p4orch::kSetWcmpGroupIdAndMetadata) + { + m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, + KeyGenerator::generateWcmpGroupKey(new_entry.wcmp_group)); + } + if (old_entry.action == p4orch::kSetNexthopId || old_entry.action == p4orch::kSetNexthopIdAndMetadata) { - RETURN_INTERNAL_ERROR_AND_RAISE_CRITICAL("WCMP group " << QuotedVar(route_entry.wcmp_group) - << " does not exist"); + m_p4OidMapper->decreaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP, + KeyGenerator::generateNextHopKey(old_entry.nexthop_id)); + } + else if (old_entry.action == p4orch::kSetWcmpGroupId || old_entry.action == p4orch::kSetWcmpGroupIdAndMetadata) 
+ { + m_p4OidMapper->decreaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, + KeyGenerator::generateWcmpGroupKey(old_entry.wcmp_group)); } } - return oid; + m_routeTable[new_entry.route_entry_key] = new_entry; } -ReturnCode RouteManager::updateRouteEntry(const P4RouteEntry &route_entry) +void RouteManager::updateRouteAttrs(int size, const std::vector> &updaters, + std::vector &indice, std::vector &statuses) { - SWSS_LOG_ENTER(); - - auto *route_entry_ptr = getRouteEntry(route_entry.route_entry_key); - P4RouteEntry new_route_entry; - if (!mergeRouteEntry(*route_entry_ptr, route_entry, &new_route_entry)) - { - return ReturnCode(); - } - - ASSIGN_OR_RETURN(sai_object_id_t old_nexthop, getNexthopOid(*route_entry_ptr)); - ASSIGN_OR_RETURN(sai_object_id_t new_nexthop, getNexthopOid(new_route_entry)); - RETURN_IF_ERROR(UpdateRouteAttrs( - (route_entry_ptr->action == p4orch::kDrop) ? SAI_PACKET_ACTION_DROP : SAI_PACKET_ACTION_FORWARD, - (new_route_entry.action == p4orch::kDrop) ? SAI_PACKET_ACTION_DROP : SAI_PACKET_ACTION_FORWARD, old_nexthop, - new_nexthop, new_route_entry.route_entry_key, &new_route_entry.sai_route_entry)); - - if (new_route_entry.action == p4orch::kSetNexthopId) + std::vector sai_route_entries(size); + std::vector sai_attrs(size); + std::vector object_statuses(size); + // We will perform route update in multiple SAI calls. + // If error is encountered, the previous SAI calls will be reverted. + // Raise critical state if the revert fails. + // We avoid changing multiple attributes of the same entry in a single bulk + // call. 
+ constexpr int kMaxAttrUpdate = 20; + int i; + for (i = 0; i < kMaxAttrUpdate; ++i) { - m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP, - KeyGenerator::generateNextHopKey(new_route_entry.nexthop_id)); + for (int j = 0; j < size; ++j) + { + sai_route_entries[j] = updaters[indice[j]]->getSaiEntry(); + sai_attrs[j] = updaters[indice[j]]->getSaiAttr(); + m_routerBulker.set_entry_attribute(&object_statuses[j], &sai_route_entries[j], &sai_attrs[j]); + } + m_routerBulker.flush(); + int new_size = 0; + for (int j = 0; j < size; j++) + { + if (updaters[indice[j]]->updateResult(object_statuses[j])) + { + statuses[indice[j]] = updaters[indice[j]]->getStatus(); + if (statuses[indice[j]].ok()) + { + updateRouteEntriesMeta(updaters[indice[j]]->getOldEntry(), updaters[indice[j]]->getNewEntry()); + } + } + else + { + indice[new_size++] = indice[j]; + } + } + if (new_size == 0) + { + break; + } + size = new_size; } - if (new_route_entry.action == p4orch::kSetWcmpGroupId) + // Just a safety check to prevent infinite loop. Should not happen. 
+ if (i == kMaxAttrUpdate) { - m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, - KeyGenerator::generateWcmpGroupKey(new_route_entry.wcmp_group)); + SWSS_RAISE_CRITICAL_STATE("Route update operation did not terminate."); } + return; +} - if (route_entry_ptr->action == p4orch::kSetNexthopId) +std::vector RouteManager::updateRouteEntries(const std::vector &route_entries) +{ + SWSS_LOG_ENTER(); + + std::vector> updaters(route_entries.size()); + std::vector indice(route_entries.size()); // index to the route_entries + std::vector statuses(route_entries.size()); + + int size = 0; + for (size_t i = 0; i < route_entries.size(); ++i) { - if (new_route_entry.action != p4orch::kSetNexthopId || - new_route_entry.nexthop_id != route_entry_ptr->nexthop_id) + const auto &route_entry = route_entries[i]; + auto *route_entry_ptr = getRouteEntry(route_entry.route_entry_key); + P4RouteEntry new_entry; + if (!mergeRouteEntry(*route_entry_ptr, route_entry, &new_entry)) { - m_p4OidMapper->decreaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP, - KeyGenerator::generateNextHopKey(route_entry_ptr->nexthop_id)); + statuses[i] = ReturnCode(); + continue; } + updaters[i] = std::unique_ptr(new RouteUpdater(*route_entry_ptr, new_entry, m_p4OidMapper)); + indice[size++] = i; } - if (route_entry_ptr->action == p4orch::kSetWcmpGroupId) + if (size == 0) { - if (new_route_entry.action != p4orch::kSetWcmpGroupId || - new_route_entry.wcmp_group != route_entry_ptr->wcmp_group) - { - m_p4OidMapper->decreaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, - KeyGenerator::generateWcmpGroupKey(route_entry_ptr->wcmp_group)); - } + return statuses; } - m_routeTable[route_entry.route_entry_key] = new_route_entry; - return ReturnCode(); + + updateRouteAttrs(size, updaters, indice, statuses); + return statuses; } -ReturnCode RouteManager::deleteRouteEntry(const P4RouteEntry &route_entry) +std::vector RouteManager::deleteRouteEntries(const std::vector &route_entries) { SWSS_LOG_ENTER(); - auto *route_entry_ptr = 
getRouteEntry(route_entry.route_entry_key); - CHECK_ERROR_AND_LOG_AND_RETURN(sai_route_api->remove_route_entry(&route_entry_ptr->sai_route_entry), - "Failed to delete route " << QuotedVar(route_entry.route_entry_key)); + std::vector sai_route_entries(route_entries.size()); + std::vector object_statuses(route_entries.size()); + std::vector statuses(route_entries.size()); - if (route_entry_ptr->action == p4orch::kSetNexthopId) + for (size_t i = 0; i < route_entries.size(); ++i) { - m_p4OidMapper->decreaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP, - KeyGenerator::generateNextHopKey(route_entry_ptr->nexthop_id)); + const auto &route_entry = route_entries[i]; + auto *route_entry_ptr = getRouteEntry(route_entry.route_entry_key); + sai_route_entries[i] = route_entry_ptr->sai_route_entry; + object_statuses[i] = m_routerBulker.remove_entry(&object_statuses[i], &sai_route_entries[i]); } - if (route_entry_ptr->action == p4orch::kSetWcmpGroupId) - { - m_p4OidMapper->decreaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, - KeyGenerator::generateWcmpGroupKey(route_entry_ptr->wcmp_group)); - } - m_p4OidMapper->eraseOID(SAI_OBJECT_TYPE_ROUTE_ENTRY, route_entry.route_entry_key); - if (route_entry.route_prefix.isV4()) - { - gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV4_ROUTE); - } - else + + m_routerBulker.flush(); + + for (size_t i = 0; i < route_entries.size(); ++i) { - gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV6_ROUTE); + const auto &route_entry = route_entries[i]; + auto *route_entry_ptr = getRouteEntry(route_entry.route_entry_key); + CHECK_ERROR_AND_LOG(object_statuses[i], + "Failed to delete route entry " << QuotedVar(route_entry.route_entry_key)); + if (object_statuses[i] == SAI_STATUS_SUCCESS) + { + if (route_entry_ptr->action == p4orch::kSetNexthopId || + route_entry_ptr->action == p4orch::kSetNexthopIdAndMetadata) + { + m_p4OidMapper->decreaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP, + KeyGenerator::generateNextHopKey(route_entry_ptr->nexthop_id)); + } + else if 
(route_entry_ptr->action == p4orch::kSetWcmpGroupId || + route_entry_ptr->action == p4orch::kSetWcmpGroupIdAndMetadata) + { + m_p4OidMapper->decreaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, + KeyGenerator::generateWcmpGroupKey(route_entry_ptr->wcmp_group)); + } + m_p4OidMapper->eraseOID(SAI_OBJECT_TYPE_ROUTE_ENTRY, route_entry.route_entry_key); + if (route_entry.route_prefix.isV4()) + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV4_ROUTE); + } + else + { + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV6_ROUTE); + } + m_vrfOrch->decreaseVrfRefCount(route_entry.vrf_id); + m_routeTable.erase(route_entry.route_entry_key); + statuses[i] = ReturnCode(); + } + else + { + statuses[i] = ReturnCode(object_statuses[i]) + << "Failed to delete route entry " << QuotedVar(route_entry.route_entry_key); + } } - m_vrfOrch->decreaseVrfRefCount(route_entry.vrf_id); - m_routeTable.erase(route_entry.route_entry_key); - return ReturnCode(); + + return statuses; } -void RouteManager::enqueue(const swss::KeyOpFieldsValuesTuple &entry) +ReturnCode RouteManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) +{ + return StatusCode::SWSS_RC_UNIMPLEMENTED; +} + +void RouteManager::enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) { m_entries.push_back(entry); } @@ -504,6 +851,14 @@ void RouteManager::drain() { SWSS_LOG_ENTER(); + std::vector create_route_list; + std::vector update_route_list; + std::vector delete_route_list; + std::vector create_tuple_list; + std::vector update_tuple_list; + std::vector delete_tuple_list; + std::unordered_set route_entry_list; + for (const auto &key_op_fvs_tuple : m_entries) { std::string table_name; @@ -525,7 +880,19 @@ void RouteManager::drain() } auto &route_entry = *route_entry_or; - status = validateRouteEntry(route_entry); + // A single batch should not modify the same route more than once. 
+ if (route_entry_list.count(route_entry.route_entry_key) != 0) + { + status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Route entry has been included in the same batch"; + SWSS_LOG_ERROR("%s: %s", status.message().c_str(), QuotedVar(route_entry.route_entry_key).c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), + status, + /*replace=*/true); + continue; + } + + const std::string &operation = kfvOp(key_op_fvs_tuple); + status = validateRouteEntry(route_entry, operation); if (!status.ok()) { SWSS_LOG_ERROR("Validation failed for Route APP DB entry with key %s: %s", @@ -535,45 +902,270 @@ void RouteManager::drain() /*replace=*/true); continue; } + route_entry_list.insert(route_entry.route_entry_key); - const std::string &operation = kfvOp(key_op_fvs_tuple); if (operation == SET_COMMAND) { - status = validateSetRouteEntry(route_entry); - if (!status.ok()) - { - SWSS_LOG_ERROR("Validation failed for Set Route APP DB entry with key %s: %s", - QuotedVar(table_name + ":" + key).c_str(), status.message().c_str()); - } - else if (getRouteEntry(route_entry.route_entry_key) == nullptr) + if (getRouteEntry(route_entry.route_entry_key) == nullptr) { - status = createRouteEntry(route_entry); + create_route_list.push_back(route_entry); + create_tuple_list.push_back(key_op_fvs_tuple); } else { - status = updateRouteEntry(route_entry); + update_route_list.push_back(route_entry); + update_tuple_list.push_back(key_op_fvs_tuple); } } - else if (operation == DEL_COMMAND) + else { - status = validateDelRouteEntry(route_entry); - if (!status.ok()) - { - SWSS_LOG_ERROR("Validation failed for Del Route APP DB entry with key %s: %s", - QuotedVar(table_name + ":" + key).c_str(), status.message().c_str()); - } - else - { - status = deleteRouteEntry(route_entry); - } + delete_route_list.push_back(route_entry); + delete_tuple_list.push_back(key_op_fvs_tuple); } - else + } + + if (!create_route_list.empty()) + { + auto 
statuses = createRouteEntries(create_route_list); + for (size_t i = 0; i < create_route_list.size(); ++i) { - status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Unknown operation type " << QuotedVar(operation); - SWSS_LOG_ERROR("%s", status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(create_tuple_list[i]), + kfvFieldsValues(create_tuple_list[i]), statuses[i], + /*replace=*/true); + } + } + if (!update_route_list.empty()) + { + auto statuses = updateRouteEntries(update_route_list); + for (size_t i = 0; i < update_route_list.size(); ++i) + { + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(update_tuple_list[i]), + kfvFieldsValues(update_tuple_list[i]), statuses[i], + /*replace=*/true); + } + } + if (!delete_route_list.empty()) + { + auto statuses = deleteRouteEntries(delete_route_list); + for (size_t i = 0; i < delete_route_list.size(); ++i) + { + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(delete_tuple_list[i]), + kfvFieldsValues(delete_tuple_list[i]), statuses[i], + /*replace=*/true); } - m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), status, - /*replace=*/true); } m_entries.clear(); } + +std::string RouteManager::verifyState(const std::string &key, const std::vector &tuple) +{ + SWSS_LOG_ENTER(); + + auto pos = key.find_first_of(kTableKeyDelimiter); + if (pos == std::string::npos) + { + return std::string("Invalid key: ") + key; + } + std::string p4rt_table = key.substr(0, pos); + std::string p4rt_key = key.substr(pos + 1); + if (p4rt_table != APP_P4RT_TABLE_NAME) + { + return std::string("Invalid key: ") + key; + } + std::string table_name; + std::string key_content; + parseP4RTKey(p4rt_key, &table_name, &key_content); + if (table_name != APP_P4RT_IPV4_TABLE_NAME && table_name != APP_P4RT_IPV6_TABLE_NAME) + { + return std::string("Invalid key: ") + key; + } + + ReturnCode status; + auto app_db_entry_or = deserializeRouteEntry(key_content, tuple, table_name); + if 
(!app_db_entry_or.ok()) + { + status = app_db_entry_or.status(); + std::stringstream msg; + msg << "Unable to deserialize key " << QuotedVar(key) << ": " << status.message(); + return msg.str(); + } + auto &app_db_entry = *app_db_entry_or; + + auto *route_entry = getRouteEntry(app_db_entry.route_entry_key); + if (route_entry == nullptr) + { + std::stringstream msg; + msg << "No entry found with key " << QuotedVar(key); + return msg.str(); + } + + std::string cache_result = verifyStateCache(app_db_entry, route_entry); + std::string asic_db_result = verifyStateAsicDb(route_entry); + if (cache_result.empty()) + { + return asic_db_result; + } + if (asic_db_result.empty()) + { + return cache_result; + } + return cache_result + "; " + asic_db_result; +} + +std::string RouteManager::verifyStateCache(const P4RouteEntry &app_db_entry, const P4RouteEntry *route_entry) +{ + ReturnCode status = validateRouteEntry(app_db_entry, SET_COMMAND); + if (!status.ok()) + { + std::stringstream msg; + msg << "Validation failed for route DB entry with key " << QuotedVar(app_db_entry.route_entry_key) << ": " + << status.message(); + return msg.str(); + } + + if (route_entry->route_entry_key != app_db_entry.route_entry_key) + { + std::stringstream msg; + msg << "Route entry " << QuotedVar(app_db_entry.route_entry_key) << " does not match internal cache " + << QuotedVar(route_entry->route_entry_key) << " in route manager."; + return msg.str(); + } + if (route_entry->vrf_id != app_db_entry.vrf_id) + { + std::stringstream msg; + msg << "Route entry " << QuotedVar(app_db_entry.route_entry_key) << " with VRF " + << QuotedVar(app_db_entry.vrf_id) << " does not match internal cache " << QuotedVar(route_entry->vrf_id) + << " in route manager."; + return msg.str(); + } + if (route_entry->route_prefix.to_string() != app_db_entry.route_prefix.to_string()) + { + std::stringstream msg; + msg << "Route entry " << QuotedVar(app_db_entry.route_entry_key) << " with route prefix " + << 
app_db_entry.route_prefix.to_string() << " does not match internal cache " + << route_entry->route_prefix.to_string() << " in route manager."; + return msg.str(); + } + if (route_entry->action != app_db_entry.action) + { + std::stringstream msg; + msg << "Route entry " << QuotedVar(app_db_entry.route_entry_key) << " with action " + << QuotedVar(app_db_entry.action) << " does not match internal cache " << QuotedVar(route_entry->action) + << " in route manager."; + return msg.str(); + } + if (route_entry->nexthop_id != app_db_entry.nexthop_id) + { + std::stringstream msg; + msg << "Route entry " << QuotedVar(app_db_entry.route_entry_key) << " with nexthop ID " + << QuotedVar(app_db_entry.nexthop_id) << " does not match internal cache " + << QuotedVar(route_entry->nexthop_id) << " in route manager."; + return msg.str(); + } + if (route_entry->wcmp_group != app_db_entry.wcmp_group) + { + std::stringstream msg; + msg << "Route entry " << QuotedVar(app_db_entry.route_entry_key) << " with WCMP group " + << QuotedVar(app_db_entry.wcmp_group) << " does not match internal cache " + << QuotedVar(route_entry->wcmp_group) << " in route manager."; + return msg.str(); + } + if (route_entry->route_metadata != app_db_entry.route_metadata) + { + std::stringstream msg; + msg << "Route entry " << QuotedVar(app_db_entry.route_entry_key) << " with metadata " + << QuotedVar(app_db_entry.route_metadata) << " does not match internal cache " + << QuotedVar(route_entry->route_metadata) << " in route manager."; + return msg.str(); + } + + return ""; +} + +std::string RouteManager::verifyStateAsicDb(const P4RouteEntry *route_entry) +{ + std::vector exp_attrs; + std::vector opt_attrs; + sai_attribute_t attr; + + if (route_entry->action == p4orch::kDrop) + { + attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + attr.value.s32 = SAI_PACKET_ACTION_DROP; + exp_attrs.push_back(attr); + } + else if (route_entry->action == p4orch::kTrap) + { + attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + attr.value.s32 
= SAI_PACKET_ACTION_TRAP; + exp_attrs.push_back(attr); + } + else if (route_entry->action == p4orch::kSetMetadataAndDrop) + { + attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + attr.value.s32 = SAI_PACKET_ACTION_DROP; + exp_attrs.push_back(attr); + attr.id = SAI_ROUTE_ENTRY_ATTR_META_DATA; + attr.value.u32 = swss::to_uint(route_entry->route_metadata); + exp_attrs.push_back(attr); + } + else + { + attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + attr.value.oid = getNexthopOid(*route_entry, *m_p4OidMapper); + exp_attrs.push_back(attr); + if (route_entry->action == p4orch::kSetNexthopIdAndMetadata || + route_entry->action == p4orch::kSetWcmpGroupIdAndMetadata) + { + attr.id = SAI_ROUTE_ENTRY_ATTR_META_DATA; + attr.value.u32 = swss::to_uint(route_entry->route_metadata); + exp_attrs.push_back(attr); + } + } + + if (route_entry->action == p4orch::kDrop || route_entry->action == p4orch::kTrap) + { + attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + attr.value.oid = SAI_NULL_OBJECT_ID; + opt_attrs.push_back(attr); + attr.id = SAI_ROUTE_ENTRY_ATTR_META_DATA; + attr.value.u32 = 0; + opt_attrs.push_back(attr); + } + else if (route_entry->action == p4orch::kSetMetadataAndDrop) + { + attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + attr.value.oid = SAI_NULL_OBJECT_ID; + opt_attrs.push_back(attr); + } + else + { + attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + attr.value.s32 = SAI_PACKET_ACTION_FORWARD; + opt_attrs.push_back(attr); + if (route_entry->action != p4orch::kSetNexthopIdAndMetadata && + route_entry->action != p4orch::kSetWcmpGroupIdAndMetadata) + { + attr.id = SAI_ROUTE_ENTRY_ATTR_META_DATA; + attr.value.u32 = 0; + opt_attrs.push_back(attr); + } + } + + std::vector exp = saimeta::SaiAttributeList::serialize_attr_list( + SAI_OBJECT_TYPE_ROUTE_ENTRY, (uint32_t)exp_attrs.size(), exp_attrs.data(), /*countOnly=*/false); + std::vector opt = saimeta::SaiAttributeList::serialize_attr_list( + SAI_OBJECT_TYPE_ROUTE_ENTRY, (uint32_t)opt_attrs.size(), opt_attrs.data(), 
/*countOnly=*/false); + + swss::DBConnector db("ASIC_DB", 0); + swss::Table table(&db, "ASIC_STATE"); + std::string key = sai_serialize_object_type(SAI_OBJECT_TYPE_ROUTE_ENTRY) + ":" + + sai_serialize_route_entry(getSaiEntry(*route_entry)); + std::vector values; + if (!table.get(key, values)) + { + return std::string("ASIC DB key not found ") + key; + } + + return verifyAttrs(values, exp, opt, /*allow_unknown=*/false); +} diff --git a/orchagent/p4orch/route_manager.h b/orchagent/p4orch/route_manager.h index 6e494709b3..6572fd6137 100644 --- a/orchagent/p4orch/route_manager.h +++ b/orchagent/p4orch/route_manager.h @@ -4,7 +4,9 @@ #include #include #include +#include +#include "bulker.h" #include "ipprefix.h" #include "orch.h" #include "p4orch/next_hop_manager.h" @@ -27,28 +29,64 @@ struct P4RouteEntry std::string action; std::string nexthop_id; std::string wcmp_group; + std::string route_metadata; // go/gpins-pinball-vip-stats sai_route_entry_t sai_route_entry; }; // P4RouteTable: Route ID, P4RouteEntry typedef std::unordered_map P4RouteTable; +// RouteUpdater is a helper class in performing route update. +// It keeps track of the state of the route update. It provides the next SAI +// attribute required in the route update. +// RouteUpdater will raise critical state if recovery fails or nexthop OID +// cannot be found. +class RouteUpdater +{ + public: + RouteUpdater(const P4RouteEntry &old_route, const P4RouteEntry &new_route, P4OidMapper *mapper); + ~RouteUpdater() = default; + + P4RouteEntry getOldEntry() const; + P4RouteEntry getNewEntry() const; + sai_route_entry_t getSaiEntry() const; + // Returns the next SAI attribute that should be performed. + sai_attribute_t getSaiAttr() const; + // Updates the state by the given SAI result. + // Returns true if all operations are completed. + // This method will raise critical state if a recovery action fails. + bool updateResult(sai_status_t sai_status); + // Returns the overall status of the route update. 
+ // This method should only be called after UpdateResult returns true. + ReturnCode getStatus() const; + + private: + // Updates the action index. + // Returns true if there are no more actions. + bool updateIdx(); + // Checks if the current action should be performed or not. + // Returns true if the action should be performed. + bool checkAction() const; + + P4OidMapper *m_p4OidMapper; + P4RouteEntry m_oldRoute; + P4RouteEntry m_newRoute; + ReturnCode m_status; + std::vector m_actions; + bool m_revert = false; + int m_idx = -1; +}; + class RouteManager : public ObjectManagerInterface { public: - RouteManager(P4OidMapper *p4oidMapper, VRFOrch *vrfOrch, ResponsePublisherInterface *publisher) : m_vrfOrch(vrfOrch) - { - SWSS_LOG_ENTER(); - - assert(p4oidMapper != nullptr); - m_p4OidMapper = p4oidMapper; - assert(publisher != nullptr); - m_publisher = publisher; - } + RouteManager(P4OidMapper *p4oidMapper, VRFOrch *vrfOrch, ResponsePublisherInterface *publisher); virtual ~RouteManager() = default; - void enqueue(const swss::KeyOpFieldsValuesTuple &entry) override; + void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; void drain() override; + std::string verifyState(const std::string &key, const std::vector &tuple) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) override; private: // Applies route entry updates from src to dest. The merged result will be @@ -66,8 +104,8 @@ class RouteManager : public ObjectManagerInterface // Return nullptr if corresponding route entry is not cached. P4RouteEntry *getRouteEntry(const std::string &route_entry_key); - // Validated non-empty fields in a route entry. - ReturnCode validateRouteEntry(const P4RouteEntry &route_entry); + // Performs route entry validation. + ReturnCode validateRouteEntry(const P4RouteEntry &route_entry, const std::string &operation); // Performs route entry validation for SET command. 
ReturnCode validateSetRouteEntry(const P4RouteEntry &route_entry); @@ -75,26 +113,36 @@ class RouteManager : public ObjectManagerInterface // Performs route entry validation for DEL command. ReturnCode validateDelRouteEntry(const P4RouteEntry &route_entry); - // Creates a route entry. - // Returns a SWSS status code. - ReturnCode createRouteEntry(const P4RouteEntry &route_entry); + // Creates a list of route entries. + std::vector createRouteEntries(const std::vector &route_entries); + + // Updates a list of route entries. + std::vector updateRouteEntries(const std::vector &route_entries); + + // Deletes a list of route entries. + std::vector deleteRouteEntries(const std::vector &route_entries); + + // On a successful route entry update, updates the reference counters and + // internal data. + void updateRouteEntriesMeta(const P4RouteEntry &old_entry, const P4RouteEntry &new_entry); + + // Auxiliary method to perform route update. + void updateRouteAttrs(int size, const std::vector> &updaters, + std::vector &indice, std::vector &statuses); - // Updates a route entry. - // Returns a SWSS status code. - ReturnCode updateRouteEntry(const P4RouteEntry &route_entry); + // Verifies internal cache for an entry. + std::string verifyStateCache(const P4RouteEntry &app_db_entry, const P4RouteEntry *route_entry); - // Deletes a route entry. - // Returns a SWSS status code. - ReturnCode deleteRouteEntry(const P4RouteEntry &route_entry); + // Verifies ASIC DB for an entry. + std::string verifyStateAsicDb(const P4RouteEntry *route_entry); - // Returns the nexthop OID for a given route entry. - // This method will raise critical state if the OID cannot be found. So this - // should only be called after validation. - ReturnCodeOr getNexthopOid(const P4RouteEntry &route_entry); + // Returns the SAI entry. 
+ sai_route_entry_t getSaiEntry(const P4RouteEntry &route_entry); P4RouteTable m_routeTable; P4OidMapper *m_p4OidMapper; VRFOrch *m_vrfOrch; + EntityBulker m_routerBulker; ResponsePublisherInterface *m_publisher; std::deque m_entries; diff --git a/orchagent/p4orch/router_interface_manager.cpp b/orchagent/p4orch/router_interface_manager.cpp index ea9abf083a..bc059217b4 100644 --- a/orchagent/p4orch/router_interface_manager.cpp +++ b/orchagent/p4orch/router_interface_manager.cpp @@ -1,19 +1,26 @@ #include "p4orch/router_interface_manager.h" +#include #include #include #include #include #include +#include "SaiAttributeList.h" +#include "dbconnector.h" #include "directory.h" #include "json.hpp" #include "logger.h" #include "orch.h" #include "p4orch/p4orch_util.h" #include "portsorch.h" +#include "sai_serialize.h" +#include "table.h" #include "vrforch.h" +using ::p4orch::kTableKeyDelimiter; + extern sai_object_id_t gSwitchId; extern sai_object_id_t gVirtualRouterId; @@ -49,6 +56,68 @@ ReturnCode validateRouterInterfaceAppDbEntry(const P4RouterInterfaceAppDbEntry & return ReturnCode(); } +ReturnCodeOr> getSaiAttrs(const P4RouterInterfaceEntry &router_intf_entry) +{ + Port port; + if (!gPortsOrch->getPort(router_intf_entry.port_name, port)) + { + LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) + << "Failed to get port info for port " << QuotedVar(router_intf_entry.port_name)); + } + + std::vector attrs; + sai_attribute_t attr; + + // Map all P4 router interfaces to default VRF as virtual router is mandatory + // parameter for creation of router interfaces in SAI. + attr.id = SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID; + attr.value.oid = gVirtualRouterId; + attrs.push_back(attr); + + // If mac address is not set then swss::MacAddress initializes mac address + // to 00:00:00:00:00:00. 
+ if (router_intf_entry.src_mac_address.to_string() != "00:00:00:00:00:00") + { + attr.id = SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS; + memcpy(attr.value.mac, router_intf_entry.src_mac_address.getMac(), sizeof(sai_mac_t)); + attrs.push_back(attr); + } + + attr.id = SAI_ROUTER_INTERFACE_ATTR_TYPE; + switch (port.m_type) + { + case Port::PHY: + attr.value.s32 = SAI_ROUTER_INTERFACE_TYPE_PORT; + attrs.push_back(attr); + attr.id = SAI_ROUTER_INTERFACE_ATTR_PORT_ID; + attr.value.oid = port.m_port_id; + break; + case Port::LAG: + attr.value.s32 = SAI_ROUTER_INTERFACE_TYPE_PORT; + attrs.push_back(attr); + attr.id = SAI_ROUTER_INTERFACE_ATTR_PORT_ID; + attr.value.oid = port.m_lag_id; + break; + case Port::VLAN: + attr.value.s32 = SAI_ROUTER_INTERFACE_TYPE_VLAN; + attrs.push_back(attr); + attr.id = SAI_ROUTER_INTERFACE_ATTR_VLAN_ID; + attr.value.oid = port.m_vlan_info.vlan_oid; + break; + // TODO: add support for PORT::SUBPORT + default: + LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Unsupported port type: " << port.m_type); + } + attrs.push_back(attr); + + // Configure port MTU on router interface + attr.id = SAI_ROUTER_INTERFACE_ATTR_MTU; + attr.value.u32 = port.m_mtu; + attrs.push_back(attr); + + return attrs; +} + } // namespace ReturnCodeOr RouterInterfaceManager::deserializeRouterIntfEntry( @@ -127,62 +196,7 @@ ReturnCode RouterInterfaceManager::createRouterInterface(const std::string &rout << " already exists in the centralized map"); } - Port port; - if (!gPortsOrch->getPort(router_intf_entry.port_name, port)) - { - LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) - << "Failed to get port info for port " << QuotedVar(router_intf_entry.port_name)); - } - - std::vector attrs; - sai_attribute_t attr; - - // Map all P4 router interfaces to default VRF as virtual router is mandatory - // parameter for creation of router interfaces in SAI. 
- attr.id = SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID; - attr.value.oid = gVirtualRouterId; - attrs.push_back(attr); - - // If mac address is not set then swss::MacAddress initializes mac address - // to 00:00:00:00:00:00. - if (router_intf_entry.src_mac_address.to_string() != "00:00:00:00:00:00") - { - attr.id = SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS; - memcpy(attr.value.mac, router_intf_entry.src_mac_address.getMac(), sizeof(sai_mac_t)); - attrs.push_back(attr); - } - - attr.id = SAI_ROUTER_INTERFACE_ATTR_TYPE; - switch (port.m_type) - { - case Port::PHY: - attr.value.s32 = SAI_ROUTER_INTERFACE_TYPE_PORT; - attrs.push_back(attr); - attr.id = SAI_ROUTER_INTERFACE_ATTR_PORT_ID; - attr.value.oid = port.m_port_id; - break; - case Port::LAG: - attr.value.s32 = SAI_ROUTER_INTERFACE_TYPE_PORT; - attrs.push_back(attr); - attr.id = SAI_ROUTER_INTERFACE_ATTR_PORT_ID; - attr.value.oid = port.m_lag_id; - break; - case Port::VLAN: - attr.value.s32 = SAI_ROUTER_INTERFACE_TYPE_VLAN; - attrs.push_back(attr); - attr.id = SAI_ROUTER_INTERFACE_ATTR_VLAN_ID; - attr.value.oid = port.m_vlan_info.vlan_oid; - break; - // TODO: add support for PORT::SUBPORT - default: - LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Unsupported port type: " << port.m_type); - } - attrs.push_back(attr); - - // Configure port MTU on router interface - attr.id = SAI_ROUTER_INTERFACE_ATTR_MTU; - attr.value.u32 = port.m_mtu; - attrs.push_back(attr); + ASSIGN_OR_RETURN(std::vector attrs, getSaiAttrs(router_intf_entry)); CHECK_ERROR_AND_LOG_AND_RETURN( sai_router_intfs_api->create_router_interface(&router_intf_entry.router_interface_oid, gSwitchId, @@ -323,7 +337,34 @@ ReturnCode RouterInterfaceManager::processDeleteRequest(const std::string &route return status; } -void RouterInterfaceManager::enqueue(const swss::KeyOpFieldsValuesTuple &entry) +ReturnCode RouterInterfaceManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) +{ + 
std::string value; + + try + { + nlohmann::json j = nlohmann::json::parse(json_key); + if (j.find(prependMatchField(p4orch::kRouterInterfaceId)) != j.end()) + { + value = j.at(prependMatchField(p4orch::kRouterInterfaceId)).get(); + object_key = KeyGenerator::generateRouterInterfaceKey(value); + object_type = SAI_OBJECT_TYPE_ROUTER_INTERFACE; + return ReturnCode(); + } + else + { + SWSS_LOG_ERROR("%s match parameter absent: required for dependent object query", p4orch::kRouterInterfaceId); + } + } + catch (std::exception &ex) + { + SWSS_LOG_ERROR("json_key parse error"); + } + + return StatusCode::SWSS_RC_INVALID_PARAM; +} + +void RouterInterfaceManager::enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) { m_entries.push_back(entry); } @@ -396,3 +437,121 @@ void RouterInterfaceManager::drain() } m_entries.clear(); } + +std::string RouterInterfaceManager::verifyState(const std::string &key, const std::vector &tuple) +{ + SWSS_LOG_ENTER(); + + auto pos = key.find_first_of(kTableKeyDelimiter); + if (pos == std::string::npos) + { + return std::string("Invalid key: ") + key; + } + std::string p4rt_table = key.substr(0, pos); + std::string p4rt_key = key.substr(pos + 1); + if (p4rt_table != APP_P4RT_TABLE_NAME) + { + return std::string("Invalid key: ") + key; + } + std::string table_name; + std::string key_content; + parseP4RTKey(p4rt_key, &table_name, &key_content); + if (table_name != APP_P4RT_ROUTER_INTERFACE_TABLE_NAME) + { + return std::string("Invalid key: ") + key; + } + + auto app_db_entry_or = deserializeRouterIntfEntry(key_content, tuple); + if (!app_db_entry_or.ok()) + { + ReturnCode status = app_db_entry_or.status(); + std::stringstream msg; + msg << "Unable to deserialize key " << QuotedVar(key) << ": " << status.message(); + return msg.str(); + } + auto &app_db_entry = *app_db_entry_or; + + const std::string router_intf_key = KeyGenerator::generateRouterInterfaceKey(app_db_entry.router_interface_id); + auto *router_intf_entry = 
getRouterInterfaceEntry(router_intf_key); + if (router_intf_entry == nullptr) + { + std::stringstream msg; + msg << "No entry found with key " << QuotedVar(key); + return msg.str(); + } + + std::string cache_result = verifyStateCache(app_db_entry, router_intf_entry); + std::string asic_db_result = verifyStateAsicDb(router_intf_entry); + if (cache_result.empty()) + { + return asic_db_result; + } + if (asic_db_result.empty()) + { + return cache_result; + } + return cache_result + "; " + asic_db_result; +} + +std::string RouterInterfaceManager::verifyStateCache(const P4RouterInterfaceAppDbEntry &app_db_entry, + const P4RouterInterfaceEntry *router_intf_entry) +{ + const std::string router_intf_key = KeyGenerator::generateRouterInterfaceKey(app_db_entry.router_interface_id); + ReturnCode status = validateRouterInterfaceAppDbEntry(app_db_entry); + if (!status.ok()) + { + std::stringstream msg; + msg << "Validation failed for Router Interface DB entry with key " << QuotedVar(router_intf_key) << ": " + << status.message(); + return msg.str(); + } + if (router_intf_entry->router_interface_id != app_db_entry.router_interface_id) + { + std::stringstream msg; + msg << "Router interface ID " << QuotedVar(app_db_entry.router_interface_id) + << " does not match internal cache " << QuotedVar(router_intf_entry->router_interface_id) + << " in router interface manager."; + return msg.str(); + } + if (router_intf_entry->port_name != app_db_entry.port_name) + { + std::stringstream msg; + msg << "Port name " << QuotedVar(app_db_entry.port_name) << " does not match internal cache " + << QuotedVar(router_intf_entry->port_name) << " in router interface manager."; + return msg.str(); + } + if (router_intf_entry->src_mac_address.to_string() != app_db_entry.src_mac_address.to_string()) + { + std::stringstream msg; + msg << "Source MAC address " << app_db_entry.src_mac_address.to_string() << " does not match internal cache " + << router_intf_entry->src_mac_address.to_string() << " in router 
interface manager."; + return msg.str(); + } + return m_p4OidMapper->verifyOIDMapping(SAI_OBJECT_TYPE_ROUTER_INTERFACE, router_intf_key, + router_intf_entry->router_interface_oid); +} + +std::string RouterInterfaceManager::verifyStateAsicDb(const P4RouterInterfaceEntry *router_intf_entry) +{ + auto attrs_or = getSaiAttrs(*router_intf_entry); + if (!attrs_or.ok()) + { + return std::string("Failed to get SAI attrs: ") + attrs_or.status().message(); + } + std::vector attrs = *attrs_or; + std::vector exp = saimeta::SaiAttributeList::serialize_attr_list( + SAI_OBJECT_TYPE_ROUTER_INTERFACE, (uint32_t)attrs.size(), attrs.data(), /*countOnly=*/false); + + swss::DBConnector db("ASIC_DB", 0); + swss::Table table(&db, "ASIC_STATE"); + std::string key = sai_serialize_object_type(SAI_OBJECT_TYPE_ROUTER_INTERFACE) + ":" + + sai_serialize_object_id(router_intf_entry->router_interface_oid); + std::vector values; + if (!table.get(key, values)) + { + return std::string("ASIC DB key not found ") + key; + } + + return verifyAttrs(values, exp, std::vector{}, + /*allow_unknown=*/false); +} diff --git a/orchagent/p4orch/router_interface_manager.h b/orchagent/p4orch/router_interface_manager.h index a300b2a7a4..427400e9c0 100644 --- a/orchagent/p4orch/router_interface_manager.h +++ b/orchagent/p4orch/router_interface_manager.h @@ -49,8 +49,10 @@ class RouterInterfaceManager : public ObjectManagerInterface } virtual ~RouterInterfaceManager() = default; - void enqueue(const swss::KeyOpFieldsValuesTuple &entry) override; + void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; void drain() override; + std::string verifyState(const std::string &key, const std::vector &tuple) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) override; private: ReturnCodeOr deserializeRouterIntfEntry( @@ -63,6 +65,9 @@ class RouterInterfaceManager : public ObjectManagerInterface ReturnCode 
processUpdateRequest(const P4RouterInterfaceAppDbEntry &app_db_entry, P4RouterInterfaceEntry *router_intf_entry); ReturnCode processDeleteRequest(const std::string &router_intf_key); + std::string verifyStateCache(const P4RouterInterfaceAppDbEntry &app_db_entry, + const P4RouterInterfaceEntry *router_intf_entry); + std::string verifyStateAsicDb(const P4RouterInterfaceEntry *router_intf_entry); P4RouterInterfaceTable m_routerIntfTable; P4OidMapper *m_p4OidMapper; diff --git a/orchagent/p4orch/tables_definition_manager.cpp b/orchagent/p4orch/tables_definition_manager.cpp new file mode 100644 index 0000000000..aa91fb40f4 --- /dev/null +++ b/orchagent/p4orch/tables_definition_manager.cpp @@ -0,0 +1,687 @@ +#include "p4orch/tables_definition_manager.h" + +#include +#include +#include +#include +#include + +#include "directory.h" +#include "json.hpp" +#include "logger.h" +#include "tokenize.h" +#include "orch.h" +#include "p4orch/p4orch.h" +#include "p4orch/p4orch_util.h" +extern "C" +{ +#include "saitypes.h" +} + + +extern Directory gDirectory; +extern P4Orch *gP4Orch; +const std::map format_datatype_map = +{ + {"MAC", "SAI_ATTR_VALUE_TYPE_MAC"}, + {"IPV4", "SAI_ATTR_VALUE_TYPE_IPV4"}, + {"IPV6", "SAI_ATTR_VALUE_TYPE_IPV6"} +}; + + +std::string +BitwidthToDatatype (int bitwidth) +{ + std::string datatype = "SAI_ATTR_VALUE_TYPE_CHARDATA"; + + if (bitwidth <= 0) + { + datatype = "SAI_ATTR_VALUE_TYPE_CHARDATA"; + } + else if (bitwidth <= 8) + { + datatype = "SAI_ATTR_VALUE_TYPE_UINT8"; + } + else if (bitwidth <= 16) + { + datatype = "SAI_ATTR_VALUE_TYPE_UINT16"; + } + else if (bitwidth <= 32) + { + datatype = "SAI_ATTR_VALUE_TYPE_UINT32"; + } + else if (bitwidth <= 64) + { + datatype = "SAI_ATTR_VALUE_TYPE_UINT64"; + } + + return datatype; +} + +std::string +parseBitwidthToDatatype (const nlohmann::json &json) +{ + int bitwidth; + std::string datatype = "SAI_ATTR_VALUE_TYPE_CHARDATA"; + + if (json.find(p4orch::kBitwidth) != json.end()) + { + bitwidth = 
json.at(p4orch::kBitwidth).get(); + datatype = BitwidthToDatatype(bitwidth); + } + + return datatype; +} + +std::string +parseFormatToDatatype (const nlohmann::json &json, std::string datatype) +{ + std::string format; + + if (json.find(p4orch::kFormat) != json.end()) + { + format = json.at(p4orch::kFormat).get(); + + auto it = format_datatype_map.find(format); + if (it != format_datatype_map.end()) + { + datatype = it->second; + } + } + + return datatype; +} + +ReturnCode +parseTableMatchReferences (const nlohmann::json &match_json, TableMatchInfo &match) +{ + std::string table, field; + + if (match_json.find(p4orch::kReferences) != match_json.end()) + { + for (const auto &ref_json : match_json[p4orch::kReferences]) + { + try + { + table = ref_json.at(p4orch::kTableRef).get(); + field = ref_json.at(p4orch::kMatchRef).get(); + match.table_reference_map[table] = field; + } + catch (std::exception &ex) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "can not parse tables from app-db supplied table definition info"; + } + } + } + + return ReturnCode(); +} + +ReturnCode +parseActionParamReferences (const nlohmann::json ¶m_json, ActionParamInfo ¶m) +{ + std::string table, field; + + if (param_json.find(p4orch::kReferences) != param_json.end()) + { + for (const auto &ref_json : param_json[p4orch::kReferences]) + { + try + { + table = ref_json.at(p4orch::kTableRef).get(); + field = ref_json.at(p4orch::kMatchRef).get(); + param.table_reference_map[table] = field; + } + catch (std::exception &ex) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "can not parse tables from app-db supplied table definition info"; + } + } + } + + return ReturnCode(); +} + +ReturnCode +parseTableActionParams (const nlohmann::json &action_json, ActionInfo &action) +{ + action.refers_to = false; + if (action_json.find(p4orch::kActionParams) != action_json.end()) + { + for (const auto ¶m_json : action_json[p4orch::kActionParams]) + { + try + { + ActionParamInfo param; 
+ std::string param_name; + + param_name = param_json.at(p4orch::kName).get(); + param.name = param_name; + param.datatype = parseBitwidthToDatatype(param_json); + param.datatype = parseFormatToDatatype(param_json, param.datatype); + parseActionParamReferences(param_json, param); + action.params[param_name] = param; + + if (!param.table_reference_map.empty()) + { + /** + * Helps avoid walk of action parameters if this is set to false at action level + */ + action.refers_to = true; + } + } + catch (std::exception &ex) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "can not parse tables from app-db supplied table definition info"; + } + } + } + + return ReturnCode(); +} + +ReturnCode +parseTableCounter (const nlohmann::json &table_json, TableInfo &table) +{ + if (table_json.find(p4orch::kCounterUnit) != table_json.end()) + { + auto unit = table_json.at(p4orch::kCounterUnit); + if (unit == "PACKETS") + { + table.counter_packets_enabled = true; + } + else if (unit == "BYTES") + { + table.counter_bytes_enabled = true; + } + else + { + table.counter_packets_enabled = true; + table.counter_bytes_enabled = true; + } + } + + return ReturnCode(); +} + +ReturnCode +parseTablesInfo (const nlohmann::json &info_json, TablesInfo &info_entry) +{ + ReturnCode status; + int table_id; + std::string table_name, field_name; + + if (info_json.find(p4orch::kTables) == info_json.end()) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "no tables in app-db supplied table definition info"; + } + + for (const auto &table_json : info_json[p4orch::kTables]) + { + try + { + table_id = table_json.at(p4orch::kId).get(); + table_name = table_json.at(p4orch::kAlias).get(); + } + catch (std::exception &ex) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "can not parse tables from app-db supplied table definition info"; + } + + + TableInfo table = {}; + table.name = table_name; + table.id = table_id; + try + { + for (const auto &match_json : 
table_json[p4orch::kmatchFields]) + { + TableMatchInfo match = {}; + std::string match_name; + + match_name = match_json.at(p4orch::kName).get(); + match.name = match_name; + match.datatype = parseBitwidthToDatatype(match_json); + match.datatype = parseFormatToDatatype(match_json, match.datatype); + parseTableMatchReferences(match_json, match); + table.match_fields[match_name] = match; + } + + for (const auto &action_json : table_json[p4orch::kActions]) + { + ActionInfo action = {}; + std::string action_name; + + action_name = action_json.at(p4orch::kAlias).get(); + action.name = action_name; + parseTableActionParams(action_json, action); + table.action_fields[action_name] = action; + + /** + * If any parameter of action refers to another table, add that one in the + * cross-reference list of current table + */ + for (auto param_it = action.params.begin(); + param_it != action.params.end(); param_it++) + { + ActionParamInfo action_param = param_it->second; + for (auto ref_it = action_param.table_reference_map.begin(); + ref_it != action_param.table_reference_map.end(); ref_it++) + { + if (std::find(table.action_ref_tables.begin(), + table.action_ref_tables.end(), + ref_it->first) == table.action_ref_tables.end()) + { + table.action_ref_tables.push_back(ref_it->first); + } + } + } + } + + parseTableCounter(table_json, table); + } + catch (std::exception &ex) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "can not parse table " << QuotedVar(table_name.c_str()) << "match fields"; + } + + + info_entry.m_tableIdNameMap[std::to_string(table_id)] = table_name; + info_entry.m_tableInfoMap[table_name] = table; + } + + return ReturnCode(); +} + + +ReturnCodeOr TablesDefnManager::deserializeTablesInfoEntry( + const std::string &key, const std::vector &attributes) +{ + SWSS_LOG_ENTER(); + + TablesInfoAppDbEntry app_db_entry = {}; + try + { + nlohmann::json j = nlohmann::json::parse(key); + app_db_entry.context = j["context"]; + } + catch (std::exception &ex) + 
{ + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Failed to deserialize tables info"; + } + + for (const auto &it : attributes) + { + const auto &field = fvField(it); + std::string value = fvValue(it); + if (field == "info") + { + app_db_entry.info = value; + } + else + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) + << "Unexpected field " << QuotedVar(field) << " in table entry"; + } + } + + return app_db_entry; +} + +ReturnCode validateTablesInfoAppDbEntry(const TablesInfoAppDbEntry &app_db_entry) +{ + // Perform generic APP DB entry validations. Operation specific validations + // will be done by the respective request process methods. + + return ReturnCode(); +} + +TablesInfo *TablesDefnManager::getTablesInfoEntry(const std::string &context_key) +{ + SWSS_LOG_ENTER(); + + if (m_tablesinfoMap.find(context_key) == m_tablesinfoMap.end()) + return nullptr; + + return &m_tablesinfoMap[context_key]; +} + +ReturnCode TablesDefnManager::processAddRequest(const TablesInfoAppDbEntry &app_db_entry, + const std::string &context_key) +{ + nlohmann::json tablesinfo_json; + ReturnCode status; + + SWSS_LOG_ENTER(); + + if (!m_tablesinfoMap.empty()) + { + // For now p4rt can send only same table-definition, so ignore it silently + return ReturnCode(); + } + + try + { + tablesinfo_json = nlohmann::json::parse(app_db_entry.info); + } + catch (std::exception &ex) + { + return ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "tables info from appdb can not be parsed\n"; + } + + TablesInfo tablesinfo_entry(app_db_entry.context, tablesinfo_json); + + status = parseTablesInfo(tablesinfo_json, tablesinfo_entry); + if (!status.ok()) + { + return status; + } + + m_tablesinfoMap[app_db_entry.context] = tablesinfo_entry; + gP4Orch->tablesinfo = &m_tablesinfoMap[app_db_entry.context]; + return ReturnCode(); +} + +ReturnCode TablesDefnManager::processUpdateRequest(const TablesInfoAppDbEntry &app_db_entry, + const std::string &context_key) +{ + SWSS_LOG_ENTER(); + + 
return ReturnCode(StatusCode::SWSS_RC_UNIMPLEMENTED) << "update of Tables Definition not supported"; +} + +ReturnCode TablesDefnManager::processDeleteRequest(const std::string &context_key) +{ + SWSS_LOG_ENTER(); + + auto *tablesinfo = getTablesInfoEntry(context_key); + + if (tablesinfo) + { + if (gP4Orch->tablesinfo == tablesinfo) + { + gP4Orch->tablesinfo = nullptr; + } + + tablesinfo->m_tableIdNameMap.clear(); + } + + m_tablesinfoMap.erase(context_key); + return ReturnCode(); +} + +ReturnCode TablesDefnManager::getSaiObject(const std::string &json_key, + sai_object_type_t &object_type, std::string &object_key) +{ + return StatusCode::SWSS_RC_INVALID_PARAM; +} + +std::unordered_map> +createGraph (std::vector> preReq) +{ + std::unordered_map> graph; + + for (auto pre : preReq) + { + auto it = graph.find(pre.second); + if (it != graph.end()) + { + it->second.insert(pre.first); + } + else + { + graph[pre.second].insert(pre.first); + } + } + + return graph; +} + +std::unordered_map +computeIndegree (std::unordered_map> &graph) +{ + std::unordered_map degrees; + + for (auto g_it = graph.begin(); g_it != graph.end(); g_it++) + { + for (int neigh : g_it->second) + { + auto n_it = degrees.find(neigh); + if (n_it != degrees.end()) + { + n_it->second++; + } + else + { + degrees.insert({neigh, 0}); + } + } + } + + return degrees; +} + + +std::vector +findTablePrecedence (int tables, std::vector> preReq, TablesInfo *tables_info) +{ + std::unordered_map> graph = createGraph(preReq); + std::unordered_map degrees = computeIndegree(graph); + std::vector visited; + std::vector toposort; + std::queue zeros; + + // initialize queue with tables having no dependencies + for (auto table_it = tables_info->m_tableInfoMap.begin(); + table_it != tables_info->m_tableInfoMap.end(); table_it++) + { + TableInfo table_info = table_it->second; + if (degrees.find(table_info.id) == degrees.end()) + { + zeros.push(table_info.id); + visited.push_back(table_info.id); + } + } + + for (int i = 0; i < 
tables; i++) + { + // Err input data like possible cyclic dependencies, could not build precedence order + if (zeros.empty()) + { + SWSS_LOG_ERROR("Filed to build table precedence order"); + return {}; + } + + // Run BFS + int zero = zeros.front(); + zeros.pop(); + toposort.push_back(zero); + auto g_it = graph.find(zero); + if (g_it != graph.end()) + { + for (int neigh : g_it->second) + { + auto n_it = degrees.find(neigh); + if (n_it != degrees.end()) + { + if (!n_it->second) + { + if (std::find(visited.begin(), visited.end(), neigh) == visited.end()) + { + zeros.push(neigh); + visited.push_back(neigh); + } + } + else + { + n_it->second--; + } + } + } + } + } + + return toposort; +} + + +void +buildTablePrecedence (TablesInfo *tables_info) +{ + std::vector> preReq; + std::vector orderedTables; + int tables = 0; + + if (!tables_info) { + return; + } + + // build dependencies + for (auto table_it = tables_info->m_tableInfoMap.begin(); + table_it != tables_info->m_tableInfoMap.end(); table_it++) + { + TableInfo table_info = table_it->second; + tables++; + + for (std::size_t i = 0; i < table_info.action_ref_tables.size(); i++) + { + /** + * For now processing precedence order is only amongst extension tables + * Skip fixed tables, include them in precedence calculations when fixed + * and extension tables processing precedence may be interleaved + */ + if (FixedTablesMap.find(table_info.action_ref_tables[i]) != FixedTablesMap.end()) + { + continue; + } + + TableInfo ref_table_info = tables_info->m_tableInfoMap[table_info.action_ref_tables[i]]; + if (std::find(preReq.begin(), preReq.end(), + std::make_pair(table_info.id, ref_table_info.id)) == preReq.end()) + { + preReq.push_back(std::make_pair(table_info.id, ref_table_info.id)); + } + } + } + + // find precedence of tables based on dependencies + orderedTables = findTablePrecedence(tables, preReq, tables_info); + + // update each table with calculated precedence value and build table precedence map + for (std::size_t i 
= 0; i < orderedTables.size(); i++) + { + auto table_id = orderedTables[i]; + auto id_it = tables_info->m_tableIdNameMap.find(std::to_string(table_id)); + if (id_it == tables_info->m_tableIdNameMap.end()) + { + continue; + } + + auto table_it = tables_info->m_tableInfoMap.find(id_it->second); + if (table_it == tables_info->m_tableInfoMap.end()) + { + continue; + } + + table_it->second.precedence = (int)i; + tables_info->m_tablePrecedenceMap[(int)i] = table_it->second.name; + } + + return; +} + + +void TablesDefnManager::enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) +{ + m_entries.push_back(entry); +} + +void TablesDefnManager::drain() +{ + SWSS_LOG_ENTER(); + + for (const auto &key_op_fvs_tuple : m_entries) + { + std::string table_name; + std::string key; + parseP4RTKey(kfvKey(key_op_fvs_tuple), &table_name, &key); + const std::vector &attributes = kfvFieldsValues(key_op_fvs_tuple); + + ReturnCode status; + auto app_db_entry_or = deserializeTablesInfoEntry(key, attributes); + if (!app_db_entry_or.ok()) + { + status = app_db_entry_or.status(); + SWSS_LOG_ERROR("Unable to deserialize APP DB entry with key %s: %s", + QuotedVar(table_name + ":" + key).c_str(), status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), + status, /*replace=*/true); + continue; + } + auto &app_db_entry = *app_db_entry_or; + + status = validateTablesInfoAppDbEntry(app_db_entry); + if (!status.ok()) + { + SWSS_LOG_ERROR("Validation failed for tables definition APP DB entry with key %s: %s", + QuotedVar(table_name + ":" + key).c_str(), status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), + status, /*replace=*/true); + continue; + } + + const std::string context_key = KeyGenerator::generateTablesInfoKey(app_db_entry.context); + + const std::string &operation = kfvOp(key_op_fvs_tuple); + if (operation == 
SET_COMMAND) + { + auto *tablesinfo = getTablesInfoEntry(context_key); + if (tablesinfo == nullptr) + { + // Create TablesInfo + status = processAddRequest(app_db_entry, context_key); + } + else + { + // Modify existing TablesInfo + status = processUpdateRequest(app_db_entry, context_key); + } + } + else if (operation == DEL_COMMAND) + { + // Delete TablesInfo + status = processDeleteRequest(context_key); + } + else + { + status = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << "Unknown operation type " << QuotedVar(operation); + SWSS_LOG_ERROR("%s", status.message().c_str()); + } + if (!status.ok()) + { + SWSS_LOG_ERROR("Processing failed for tables definition APP DB entry with key %s: %s", + QuotedVar(table_name + ":" + key).c_str(), status.message().c_str()); + } + else + { + buildTablePrecedence(gP4Orch->tablesinfo); + } + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), + status, /*replace=*/true); + } + m_entries.clear(); +} + +std::string TablesDefnManager::verifyState(const std::string &key, const std::vector &tuple) +{ + std::string result = ""; + SWSS_LOG_ENTER(); + + return result; +} diff --git a/orchagent/p4orch/tables_definition_manager.h b/orchagent/p4orch/tables_definition_manager.h new file mode 100644 index 0000000000..6d0fbe444a --- /dev/null +++ b/orchagent/p4orch/tables_definition_manager.h @@ -0,0 +1,78 @@ +#pragma once + +#include +#include +#include +#include + +#include "macaddress.h" +#include "json.hpp" +#include "orch.h" +#include "p4orch/object_manager_interface.h" +#include "p4orch/p4oidmapper.h" +#include "p4orch/p4orch_util.h" +#include "response_publisher_interface.h" +#include "return_code.h" +extern "C" +{ +#include "sai.h" +} + +/** + * A set of tables definition + */ +struct TablesInfo +{ + std::string context; + nlohmann::json info; + std::unordered_map m_tableIdNameMap; + std::unordered_map m_tableInfoMap; + std::map m_tablePrecedenceMap; + + TablesInfo() {}; + 
TablesInfo(const std::string &context_key, const nlohmann::json &info_value) + : context(context_key), info(info_value) + { + } +}; + +/** + * Datastructure is designed to hold multiple set of table definition. + * However, current support handles only one set of table definition. + */ +typedef std::unordered_map TablesInfoMap; + +class TablesDefnManager : public ObjectManagerInterface +{ + public: + TablesDefnManager(P4OidMapper *p4oidMapper, ResponsePublisherInterface *publisher) + { + SWSS_LOG_ENTER(); + + assert(p4oidMapper != nullptr); + m_p4OidMapper = p4oidMapper; + assert(publisher != nullptr); + m_publisher = publisher; + } + virtual ~TablesDefnManager() = default; + + void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; + void drain() override; + std::string verifyState(const std::string &key, const std::vector &tuple) override; + ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) override; + + private: + ReturnCodeOr deserializeTablesInfoEntry( + const std::string &key, const std::vector &attributes); + TablesInfo *getTablesInfoEntry(const std::string &context_key); + ReturnCode createTablesInfo(const std::string &context_key, TablesInfo &tablesinfo_entry); + ReturnCode removeTablesInfo(const std::string &context_key); + ReturnCode processAddRequest(const TablesInfoAppDbEntry &app_db_entry, const std::string &context_key); + ReturnCode processUpdateRequest(const TablesInfoAppDbEntry &app_db_entry, const std::string &context_key); + ReturnCode processDeleteRequest(const std::string &context_key); + + TablesInfoMap m_tablesinfoMap; + P4OidMapper *m_p4OidMapper; + ResponsePublisherInterface *m_publisher; + std::deque m_entries; +}; diff --git a/orchagent/p4orch/tests/Makefile.am b/orchagent/p4orch/tests/Makefile.am index 489acd8f99..671aa9eb80 100644 --- a/orchagent/p4orch/tests/Makefile.am +++ b/orchagent/p4orch/tests/Makefile.am @@ -4,7 +4,9 @@ INCLUDES 
= -I $(top_srcdir) -I $(ORCHAGENT_DIR) -I $(P4ORCH_DIR) -I $(top_srcdir CFLAGS_SAI = -I /usr/include/sai -bin_PROGRAMS = p4orch_tests p4orch_tests_asan p4orch_tests_tsan p4orch_tests_usan +TESTS = p4orch_tests p4orch_tests_asan p4orch_tests_tsan p4orch_tests_usan + +noinst_PROGRAMS = p4orch_tests p4orch_tests_asan p4orch_tests_tsan p4orch_tests_usan if DEBUG DBGFLAGS = -ggdb -DDEBUG @@ -24,13 +26,19 @@ p4orch_tests_SOURCES = $(ORCHAGENT_DIR)/orch.cpp \ $(ORCHAGENT_DIR)/vrforch.cpp \ $(ORCHAGENT_DIR)/vxlanorch.cpp \ $(ORCHAGENT_DIR)/copporch.cpp \ + $(ORCHAGENT_DIR)/switch/switch_capabilities.cpp \ + $(ORCHAGENT_DIR)/switch/switch_helper.cpp \ $(ORCHAGENT_DIR)/switchorch.cpp \ $(ORCHAGENT_DIR)/request_parser.cpp \ + $(top_srcdir)/lib/recorder.cpp \ $(ORCHAGENT_DIR)/flex_counter/flex_counter_manager.cpp \ + $(ORCHAGENT_DIR)/flex_counter/flow_counter_handler.cpp \ $(P4ORCH_DIR)/p4oidmapper.cpp \ $(P4ORCH_DIR)/p4orch.cpp \ $(P4ORCH_DIR)/p4orch_util.cpp \ + $(P4ORCH_DIR)/tables_definition_manager.cpp \ $(P4ORCH_DIR)/router_interface_manager.cpp \ + $(P4ORCH_DIR)/gre_tunnel_manager.cpp \ $(P4ORCH_DIR)/neighbor_manager.cpp \ $(P4ORCH_DIR)/next_hop_manager.cpp \ $(P4ORCH_DIR)/route_manager.cpp \ @@ -39,9 +47,13 @@ p4orch_tests_SOURCES = $(ORCHAGENT_DIR)/orch.cpp \ $(P4ORCH_DIR)/acl_rule_manager.cpp \ $(P4ORCH_DIR)/wcmp_manager.cpp \ $(P4ORCH_DIR)/mirror_session_manager.cpp \ - $(top_srcdir)/tests/mock_tests/fake_response_publisher.cpp \ + $(P4ORCH_DIR)/l3_admit_manager.cpp \ + $(P4ORCH_DIR)/ext_tables_manager.cpp \ + $(top_srcdir)/tests/mock_tests/fake_response_publisher.cpp \ fake_portorch.cpp \ fake_crmorch.cpp \ + fake_flexcounterorch.cpp \ + fake_flowcounterrouteorch.cpp \ fake_dbconnector.cpp \ fake_producertable.cpp \ fake_consumerstatetable.cpp \ @@ -52,9 +64,11 @@ p4orch_tests_SOURCES = $(ORCHAGENT_DIR)/orch.cpp \ p4orch_util_test.cpp \ return_code_test.cpp \ route_manager_test.cpp \ + gre_tunnel_manager_test.cpp \ next_hop_manager_test.cpp \ wcmp_manager_test.cpp 
\ acl_manager_test.cpp \ + l3_admit_manager_test.cpp \ router_interface_manager_test.cpp \ neighbor_manager_test.cpp \ mirror_session_manager_test.cpp \ @@ -62,6 +76,7 @@ p4orch_tests_SOURCES = $(ORCHAGENT_DIR)/orch.cpp \ mock_sai_acl.cpp \ mock_sai_hostif.cpp \ mock_sai_serialize.cpp \ + mock_sai_router_interface.cpp \ mock_sai_switch.cpp \ mock_sai_udf.cpp diff --git a/orchagent/p4orch/tests/acl_manager_test.cpp b/orchagent/p4orch/tests/acl_manager_test.cpp index 64ba37e5a3..9b0154ab55 100644 --- a/orchagent/p4orch/tests/acl_manager_test.cpp +++ b/orchagent/p4orch/tests/acl_manager_test.cpp @@ -23,6 +23,8 @@ #include "tokenize.h" #include "vrforch.h" +using ::p4orch::kTableKeyDelimiter; + extern swss::DBConnector *gAppDb; extern swss::DBConnector *gStateDb; extern swss::DBConnector *gCountersDb; @@ -78,8 +80,85 @@ constexpr sai_object_id_t kAclMeterOid2 = 2002; constexpr sai_object_id_t kAclCounterOid1 = 3001; constexpr sai_object_id_t kUdfGroupOid1 = 4001; constexpr sai_object_id_t kUdfMatchOid1 = 5001; +constexpr sai_object_id_t kUdfOid1 = 6001; constexpr char *kAclIngressTableName = "ACL_PUNT_TABLE"; +// Matches the policer sai_attribute_t[] argument. 
+bool MatchSaiPolicerAttribute(const int attrs_size, const sai_meter_type_t expected_type, + const sai_packet_action_t expected_gpa, const sai_packet_action_t expected_ypa, + const sai_packet_action_t expected_rpa, const sai_uint64_t expected_cir, + const sai_uint64_t expected_pir, const sai_uint64_t expected_cbs, + const sai_uint64_t expected_pbs, const sai_attribute_t *attr_list) +{ + if (attr_list == nullptr) + { + return false; + } + for (int i = 0; i < attrs_size; ++i) + { + switch (attr_list[i].id) + { + case SAI_POLICER_ATTR_CBS: + if (attr_list[i].value.u64 != expected_cbs) + { + return false; + } + break; + case SAI_POLICER_ATTR_PBS: + if (attr_list[i].value.u64 != expected_pbs) + { + return false; + } + break; + case SAI_POLICER_ATTR_CIR: + if (attr_list[i].value.u64 != expected_cir) + { + return false; + } + break; + case SAI_POLICER_ATTR_PIR: + if (attr_list[i].value.u64 != expected_pir) + { + return false; + } + break; + case SAI_POLICER_ATTR_GREEN_PACKET_ACTION: + if (attr_list[i].value.s32 != expected_gpa) + { + return false; + } + break; + case SAI_POLICER_ATTR_YELLOW_PACKET_ACTION: + if (attr_list[i].value.s32 != expected_ypa) + { + return false; + } + break; + case SAI_POLICER_ATTR_RED_PACKET_ACTION: + if (attr_list[i].value.s32 != expected_rpa) + { + return false; + } + break; + case SAI_POLICER_ATTR_MODE: + if (attr_list[i].value.s32 != SAI_POLICER_MODE_TR_TCM) + { + return false; + } + break; + case SAI_POLICER_ATTR_METER_TYPE: + if (attr_list[i].value.s32 != expected_type) + { + return false; + } + break; + default: + break; + } + } + return true; +} + // Check the ACL stage sai_attribute_t list for ACL table group bool MatchSaiAttributeAclGroupStage(const sai_acl_stage_t expected_stage, const sai_attribute_t *attr_list) { @@ -464,6 +543,8 @@ std::vector getDefaultTableDefFieldValueTuples() swss::FieldValueTuple{"match/icmp_type", BuildMatchFieldJsonStrKindSaiField(P4_MATCH_ICMP_TYPE)}); attributes.push_back( 
swss::FieldValueTuple{"match/l4_dst_port", BuildMatchFieldJsonStrKindSaiField(P4_MATCH_L4_DST_PORT)}); + attributes.push_back(swss::FieldValueTuple{ + "match/udf2", BuildMatchFieldJsonStrKindUdf("SAI_UDF_BASE_L3", 56, P4_FORMAT_HEX_STRING, 16)}); attributes.push_back(swss::FieldValueTuple{"action/copy_and_set_tc", "[{\"action\":\"SAI_PACKET_ACTION_COPY\",\"packet_color\":\"SAI_" "PACKET_" @@ -484,7 +565,7 @@ P4AclTableDefinitionAppDbEntry getDefaultAclTableDefAppDbEntry() app_db_entry.stage = STAGE_INGRESS; app_db_entry.priority = 234; app_db_entry.meter_unit = P4_METER_UNIT_BYTES; - app_db_entry.counter_unit = P4_COUNTER_UNIT_BYTES; + app_db_entry.counter_unit = P4_COUNTER_UNIT_BOTH; // Match field mapping from P4 program to SAI entry attribute app_db_entry.match_field_lookup["ether_type"] = BuildMatchFieldJsonStrKindSaiField(P4_MATCH_ETHER_TYPE); app_db_entry.match_field_lookup["ether_dst"] = BuildMatchFieldJsonStrKindSaiField(P4_MATCH_DST_MAC, P4_FORMAT_MAC); @@ -553,6 +634,8 @@ P4AclTableDefinitionAppDbEntry getDefaultAclTableDefAppDbEntry() app_db_entry.match_field_lookup["inner_vlan_pri"] = BuildMatchFieldJsonStrKindSaiField(P4_MATCH_INNER_VLAN_PRI); app_db_entry.match_field_lookup["inner_vlan_id"] = BuildMatchFieldJsonStrKindSaiField(P4_MATCH_INNER_VLAN_ID); app_db_entry.match_field_lookup["inner_vlan_cfi"] = BuildMatchFieldJsonStrKindSaiField(P4_MATCH_INNER_VLAN_CFI); + app_db_entry.match_field_lookup["l3_class_id"] = + BuildMatchFieldJsonStrKindSaiField(P4_MATCH_ROUTE_DST_USER_META, P4_FORMAT_HEX_STRING, /*bitwidth=*/6); app_db_entry.match_field_lookup["src_ipv6_64bit"] = BuildMatchFieldJsonStrKindComposite( {nlohmann::json::parse(BuildMatchFieldJsonStrKindSaiField(P4_MATCH_SRC_IPV6_WORD3, P4_FORMAT_IPV6, 32)), nlohmann::json::parse(BuildMatchFieldJsonStrKindSaiField(P4_MATCH_SRC_IPV6_WORD2, P4_FORMAT_IPV6, 32))}, @@ -625,6 +708,24 @@ P4AclTableDefinitionAppDbEntry getDefaultAclTableDefAppDbEntry() 
app_db_entry.action_field_lookup["set_vrf"].push_back({.sai_action = P4_ACTION_SET_VRF, .p4_param_name = "vrf"}); app_db_entry.action_field_lookup["qos_queue"].push_back( {.sai_action = P4_ACTION_SET_QOS_QUEUE, .p4_param_name = "cpu_queue"}); + + // "action/acl_trap" = [ + // {"action": "SAI_PACKET_ACTION_TRAP", "packet_color": + // "SAI_PACKET_COLOR_GREEN"}, + // {"action": "SAI_PACKET_ACTION_DROP", "packet_color": + // "SAI_PACKET_COLOR_YELLOW"}, + // {"action": "SAI_PACKET_ACTION_DROP", "packet_color": + // "SAI_PACKET_COLOR_RED"}, + // {"action": "QOS_QUEUE", "param": "queue"} + // ] + app_db_entry.action_field_lookup["acl_trap"].push_back( + {.sai_action = P4_ACTION_SET_QOS_QUEUE, .p4_param_name = "queue"}); + app_db_entry.packet_action_color_lookup["acl_trap"].push_back( + {.packet_action = P4_PACKET_ACTION_PUNT, .packet_color = P4_PACKET_COLOR_GREEN}); + app_db_entry.packet_action_color_lookup["acl_trap"].push_back( + {.packet_action = P4_PACKET_ACTION_DROP, .packet_color = P4_PACKET_COLOR_YELLOW}); + app_db_entry.packet_action_color_lookup["acl_trap"].push_back( + {.packet_action = P4_PACKET_ACTION_DROP, .packet_color = P4_PACKET_COLOR_RED}); return app_db_entry; } @@ -659,7 +760,7 @@ P4AclRuleAppDbEntry getDefaultAclRuleAppDbEntryWithoutAction() "\"fdf8:f53b:82e4::53\",\"match/ether_dst\": \"AA:BB:CC:DD:EE:FF\", " "\"match/ether_src\": \"AA:BB:CC:DD:EE:FF\", \"match/ipv6_next_header\": " "\"1\", \"match/src_ipv6_64bit\": " - "\"fdf8:f53b:82e4::\",\"match/arp_tpa\": \"0xff11223\",\"match/udf2\": " + "\"fdf8:f53b:82e4::\",\"match/arp_tpa\": \"0xff112231\",\"match/udf2\": " "\"0x9876 & 0xAAAA\",\"priority\":100}"; // ACL meter fields app_db_entry.meter.enabled = true; @@ -686,14 +787,17 @@ class AclManagerTest : public ::testing::Test setUpCoppOrch(); setUpSwitchOrch(); setUpP4Orch(); - // const auto& acl_groups = gSwitchOrch->getAclGroupOidsBindingToSwitch(); + // const auto& acl_groups = gSwitchOrch->getAclGroupsBindingToSwitch(); // EXPECT_EQ(3, 
acl_groups.size()); // EXPECT_NE(acl_groups.end(), acl_groups.find(SAI_ACL_STAGE_INGRESS)); - // EXPECT_EQ(kAclGroupIngressOid, acl_groups.at(SAI_ACL_STAGE_INGRESS)); + // EXPECT_EQ(kAclGroupIngressOid, + // acl_groups.at(SAI_ACL_STAGE_INGRESS).m_saiObjectId); // EXPECT_NE(acl_groups.end(), acl_groups.find(SAI_ACL_STAGE_EGRESS)); - // EXPECT_EQ(kAclGroupEgressOid, acl_groups.at(SAI_ACL_STAGE_EGRESS)); + // EXPECT_EQ(kAclGroupEgressOid, + // acl_groups.at(SAI_ACL_STAGE_EGRESS).m_saiObjectId); // EXPECT_NE(acl_groups.end(), acl_groups.find(SAI_ACL_STAGE_PRE_INGRESS)); - // EXPECT_EQ(kAclGroupLookupOid, acl_groups.at(SAI_ACL_STAGE_PRE_INGRESS)); + // EXPECT_EQ(kAclGroupLookupOid, + // acl_groups.at(SAI_ACL_STAGE_PRE_INGRESS).m_saiObjectId); p4_oid_mapper_->setOID(SAI_OBJECT_TYPE_MIRROR_SESSION, KeyGenerator::generateMirrorSessionKey(gMirrorSession1), kMirrorSessionOid1); p4_oid_mapper_->setOID(SAI_OBJECT_TYPE_MIRROR_SESSION, KeyGenerator::generateMirrorSessionKey(gMirrorSession2), @@ -834,8 +938,6 @@ class AclManagerTest : public ::testing::Test Truly(std::bind(MatchSaiSwitchAttrByAclStage, SAI_SWITCH_ATTR_PRE_INGRESS_ACL, kAclGroupLookupOid, std::placeholders::_1)))) .WillRepeatedly(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_udf_, create_udf_match(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); std::vector p4_tables; gP4Orch = new P4Orch(gAppDb, p4_tables, gVrfOrch, copp_orch_); acl_table_manager_ = gP4Orch->getAclTableManager(); @@ -860,10 +962,14 @@ class AclManagerTest : public ::testing::Test .WillOnce(DoAll(SetArgPointee<0>(kAclTableIngressOid), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_acl_, create_acl_table_group_member(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kAclGroupMemberIngressOid), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_udf_, create_udf_match(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, 
create_udf_group(_, _, _, _)) .Times(3) .WillRepeatedly(DoAll(SetArgPointee<0>(kUdfGroupOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)).Times(3).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<0>(kUdfOid1), Return(SAI_STATUS_SUCCESS))); sai_object_id_t user_defined_trap_oid = gUserDefinedTrapStartOid; AddDefaultUserTrapsSaiCalls(&user_defined_trap_oid); ASSERT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessAddTableRequest(app_db_entry)); @@ -877,16 +983,24 @@ class AclManagerTest : public ::testing::Test } void EnqueueTableTuple(const swss::KeyOpFieldsValuesTuple &entry) { - acl_table_manager_->enqueue(entry); + acl_table_manager_->enqueue(APP_P4RT_ACL_TABLE_DEFINITION_NAME, entry); + } + std::string VerifyTableState(const std::string &key, const std::vector &tuple) + { + return acl_table_manager_->verifyState(key, tuple); } void DrainRuleTuples() { acl_rule_manager_->drain(); } - void EnqueueRuleTuple(const swss::KeyOpFieldsValuesTuple &entry) + void EnqueueRuleTuple(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) + { + acl_rule_manager_->enqueue(table_name, entry); + } + std::string VerifyRuleState(const std::string &key, const std::vector &tuple) { - acl_rule_manager_->enqueue(entry); + return acl_rule_manager_->verifyState(key, tuple); } ReturnCodeOr DeserializeAclTableDefinitionAppDbEntry( @@ -942,6 +1056,11 @@ class AclManagerTest : public ::testing::Test acl_rule_manager_->doAclCounterStatsTask(); } + ReturnCode CreateAclGroupMember(const P4AclTableDefinition &acl_table, sai_object_id_t *acl_grp_mem_oid) + { + return acl_table_manager_->createAclGroupMember(acl_table, acl_grp_mem_oid); + } + StrictMock mock_sai_acl_; StrictMock mock_sai_serialize_; StrictMock mock_sai_policer_; @@ -968,6 +1087,12 @@ TEST_F(AclManagerTest, DrainTableTuplesToProcessSetDelRequestSucceeds) 
.WillOnce(DoAll(SetArgPointee<0>(kAclTableIngressOid), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_acl_, create_acl_table_group_member(_, Eq(gSwitchId), Eq(3), NotNull())) .WillOnce(DoAll(SetArgPointee<0>(kAclGroupMemberIngressOid), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_udf_, create_udf_match(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_udf_, create_udf_group(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfGroupOid1), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfOid1), Return(SAI_STATUS_SUCCESS))); DrainTableTuples(); EXPECT_NE(nullptr, GetAclTable(kAclIngressTableName)); @@ -975,6 +1100,8 @@ TEST_F(AclManagerTest, DrainTableTuplesToProcessSetDelRequestSucceeds) EXPECT_CALL(mock_sai_acl_, remove_acl_table(Eq(kAclTableIngressOid))).WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_acl_, remove_acl_table_group_member(Eq(kAclGroupMemberIngressOid))) .WillOnce(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL(mock_sai_udf_, remove_udf_group(Eq(kUdfGroupOid1))).WillOnce(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL(mock_sai_udf_, remove_udf(_)).WillOnce(Return(SAI_STATUS_SUCCESS)); EnqueueTableTuple(swss::KeyOpFieldsValuesTuple({p4rtAclTableName, DEL_COMMAND, {}})); DrainTableTuples(); EXPECT_EQ(nullptr, GetAclTable(kAclIngressTableName)); @@ -995,6 +1122,13 @@ TEST_F(AclManagerTest, DrainTableTuplesToProcessUpdateRequestExpectFails) .WillOnce(DoAll(SetArgPointee<0>(kAclTableIngressOid), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_acl_, create_acl_table_group_member(_, Eq(gSwitchId), Eq(3), NotNull())) .WillOnce(DoAll(SetArgPointee<0>(kAclGroupMemberIngressOid), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_udf_, create_udf_match(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_udf_, create_udf_group(_, _, _, _)) + 
.WillOnce(DoAll(SetArgPointee<0>(kUdfGroupOid1), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfOid1), Return(SAI_STATUS_SUCCESS))); + DrainTableTuples(); EXPECT_NE(nullptr, GetAclTable(kAclIngressTableName)); @@ -1066,6 +1200,20 @@ TEST_F(AclManagerTest, CreateIngressPuntTableSucceeds) ASSERT_NO_FATAL_FAILURE(AddDefaultIngressTable()); auto acl_table = GetAclTable(kAclIngressTableName); EXPECT_NE(nullptr, acl_table); + sai_object_id_t grp_member_oid; + sai_object_id_t grp_oid; + sai_object_id_t table_oid; + EXPECT_TRUE(p4_oid_mapper_->getOID(SAI_OBJECT_TYPE_ACL_TABLE_GROUP_MEMBER, kAclIngressTableName, &grp_member_oid)); + EXPECT_EQ(kAclGroupMemberIngressOid, grp_member_oid); + EXPECT_TRUE(p4_oid_mapper_->getOID(SAI_OBJECT_TYPE_ACL_TABLE, kAclIngressTableName, &table_oid)); + EXPECT_EQ(kAclTableIngressOid, table_oid); + // The ACL group creation logic is moved out from P4Orch to SwitchOrch + EXPECT_FALSE(p4_oid_mapper_->getOID(SAI_OBJECT_TYPE_ACL_TABLE_GROUP, kAclIngressTableName, &grp_oid)); + const auto &acl_groups = gSwitchOrch->getAclGroupsBindingToSwitch(); + ASSERT_NE(acl_groups.end(), acl_groups.find(SAI_ACL_STAGE_INGRESS)); + EXPECT_EQ(1, acl_groups.at(SAI_ACL_STAGE_INGRESS).m_objsDependingOnMe.size()); + EXPECT_NE(acl_groups.at(SAI_ACL_STAGE_INGRESS).m_objsDependingOnMe.end(), + acl_groups.at(SAI_ACL_STAGE_INGRESS).m_objsDependingOnMe.find(sai_serialize_object_id(grp_member_oid))); } TEST_F(AclManagerTest, CreatePuntTableFailsWhenUserTrapsSaiCallFails) @@ -1156,9 +1304,13 @@ TEST_F(AclManagerTest, CreateIngressPuntTableFailsWhenCapabilityExceeds) auto app_db_entry = getDefaultAclTableDefAppDbEntry(); sai_object_id_t user_defined_trap_oid = gUserDefinedTrapStartOid; AddDefaultUserTrapsSaiCalls(&user_defined_trap_oid); + EXPECT_CALL(mock_sai_udf_, create_udf_match(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, 
create_udf_group(_, _, _, _)) .WillRepeatedly(DoAll(SetArgPointee<0>(kUdfGroupOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)).Times(3).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<0>(kUdfOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_acl_, create_acl_table(_, _, _, _)).WillOnce(Return(SAI_STATUS_INSUFFICIENT_RESOURCES)); EXPECT_CALL(mock_sai_udf_, remove_udf_group(Eq(kUdfGroupOid1))).Times(3).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_udf_, remove_udf(_)).Times(3).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); @@ -1170,9 +1322,13 @@ TEST_F(AclManagerTest, CreateIngressPuntTableFailsWhenFailedToCreateTableGroupMe auto app_db_entry = getDefaultAclTableDefAppDbEntry(); sai_object_id_t user_defined_trap_oid = gUserDefinedTrapStartOid; AddDefaultUserTrapsSaiCalls(&user_defined_trap_oid); + EXPECT_CALL(mock_sai_udf_, create_udf_match(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, create_udf_group(_, _, _, _)) .WillRepeatedly(DoAll(SetArgPointee<0>(kUdfGroupOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)).Times(3).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<0>(kUdfOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_acl_, create_acl_table(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kAclTableIngressOid), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_acl_, create_acl_table_group_member(_, _, _, _)).WillOnce(Return(SAI_STATUS_FAILURE)); @@ -1187,16 +1343,20 @@ TEST_F(AclManagerTest, CreateIngressPuntTableRaisesCriticalStateWhenAclTableReco auto app_db_entry = getDefaultAclTableDefAppDbEntry(); sai_object_id_t user_defined_trap_oid = gUserDefinedTrapStartOid; 
AddDefaultUserTrapsSaiCalls(&user_defined_trap_oid); + EXPECT_CALL(mock_sai_udf_, create_udf_match(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, create_udf_group(_, _, _, _)) .WillRepeatedly(DoAll(SetArgPointee<0>(kUdfGroupOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)).Times(3).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<0>(kUdfOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_acl_, create_acl_table(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kAclTableIngressOid), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_acl_, create_acl_table_group_member(_, _, _, _)).WillOnce(Return(SAI_STATUS_FAILURE)); EXPECT_CALL(mock_sai_acl_, remove_acl_table(_)).WillOnce(Return(SAI_STATUS_FAILURE)); EXPECT_CALL(mock_sai_udf_, remove_udf_group(Eq(kUdfGroupOid1))).Times(3).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_udf_, remove_udf(_)).Times(3).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); - // (TODO): Expect critical state. + // TODO: Expect critical state. 
EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessAddTableRequest(app_db_entry)); } @@ -1205,16 +1365,20 @@ TEST_F(AclManagerTest, CreateIngressPuntTableRaisesCriticalStateWhenUdfGroupReco auto app_db_entry = getDefaultAclTableDefAppDbEntry(); sai_object_id_t user_defined_trap_oid = gUserDefinedTrapStartOid; AddDefaultUserTrapsSaiCalls(&user_defined_trap_oid); + EXPECT_CALL(mock_sai_udf_, create_udf_match(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, create_udf_group(_, _, _, _)) .WillRepeatedly(DoAll(SetArgPointee<0>(kUdfGroupOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)).Times(3).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<0>(kUdfOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_acl_, create_acl_table(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kAclTableIngressOid), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_acl_, create_acl_table_group_member(_, _, _, _)).WillOnce(Return(SAI_STATUS_FAILURE)); EXPECT_CALL(mock_sai_acl_, remove_acl_table(_)).WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_udf_, remove_udf_group(Eq(kUdfGroupOid1))).Times(3).WillRepeatedly(Return(SAI_STATUS_FAILURE)); EXPECT_CALL(mock_sai_udf_, remove_udf(_)).Times(3).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); - // (TODO): Expect critical state x3. + // TODO: Expect critical state x3. 
EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessAddTableRequest(app_db_entry)); } @@ -1223,9 +1387,13 @@ TEST_F(AclManagerTest, CreateIngressPuntTableRaisesCriticalStateWhenUdfRecoveryF auto app_db_entry = getDefaultAclTableDefAppDbEntry(); sai_object_id_t user_defined_trap_oid = gUserDefinedTrapStartOid; AddDefaultUserTrapsSaiCalls(&user_defined_trap_oid); + EXPECT_CALL(mock_sai_udf_, create_udf_match(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, create_udf_group(_, _, _, _)) .WillRepeatedly(DoAll(SetArgPointee<0>(kUdfGroupOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)).Times(3).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<0>(kUdfOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_acl_, create_acl_table(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kAclTableIngressOid), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_acl_, create_acl_table_group_member(_, _, _, _)).WillOnce(Return(SAI_STATUS_FAILURE)); @@ -1233,7 +1401,7 @@ TEST_F(AclManagerTest, CreateIngressPuntTableRaisesCriticalStateWhenUdfRecoveryF // UDF recovery failure will also cause UDF group recovery failure since the // reference count will not be zero if UDF failed to be removed. EXPECT_CALL(mock_sai_udf_, remove_udf(_)).Times(3).WillRepeatedly(Return(SAI_STATUS_FAILURE)); - // (TODO): Expect critical state x6. + // TODO: Expect critical state x6. 
EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessAddTableRequest(app_db_entry)); } @@ -1244,6 +1412,8 @@ TEST_F(AclManagerTest, CreateIngressPuntTableFailsWhenFailedToCreateUdf) AddDefaultUserTrapsSaiCalls(&user_defined_trap_oid); // Fail to create the first UDF, and success to remove the first UDF // group + EXPECT_CALL(mock_sai_udf_, create_udf_match(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, create_udf_group(_, _, _, _)).WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)).WillOnce(Return(SAI_STATUS_FAILURE)); EXPECT_CALL(mock_sai_udf_, remove_udf_group(_)).WillOnce(Return(SAI_STATUS_SUCCESS)); @@ -1258,7 +1428,8 @@ TEST_F(AclManagerTest, CreateIngressPuntTableFailsWhenFailedToCreateUdf) EXPECT_CALL(mock_sai_udf_, create_udf_group(_, _, _, _)) .WillOnce(Return(SAI_STATUS_SUCCESS)) .WillOnce(Return(SAI_STATUS_FAILURE)); - EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)).WillOnce(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, remove_udf_group(_)).WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_udf_, remove_udf(_)).WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessAddTableRequest(app_db_entry)); @@ -1272,10 +1443,11 @@ TEST_F(AclManagerTest, CreateIngressPuntTableFailsWhenFailedToCreateUdf) EXPECT_CALL(mock_sai_udf_, create_udf_group(_, _, _, _)) .WillOnce(Return(SAI_STATUS_SUCCESS)) .WillOnce(Return(SAI_STATUS_FAILURE)); - EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)).WillOnce(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, remove_udf_group(_)).WillOnce(Return(SAI_STATUS_FAILURE)); EXPECT_CALL(mock_sai_udf_, 
remove_udf(_)).WillOnce(Return(SAI_STATUS_SUCCESS)); - // (TODO): Expect critical state. + // TODO: Expect critical state. EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessAddTableRequest(app_db_entry)); EXPECT_FALSE(p4_oid_mapper_->existsOID(SAI_OBJECT_TYPE_UDF, std::string(kAclIngressTableName) + "-arp_tpa-0-base1-offset24")); @@ -1287,9 +1459,10 @@ TEST_F(AclManagerTest, CreateIngressPuntTableFailsWhenFailedToCreateUdf) EXPECT_CALL(mock_sai_udf_, create_udf_group(_, _, _, _)) .WillOnce(Return(SAI_STATUS_SUCCESS)) .WillOnce(Return(SAI_STATUS_FAILURE)); - EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)).WillOnce(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, remove_udf(_)).WillOnce(Return(SAI_STATUS_FAILURE)); - // (TODO): Expect critical state x2. + // TODO: Expect critical state x2. EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessAddTableRequest(app_db_entry)); EXPECT_TRUE(p4_oid_mapper_->existsOID(SAI_OBJECT_TYPE_UDF, std::string(kAclIngressTableName) + "-arp_tpa-0-base1-offset24")); @@ -1642,6 +1815,18 @@ TEST_F(AclManagerTest, CreatePuntTableWithInvalidPacketColorFieldFails) EXPECT_EQ(nullptr, GetAclTable(app_db_entry.acl_table_name)); } +TEST_F(AclManagerTest, CreateAclGroupMemberFailsWhenAclGroupWasNotFound) +{ + ASSERT_NO_FATAL_FAILURE(AddDefaultIngressTable()); + auto *acl_table = GetAclTable(kAclIngressTableName); + acl_table->stage = SAI_ACL_STAGE_INGRESS_MACSEC; + sai_object_id_t grp_member_oid; + EXPECT_CALL(mock_sai_acl_, create_acl_table_group_member(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kAclGroupMemberIngressOid), Return(SAI_STATUS_SUCCESS))); + // TODO: Expect critical state. 
+ EXPECT_EQ(StatusCode::SWSS_RC_INTERNAL, CreateAclGroupMember(*acl_table, &grp_member_oid)); +} + TEST_F(AclManagerTest, DeserializeValidAclTableDefAppDbSucceeds) { auto app_db_entry_or = @@ -1781,7 +1966,15 @@ TEST_F(AclManagerTest, RemoveIngressPuntTableSucceeds) EXPECT_CALL(mock_sai_acl_, remove_acl_table(_)).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_udf_, remove_udf_group(Eq(kUdfGroupOid1))).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_udf_, remove_udf(_)).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); + const auto &acl_groups = gSwitchOrch->getAclGroupsBindingToSwitch(); + ASSERT_NE(acl_groups.end(), acl_groups.find(SAI_ACL_STAGE_INGRESS)); + EXPECT_EQ(1, acl_groups.at(SAI_ACL_STAGE_INGRESS).m_objsDependingOnMe.size()); + EXPECT_NE(acl_groups.at(SAI_ACL_STAGE_INGRESS).m_objsDependingOnMe.end(), + acl_groups.at(SAI_ACL_STAGE_INGRESS) + .m_objsDependingOnMe.find(sai_serialize_object_id(kAclGroupMemberIngressOid))); EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessDeleteTableRequest(kAclIngressTableName)); + ASSERT_NE(acl_groups.end(), acl_groups.find(SAI_ACL_STAGE_INGRESS)); + EXPECT_EQ(0, acl_groups.at(SAI_ACL_STAGE_INGRESS).m_objsDependingOnMe.size()); } TEST_F(AclManagerTest, RemoveIngressPuntRuleFails) @@ -1810,7 +2003,7 @@ TEST_F(AclManagerTest, RemoveIngressPuntRuleFails) const auto &table_name_and_rule_key = concatTableNameAndRuleKey(kAclIngressTableName, acl_rule_key); // Fails to remove ACL rule when rule does not exist p4_oid_mapper_->eraseOID(SAI_OBJECT_TYPE_ACL_ENTRY, table_name_and_rule_key); - // (TODO): Expect critical state. + // TODO: Expect critical state. 
EXPECT_EQ(StatusCode::SWSS_RC_INTERNAL, ProcessDeleteRuleRequest(kAclIngressTableName, acl_rule_key)); p4_oid_mapper_->setOID(SAI_OBJECT_TYPE_ACL_ENTRY, table_name_and_rule_key, kAclIngressRuleOid1); @@ -1832,7 +2025,7 @@ TEST_F(AclManagerTest, RemoveIngressPuntRuleFails) EXPECT_CALL(mock_sai_policer_, remove_policer(Eq(kAclMeterOid1))).WillOnce(Return(SAI_STATUS_OBJECT_IN_USE)); EXPECT_CALL(mock_sai_acl_, create_acl_entry(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kAclIngressRuleOid1), Return(SAI_STATUS_FAILURE))); - // (TODO): Expect critical state. + // TODO: Expect critical state. EXPECT_EQ(StatusCode::SWSS_RC_IN_USE, ProcessDeleteRuleRequest(kAclIngressTableName, acl_rule_key)); // Fails to remove ACL rule when the counter does not exist. @@ -1843,7 +2036,7 @@ TEST_F(AclManagerTest, RemoveIngressPuntRuleFails) .WillOnce(DoAll(SetArgPointee<0>(kAclIngressRuleOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_policer_, create_policer(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kAclMeterOid1), Return(SAI_STATUS_SUCCESS))); - // (TODO): Expect critical state. + // TODO: Expect critical state. EXPECT_EQ(StatusCode::SWSS_RC_INTERNAL, ProcessDeleteRuleRequest(kAclIngressTableName, acl_rule_key)); p4_oid_mapper_->setOID(SAI_OBJECT_TYPE_ACL_COUNTER, table_name_and_rule_key, kAclCounterOid1, 1); p4_oid_mapper_->setOID(SAI_OBJECT_TYPE_POLICER, table_name_and_rule_key, kAclMeterOid1); @@ -1863,7 +2056,7 @@ TEST_F(AclManagerTest, RemoveIngressPuntRuleFails) .WillOnce(DoAll(SetArgPointee<0>(kAclIngressRuleOid1), Return(SAI_STATUS_FAILURE))); EXPECT_CALL(mock_sai_policer_, create_policer(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kAclMeterOid1), Return(SAI_STATUS_SUCCESS))); - // (TODO): Expect critical state. + // TODO: Expect critical state. 
EXPECT_EQ(StatusCode::SWSS_RC_IN_USE, ProcessDeleteRuleRequest(kAclIngressTableName, acl_rule_key)); // Fails to remove ACL rule when sai_acl_api->remove_acl_counter() fails and @@ -1871,7 +2064,7 @@ TEST_F(AclManagerTest, RemoveIngressPuntRuleFails) EXPECT_CALL(mock_sai_acl_, remove_acl_counter(_)).WillOnce(Return(SAI_STATUS_OBJECT_IN_USE)); EXPECT_CALL(mock_sai_policer_, create_policer(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kAclMeterOid1), Return(SAI_STATUS_FAILURE))); - // (TODO): Expect critical state. + // TODO: Expect critical state. EXPECT_EQ(StatusCode::SWSS_RC_IN_USE, ProcessDeleteRuleRequest(kAclIngressTableName, acl_rule_key)); // Fails to remove ACL rule when the meter does not exist. @@ -1879,7 +2072,7 @@ TEST_F(AclManagerTest, RemoveIngressPuntRuleFails) // not exist. EXPECT_CALL(mock_sai_acl_, create_acl_entry(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kAclIngressRuleOid1), Return(SAI_STATUS_SUCCESS))); - // (TODO): Expect critical state. + // TODO: Expect critical state. EXPECT_EQ(StatusCode::SWSS_RC_INTERNAL, ProcessDeleteRuleRequest(kAclIngressTableName, acl_rule_key)); p4_oid_mapper_->setOID(SAI_OBJECT_TYPE_POLICER, table_name_and_rule_key, kAclMeterOid1); } @@ -1913,7 +2106,7 @@ TEST_F(AclManagerTest, RemoveAclTableFailsWhenTableDoesNotExist) { ASSERT_NO_FATAL_FAILURE(AddDefaultIngressTable()); p4_oid_mapper_->eraseOID(SAI_OBJECT_TYPE_ACL_TABLE, kAclIngressTableName); - // (TODO): Expect critical state. + // TODO: Expect critical state. 
EXPECT_EQ(StatusCode::SWSS_RC_INTERNAL, ProcessDeleteTableRequest(kAclIngressTableName)); } @@ -1940,7 +2133,7 @@ TEST_F(AclManagerTest, RemoveAclTableRaisesCriticalStateWhenAclGroupMemberRecove EXPECT_CALL(mock_sai_acl_, remove_acl_table_group_member(_)).WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_acl_, remove_acl_table(_)).WillOnce(Return(SAI_STATUS_FAILURE)); EXPECT_CALL(mock_sai_acl_, create_acl_table_group_member(_, _, _, _)).WillOnce(Return(SAI_STATUS_FAILURE)); - // (TODO): Expect critical state. + // TODO: Expect critical state. EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessDeleteTableRequest(kAclIngressTableName)); } @@ -1978,7 +2171,8 @@ TEST_F(AclManagerTest, RemoveAclTableFailsWhenRemoveUdfGroupSaiCallFails) EXPECT_CALL(mock_sai_acl_, remove_acl_table(_)).WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_udf_, remove_udf(_)).WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_udf_, remove_udf_group(_)).WillOnce(Return(SAI_STATUS_FAILURE)); - EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)).WillOnce(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_acl_, create_acl_table(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kAclTableIngressOid), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_acl_, create_acl_table_group_member(_, _, _, _)) @@ -1998,7 +2192,7 @@ TEST_F(AclManagerTest, RemoveAclTableFailsRaisesCriticalStateWhenUdfRecoveryFail .WillOnce(DoAll(SetArgPointee<0>(kAclTableIngressOid), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_acl_, create_acl_table_group_member(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kAclGroupMemberIngressOid), Return(SAI_STATUS_SUCCESS))); - // (TODO): Expect critical state. + // TODO: Expect critical state. 
EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessDeleteTableRequest(kAclIngressTableName)); } @@ -2012,7 +2206,7 @@ TEST_F(AclManagerTest, RemoveAclTableFailsRaisesCriticalStateWhenUdfGroupRecover // If UDF group recovery fails, UDF recovery and ACL table recovery will also // fail since they depend on the UDF group. EXPECT_CALL(mock_sai_udf_, create_udf_group(_, _, _, _)).WillOnce(Return(SAI_STATUS_FAILURE)); - // (TODO): Expect critical state x3. + // TODO: Expect critical state x3. EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessDeleteTableRequest(kAclIngressTableName)); } @@ -2036,7 +2230,8 @@ TEST_F(AclManagerTest, RemoveAclTableFailsWhenUdfGroupHasNonZeroRefCount) EXPECT_CALL(mock_sai_acl_, remove_acl_table_group_member(_)).WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_acl_, remove_acl_table(_)).WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_udf_, remove_udf(_)).WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)).WillOnce(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_acl_, create_acl_table(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kAclTableIngressOid), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_acl_, create_acl_table_group_member(_, _, _, _)) @@ -2045,6 +2240,16 @@ TEST_F(AclManagerTest, RemoveAclTableFailsWhenUdfGroupHasNonZeroRefCount) EXPECT_EQ(StatusCode::SWSS_RC_IN_USE, ProcessDeleteTableRequest(kAclIngressTableName)); } +TEST_F(AclManagerTest, RemoveAclTableFailsWhenAclGroupWasNotFound) +{ + ASSERT_NO_FATAL_FAILURE(AddDefaultIngressTable()); + auto *acl_table = GetAclTable(kAclIngressTableName); + acl_table->stage = SAI_ACL_STAGE_INGRESS_MACSEC; + EXPECT_CALL(mock_sai_acl_, remove_acl_table_group_member(_)).WillOnce(Return(SAI_STATUS_SUCCESS)); + // TODO: Expect critical state. 
+ EXPECT_EQ(StatusCode::SWSS_RC_INTERNAL, ProcessDeleteTableRequest(kAclIngressTableName)); +} + TEST_F(AclManagerTest, RemoveAclGroupsSucceedsAfterCleanup) { // Create ACL table @@ -2099,9 +2304,13 @@ TEST_F(AclManagerTest, DrainRuleTuplesToProcessSetRequestSucceeds) .WillOnce(DoAll(SetArgPointee<0>(kAclTableIngressOid), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_acl_, create_acl_table_group_member(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kAclGroupMemberIngressOid), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_udf_, create_udf_match(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, create_udf_group(_, _, _, _)) .WillRepeatedly(DoAll(SetArgPointee<0>(kUdfGroupOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)).Times(3).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<0>(kUdfOid1), Return(SAI_STATUS_SUCCESS))); sai_object_id_t user_defined_trap_oid = gUserDefinedTrapStartOid; AddDefaultUserTrapsSaiCalls(&user_defined_trap_oid); ASSERT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessAddTableRequest(app_db_entry)); @@ -2111,10 +2320,12 @@ TEST_F(AclManagerTest, DrainRuleTuplesToProcessSetRequestSucceeds) "ipv6_dst\":\"fdf8:f53b:82e4::53 & " "fdf8:f53b:82e4::53\",\"priority\":15}"; const auto &rule_tuple_key = std::string(kAclIngressTableName) + kTableKeyDelimiter + acl_rule_json_key; - EnqueueRuleTuple(swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, getDefaultRuleFieldValueTuples()})); + EnqueueRuleTuple(std::string(kAclIngressTableName), + swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, getDefaultRuleFieldValueTuples()})); // Update request on exact rule without change will not need SAI call - EnqueueRuleTuple(swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, getDefaultRuleFieldValueTuples()})); + 
EnqueueRuleTuple(std::string(kAclIngressTableName), + swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, getDefaultRuleFieldValueTuples()})); // Drain rule tuples to process SET request EXPECT_CALL(mock_sai_acl_, create_acl_entry(_, _, _, _)) @@ -2139,12 +2350,14 @@ TEST_F(AclManagerTest, DrainRuleTuplesToProcessSetDelRequestSucceeds) "ipv6_dst\":\"fdf8:f53b:82e4::53 & " "fdf8:f53b:82e4::53\",\"priority\":15}"; const auto &rule_tuple_key = std::string(kAclIngressTableName) + kTableKeyDelimiter + acl_rule_json_key; - EnqueueRuleTuple(swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, attributes})); + EnqueueRuleTuple(std::string(kAclIngressTableName), + swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, attributes})); // Drain ACL rule tuple to process SET request EXPECT_CALL(mock_sai_acl_, create_acl_entry(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kAclIngressRuleOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_acl_, create_acl_counter(_, _, _, _)).WillOnce(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL(mock_sai_acl_, create_acl_counter(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kAclCounterOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_policer_, create_policer(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kAclMeterOid1), Return(SAI_STATUS_SUCCESS))); DrainRuleTuples(); @@ -2155,6 +2368,11 @@ TEST_F(AclManagerTest, DrainRuleTuplesToProcessSetDelRequestSucceeds) counters[0] = 100; // green_bytes }), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_acl_, get_acl_counter_attribute(Eq(kAclCounterOid1), _, _)) + .WillOnce(DoAll(Invoke([](sai_object_id_t acl_counter_id, uint32_t attr_count, sai_attribute_t *counter_attr) { + counter_attr[0].value.u64 = 100; // bytes + }), + Return(SAI_STATUS_SUCCESS))); DoAclCounterStatsTask(); auto counters_table = std::make_unique(gCountersDb, std::string(COUNTERS_TABLE) + DEFAULT_KEY_SEPARATOR + APP_P4RT_TABLE_NAME); @@ -2170,7 +2388,8 @@ TEST_F(AclManagerTest, 
DrainRuleTuplesToProcessSetDelRequestSucceeds) // Drain ACL rule tuple to process DEL request attributes.clear(); - EnqueueRuleTuple(swss::KeyOpFieldsValuesTuple({rule_tuple_key, DEL_COMMAND, attributes})); + EnqueueRuleTuple(std::string(kAclIngressTableName), + swss::KeyOpFieldsValuesTuple({rule_tuple_key, DEL_COMMAND, attributes})); EXPECT_CALL(mock_sai_acl_, remove_acl_entry(Eq(kAclIngressRuleOid1))).WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_acl_, remove_acl_counter(_)).WillOnce(Return(SAI_STATUS_SUCCESS)); @@ -2186,7 +2405,8 @@ TEST_F(AclManagerTest, DrainRuleTuplesToProcessSetRequestInvalidTableNameRuleKey "ipv6_dst\":\"fdf8:f53b:82e4::53 & " "fdf8:f53b:82e4::53\",\"priority\":15}"; auto rule_tuple_key = std::string("INVALID_TABLE_NAME") + kTableKeyDelimiter + acl_rule_json_key; - EnqueueRuleTuple(swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, attributes})); + EnqueueRuleTuple(std::string("INVALID_TABLE_NAME"), + swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, attributes})); // Drain rule tuple to process SET request with invalid ACL table name: // "INVALID_TABLE_NAME" DrainRuleTuples(); @@ -2202,7 +2422,8 @@ TEST_F(AclManagerTest, DrainRuleTuplesToProcessSetRequestInvalidTableNameRuleKey rule_tuple_key = std::string(kAclIngressTableName) + kTableKeyDelimiter + acl_rule_json_key; acl_rule_key = "match/ether_type=0x0800:match/ipv6_dst=fdf8:f53b:82e4::53 & " "fdf8:f53b:82e4::53"; - EnqueueRuleTuple(swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, attributes})); + EnqueueRuleTuple(std::string(kAclIngressTableName), + swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, attributes})); // Drain rule tuple to process SET request without priority field in rule // JSON key DrainRuleTuples(); @@ -2264,7 +2485,8 @@ TEST_F(AclManagerTest, DrainRuleTuplesWithInvalidCommand) "ipv6_dst\":\"fdf8:f53b:82e4::53 & " "fdf8:f53b:82e4::53\",\"priority\":15}"; const auto &rule_tuple_key = std::string(kAclIngressTableName) + 
kTableKeyDelimiter + acl_rule_json_key; - EnqueueRuleTuple(swss::KeyOpFieldsValuesTuple({rule_tuple_key, "INVALID_COMMAND", attributes})); + EnqueueRuleTuple(std::string(kAclIngressTableName), + swss::KeyOpFieldsValuesTuple({rule_tuple_key, "INVALID_COMMAND", attributes})); DrainRuleTuples(); const auto &acl_rule_key = "match/ether_type=0x0800:match/ipv6_dst=fdf8:f53b:82e4::53 & " "fdf8:f53b:82e4::53:priority=15"; @@ -2392,7 +2614,7 @@ TEST_F(AclManagerTest, CreateAclRuleWithInvalidSaiMatchFails) acl_table->udf_group_attr_index_lookup.clear(); app_db_entry.match_fvs["arp_tpa"] = "0xff112231"; acl_rule_key = KeyGenerator::generateAclRuleKey(app_db_entry.match_fvs, "100"); - // (TODO): Expect critical state. + // TODO: Expect critical state. EXPECT_EQ(StatusCode::SWSS_RC_INTERNAL, ProcessAddRuleRequest(acl_rule_key, app_db_entry)); app_db_entry.match_fvs.erase("arp_tpa"); acl_table->udf_group_attr_index_lookup = saved_udf_group_attr_index_lookup; @@ -2499,6 +2721,35 @@ TEST_F(AclManagerTest, AclRuleWithValidMatchFields) EXPECT_EQ(2, acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_FIELD_OUT_PORTS].aclfield.data.objlist.count); EXPECT_EQ(0x9988, acl_rule->out_ports_oids[0]); EXPECT_EQ(0x56789abcdef, acl_rule->out_ports_oids[1]); + + // Verify SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN + EXPECT_EQ(2, acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN].aclfield.data.u8list.count); + EXPECT_EQ(acl_rule->udf_data_masks[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN].data[0], + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN].aclfield.data.u8list.list[0]); + EXPECT_EQ(acl_rule->udf_data_masks[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN].data[1], + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN].aclfield.data.u8list.list[1]); + EXPECT_EQ(acl_rule->udf_data_masks[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN].mask[0], + 
acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN].aclfield.mask.u8list.list[0]); + EXPECT_EQ(acl_rule->udf_data_masks[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN].mask[1], + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN].aclfield.mask.u8list.list[1]); + EXPECT_EQ(0xff, acl_rule->udf_data_masks[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN].data[0]); + EXPECT_EQ(0x11, acl_rule->udf_data_masks[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN].data[1]); + EXPECT_EQ(0xff, acl_rule->udf_data_masks[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN].mask[0]); + EXPECT_EQ(0xff, acl_rule->udf_data_masks[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN].mask[1]); + // Verify SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_1 + EXPECT_EQ(2, acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_1].aclfield.data.objlist.count); + EXPECT_EQ(acl_rule->udf_data_masks[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_1].data[0], + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_1].aclfield.data.u8list.list[0]); + EXPECT_EQ(acl_rule->udf_data_masks[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_1].data[1], + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_1].aclfield.data.u8list.list[1]); + EXPECT_EQ(acl_rule->udf_data_masks[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_1].mask[0], + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_1].aclfield.mask.u8list.list[0]); + EXPECT_EQ(acl_rule->udf_data_masks[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_1].mask[1], + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_1].aclfield.mask.u8list.list[1]); + EXPECT_EQ(0x22, acl_rule->udf_data_masks[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_1].data[0]); + EXPECT_EQ(0x31, acl_rule->udf_data_masks[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_1].data[1]); + EXPECT_EQ(0xff, acl_rule->udf_data_masks[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_1].mask[0]); + EXPECT_EQ(0xff, 
acl_rule->udf_data_masks[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_1].mask[1]); EXPECT_EQ(0xaabbccdd, acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_FIELD_IN_PORT].aclfield.data.oid); EXPECT_EQ(0x56789abcdff, acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_FIELD_OUT_PORT].aclfield.data.oid); EXPECT_EQ(0x2, acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_FIELD_TCP_FLAGS].aclfield.data.u8); @@ -2550,6 +2801,54 @@ TEST_F(AclManagerTest, AclRuleWithValidMatchFields) EXPECT_EQ(0x20, acl_rule->action_fvs[SAI_ACL_ENTRY_ATTR_ACTION_SET_TC].aclaction.parameter.u8); } +TEST_F(AclManagerTest, AclRuleWithColorPacketActionsButNoRateLimit) +{ + ASSERT_NO_FATAL_FAILURE(AddDefaultIngressTable()); + + // Create app_db_entry with color packet action, but no rate limit attributes + P4AclRuleAppDbEntry app_db_entry; + app_db_entry.acl_table_name = kAclIngressTableName; + app_db_entry.priority = 100; + // ACL rule match fields + app_db_entry.match_fvs["ether_type"] = "0x0800"; + app_db_entry.match_fvs["ipv6_dst"] = "fdf8:f53b:82e4::53"; + app_db_entry.match_fvs["ether_dst"] = "AA:BB:CC:DD:EE:FF"; + app_db_entry.match_fvs["ether_src"] = "AA:BB:CC:DD:EE:FF"; + app_db_entry.match_fvs["ipv6_next_header"] = "1"; + app_db_entry.match_fvs["src_ipv6_64bit"] = "fdf8:f53b:82e4::"; + app_db_entry.match_fvs["arp_tpa"] = "0xff112231"; + app_db_entry.match_fvs["udf2"] = "0x9876 & 0xAAAA"; + app_db_entry.db_key = "ACL_PUNT_TABLE:{\"match/ether_type\": \"0x0800\",\"match/ipv6_dst\": " + "\"fdf8:f53b:82e4::53\",\"match/ether_dst\": \"AA:BB:CC:DD:EE:FF\", " + "\"match/ether_src\": \"AA:BB:CC:DD:EE:FF\", \"match/ipv6_next_header\": " + "\"1\", \"match/src_ipv6_64bit\": " + "\"fdf8:f53b:82e4::\",\"match/arp_tpa\": \"0xff112231\",\"match/udf2\": " + "\"0x9876 & 0xAAAA\",\"priority\":100}"; + + const auto &acl_rule_key = KeyGenerator::generateAclRuleKey(app_db_entry.match_fvs, "100"); + + // Set user defined trap for QOS_QUEUE, and color packet actions in meter + int queue_num = 8; + app_db_entry.action = "acl_trap"; + 
app_db_entry.action_param_fvs["queue"] = std::to_string(queue_num); + // Install rule + EXPECT_CALL(mock_sai_acl_, create_acl_entry(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kAclIngressRuleOid1), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_acl_, create_acl_counter(_, _, _, _)).WillOnce(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL(mock_sai_policer_, + create_policer(_, Eq(gSwitchId), Eq(9), + Truly(std::bind(MatchSaiPolicerAttribute, 9, SAI_METER_TYPE_PACKETS, + SAI_PACKET_ACTION_TRAP, SAI_PACKET_ACTION_DROP, SAI_PACKET_ACTION_DROP, + 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, std::placeholders::_1)))) + .WillOnce(DoAll(SetArgPointee<0>(kAclMeterOid1), Return(SAI_STATUS_SUCCESS))); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessAddRuleRequest(acl_rule_key, app_db_entry)); + auto acl_rule = GetAclRule(kAclIngressTableName, acl_rule_key); + ASSERT_NE(nullptr, acl_rule); + // Check action field value + EXPECT_EQ(gUserDefinedTrapStartOid + queue_num, + acl_rule->action_fvs[SAI_ACL_ENTRY_ATTR_ACTION_SET_USER_TRAP_ID].aclaction.parameter.oid); +} + TEST_F(AclManagerTest, AclRuleWithValidAction) { ASSERT_NO_FATAL_FAILURE(AddDefaultIngressTable()); @@ -3214,11 +3513,11 @@ TEST_F(AclManagerTest, UpdateAclRuleWithActionMeterChange) EXPECT_EQ(2, acl_rule->action_fvs[SAI_ACL_ENTRY_ATTR_ACTION_SET_TC].aclaction.parameter.u8); EXPECT_TRUE(acl_rule->action_fvs[SAI_ACL_ENTRY_ATTR_ACTION_SET_TC].aclaction.enable); EXPECT_TRUE(p4_oid_mapper_->getOID(SAI_OBJECT_TYPE_POLICER, table_name_and_rule_key, &meter_oid)); - EXPECT_FALSE(acl_rule->meter.enabled); - EXPECT_EQ(0, acl_rule->meter.cburst); - EXPECT_EQ(0, acl_rule->meter.cir); - EXPECT_EQ(0, acl_rule->meter.pburst); - EXPECT_EQ(0, acl_rule->meter.pir); + EXPECT_TRUE(acl_rule->meter.enabled); + EXPECT_EQ(0x7fffffff, acl_rule->meter.cburst); + EXPECT_EQ(0x7fffffff, acl_rule->meter.cir); + EXPECT_EQ(0x7fffffff, acl_rule->meter.pburst); + EXPECT_EQ(0x7fffffff, acl_rule->meter.pir); // Update meter: enable rate 
limiting and reset green packet action app_db_entry.action = "punt_and_set_tc"; @@ -3635,7 +3934,7 @@ TEST_F(AclManagerTest, UpdateAclRuleFailsWhenSaiCallFails) .WillOnce(Return(SAI_STATUS_SUCCESS)) .WillOnce(Return(SAI_STATUS_FAILURE)) .WillOnce(Return(SAI_STATUS_FAILURE)); - // (TODO): Expect critical state. + // TODO: Expect critical state. EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessUpdateRuleRequest(app_db_entry, *acl_rule)); acl_rule = GetAclRule(kAclIngressTableName, acl_rule_key); ASSERT_NE(nullptr, acl_rule); @@ -3710,7 +4009,7 @@ TEST_F(AclManagerTest, UpdateAclRuleFailsWhenSaiCallFails) .WillOnce(Return(SAI_STATUS_FAILURE)); EXPECT_CALL(mock_sai_acl_, set_acl_entry_attribute(Eq(kAclIngressRuleOid1), _)) .WillOnce(Return(SAI_STATUS_NOT_SUPPORTED)); - // (TODO): Expect critical state. + // TODO: Expect critical state. EXPECT_EQ(StatusCode::SWSS_RC_UNIMPLEMENTED, ProcessUpdateRuleRequest(app_db_entry, *acl_rule)); acl_rule = GetAclRule(kAclIngressTableName, acl_rule_key); ASSERT_NE(nullptr, acl_rule); @@ -3738,7 +4037,7 @@ TEST_F(AclManagerTest, UpdateAclRuleFailsWhenSaiCallFails) .WillOnce(Return(SAI_STATUS_SUCCESS)) .WillOnce(Return(SAI_STATUS_SUCCESS)) .WillOnce(Return(SAI_STATUS_FAILURE)); - // (TODO): Expect critical state. + // TODO: Expect critical state. EXPECT_EQ(StatusCode::SWSS_RC_IN_USE, ProcessUpdateRuleRequest(app_db_entry, *acl_rule)); acl_rule = GetAclRule(kAclIngressTableName, acl_rule_key); ASSERT_NE(nullptr, acl_rule); @@ -3801,7 +4100,7 @@ TEST_F(AclManagerTest, UpdateAclRuleFailsWhenSaiCallFails) EXPECT_CALL(mock_sai_acl_, set_acl_entry_attribute(Eq(kAclIngressRuleOid1), _)) .WillOnce(Return(SAI_STATUS_OBJECT_IN_USE)); EXPECT_CALL(mock_sai_policer_, remove_policer(Eq(kAclMeterOid2))).WillOnce(Return(SAI_STATUS_FAILURE)); - // (TODO): Expect critical state. + // TODO: Expect critical state. 
EXPECT_EQ(StatusCode::SWSS_RC_IN_USE, ProcessUpdateRuleRequest(app_db_entry, *acl_rule)); acl_rule = GetAclRule(kAclIngressTableName, acl_rule_key); ASSERT_NE(nullptr, acl_rule); @@ -3920,7 +4219,7 @@ TEST_F(AclManagerTest, CreateAclRuleWithInvalidUnitsInTableFails) // Invalid counter unit acl_table->counter_unit = "INVALID"; EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ProcessAddRuleRequest(acl_rule_key, app_db_entry)); - acl_table->counter_unit = P4_COUNTER_UNIT_BYTES; + acl_table->counter_unit = P4_COUNTER_UNIT_BOTH; } TEST_F(AclManagerTest, CreateAclRuleFailsWhenSaiCallFails) @@ -3971,7 +4270,7 @@ TEST_F(AclManagerTest, CreateAclRuleFailsWhenSaiCallFails) EXPECT_CALL(mock_sai_acl_, create_acl_entry(_, _, _, _)).WillOnce(Return(SAI_STATUS_NOT_IMPLEMENTED)); EXPECT_CALL(mock_sai_acl_, remove_acl_counter(_)).WillOnce(Return(SAI_STATUS_FAILURE)); EXPECT_CALL(mock_sai_policer_, remove_policer(Eq(kAclMeterOid1))).WillOnce(Return(SAI_STATUS_FAILURE)); - // (TODO): Expect critical state x2. + // TODO: Expect critical state x2. EXPECT_EQ(StatusCode::SWSS_RC_UNIMPLEMENTED, ProcessAddRuleRequest(acl_rule_key, app_db_entry)); // Fails to create ACL rule when sai_acl_api->create_policer() fails @@ -3989,7 +4288,7 @@ TEST_F(AclManagerTest, CreateAclRuleFailsWhenSaiCallFails) EXPECT_CALL(mock_sai_policer_, create_policer(_, _, _, _)).WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_acl_, create_acl_counter(_, _, _, _)).WillOnce(Return(SAI_STATUS_NOT_IMPLEMENTED)); EXPECT_CALL(mock_sai_policer_, remove_policer(_)).WillOnce(Return(SAI_STATUS_FAILURE)); - // (TODO): Expect critical state. + // TODO: Expect critical state. 
EXPECT_EQ(StatusCode::SWSS_RC_UNIMPLEMENTED, ProcessAddRuleRequest(acl_rule_key, app_db_entry)); } @@ -4043,7 +4342,7 @@ TEST_F(AclManagerTest, DeleteAclRuleWhenTableDoesNotExistFails) p4_oid_mapper_->eraseOID(SAI_OBJECT_TYPE_ACL_TABLE, kAclIngressTableName); EXPECT_CALL(mock_sai_acl_, remove_acl_entry(_)).WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_policer_, remove_policer(_)).WillOnce(Return(SAI_STATUS_SUCCESS)); - // (TODO): Expect critical state. + // TODO: Expect critical state. EXPECT_EQ(StatusCode::SWSS_RC_INTERNAL, ProcessDeleteRuleRequest(kAclIngressTableName, acl_rule_key)); } @@ -4055,9 +4354,13 @@ TEST_F(AclManagerTest, DoAclCounterStatsTaskSucceeds) .WillOnce(DoAll(SetArgPointee<0>(kAclTableIngressOid), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_acl_, create_acl_table_group_member(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kAclGroupMemberIngressOid), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_udf_, create_udf_match(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_udf_, create_udf_group(_, _, _, _)) .WillRepeatedly(DoAll(SetArgPointee<0>(kUdfGroupOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)).Times(3).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArgPointee<0>(kUdfOid1), Return(SAI_STATUS_SUCCESS))); sai_object_id_t user_defined_trap_oid = gUserDefinedTrapStartOid; AddDefaultUserTrapsSaiCalls(&user_defined_trap_oid); ASSERT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessAddTableRequest(app_db_def_entry)); @@ -4121,6 +4424,12 @@ TEST_F(AclManagerTest, DoAclCounterStatsTaskSucceeds) counters[1] = 100; // green_bytes }), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_acl_, get_acl_counter_attribute(Eq(kAclCounterOid1), _, _)) + .WillOnce(DoAll(Invoke([](sai_object_id_t acl_counter_id, uint32_t attr_count, sai_attribute_t 
*counter_attr) { + counter_attr[0].value.u64 = 10; // packets + counter_attr[1].value.u64 = 100; // bytes + }), + Return(SAI_STATUS_SUCCESS))); DoAclCounterStatsTask(); // Only green_packets and green_bytes are populated in COUNTERS_DB EXPECT_TRUE(counters_table->hget(counter_stats_key, P4_COUNTER_STATS_GREEN_PACKETS, stats)); @@ -4132,8 +4441,10 @@ TEST_F(AclManagerTest, DoAclCounterStatsTaskSucceeds) EXPECT_FALSE(counters_table->hget(counter_stats_key, P4_COUNTER_STATS_RED_BYTES, stats)); EXPECT_FALSE(counters_table->hget(counter_stats_key, P4_COUNTER_STATS_YELLOW_PACKETS, stats)); EXPECT_FALSE(counters_table->hget(counter_stats_key, P4_COUNTER_STATS_YELLOW_BYTES, stats)); - EXPECT_FALSE(counters_table->hget(counter_stats_key, P4_COUNTER_STATS_PACKETS, stats)); - EXPECT_FALSE(counters_table->hget(counter_stats_key, P4_COUNTER_STATS_BYTES, stats)); + EXPECT_TRUE(counters_table->hget(counter_stats_key, P4_COUNTER_STATS_PACKETS, stats)); + EXPECT_EQ("10", stats); + EXPECT_TRUE(counters_table->hget(counter_stats_key, P4_COUNTER_STATS_BYTES, stats)); + EXPECT_EQ("100", stats); // Remove rule EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessDeleteRuleRequest(kAclIngressTableName, acl_rule_key)); @@ -4153,6 +4464,12 @@ TEST_F(AclManagerTest, DoAclCounterStatsTaskSucceeds) counters[3] = 300; // red_bytes }), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_acl_, get_acl_counter_attribute(Eq(kAclCounterOid1), _, _)) + .WillOnce(DoAll(Invoke([](sai_object_id_t acl_counter_id, uint32_t attr_count, sai_attribute_t *counter_attr) { + counter_attr[0].value.u64 = 50; // packets + counter_attr[1].value.u64 = 500; // bytes + }), + Return(SAI_STATUS_SUCCESS))); DoAclCounterStatsTask(); // Only yellow/red_packets and yellow/red_bytes are populated in COUNTERS_DB EXPECT_TRUE(counters_table->hget(counter_stats_key, P4_COUNTER_STATS_YELLOW_PACKETS, stats)); @@ -4165,8 +4482,10 @@ TEST_F(AclManagerTest, DoAclCounterStatsTaskSucceeds) EXPECT_EQ("300", stats); 
EXPECT_FALSE(counters_table->hget(counter_stats_key, P4_COUNTER_STATS_GREEN_PACKETS, stats)); EXPECT_FALSE(counters_table->hget(counter_stats_key, P4_COUNTER_STATS_GREEN_BYTES, stats)); - EXPECT_FALSE(counters_table->hget(counter_stats_key, P4_COUNTER_STATS_PACKETS, stats)); - EXPECT_FALSE(counters_table->hget(counter_stats_key, P4_COUNTER_STATS_BYTES, stats)); + EXPECT_TRUE(counters_table->hget(counter_stats_key, P4_COUNTER_STATS_PACKETS, stats)); + EXPECT_EQ("50", stats); + EXPECT_TRUE(counters_table->hget(counter_stats_key, P4_COUNTER_STATS_BYTES, stats)); + EXPECT_EQ("500", stats); // Remove rule EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessDeleteRuleRequest(kAclIngressTableName, acl_rule_key)); EXPECT_EQ(nullptr, GetAclRule(kAclIngressTableName, acl_rule_key)); @@ -4249,5 +4568,731 @@ TEST_F(AclManagerTest, DISABLED_InitBindGroupToSwitchFails) EXPECT_THROW(new SwitchOrch(gAppDb, switch_tables, stateDbSwitchTable), std::runtime_error); } +TEST_F(AclManagerTest, AclTableVerifyStateTest) +{ + const auto &p4rtAclTableName = + std::string(APP_P4RT_ACL_TABLE_DEFINITION_NAME) + kTableKeyDelimiter + kAclIngressTableName; + std::vector attributes = getDefaultTableDefFieldValueTuples(); + EnqueueTableTuple(swss::KeyOpFieldsValuesTuple({p4rtAclTableName, SET_COMMAND, attributes})); + EXPECT_CALL(mock_sai_acl_, create_acl_table(_, Eq(gSwitchId), Gt(2), + Truly(std::bind(MatchSaiAttributeAclTableStage, SAI_ACL_STAGE_INGRESS, + std::placeholders::_1)))) + .WillOnce(DoAll(SetArgPointee<0>(kAclTableIngressOid), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_acl_, create_acl_table_group_member(_, Eq(gSwitchId), Eq(3), NotNull())) + .WillOnce(DoAll(SetArgPointee<0>(kAclGroupMemberIngressOid), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_udf_, create_udf_match(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_udf_, create_udf_group(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfGroupOid1), 
Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfOid1), Return(SAI_STATUS_SUCCESS))); + DrainTableTuples(); + auto *acl_table = GetAclTable(kAclIngressTableName); + EXPECT_NE(acl_table, nullptr); + + // Setup ASIC DB. + swss::Table table(nullptr, "ASIC_STATE"); + table.set("SAI_OBJECT_TYPE_ACL_TABLE:oid:0x7000000000606", + std::vector{ + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_ACL_STAGE", "SAI_ACL_STAGE_INGRESS"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_SIZE", "123"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_FIELD_ACL_IP_TYPE", "true"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_FIELD_DST_MAC", "true"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_FIELD_ETHER_TYPE", "true"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_FIELD_ICMP_TYPE", "true"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_FIELD_DST_IPV6", "true"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_FIELD_IPV6_NEXT_HEADER", "true"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_FIELD_L4_DST_PORT", "true"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_FIELD_TTL", "true"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_USER_DEFINED_FIELD_GROUP_MIN", "oid:0xfa1"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_ACL_ACTION_TYPE_LIST", "1:SAI_ACL_ACTION_TYPE_COUNTER"}}); + table.set("SAI_OBJECT_TYPE_ACL_TABLE_GROUP_MEMBER:oid:0xc000000000607", + std::vector{ + swss::FieldValueTuple{"SAI_ACL_TABLE_GROUP_MEMBER_ATTR_ACL_TABLE_GROUP_ID", "oid:0xb00000000058f"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_GROUP_MEMBER_ATTR_ACL_TABLE_ID", "oid:0x7000000000606"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_GROUP_MEMBER_ATTR_PRIORITY", "234"}}); + table.set("SAI_OBJECT_TYPE_UDF_GROUP:oid:0xfa1", + std::vector{ + swss::FieldValueTuple{"SAI_UDF_GROUP_ATTR_TYPE", "SAI_UDF_GROUP_TYPE_GENERIC"}, + swss::FieldValueTuple{"SAI_UDF_GROUP_ATTR_LENGTH", "2"}}); + table.set("SAI_OBJECT_TYPE_UDF:oid:0x1771", + std::vector{swss::FieldValueTuple{"SAI_UDF_ATTR_GROUP_ID", 
"oid:0xfa1"}, + swss::FieldValueTuple{"SAI_UDF_ATTR_MATCH_ID", "oid:0x1389"}, + swss::FieldValueTuple{"SAI_UDF_ATTR_BASE", "SAI_UDF_BASE_L3"}, + swss::FieldValueTuple{"SAI_UDF_ATTR_OFFSET", "56"}}); + + // Verification should succeed with vaild key and value. + const std::string db_key = std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + p4rtAclTableName; + EXPECT_EQ(VerifyTableState(db_key, attributes), ""); + + // Invalid key should fail verification. + EXPECT_FALSE(VerifyTableState("invalid", attributes).empty()); + EXPECT_FALSE(VerifyTableState("invalid:invalid", attributes).empty()); + EXPECT_FALSE(VerifyTableState(std::string(APP_P4RT_TABLE_NAME) + ":invalid", attributes).empty()); + EXPECT_FALSE(VerifyTableState(std::string(APP_P4RT_TABLE_NAME) + ":invalid:invalid", attributes).empty()); + EXPECT_FALSE(VerifyTableState(std::string(APP_P4RT_TABLE_NAME) + ":DEFINITION:invalid", attributes).empty()); + + // Verification should fail with invalid attribute. + EXPECT_FALSE( + VerifyTableState(db_key, std::vector{swss::FieldValueTuple{kSize, "-1"}}).empty()); + EXPECT_FALSE( + VerifyTableState(db_key, std::vector{swss::FieldValueTuple{"meter/unit", "invalid"}}) + .empty()); + EXPECT_FALSE( + VerifyTableState(db_key, std::vector{swss::FieldValueTuple{kStage, "invalid"}}).empty()); + EXPECT_FALSE(VerifyTableState(db_key, + std::vector{ + swss::FieldValueTuple{kStage, STAGE_INGRESS}, swss::FieldValueTuple{kSize, "123"}, + swss::FieldValueTuple{kPriority, "234"}, + swss::FieldValueTuple{"meter/unit", P4_METER_UNIT_BYTES}, + swss::FieldValueTuple{"counter/unit", P4_COUNTER_UNIT_BOTH}, + swss::FieldValueTuple{"match/ether_type", "invalid"}}) + .empty()); + EXPECT_FALSE(VerifyTableState(db_key, + std::vector{ + swss::FieldValueTuple{kStage, STAGE_INGRESS}, swss::FieldValueTuple{kSize, "123"}, + swss::FieldValueTuple{kPriority, "234"}, + swss::FieldValueTuple{"meter/unit", P4_METER_UNIT_BYTES}, + swss::FieldValueTuple{"counter/unit", P4_COUNTER_UNIT_BOTH}, + 
swss::FieldValueTuple{"action/copy_and_set_tc", "[{\"action\":\"invalid\"}]"}}) + .empty()); + EXPECT_FALSE( + VerifyTableState(db_key, + std::vector{ + swss::FieldValueTuple{kStage, STAGE_INGRESS}, swss::FieldValueTuple{kSize, "123"}, + swss::FieldValueTuple{kPriority, "234"}, + swss::FieldValueTuple{"meter/unit", P4_METER_UNIT_BYTES}, + swss::FieldValueTuple{"counter/unit", P4_COUNTER_UNIT_BOTH}, + swss::FieldValueTuple{"action/punt_and_set_tc", "[{\"action\":\"SAI_PACKET_ACTION_COPY\"," + "\"packet_color\":\"invalid\"}]"}}) + .empty()); + + // Verification should fail if ACL table name mismatches. + auto saved_acl_table_name = acl_table->acl_table_name; + acl_table->acl_table_name = "invalid"; + EXPECT_FALSE(VerifyTableState(db_key, attributes).empty()); + acl_table->acl_table_name = saved_acl_table_name; + + // Verification should fail if stage mismatches. + auto saved_stage = acl_table->stage; + acl_table->stage = SAI_ACL_STAGE_EGRESS; + EXPECT_FALSE(VerifyTableState(db_key, attributes).empty()); + acl_table->stage = saved_stage; + + // Verification should fail if size mismatches. + auto saved_size = acl_table->size; + acl_table->size = 111; + EXPECT_FALSE(VerifyTableState(db_key, attributes).empty()); + acl_table->size = saved_size; + + // Verification should fail if priority mismatches. + auto saved_priority = acl_table->priority; + acl_table->priority = 111; + EXPECT_FALSE(VerifyTableState(db_key, attributes).empty()); + acl_table->priority = saved_priority; + + // Verification should fail if meter unit mismatches. + auto saved_meter_unit = acl_table->meter_unit; + acl_table->meter_unit = P4_METER_UNIT_PACKETS; + EXPECT_FALSE(VerifyTableState(db_key, attributes).empty()); + acl_table->meter_unit = saved_meter_unit; + + // Verification should fail if counter unit mismatches. 
+ auto saved_counter_unit = acl_table->counter_unit; + acl_table->counter_unit = P4_COUNTER_UNIT_PACKETS; + EXPECT_FALSE(VerifyTableState(db_key, attributes).empty()); + acl_table->counter_unit = saved_counter_unit; + + // Verification should fail if composite SAI match fields lookup mismatches. + acl_table->composite_sai_match_fields_lookup["invalid"] = std::vector{}; + EXPECT_FALSE(VerifyTableState(db_key, attributes).empty()); + acl_table->composite_sai_match_fields_lookup.erase("invalid"); + + // Verification should fail if UDF fields lookup mismatches. + acl_table->udf_fields_lookup["invalid"] = std::vector{}; + EXPECT_FALSE(VerifyTableState(db_key, attributes).empty()); + acl_table->udf_fields_lookup.erase("invalid"); + + // Verification should fail if UDF group attr index lookup mismatches. + acl_table->udf_group_attr_index_lookup["invalid"] = 0; + EXPECT_FALSE(VerifyTableState(db_key, attributes).empty()); + acl_table->udf_group_attr_index_lookup.erase("invalid"); + + // Verification should fail if SAI match field mismatches. + acl_table->sai_match_field_lookup["invalid"] = SaiMatchField{}; + EXPECT_FALSE(VerifyTableState(db_key, attributes).empty()); + acl_table->sai_match_field_lookup.erase("invalid"); + + // Verification should fail if IP type bit type lookup mismatches. + acl_table->ip_type_bit_type_lookup["invalid"] = "invalid"; + EXPECT_FALSE(VerifyTableState(db_key, attributes).empty()); + acl_table->ip_type_bit_type_lookup.erase("invalid"); + + // Verification should fail if rule action field lookup mismatches. + acl_table->rule_action_field_lookup["invalid"] = std::vector{}; + EXPECT_FALSE(VerifyTableState(db_key, attributes).empty()); + acl_table->rule_action_field_lookup.erase("invalid"); + + // Verification should fail if rule packet action color lookup mismatches. 
+ acl_table->rule_packet_action_color_lookup["invalid"] = std::map{}; + EXPECT_FALSE(VerifyTableState(db_key, attributes).empty()); + acl_table->rule_packet_action_color_lookup.erase("invalid"); + + // Verification should fail if group member OID mapping mismatches. + auto saved_group_member_oid = acl_table->group_member_oid; + acl_table->group_member_oid = 0; + EXPECT_FALSE(VerifyTableState(db_key, attributes).empty()); + acl_table->group_member_oid = saved_group_member_oid; + + // Verification should fail if ACL table OID mapping mismatches. + auto saved_table_oid = acl_table->table_oid; + acl_table->table_oid = 0; + EXPECT_FALSE(VerifyTableState(db_key, attributes).empty()); + acl_table->table_oid = saved_table_oid; +} + +TEST_F(AclManagerTest, AclRuleVerifyStateTest) +{ + ASSERT_NO_FATAL_FAILURE(AddDefaultIngressTable()); + std::vector attributes; + attributes.push_back(swss::FieldValueTuple{kAction, "mirror_ingress"}); + attributes.push_back(swss::FieldValueTuple{"param/target", gMirrorSession1}); + attributes.push_back(swss::FieldValueTuple{"meter/cir", "80"}); + attributes.push_back(swss::FieldValueTuple{"meter/cburst", "80"}); + attributes.push_back(swss::FieldValueTuple{"meter/pir", "200"}); + attributes.push_back(swss::FieldValueTuple{"meter/pburst", "200"}); + attributes.push_back(swss::FieldValueTuple{"controller_metadata", "..."}); + const auto &acl_rule_json_key = "{\"match/ether_type\":\"0x0800\",\"match/" + "ipv6_dst\":\"fdf8:f53b:82e4::53 & " + "fdf8:f53b:82e4::53\",\"match/arp_tpa\": \"0xff112231\", " + "\"match/in_ports\": \"Ethernet1,Ethernet2\", \"match/out_ports\": " + "\"Ethernet4,Ethernet5\", \"priority\":15}"; + const auto &rule_tuple_key = std::string(kAclIngressTableName) + kTableKeyDelimiter + acl_rule_json_key; + EnqueueRuleTuple(std::string(kAclIngressTableName), + swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, attributes})); + EXPECT_CALL(mock_sai_acl_, create_acl_entry(_, _, _, _)) + 
.WillOnce(DoAll(SetArgPointee<0>(kAclIngressRuleOid1), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_acl_, create_acl_counter(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kAclCounterOid1), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_policer_, create_policer(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kAclMeterOid1), Return(SAI_STATUS_SUCCESS))); + DrainRuleTuples(); + + // Setup ASIC DB. + swss::Table table(nullptr, "ASIC_STATE"); + table.set( + "SAI_OBJECT_TYPE_ACL_ENTRY:oid:0x3e9", + std::vector{ + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_TABLE_ID", "oid:0x7000000000606"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_PRIORITY", "15"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_ADMIN_STATE", "true"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_FIELD_DST_IPV6", "fdf8:f53b:82e4::53&mask:fdf8:f53b:82e4::53"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_FIELD_ETHER_TYPE", "2048&mask:0xffff"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_FIELD_ACL_IP_TYPE", + "SAI_ACL_IP_TYPE_ANY&mask:0xffffffffffffffff"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN", "2:255,17&mask:2:0xff,0xff"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_1", "2:34,49&mask:2:0xff,0xff"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_FIELD_IN_PORTS", "2:oid:0x112233,oid:0x1fed3"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_FIELD_OUT_PORTS", "2:oid:0x9988,oid:0x56789abcdef"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_ACTION_MIRROR_INGRESS", "1:oid:0x2329"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_ACTION_SET_POLICER", "oid:0x7d1"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_ACTION_COUNTER", "oid:0xbb9"}}); + table.set("SAI_OBJECT_TYPE_ACL_COUNTER:oid:0xbb9", + std::vector{ + swss::FieldValueTuple{"SAI_ACL_COUNTER_ATTR_TABLE_ID", "oid:0x7000000000606"}, + swss::FieldValueTuple{"SAI_ACL_COUNTER_ATTR_ENABLE_BYTE_COUNT", "true"}, + swss::FieldValueTuple{"SAI_ACL_COUNTER_ATTR_ENABLE_PACKET_COUNT", "true"}}); + table.set( + 
"SAI_OBJECT_TYPE_POLICER:oid:0x7d1", + std::vector{ + swss::FieldValueTuple{"SAI_POLICER_ATTR_MODE", "SAI_POLICER_MODE_TR_TCM"}, + swss::FieldValueTuple{"SAI_POLICER_ATTR_METER_TYPE", "SAI_METER_TYPE_BYTES"}, + swss::FieldValueTuple{"SAI_POLICER_ATTR_CBS", "80"}, swss::FieldValueTuple{"SAI_POLICER_ATTR_CIR", "80"}, + swss::FieldValueTuple{"SAI_POLICER_ATTR_PIR", "200"}, swss::FieldValueTuple{"SAI_POLICER_ATTR_PBS", "200"}, + swss::FieldValueTuple{"SAI_POLICER_ATTR_GREEN_PACKET_ACTION", "SAI_PACKET_ACTION_COPY"}}); + + // Verification should succeed with vaild key and value. + const std::string db_key = std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + rule_tuple_key; + EXPECT_EQ(VerifyRuleState(db_key, attributes), ""); + + // Invalid key should fail verification. + EXPECT_FALSE(VerifyRuleState("invalid", attributes).empty()); + EXPECT_FALSE(VerifyRuleState("invalid:invalid", attributes).empty()); + EXPECT_FALSE(VerifyRuleState(std::string(APP_P4RT_TABLE_NAME) + ":invalid", attributes).empty()); + EXPECT_FALSE(VerifyRuleState(std::string(APP_P4RT_TABLE_NAME) + ":invalid:invalid", attributes).empty()); + EXPECT_FALSE(VerifyRuleState(std::string(APP_P4RT_TABLE_NAME) + ":ACL_PUNT_TABLE:invalid", attributes).empty()); + EXPECT_FALSE(VerifyRuleState(std::string(APP_P4RT_TABLE_NAME) + + ":ACL_PUNT_TABLE:{\"match/ether_type\":\"0x0800\",\"match/" + "ipv6_dst\":\"fdf8:f53b:82e4::53 & " + "fdf8:f53b:82e4::53\",\"priority\":0}", + attributes) + .empty()); + EXPECT_FALSE(VerifyRuleState(std::string(APP_P4RT_TABLE_NAME) + + ":ACL_PUNT_TABLE:{\"match/ether_type\":\"0x0800\",\"match/" + "ipv6_dst\":\"127.0.0.1/24\",\"priority\":15}", + attributes) + .empty()); + + // Verification should fail if entry does not exist. 
+ EXPECT_FALSE(VerifyRuleState(std::string(APP_P4RT_TABLE_NAME) + + ":ACL_PUNT_TABLE:{\"match/ether_type\":\"0x0800\",\"match/" + "ipv6_dst\":\"fdf8:f53b:82e4::54 & " + "fdf8:f53b:82e4::54\",\"priority\":15}", + attributes) + .empty()); + + // Verification should fail with invalid attribute. + EXPECT_FALSE(VerifyTableState(db_key, std::vector{{kAction, "invalid"}}).empty()); + + auto *acl_table = GetAclTable(kAclIngressTableName); + EXPECT_NE(acl_table, nullptr); + const auto &acl_rule_key = "match/arp_tpa=0xff112231:match/ether_type=0x0800:match/" + "in_ports=Ethernet1,Ethernet2:match/ipv6_dst=fdf8:f53b:82e4::53 & " + "fdf8:f53b:82e4::53:match/out_ports=Ethernet4,Ethernet5:priority=15"; + auto *acl_rule = GetAclRule(kAclIngressTableName, acl_rule_key); + ASSERT_NE(acl_rule, nullptr); + + // Verification should fail if ACL rule key mismatches. + auto saved_acl_rule_key = acl_rule->acl_rule_key; + acl_rule->acl_rule_key = "invalid"; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_rule->acl_rule_key = saved_acl_rule_key; + + // Verification should fail if ACL table name mismatches. + auto saved_acl_table_name = acl_rule->acl_table_name; + acl_rule->acl_table_name = "invalid"; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_rule->acl_table_name = saved_acl_table_name; + + // Verification should fail if DB key mismatches. + auto saved_db_key = acl_rule->db_key; + acl_rule->db_key = "invalid"; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_rule->db_key = saved_db_key; + + // Verification should fail if action mismatches. + auto saved_p4_action = acl_rule->p4_action; + acl_rule->p4_action = "invalid"; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_rule->p4_action = saved_p4_action; + + // Verification should fail if priority mismatches. 
+ auto saved_priority = acl_rule->priority; + acl_rule->priority = 111; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_rule->priority = saved_priority; + + // Verification should fail if ACL table name mismatches. + saved_acl_table_name = acl_table->acl_table_name; + acl_table->acl_table_name = "invalid"; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_table->acl_table_name = saved_acl_table_name; + + // Verification should fail if ACL table OID mismatches. + auto saved_acl_table_oid = acl_rule->acl_table_oid; + acl_rule->acl_table_oid = 0; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_rule->acl_table_oid = saved_acl_table_oid; + + // Verification should fail if ACL table meter unit is invalid. + auto saved_meter_unit = acl_table->meter_unit; + acl_table->meter_unit = "invalid"; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_table->meter_unit = saved_meter_unit; + + // Verification should fail if ACL table counter unit mismatches. 
+ auto saved_counter_unit = acl_table->counter_unit; + acl_table->counter_unit = P4_COUNTER_UNIT_PACKETS; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_table->counter_unit = P4_COUNTER_UNIT_BYTES; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_table->counter_unit = saved_counter_unit; + + // Verification should fail if mirror_ingress action is incorrect + EXPECT_NE(acl_rule->action_fvs.find(SAI_ACL_ENTRY_ATTR_ACTION_MIRROR_INGRESS), acl_rule->action_fvs.end()); + auto mirror_sessions = std::move(acl_rule->action_fvs[SAI_ACL_ENTRY_ATTR_ACTION_MIRROR_INGRESS]); + acl_rule->action_fvs[SAI_ACL_ENTRY_ATTR_ACTION_MIRROR_INGRESS] = sai_attribute_value_t{}; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + + acl_rule->action_fvs.erase(SAI_ACL_ENTRY_ATTR_ACTION_MIRROR_INGRESS); + acl_rule->action_fvs[SAI_ACL_ENTRY_ATTR_ACTION_MIRROR_EGRESS] = sai_attribute_value_t{}; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_rule->action_fvs[SAI_ACL_ENTRY_ATTR_ACTION_MIRROR_INGRESS] = std::move(mirror_sessions); + acl_rule->action_fvs.erase(SAI_ACL_ENTRY_ATTR_ACTION_MIRROR_EGRESS); + + // Verification should fail if match fvs mismatches. + auto saved_match_fvs = acl_rule->match_fvs; + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_FIELD_IN_PORT] = sai_attribute_value_t{}; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_rule->match_fvs.erase(SAI_ACL_ENTRY_ATTR_FIELD_IN_PORT); + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_FIELD_ETHER_TYPE] = sai_attribute_value_t{}; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_rule->match_fvs.erase(SAI_ACL_ENTRY_ATTR_FIELD_ETHER_TYPE); + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_FIELD_IN_PORT] = sai_attribute_value_t{}; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_rule->match_fvs = saved_match_fvs; + + // Verification should fail if action fvs mismatches. 
+ auto saved_action_fvs = acl_rule->action_fvs; + acl_rule->action_fvs[SAI_ACL_ENTRY_ATTR_ACTION_REDIRECT] = sai_attribute_value_t{}; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_rule->action_fvs.erase(SAI_ACL_ENTRY_ATTR_ACTION_REDIRECT); + acl_rule->action_fvs[SAI_ACL_ENTRY_ATTR_ACTION_SET_TC] = sai_attribute_value_t{}; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_rule->action_fvs.erase(SAI_ACL_ENTRY_ATTR_ACTION_SET_TC); + acl_rule->action_fvs[SAI_ACL_ENTRY_ATTR_ACTION_REDIRECT] = sai_attribute_value_t{}; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_rule->action_fvs = saved_action_fvs; + + // Verification should fail if meter mismatches. + auto saved_meter_cir = acl_rule->meter.cir; + acl_rule->meter.cir = 0; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_rule->meter.cir = saved_meter_cir; + + // Verification should fail if counter mismatches. + auto saved_counter_bytes_enabled = acl_rule->counter.bytes_enabled; + acl_rule->counter.bytes_enabled = false; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_rule->counter.bytes_enabled = saved_counter_bytes_enabled; + + // Verification should fail if action qos queue number mismatches. + auto saved_action_qos_queue_num = acl_rule->action_qos_queue_num; + acl_rule->action_qos_queue_num = 111; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_rule->action_qos_queue_num = saved_action_qos_queue_num; + + // Verification should fail if action redirect nexthop key mismatches. + auto saved_action_redirect_nexthop_key = acl_rule->action_redirect_nexthop_key; + acl_rule->action_redirect_nexthop_key = 111; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_rule->action_redirect_nexthop_key = saved_action_redirect_nexthop_key; + + // Verification should fail if action mirror section mismatches. 
+ acl_rule->action_mirror_sessions[SAI_ACL_ENTRY_ATTR_ACTION_MIRROR_EGRESS] = P4AclMirrorSession{}; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_rule->action_mirror_sessions.erase(SAI_ACL_ENTRY_ATTR_ACTION_MIRROR_EGRESS); + + // Verification should fail if UDF data mask mismatches. + acl_rule->udf_data_masks[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_2] = P4UdfDataMask{}; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_rule->udf_data_masks.erase(SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_2); + + // Verification should fail if UDF data mask pointer mismatches. + auto udf_data_mask = std::move(acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN]); + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN] = sai_attribute_value_t{}; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()) << VerifyRuleState(db_key, attributes); + + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN].aclfield.data.u8list.count = 1; + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN].aclfield.mask.u8list.count = 2; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()) << VerifyRuleState(db_key, attributes); + + acl_rule->match_fvs.erase(SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN); + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_2] = sai_attribute_value_t{}; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()) << VerifyRuleState(db_key, attributes); + acl_rule->match_fvs.erase(SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_2); + + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN].aclfield.data.u8list.list = nullptr; + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN].aclfield.data.u8list.count = 2; + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN].aclfield.mask.u8list.list = + acl_rule->udf_data_masks[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN].mask.data(); + 
acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN].aclfield.mask.u8list.count = 2; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()) << VerifyRuleState(db_key, attributes); + + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN].aclfield.data.u8list.list = + acl_rule->udf_data_masks[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN].data.data(); + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN].aclfield.mask.u8list.list = nullptr; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()) << VerifyRuleState(db_key, attributes); + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN] = std::move(udf_data_mask); + + // Verification should fail if in ports mismatches. + acl_rule->in_ports.push_back("invalid"); + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_rule->in_ports.pop_back(); + + // Verification should fail if out ports mismatches. + acl_rule->out_ports.push_back("invalid"); + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_rule->out_ports.pop_back(); + + // Verification should fail if in ports OIDs mismatches. 
+ acl_rule->in_ports_oids.push_back(0); + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_rule->in_ports_oids.pop_back(); + + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_FIELD_IN_PORTS].aclfield.data.objlist.list = nullptr; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()) << VerifyRuleState(db_key, attributes); + + acl_rule->match_fvs.erase(SAI_ACL_ENTRY_ATTR_FIELD_IN_PORTS); + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()) << VerifyRuleState(db_key, attributes); + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_FIELD_IN_PORTS].aclfield.data.objlist.list = acl_rule->in_ports_oids.data(); + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_FIELD_IN_PORTS].aclfield.enable = true; + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_FIELD_IN_PORTS].aclfield.data.objlist.count = 2; + EXPECT_TRUE(VerifyRuleState(db_key, attributes).empty()) << VerifyRuleState(db_key, attributes); + + // Verification should fail if out ports OIDs mismatches. + acl_rule->out_ports_oids.push_back(0); + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()) << VerifyRuleState(db_key, attributes); + acl_rule->out_ports_oids.pop_back(); + + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_FIELD_OUT_PORTS].aclfield.data.objlist.list = nullptr; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()) << VerifyRuleState(db_key, attributes); + + acl_rule->match_fvs.erase(SAI_ACL_ENTRY_ATTR_FIELD_OUT_PORTS); + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()) << VerifyRuleState(db_key, attributes); + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_FIELD_OUT_PORTS].aclfield.data.objlist.list = + acl_rule->out_ports_oids.data(); + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_FIELD_OUT_PORTS].aclfield.data.objlist.count = 2; + acl_rule->match_fvs[SAI_ACL_ENTRY_ATTR_FIELD_OUT_PORTS].aclfield.enable = true; + EXPECT_TRUE(VerifyRuleState(db_key, attributes).empty()) << VerifyRuleState(db_key, attributes); + + // Verification should fail if ACL rule OID mismatches. 
+ auto saved_acl_entry_oid = acl_rule->acl_entry_oid; + acl_rule->acl_entry_oid = 0; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_rule->acl_entry_oid = saved_acl_entry_oid; + + // Verification should fail if meter OID mismatches. + auto saved_meter_oid = acl_rule->meter.meter_oid; + acl_rule->meter.meter_oid = 0; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_rule->meter.meter_oid = saved_meter_oid; + + // Verification should fail if counter OID mismatches. + auto saved_counter_oid = acl_rule->counter.counter_oid; + acl_rule->counter.counter_oid = 0; + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + acl_rule->counter.counter_oid = saved_counter_oid; +} + +TEST_F(AclManagerTest, AclTableVerifyStateAsicDbTest) +{ + const auto &p4rtAclTableName = + std::string(APP_P4RT_ACL_TABLE_DEFINITION_NAME) + kTableKeyDelimiter + kAclIngressTableName; + std::vector attributes = getDefaultTableDefFieldValueTuples(); + EnqueueTableTuple(swss::KeyOpFieldsValuesTuple({p4rtAclTableName, SET_COMMAND, attributes})); + EXPECT_CALL(mock_sai_acl_, create_acl_table(_, Eq(gSwitchId), Gt(2), + Truly(std::bind(MatchSaiAttributeAclTableStage, SAI_ACL_STAGE_INGRESS, + std::placeholders::_1)))) + .WillOnce(DoAll(SetArgPointee<0>(kAclTableIngressOid), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_acl_, create_acl_table_group_member(_, Eq(gSwitchId), Eq(3), NotNull())) + .WillOnce(DoAll(SetArgPointee<0>(kAclGroupMemberIngressOid), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_udf_, create_udf_match(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_udf_, create_udf_group(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfGroupOid1), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_udf_, create_udf(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kUdfOid1), Return(SAI_STATUS_SUCCESS))); + DrainTableTuples(); + auto *acl_table = GetAclTable(kAclIngressTableName); 
+ EXPECT_NE(acl_table, nullptr); + + // Setup ASIC DB. + swss::Table table(nullptr, "ASIC_STATE"); + table.set("SAI_OBJECT_TYPE_ACL_TABLE:oid:0x7000000000606", + std::vector{ + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_ACL_STAGE", "SAI_ACL_STAGE_INGRESS"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_SIZE", "123"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_FIELD_ACL_IP_TYPE", "true"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_FIELD_DST_MAC", "true"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_FIELD_ETHER_TYPE", "true"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_FIELD_ICMP_TYPE", "true"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_FIELD_DST_IPV6", "true"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_FIELD_IPV6_NEXT_HEADER", "true"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_FIELD_L4_DST_PORT", "true"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_FIELD_TTL", "true"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_USER_DEFINED_FIELD_GROUP_MIN", "oid:0xfa1"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_ACL_ACTION_TYPE_LIST", "1:SAI_ACL_ACTION_TYPE_COUNTER"}}); + table.set("SAI_OBJECT_TYPE_ACL_TABLE_GROUP_MEMBER:oid:0xc000000000607", + std::vector{ + swss::FieldValueTuple{"SAI_ACL_TABLE_GROUP_MEMBER_ATTR_ACL_TABLE_GROUP_ID", "oid:0xb00000000058f"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_GROUP_MEMBER_ATTR_ACL_TABLE_ID", "oid:0x7000000000606"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_GROUP_MEMBER_ATTR_PRIORITY", "234"}}); + table.set("SAI_OBJECT_TYPE_UDF_GROUP:oid:0xfa1", + std::vector{ + swss::FieldValueTuple{"SAI_UDF_GROUP_ATTR_TYPE", "SAI_UDF_GROUP_TYPE_GENERIC"}, + swss::FieldValueTuple{"SAI_UDF_GROUP_ATTR_LENGTH", "2"}}); + table.set("SAI_OBJECT_TYPE_UDF:oid:0x1771", + std::vector{swss::FieldValueTuple{"SAI_UDF_ATTR_GROUP_ID", "oid:0xfa1"}, + swss::FieldValueTuple{"SAI_UDF_ATTR_MATCH_ID", "oid:0x1389"}, + swss::FieldValueTuple{"SAI_UDF_ATTR_BASE", "SAI_UDF_BASE_L3"}, + swss::FieldValueTuple{"SAI_UDF_ATTR_OFFSET", "56"}}); + + // Verification should succeed with 
correct ASIC DB values. + const std::string db_key = std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + p4rtAclTableName; + EXPECT_EQ(VerifyTableState(db_key, attributes), ""); + + // Verification should fail if ACL table mismatch. + table.set("SAI_OBJECT_TYPE_ACL_TABLE:oid:0x7000000000606", + std::vector{ + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_ACL_STAGE", "SAI_ACL_STAGE_EGRESS"}}); + EXPECT_FALSE(VerifyTableState(db_key, attributes).empty()); + + // Verification should fail if ACL table is missing. + table.del("SAI_OBJECT_TYPE_ACL_TABLE:oid:0x7000000000606"); + EXPECT_FALSE(VerifyTableState(db_key, attributes).empty()); + table.set("SAI_OBJECT_TYPE_ACL_TABLE:oid:0x7000000000606", + std::vector{ + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_ACL_STAGE", "SAI_ACL_STAGE_INGRESS"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_SIZE", "123"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_FIELD_ACL_IP_TYPE", "true"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_FIELD_DST_MAC", "true"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_FIELD_ETHER_TYPE", "true"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_FIELD_ICMP_TYPE", "true"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_FIELD_DST_IPV6", "true"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_FIELD_IPV6_NEXT_HEADER", "true"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_FIELD_L4_DST_PORT", "true"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_FIELD_TTL", "true"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_USER_DEFINED_FIELD_GROUP_MIN", "oid:0xfa1"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_ATTR_ACL_ACTION_TYPE_LIST", "1:SAI_ACL_ACTION_TYPE_COUNTER"}}); + + // Verification should fail if table group member mismatch. + table.set( + "SAI_OBJECT_TYPE_ACL_TABLE_GROUP_MEMBER:oid:0xc000000000607", + std::vector{swss::FieldValueTuple{"SAI_ACL_TABLE_GROUP_MEMBER_ATTR_PRIORITY", "0"}}); + EXPECT_FALSE(VerifyTableState(db_key, attributes).empty()); + + // Verification should fail if table group member is missing. 
+ table.del("SAI_OBJECT_TYPE_ACL_TABLE_GROUP_MEMBER:oid:0xc000000000607"); + EXPECT_FALSE(VerifyTableState(db_key, attributes).empty()); + table.set("SAI_OBJECT_TYPE_ACL_TABLE_GROUP_MEMBER:oid:0xc000000000607", + std::vector{ + swss::FieldValueTuple{"SAI_ACL_TABLE_GROUP_MEMBER_ATTR_ACL_TABLE_GROUP_ID", "oid:0xb00000000058f"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_GROUP_MEMBER_ATTR_ACL_TABLE_ID", "oid:0x7000000000606"}, + swss::FieldValueTuple{"SAI_ACL_TABLE_GROUP_MEMBER_ATTR_PRIORITY", "234"}}); + + // Verification should fail if udf group mismatch. + table.set("SAI_OBJECT_TYPE_UDF_GROUP:oid:0xfa1", + std::vector{swss::FieldValueTuple{"SAI_UDF_GROUP_ATTR_LENGTH", "1"}}); + EXPECT_FALSE(VerifyTableState(db_key, attributes).empty()); + + // Verification should fail if udf group is missing. + table.del("SAI_OBJECT_TYPE_UDF_GROUP:oid:0xfa1"); + EXPECT_FALSE(VerifyTableState(db_key, attributes).empty()); + table.set("SAI_OBJECT_TYPE_UDF_GROUP:oid:0xfa1", + std::vector{ + swss::FieldValueTuple{"SAI_UDF_GROUP_ATTR_TYPE", "SAI_UDF_GROUP_TYPE_GENERIC"}, + swss::FieldValueTuple{"SAI_UDF_GROUP_ATTR_LENGTH", "2"}}); + + // Verification should fail if udf mismatch. + table.set("SAI_OBJECT_TYPE_UDF:oid:0x1771", + std::vector{swss::FieldValueTuple{"SAI_UDF_ATTR_OFFSET", "1"}}); + EXPECT_FALSE(VerifyTableState(db_key, attributes).empty()); + + // Verification should fail if udf is missing. 
+ table.del("SAI_OBJECT_TYPE_UDF:oid:0x1771"); + EXPECT_FALSE(VerifyTableState(db_key, attributes).empty()); + table.set("SAI_OBJECT_TYPE_UDF:oid:0x1771", + std::vector{swss::FieldValueTuple{"SAI_UDF_ATTR_GROUP_ID", "oid:0xfa1"}, + swss::FieldValueTuple{"SAI_UDF_ATTR_MATCH_ID", "oid:0x1389"}, + swss::FieldValueTuple{"SAI_UDF_ATTR_BASE", "SAI_UDF_BASE_L3"}, + swss::FieldValueTuple{"SAI_UDF_ATTR_OFFSET", "56"}}); +} + +TEST_F(AclManagerTest, AclRuleVerifyStateAsicDbTest) +{ + ASSERT_NO_FATAL_FAILURE(AddDefaultIngressTable()); + auto attributes = getDefaultRuleFieldValueTuples(); + const auto &acl_rule_json_key = "{\"match/ether_type\":\"0x0800\",\"match/" + "ipv6_dst\":\"fdf8:f53b:82e4::53 & " + "fdf8:f53b:82e4::53\",\"priority\":15}"; + const auto &rule_tuple_key = std::string(kAclIngressTableName) + kTableKeyDelimiter + acl_rule_json_key; + EnqueueRuleTuple(std::string(kAclIngressTableName), + swss::KeyOpFieldsValuesTuple({rule_tuple_key, SET_COMMAND, attributes})); + EXPECT_CALL(mock_sai_acl_, create_acl_entry(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kAclIngressRuleOid1), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_acl_, create_acl_counter(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kAclCounterOid1), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_policer_, create_policer(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kAclMeterOid1), Return(SAI_STATUS_SUCCESS))); + DrainRuleTuples(); + + // Setup ASIC DB. 
+ swss::Table table(nullptr, "ASIC_STATE"); + table.set( + "SAI_OBJECT_TYPE_ACL_ENTRY:oid:0x3e9", + std::vector{ + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_TABLE_ID", "oid:0x7000000000606"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_PRIORITY", "15"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_ADMIN_STATE", "true"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_FIELD_DST_IPV6", "fdf8:f53b:82e4::53&mask:fdf8:f53b:82e4::53"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_FIELD_ETHER_TYPE", "2048&mask:0xffff"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_FIELD_ACL_IP_TYPE", + "SAI_ACL_IP_TYPE_ANY&mask:0xffffffffffffffff"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_ACTION_SET_TC", "32"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_ACTION_SET_POLICER", "oid:0x7d1"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_ACTION_COUNTER", "oid:0xbb9"}}); + table.set("SAI_OBJECT_TYPE_ACL_COUNTER:oid:0xbb9", + std::vector{ + swss::FieldValueTuple{"SAI_ACL_COUNTER_ATTR_TABLE_ID", "oid:0x7000000000606"}, + swss::FieldValueTuple{"SAI_ACL_COUNTER_ATTR_ENABLE_BYTE_COUNT", "true"}, + swss::FieldValueTuple{"SAI_ACL_COUNTER_ATTR_ENABLE_PACKET_COUNT", "true"}}); + table.set( + "SAI_OBJECT_TYPE_POLICER:oid:0x7d1", + std::vector{ + swss::FieldValueTuple{"SAI_POLICER_ATTR_MODE", "SAI_POLICER_MODE_TR_TCM"}, + swss::FieldValueTuple{"SAI_POLICER_ATTR_METER_TYPE", "SAI_METER_TYPE_BYTES"}, + swss::FieldValueTuple{"SAI_POLICER_ATTR_CBS", "80"}, swss::FieldValueTuple{"SAI_POLICER_ATTR_CIR", "80"}, + swss::FieldValueTuple{"SAI_POLICER_ATTR_PIR", "200"}, swss::FieldValueTuple{"SAI_POLICER_ATTR_PBS", "200"}, + swss::FieldValueTuple{"SAI_POLICER_ATTR_GREEN_PACKET_ACTION", "SAI_PACKET_ACTION_COPY"}}); + + // Verification should succeed with correct ASIC DB values. + const std::string db_key = std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + rule_tuple_key; + EXPECT_EQ(VerifyRuleState(db_key, attributes), ""); + + // Verification should fail if ACL entry mismatch. 
+ table.set("SAI_OBJECT_TYPE_ACL_ENTRY:oid:0x3e9", + std::vector{swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_PRIORITY", "20"}}); + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + + // Verification should fail if ACL entry is missing. + table.del("SAI_OBJECT_TYPE_ACL_ENTRY:oid:0x3e9"); + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + table.set( + "SAI_OBJECT_TYPE_ACL_ENTRY:oid:0x3e9", + std::vector{ + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_TABLE_ID", "oid:0x7000000000606"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_PRIORITY", "15"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_ADMIN_STATE", "true"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_FIELD_DST_IPV6", "fdf8:f53b:82e4::53&mask:fdf8:f53b:82e4::53"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_FIELD_ETHER_TYPE", "2048&mask:0xffff"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_FIELD_ACL_IP_TYPE", + "SAI_ACL_IP_TYPE_ANY&mask:0xffffffffffffffff"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_ACTION_SET_TC", "32"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_ACTION_SET_POLICER", "oid:0x7d1"}, + swss::FieldValueTuple{"SAI_ACL_ENTRY_ATTR_ACTION_COUNTER", "oid:0xbb9"}}); + + // Verification should fail if counter entry mismatch. + table.set("SAI_OBJECT_TYPE_ACL_COUNTER:oid:0xbb9", + std::vector{swss::FieldValueTuple{"SAI_ACL_COUNTER_ATTR_TABLE_ID", "oid:0x0"}}); + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + + // Verification should fail if counter entry is missing. + table.del("SAI_OBJECT_TYPE_ACL_COUNTER:oid:0xbb9"); + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + table.set("SAI_OBJECT_TYPE_ACL_COUNTER:oid:0xbb9", + std::vector{ + swss::FieldValueTuple{"SAI_ACL_COUNTER_ATTR_TABLE_ID", "oid:0x7000000000606"}, + swss::FieldValueTuple{"SAI_ACL_COUNTER_ATTR_ENABLE_BYTE_COUNT", "true"}, + swss::FieldValueTuple{"SAI_ACL_COUNTER_ATTR_ENABLE_PACKET_COUNT", "true"}}); + + // Verification should fail if meter entry mismatch. 
+ table.set("SAI_OBJECT_TYPE_POLICER:oid:0x7d1", + std::vector{swss::FieldValueTuple{"SAI_POLICER_ATTR_CBS", "0"}}); + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + + // Verification should fail if meter entry is missing. + table.del("SAI_OBJECT_TYPE_POLICER:oid:0x7d1"); + EXPECT_FALSE(VerifyRuleState(db_key, attributes).empty()); + table.set( + "SAI_OBJECT_TYPE_POLICER:oid:0x7d1", + std::vector{ + swss::FieldValueTuple{"SAI_POLICER_ATTR_MODE", "SAI_POLICER_MODE_TR_TCM"}, + swss::FieldValueTuple{"SAI_POLICER_ATTR_METER_TYPE", "SAI_METER_TYPE_BYTES"}, + swss::FieldValueTuple{"SAI_POLICER_ATTR_CBS", "80"}, swss::FieldValueTuple{"SAI_POLICER_ATTR_CIR", "80"}, + swss::FieldValueTuple{"SAI_POLICER_ATTR_PIR", "200"}, swss::FieldValueTuple{"SAI_POLICER_ATTR_PBS", "200"}, + swss::FieldValueTuple{"SAI_POLICER_ATTR_GREEN_PACKET_ACTION", "SAI_PACKET_ACTION_COPY"}}); +} + } // namespace test } // namespace p4orch diff --git a/orchagent/p4orch/tests/fake_crmorch.cpp b/orchagent/p4orch/tests/fake_crmorch.cpp index 03f15c28ac..63c19b9fdf 100644 --- a/orchagent/p4orch/tests/fake_crmorch.cpp +++ b/orchagent/p4orch/tests/fake_crmorch.cpp @@ -32,6 +32,14 @@ void CrmOrch::decCrmAclTableUsedCounter(CrmResourceType resource, sai_object_id_ { } +void CrmOrch::incCrmExtTableUsedCounter(CrmResourceType resource, std::string table_name) +{ +} + +void CrmOrch::decCrmExtTableUsedCounter(CrmResourceType resource, std::string table_name) +{ +} + void CrmOrch::doTask(Consumer &consumer) { } diff --git a/orchagent/p4orch/tests/fake_dbconnector.cpp b/orchagent/p4orch/tests/fake_dbconnector.cpp index 1709d9d977..89487fad61 100644 --- a/orchagent/p4orch/tests/fake_dbconnector.cpp +++ b/orchagent/p4orch/tests/fake_dbconnector.cpp @@ -1,5 +1,7 @@ #include #include +#include +#include #include "dbconnector.h" @@ -10,6 +12,17 @@ static std::map dbNameIdMap = { {"APPL_DB", 0}, {"ASIC_DB", 1}, {"COUNTERS_DB", 2}, {"CONFIG_DB", 4}, {"FLEX_COUNTER_DB", 5}, {"STATE_DB", 6}, }; +using DbDataT = 
std::map>>; + +namespace fake_db_connector +{ + +DbDataT gDB; + +} // namespace fake_db_connector + +using namespace fake_db_connector; + RedisContext::RedisContext() { } @@ -43,4 +56,19 @@ int DBConnector::getDbId() const return m_dbId; } +void DBConnector::hset(const std::string &key, const std::string &field, const std::string &value) +{ + gDB[m_dbId][key][field] = value; +} + +std::vector DBConnector::keys(const std::string &key) +{ + std::vector list; + for (auto const &x : gDB[m_dbId]) + { + list.push_back(x.first); + } + return list; +} + } // namespace swss diff --git a/orchagent/p4orch/tests/fake_flexcounterorch.cpp b/orchagent/p4orch/tests/fake_flexcounterorch.cpp new file mode 100644 index 0000000000..e44fc555f6 --- /dev/null +++ b/orchagent/p4orch/tests/fake_flexcounterorch.cpp @@ -0,0 +1,33 @@ +#include "copporch.h" +#include "flexcounterorch.h" + +FlexCounterOrch::FlexCounterOrch(swss::DBConnector *db, std::vector &tableNames) : + Orch(db, tableNames), + m_flexCounterConfigTable(db, CFG_FLEX_COUNTER_TABLE_NAME), + m_bufferQueueConfigTable(db, CFG_BUFFER_QUEUE_TABLE_NAME), + m_bufferPgConfigTable(db, CFG_BUFFER_PG_TABLE_NAME) +{ +} + +FlexCounterOrch::~FlexCounterOrch(void) +{ +} + +void FlexCounterOrch::doTask(Consumer &consumer) +{ +} + +bool FlexCounterOrch::getPortCountersState() const +{ + return true; +} + +bool FlexCounterOrch::getPortBufferDropCountersState() const +{ + return true; +} + +bool FlexCounterOrch::bake() +{ + return true; +} \ No newline at end of file diff --git a/orchagent/p4orch/tests/fake_flowcounterrouteorch.cpp b/orchagent/p4orch/tests/fake_flowcounterrouteorch.cpp new file mode 100644 index 0000000000..08caf52fe6 --- /dev/null +++ b/orchagent/p4orch/tests/fake_flowcounterrouteorch.cpp @@ -0,0 +1,178 @@ +#include "copporch.h" +#include "flowcounterrouteorch.h" + +extern size_t gMaxBulkSize; +extern sai_route_api_t *sai_route_api; + +#define ROUTE_FLOW_COUNTER_POLLING_INTERVAL_MS 10000 + 
+FlowCounterRouteOrch::FlowCounterRouteOrch(swss::DBConnector *db, const std::vector &tableNames) + : Orch(db, tableNames), mRouteFlowCounterMgr(ROUTE_FLOW_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, + ROUTE_FLOW_COUNTER_POLLING_INTERVAL_MS, false), + gRouteBulker(sai_route_api, gMaxBulkSize) +{ +} + +FlowCounterRouteOrch::~FlowCounterRouteOrch(void) +{ +} + +void FlowCounterRouteOrch::generateRouteFlowStats() +{ +} + +void FlowCounterRouteOrch::clearRouteFlowStats() +{ +} + +void FlowCounterRouteOrch::addRoutePattern(const std::string &pattern, size_t) +{ +} + +void FlowCounterRouteOrch::removeRoutePattern(const std::string &pattern) +{ +} + +void FlowCounterRouteOrch::onAddMiscRouteEntry(sai_object_id_t vrf_id, const IpPrefix &ip_prefix, bool add_to_cache) +{ +} + +void FlowCounterRouteOrch::onAddMiscRouteEntry(sai_object_id_t vrf_id, const sai_ip_prefix_t &ip_pfx, bool add_to_cache) +{ +} + +void FlowCounterRouteOrch::onRemoveMiscRouteEntry(sai_object_id_t vrf_id, const IpPrefix &ip_prefix, + bool remove_from_cache) +{ +} + +void FlowCounterRouteOrch::onRemoveMiscRouteEntry(sai_object_id_t vrf_id, const sai_ip_prefix_t &ip_pfx, + bool remove_from_cache) +{ +} + +void FlowCounterRouteOrch::onAddVR(sai_object_id_t vrf_id) +{ +} + +void FlowCounterRouteOrch::onRemoveVR(sai_object_id_t vrf_id) +{ +} + +void FlowCounterRouteOrch::handleRouteAdd(sai_object_id_t vrf_id, const IpPrefix &ip_prefix) +{ +} + +void FlowCounterRouteOrch::handleRouteRemove(sai_object_id_t vrf_id, const IpPrefix &ip_prefix) +{ +} + +void FlowCounterRouteOrch::processRouteFlowCounterBinding() +{ +} + +void FlowCounterRouteOrch::doTask(Consumer &consumer) +{ +} + +void FlowCounterRouteOrch::doTask(SelectableTimer &timer) +{ +} + +void FlowCounterRouteOrch::initRouteFlowCounterCapability() +{ +} + +void FlowCounterRouteOrch::removeRoutePattern(const RoutePattern &route_pattern) +{ +} + +void FlowCounterRouteOrch::removeRouteFlowCounterFromDB(sai_object_id_t vrf_id, const IpPrefix &ip_prefix, + 
sai_object_id_t counter_oid) +{ +} + +bool FlowCounterRouteOrch::bindFlowCounter(const RoutePattern &route_pattern, sai_object_id_t vrf_id, + const IpPrefix &ip_prefix) +{ + return true; +} + +void FlowCounterRouteOrch::unbindFlowCounter(const RoutePattern &route_pattern, sai_object_id_t vrf_id, + const IpPrefix &ip_prefix, sai_object_id_t counter_oid) +{ +} + +void FlowCounterRouteOrch::pendingUpdateFlexDb(const RoutePattern &route_pattern, const IpPrefix &ip_prefix, + sai_object_id_t counter_oid) +{ +} + +void FlowCounterRouteOrch::updateRouterFlowCounterCache(const RoutePattern &route_pattern, const IpPrefix &ip_prefix, + sai_object_id_t counter_oid, RouterFlowCounterCache &cache) +{ +} + +bool FlowCounterRouteOrch::validateRoutePattern(const RoutePattern &route_pattern) const +{ + return true; +} + +void FlowCounterRouteOrch::onRoutePatternMaxMatchCountChange(RoutePattern &route_pattern, size_t new_max_match_count) +{ +} + +bool FlowCounterRouteOrch::isRouteAlreadyBound(const RoutePattern &route_pattern, const IpPrefix &ip_prefix) const +{ + return true; +} + +void FlowCounterRouteOrch::createRouteFlowCounterByPattern(const RoutePattern &route_pattern, size_t currentBoundCount) +{ +} + +bool FlowCounterRouteOrch::removeRouteFlowCounter(const RoutePattern &route_pattern, sai_object_id_t vrf_id, + const IpPrefix &ip_prefix) +{ + return true; +} + +void FlowCounterRouteOrch::createRouteFlowCounterFromVnetRoutes(const RoutePattern &route_pattern, + size_t ¤t_bound_count) +{ +} + +void FlowCounterRouteOrch::reapRouteFlowCounterByPattern(const RoutePattern &route_pattern, size_t currentBoundCount) +{ +} + +bool FlowCounterRouteOrch::isRouteFlowCounterEnabled() const +{ + return true; +} + +void FlowCounterRouteOrch::getRouteFlowCounterNameMapKey(sai_object_id_t vrf_id, const IpPrefix &ip_prefix, + std::string &key) +{ +} + +size_t FlowCounterRouteOrch::getRouteFlowCounterSizeByPattern(const RoutePattern &route_pattern) const +{ + return 0; +} + +bool 
FlowCounterRouteOrch::parseRouteKeyForRoutePattern(const std::string &key, char sep, sai_object_id_t &vrf_id, + IpPrefix &ip_prefix, std::string &vrf_name) +{ + return true; +} + +bool FlowCounterRouteOrch::getVrfIdByVnetName(const std::string &vnet_name, sai_object_id_t &vrf_id) +{ + return true; +} + +bool FlowCounterRouteOrch::getVnetNameByVrfId(sai_object_id_t vrf_id, std::string &vnet_name) +{ + return true; +} diff --git a/orchagent/p4orch/tests/fake_portorch.cpp b/orchagent/p4orch/tests/fake_portorch.cpp index aaf766e1aa..93b654c5e7 100644 --- a/orchagent/p4orch/tests/fake_portorch.cpp +++ b/orchagent/p4orch/tests/fake_portorch.cpp @@ -181,11 +181,35 @@ bool PortsOrch::setPortPfc(sai_object_id_t portId, uint8_t pfc_bitmask) return true; } -void PortsOrch::generateQueueMap() +void PortsOrch::generateQueueMap(std::map queuesStateVector) { } -void PortsOrch::generatePriorityGroupMap() +void PortsOrch::generateQueueMapPerPort(const Port& port, FlexCounterQueueStates& queuesState, bool voq) +{ +} + +void PortsOrch::createPortBufferQueueCounters(const Port &port, string queues) +{ +} + +void PortsOrch::removePortBufferQueueCounters(const Port &port, string queues) +{ +} + +void PortsOrch::generatePriorityGroupMap(std::map pgsStateVector) +{ +} + +void PortsOrch::generatePriorityGroupMapPerPort(const Port& port, FlexCounterPgStates& pgsState) +{ +} + +void PortsOrch::createPortBufferPgCounters(const Port& port, string pgs) +{ +} + +void PortsOrch::removePortBufferPgCounters(const Port& port, string pgs) { } @@ -206,7 +230,8 @@ bool PortsOrch::removeAclTableGroup(const Port &p) return true; } -bool PortsOrch::addSubPort(Port &port, const string &alias, const bool &adminUp, const uint32_t &mtu) +bool PortsOrch::addSubPort(Port &port, const string &alias, const string &vlan, const bool &adminUp, + const uint32_t &mtu) { return true; } @@ -297,7 +322,7 @@ bool PortsOrch::setVoqInbandIntf(string &alias, string &type) return true; } -bool PortsOrch::getRecircPort(Port 
&p, string role) +bool PortsOrch::getRecircPort(Port &p, Port::Role role) { return true; } @@ -400,7 +425,7 @@ void PortsOrch::initializePriorityGroups(Port &port) { } -void PortsOrch::initializePortMaximumHeadroom(Port &port) +void PortsOrch::initializePortBufferMaximumParameters(Port &port) { } @@ -418,7 +443,7 @@ bool PortsOrch::setHostIntfsStripTag(Port &port, sai_hostif_vlan_tag_t strip) return true; } -bool PortsOrch::setBridgePortLearnMode(Port &port, string learn_mode) +bool PortsOrch::setBridgePortLearnMode(Port &port, sai_bridge_port_fdb_learning_mode_t learn_mode) { return true; } @@ -448,7 +473,7 @@ bool PortsOrch::setLagTpid(sai_object_id_t id, sai_uint16_t tpid) return true; } -bool PortsOrch::addLagMember(Port &lag, Port &port, bool enableForwarding) +bool PortsOrch::addLagMember(Port &lag, Port &port, string member_status) { return true; } @@ -468,7 +493,7 @@ bool PortsOrch::setDistributionOnLagMember(Port &lagMember, bool enableDistribut return true; } -bool PortsOrch::addPort(const set &lane_set, uint32_t speed, int an, string fec) +bool PortsOrch::addPort(const PortConfig &port) { return true; } @@ -478,7 +503,7 @@ sai_status_t PortsOrch::removePort(sai_object_id_t port_id) return SAI_STATUS_SUCCESS; } -bool PortsOrch::initPort(const string &alias, const string &role, const int index, const set &lane_set) +bool PortsOrch::initPort(const PortConfig &port) { return true; } @@ -497,12 +522,12 @@ bool PortsOrch::getPortAdminStatus(sai_object_id_t id, bool &up) return true; } -bool PortsOrch::setPortMtu(sai_object_id_t id, sai_uint32_t mtu) +bool PortsOrch::setPortMtu(const Port &port, sai_uint32_t mtu) { return true; } -bool PortsOrch::setPortTpid(sai_object_id_t id, sai_uint16_t tpid) +bool PortsOrch::setPortTpid(Port &port, sai_uint16_t tpid) { return true; } @@ -517,12 +542,12 @@ bool PortsOrch::getPortPvid(Port &port, sai_uint32_t &pvid) return true; } -bool PortsOrch::setPortFec(Port &port, sai_port_fec_mode_t mode) +bool 
PortsOrch::setPortFec(Port &port, sai_port_fec_mode_t fec_mode) { return true; } -bool PortsOrch::setPortPfcAsym(Port &port, string pfc_asym) +bool PortsOrch::setPortPfcAsym(Port &port, sai_port_priority_flow_control_mode_t pfc_asym) { return true; } @@ -561,17 +586,17 @@ bool PortsOrch::getPortSpeed(sai_object_id_t port_id, sai_uint32_t &speed) return true; } -bool PortsOrch::setGearboxPortsAttr(Port &port, sai_port_attr_t id, void *value) +bool PortsOrch::setGearboxPortsAttr(const Port &port, sai_port_attr_t id, void *value) { return true; } -bool PortsOrch::setGearboxPortAttr(Port &port, dest_port_type_t port_type, sai_port_attr_t id, void *value) +bool PortsOrch::setGearboxPortAttr(const Port &port, dest_port_type_t port_type, sai_port_attr_t id, void *value) { return true; } -task_process_status PortsOrch::setPortAdvSpeeds(sai_object_id_t port_id, std::vector &speed_list) +task_process_status PortsOrch::setPortAdvSpeeds(Port &port, std::set &speed_list) { return task_success; } @@ -581,30 +606,22 @@ bool PortsOrch::getQueueTypeAndIndex(sai_object_id_t queue_id, string &type, uin return true; } -void PortsOrch::generateQueueMapPerPort(const Port &port) -{ -} - -void PortsOrch::generatePriorityGroupMapPerPort(const Port &port) +bool PortsOrch::isAutoNegEnabled(sai_object_id_t id) { + return true; } -task_process_status PortsOrch::setPortAutoNeg(sai_object_id_t id, int an) +task_process_status PortsOrch::setPortAutoNeg(Port &port, bool autoneg) { return task_success; } -bool PortsOrch::setPortFecMode(sai_object_id_t id, int fec) -{ - return true; -} - -task_process_status PortsOrch::setPortInterfaceType(sai_object_id_t id, sai_port_interface_type_t interface_type) +task_process_status PortsOrch::setPortInterfaceType(Port &port, sai_port_interface_type_t interface_type) { return task_success; } -task_process_status PortsOrch::setPortAdvInterfaceTypes(sai_object_id_t id, std::vector &interface_types) +task_process_status PortsOrch::setPortAdvInterfaceTypes(Port 
&port, std::set &interface_types) { return task_success; } @@ -622,25 +639,10 @@ void PortsOrch::updateDbPortOperSpeed(Port &port, sai_uint32_t speed) { } -void PortsOrch::getPortSerdesVal(const std::string &s, std::vector &lane_values) +void PortsOrch::getPortSerdesVal(const std::string &s, std::vector &lane_values, int base) { } -bool PortsOrch::getPortAdvSpeedsVal(const std::string &s, std::vector &speed_values) -{ - return true; -} - -bool PortsOrch::getPortInterfaceTypeVal(const std::string &s, sai_port_interface_type_t &interface_type) -{ - return true; -} - -bool PortsOrch::getPortAdvInterfaceTypesVal(const std::string &s, std::vector &type_values) -{ - return true; -} - void PortsOrch::removePortSerdesAttribute(sai_object_id_t port_id) { } @@ -677,7 +679,7 @@ void PortsOrch::voqSyncDelLag(Port &lag) { } -void PortsOrch::voqSyncAddLagMember(Port &lag, Port &port) +void PortsOrch::voqSyncAddLagMember(Port &lag, Port &port, string status) { } @@ -685,7 +687,11 @@ void PortsOrch::voqSyncDelLagMember(Port &lag, Port &port) { } -std::unordered_set PortsOrch::generateCounterStats(const string &type) +std::unordered_set PortsOrch::generateCounterStats(const string &type, bool gearbox) { return {}; -} \ No newline at end of file +} + +void PortsOrch::doTask(swss::SelectableTimer &timer) +{ +} diff --git a/orchagent/p4orch/tests/gre_tunnel_manager_test.cpp b/orchagent/p4orch/tests/gre_tunnel_manager_test.cpp new file mode 100644 index 0000000000..937c329fbe --- /dev/null +++ b/orchagent/p4orch/tests/gre_tunnel_manager_test.cpp @@ -0,0 +1,909 @@ +#include "gre_tunnel_manager.h" + +#include +#include + +#include +#include +#include + +#include "ipaddress.h" +#include "json.hpp" +#include "mock_response_publisher.h" +#include "mock_sai_router_interface.h" +#include "mock_sai_serialize.h" +#include "mock_sai_tunnel.h" +#include "p4oidmapper.h" +#include "p4orch/p4orch_util.h" +#include "p4orch_util.h" +#include "return_code.h" +#include "swssnet.h" +extern "C" +{ 
+#include "sai.h" +} + +using ::p4orch::kTableKeyDelimiter; + +using ::testing::_; +using ::testing::DoAll; +using ::testing::Eq; +using ::testing::Return; +using ::testing::SetArgPointee; +using ::testing::StrictMock; +using ::testing::Truly; + +extern sai_object_id_t gSwitchId; +extern sai_tunnel_api_t *sai_tunnel_api; +extern sai_router_interface_api_t *sai_router_intfs_api; +extern MockSaiTunnel *mock_sai_tunnel; + +namespace +{ +constexpr char *kRouterInterfaceId1 = "intf-eth-1/2/3"; +constexpr sai_object_id_t kRouterInterfaceOid1 = 1; +constexpr char *kGreTunnelP4AppDbId1 = "tunnel-1"; +constexpr char *kGreTunnelP4AppDbKey1 = R"({"match/tunnel_id":"tunnel-1"})"; +constexpr sai_object_id_t kGreTunnelOid1 = 0x11; +constexpr sai_object_id_t kOverlayRifOid1 = 0x101; + +// APP DB entries for Add request. +const P4GreTunnelAppDbEntry kP4GreTunnelAppDbEntry1{/*tunnel_id=*/"tunnel-1", + /*router_interface_id=*/"intf-eth-1/2/3", + /*encap_src_ip=*/swss::IpAddress("2607:f8b0:8096:3110::1"), + /*encap_dst_ip=*/swss::IpAddress("2607:f8b0:8096:311a::2"), + /*action_str=*/"mark_for_p2p_tunnel_encap"}; + +std::unordered_map CreateAttributeListForGreTunnelObject( + const P4GreTunnelAppDbEntry &app_entry, const sai_object_id_t &rif_oid) +{ + std::unordered_map tunnel_attrs; + sai_attribute_t tunnel_attr; + + tunnel_attr.id = SAI_TUNNEL_ATTR_TYPE; + tunnel_attr.value.s32 = SAI_TUNNEL_TYPE_IPINIP_GRE; + tunnel_attrs.insert({tunnel_attr.id, tunnel_attr.value}); + + tunnel_attr.id = SAI_TUNNEL_ATTR_PEER_MODE; + tunnel_attr.value.s32 = SAI_TUNNEL_PEER_MODE_P2P; + tunnel_attrs.insert({tunnel_attr.id, tunnel_attr.value}); + + tunnel_attr.id = SAI_TUNNEL_ATTR_OVERLAY_INTERFACE; + tunnel_attr.value.oid = kOverlayRifOid1; + tunnel_attrs.insert({tunnel_attr.id, tunnel_attr.value}); + + tunnel_attr.id = SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE; + tunnel_attr.value.oid = rif_oid; + tunnel_attrs.insert({tunnel_attr.id, tunnel_attr.value}); + + tunnel_attr.id = SAI_TUNNEL_ATTR_ENCAP_SRC_IP; + 
swss::copy(tunnel_attr.value.ipaddr, app_entry.encap_src_ip); + tunnel_attrs.insert({tunnel_attr.id, tunnel_attr.value}); + + tunnel_attr.id = SAI_TUNNEL_ATTR_ENCAP_DST_IP; + swss::copy(tunnel_attr.value.ipaddr, app_entry.encap_dst_ip); + tunnel_attrs.insert({tunnel_attr.id, tunnel_attr.value}); + + return tunnel_attrs; +} + +// Verifies whether the attribute list is the same as expected. +// Returns true if they match; otherwise, false. +bool MatchCreateGreTunnelArgAttrList(const sai_attribute_t *attr_list, + const std::unordered_map &expected_attr_list) +{ + if (attr_list == nullptr) + { + return false; + } + + // Sanity check for expected_attr_list. + const auto end = expected_attr_list.end(); + if (expected_attr_list.size() < 3 || expected_attr_list.find(SAI_TUNNEL_ATTR_TYPE) == end || + expected_attr_list.find(SAI_TUNNEL_ATTR_PEER_MODE) == end || + expected_attr_list.find(SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE) == end || + expected_attr_list.find(SAI_TUNNEL_ATTR_OVERLAY_INTERFACE) == end || + expected_attr_list.find(SAI_TUNNEL_ATTR_ENCAP_SRC_IP) == end || + expected_attr_list.find(SAI_TUNNEL_ATTR_ENCAP_DST_IP) == end) + { + return false; + } + + size_t valid_attrs_num = 0; + for (size_t i = 0; i < expected_attr_list.size(); ++i) + { + switch (attr_list[i].id) + { + case SAI_TUNNEL_ATTR_TYPE: { + if (attr_list[i].value.s32 != expected_attr_list.at(SAI_TUNNEL_ATTR_TYPE).s32) + { + return false; + } + valid_attrs_num++; + break; + } + case SAI_TUNNEL_ATTR_PEER_MODE: { + if (attr_list[i].value.s32 != expected_attr_list.at(SAI_TUNNEL_ATTR_PEER_MODE).s32) + { + return false; + } + valid_attrs_num++; + break; + } + case SAI_TUNNEL_ATTR_ENCAP_SRC_IP: { + if (attr_list[i].value.ipaddr.addr_family != + expected_attr_list.at(SAI_TUNNEL_ATTR_ENCAP_SRC_IP).ipaddr.addr_family || + (attr_list[i].value.ipaddr.addr_family == SAI_IP_ADDR_FAMILY_IPV4 && + attr_list[i].value.ipaddr.addr.ip4 != + expected_attr_list.at(SAI_TUNNEL_ATTR_ENCAP_SRC_IP).ipaddr.addr.ip4) || + 
(attr_list[i].value.ipaddr.addr_family == SAI_IP_ADDR_FAMILY_IPV6 && + memcmp(&attr_list[i].value.ipaddr.addr.ip6, + &expected_attr_list.at(SAI_TUNNEL_ATTR_ENCAP_SRC_IP).ipaddr.addr.ip6, sizeof(sai_ip6_t)) != 0)) + { + return false; + } + valid_attrs_num++; + break; + } + case SAI_TUNNEL_ATTR_ENCAP_DST_IP: { + if (attr_list[i].value.ipaddr.addr_family != + expected_attr_list.at(SAI_TUNNEL_ATTR_ENCAP_DST_IP).ipaddr.addr_family || + (attr_list[i].value.ipaddr.addr_family == SAI_IP_ADDR_FAMILY_IPV4 && + attr_list[i].value.ipaddr.addr.ip4 != + expected_attr_list.at(SAI_TUNNEL_ATTR_ENCAP_DST_IP).ipaddr.addr.ip4) || + (attr_list[i].value.ipaddr.addr_family == SAI_IP_ADDR_FAMILY_IPV6 && + memcmp(&attr_list[i].value.ipaddr.addr.ip6, + &expected_attr_list.at(SAI_TUNNEL_ATTR_ENCAP_DST_IP).ipaddr.addr.ip6, sizeof(sai_ip6_t)) != 0)) + { + return false; + } + valid_attrs_num++; + break; + } + case SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE: { + if (expected_attr_list.find(SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE) == end || + expected_attr_list.at(SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE).oid != attr_list[i].value.oid) + { + return false; + } + valid_attrs_num++; + break; + } + case SAI_TUNNEL_ATTR_OVERLAY_INTERFACE: { + if (expected_attr_list.find(SAI_TUNNEL_ATTR_OVERLAY_INTERFACE) == end || + expected_attr_list.at(SAI_TUNNEL_ATTR_OVERLAY_INTERFACE).oid != attr_list[i].value.oid) + { + return false; + } + valid_attrs_num++; + break; + } + default: + return false; + } + } + + if (expected_attr_list.size() != valid_attrs_num) + { + return false; + } + + return true; +} +} // namespace + +class GreTunnelManagerTest : public ::testing::Test +{ + protected: + GreTunnelManagerTest() : gre_tunnel_manager_(&p4_oid_mapper_, &publisher_) + { + } + + void SetUp() override + { + // Set up mock stuff for SAI tunnel API structure. 
+ mock_sai_tunnel = &mock_sai_tunnel_; + sai_tunnel_api->create_tunnel = mock_create_tunnel; + sai_tunnel_api->remove_tunnel = mock_remove_tunnel; + // Set up mock stuff for SAI router interface API structure. + mock_sai_router_intf = &mock_sai_router_intf_; + sai_router_intfs_api->create_router_interface = mock_create_router_interface; + sai_router_intfs_api->remove_router_interface = mock_remove_router_interface; + + mock_sai_serialize = &mock_sai_serialize_; + } + + void Enqueue(const swss::KeyOpFieldsValuesTuple &entry) + { + gre_tunnel_manager_.enqueue(APP_P4RT_TUNNEL_TABLE_NAME, entry); + } + + void Drain() + { + gre_tunnel_manager_.drain(); + } + + std::string VerifyState(const std::string &key, const std::vector &tuple) + { + return gre_tunnel_manager_.verifyState(key, tuple); + } + + ReturnCode ProcessAddRequest(const P4GreTunnelAppDbEntry &app_db_entry) + { + return gre_tunnel_manager_.processAddRequest(app_db_entry); + } + + ReturnCode ProcessDeleteRequest(const std::string &tunnel_key) + { + return gre_tunnel_manager_.processDeleteRequest(tunnel_key); + } + + P4GreTunnelEntry *GetGreTunnelEntry(const std::string &tunnel_key) + { + return gre_tunnel_manager_.getGreTunnelEntry(tunnel_key); + } + + ReturnCodeOr DeserializeP4GreTunnelAppDbEntry( + const std::string &key, const std::vector &attributes) + { + return gre_tunnel_manager_.deserializeP4GreTunnelAppDbEntry(key, attributes); + } + + // Adds the gre tunnel entry -- kP4GreTunnelAppDbEntry1, via gre tunnel + // manager's ProcessAddRequest (). This function also takes care of all the + // dependencies of the gre tunnel entry. Returns a valid pointer to gre tunnel + // entry on success. + P4GreTunnelEntry *AddGreTunnelEntry1(); + + // Validates that a P4 App gre tunnel entry is correctly added in gre tunnel + // manager and centralized mapper. Returns true on success. 
+ bool ValidateGreTunnelEntryAdd(const P4GreTunnelAppDbEntry &app_db_entry); + + // Return true if the specified the object has the expected number of + // reference. + bool ValidateRefCnt(sai_object_type_t object_type, const std::string &key, uint32_t expected_ref_count) + { + uint32_t ref_count; + if (!p4_oid_mapper_.getRefCount(object_type, key, &ref_count)) + return false; + return ref_count == expected_ref_count; + } + + StrictMock mock_sai_tunnel_; + StrictMock mock_sai_router_intf_; + StrictMock mock_sai_serialize_; + MockResponsePublisher publisher_; + P4OidMapper p4_oid_mapper_; + GreTunnelManager gre_tunnel_manager_; +}; + +P4GreTunnelEntry *GreTunnelManagerTest::AddGreTunnelEntry1() +{ + const auto gre_tunnel_key = KeyGenerator::generateTunnelKey(kP4GreTunnelAppDbEntry1.tunnel_id); + EXPECT_TRUE(p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_ROUTER_INTERFACE, + KeyGenerator::generateRouterInterfaceKey(kRouterInterfaceId1), + kRouterInterfaceOid1)); + + // Set up mock call. + EXPECT_CALL(mock_sai_router_intf_, create_router_interface(::testing::NotNull(), Eq(gSwitchId), Eq(2), _)) + .WillOnce(DoAll(SetArgPointee<0>(kOverlayRifOid1), Return(SAI_STATUS_SUCCESS))); + + EXPECT_CALL(mock_sai_tunnel_, create_tunnel(::testing::NotNull(), Eq(gSwitchId), Eq(6), + Truly(std::bind(MatchCreateGreTunnelArgAttrList, std::placeholders::_1, + CreateAttributeListForGreTunnelObject( + kP4GreTunnelAppDbEntry1, kRouterInterfaceOid1))))) + .WillOnce(DoAll(SetArgPointee<0>(kGreTunnelOid1), Return(SAI_STATUS_SUCCESS))); + + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessAddRequest(kP4GreTunnelAppDbEntry1)); + + return GetGreTunnelEntry(gre_tunnel_key); +} + +bool GreTunnelManagerTest::ValidateGreTunnelEntryAdd(const P4GreTunnelAppDbEntry &app_db_entry) +{ + const auto *p4_gre_tunnel_entry = GetGreTunnelEntry(KeyGenerator::generateTunnelKey(app_db_entry.tunnel_id)); + if (p4_gre_tunnel_entry == nullptr || p4_gre_tunnel_entry->encap_src_ip != app_db_entry.encap_src_ip || + 
p4_gre_tunnel_entry->encap_dst_ip != app_db_entry.encap_dst_ip || + p4_gre_tunnel_entry->neighbor_id != app_db_entry.encap_dst_ip || + p4_gre_tunnel_entry->router_interface_id != app_db_entry.router_interface_id || + p4_gre_tunnel_entry->tunnel_id != app_db_entry.tunnel_id) + { + return false; + } + + return true; +} + +TEST_F(GreTunnelManagerTest, ProcessAddRequestShouldSucceedAddingNewGreTunnel) +{ + AddGreTunnelEntry1(); + EXPECT_TRUE(ValidateGreTunnelEntryAdd(kP4GreTunnelAppDbEntry1)); +} + +TEST_F(GreTunnelManagerTest, ProcessAddRequestShouldFailWhenGreTunnelExistInCentralMapper) +{ + const auto gre_tunnel_key = KeyGenerator::generateTunnelKey(kP4GreTunnelAppDbEntry1.tunnel_id); + ASSERT_EQ(gre_tunnel_key, "tunnel_id=tunnel-1"); + ASSERT_TRUE(p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_TUNNEL, gre_tunnel_key, kGreTunnelOid1)); + // TODO: Expect critical state. + EXPECT_EQ(StatusCode::SWSS_RC_INTERNAL, ProcessAddRequest(kP4GreTunnelAppDbEntry1)); +} + +TEST_F(GreTunnelManagerTest, ProcessAddRequestShouldFailWhenDependingPortIsNotPresent) +{ + const P4GreTunnelAppDbEntry kAppDbEntry{/*tunnel_id=*/"tunnel-1", + /*router_interface_id=*/"intf-eth-1/2/3", + /*encap_src_ip=*/swss::IpAddress("2607:f8b0:8096:3110::1"), + /*encap_dst_ip=*/swss::IpAddress("2607:f8b0:8096:311a::2"), + /*action_str=*/"mark_for_p2p_tunnel_encap"}; + const auto gre_tunnel_key = KeyGenerator::generateTunnelKey(kAppDbEntry.tunnel_id); + + EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, ProcessAddRequest(kAppDbEntry)); + + EXPECT_EQ(GetGreTunnelEntry(gre_tunnel_key), nullptr); +} + +TEST_F(GreTunnelManagerTest, ProcessAddRequestShouldFailWhenRifSaiCallFails) +{ + const auto gre_tunnel_key = KeyGenerator::generateTunnelKey(kP4GreTunnelAppDbEntry1.tunnel_id); + EXPECT_TRUE(p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_ROUTER_INTERFACE, + KeyGenerator::generateRouterInterfaceKey(kRouterInterfaceId1), + kRouterInterfaceOid1)); + // Set up mock call. 
+ EXPECT_CALL(mock_sai_router_intf_, create_router_interface(::testing::NotNull(), Eq(gSwitchId), Eq(2), _)) + .WillOnce(DoAll(SetArgPointee<0>(kOverlayRifOid1), Return(SAI_STATUS_FAILURE))); + + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessAddRequest(kP4GreTunnelAppDbEntry1)); + + // The add request failed for the gre tunnel entry. + EXPECT_EQ(GetGreTunnelEntry(gre_tunnel_key), nullptr); +} + +TEST_F(GreTunnelManagerTest, ProcessAddRequestShouldFailWhenTunnelSaiCallFails) +{ + const auto gre_tunnel_key = KeyGenerator::generateTunnelKey(kP4GreTunnelAppDbEntry1.tunnel_id); + EXPECT_TRUE(p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_ROUTER_INTERFACE, + KeyGenerator::generateRouterInterfaceKey(kRouterInterfaceId1), + kRouterInterfaceOid1)); + // Set up mock call. + EXPECT_CALL(mock_sai_router_intf_, create_router_interface(::testing::NotNull(), Eq(gSwitchId), Eq(2), _)) + .WillOnce(DoAll(SetArgPointee<0>(kOverlayRifOid1), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_tunnel_, create_tunnel(::testing::NotNull(), Eq(gSwitchId), Eq(6), + Truly(std::bind(MatchCreateGreTunnelArgAttrList, std::placeholders::_1, + CreateAttributeListForGreTunnelObject( + kP4GreTunnelAppDbEntry1, kRouterInterfaceOid1))))) + .WillOnce(Return(SAI_STATUS_FAILURE)); + EXPECT_CALL(mock_sai_router_intf_, remove_router_interface(Eq(kOverlayRifOid1))) + .WillOnce(Return(SAI_STATUS_SUCCESS)); + + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessAddRequest(kP4GreTunnelAppDbEntry1)); + + // The add request failed for the gre tunnel entry. + EXPECT_EQ(GetGreTunnelEntry(gre_tunnel_key), nullptr); +} + +TEST_F(GreTunnelManagerTest, ProcessAddRequestShouldRaiseCriticalWhenRecoverySaiCallFails) +{ + const auto gre_tunnel_key = KeyGenerator::generateTunnelKey(kP4GreTunnelAppDbEntry1.tunnel_id); + EXPECT_TRUE(p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_ROUTER_INTERFACE, + KeyGenerator::generateRouterInterfaceKey(kRouterInterfaceId1), + kRouterInterfaceOid1)); + // Set up mock call. 
+ EXPECT_CALL(mock_sai_router_intf_, create_router_interface(::testing::NotNull(), Eq(gSwitchId), Eq(2), _)) + .WillOnce(DoAll(SetArgPointee<0>(kOverlayRifOid1), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_tunnel_, create_tunnel(::testing::NotNull(), Eq(gSwitchId), Eq(6), + Truly(std::bind(MatchCreateGreTunnelArgAttrList, std::placeholders::_1, + CreateAttributeListForGreTunnelObject( + kP4GreTunnelAppDbEntry1, kRouterInterfaceOid1))))) + .WillOnce(Return(SAI_STATUS_FAILURE)); + EXPECT_CALL(mock_sai_router_intf_, remove_router_interface(Eq(kOverlayRifOid1))) + .WillOnce(Return(SAI_STATUS_FAILURE)); + // TODO: Expect critical state. + + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessAddRequest(kP4GreTunnelAppDbEntry1)); + + // The add request failed for the gre tunnel entry. + EXPECT_EQ(GetGreTunnelEntry(gre_tunnel_key), nullptr); +} + +TEST_F(GreTunnelManagerTest, ProcessDeleteRequestShouldFailForNonExistingGreTunnel) +{ + const auto gre_tunnel_key = KeyGenerator::generateTunnelKey(kP4GreTunnelAppDbEntry1.tunnel_id); + EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, ProcessDeleteRequest(gre_tunnel_key)); +} + +TEST_F(GreTunnelManagerTest, ProcessDeleteRequestShouldFailIfGreTunnelEntryIsAbsentInCentralMapper) +{ + auto *p4_tunnel_entry = AddGreTunnelEntry1(); + ASSERT_NE(p4_tunnel_entry, nullptr); + + const auto gre_tunnel_key = KeyGenerator::generateTunnelKey(kP4GreTunnelAppDbEntry1.tunnel_id); + + ASSERT_TRUE(p4_oid_mapper_.eraseOID(SAI_OBJECT_TYPE_TUNNEL, gre_tunnel_key)); + + // TODO: Expect critical state. + EXPECT_EQ(StatusCode::SWSS_RC_INTERNAL, ProcessDeleteRequest(gre_tunnel_key)); + + // Validate the gre tunnel entry is not deleted in P4 gre tunnel manager. 
+ p4_tunnel_entry = GetGreTunnelEntry(gre_tunnel_key); + ASSERT_NE(p4_tunnel_entry, nullptr); +} + +TEST_F(GreTunnelManagerTest, ProcessDeleteRequestShouldFailIfGreTunnelEntryIsStillReferenced) +{ + auto *p4_tunnel_entry = AddGreTunnelEntry1(); + ASSERT_NE(p4_tunnel_entry, nullptr); + + const auto gre_tunnel_key = KeyGenerator::generateTunnelKey(kP4GreTunnelAppDbEntry1.tunnel_id); + ASSERT_TRUE(p4_oid_mapper_.increaseRefCount(SAI_OBJECT_TYPE_TUNNEL, gre_tunnel_key)); + + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ProcessDeleteRequest(gre_tunnel_key)); + + // Validate the gre tunnel entry is not deleted in either P4 gre tunnel + // manager or central mapper. + p4_tunnel_entry = GetGreTunnelEntry(gre_tunnel_key); + ASSERT_NE(p4_tunnel_entry, nullptr); + EXPECT_TRUE(ValidateRefCnt(SAI_OBJECT_TYPE_TUNNEL, gre_tunnel_key, 1)); +} + +TEST_F(GreTunnelManagerTest, ProcessDeleteRequestShouldFailIfTunnelSaiCallFails) +{ + auto *p4_tunnel_entry = AddGreTunnelEntry1(); + ASSERT_NE(p4_tunnel_entry, nullptr); + + const auto gre_tunnel_key = KeyGenerator::generateTunnelKey(kP4GreTunnelAppDbEntry1.tunnel_id); + + // Set up mock call. + EXPECT_CALL(mock_sai_tunnel_, remove_tunnel(Eq(p4_tunnel_entry->tunnel_oid))).WillOnce(Return(SAI_STATUS_FAILURE)); + + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessDeleteRequest(gre_tunnel_key)); + + // Validate the gre tunnel entry is not deleted in either P4 gre tunnel + // manager or central mapper. + p4_tunnel_entry = GetGreTunnelEntry(gre_tunnel_key); + ASSERT_NE(p4_tunnel_entry, nullptr); + EXPECT_TRUE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_TUNNEL, gre_tunnel_key)); +} + +TEST_F(GreTunnelManagerTest, ProcessDeleteRequestShouldFailIfRifSaiCallFails) +{ + auto *p4_tunnel_entry = AddGreTunnelEntry1(); + ASSERT_NE(p4_tunnel_entry, nullptr); + + const auto gre_tunnel_key = KeyGenerator::generateTunnelKey(kP4GreTunnelAppDbEntry1.tunnel_id); + + // Set up mock call. 
+ EXPECT_CALL(mock_sai_tunnel_, remove_tunnel(Eq(p4_tunnel_entry->tunnel_oid))).WillOnce(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL(mock_sai_router_intf_, remove_router_interface(Eq(kOverlayRifOid1))) + .WillOnce(Return(SAI_STATUS_FAILURE)); + EXPECT_CALL(mock_sai_tunnel_, create_tunnel(::testing::NotNull(), Eq(gSwitchId), Eq(6), + Truly(std::bind(MatchCreateGreTunnelArgAttrList, std::placeholders::_1, + CreateAttributeListForGreTunnelObject( + kP4GreTunnelAppDbEntry1, kRouterInterfaceOid1))))) + .WillOnce(DoAll(SetArgPointee<0>(kGreTunnelOid1), Return(SAI_STATUS_SUCCESS))); + + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessDeleteRequest(gre_tunnel_key)); + + // Validate the gre tunnel entry is not deleted in either P4 gre tunnel + // manager or central mapper. + p4_tunnel_entry = GetGreTunnelEntry(gre_tunnel_key); + ASSERT_NE(p4_tunnel_entry, nullptr); + EXPECT_TRUE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_TUNNEL, gre_tunnel_key)); +} + +TEST_F(GreTunnelManagerTest, ProcessDeleteRequestShouldRaiseCriticalIfRecoverySaiCallFails) +{ + auto *p4_tunnel_entry = AddGreTunnelEntry1(); + ASSERT_NE(p4_tunnel_entry, nullptr); + + const auto gre_tunnel_key = KeyGenerator::generateTunnelKey(kP4GreTunnelAppDbEntry1.tunnel_id); + + // Set up mock call. + EXPECT_CALL(mock_sai_tunnel_, remove_tunnel(Eq(p4_tunnel_entry->tunnel_oid))).WillOnce(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL(mock_sai_router_intf_, remove_router_interface(Eq(kOverlayRifOid1))) + .WillOnce(Return(SAI_STATUS_FAILURE)); + EXPECT_CALL(mock_sai_tunnel_, create_tunnel(::testing::NotNull(), Eq(gSwitchId), Eq(6), + Truly(std::bind(MatchCreateGreTunnelArgAttrList, std::placeholders::_1, + CreateAttributeListForGreTunnelObject( + kP4GreTunnelAppDbEntry1, kRouterInterfaceOid1))))) + .WillOnce(Return(SAI_STATUS_FAILURE)); + + // TODO: Expect critical state. 
+ + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessDeleteRequest(gre_tunnel_key)); + + // Validate the gre tunnel entry is not deleted in either P4 gre tunnel + // manager or central mapper. + p4_tunnel_entry = GetGreTunnelEntry(gre_tunnel_key); + ASSERT_NE(p4_tunnel_entry, nullptr); + EXPECT_TRUE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_TUNNEL, gre_tunnel_key)); +} + +TEST_F(GreTunnelManagerTest, GetGreTunnelEntryShouldReturnNullPointerForNonexistingGreTunnel) +{ + const auto gre_tunnel_key = KeyGenerator::generateTunnelKey(kP4GreTunnelAppDbEntry1.tunnel_id); + EXPECT_EQ(GetGreTunnelEntry(gre_tunnel_key), nullptr); +} + +TEST_F(GreTunnelManagerTest, DeserializeP4GreTunnelAppDbEntryShouldReturnNullPointerForInvalidField) +{ + std::vector attributes = {swss::FieldValueTuple(p4orch::kAction, p4orch::kTunnelAction), + swss::FieldValueTuple("UNKNOWN_FIELD", "UNKOWN")}; + + EXPECT_FALSE(DeserializeP4GreTunnelAppDbEntry(kGreTunnelP4AppDbKey1, attributes).ok()); +} + +TEST_F(GreTunnelManagerTest, DeserializeP4GreTunnelAppDbEntryShouldReturnNullPointerForInvalidIP) +{ + std::vector attributes = { + swss::FieldValueTuple(p4orch::kAction, p4orch::kTunnelAction), + swss::FieldValueTuple(prependParamField(p4orch::kRouterInterfaceId), kRouterInterfaceId1), + swss::FieldValueTuple(prependParamField(p4orch::kEncapSrcIp), "1.2.3.4"), + swss::FieldValueTuple(prependParamField(p4orch::kEncapDstIp), "2.3.4.5")}; + EXPECT_TRUE(DeserializeP4GreTunnelAppDbEntry(kGreTunnelP4AppDbKey1, attributes).ok()); + attributes = {swss::FieldValueTuple(p4orch::kAction, p4orch::kTunnelAction), + swss::FieldValueTuple(prependParamField(p4orch::kRouterInterfaceId), kRouterInterfaceId1), + swss::FieldValueTuple(prependParamField(p4orch::kEncapSrcIp), "1:2:3:4"), + swss::FieldValueTuple(prependParamField(p4orch::kEncapDstIp), "1.2.3.5")}; + EXPECT_FALSE(DeserializeP4GreTunnelAppDbEntry(kGreTunnelP4AppDbKey1, attributes).ok()); + attributes = {swss::FieldValueTuple(p4orch::kAction, p4orch::kTunnelAction), 
+ swss::FieldValueTuple(prependParamField(p4orch::kRouterInterfaceId), kRouterInterfaceId1), + swss::FieldValueTuple(prependParamField(p4orch::kEncapSrcIp), "1.2.3.4"), + swss::FieldValueTuple(prependParamField(p4orch::kEncapDstIp), "1:2:3:5")}; + EXPECT_FALSE(DeserializeP4GreTunnelAppDbEntry(kGreTunnelP4AppDbKey1, attributes).ok()); +} + +TEST_F(GreTunnelManagerTest, DeserializeP4GreTunnelAppDbEntryShouldReturnNullPointerForInvalidKey) +{ + std::vector attributes = { + {p4orch::kAction, p4orch::kTunnelAction}, + {prependParamField(p4orch::kRouterInterfaceId), kP4GreTunnelAppDbEntry1.router_interface_id}, + {prependParamField(p4orch::kEncapSrcIp), kP4GreTunnelAppDbEntry1.encap_src_ip.to_string()}, + {prependParamField(p4orch::kEncapDstIp), kP4GreTunnelAppDbEntry1.encap_dst_ip.to_string()}}; + constexpr char *kInvalidAppDbKey = R"({"tunnel_id":1})"; + EXPECT_FALSE(DeserializeP4GreTunnelAppDbEntry(kInvalidAppDbKey, attributes).ok()); +} + +TEST_F(GreTunnelManagerTest, DrainDuplicateSetRequestShouldSucceed) +{ + auto *p4_tunnel_entry = AddGreTunnelEntry1(); + ASSERT_NE(p4_tunnel_entry, nullptr); + + nlohmann::json j; + j[prependMatchField(p4orch::kTunnelId)] = kP4GreTunnelAppDbEntry1.tunnel_id; + + std::vector fvs{ + {p4orch::kAction, p4orch::kTunnelAction}, + {prependParamField(p4orch::kRouterInterfaceId), kP4GreTunnelAppDbEntry1.router_interface_id}, + {prependParamField(p4orch::kEncapSrcIp), kP4GreTunnelAppDbEntry1.encap_src_ip.to_string()}, + {prependParamField(p4orch::kEncapDstIp), kP4GreTunnelAppDbEntry1.encap_dst_ip.to_string()}}; + + swss::KeyOpFieldsValuesTuple app_db_entry(std::string(APP_P4RT_TUNNEL_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + SET_COMMAND, fvs); + + Enqueue(app_db_entry); + Drain(); + + // Expect that the update call will fail, so gre tunnel entry's fields stay + // the same. 
+ EXPECT_TRUE(ValidateGreTunnelEntryAdd(kP4GreTunnelAppDbEntry1)); +} + +TEST_F(GreTunnelManagerTest, DrainDeleteRequestShouldSucceedForExistingGreTunnel) +{ + auto *p4_tunnel_entry = AddGreTunnelEntry1(); + ASSERT_NE(p4_tunnel_entry, nullptr); + EXPECT_EQ(p4_tunnel_entry->tunnel_oid, kGreTunnelOid1); + + nlohmann::json j; + j[prependMatchField(p4orch::kTunnelId)] = kP4GreTunnelAppDbEntry1.tunnel_id; + + std::vector fvs; + swss::KeyOpFieldsValuesTuple app_db_entry(std::string(APP_P4RT_TUNNEL_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + DEL_COMMAND, fvs); + EXPECT_CALL(mock_sai_router_intf_, remove_router_interface(Eq(kOverlayRifOid1))) + .WillOnce(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL(mock_sai_tunnel_, remove_tunnel(Eq(p4_tunnel_entry->tunnel_oid))).WillOnce(Return(SAI_STATUS_SUCCESS)); + + Enqueue(app_db_entry); + Drain(); + + // Validate the gre tunnel entry has been deleted in both P4 gre tunnel + // manager and centralized mapper. + const auto gre_tunnel_key = KeyGenerator::generateTunnelKey(kP4GreTunnelAppDbEntry1.tunnel_id); + p4_tunnel_entry = GetGreTunnelEntry(gre_tunnel_key); + EXPECT_EQ(p4_tunnel_entry, nullptr); + EXPECT_FALSE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_TUNNEL, gre_tunnel_key)); +} + +TEST_F(GreTunnelManagerTest, DrainValidAppEntryShouldSucceed) +{ + nlohmann::json j; + j[prependMatchField(p4orch::kTunnelId)] = kGreTunnelP4AppDbId1; + EXPECT_TRUE(p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_ROUTER_INTERFACE, + KeyGenerator::generateRouterInterfaceKey(kRouterInterfaceId1), + kRouterInterfaceOid1)); + + std::vector fvs{ + {p4orch::kAction, p4orch::kTunnelAction}, + {prependParamField(p4orch::kRouterInterfaceId), kP4GreTunnelAppDbEntry1.router_interface_id}, + {prependParamField(p4orch::kEncapSrcIp), kP4GreTunnelAppDbEntry1.encap_src_ip.to_string()}, + {prependParamField(p4orch::kEncapDstIp), kP4GreTunnelAppDbEntry1.encap_dst_ip.to_string()}}; + + swss::KeyOpFieldsValuesTuple app_db_entry(std::string(APP_P4RT_TUNNEL_TABLE_NAME) + 
kTableKeyDelimiter + j.dump(), + SET_COMMAND, fvs); + + Enqueue(app_db_entry); + EXPECT_CALL(mock_sai_router_intf_, create_router_interface(::testing::NotNull(), Eq(gSwitchId), Eq(2), _)) + .WillOnce(DoAll(SetArgPointee<0>(kOverlayRifOid1), Return(SAI_STATUS_SUCCESS))); + + EXPECT_CALL(mock_sai_tunnel_, create_tunnel(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kGreTunnelOid1), Return(SAI_STATUS_SUCCESS))); + + Drain(); + + EXPECT_TRUE(ValidateGreTunnelEntryAdd(kP4GreTunnelAppDbEntry1)); +} + +TEST_F(GreTunnelManagerTest, DrainInvalidAppEntryShouldFail) +{ + nlohmann::json j; + j[prependMatchField(p4orch::kTunnelId)] = kGreTunnelP4AppDbId1; + j[p4orch::kTunnelId] = 1000; + + std::vector fvs{ + {p4orch::kAction, p4orch::kTunnelAction}, + {prependParamField(p4orch::kRouterInterfaceId), kP4GreTunnelAppDbEntry1.router_interface_id}, + {prependParamField(p4orch::kEncapSrcIp), "1"}, + {prependParamField(p4orch::kEncapDstIp), kP4GreTunnelAppDbEntry1.encap_dst_ip.to_string()}}; + + swss::KeyOpFieldsValuesTuple app_db_entry(std::string(APP_P4RT_TUNNEL_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + SET_COMMAND, fvs); + + Enqueue(app_db_entry); + + Drain(); + EXPECT_EQ(GetGreTunnelEntry(kGreTunnelP4AppDbKey1), nullptr); + EXPECT_FALSE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_TUNNEL, kGreTunnelP4AppDbKey1)); + + // Invalid action_str + fvs = {{p4orch::kAction, "set_nexthop"}, + {prependParamField(p4orch::kRouterInterfaceId), kP4GreTunnelAppDbEntry1.router_interface_id}, + {prependParamField(p4orch::kEncapSrcIp), kP4GreTunnelAppDbEntry1.encap_src_ip.to_string()}, + {prependParamField(p4orch::kEncapDstIp), kP4GreTunnelAppDbEntry1.encap_dst_ip.to_string()}}; + + app_db_entry = {std::string(APP_P4RT_TUNNEL_TABLE_NAME) + kTableKeyDelimiter + j.dump(), SET_COMMAND, fvs}; + + Enqueue(app_db_entry); + + Drain(); + EXPECT_EQ(GetGreTunnelEntry(kGreTunnelP4AppDbKey1), nullptr); + EXPECT_FALSE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_TUNNEL, kGreTunnelP4AppDbKey1)); + + // Miss action 
+ fvs = {{prependParamField(p4orch::kRouterInterfaceId), kP4GreTunnelAppDbEntry1.router_interface_id}, + {prependParamField(p4orch::kEncapSrcIp), kP4GreTunnelAppDbEntry1.encap_src_ip.to_string()}, + {prependParamField(p4orch::kEncapDstIp), kP4GreTunnelAppDbEntry1.encap_dst_ip.to_string()}}; + + app_db_entry = {std::string(APP_P4RT_TUNNEL_TABLE_NAME) + kTableKeyDelimiter + j.dump(), SET_COMMAND, fvs}; + + Enqueue(app_db_entry); + + Drain(); + EXPECT_EQ(GetGreTunnelEntry(kGreTunnelP4AppDbKey1), nullptr); + EXPECT_FALSE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_TUNNEL, kGreTunnelP4AppDbKey1)); + + // Miss router_interface_id + fvs = {{p4orch::kAction, p4orch::kTunnelAction}, + {prependParamField(p4orch::kEncapSrcIp), kP4GreTunnelAppDbEntry1.encap_src_ip.to_string()}, + {prependParamField(p4orch::kEncapDstIp), kP4GreTunnelAppDbEntry1.encap_dst_ip.to_string()}}; + + app_db_entry = {std::string(APP_P4RT_TUNNEL_TABLE_NAME) + kTableKeyDelimiter + j.dump(), SET_COMMAND, fvs}; + + Enqueue(app_db_entry); + + Drain(); + EXPECT_EQ(GetGreTunnelEntry(kGreTunnelP4AppDbKey1), nullptr); + EXPECT_FALSE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_TUNNEL, kGreTunnelP4AppDbKey1)); + + // Miss encap_src_ip + fvs = {{p4orch::kAction, p4orch::kTunnelAction}, + {prependParamField(p4orch::kRouterInterfaceId), kP4GreTunnelAppDbEntry1.router_interface_id}, + {prependParamField(p4orch::kEncapDstIp), kP4GreTunnelAppDbEntry1.encap_dst_ip.to_string()}}; + + app_db_entry = {std::string(APP_P4RT_TUNNEL_TABLE_NAME) + kTableKeyDelimiter + j.dump(), SET_COMMAND, fvs}; + + Enqueue(app_db_entry); + + Drain(); + EXPECT_EQ(GetGreTunnelEntry(kGreTunnelP4AppDbKey1), nullptr); + EXPECT_FALSE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_TUNNEL, kGreTunnelP4AppDbKey1)); + + // Miss encap_dst_ip + fvs = {{p4orch::kAction, p4orch::kTunnelAction}, + {prependParamField(p4orch::kRouterInterfaceId), kP4GreTunnelAppDbEntry1.router_interface_id}, + {prependParamField(p4orch::kEncapSrcIp), 
kP4GreTunnelAppDbEntry1.encap_src_ip.to_string()}}; + + app_db_entry = {std::string(APP_P4RT_TUNNEL_TABLE_NAME) + kTableKeyDelimiter + j.dump(), SET_COMMAND, fvs}; + + Enqueue(app_db_entry); + + Drain(); + EXPECT_EQ(GetGreTunnelEntry(kGreTunnelP4AppDbKey1), nullptr); + EXPECT_FALSE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_TUNNEL, kGreTunnelP4AppDbKey1)); +} + +TEST_F(GreTunnelManagerTest, VerifyStateTest) +{ + auto *p4_tunnel_entry = AddGreTunnelEntry1(); + ASSERT_NE(p4_tunnel_entry, nullptr); + + // Setup ASIC DB. + swss::Table table(nullptr, "ASIC_STATE"); + table.set("SAI_OBJECT_TYPE_TUNNEL:oid:0x11", + std::vector{ + swss::FieldValueTuple{"SAI_TUNNEL_ATTR_TYPE", "SAI_TUNNEL_TYPE_IPINIP_GRE"}, + swss::FieldValueTuple{"SAI_TUNNEL_ATTR_PEER_MODE", "SAI_TUNNEL_PEER_MODE_P2P"}, + swss::FieldValueTuple{"SAI_TUNNEL_ATTR_ENCAP_SRC_IP", "2607:f8b0:8096:3110::1"}, + swss::FieldValueTuple{"SAI_TUNNEL_ATTR_ENCAP_DST_IP", "2607:f8b0:8096:311a::2"}, + swss::FieldValueTuple{"SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE", "oid:0x1"}, + swss::FieldValueTuple{"SAI_TUNNEL_ATTR_OVERLAY_INTERFACE", "oid:0x101"}}); + + // Overlay router interface + table.set("SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x101", + std::vector{ + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID", "oid:0x0"}, + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_TYPE", "SAI_ROUTER_INTERFACE_TYPE_LOOPBACK"}}); + + // Underlay router interface + table.set("SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x1", + std::vector{ + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID", "oid:0x0"}, + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS", "00:01:02:03:04:05"}, + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_TYPE", "SAI_ROUTER_INTERFACE_TYPE_PORT"}, + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_PORT_ID", "oid:0x1234"}, + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_MTU", "9100"}}); + + nlohmann::json j; + j[prependMatchField(p4orch::kTunnelId)] = kGreTunnelP4AppDbId1; + 
const std::string db_key = std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + APP_P4RT_TUNNEL_TABLE_NAME + + kTableKeyDelimiter + j.dump(); + std::vector attributes; + + // Verification should succeed with valid key and value. + attributes.push_back(swss::FieldValueTuple{p4orch::kAction, p4orch::kTunnelAction}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kRouterInterfaceId), + kP4GreTunnelAppDbEntry1.router_interface_id}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kEncapSrcIp), + kP4GreTunnelAppDbEntry1.encap_src_ip.to_string()}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kEncapDstIp), + kP4GreTunnelAppDbEntry1.encap_dst_ip.to_string()}); + EXPECT_EQ(VerifyState(db_key, attributes), ""); + + // Invalid key should fail verification. + EXPECT_FALSE(VerifyState("invalid", attributes).empty()); + EXPECT_FALSE(VerifyState("invalid:invalid", attributes).empty()); + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + ":invalid", attributes).empty()); + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + ":invalid:invalid", attributes).empty()); + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + ":FIXED_TUNNEL_TABLE:invalid", attributes).empty()); + + // Verification should fail if entry does not exist. + j[prependMatchField(p4orch::kTunnelId)] = "invalid"; + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + APP_P4RT_TUNNEL_TABLE_NAME + + kTableKeyDelimiter + j.dump(), + attributes) + .empty()); + + // Verification should fail if router interface name mismatches. + auto saved_router_interface_id = p4_tunnel_entry->router_interface_id; + p4_tunnel_entry->router_interface_id = "invalid"; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + p4_tunnel_entry->router_interface_id = saved_router_interface_id; + + // Verification should fail if tunnel key mismatches. 
+ auto saved_tunnel_key = p4_tunnel_entry->tunnel_key; + p4_tunnel_entry->tunnel_key = "invalid"; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + p4_tunnel_entry->tunnel_key = saved_tunnel_key; + + // Verification should fail if encap_src_ip mismatches. + auto saved_SRC_IP = p4_tunnel_entry->encap_src_ip; + p4_tunnel_entry->encap_src_ip = swss::IpAddress("1.1.1.1"); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + p4_tunnel_entry->encap_src_ip = saved_SRC_IP; + + // Verification should fail if encap_dst_ip mismatches. + auto saved_DST_IP = p4_tunnel_entry->encap_dst_ip; + p4_tunnel_entry->encap_dst_ip = swss::IpAddress("2.2.2.2"); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + p4_tunnel_entry->encap_dst_ip = saved_DST_IP; + + // Verification should fail if neighbor_id mismatches. + auto saved_NEIGHBOR_ID = p4_tunnel_entry->neighbor_id; + p4_tunnel_entry->neighbor_id = swss::IpAddress("2.2.2.2"); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + p4_tunnel_entry->neighbor_id = saved_NEIGHBOR_ID; + + // Verification should fail if tunnel_id mismatches. + auto saved_tunnel_id = p4_tunnel_entry->tunnel_id; + p4_tunnel_entry->tunnel_id = "invalid"; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + p4_tunnel_entry->tunnel_id = saved_tunnel_id; + + // Verification should fail if OID mapper mismatches. + const auto gre_tunnel_key = KeyGenerator::generateTunnelKey(kP4GreTunnelAppDbEntry1.tunnel_id); + p4_oid_mapper_.eraseOID(SAI_OBJECT_TYPE_TUNNEL, gre_tunnel_key); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_TUNNEL, gre_tunnel_key, kGreTunnelOid1); +} + +TEST_F(GreTunnelManagerTest, VerifyStateAsicDbTest) +{ + auto *p4_tunnel_entry = AddGreTunnelEntry1(); + ASSERT_NE(p4_tunnel_entry, nullptr); + + // Setup ASIC DB. 
+ swss::Table table(nullptr, "ASIC_STATE"); + table.set("SAI_OBJECT_TYPE_TUNNEL:oid:0x11", + std::vector{ + swss::FieldValueTuple{"SAI_TUNNEL_ATTR_TYPE", "SAI_TUNNEL_TYPE_IPINIP_GRE"}, + swss::FieldValueTuple{"SAI_TUNNEL_ATTR_PEER_MODE", "SAI_TUNNEL_PEER_MODE_P2P"}, + swss::FieldValueTuple{"SAI_TUNNEL_ATTR_ENCAP_SRC_IP", "2607:f8b0:8096:3110::1"}, + swss::FieldValueTuple{"SAI_TUNNEL_ATTR_ENCAP_DST_IP", "2607:f8b0:8096:311a::2"}, + swss::FieldValueTuple{"SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE", "oid:0x1"}, + swss::FieldValueTuple{"SAI_TUNNEL_ATTR_OVERLAY_INTERFACE", "oid:0x101"}}); + + // Overlay router interface + table.set("SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x101", + std::vector{ + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID", "oid:0x0"}, + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_TYPE", "SAI_ROUTER_INTERFACE_TYPE_LOOPBACK"}}); + + // Underlay router interface + table.set("SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x1", + std::vector{ + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID", "oid:0x0"}, + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS", "00:01:02:03:04:05"}, + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_TYPE", "SAI_ROUTER_INTERFACE_TYPE_PORT"}, + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_PORT_ID", "oid:0x1234"}, + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_MTU", "9100"}}); + + nlohmann::json j; + j[prependMatchField(p4orch::kTunnelId)] = kGreTunnelP4AppDbId1; + const std::string db_key = std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + APP_P4RT_TUNNEL_TABLE_NAME + + kTableKeyDelimiter + j.dump(); + std::vector attributes; + + // Verification should succeed with vaild key and value. 
+ attributes.push_back(swss::FieldValueTuple{p4orch::kAction, p4orch::kTunnelAction}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kRouterInterfaceId), + kP4GreTunnelAppDbEntry1.router_interface_id}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kEncapSrcIp), + kP4GreTunnelAppDbEntry1.encap_src_ip.to_string()}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kEncapDstIp), + kP4GreTunnelAppDbEntry1.encap_dst_ip.to_string()}); + EXPECT_EQ(VerifyState(db_key, attributes), ""); + + // Verification should fail if ASIC DB values mismatch. + table.set("SAI_OBJECT_TYPE_TUNNEL:oid:0x11", std::vector{swss::FieldValueTuple{ + "SAI_TUNNEL_ATTR_ENCAP_SRC_IP", "2607:f8b0:8096:3110::3"}}); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + + // Verification should fail if ASIC DB table is missing. + table.del("SAI_OBJECT_TYPE_TUNNEL:oid:0x11"); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + + table.set("SAI_OBJECT_TYPE_TUNNEL:oid:0x11", + std::vector{ + swss::FieldValueTuple{"SAI_TUNNEL_ATTR_TYPE", "SAI_TUNNEL_TYPE_IPINIP_GRE"}, + swss::FieldValueTuple{"SAI_TUNNEL_ATTR_PEER_MODE", "SAI_TUNNEL_PEER_MODE_P2P"}, + swss::FieldValueTuple{"SAI_TUNNEL_ATTR_ENCAP_SRC_IP", "2607:f8b0:8096:3110::1"}, + swss::FieldValueTuple{"SAI_TUNNEL_ATTR_ENCAP_DST_IP", "2607:f8b0:8096:311a::2"}, + swss::FieldValueTuple{"SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE", "oid:0x1"}, + swss::FieldValueTuple{"SAI_TUNNEL_ATTR_OVERLAY_INTERFACE", "oid:0x101"}}); + + // Verification should fail if SAI attr cannot be constructed. 
+ p4_tunnel_entry->encap_src_ip = swss::IpAddress("1.2.3.4"); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + p4_tunnel_entry->encap_src_ip = swss::IpAddress("2607:f8b0:8096:3110::1"); +} diff --git a/orchagent/p4orch/tests/l3_admit_manager_test.cpp b/orchagent/p4orch/tests/l3_admit_manager_test.cpp new file mode 100644 index 0000000000..f1d85cdded --- /dev/null +++ b/orchagent/p4orch/tests/l3_admit_manager_test.cpp @@ -0,0 +1,653 @@ +#include "l3_admit_manager.h" + +#include +#include + +#include +#include +#include + +#include "json.hpp" +#include "mock_response_publisher.h" +#include "mock_sai_my_mac.h" +#include "p4oidmapper.h" +#include "p4orch/p4orch_util.h" +#include "p4orch_util.h" +#include "return_code.h" +extern "C" +{ +#include "sai.h" +} + +using ::p4orch::kTableKeyDelimiter; + +using ::testing::_; +using ::testing::DoAll; +using ::testing::Eq; +using ::testing::Return; +using ::testing::SetArgPointee; +using ::testing::StrictMock; +using ::testing::Truly; + +extern sai_object_id_t gSwitchId; +extern sai_my_mac_api_t *sai_my_mac_api; +extern MockSaiMyMac *mock_sai_my_mac; + +namespace +{ +constexpr char *kPortName1 = "Ethernet1"; +constexpr sai_object_id_t kPortOid1 = 0x112233; +constexpr uint32_t kMtu1 = 1500; + +constexpr char *kPortName2 = "Ethernet2"; +constexpr sai_object_id_t kPortOid2 = 0x1fed3; +constexpr uint32_t kMtu2 = 4500; + +constexpr char *kL3AdmitP4AppDbKey1 = R"({"match/dst_mac":"00:02:03:04:00:00&ff:ff:ff:ff:00:00","priority":2030})"; +constexpr sai_object_id_t kL3AdmitOid1 = 0x1; +constexpr sai_object_id_t kL3AdmitOid2 = 0x2; + +// APP DB entries for Add request. 
+const P4L3AdmitAppDbEntry kP4L3AdmitAppDbEntry1{/*port_name=*/"", + /*mac_address_data=*/swss::MacAddress("00:02:03:04:00:00"), + /*mac_address_mask=*/swss::MacAddress("ff:ff:ff:ff:00:00"), + /*priority=*/2030}; + +const P4L3AdmitAppDbEntry kP4L3AdmitAppDbEntry2{/*port_name=*/kPortName1, + /*mac_address_data=*/swss::MacAddress("00:02:03:04:05:00"), + /*mac_address_mask=*/swss::MacAddress("ff:ff:ff:ff:ff:00"), + /*priority=*/2030}; + +std::unordered_map CreateAttributeListForL3AdmitObject( + const P4L3AdmitAppDbEntry &app_entry, const sai_object_id_t &port_oid) +{ + std::unordered_map my_mac_attrs; + sai_attribute_t my_mac_attr; + + my_mac_attr.id = SAI_MY_MAC_ATTR_PRIORITY; + my_mac_attr.value.u32 = app_entry.priority; + my_mac_attrs.insert({my_mac_attr.id, my_mac_attr.value}); + + my_mac_attr.id = SAI_MY_MAC_ATTR_MAC_ADDRESS; + memcpy(my_mac_attr.value.mac, app_entry.mac_address_data.getMac(), sizeof(sai_mac_t)); + my_mac_attrs.insert({my_mac_attr.id, my_mac_attr.value}); + + my_mac_attr.id = SAI_MY_MAC_ATTR_MAC_ADDRESS_MASK; + memcpy(my_mac_attr.value.mac, app_entry.mac_address_mask.getMac(), sizeof(sai_mac_t)); + my_mac_attrs.insert({my_mac_attr.id, my_mac_attr.value}); + + if (port_oid != SAI_NULL_OBJECT_ID) + { + my_mac_attr.id = SAI_MY_MAC_ATTR_PORT_ID; + my_mac_attr.value.oid = port_oid; + my_mac_attrs.insert({my_mac_attr.id, my_mac_attr.value}); + } + + return my_mac_attrs; +} + +// Verifies whether the attribute list is the same as expected. +// Returns true if they match; otherwise, false. +bool MatchCreateL3AdmitArgAttrList(const sai_attribute_t *attr_list, + const std::unordered_map &expected_attr_list) +{ + if (attr_list == nullptr) + { + return false; + } + + // Sanity check for expected_attr_list. 
+ const auto end = expected_attr_list.end(); + if (expected_attr_list.size() < 3 || expected_attr_list.find(SAI_MY_MAC_ATTR_PRIORITY) == end || + expected_attr_list.find(SAI_MY_MAC_ATTR_MAC_ADDRESS) == end || + expected_attr_list.find(SAI_MY_MAC_ATTR_MAC_ADDRESS_MASK) == end) + { + return false; + } + + size_t valid_attrs_num = 0; + for (size_t i = 0; i < expected_attr_list.size(); ++i) + { + switch (attr_list[i].id) + { + case SAI_MY_MAC_ATTR_PRIORITY: { + if (attr_list[i].value.u32 != expected_attr_list.at(SAI_MY_MAC_ATTR_PRIORITY).u32) + { + return false; + } + valid_attrs_num++; + break; + } + case SAI_MY_MAC_ATTR_MAC_ADDRESS: { + auto macaddr = swss::MacAddress(attr_list[i].value.mac); + auto expected_macaddr = swss::MacAddress(expected_attr_list.at(SAI_MY_MAC_ATTR_MAC_ADDRESS).mac); + if (macaddr != expected_macaddr) + { + return false; + } + valid_attrs_num++; + break; + } + case SAI_MY_MAC_ATTR_MAC_ADDRESS_MASK: { + auto macaddr = swss::MacAddress(attr_list[i].value.mac); + auto expected_macaddr = swss::MacAddress(expected_attr_list.at(SAI_MY_MAC_ATTR_MAC_ADDRESS_MASK).mac); + if (macaddr != expected_macaddr) + { + return false; + } + valid_attrs_num++; + break; + } + case SAI_MY_MAC_ATTR_PORT_ID: { + if (expected_attr_list.find(SAI_MY_MAC_ATTR_PORT_ID) == end || + expected_attr_list.at(SAI_MY_MAC_ATTR_PORT_ID).oid != attr_list[i].value.oid) + { + return false; + } + valid_attrs_num++; + break; + } + default: + return false; + } + } + + if (expected_attr_list.size() != valid_attrs_num) + { + return false; + } + + return true; +} +} // namespace + +class L3AdmitManagerTest : public ::testing::Test +{ + protected: + L3AdmitManagerTest() : l3_admit_manager_(&p4_oid_mapper_, &publisher_) + { + } + + void SetUp() override + { + // Set up mock stuff for SAI l3 admit API structure. 
+ mock_sai_my_mac = &mock_sai_my_mac_; + sai_my_mac_api->create_my_mac = mock_create_my_mac; + sai_my_mac_api->remove_my_mac = mock_remove_my_mac; + } + + void Enqueue(const swss::KeyOpFieldsValuesTuple &entry) + { + l3_admit_manager_.enqueue(APP_P4RT_L3_ADMIT_TABLE_NAME, entry); + } + + void Drain() + { + l3_admit_manager_.drain(); + } + + std::string VerifyState(const std::string &key, const std::vector &tuple) + { + return l3_admit_manager_.verifyState(key, tuple); + } + + ReturnCode ProcessAddRequest(const P4L3AdmitAppDbEntry &app_db_entry, const std::string &l3_admit_key) + { + return l3_admit_manager_.processAddRequest(app_db_entry, l3_admit_key); + } + + ReturnCode ProcessDeleteRequest(const std::string &my_mac_key) + { + return l3_admit_manager_.processDeleteRequest(my_mac_key); + } + + P4L3AdmitEntry *GetL3AdmitEntry(const std::string &my_mac_key) + { + return l3_admit_manager_.getL3AdmitEntry(my_mac_key); + } + + ReturnCodeOr DeserializeP4L3AdmitAppDbEntry( + const std::string &key, const std::vector &attributes) + { + return l3_admit_manager_.deserializeP4L3AdmitAppDbEntry(key, attributes); + } + + // Adds the l3 admit entry -- kP4L3AdmitAppDbEntry1, via l3 admit manager's + // ProcessAddRequest (). This function also takes care of all the dependencies + // of the l3 admit entry. + // Returns a valid pointer to l3 admit entry on success. + P4L3AdmitEntry *AddL3AdmitEntry1(); + + // Validates that a P4 App l3 admit entry is correctly added in l3 admit + // manager and centralized mapper. Returns true on success. + bool ValidateL3AdmitEntryAdd(const P4L3AdmitAppDbEntry &app_db_entry); + + // Return true if the specified the object has the expected number of + // reference. 
+ bool ValidateRefCnt(sai_object_type_t object_type, const std::string &key, uint32_t expected_ref_count) + { + uint32_t ref_count; + if (!p4_oid_mapper_.getRefCount(object_type, key, &ref_count)) + return false; + return ref_count == expected_ref_count; + } + + StrictMock mock_sai_my_mac_; + MockResponsePublisher publisher_; + P4OidMapper p4_oid_mapper_; + L3AdmitManager l3_admit_manager_; +}; + +P4L3AdmitEntry *L3AdmitManagerTest::AddL3AdmitEntry1() +{ + const auto l3admit_key = + KeyGenerator::generateL3AdmitKey(kP4L3AdmitAppDbEntry1.mac_address_data, kP4L3AdmitAppDbEntry1.mac_address_mask, + kP4L3AdmitAppDbEntry1.port_name, kP4L3AdmitAppDbEntry1.priority); + + // Set up mock call. + EXPECT_CALL( + mock_sai_my_mac_, + create_my_mac(::testing::NotNull(), Eq(gSwitchId), Eq(3), + Truly(std::bind(MatchCreateL3AdmitArgAttrList, std::placeholders::_1, + CreateAttributeListForL3AdmitObject(kP4L3AdmitAppDbEntry1, SAI_NULL_OBJECT_ID))))) + .WillOnce(DoAll(SetArgPointee<0>(kL3AdmitOid1), Return(SAI_STATUS_SUCCESS))); + + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessAddRequest(kP4L3AdmitAppDbEntry1, l3admit_key)); + + return GetL3AdmitEntry(l3admit_key); +} + +bool L3AdmitManagerTest::ValidateL3AdmitEntryAdd(const P4L3AdmitAppDbEntry &app_db_entry) +{ + const auto *p4_l3_admit_entry = GetL3AdmitEntry(KeyGenerator::generateL3AdmitKey( + app_db_entry.mac_address_data, app_db_entry.mac_address_mask, app_db_entry.port_name, app_db_entry.priority)); + if (p4_l3_admit_entry == nullptr || p4_l3_admit_entry->mac_address_data != app_db_entry.mac_address_data || + p4_l3_admit_entry->mac_address_mask != app_db_entry.mac_address_mask || + p4_l3_admit_entry->port_name != app_db_entry.port_name || p4_l3_admit_entry->priority != app_db_entry.priority) + { + return false; + } + + return true; +} + +TEST_F(L3AdmitManagerTest, ProcessAddRequestShouldSucceedAddingNewL3Admit) +{ + AddL3AdmitEntry1(); + EXPECT_TRUE(ValidateL3AdmitEntryAdd(kP4L3AdmitAppDbEntry1)); +} + 
+TEST_F(L3AdmitManagerTest, ProcessAddRequestShouldFailWhenL3AdmitExistInCentralMapper) +{ + const auto l3admit_key = + KeyGenerator::generateL3AdmitKey(kP4L3AdmitAppDbEntry1.mac_address_data, kP4L3AdmitAppDbEntry1.mac_address_mask, + kP4L3AdmitAppDbEntry1.port_name, kP4L3AdmitAppDbEntry1.priority); + ASSERT_EQ(l3admit_key, "match/dst_mac=00:02:03:04:00:00&ff:ff:ff:ff:00:00:priority=2030"); + ASSERT_TRUE(p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_MY_MAC, l3admit_key, kL3AdmitOid1)); + // TODO: Expect critical state. + EXPECT_EQ(StatusCode::SWSS_RC_INTERNAL, ProcessAddRequest(kP4L3AdmitAppDbEntry1, l3admit_key)); +} + +TEST_F(L3AdmitManagerTest, ProcessAddRequestShouldFailWhenDependingPortIsNotPresent) +{ + const P4L3AdmitAppDbEntry kAppDbEntry{/*port_name=*/"Ethernet100", + /*mac_address_data=*/swss::MacAddress("00:02:03:04:00:00"), + /*mac_address_mask=*/swss::MacAddress("ff:ff:ff:ff:00:00"), + /*priority=*/2030}; + const auto l3admit_key = KeyGenerator::generateL3AdmitKey( + kAppDbEntry.mac_address_data, kAppDbEntry.mac_address_mask, kAppDbEntry.port_name, kAppDbEntry.priority); + + EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, ProcessAddRequest(kAppDbEntry, l3admit_key)); + + EXPECT_EQ(GetL3AdmitEntry(l3admit_key), nullptr); +} + +TEST_F(L3AdmitManagerTest, ProcessAddRequestShouldFailWhenSaiCallFails) +{ + const auto l3admit_key = + KeyGenerator::generateL3AdmitKey(kP4L3AdmitAppDbEntry1.mac_address_data, kP4L3AdmitAppDbEntry1.mac_address_mask, + kP4L3AdmitAppDbEntry1.port_name, kP4L3AdmitAppDbEntry1.priority); + // Set up mock call. + EXPECT_CALL( + mock_sai_my_mac_, + create_my_mac(::testing::NotNull(), Eq(gSwitchId), Eq(3), + Truly(std::bind(MatchCreateL3AdmitArgAttrList, std::placeholders::_1, + CreateAttributeListForL3AdmitObject(kP4L3AdmitAppDbEntry1, SAI_NULL_OBJECT_ID))))) + .WillOnce(Return(SAI_STATUS_FAILURE)); + + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessAddRequest(kP4L3AdmitAppDbEntry1, l3admit_key)); + + // The add request failed for the l3 admit entry. 
+ EXPECT_EQ(GetL3AdmitEntry(l3admit_key), nullptr); +} + +TEST_F(L3AdmitManagerTest, ProcessDeleteRequestShouldFailForNonExistingL3Admit) +{ + const auto l3admit_key = + KeyGenerator::generateL3AdmitKey(kP4L3AdmitAppDbEntry1.mac_address_data, kP4L3AdmitAppDbEntry1.mac_address_mask, + kP4L3AdmitAppDbEntry1.port_name, kP4L3AdmitAppDbEntry1.priority); + EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, ProcessDeleteRequest(l3admit_key)); +} + +TEST_F(L3AdmitManagerTest, ProcessDeleteRequestShouldFailIfL3AdmitEntryIsAbsentInCentralMapper) +{ + auto *p4_my_mac_entry = AddL3AdmitEntry1(); + ASSERT_NE(p4_my_mac_entry, nullptr); + + const auto l3admit_key = + KeyGenerator::generateL3AdmitKey(kP4L3AdmitAppDbEntry1.mac_address_data, kP4L3AdmitAppDbEntry1.mac_address_mask, + kP4L3AdmitAppDbEntry1.port_name, kP4L3AdmitAppDbEntry1.priority); + + ASSERT_TRUE(p4_oid_mapper_.eraseOID(SAI_OBJECT_TYPE_MY_MAC, l3admit_key)); + + // TODO: Expect critical state. + EXPECT_EQ(StatusCode::SWSS_RC_INTERNAL, ProcessDeleteRequest(l3admit_key)); + + // Validate the l3 admit entry is not deleted in P4 l3 admit manager. + p4_my_mac_entry = GetL3AdmitEntry(l3admit_key); + ASSERT_NE(p4_my_mac_entry, nullptr); +} + +TEST_F(L3AdmitManagerTest, ProcessDeleteRequestShouldFailIfL3AdmitEntryIsStillReferenced) +{ + auto *p4_my_mac_entry = AddL3AdmitEntry1(); + ASSERT_NE(p4_my_mac_entry, nullptr); + + const auto l3admit_key = + KeyGenerator::generateL3AdmitKey(kP4L3AdmitAppDbEntry1.mac_address_data, kP4L3AdmitAppDbEntry1.mac_address_mask, + kP4L3AdmitAppDbEntry1.port_name, kP4L3AdmitAppDbEntry1.priority); + ASSERT_TRUE(p4_oid_mapper_.increaseRefCount(SAI_OBJECT_TYPE_MY_MAC, l3admit_key)); + + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ProcessDeleteRequest(l3admit_key)); + + // Validate the l3 admit entry is not deleted in either P4 l3 admit manager + // or central mapper. 
+ p4_my_mac_entry = GetL3AdmitEntry(l3admit_key); + ASSERT_NE(p4_my_mac_entry, nullptr); + EXPECT_TRUE(ValidateRefCnt(SAI_OBJECT_TYPE_MY_MAC, l3admit_key, 1)); +} + +TEST_F(L3AdmitManagerTest, ProcessDeleteRequestShouldFailIfSaiCallFails) +{ + auto *p4_my_mac_entry = AddL3AdmitEntry1(); + ASSERT_NE(p4_my_mac_entry, nullptr); + + const auto l3admit_key = + KeyGenerator::generateL3AdmitKey(kP4L3AdmitAppDbEntry1.mac_address_data, kP4L3AdmitAppDbEntry1.mac_address_mask, + kP4L3AdmitAppDbEntry1.port_name, kP4L3AdmitAppDbEntry1.priority); + + // Set up mock call. + EXPECT_CALL(mock_sai_my_mac_, remove_my_mac(Eq(p4_my_mac_entry->l3_admit_oid))) + .WillOnce(Return(SAI_STATUS_FAILURE)); + + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessDeleteRequest(l3admit_key)); + + // Validate the l3 admit entry is not deleted in either P4 l3 admit manager + // or central mapper. + p4_my_mac_entry = GetL3AdmitEntry(l3admit_key); + ASSERT_NE(p4_my_mac_entry, nullptr); + EXPECT_TRUE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_MY_MAC, l3admit_key)); +} + +TEST_F(L3AdmitManagerTest, GetL3AdmitEntryShouldReturnNullPointerForNonexistingL3Admit) +{ + const auto l3admit_key = + KeyGenerator::generateL3AdmitKey(kP4L3AdmitAppDbEntry1.mac_address_data, kP4L3AdmitAppDbEntry1.mac_address_mask, + kP4L3AdmitAppDbEntry1.port_name, kP4L3AdmitAppDbEntry1.priority); + EXPECT_EQ(GetL3AdmitEntry(l3admit_key), nullptr); +} + +TEST_F(L3AdmitManagerTest, DeserializeP4L3AdmitAppDbEntryShouldReturnNullPointerForInvalidAction) +{ + std::vector attributes = { + swss::FieldValueTuple(p4orch::kAction, "set_nexthop")}; // Invalid action. 
+ + EXPECT_FALSE(DeserializeP4L3AdmitAppDbEntry(kL3AdmitP4AppDbKey1, attributes).ok()); +} + +TEST_F(L3AdmitManagerTest, DeserializeP4L3AdmitAppDbEntryShouldReturnNullPointerForInvalidField) +{ + std::vector attributes = {swss::FieldValueTuple(p4orch::kAction, p4orch::kL3AdmitAction), + swss::FieldValueTuple("UNKNOWN_FIELD", "UNKOWN")}; + + EXPECT_FALSE(DeserializeP4L3AdmitAppDbEntry(kL3AdmitP4AppDbKey1, attributes).ok()); +} + +TEST_F(L3AdmitManagerTest, DeserializeP4L3AdmitAppDbEntryShouldReturnNullPointerForInvalidMac) +{ + std::vector attributes = {swss::FieldValueTuple(p4orch::kAction, p4orch::kL3AdmitAction)}; + constexpr char *kValidAppDbKey = R"({"match/dst_mac":"00:02:03:04:00:00","priority":2030})"; + EXPECT_TRUE(DeserializeP4L3AdmitAppDbEntry(kValidAppDbKey, attributes).ok()); + constexpr char *kInvalidAppDbKey = R"({"match/dst_mac":"123.123.123.123","priority":2030})"; + EXPECT_FALSE(DeserializeP4L3AdmitAppDbEntry(kInvalidAppDbKey, attributes).ok()); +} + +TEST_F(L3AdmitManagerTest, DeserializeP4L3AdmitAppDbEntryShouldReturnNullPointerForInvalidPriority) +{ + std::vector attributes = {swss::FieldValueTuple(p4orch::kAction, p4orch::kL3AdmitAction)}; + constexpr char *kInvalidAppDbKey = R"({"match/dst_mac":"00:02:03:04:00:00","priority":-1})"; + EXPECT_FALSE(DeserializeP4L3AdmitAppDbEntry(kInvalidAppDbKey, attributes).ok()); +} + +TEST_F(L3AdmitManagerTest, DeserializeP4L3AdmitAppDbEntryShouldSucceedWithoutDstMac) +{ + std::vector attributes = {swss::FieldValueTuple(p4orch::kAction, p4orch::kL3AdmitAction)}; + constexpr char *kValidAppDbKey = R"({"priority":1})"; + EXPECT_TRUE(DeserializeP4L3AdmitAppDbEntry(kValidAppDbKey, attributes).ok()); +} + +TEST_F(L3AdmitManagerTest, DrainDuplicateSetRequestShouldSucceed) +{ + auto *p4_my_mac_entry = AddL3AdmitEntry1(); + ASSERT_NE(p4_my_mac_entry, nullptr); + + nlohmann::json j; + j[prependMatchField(p4orch::kDstMac)] = kP4L3AdmitAppDbEntry1.mac_address_data.to_string() + + p4orch::kDataMaskDelimiter + + 
kP4L3AdmitAppDbEntry1.mac_address_mask.to_string(); + j[p4orch::kPriority] = kP4L3AdmitAppDbEntry1.priority; + + std::vector fvs{{p4orch::kAction, p4orch::kL3AdmitAction}}; + + swss::KeyOpFieldsValuesTuple app_db_entry(std::string(APP_P4RT_L3_ADMIT_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + SET_COMMAND, fvs); + + Enqueue(app_db_entry); + Drain(); + + // Expect that the update call will fail, so l3 admit entry's fields stay + // the same. + EXPECT_TRUE(ValidateL3AdmitEntryAdd(kP4L3AdmitAppDbEntry1)); +} + +TEST_F(L3AdmitManagerTest, DrainDeleteRequestShouldSucceedForExistingL3Admit) +{ + auto *p4_my_mac_entry = AddL3AdmitEntry1(); + ASSERT_NE(p4_my_mac_entry, nullptr); + + nlohmann::json j; + j[prependMatchField(p4orch::kDstMac)] = kP4L3AdmitAppDbEntry1.mac_address_data.to_string() + + p4orch::kDataMaskDelimiter + + kP4L3AdmitAppDbEntry1.mac_address_mask.to_string(); + j[p4orch::kPriority] = kP4L3AdmitAppDbEntry1.priority; + + std::vector fvs; + swss::KeyOpFieldsValuesTuple app_db_entry(std::string(APP_P4RT_NEXTHOP_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + DEL_COMMAND, fvs); + EXPECT_CALL(mock_sai_my_mac_, remove_my_mac(Eq(p4_my_mac_entry->l3_admit_oid))) + .WillOnce(Return(SAI_STATUS_SUCCESS)); + + Enqueue(app_db_entry); + Drain(); + + // Validate the l3 admit entry has been deleted in both P4 l3 admit + // manager + // and centralized mapper. 
+ const auto l3admit_key = + KeyGenerator::generateL3AdmitKey(kP4L3AdmitAppDbEntry1.mac_address_data, kP4L3AdmitAppDbEntry1.mac_address_mask, + kP4L3AdmitAppDbEntry1.port_name, kP4L3AdmitAppDbEntry1.priority); + p4_my_mac_entry = GetL3AdmitEntry(l3admit_key); + EXPECT_EQ(p4_my_mac_entry, nullptr); + EXPECT_FALSE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_MY_MAC, l3admit_key)); +} + +TEST_F(L3AdmitManagerTest, DrainValidAppEntryShouldSucceed) +{ + nlohmann::json j; + j[prependMatchField(p4orch::kDstMac)] = kP4L3AdmitAppDbEntry2.mac_address_data.to_string() + + p4orch::kDataMaskDelimiter + + kP4L3AdmitAppDbEntry2.mac_address_mask.to_string(); + j[p4orch::kPriority] = kP4L3AdmitAppDbEntry2.priority; + j[prependMatchField(p4orch::kInPort)] = kP4L3AdmitAppDbEntry2.port_name; + + std::vector fvs{{p4orch::kAction, p4orch::kL3AdmitAction}}; + + swss::KeyOpFieldsValuesTuple app_db_entry(std::string(APP_P4RT_L3_ADMIT_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + SET_COMMAND, fvs); + + Enqueue(app_db_entry); + EXPECT_CALL(mock_sai_my_mac_, create_my_mac(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kL3AdmitOid2), Return(SAI_STATUS_SUCCESS))); + + Drain(); + + EXPECT_TRUE(ValidateL3AdmitEntryAdd(kP4L3AdmitAppDbEntry2)); +} + +TEST_F(L3AdmitManagerTest, DrainInValidAppEntryShouldSucceed) +{ + nlohmann::json j; + j[prependMatchField(p4orch::kDstMac)] = "1"; // Invalid Mac + j[p4orch::kPriority] = 1000; + + std::vector fvs{{p4orch::kAction, p4orch::kL3AdmitAction}}; + + swss::KeyOpFieldsValuesTuple app_db_entry(std::string(APP_P4RT_L3_ADMIT_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + SET_COMMAND, fvs); + + Enqueue(app_db_entry); + + Drain(); + constexpr char *kL3AdmitKey = R"({"match/dst_mac":"1","priority":1000})"; + EXPECT_EQ(GetL3AdmitEntry(kL3AdmitKey), nullptr); + EXPECT_FALSE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_MY_MAC, kL3AdmitKey)); +} + +TEST_F(L3AdmitManagerTest, VerifyStateTest) +{ + auto *p4_my_mac_entry = AddL3AdmitEntry1(); + ASSERT_NE(p4_my_mac_entry, 
nullptr); + nlohmann::json j; + j[prependMatchField(p4orch::kDstMac)] = kP4L3AdmitAppDbEntry1.mac_address_data.to_string() + + p4orch::kDataMaskDelimiter + + kP4L3AdmitAppDbEntry1.mac_address_mask.to_string(); + j[p4orch::kPriority] = kP4L3AdmitAppDbEntry1.priority; + const std::string db_key = std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + APP_P4RT_L3_ADMIT_TABLE_NAME + + kTableKeyDelimiter + j.dump(); + std::vector attributes; + + // Verification should succeed with valid key and value. + attributes.push_back(swss::FieldValueTuple{p4orch::kAction, p4orch::kL3AdmitAction}); + + // Setup ASIC DB. + swss::Table table(nullptr, "ASIC_STATE"); + table.set( + "SAI_OBJECT_TYPE_MY_MAC:oid:0x1", + std::vector{ + swss::FieldValueTuple{"SAI_MY_MAC_ATTR_MAC_ADDRESS", kP4L3AdmitAppDbEntry1.mac_address_data.to_string()}, + swss::FieldValueTuple{"SAI_MY_MAC_ATTR_MAC_ADDRESS_MASK", "FF:FF:FF:FF:00:00"}, + swss::FieldValueTuple{"SAI_MY_MAC_ATTR_PRIORITY", "2030"}}); + EXPECT_EQ(VerifyState(db_key, attributes), ""); + + // Invalid key should fail verification. + EXPECT_FALSE(VerifyState("invalid", attributes).empty()); + EXPECT_FALSE(VerifyState("invalid:invalid", attributes).empty()); + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + ":invalid", attributes).empty()); + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + ":invalid:invalid", attributes).empty()); + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + ":FIXED_L3_ADMIT_TABLE:invalid", attributes).empty()); + + // Verification should fail if MAC does not exist. 
+ j[prependMatchField(p4orch::kDstMac)] = kP4L3AdmitAppDbEntry2.mac_address_data.to_string() + + p4orch::kDataMaskDelimiter + + kP4L3AdmitAppDbEntry2.mac_address_mask.to_string(); + j[p4orch::kPriority] = kP4L3AdmitAppDbEntry1.priority; + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + APP_P4RT_L3_ADMIT_TABLE_NAME + + kTableKeyDelimiter + j.dump(), + attributes) + .empty()); + + // Verification should fail if port name mismatches. + auto saved_port_name = p4_my_mac_entry->port_name; + p4_my_mac_entry->port_name = kP4L3AdmitAppDbEntry2.port_name; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + p4_my_mac_entry->port_name = saved_port_name; + + // Verification should fail if MAC mismatches. + auto saved_mac_address_data = p4_my_mac_entry->mac_address_data; + p4_my_mac_entry->mac_address_data = kP4L3AdmitAppDbEntry2.mac_address_data; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + p4_my_mac_entry->mac_address_data = saved_mac_address_data; + + // Verification should fail if MAC mask mismatches. + auto saved_mac_address_mask = p4_my_mac_entry->mac_address_mask; + p4_my_mac_entry->mac_address_mask = kP4L3AdmitAppDbEntry2.mac_address_mask; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + p4_my_mac_entry->mac_address_mask = saved_mac_address_mask; + + // Verification should fail if priority mismatches. + auto saved_priority = p4_my_mac_entry->priority; + p4_my_mac_entry->priority = 1111; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + p4_my_mac_entry->priority = saved_priority; + + // Verification should fail if OID mapper mismatches. 
+ const auto l3admit_key = + KeyGenerator::generateL3AdmitKey(kP4L3AdmitAppDbEntry1.mac_address_data, kP4L3AdmitAppDbEntry1.mac_address_mask, + kP4L3AdmitAppDbEntry1.port_name, kP4L3AdmitAppDbEntry1.priority); + p4_oid_mapper_.eraseOID(SAI_OBJECT_TYPE_MY_MAC, l3admit_key); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_MY_MAC, l3admit_key, kL3AdmitOid1); +} + +TEST_F(L3AdmitManagerTest, VerifyStateAsicDbTest) +{ + auto *p4_my_mac_entry = AddL3AdmitEntry1(); + ASSERT_NE(p4_my_mac_entry, nullptr); + nlohmann::json j; + j[prependMatchField(p4orch::kDstMac)] = kP4L3AdmitAppDbEntry1.mac_address_data.to_string() + + p4orch::kDataMaskDelimiter + + kP4L3AdmitAppDbEntry1.mac_address_mask.to_string(); + j[p4orch::kPriority] = kP4L3AdmitAppDbEntry1.priority; + const std::string db_key = std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + APP_P4RT_L3_ADMIT_TABLE_NAME + + kTableKeyDelimiter + j.dump(); + std::vector attributes; + + // Setup ASIC DB. + swss::Table table(nullptr, "ASIC_STATE"); + table.set( + "SAI_OBJECT_TYPE_MY_MAC:oid:0x1", + std::vector{ + swss::FieldValueTuple{"SAI_MY_MAC_ATTR_MAC_ADDRESS", kP4L3AdmitAppDbEntry1.mac_address_data.to_string()}, + swss::FieldValueTuple{"SAI_MY_MAC_ATTR_MAC_ADDRESS_MASK", "FF:FF:FF:FF:00:00"}, + swss::FieldValueTuple{"SAI_MY_MAC_ATTR_PRIORITY", "2030"}}); + + // Verification should succeed with valid key and value. + attributes.push_back(swss::FieldValueTuple{p4orch::kAction, p4orch::kL3AdmitAction}); + + EXPECT_EQ(VerifyState(db_key, attributes), ""); + + // Verification should fail if ASIC DB values mismatch. + table.set("SAI_OBJECT_TYPE_MY_MAC:oid:0x1", + std::vector{swss::FieldValueTuple{"SAI_MY_MAC_ATTR_PRIORITY", "1000"}}); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + + // Verification should fail if ASIC DB table is missing. 
+ table.del("SAI_OBJECT_TYPE_MY_MAC:oid:0x1"); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + table.set( + "SAI_OBJECT_TYPE_MY_MAC:oid:0x1", + std::vector{ + swss::FieldValueTuple{"SAI_MY_MAC_ATTR_MAC_ADDRESS", kP4L3AdmitAppDbEntry1.mac_address_data.to_string()}, + swss::FieldValueTuple{"SAI_MY_MAC_ATTR_MAC_ADDRESS_MASK", "FF:FF:FF:FF:00:00"}, + swss::FieldValueTuple{"SAI_MY_MAC_ATTR_PRIORITY", "2030"}}); +} diff --git a/orchagent/p4orch/tests/mirror_session_manager_test.cpp b/orchagent/p4orch/tests/mirror_session_manager_test.cpp index c45a0d9bcd..19b1e33f1b 100644 --- a/orchagent/p4orch/tests/mirror_session_manager_test.cpp +++ b/orchagent/p4orch/tests/mirror_session_manager_test.cpp @@ -20,6 +20,8 @@ extern "C" #include "sai.h" } +using ::p4orch::kTableKeyDelimiter; + extern sai_mirror_api_t *sai_mirror_api; extern sai_object_id_t gSwitchId; extern PortsOrch *gPortsOrch; @@ -215,7 +217,7 @@ class MirrorSessionManagerTest : public ::testing::Test void Enqueue(const swss::KeyOpFieldsValuesTuple &entry) { - return mirror_session_manager_.enqueue(entry); + return mirror_session_manager_.enqueue(APP_P4RT_MIRROR_SESSION_TABLE_NAME, entry); } void Drain() @@ -223,6 +225,11 @@ class MirrorSessionManagerTest : public ::testing::Test return mirror_session_manager_.drain(); } + std::string VerifyState(const std::string &key, const std::vector &tuple) + { + return mirror_session_manager_.verifyState(key, tuple); + } + ReturnCodeOr DeserializeP4MirrorSessionAppDbEntry( const std::string &key, const std::vector &attributes) { @@ -712,7 +719,7 @@ TEST_F(MirrorSessionManagerTest, CreateExistingMirrorSessionInMapperShouldFail) ASSERT_TRUE(p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_MIRROR_SESSION, mirror_session_entry.mirror_session_key, mirror_session_entry.mirror_session_oid)); - // (TODO): Expect critical state. + // TODO: Expect critical state. 
EXPECT_FALSE(CreateMirrorSession(mirror_session_entry).ok()); } @@ -735,7 +742,7 @@ TEST_F(MirrorSessionManagerTest, UpdatingNonexistingMirrorSessionShouldFail) { P4MirrorSessionAppDbEntry app_db_entry; // Fail because existing_mirror_session_entry is nullptr. - // (TODO): Expect critical state. + // TODO: Expect critical state. EXPECT_FALSE(ProcessUpdateRequest(app_db_entry, /*existing_mirror_session_entry=*/nullptr) .ok()); @@ -746,7 +753,7 @@ TEST_F(MirrorSessionManagerTest, UpdatingNonexistingMirrorSessionShouldFail) kTtl1Num, kTos1Num); // Fail because the mirror session is not added into centralized mapper. - // (TODO): Expect critical state. + // TODO: Expect critical state. EXPECT_FALSE(ProcessUpdateRequest(app_db_entry, &existing_mirror_session_entry).ok()); } @@ -975,7 +982,7 @@ TEST_F(MirrorSessionManagerTest, UpdateRecoveryFailureShouldRaiseCriticalState) .WillOnce(Return(SAI_STATUS_SUCCESS)) .WillOnce(Return(SAI_STATUS_SUCCESS)) .WillOnce(Return(SAI_STATUS_FAILURE)); - // (TODO): Expect critical state. + // TODO: Expect critical state. Drain(); @@ -1002,10 +1009,193 @@ TEST_F(MirrorSessionManagerTest, DeleteMirrorSessionNotInMapperShouldFail) { AddDefaultMirrorSection(); p4_oid_mapper_.eraseOID(SAI_OBJECT_TYPE_MIRROR_SESSION, KeyGenerator::generateMirrorSessionKey(kMirrorSessionId)); - // (TODO): Expect critical state. + // TODO: Expect critical state. ASSERT_EQ(StatusCode::SWSS_RC_INTERNAL, ProcessDeleteRequest(KeyGenerator::generateMirrorSessionKey(kMirrorSessionId))); } +TEST_F(MirrorSessionManagerTest, VerifyStateTest) +{ + AddDefaultMirrorSection(); + nlohmann::json j; + j[prependMatchField(p4orch::kMirrorSessionId)] = kMirrorSessionId; + const std::string db_key = std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + + APP_P4RT_MIRROR_SESSION_TABLE_NAME + kTableKeyDelimiter + j.dump(); + // Setup ASIC DB. 
+ swss::Table table(nullptr, "ASIC_STATE"); + table.set("SAI_OBJECT_TYPE_MIRROR_SESSION:oid:0x445566", + std::vector{ + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_MONITOR_PORT", "oid:0x112233"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_TYPE", "SAI_MIRROR_SESSION_TYPE_ENHANCED_REMOTE"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_ERSPAN_ENCAPSULATION_TYPE", + "SAI_ERSPAN_ENCAPSULATION_TYPE_MIRROR_L3_GRE_TUNNEL"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION", "4"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_TOS", "0"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_TTL", "64"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_SRC_IP_ADDRESS", "10.206.196.31"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_DST_IP_ADDRESS", "172.20.0.203"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_SRC_MAC_ADDRESS", "00:02:03:04:05:06"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS", "00:1A:11:17:5F:80"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_GRE_PROTOCOL_TYPE", "35006"}, + }); + std::vector attributes; + + // Verification should succeed with vaild key and value. + attributes.push_back(swss::FieldValueTuple{p4orch::kAction, p4orch::kMirrorAsIpv4Erspan}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kPort), kPort1}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kSrcIp), kSrcIp1}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kDstIp), kDstIp1}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kSrcMac), kSrcMac1}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kDstMac), kDstMac1}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kTtl), kTtl1}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kTos), kTos1}); + EXPECT_EQ(VerifyState(db_key, attributes), ""); + + // Invalid key should fail verification. 
+ EXPECT_FALSE(VerifyState("invalid", attributes).empty()); + EXPECT_FALSE(VerifyState("invalid:invalid", attributes).empty()); + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + ":invalid", attributes).empty()); + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + ":invalid:invalid", attributes).empty()); + EXPECT_FALSE( + VerifyState(std::string(APP_P4RT_TABLE_NAME) + ":FIXED_MIRROR_SESSION_TABLE:invalid", attributes).empty()); + + // Verification should fail if entry does not exist. + j[prependMatchField(p4orch::kMirrorSessionId)] = "invalid"; + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + + APP_P4RT_MIRROR_SESSION_TABLE_NAME + kTableKeyDelimiter + j.dump(), + attributes) + .empty()); + + auto *mirror_entry = GetMirrorSessionEntry(KeyGenerator::generateMirrorSessionKey(kMirrorSessionId)); + ASSERT_NE(mirror_entry, nullptr); + + // Verification should fail if mirror section key mismatches. + auto saved_mirror_session_key = mirror_entry->mirror_session_key; + mirror_entry->mirror_session_key = "invalid"; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + mirror_entry->mirror_session_key = saved_mirror_session_key; + + // Verification should fail if mirror section ID mismatches. + auto saved_mirror_session_id = mirror_entry->mirror_session_id; + mirror_entry->mirror_session_id = "invalid"; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + mirror_entry->mirror_session_id = saved_mirror_session_id; + + // Verification should fail if port mismatches. + auto saved_port = mirror_entry->port; + mirror_entry->port = kPort2; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + mirror_entry->port = saved_port; + + // Verification should fail if source IP mismatches. 
+ auto saved_src_ip = mirror_entry->src_ip; + mirror_entry->src_ip = swss::IpAddress(kSrcIp2); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + mirror_entry->src_ip = saved_src_ip; + + // Verification should fail if dest IP mismatches. + auto saved_dst_ip = mirror_entry->dst_ip; + mirror_entry->dst_ip = swss::IpAddress(kDstIp2); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + mirror_entry->dst_ip = saved_dst_ip; + + // Verification should fail if source MAC mismatches. + auto saved_src_mac = mirror_entry->src_mac; + mirror_entry->src_mac = swss::MacAddress(kSrcMac2); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + mirror_entry->src_mac = saved_src_mac; + + // Verification should fail if dest MAC mismatches. + auto saved_dst_mac = mirror_entry->dst_mac; + mirror_entry->dst_mac = swss::MacAddress(kDstMac2); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + mirror_entry->dst_mac = saved_dst_mac; + + // Verification should fail if ttl mismatches. + auto saved_ttl = mirror_entry->ttl; + mirror_entry->ttl = kTtl2Num; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + mirror_entry->ttl = saved_ttl; + + // Verification should fail if tos mismatches. + auto saved_tos = mirror_entry->tos; + mirror_entry->tos = kTos2Num; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + mirror_entry->tos = saved_tos; + + // Verification should fail if OID mapper mismatches. 
+ p4_oid_mapper_.eraseOID(SAI_OBJECT_TYPE_MIRROR_SESSION, KeyGenerator::generateMirrorSessionKey(kMirrorSessionId)); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_MIRROR_SESSION, KeyGenerator::generateMirrorSessionKey(kMirrorSessionId), + kMirrorSessionOid); +} + +TEST_F(MirrorSessionManagerTest, VerifyStateAsicDbTest) +{ + AddDefaultMirrorSection(); + nlohmann::json j; + j[prependMatchField(p4orch::kMirrorSessionId)] = kMirrorSessionId; + const std::string db_key = std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + + APP_P4RT_MIRROR_SESSION_TABLE_NAME + kTableKeyDelimiter + j.dump(); + // Setup ASIC DB. + swss::Table table(nullptr, "ASIC_STATE"); + table.set("SAI_OBJECT_TYPE_MIRROR_SESSION:oid:0x445566", + std::vector{ + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_MONITOR_PORT", "oid:0x112233"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_TYPE", "SAI_MIRROR_SESSION_TYPE_ENHANCED_REMOTE"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_ERSPAN_ENCAPSULATION_TYPE", + "SAI_ERSPAN_ENCAPSULATION_TYPE_MIRROR_L3_GRE_TUNNEL"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION", "4"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_TOS", "0"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_TTL", "64"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_SRC_IP_ADDRESS", "10.206.196.31"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_DST_IP_ADDRESS", "172.20.0.203"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_SRC_MAC_ADDRESS", "00:02:03:04:05:06"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS", "00:1A:11:17:5F:80"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_GRE_PROTOCOL_TYPE", "35006"}, + }); + + std::vector attributes; + + // Verification should succeed with vaild key and value. 
+ attributes.push_back(swss::FieldValueTuple{p4orch::kAction, p4orch::kMirrorAsIpv4Erspan}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kPort), kPort1}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kSrcIp), kSrcIp1}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kDstIp), kDstIp1}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kSrcMac), kSrcMac1}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kDstMac), kDstMac1}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kTtl), kTtl1}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kTos), kTos1}); + + EXPECT_EQ(VerifyState(db_key, attributes), ""); + + // set differenet SRC IP ADDR and expect the VerifyState to fail + table.set("SAI_OBJECT_TYPE_MIRROR_SESSION:oid:0x445566", + std::vector{ + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_SRC_IP_ADDRESS", "10.206.196.32"}}); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + + // Delete the ASIC DB entry and expect the VerifyState to fail + table.del("SAI_OBJECT_TYPE_MIRROR_SESSION:oid:0x445566"); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + + // Restore the ASIC DB entry + table.set("SAI_OBJECT_TYPE_MIRROR_SESSION:oid:0x445566", + std::vector{ + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_MONITOR_PORT", "oid:0x112233"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_TYPE", "SAI_MIRROR_SESSION_TYPE_ENHANCED_REMOTE"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_ERSPAN_ENCAPSULATION_TYPE", + "SAI_ERSPAN_ENCAPSULATION_TYPE_MIRROR_L3_GRE_TUNNEL"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION", "4"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_TOS", "0"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_TTL", "64"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_SRC_IP_ADDRESS", "10.206.196.31"}, + 
swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_DST_IP_ADDRESS", "172.20.0.203"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_SRC_MAC_ADDRESS", "00:02:03:04:05:06"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS", "00:1A:11:17:5F:80"}, + swss::FieldValueTuple{"SAI_MIRROR_SESSION_ATTR_GRE_PROTOCOL_TYPE", "35006"}, + }); +} + } // namespace test } // namespace p4orch diff --git a/orchagent/p4orch/tests/mock_sai_my_mac.h b/orchagent/p4orch/tests/mock_sai_my_mac.h new file mode 100644 index 0000000000..e82f38f520 --- /dev/null +++ b/orchagent/p4orch/tests/mock_sai_my_mac.h @@ -0,0 +1,31 @@ +#pragma once + +#include + +extern "C" +{ +#include "sai.h" +} + +// Mock Class mapping methods to my_mac object SAI APIs. +class MockSaiMyMac +{ + public: + MOCK_METHOD4(create_my_mac, sai_status_t(_Out_ sai_object_id_t *my_mac_id, _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, _In_ const sai_attribute_t *attr_list)); + + MOCK_METHOD1(remove_my_mac, sai_status_t(_In_ sai_object_id_t my_mac_id)); +}; + +MockSaiMyMac *mock_sai_my_mac; + +sai_status_t mock_create_my_mac(_Out_ sai_object_id_t *my_mac_id, _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, _In_ const sai_attribute_t *attr_list) +{ + return mock_sai_my_mac->create_my_mac(my_mac_id, switch_id, attr_count, attr_list); +} + +sai_status_t mock_remove_my_mac(_In_ sai_object_id_t my_mac_id) +{ + return mock_sai_my_mac->remove_my_mac(my_mac_id); +} diff --git a/orchagent/p4orch/tests/mock_sai_next_hop_group.h b/orchagent/p4orch/tests/mock_sai_next_hop_group.h index 5398ec5a70..c1ffedc175 100644 --- a/orchagent/p4orch/tests/mock_sai_next_hop_group.h +++ b/orchagent/p4orch/tests/mock_sai_next_hop_group.h @@ -6,6 +6,7 @@ extern "C" { #include "sai.h" +#include "sainexthopgroup.h" } // Mock class including mock functions mapping to SAI next hop group's @@ -27,6 +28,16 @@ class MockSaiNextHopGroup MOCK_METHOD2(set_next_hop_group_member_attribute, sai_status_t(_In_ sai_object_id_t 
next_hop_group_member_id, _In_ const sai_attribute_t *attr)); + + MOCK_METHOD7(create_next_hop_group_members, + sai_status_t(_In_ sai_object_id_t switch_id, _In_ uint32_t object_count, + _In_ const uint32_t *attr_count, _In_ const sai_attribute_t **attr_list, + _In_ sai_bulk_op_error_mode_t mode, _Out_ sai_object_id_t *object_id, + _Out_ sai_status_t *object_statuses)); + + MOCK_METHOD4(remove_next_hop_group_members, + sai_status_t(_In_ uint32_t object_count, _In_ const sai_object_id_t *object_id, + _In_ sai_bulk_op_error_mode_t mode, _Out_ sai_status_t *object_statuses)); }; // Note that before mock functions below are used, mock_sai_next_hop_group must @@ -62,3 +73,18 @@ sai_status_t set_next_hop_group_member_attribute(_In_ sai_object_id_t next_hop_g { return mock_sai_next_hop_group->set_next_hop_group_member_attribute(next_hop_group_member_id, attr); } + +sai_status_t create_next_hop_group_members(_In_ sai_object_id_t switch_id, _In_ uint32_t object_count, + _In_ const uint32_t *attr_count, _In_ const sai_attribute_t **attr_list, + _In_ sai_bulk_op_error_mode_t mode, _Out_ sai_object_id_t *object_id, + _Out_ sai_status_t *object_statuses) +{ + return mock_sai_next_hop_group->create_next_hop_group_members(switch_id, object_count, attr_count, attr_list, mode, + object_id, object_statuses); +} + +sai_status_t remove_next_hop_group_members(_In_ uint32_t object_count, _In_ const sai_object_id_t *object_id, + _In_ sai_bulk_op_error_mode_t mode, _Out_ sai_status_t *object_statuses) +{ + return mock_sai_next_hop_group->remove_next_hop_group_members(object_count, object_id, mode, object_statuses); +} diff --git a/orchagent/p4orch/tests/mock_sai_router_interface.cpp b/orchagent/p4orch/tests/mock_sai_router_interface.cpp new file mode 100644 index 0000000000..8fbc4af16d --- /dev/null +++ b/orchagent/p4orch/tests/mock_sai_router_interface.cpp @@ -0,0 +1,26 @@ +#include "mock_sai_router_interface.h" + +MockSaiRouterInterface *mock_sai_router_intf; + +sai_status_t 
mock_create_router_interface(_Out_ sai_object_id_t *router_interface_id, _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, _In_ const sai_attribute_t *attr_list) +{ + return mock_sai_router_intf->create_router_interface(router_interface_id, switch_id, attr_count, attr_list); +} + +sai_status_t mock_remove_router_interface(_In_ sai_object_id_t router_interface_id) +{ + return mock_sai_router_intf->remove_router_interface(router_interface_id); +} + +sai_status_t mock_set_router_interface_attribute(_In_ sai_object_id_t router_interface_id, + _In_ const sai_attribute_t *attr) +{ + return mock_sai_router_intf->set_router_interface_attribute(router_interface_id, attr); +} + +sai_status_t mock_get_router_interface_attribute(_In_ sai_object_id_t router_interface_id, _In_ uint32_t attr_count, + _Inout_ sai_attribute_t *attr_list) +{ + return mock_sai_router_intf->get_router_interface_attribute(router_interface_id, attr_count, attr_list); +} \ No newline at end of file diff --git a/orchagent/p4orch/tests/mock_sai_router_interface.h b/orchagent/p4orch/tests/mock_sai_router_interface.h index 9c0caa3004..5d7b7e1172 100644 --- a/orchagent/p4orch/tests/mock_sai_router_interface.h +++ b/orchagent/p4orch/tests/mock_sai_router_interface.h @@ -25,27 +25,15 @@ class MockSaiRouterInterface _Inout_ sai_attribute_t *attr_list)); }; -MockSaiRouterInterface *mock_sai_router_intf; +extern MockSaiRouterInterface *mock_sai_router_intf; sai_status_t mock_create_router_interface(_Out_ sai_object_id_t *router_interface_id, _In_ sai_object_id_t switch_id, - _In_ uint32_t attr_count, _In_ const sai_attribute_t *attr_list) -{ - return mock_sai_router_intf->create_router_interface(router_interface_id, switch_id, attr_count, attr_list); -} + _In_ uint32_t attr_count, _In_ const sai_attribute_t *attr_list); -sai_status_t mock_remove_router_interface(_In_ sai_object_id_t router_interface_id) -{ - return mock_sai_router_intf->remove_router_interface(router_interface_id); -} +sai_status_t 
mock_remove_router_interface(_In_ sai_object_id_t router_interface_id); sai_status_t mock_set_router_interface_attribute(_In_ sai_object_id_t router_interface_id, - _In_ const sai_attribute_t *attr) -{ - return mock_sai_router_intf->set_router_interface_attribute(router_interface_id, attr); -} + _In_ const sai_attribute_t *attr); sai_status_t mock_get_router_interface_attribute(_In_ sai_object_id_t router_interface_id, _In_ uint32_t attr_count, - _Inout_ sai_attribute_t *attr_list) -{ - return mock_sai_router_intf->get_router_interface_attribute(router_interface_id, attr_count, attr_list); -} + _Inout_ sai_attribute_t *attr_list); diff --git a/orchagent/p4orch/tests/mock_sai_tunnel.h b/orchagent/p4orch/tests/mock_sai_tunnel.h new file mode 100644 index 0000000000..5a2a165b02 --- /dev/null +++ b/orchagent/p4orch/tests/mock_sai_tunnel.h @@ -0,0 +1,31 @@ +#pragma once + +#include + +extern "C" +{ +#include "sai.h" +} + +// Mock Class mapping methods to tunnel object SAI APIs. +class MockSaiTunnel +{ + public: + MOCK_METHOD4(create_tunnel, sai_status_t(_Out_ sai_object_id_t *tunnel_id, _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, _In_ const sai_attribute_t *attr_list)); + + MOCK_METHOD1(remove_tunnel, sai_status_t(_In_ sai_object_id_t tunnel_id)); +}; + +MockSaiTunnel *mock_sai_tunnel; + +sai_status_t mock_create_tunnel(_Out_ sai_object_id_t *tunnel_id, _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, _In_ const sai_attribute_t *attr_list) +{ + return mock_sai_tunnel->create_tunnel(tunnel_id, switch_id, attr_count, attr_list); +} + +sai_status_t mock_remove_tunnel(_In_ sai_object_id_t tunnel_id) +{ + return mock_sai_tunnel->remove_tunnel(tunnel_id); +} diff --git a/orchagent/p4orch/tests/neighbor_manager_test.cpp b/orchagent/p4orch/tests/neighbor_manager_test.cpp index e3986ef701..37cf0162be 100644 --- a/orchagent/p4orch/tests/neighbor_manager_test.cpp +++ b/orchagent/p4orch/tests/neighbor_manager_test.cpp @@ -130,7 +130,7 @@ class 
NeighborManagerTest : public ::testing::Test void Enqueue(const swss::KeyOpFieldsValuesTuple &entry) { - neighbor_manager_.enqueue(entry); + neighbor_manager_.enqueue(APP_P4RT_NEIGHBOR_TABLE_NAME, entry); } void Drain() @@ -138,6 +138,11 @@ class NeighborManagerTest : public ::testing::Test neighbor_manager_.drain(); } + std::string VerifyState(const std::string &key, const std::vector &tuple) + { + return neighbor_manager_.verifyState(key, tuple); + } + ReturnCodeOr DeserializeNeighborEntry(const std::string &key, const std::vector &attributes) { @@ -280,7 +285,7 @@ TEST_F(NeighborManagerTest, CreateNeighborEntryExistsInP4OidMapper) P4NeighborEntry neighbor_entry(kRouterInterfaceId2, kNeighborId2, kMacAddress2); p4_oid_mapper_.setDummyOID(SAI_OBJECT_TYPE_NEIGHBOR_ENTRY, neighbor_entry.neighbor_key); - // (TODO): Expect critical state. + // TODO: Expect critical state. EXPECT_EQ(StatusCode::SWSS_RC_INTERNAL, CreateNeighbor(neighbor_entry)); auto current_entry = GetNeighborEntry(neighbor_entry.neighbor_key); @@ -342,7 +347,7 @@ TEST_F(NeighborManagerTest, RemoveNeighborNotExistInMapper) AddNeighborEntry(neighbor_entry, kRouterInterfaceOid2); ASSERT_TRUE(p4_oid_mapper_.eraseOID(SAI_OBJECT_TYPE_NEIGHBOR_ENTRY, neighbor_entry.neighbor_key)); - // (TODO): Expect critical state. + // TODO: Expect critical state. EXPECT_EQ(StatusCode::SWSS_RC_INTERNAL, RemoveNeighbor(neighbor_entry.neighbor_key)); } @@ -820,3 +825,116 @@ TEST_F(NeighborManagerTest, DrainInvalidOperation) P4NeighborEntry neighbor_entry(kRouterInterfaceId1, kNeighborId1, kMacAddress1); ValidateNeighborEntryNotPresent(neighbor_entry, /*check_ref_count=*/true); } + +TEST_F(NeighborManagerTest, VerifyStateTest) +{ + P4NeighborEntry neighbor_entry(kRouterInterfaceId1, kNeighborId1, kMacAddress1); + AddNeighborEntry(neighbor_entry, kRouterInterfaceOid1); + + // Setup ASIC DB. 
+ swss::Table table(nullptr, "ASIC_STATE"); + table.set("SAI_OBJECT_TYPE_NEIGHBOR_ENTRY:{\"ip\":\"10.0.0.22\",\"rif\":\"oid:" + "0x295100\",\"switch_id\":\"oid:0x0\"}", + std::vector{ + swss::FieldValueTuple{"SAI_NEIGHBOR_ENTRY_ATTR_DST_MAC_ADDRESS", "00:01:02:03:04:05"}, + swss::FieldValueTuple{"SAI_NEIGHBOR_ENTRY_ATTR_NO_HOST_ROUTE", "true"}}); + + const std::string db_key = std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + APP_P4RT_NEIGHBOR_TABLE_NAME + + kTableKeyDelimiter + CreateNeighborAppDbKey(kRouterInterfaceId1, kNeighborId1); + std::vector attributes; + + // Verification should succeed with vaild key and value. + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kDstMac), kMacAddress1.to_string()}); + EXPECT_EQ(VerifyState(db_key, attributes), ""); + + // Invalid key should fail verification. + EXPECT_FALSE(VerifyState("invalid", attributes).empty()); + EXPECT_FALSE(VerifyState("invalid:invalid", attributes).empty()); + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + ":invalid", attributes).empty()); + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + ":invalid:invalid", attributes).empty()); + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + ":FIXED_NEIGHBOR_TABLE:invalid", attributes).empty()); + + // Non-existing router intf should fail verification. + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + APP_P4RT_NEIGHBOR_TABLE_NAME + + kTableKeyDelimiter + CreateNeighborAppDbKey(kRouterInterfaceId2, kNeighborId1), + attributes) + .empty()); + + // Non-existing entry should fail verification. 
+ EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + APP_P4RT_NEIGHBOR_TABLE_NAME + + kTableKeyDelimiter + CreateNeighborAppDbKey(kRouterInterfaceId1, kNeighborId2), + attributes) + .empty()); + + auto *current_entry = GetNeighborEntry(KeyGenerator::generateNeighborKey(kRouterInterfaceId1, kNeighborId1)); + EXPECT_NE(current_entry, nullptr); + + // Verification should fail if ritf ID mismatches. + auto saved_router_intf_id = current_entry->router_intf_id; + current_entry->router_intf_id = kRouterInterfaceId2; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + current_entry->router_intf_id = saved_router_intf_id; + + // Verification should fail if neighbor ID mismatches. + auto saved_neighbor_id = current_entry->neighbor_id; + current_entry->neighbor_id = kNeighborId2; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + current_entry->neighbor_id = saved_neighbor_id; + + // Verification should fail if dest MAC mismatches. + auto saved_dst_mac_address = current_entry->dst_mac_address; + current_entry->dst_mac_address = kMacAddress2; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + current_entry->dst_mac_address = saved_dst_mac_address; + + // Verification should fail if router intf key mismatches. + auto saved_router_intf_key = current_entry->router_intf_key; + current_entry->router_intf_key = "invalid"; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + current_entry->router_intf_key = saved_router_intf_key; + + // Verification should fail if neighbor key mismatches. + auto saved_neighbor_key = current_entry->neighbor_key; + current_entry->neighbor_key = "invalid"; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + current_entry->neighbor_key = saved_neighbor_key; +} + +TEST_F(NeighborManagerTest, VerifyStateAsicDbTest) +{ + P4NeighborEntry neighbor_entry(kRouterInterfaceId1, kNeighborId1, kMacAddress1); + AddNeighborEntry(neighbor_entry, kRouterInterfaceOid1); + + // Setup ASIC DB. 
+ swss::Table table(nullptr, "ASIC_STATE"); + table.set("SAI_OBJECT_TYPE_NEIGHBOR_ENTRY:{\"ip\":\"10.0.0.22\",\"rif\":\"oid:" + "0x295100\",\"switch_id\":\"oid:0x0\"}", + std::vector{ + swss::FieldValueTuple{"SAI_NEIGHBOR_ENTRY_ATTR_DST_MAC_ADDRESS", "00:01:02:03:04:05"}, + swss::FieldValueTuple{"SAI_NEIGHBOR_ENTRY_ATTR_NO_HOST_ROUTE", "true"}}); + + const std::string db_key = std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + APP_P4RT_NEIGHBOR_TABLE_NAME + + kTableKeyDelimiter + CreateNeighborAppDbKey(kRouterInterfaceId1, kNeighborId1); + std::vector attributes; + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kDstMac), kMacAddress1.to_string()}); + + // Verification should succeed with correct ASIC DB values. + EXPECT_EQ(VerifyState(db_key, attributes), ""); + + // Verification should fail if ASIC DB values mismatch. + table.set("SAI_OBJECT_TYPE_NEIGHBOR_ENTRY:{\"ip\":\"10.0.0.22\",\"rif\":\"oid:" + "0x295100\",\"switch_id\":\"oid:0x0\"}", + std::vector{ + swss::FieldValueTuple{"SAI_NEIGHBOR_ENTRY_ATTR_DST_MAC_ADDRESS", "00:ff:ee:dd:cc:bb"}}); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + + // Verification should fail if ASIC DB table is missing. 
+ table.del("SAI_OBJECT_TYPE_NEIGHBOR_ENTRY:{\"ip\":\"10.0.0.22\",\"rif\":\"oid:" + "0x295100\",\"switch_id\":\"oid:0x0\"}"); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + table.set("SAI_OBJECT_TYPE_NEIGHBOR_ENTRY:{\"ip\":\"10.0.0.22\",\"rif\":\"oid:" + "0x295100\",\"switch_id\":\"oid:0x0\"}", + std::vector{ + swss::FieldValueTuple{"SAI_NEIGHBOR_ENTRY_ATTR_DST_MAC_ADDRESS", "00:01:02:03:04:05"}, + swss::FieldValueTuple{"SAI_NEIGHBOR_ENTRY_ATTR_NO_HOST_ROUTE", "true"}}); +} diff --git a/orchagent/p4orch/tests/next_hop_manager_test.cpp b/orchagent/p4orch/tests/next_hop_manager_test.cpp index a78310cc8d..d9e6073a06 100644 --- a/orchagent/p4orch/tests/next_hop_manager_test.cpp +++ b/orchagent/p4orch/tests/next_hop_manager_test.cpp @@ -10,10 +10,12 @@ #include "ipaddress.h" #include "json.hpp" #include "mock_response_publisher.h" +#include "mock_sai_hostif.h" #include "mock_sai_next_hop.h" +#include "mock_sai_serialize.h" +#include "mock_sai_switch.h" #include "p4oidmapper.h" -#include "p4orch/p4orch_util.h" -#include "p4orch_util.h" +#include "p4orch.h" #include "return_code.h" #include "swssnet.h" extern "C" @@ -32,52 +34,116 @@ using ::testing::StrictMock; using ::testing::Truly; extern sai_object_id_t gSwitchId; -extern sai_next_hop_api_t *sai_next_hop_api; extern MockSaiNextHop *mock_sai_next_hop; +extern P4Orch *gP4Orch; +extern VRFOrch *gVrfOrch; +extern swss::DBConnector *gAppDb; +extern sai_hostif_api_t *sai_hostif_api; +extern sai_switch_api_t *sai_switch_api; +extern sai_next_hop_api_t *sai_next_hop_api; namespace { constexpr char *kNextHopId = "8"; constexpr char *kNextHopP4AppDbKey = R"({"match/nexthop_id":"8"})"; -constexpr sai_object_id_t kNextHopOid = 1; +constexpr sai_object_id_t kNextHopOid = 101; +constexpr char *kTunnelNextHopId = "tunnel-nexthop-1"; +constexpr char *kTunnelNextHopP4AppDbKey = R"({"match/nexthop_id":"tunnel-nexthop-1"})"; +constexpr sai_object_id_t kTunnelNextHopOid = 102; constexpr char *kRouterInterfaceId1 = "16"; 
constexpr char *kRouterInterfaceId2 = "17"; constexpr sai_object_id_t kRouterInterfaceOid1 = 1; constexpr sai_object_id_t kRouterInterfaceOid2 = 2; +constexpr char *kTunnelId1 = "tunnel-1"; +constexpr char *kTunnelId2 = "tunnel-2"; +constexpr sai_object_id_t kTunnelOid1 = 11; +constexpr sai_object_id_t kTunnelOid2 = 12; constexpr char *kNeighborId1 = "10.0.0.1"; constexpr char *kNeighborId2 = "fe80::21a:11ff:fe17:5f80"; // APP DB entries for Add and Update request. -const P4NextHopAppDbEntry kP4NextHopAppDbEntry1{/*next_hop_id=*/kNextHopId, /*router_interface_id=*/kRouterInterfaceId1, +const P4NextHopAppDbEntry kP4NextHopAppDbEntry1{/*next_hop_id=*/kNextHopId, + /*router_interface_id=*/kRouterInterfaceId1, + /*gre_tunnel_id=*/"", /*neighbor_id=*/swss::IpAddress(kNeighborId1), - /*is_set_router_interface_id=*/true, /*is_set_neighbor_id=*/true}; + /*action_str=*/"set_ip_nexthop"}; -const P4NextHopAppDbEntry kP4NextHopAppDbEntry2{/*next_hop_id=*/kNextHopId, /*router_interface_id=*/kRouterInterfaceId2, +const P4NextHopAppDbEntry kP4NextHopAppDbEntry2{/*next_hop_id=*/kNextHopId, + /*router_interface_id=*/kRouterInterfaceId2, + /*gre_tunnel_id=*/"", /*neighbor_id=*/swss::IpAddress(kNeighborId2), - /*is_set_router_interface_id=*/true, /*is_set_neighbor_id=*/true}; + /*action_str=*/"set_ip_nexthop"}; // APP DB entries for Delete request. 
-const P4NextHopAppDbEntry kP4NextHopAppDbEntry3{/*next_hop_id=*/kNextHopId, /*router_interface_id=*/"", +const P4NextHopAppDbEntry kP4NextHopAppDbEntry3{/*next_hop_id=*/kNextHopId, + /*router_interface_id=*/"", + /*gre_tunnel_id=*/"", /*neighbor_id=*/swss::IpAddress(), - /*is_set_router_interface_id=*/false, /*is_set_neighbor_id=*/false}; + /*action_str=*/""}; + +// APP DB entry for tunnel next hop entry +const P4NextHopAppDbEntry kP4TunnelNextHopAppDbEntry1{/*next_hop_id=*/kTunnelNextHopId, + /*router_interface_id=*/"", + /*gre_tunnel_id=*/kTunnelId1, + /*neighbor_id=*/swss::IpAddress("0.0.0.0"), + /*action_str=*/"set_p2p_tunnel_encap_nexthop"}; + +const P4NextHopAppDbEntry kP4TunnelNextHopAppDbEntry2{/*next_hop_id=*/kTunnelNextHopId, + /*router_interface_id=*/"", + /*gre_tunnel_id=*/kTunnelId2, + /*neighbor_id=*/swss::IpAddress("0.0.0.0"), + /*action_str=*/"set_p2p_tunnel_encap_nexthop"}; + +const P4GreTunnelEntry kP4TunnelEntry1( + /*tunnel_id=*/kTunnelId1, + /*router_interface_id=*/kRouterInterfaceId1, + /*encap_src_ip=*/swss::IpAddress("1.2.3.4"), + /*encap_dst_ip=*/swss::IpAddress(kNeighborId1), + /*neighbor_id=*/swss::IpAddress(kNeighborId1)); + +const P4GreTunnelEntry kP4TunnelEntry2( + /*tunnel_id=*/kTunnelId2, + /*router_interface_id=*/kRouterInterfaceId2, + /*encap_src_ip=*/swss::IpAddress("1.2.3.4"), + /*encap_dst_ip=*/swss::IpAddress(kNeighborId2), + /*neighbor_id=*/swss::IpAddress(kNeighborId2)); std::unordered_map CreateAttributeListForNextHopObject( - const P4NextHopAppDbEntry &app_entry, const sai_object_id_t &rif_oid) + const P4NextHopAppDbEntry &app_entry, const sai_object_id_t &oid, + const swss::IpAddress &neighbor_id = swss::IpAddress("0.0.0.0")) { std::unordered_map next_hop_attrs; sai_attribute_t next_hop_attr; - next_hop_attr.id = SAI_NEXT_HOP_ATTR_TYPE; - next_hop_attr.value.s32 = SAI_NEXT_HOP_TYPE_IP; - next_hop_attrs.insert({next_hop_attr.id, next_hop_attr.value}); + if (app_entry.action_str == p4orch::kSetTunnelNexthop) + { + 
next_hop_attr.id = SAI_NEXT_HOP_ATTR_TYPE; + next_hop_attr.value.s32 = SAI_NEXT_HOP_TYPE_TUNNEL_ENCAP; + next_hop_attrs.insert({next_hop_attr.id, next_hop_attr.value}); + next_hop_attr.id = SAI_NEXT_HOP_ATTR_TUNNEL_ID; + next_hop_attr.value.oid = oid; + next_hop_attrs.insert({next_hop_attr.id, next_hop_attr.value}); + } + else + { + next_hop_attr.id = SAI_NEXT_HOP_ATTR_TYPE; + next_hop_attr.value.s32 = SAI_NEXT_HOP_TYPE_IP; + next_hop_attrs.insert({next_hop_attr.id, next_hop_attr.value}); + next_hop_attr.id = SAI_NEXT_HOP_ATTR_ROUTER_INTERFACE_ID; + next_hop_attr.value.oid = oid; + next_hop_attrs.insert({next_hop_attr.id, next_hop_attr.value}); + } next_hop_attr.id = SAI_NEXT_HOP_ATTR_IP; - swss::copy(next_hop_attr.value.ipaddr, app_entry.neighbor_id); - next_hop_attrs.insert({next_hop_attr.id, next_hop_attr.value}); - - next_hop_attr.id = SAI_NEXT_HOP_ATTR_ROUTER_INTERFACE_ID; - next_hop_attr.value.oid = rif_oid; + if (!neighbor_id.isZero()) + { + swss::copy(next_hop_attr.value.ipaddr, neighbor_id); + } + else + { + swss::copy(next_hop_attr.value.ipaddr, app_entry.neighbor_id); + } next_hop_attrs.insert({next_hop_attr.id, next_hop_attr.value}); return next_hop_attrs; @@ -98,7 +164,8 @@ bool MatchCreateNextHopArgAttrList(const sai_attribute_t *attr_list, const auto end = expected_attr_list.end(); if (expected_attr_list.size() != 3 || expected_attr_list.find(SAI_NEXT_HOP_ATTR_TYPE) == end || expected_attr_list.find(SAI_NEXT_HOP_ATTR_IP) == end || - expected_attr_list.find(SAI_NEXT_HOP_ATTR_ROUTER_INTERFACE_ID) == end) + (expected_attr_list.find(SAI_NEXT_HOP_ATTR_ROUTER_INTERFACE_ID) == end && + expected_attr_list.find(SAI_NEXT_HOP_ATTR_TUNNEL_ID) == end)) { return false; } @@ -142,6 +209,12 @@ bool MatchCreateNextHopArgAttrList(const sai_attribute_t *attr_list, return false; } break; + case SAI_NEXT_HOP_ATTR_TUNNEL_ID: + if (attr_list[i].value.oid != expected_attr_list.at(SAI_NEXT_HOP_ATTR_TUNNEL_ID).oid) + { + return false; + } + break; default: // Invalid 
attribute ID in next hop's attribute list. return false; @@ -158,6 +231,23 @@ class NextHopManagerTest : public ::testing::Test protected: NextHopManagerTest() : next_hop_manager_(&p4_oid_mapper_, &publisher_) { + mock_sai_hostif = &mock_sai_hostif_; + mock_sai_switch = &mock_sai_switch_; + sai_switch_api->get_switch_attribute = mock_get_switch_attribute; + sai_hostif_api->create_hostif_trap = mock_create_hostif_trap; + sai_hostif_api->create_hostif_table_entry = mock_create_hostif_table_entry; + EXPECT_CALL(mock_sai_hostif_, create_hostif_table_entry(_, _, _, _)).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL(mock_sai_hostif_, create_hostif_trap(_, _, _, _)).WillOnce(Return(SAI_STATUS_SUCCESS)); + EXPECT_CALL(mock_sai_switch_, get_switch_attribute(_, _, _)).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); + copp_orch_ = new CoppOrch(gAppDb, APP_COPP_TABLE_NAME); + std::vector p4_tables; + gP4Orch = new P4Orch(gAppDb, p4_tables, gVrfOrch, copp_orch_); + } + + ~NextHopManagerTest() + { + delete gP4Orch; + delete copp_orch_; } void SetUp() override @@ -170,9 +260,14 @@ class NextHopManagerTest : public ::testing::Test sai_next_hop_api->get_next_hop_attribute = mock_get_next_hop_attribute; } + void TearDown() override + { + gP4Orch->getGreTunnelManager()->m_greTunnelTable.clear(); + } + void Enqueue(const swss::KeyOpFieldsValuesTuple &entry) { - next_hop_manager_.enqueue(entry); + next_hop_manager_.enqueue(APP_P4RT_NEXTHOP_TABLE_NAME, entry); } void Drain() @@ -180,6 +275,11 @@ class NextHopManagerTest : public ::testing::Test next_hop_manager_.drain(); } + std::string VerifyState(const std::string &key, const std::vector &tuple) + { + return next_hop_manager_.verifyState(key, tuple); + } + ReturnCode ProcessAddRequest(const P4NextHopAppDbEntry &app_db_entry) { return next_hop_manager_.processAddRequest(app_db_entry); @@ -207,7 +307,7 @@ class NextHopManagerTest : public ::testing::Test } // Resolves the dependency of a next hop entry by adding depended router - 
// interface and neighbor into centralized mapper. + // interface/tunnel and neighbor into centralized mapper. // Returns true on succuess. bool ResolveNextHopEntryDependency(const P4NextHopAppDbEntry &app_db_entry, const sai_object_id_t &rif_oid); @@ -217,6 +317,12 @@ class NextHopManagerTest : public ::testing::Test // Returns a valid pointer to next hop entry on success. P4NextHopEntry *AddNextHopEntry1(); + // Adds the next hop entry -- kP4TunnelNextHopAppDbEntry1, via next hop + // manager's ProcessAddRequest (). This function also takes care of all the + // dependencies of the next hop entry. Returns a valid pointer to next hop + // entry on success. + P4NextHopEntry *AddTunnelNextHopEntry1(); + // Validates that a P4 App next hop entry is correctly added in next hop // manager and centralized mapper. Returns true on success. bool ValidateNextHopEntryAdd(const P4NextHopAppDbEntry &app_db_entry, const sai_object_id_t &expected_next_hop_oid); @@ -235,18 +341,46 @@ class NextHopManagerTest : public ::testing::Test MockResponsePublisher publisher_; P4OidMapper p4_oid_mapper_; NextHopManager next_hop_manager_; + StrictMock mock_sai_hostif_; + StrictMock mock_sai_switch_; + CoppOrch *copp_orch_; }; bool NextHopManagerTest::ResolveNextHopEntryDependency(const P4NextHopAppDbEntry &app_db_entry, - const sai_object_id_t &rif_oid) + const sai_object_id_t &oid) { - const std::string rif_key = KeyGenerator::generateRouterInterfaceKey(app_db_entry.router_interface_id); - if (!p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_ROUTER_INTERFACE, rif_key, rif_oid)) + std::string rif_id = app_db_entry.router_interface_id; + auto neighbor_id = app_db_entry.neighbor_id; + if (app_db_entry.action_str == p4orch::kSetTunnelNexthop) { - return false; + const std::string tunnel_key = KeyGenerator::generateTunnelKey(app_db_entry.gre_tunnel_id); + if (!p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_TUNNEL, tunnel_key, oid)) + { + return false; + } + gP4Orch->getGreTunnelManager()->m_greTunnelTable.emplace( 
+ tunnel_key, app_db_entry.gre_tunnel_id == kTunnelId1 ? kP4TunnelEntry1 : kP4TunnelEntry2); + auto gre_tunnel_or = gP4Orch->getGreTunnelManager()->getConstGreTunnelEntry(tunnel_key); + EXPECT_TRUE(gre_tunnel_or.ok()); + rif_id = (*gre_tunnel_or).router_interface_id; + auto rif_oid = rif_id == kRouterInterfaceId1 ? kRouterInterfaceOid1 : kRouterInterfaceOid2; + neighbor_id = (*gre_tunnel_or).neighbor_id; + const std::string rif_key = KeyGenerator::generateRouterInterfaceKey(rif_id); + if (!p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_ROUTER_INTERFACE, rif_key, rif_oid)) + { + return false; + } } - const std::string neighbor_key = - KeyGenerator::generateNeighborKey(app_db_entry.router_interface_id, app_db_entry.neighbor_id); + else + { + const std::string rif_key = KeyGenerator::generateRouterInterfaceKey(rif_id); + if (!p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_ROUTER_INTERFACE, rif_key, oid)) + { + return false; + } + } + + const std::string neighbor_key = KeyGenerator::generateNeighborKey(rif_id, neighbor_id); if (!p4_oid_mapper_.setDummyOID(SAI_OBJECT_TYPE_NEIGHBOR_ENTRY, neighbor_key)) { return false; @@ -274,15 +408,49 @@ P4NextHopEntry *NextHopManagerTest::AddNextHopEntry1() return GetNextHopEntry(KeyGenerator::generateNextHopKey(kP4NextHopAppDbEntry1.next_hop_id)); } +P4NextHopEntry *NextHopManagerTest::AddTunnelNextHopEntry1() +{ + if (!ResolveNextHopEntryDependency(kP4TunnelNextHopAppDbEntry1, kTunnelOid1)) + { + return nullptr; + } + + // Set up mock call. 
+ EXPECT_CALL( + mock_sai_next_hop_, + create_next_hop(::testing::NotNull(), Eq(gSwitchId), Eq(3), + Truly(std::bind(MatchCreateNextHopArgAttrList, std::placeholders::_1, + CreateAttributeListForNextHopObject(kP4TunnelNextHopAppDbEntry1, kTunnelOid1, + swss::IpAddress(kNeighborId1)))))) + .WillOnce(DoAll(SetArgPointee<0>(kTunnelNextHopOid), Return(SAI_STATUS_SUCCESS))); + + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessAddRequest(kP4TunnelNextHopAppDbEntry1)); + + return GetNextHopEntry(KeyGenerator::generateNextHopKey(kP4TunnelNextHopAppDbEntry1.next_hop_id)); +} + bool NextHopManagerTest::ValidateNextHopEntryAdd(const P4NextHopAppDbEntry &app_db_entry, const sai_object_id_t &expected_next_hop_oid) { const auto *p4_next_hop_entry = GetNextHopEntry(KeyGenerator::generateNextHopKey(app_db_entry.next_hop_id)); if (p4_next_hop_entry == nullptr || p4_next_hop_entry->next_hop_id != app_db_entry.next_hop_id || - p4_next_hop_entry->router_interface_id != app_db_entry.router_interface_id || - p4_next_hop_entry->neighbor_id != app_db_entry.neighbor_id || p4_next_hop_entry->next_hop_oid != expected_next_hop_oid) + { + return false; + } + + if (app_db_entry.action_str == p4orch::kSetTunnelNexthop && + p4_next_hop_entry->gre_tunnel_id != app_db_entry.gre_tunnel_id) + { return false; + } + + if (app_db_entry.action_str == p4orch::kSetIpNexthop && + (p4_next_hop_entry->router_interface_id != app_db_entry.router_interface_id || + p4_next_hop_entry->neighbor_id != app_db_entry.neighbor_id)) + { + return false; + } sai_object_id_t next_hop_oid; if (!p4_oid_mapper_.getOID(SAI_OBJECT_TYPE_NEXT_HOP, p4_next_hop_entry->next_hop_key, &next_hop_oid) || @@ -326,7 +494,7 @@ TEST_F(NextHopManagerTest, ProcessAddRequestShouldFailWhenNextHopExistInCentralM ASSERT_TRUE(ResolveNextHopEntryDependency(kP4NextHopAppDbEntry1, kRouterInterfaceOid1)); ASSERT_TRUE(p4_oid_mapper_.setOID( SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kP4NextHopAppDbEntry1.next_hop_id), kNextHopOid)); - // 
(TODO): Expect critical state. + // TODO: Expect critical state. EXPECT_EQ(StatusCode::SWSS_RC_INTERNAL, ProcessAddRequest(kP4NextHopAppDbEntry1)); } @@ -341,6 +509,17 @@ TEST_F(NextHopManagerTest, ProcessAddRequestShouldFailWhenDependingRifIsAbsentIn EXPECT_EQ(GetNextHopEntry(KeyGenerator::generateNextHopKey(kP4NextHopAppDbEntry1.next_hop_id)), nullptr); } +TEST_F(NextHopManagerTest, ProcessAddRequestShouldFailWhenDependingTunnelIsAbsentInCentralMapper) +{ + const std::string neighbor_key = + KeyGenerator::generateNeighborKey(kP4TunnelNextHopAppDbEntry1.router_interface_id, kP4TunnelEntry1.neighbor_id); + ASSERT_TRUE(p4_oid_mapper_.setDummyOID(SAI_OBJECT_TYPE_NEIGHBOR_ENTRY, neighbor_key)); + + EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, ProcessAddRequest(kP4TunnelNextHopAppDbEntry1)); + + EXPECT_EQ(GetNextHopEntry(KeyGenerator::generateNextHopKey(kP4TunnelNextHopAppDbEntry1.next_hop_id)), nullptr); +} + TEST_F(NextHopManagerTest, ProcessAddRequestShouldFailWhenDependingNeigherIsAbsentInCentralMapper) { const std::string rif_key = KeyGenerator::generateRouterInterfaceKey(kP4NextHopAppDbEntry1.router_interface_id); @@ -386,6 +565,36 @@ TEST_F(NextHopManagerTest, ProcessAddRequestShouldDoNoOpForDuplicateAddRequest) EXPECT_TRUE(ValidateRefCnt(SAI_OBJECT_TYPE_NEIGHBOR_ENTRY, neighbor_key, 1)); } +TEST_F(NextHopManagerTest, ProcessAddRequestShouldSuccessForTunnelNexthop) +{ + ASSERT_TRUE(ResolveNextHopEntryDependency(kP4TunnelNextHopAppDbEntry1, kTunnelOid1)); + + // Set up mock call. 
+ EXPECT_CALL( + mock_sai_next_hop_, + create_next_hop(::testing::NotNull(), Eq(gSwitchId), Eq(3), + Truly(std::bind(MatchCreateNextHopArgAttrList, std::placeholders::_1, + CreateAttributeListForNextHopObject(kP4TunnelNextHopAppDbEntry1, kTunnelOid1, + swss::IpAddress(kNeighborId1)))))) + .WillOnce(DoAll(SetArgPointee<0>(kTunnelNextHopOid), Return(SAI_STATUS_SUCCESS))); + + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessAddRequest(kP4TunnelNextHopAppDbEntry1)); + + EXPECT_NE(GetNextHopEntry(KeyGenerator::generateNextHopKey(kP4TunnelNextHopAppDbEntry1.next_hop_id)), nullptr); + + // Add the same next hop entry again. + EXPECT_EQ(StatusCode::SWSS_RC_EXISTS, ProcessAddRequest(kP4TunnelNextHopAppDbEntry1)); + + // Adding the same next hop entry multiple times should have the same outcome + // as adding it once. + EXPECT_TRUE(ValidateNextHopEntryAdd(kP4TunnelNextHopAppDbEntry1, kTunnelNextHopOid)); + const std::string tunnel_key = KeyGenerator::generateTunnelKey(kP4TunnelNextHopAppDbEntry1.gre_tunnel_id); + const std::string neighbor_key = + KeyGenerator::generateNeighborKey(kP4TunnelEntry1.router_interface_id, kP4TunnelEntry1.neighbor_id); + EXPECT_TRUE(ValidateRefCnt(SAI_OBJECT_TYPE_TUNNEL, tunnel_key, 1)); + EXPECT_TRUE(ValidateRefCnt(SAI_OBJECT_TYPE_NEIGHBOR_ENTRY, neighbor_key, 1)); +} + TEST_F(NextHopManagerTest, ProcessUpdateRequestShouldFailAsItIsUnsupported) { auto *p4_next_hop_entry = AddNextHopEntry1(); @@ -444,7 +653,7 @@ TEST_F(NextHopManagerTest, ProcessDeleteRequestShouldFailIfNextHopEntryIsAbsentI ASSERT_TRUE(p4_oid_mapper_.eraseOID(SAI_OBJECT_TYPE_NEXT_HOP, p4_next_hop_entry->next_hop_key)); - // (TODO): Expect critical state. + // TODO: Expect critical state. EXPECT_EQ(StatusCode::SWSS_RC_INTERNAL, ProcessDeleteRequest(p4_next_hop_entry->next_hop_key)); // Validate the next hop entry is not deleted in P4 next hop manager. 
@@ -533,7 +742,7 @@ TEST_F(NextHopManagerTest, GetNextHopEntryShouldReturnNullPointerForNonexistingN TEST_F(NextHopManagerTest, DeserializeP4NextHopAppDbEntryShouldSucceedForValidNextHopSetEntry) { std::vector attributes = { - swss::FieldValueTuple(p4orch::kAction, "set_nexthop"), + swss::FieldValueTuple(p4orch::kAction, "set_ip_nexthop"), swss::FieldValueTuple(prependParamField(p4orch::kRouterInterfaceId), kRouterInterfaceId1), swss::FieldValueTuple(prependParamField(p4orch::kNeighborId), kNeighborId1)}; @@ -541,9 +750,9 @@ TEST_F(NextHopManagerTest, DeserializeP4NextHopAppDbEntryShouldSucceedForValidNe EXPECT_TRUE(app_db_entry_or.ok()); auto &app_db_entry = *app_db_entry_or; EXPECT_EQ(app_db_entry.next_hop_id, kNextHopId); - EXPECT_TRUE(app_db_entry.is_set_router_interface_id); + EXPECT_FALSE(app_db_entry.router_interface_id.empty()); EXPECT_EQ(app_db_entry.router_interface_id, kRouterInterfaceId1); - EXPECT_TRUE(app_db_entry.is_set_neighbor_id); + EXPECT_FALSE(app_db_entry.neighbor_id.isZero()); EXPECT_EQ(app_db_entry.neighbor_id, swss::IpAddress(kNeighborId1)); } @@ -553,8 +762,8 @@ TEST_F(NextHopManagerTest, DeserializeP4NextHopAppDbEntryShouldSucceedForValidNe EXPECT_TRUE(app_db_entry_or.ok()); auto &app_db_entry = *app_db_entry_or; EXPECT_EQ(app_db_entry.next_hop_id, kNextHopId); - EXPECT_FALSE(app_db_entry.is_set_router_interface_id); - EXPECT_FALSE(app_db_entry.is_set_neighbor_id); + EXPECT_TRUE(app_db_entry.router_interface_id.empty()); + EXPECT_TRUE(app_db_entry.neighbor_id.isZero()); } TEST_F(NextHopManagerTest, DeserializeP4NextHopAppDbEntryShouldReturnNullPointerWhenFailToDeserializeNextHopId) @@ -569,7 +778,7 @@ TEST_F(NextHopManagerTest, DeserializeP4NextHopAppDbEntryShouldReturnNullPointer TEST_F(NextHopManagerTest, DeserializeP4NextHopAppDbEntryShouldReturnNullPointerForInvalidIpAddr) { std::vector attributes = { - swss::FieldValueTuple(p4orch::kAction, "set_nexthop"), + swss::FieldValueTuple(p4orch::kAction, "set_ip_nexthop"), 
swss::FieldValueTuple(prependParamField(p4orch::kRouterInterfaceId), kRouterInterfaceId1), swss::FieldValueTuple(prependParamField(p4orch::kNeighborId), "0.0.0.0.0.0")}; // Invalid IP address. @@ -579,7 +788,7 @@ TEST_F(NextHopManagerTest, DeserializeP4NextHopAppDbEntryShouldReturnNullPointer TEST_F(NextHopManagerTest, DeserializeP4NextHopAppDbEntryShouldReturnNullPointerDueToUnexpectedField) { std::vector attributes = { - swss::FieldValueTuple(p4orch::kAction, "set_nexthop"), + swss::FieldValueTuple(p4orch::kAction, "set_ip_nexthop"), swss::FieldValueTuple(p4orch::kRouterInterfaceId, kRouterInterfaceId1), swss::FieldValueTuple("unexpected_field", "unexpected_value")}; @@ -591,7 +800,8 @@ TEST_F(NextHopManagerTest, DrainValidAppEntryShouldSucceed) nlohmann::json j; j[prependMatchField(p4orch::kNexthopId)] = kNextHopId; - std::vector fvs{{prependParamField(p4orch::kNeighborId), kNeighborId2}, + std::vector fvs{{p4orch::kAction, p4orch::kSetIpNexthop}, + {prependParamField(p4orch::kNeighborId), kNeighborId2}, {prependParamField(p4orch::kRouterInterfaceId), kRouterInterfaceId2}}; swss::KeyOpFieldsValuesTuple app_db_entry(std::string(APP_P4RT_NEXTHOP_TABLE_NAME) + kTableKeyDelimiter + j.dump(), @@ -608,6 +818,52 @@ TEST_F(NextHopManagerTest, DrainValidAppEntryShouldSucceed) EXPECT_TRUE(ValidateNextHopEntryAdd(kP4NextHopAppDbEntry2, kNextHopOid)); } +TEST_F(NextHopManagerTest, DrainValidTunnelNexthopAppEntryShouldSucceed) +{ + nlohmann::json tunnel_j; + tunnel_j[prependMatchField(p4orch::kNexthopId)] = kTunnelNextHopId; + std::vector tunnel_fvs = {{p4orch::kAction, p4orch::kSetTunnelNexthop}, + {prependParamField(p4orch::kNeighborId), kNeighborId2}, + {prependParamField(p4orch::kTunnelId), kTunnelId2}}; + + swss::KeyOpFieldsValuesTuple tunnel_app_db_entry( + std::string(APP_P4RT_NEXTHOP_TABLE_NAME) + kTableKeyDelimiter + tunnel_j.dump(), SET_COMMAND, tunnel_fvs); + + Enqueue(tunnel_app_db_entry); + + EXPECT_TRUE(ResolveNextHopEntryDependency(kP4TunnelNextHopAppDbEntry2, 
kTunnelOid2)); + EXPECT_CALL(mock_sai_next_hop_, create_next_hop(_, _, _, _)) + .WillOnce(DoAll(SetArgPointee<0>(kTunnelNextHopOid), Return(SAI_STATUS_SUCCESS))); + + Drain(); + + EXPECT_TRUE(ValidateNextHopEntryAdd(kP4TunnelNextHopAppDbEntry2, kTunnelNextHopOid)); + + nlohmann::json j; + j[prependMatchField(p4orch::kNexthopId)] = kTunnelNextHopId; + std::vector fvs; + swss::KeyOpFieldsValuesTuple app_db_entry(std::string(APP_P4RT_NEXTHOP_TABLE_NAME) + kTableKeyDelimiter + j.dump(), + DEL_COMMAND, fvs); + EXPECT_CALL(mock_sai_next_hop_, remove_next_hop(Eq(kTunnelNextHopOid))).WillOnce(Return(SAI_STATUS_SUCCESS)); + + Enqueue(app_db_entry); + Drain(); + + // Validate the next hop entry has been deleted in both P4 next hop manager + // and centralized mapper. + auto p4_next_hop_entry = GetNextHopEntry(KeyGenerator::generateNextHopKey(kP4TunnelNextHopAppDbEntry2.next_hop_id)); + EXPECT_EQ(p4_next_hop_entry, nullptr); + EXPECT_FALSE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_NEXT_HOP, + KeyGenerator::generateNextHopKey(kP4TunnelNextHopAppDbEntry2.next_hop_id))); + + // Validate ref count decrement. 
+ const std::string tunnel_key = KeyGenerator::generateTunnelKey(kP4TunnelNextHopAppDbEntry2.gre_tunnel_id); + const std::string neighbor_key = + KeyGenerator::generateNeighborKey(kP4TunnelEntry2.router_interface_id, kP4TunnelEntry2.neighbor_id); + EXPECT_TRUE(ValidateRefCnt(SAI_OBJECT_TYPE_TUNNEL, tunnel_key, 0)); + EXPECT_TRUE(ValidateRefCnt(SAI_OBJECT_TYPE_NEIGHBOR_ENTRY, neighbor_key, 0)); +} + TEST_F(NextHopManagerTest, DrainAppEntryWithInvalidOpShouldBeNoOp) { nlohmann::json j; @@ -638,15 +894,72 @@ TEST_F(NextHopManagerTest, DrainAppEntryWithInvalidFieldShouldBeNoOp) {"unexpected_field", "unexpected_value"}}; swss::KeyOpFieldsValuesTuple app_db_entry(std::string(APP_P4RT_NEXTHOP_TABLE_NAME) + kTableKeyDelimiter + j.dump(), - "INVALID_OP", fvs); + SET_COMMAND, fvs); Enqueue(app_db_entry); - EXPECT_TRUE(ResolveNextHopEntryDependency(kP4NextHopAppDbEntry2, kRouterInterfaceOid2)); + Drain(); + EXPECT_FALSE(ValidateNextHopEntryAdd(kP4NextHopAppDbEntry2, kNextHopOid)); + + // Missing action field + fvs = {{prependParamField(p4orch::kNeighborId), kNeighborId2}, + {prependParamField(p4orch::kRouterInterfaceId), kRouterInterfaceId2}}; + app_db_entry = {std::string(APP_P4RT_NEXTHOP_TABLE_NAME) + kTableKeyDelimiter + j.dump(), SET_COMMAND, fvs}; + + Enqueue(app_db_entry); + + Drain(); + EXPECT_FALSE(ValidateNextHopEntryAdd(kP4NextHopAppDbEntry2, kNextHopOid)); + + // Missing neighbor field + fvs = {{p4orch::kAction, p4orch::kSetIpNexthop}, + {prependParamField(p4orch::kRouterInterfaceId), kRouterInterfaceId2}}; + app_db_entry = {std::string(APP_P4RT_NEXTHOP_TABLE_NAME) + kTableKeyDelimiter + j.dump(), SET_COMMAND, fvs}; + + Enqueue(app_db_entry); Drain(); + EXPECT_FALSE(ValidateNextHopEntryAdd(kP4NextHopAppDbEntry2, kNextHopOid)); + + // set_ip_nexthop + missing router_interface_id + fvs = {{p4orch::kAction, p4orch::kSetIpNexthop}, {prependParamField(p4orch::kNeighborId), kNeighborId2}}; + app_db_entry = {std::string(APP_P4RT_NEXTHOP_TABLE_NAME) + kTableKeyDelimiter + 
j.dump(), SET_COMMAND, fvs}; + + Enqueue(app_db_entry); + Drain(); + EXPECT_FALSE(ValidateNextHopEntryAdd(kP4NextHopAppDbEntry2, kNextHopOid)); + + // set_ip_nexthop + invalid param/tunnel_id + fvs = {{p4orch::kAction, p4orch::kSetIpNexthop}, + {prependParamField(p4orch::kNeighborId), kNeighborId2}, + {prependParamField(p4orch::kTunnelId), kTunnelId1}}; + app_db_entry = {std::string(APP_P4RT_NEXTHOP_TABLE_NAME) + kTableKeyDelimiter + j.dump(), SET_COMMAND, fvs}; + + Enqueue(app_db_entry); + + Drain(); EXPECT_FALSE(ValidateNextHopEntryAdd(kP4NextHopAppDbEntry2, kNextHopOid)); + + // set_p2p_tunnel_encap_nexthop + invalid router_interface_id + fvs = {{p4orch::kAction, p4orch::kSetTunnelNexthop}, + {prependParamField(p4orch::kNeighborId), kNeighborId2}, + {prependParamField(p4orch::kRouterInterfaceId), kRouterInterfaceId1}}; + app_db_entry = {std::string(APP_P4RT_NEXTHOP_TABLE_NAME) + kTableKeyDelimiter + j.dump(), SET_COMMAND, fvs}; + + Enqueue(app_db_entry); + + Drain(); + EXPECT_FALSE(ValidateNextHopEntryAdd(kP4TunnelNextHopAppDbEntry2, kNextHopOid)); + + // set_p2p_tunnel_encap_nexthop + missing tunnel_id + fvs = {{p4orch::kAction, p4orch::kSetTunnelNexthop}, {prependParamField(p4orch::kNeighborId), kNeighborId2}}; + app_db_entry = {std::string(APP_P4RT_NEXTHOP_TABLE_NAME) + kTableKeyDelimiter + j.dump(), SET_COMMAND, fvs}; + + Enqueue(app_db_entry); + + Drain(); + EXPECT_FALSE(ValidateNextHopEntryAdd(kP4TunnelNextHopAppDbEntry2, kNextHopOid)); } TEST_F(NextHopManagerTest, DrainUpdateRequestShouldBeUnsupported) @@ -707,3 +1020,179 @@ TEST_F(NextHopManagerTest, DrainDeleteRequestShouldSucceedForExistingNextHop) EXPECT_TRUE(ValidateRefCnt(SAI_OBJECT_TYPE_ROUTER_INTERFACE, rif_key, 0)); EXPECT_TRUE(ValidateRefCnt(SAI_OBJECT_TYPE_NEIGHBOR_ENTRY, neighbor_key, 0)); } + +TEST_F(NextHopManagerTest, VerifyIpNextHopStateTest) +{ + auto *p4_next_hop_entry = AddNextHopEntry1(); + ASSERT_NE(p4_next_hop_entry, nullptr); + + // Setup ASIC DB. 
+ swss::Table table(nullptr, "ASIC_STATE"); + table.set( + "SAI_OBJECT_TYPE_NEXT_HOP:oid:0x65", + std::vector{swss::FieldValueTuple{"SAI_NEXT_HOP_ATTR_TYPE", "SAI_NEXT_HOP_TYPE_IP"}, + swss::FieldValueTuple{"SAI_NEXT_HOP_ATTR_IP", "10.0.0.1"}, + swss::FieldValueTuple{"SAI_NEXT_HOP_ATTR_ROUTER_INTERFACE_ID", "oid:0x1"}}); + + nlohmann::json j; + j[prependMatchField(p4orch::kNexthopId)] = kNextHopId; + const std::string db_key = std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + APP_P4RT_NEXTHOP_TABLE_NAME + + kTableKeyDelimiter + j.dump(); + std::vector attributes; + + // Verification should succeed with vaild key and value. + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kNeighborId), kNeighborId1}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kRouterInterfaceId), kRouterInterfaceId1}); + EXPECT_EQ(VerifyState(db_key, attributes), ""); + + // Invalid key should fail verification. + EXPECT_FALSE(VerifyState("invalid", attributes).empty()); + EXPECT_FALSE(VerifyState("invalid:invalid", attributes).empty()); + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + ":invalid", attributes).empty()); + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + ":invalid:invalid", attributes).empty()); + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + ":FIXED_NEXTHOP_TABLE:invalid", attributes).empty()); + + // Verification should fail with non-existing nexthop. + j[prependMatchField(p4orch::kNexthopId)] = "invalid"; + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + APP_P4RT_NEXTHOP_TABLE_NAME + + kTableKeyDelimiter + j.dump(), + attributes) + .empty()); + + // Verification should fail if nexthop key mismatches. 
+ auto saved_next_hop_key = p4_next_hop_entry->next_hop_key; + p4_next_hop_entry->next_hop_key = "invalid"; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + p4_next_hop_entry->next_hop_key = saved_next_hop_key; + + // Verification should fail if nexthop ID mismatches. + auto saved_next_hop_id = p4_next_hop_entry->next_hop_id; + p4_next_hop_entry->next_hop_id = "invalid"; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + p4_next_hop_entry->next_hop_id = saved_next_hop_id; + + // Verification should fail if ritf ID mismatches. + auto saved_router_interface_id = p4_next_hop_entry->router_interface_id; + p4_next_hop_entry->router_interface_id = kRouterInterfaceId2; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + p4_next_hop_entry->router_interface_id = saved_router_interface_id; + + // Verification should fail if neighbor ID mismatches. + auto saved_neighbor_id = p4_next_hop_entry->neighbor_id; + p4_next_hop_entry->neighbor_id = swss::IpAddress(kNeighborId2); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + p4_next_hop_entry->neighbor_id = saved_neighbor_id; + + // Verification should fail if tunnel ID mismatches. + auto saved_gre_tunnel_id = p4_next_hop_entry->gre_tunnel_id; + p4_next_hop_entry->gre_tunnel_id = "invalid"; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + p4_next_hop_entry->gre_tunnel_id = saved_gre_tunnel_id; +} + +TEST_F(NextHopManagerTest, VerifyTunnelNextHopStateTest) +{ + ASSERT_TRUE(ResolveNextHopEntryDependency(kP4TunnelNextHopAppDbEntry1, kTunnelOid1)); + + // Set up mock call. 
+ EXPECT_CALL( + mock_sai_next_hop_, + create_next_hop(::testing::NotNull(), Eq(gSwitchId), Eq(3), + Truly(std::bind(MatchCreateNextHopArgAttrList, std::placeholders::_1, + CreateAttributeListForNextHopObject(kP4TunnelNextHopAppDbEntry1, kTunnelOid1, + swss::IpAddress(kNeighborId1)))))) + .WillOnce(DoAll(SetArgPointee<0>(kTunnelNextHopOid), Return(SAI_STATUS_SUCCESS))); + + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessAddRequest(kP4TunnelNextHopAppDbEntry1)); + + auto p4_next_hop_entry = GetNextHopEntry(KeyGenerator::generateNextHopKey(kP4TunnelNextHopAppDbEntry1.next_hop_id)); + ASSERT_NE(p4_next_hop_entry, nullptr); + + // Setup ASIC DB. + swss::Table table(nullptr, "ASIC_STATE"); + table.set("SAI_OBJECT_TYPE_NEXT_HOP:oid:0x66", + std::vector{ + swss::FieldValueTuple{"SAI_NEXT_HOP_ATTR_TYPE", "SAI_NEXT_HOP_TYPE_TUNNEL_ENCAP"}, + swss::FieldValueTuple{"SAI_NEXT_HOP_ATTR_IP", "10.0.0.1"}, + swss::FieldValueTuple{"SAI_NEXT_HOP_ATTR_TUNNEL_ID", "oid:0xb"}}); + + nlohmann::json j; + j[prependMatchField(p4orch::kNexthopId)] = kTunnelNextHopId; + const std::string db_key = std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + APP_P4RT_NEXTHOP_TABLE_NAME + + kTableKeyDelimiter + j.dump(); + std::vector attributes; + + // Verification should succeed with vaild key and value. + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kNeighborId), kNeighborId1}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kRouterInterfaceId), kRouterInterfaceId1}); + EXPECT_EQ(VerifyState(db_key, attributes), ""); + + // Verification should fail if nexthop key mismatches. + auto saved_next_hop_key = p4_next_hop_entry->next_hop_key; + p4_next_hop_entry->next_hop_key = "invalid"; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + p4_next_hop_entry->next_hop_key = saved_next_hop_key; + + // Verification should fail if nexthop ID mismatches. 
+ auto saved_next_hop_id = p4_next_hop_entry->next_hop_id; + p4_next_hop_entry->next_hop_id = "invalid"; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + p4_next_hop_entry->next_hop_id = saved_next_hop_id; + + // Verification should fail if ritf ID mismatches. + auto saved_router_interface_id = p4_next_hop_entry->router_interface_id; + p4_next_hop_entry->router_interface_id = kRouterInterfaceId2; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + p4_next_hop_entry->router_interface_id = saved_router_interface_id; + + // Verification should fail if neighbor ID mismatches. + auto saved_neighbor_id = p4_next_hop_entry->neighbor_id; + p4_next_hop_entry->neighbor_id = swss::IpAddress(kNeighborId2); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + p4_next_hop_entry->neighbor_id = saved_neighbor_id; + + // Verification should fail if tunnel ID mismatches. + auto saved_gre_tunnel_id = p4_next_hop_entry->gre_tunnel_id; + p4_next_hop_entry->gre_tunnel_id = "invalid"; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + p4_next_hop_entry->gre_tunnel_id = saved_gre_tunnel_id; +} + +TEST_F(NextHopManagerTest, VerifyStateAsicDbTest) +{ + auto *p4_next_hop_entry = AddNextHopEntry1(); + ASSERT_NE(p4_next_hop_entry, nullptr); + + // Setup ASIC DB. 
+ swss::Table table(nullptr, "ASIC_STATE"); + table.set( + "SAI_OBJECT_TYPE_NEXT_HOP:oid:0x65", + std::vector{swss::FieldValueTuple{"SAI_NEXT_HOP_ATTR_TYPE", "SAI_NEXT_HOP_TYPE_IP"}, + swss::FieldValueTuple{"SAI_NEXT_HOP_ATTR_IP", "10.0.0.1"}, + swss::FieldValueTuple{"SAI_NEXT_HOP_ATTR_ROUTER_INTERFACE_ID", "oid:0x1"}}); + + nlohmann::json j; + j[prependMatchField(p4orch::kNexthopId)] = kNextHopId; + const std::string db_key = std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + APP_P4RT_NEXTHOP_TABLE_NAME + + kTableKeyDelimiter + j.dump(); + std::vector attributes; + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kNeighborId), kNeighborId1}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kRouterInterfaceId), kRouterInterfaceId1}); + + // Verification should succeed with correct ASIC DB values. + EXPECT_EQ(VerifyState(db_key, attributes), ""); + + // Verification should fail if ASIC DB values mismatch. + table.set("SAI_OBJECT_TYPE_NEXT_HOP:oid:0x65", std::vector{swss::FieldValueTuple{ + "SAI_NEXT_HOP_ATTR_IP", "fe80::21a:11ff:fe17:5f80"}}); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + + // Verification should fail if ASIC DB table is missing. 
+ table.del("SAI_OBJECT_TYPE_NEXT_HOP:oid:0x65"); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + table.set( + "SAI_OBJECT_TYPE_NEXT_HOP:oid:0x65", + std::vector{swss::FieldValueTuple{"SAI_NEXT_HOP_ATTR_TYPE", "SAI_NEXT_HOP_TYPE_IP"}, + swss::FieldValueTuple{"SAI_NEXT_HOP_ATTR_IP", "10.0.0.1"}, + swss::FieldValueTuple{"SAI_NEXT_HOP_ATTR_ROUTER_INTERFACE_ID", "oid:0x1"}}); +} diff --git a/orchagent/p4orch/tests/p4oidmapper_test.cpp b/orchagent/p4orch/tests/p4oidmapper_test.cpp index 131ee7aedb..bde2ee656b 100644 --- a/orchagent/p4orch/tests/p4oidmapper_test.cpp +++ b/orchagent/p4orch/tests/p4oidmapper_test.cpp @@ -4,9 +4,11 @@ #include +#include "sai_serialize.h" + extern "C" { -#include "saitypes.h" +#include "sai.h" } namespace @@ -19,6 +21,11 @@ constexpr char *kRouteObject2 = "Route2"; constexpr sai_object_id_t kOid1 = 1; constexpr sai_object_id_t kOid2 = 2; +std::string convertToDBField(_In_ const sai_object_type_t object_type, _In_ const std::string &key) +{ + return sai_serialize_object_type(object_type) + ":" + key; +} + TEST(P4OidMapperTest, MapperTest) { P4OidMapper mapper; @@ -41,6 +48,10 @@ TEST(P4OidMapperTest, MapperTest) EXPECT_EQ(kOid1, oid); EXPECT_TRUE(mapper.getOID(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject2, &oid)); EXPECT_EQ(kOid2, oid); + EXPECT_TRUE(mapper.verifyOIDMapping(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject1, kOid1).empty()); + EXPECT_TRUE(mapper.verifyOIDMapping(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject2, kOid2).empty()); + EXPECT_FALSE(mapper.verifyOIDMapping(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject1, kOid2).empty()); + EXPECT_FALSE(mapper.verifyOIDMapping(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject2, kOid1).empty()); uint32_t ref_count; EXPECT_TRUE(mapper.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject1, &ref_count)); @@ -74,6 +85,8 @@ TEST(P4OidMapperTest, MapperTest) EXPECT_TRUE(mapper.existsOID(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject2)); EXPECT_FALSE(mapper.existsOID(SAI_OBJECT_TYPE_ROUTE_ENTRY, kRouteObject1)); 
EXPECT_FALSE(mapper.existsOID(SAI_OBJECT_TYPE_ROUTE_ENTRY, kRouteObject2)); + EXPECT_FALSE(mapper.verifyOIDMapping(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject1, kOid1).empty()); + EXPECT_FALSE(mapper.verifyOIDMapping(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject2, kOid2).empty()); } TEST(P4OidMapperTest, ErrorTest) @@ -119,4 +132,27 @@ TEST(P4OidMapperTest, ErrorTest) EXPECT_FALSE(mapper.decreaseRefCount(SAI_OBJECT_TYPE_ROUTE_ENTRY, kRouteObject1)); } +TEST(P4OidMapperTest, VerifyMapperTest) +{ + P4OidMapper mapper; + swss::Table table(nullptr, "P4RT_KEY_TO_OID"); + EXPECT_TRUE(mapper.setOID(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject1, kOid1)); + EXPECT_TRUE(mapper.setOID(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject2, kOid2, + /*ref_count=*/100)); + + EXPECT_TRUE(mapper.verifyOIDMapping(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject1, kOid1).empty()); + EXPECT_TRUE(mapper.verifyOIDMapping(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject2, kOid2).empty()); + EXPECT_FALSE(mapper.verifyOIDMapping(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject1, kOid2).empty()); + EXPECT_FALSE(mapper.verifyOIDMapping(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject2, kOid1).empty()); + EXPECT_FALSE(mapper.verifyOIDMapping(SAI_OBJECT_TYPE_NEXT_HOP, "invalid", kOid1).empty()); + + // Verification should fail if OID in DB mismatches. + table.hset("", convertToDBField(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject1), sai_serialize_object_id(kOid2)); + EXPECT_FALSE(mapper.verifyOIDMapping(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject1, kOid1).empty()); + + // Verification should fail if OID in DB is not found. 
+ table.hdel("", convertToDBField(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject1)); + EXPECT_FALSE(mapper.verifyOIDMapping(SAI_OBJECT_TYPE_NEXT_HOP, kNextHopObject1, kOid1).empty()); +} + } // namespace diff --git a/orchagent/p4orch/tests/p4orch_util_test.cpp b/orchagent/p4orch/tests/p4orch_util_test.cpp index 8c171b1e6e..ba86624d4c 100644 --- a/orchagent/p4orch/tests/p4orch_util_test.cpp +++ b/orchagent/p4orch/tests/p4orch_util_test.cpp @@ -77,4 +77,57 @@ TEST(P4OrchUtilTest, QuotedVarTest) EXPECT_EQ(QuotedVar(bar.c_str()), "'a string has \\\'quote\\\''"); } +TEST(P4OrchUtilTest, VerifyAttrsTest) +{ + EXPECT_TRUE(verifyAttrs(std::vector{swss::FieldValueTuple{"k1", "v1"}}, + std::vector{swss::FieldValueTuple{"k1", "v1"}}, + std::vector{}, + /*allow_unknown=*/false) + .empty()); + EXPECT_FALSE(verifyAttrs(std::vector{swss::FieldValueTuple{"k1", "v1"}, + swss::FieldValueTuple{"k2", "v2"}}, + std::vector{swss::FieldValueTuple{"k1", "v1"}}, + std::vector{}, + /*allow_unknown=*/false) + .empty()); + EXPECT_TRUE(verifyAttrs(std::vector{swss::FieldValueTuple{"k1", "v1"}, + swss::FieldValueTuple{"k2", "v2"}}, + std::vector{swss::FieldValueTuple{"k1", "v1"}}, + std::vector{}, + /*allow_unknown=*/true) + .empty()); + EXPECT_TRUE(verifyAttrs(std::vector{swss::FieldValueTuple{"k1", "v1"}, + swss::FieldValueTuple{"k2", "v2"}}, + std::vector{swss::FieldValueTuple{"k1", "v1"}}, + std::vector{swss::FieldValueTuple{"k2", "v2"}}, + /*allow_unknown=*/false) + .empty()); + EXPECT_FALSE(verifyAttrs(std::vector{swss::FieldValueTuple{"k1", "v1"}, + swss::FieldValueTuple{"k2", "v2"}}, + std::vector{swss::FieldValueTuple{"k1", "v3"}}, + std::vector{swss::FieldValueTuple{"k2", "v2"}}, + /*allow_unknown=*/false) + .empty()); + EXPECT_FALSE(verifyAttrs(std::vector{swss::FieldValueTuple{"k1", "v1"}, + swss::FieldValueTuple{"k2", "v2"}}, + std::vector{swss::FieldValueTuple{"k1", "v1"}}, + std::vector{swss::FieldValueTuple{"k2", "v3"}}, + /*allow_unknown=*/false) + .empty()); + EXPECT_FALSE( + 
verifyAttrs( + std::vector{swss::FieldValueTuple{"k1", "v1"}, swss::FieldValueTuple{"k2", "v2"}}, + std::vector{swss::FieldValueTuple{"k1", "v1"}, swss::FieldValueTuple{"k3", "v3"}}, + std::vector{swss::FieldValueTuple{"k2", "v2"}}, + /*allow_unknown=*/true) + .empty()); + EXPECT_TRUE( + verifyAttrs( + std::vector{swss::FieldValueTuple{"k1", "v1"}, swss::FieldValueTuple{"k2", "v2"}}, + std::vector{swss::FieldValueTuple{"k1", "v1"}}, + std::vector{swss::FieldValueTuple{"k2", "v2"}, swss::FieldValueTuple{"k3", "v3"}}, + /*allow_unknown=*/false) + .empty()); +} + } // namespace diff --git a/orchagent/p4orch/tests/return_code_test.cpp b/orchagent/p4orch/tests/return_code_test.cpp index 7a866827d7..7ab21121aa 100644 --- a/orchagent/p4orch/tests/return_code_test.cpp +++ b/orchagent/p4orch/tests/return_code_test.cpp @@ -4,6 +4,7 @@ #include #include +#include extern "C" { @@ -105,6 +106,43 @@ TEST(ReturnCodeTest, CopyAndAppendStringInMsg) EXPECT_EQ("SWSS_RC_INVALID_PARAM:Detailed reasons. More details.", return_code.toString()); } +TEST(ReturnCodeTest, SaiCodeToReturnCodeMapping) +{ + std::unordered_map expect_mapping = { + {SAI_STATUS_SUCCESS, StatusCode::SWSS_RC_SUCCESS}, + {SAI_STATUS_NOT_SUPPORTED, StatusCode::SWSS_RC_UNIMPLEMENTED}, + {SAI_STATUS_NO_MEMORY, StatusCode::SWSS_RC_NO_MEMORY}, + {SAI_STATUS_INSUFFICIENT_RESOURCES, StatusCode::SWSS_RC_FULL}, + {SAI_STATUS_INVALID_PARAMETER, StatusCode::SWSS_RC_INVALID_PARAM}, + {SAI_STATUS_ITEM_ALREADY_EXISTS, StatusCode::SWSS_RC_EXISTS}, + {SAI_STATUS_ITEM_NOT_FOUND, StatusCode::SWSS_RC_NOT_FOUND}, + {SAI_STATUS_TABLE_FULL, StatusCode::SWSS_RC_FULL}, + {SAI_STATUS_NOT_IMPLEMENTED, StatusCode::SWSS_RC_UNIMPLEMENTED}, + {SAI_STATUS_OBJECT_IN_USE, StatusCode::SWSS_RC_IN_USE}, + {SAI_STATUS_FAILURE, StatusCode::SWSS_RC_UNKNOWN}, + {SAI_STATUS_INVALID_ATTRIBUTE_0, StatusCode::SWSS_RC_INVALID_PARAM}, + {SAI_STATUS_INVALID_ATTRIBUTE_10, StatusCode::SWSS_RC_INVALID_PARAM}, + {SAI_STATUS_INVALID_ATTRIBUTE_MAX, 
StatusCode::SWSS_RC_INVALID_PARAM}, + {SAI_STATUS_INVALID_ATTR_VALUE_0, StatusCode::SWSS_RC_INVALID_PARAM}, + {SAI_STATUS_INVALID_ATTR_VALUE_10, StatusCode::SWSS_RC_INVALID_PARAM}, + {SAI_STATUS_INVALID_ATTR_VALUE_MAX, StatusCode::SWSS_RC_INVALID_PARAM}, + {SAI_STATUS_ATTR_NOT_IMPLEMENTED_0, StatusCode::SWSS_RC_UNIMPLEMENTED}, + {SAI_STATUS_ATTR_NOT_IMPLEMENTED_10, StatusCode::SWSS_RC_UNIMPLEMENTED}, + {SAI_STATUS_ATTR_NOT_IMPLEMENTED_MAX, StatusCode::SWSS_RC_UNIMPLEMENTED}, + {SAI_STATUS_UNKNOWN_ATTRIBUTE_0, StatusCode::SWSS_RC_INVALID_PARAM}, + {SAI_STATUS_UNKNOWN_ATTRIBUTE_10, StatusCode::SWSS_RC_INVALID_PARAM}, + {SAI_STATUS_UNKNOWN_ATTRIBUTE_MAX, StatusCode::SWSS_RC_INVALID_PARAM}, + {SAI_STATUS_ATTR_NOT_SUPPORTED_0, StatusCode::SWSS_RC_UNIMPLEMENTED}, + {SAI_STATUS_ATTR_NOT_SUPPORTED_10, StatusCode::SWSS_RC_UNIMPLEMENTED}, + {SAI_STATUS_ATTR_NOT_SUPPORTED_MAX, StatusCode::SWSS_RC_UNIMPLEMENTED}, + }; + for (const auto &it : expect_mapping) + { + ReturnCode return_code(it.first); + EXPECT_EQ(return_code, it.second); + } +} + TEST(ReturnCodeTest, ReturnCodeOrHasInt) { ReturnCodeOr return_code_or = 42; diff --git a/orchagent/p4orch/tests/route_manager_test.cpp b/orchagent/p4orch/tests/route_manager_test.cpp index de1238761b..640cf18ccf 100644 --- a/orchagent/p4orch/tests/route_manager_test.cpp +++ b/orchagent/p4orch/tests/route_manager_test.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include "ipprefix.h" #include "json.hpp" @@ -23,14 +24,14 @@ using ::testing::_; using ::testing::DoAll; using ::testing::Eq; using ::testing::Return; -using ::testing::SetArgPointee; +using ::testing::SetArrayArgument; using ::testing::StrictMock; -using ::testing::Truly; extern sai_object_id_t gSwitchId; extern sai_object_id_t gVirtualRouterId; extern sai_object_id_t gVrfOid; extern char *gVrfName; +extern size_t gMaxBulkSize; extern sai_route_api_t *sai_route_api; extern VRFOrch *gVrfOrch; @@ -38,6 +39,7 @@ namespace { constexpr char *kIpv4Prefix = "10.11.12.0/24"; 
+constexpr char *kIpv4Prefix2 = "10.12.12.0/24"; constexpr char *kIpv6Prefix = "2001:db8:1::/32"; constexpr char *kNexthopId1 = "ju1u32m1.atl11:qe-3/7"; constexpr sai_object_id_t kNexthopOid1 = 1; @@ -47,6 +49,10 @@ constexpr char *kWcmpGroup1 = "wcmp-group-1"; constexpr sai_object_id_t kWcmpGroupOid1 = 3; constexpr char *kWcmpGroup2 = "wcmp-group-2"; constexpr sai_object_id_t kWcmpGroupOid2 = 4; +constexpr char *kMetadata1 = "1"; +constexpr char *kMetadata2 = "2"; +uint32_t kMetadataInt1 = 1; +uint32_t kMetadataInt2 = 2; // Returns true if the two prefixes are equal. False otherwise. // Arguments must be non-nullptr. @@ -65,61 +71,114 @@ bool PrefixCmp(const sai_ip_prefix_t *x, const sai_ip_prefix_t *y) memcmp(&x->mask.ip6, &y->mask.ip6, sizeof(sai_ip6_t)) == 0; } -// Matches the sai_route_entry_t argument. -bool MatchSaiRouteEntry(const sai_ip_prefix_t &expected_prefix, const sai_route_entry_t *route_entry, - const sai_object_id_t expected_vrf_oid) +// Matches two SAI route entries. +bool MatchSaiRouteEntry(const sai_route_entry_t &route_entry, const sai_route_entry_t &exp_route_entry) { - if (route_entry == nullptr) + if (route_entry.switch_id != exp_route_entry.switch_id) { return false; } - if (route_entry->vr_id != expected_vrf_oid) + if (route_entry.vr_id != exp_route_entry.vr_id) { return false; } - if (route_entry->switch_id != gSwitchId) - { - return false; - } - if (!PrefixCmp(&route_entry->destination, &expected_prefix)) + if (!PrefixCmp(&route_entry.destination, &exp_route_entry.destination)) { return false; } return true; } -// Matches the action type sai_attribute_t argument. -bool MatchSaiAttributeAction(sai_packet_action_t expected_action, const sai_attribute_t *attr) +// Matches two SAI attributes. 
+bool MatchSaiAttribute(const sai_attribute_t &attr, const sai_attribute_t &exp_attr) { - if (attr == nullptr) + if (exp_attr.id == SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION) { - return false; + if (attr.id != SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION || attr.value.s32 != exp_attr.value.s32) + { + return false; + } } - if (attr->id != SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION) + if (exp_attr.id == SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID) { - return false; + if (attr.id != SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID || attr.value.oid != exp_attr.value.oid) + { + return false; + } } - if (attr->value.s32 != expected_action) + if (exp_attr.id == SAI_ROUTE_ENTRY_ATTR_META_DATA) { - return false; + if (attr.id != SAI_ROUTE_ENTRY_ATTR_META_DATA || attr.value.u32 != exp_attr.value.u32) + { + return false; + } } return true; } -// Matches the nexthop ID type sai_attribute_t argument. -bool MatchSaiAttributeNexthopId(sai_object_id_t expected_oid, const sai_attribute_t *attr) +MATCHER_P(ArrayEq, array, "") { - if (attr == nullptr) + for (size_t i = 0; i < array.size(); ++i) { - return false; + if (arg[i] != array[i]) + { + return false; + } } - if (attr->id != SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID) + return true; +} + +MATCHER_P(RouteEntryArrayEq, array, "") +{ + for (size_t i = 0; i < array.size(); ++i) { - return false; + if (!MatchSaiRouteEntry(arg[i], array[i])) + { + return false; + } + } + return true; +} + +MATCHER_P(AttrArrayEq, array, "") +{ + for (size_t i = 0; i < array.size(); ++i) + { + if (!MatchSaiAttribute(arg[i], array[i])) + { + return false; + } } - if (attr->value.oid != expected_oid) + return true; +} + +MATCHER_P(AttrArrayArrayEq, array, "") +{ + for (size_t i = 0; i < array.size(); ++i) { - return false; + for (size_t j = 0; j < array[i].size(); j++) + { + if (!MatchSaiAttribute(arg[i][j], array[i][j])) + { + return false; + } + } + } + return true; +} + +MATCHER_P(FieldValueTupleArrayEq, array, "") +{ + for (size_t i = 0; i < array.size(); ++i) + { + if (fvField(arg[i]) != fvField(array[i])) + 
{ + return false; + } + if (fvValue(arg[i]) != fvValue(array[i])) + { + return false; + } } return true; } @@ -163,78 +222,162 @@ class RouteManagerTest : public ::testing::Test return route_manager_.getRouteEntry(route_entry_key); } - ReturnCode ValidateRouteEntry(const P4RouteEntry &route_entry) + ReturnCode ValidateRouteEntry(const P4RouteEntry &route_entry, const std::string &operation) { - return route_manager_.validateRouteEntry(route_entry); + return route_manager_.validateRouteEntry(route_entry, operation); } - ReturnCode ValidateSetRouteEntry(const P4RouteEntry &route_entry) + std::vector CreateRouteEntries(const std::vector &route_entries) { - return route_manager_.validateSetRouteEntry(route_entry); + return route_manager_.createRouteEntries(route_entries); } - ReturnCode ValidateDelRouteEntry(const P4RouteEntry &route_entry) + std::vector UpdateRouteEntries(const std::vector &route_entries) { - return route_manager_.validateDelRouteEntry(route_entry); + return route_manager_.updateRouteEntries(route_entries); } - ReturnCode CreateRouteEntry(const P4RouteEntry &route_entry) + std::vector DeleteRouteEntries(const std::vector &route_entries) { - return route_manager_.createRouteEntry(route_entry); + return route_manager_.deleteRouteEntries(route_entries); } - ReturnCode UpdateRouteEntry(const P4RouteEntry &route_entry) + void Enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) { - return route_manager_.updateRouteEntry(route_entry); + route_manager_.enqueue(table_name, entry); } - ReturnCode DeleteRouteEntry(const P4RouteEntry &route_entry) + void Drain() { - return route_manager_.deleteRouteEntry(route_entry); + route_manager_.drain(); } - void Enqueue(const swss::KeyOpFieldsValuesTuple &entry) + std::string VerifyState(const std::string &key, const std::vector &tuple) { - route_manager_.enqueue(entry); + return route_manager_.verifyState(key, tuple); } - void Drain() + // Generates a KeyOpFieldsValuesTuple. 
+ swss::KeyOpFieldsValuesTuple GenerateKeyOpFieldsValuesTuple(const std::string &vrf_id, + const swss::IpPrefix &route_prefix, + const std::string &command, const std::string &action, + const std::string &action_param, + const std::string &route_metadata = "") { - route_manager_.drain(); + nlohmann::json j; + std::string key_prefix; + j[prependMatchField(p4orch::kVrfId)] = vrf_id; + if (route_prefix.isV4()) + { + j[prependMatchField(p4orch::kIpv4Dst)] = route_prefix.to_string(); + key_prefix = std::string(APP_P4RT_IPV4_TABLE_NAME) + kTableKeyDelimiter; + } + else + { + j[prependMatchField(p4orch::kIpv6Dst)] = route_prefix.to_string(); + key_prefix = std::string(APP_P4RT_IPV6_TABLE_NAME) + kTableKeyDelimiter; + } + std::vector attributes; + if (command == SET_COMMAND) + { + attributes.push_back(swss::FieldValueTuple{p4orch::kAction, action}); + if (action == p4orch::kSetNexthopId || p4orch::kSetNexthopIdAndMetadata) + { + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kNexthopId), action_param}); + } + else if (action == p4orch::kSetWcmpGroupId || action == p4orch::kSetWcmpGroupIdAndMetadata) + { + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kWcmpGroupId), action_param}); + } + if (action == p4orch::kSetNexthopIdAndMetadata || action == p4orch::kSetWcmpGroupIdAndMetadata || + action == p4orch::kSetMetadataAndDrop) + { + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kRouteMetadata), route_metadata}); + } + } + return swss::KeyOpFieldsValuesTuple(key_prefix + j.dump(), command, attributes); } - // Sets up a nexthop route entry for test. - void SetupNexthopIdRouteEntry(const std::string &vrf_id, const swss::IpPrefix &route_prefix, - const std::string &nexthop_id, sai_object_id_t nexthop_oid) + // Generates a P4RouteEntry. 
+ P4RouteEntry GenerateP4RouteEntry(const std::string &vrf_id, const swss::IpPrefix &route_prefix, + const std::string &action, const std::string &action_param, + const std::string &route_metadata = "") { P4RouteEntry route_entry = {}; route_entry.vrf_id = vrf_id; route_entry.route_prefix = route_prefix; - route_entry.action = p4orch::kSetNexthopId; - route_entry.nexthop_id = nexthop_id; + route_entry.route_metadata = route_metadata; + route_entry.action = action; + if (action == p4orch::kSetNexthopId || action == p4orch::kSetNexthopIdAndMetadata) + { + route_entry.nexthop_id = action_param; + } + else if (action == p4orch::kSetWcmpGroupId || action == p4orch::kSetWcmpGroupIdAndMetadata) + { + route_entry.wcmp_group = action_param; + } route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); + return route_entry; + } + + // Sets up a nexthop route entry for test. + void SetupNexthopIdRouteEntry(const std::string &vrf_id, const swss::IpPrefix &route_prefix, + const std::string &nexthop_id, sai_object_id_t nexthop_oid, + const std::string &metadata = "") + { + auto route_entry = GenerateP4RouteEntry( + vrf_id, route_prefix, (metadata.empty()) ? p4orch::kSetNexthopId : p4orch::kSetNexthopIdAndMetadata, + nexthop_id, metadata); p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry.nexthop_id), nexthop_oid); - EXPECT_CALL(mock_sai_route_, create_route_entry(_, _, _)).WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, CreateRouteEntry(route_entry)); + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, create_route_entries(_, _, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(CreateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); } // Sets up a wcmp route entry for test. 
void SetupWcmpGroupRouteEntry(const std::string &vrf_id, const swss::IpPrefix &route_prefix, - const std::string &wcmp_group_id, sai_object_id_t wcmp_group_oid) + const std::string &wcmp_group_id, sai_object_id_t wcmp_group_oid, + const std::string &metadata = "") { - P4RouteEntry route_entry = {}; - route_entry.vrf_id = vrf_id; - route_entry.route_prefix = route_prefix; - route_entry.action = p4orch::kSetWcmpGroupId; - route_entry.wcmp_group = wcmp_group_id; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); + auto route_entry = GenerateP4RouteEntry( + vrf_id, route_prefix, (metadata.empty()) ? p4orch::kSetWcmpGroupId : p4orch::kSetWcmpGroupIdAndMetadata, + wcmp_group_id, metadata); p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, KeyGenerator::generateWcmpGroupKey(route_entry.wcmp_group), wcmp_group_oid); - EXPECT_CALL(mock_sai_route_, create_route_entry(_, _, _)).WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, CreateRouteEntry(route_entry)); + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, create_route_entries(_, _, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(CreateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); + } + + // Sets up a drop route entry for test. 
+ void SetupDropRouteEntry(const std::string &vrf_id, const swss::IpPrefix &route_prefix) + { + auto route_entry = GenerateP4RouteEntry(vrf_id, route_prefix, p4orch::kDrop, ""); + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, create_route_entries(_, _, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(CreateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); + } + + // Sets up a trap route entry for test. + void SetupTrapRouteEntry(const std::string &vrf_id, const swss::IpPrefix &route_prefix) + { + auto route_entry = GenerateP4RouteEntry(vrf_id, route_prefix, p4orch::kTrap, ""); + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, create_route_entries(_, _, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(CreateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); } // Verifies the two given route entries are identical. 
@@ -246,6 +389,7 @@ class RouteManagerTest : public ::testing::Test EXPECT_EQ(x.action, y.action); EXPECT_EQ(x.nexthop_id, y.nexthop_id); EXPECT_EQ(x.wcmp_group, y.wcmp_group); + EXPECT_EQ(x.route_metadata, y.route_metadata); EXPECT_EQ(x.sai_route_entry.vr_id, y.sai_route_entry.vr_id); EXPECT_EQ(x.sai_route_entry.switch_id, y.sai_route_entry.switch_id); EXPECT_TRUE(PrefixCmp(&x.sai_route_entry.destination, &y.sai_route_entry.destination)); @@ -265,7 +409,7 @@ class RouteManagerTest : public ::testing::Test } StrictMock mock_sai_route_; - MockResponsePublisher publisher_; + StrictMock publisher_; P4OidMapper p4_oid_mapper_; RouteManager route_manager_; }; @@ -273,45 +417,36 @@ class RouteManagerTest : public ::testing::Test TEST_F(RouteManagerTest, MergeRouteEntryWithNexthopIdActionDestTest) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); - P4RouteEntry dest = {}; - dest.vrf_id = gVrfName; - dest.route_prefix = swss_ipv4_route_prefix; - dest.action = p4orch::kSetNexthopId; - dest.nexthop_id = kNexthopId1; - dest.route_entry_key = KeyGenerator::generateRouteKey(dest.vrf_id, dest.route_prefix); + auto dest = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId1); dest.sai_route_entry.vr_id = gVrfOid; dest.sai_route_entry.switch_id = gSwitchId; copy(dest.sai_route_entry.destination, swss_ipv4_route_prefix); // Source is identical to destination. - P4RouteEntry src = {}; - src.vrf_id = gVrfName; - src.route_prefix = swss_ipv4_route_prefix; - src.action = p4orch::kSetNexthopId; - src.nexthop_id = kNexthopId1; - src.route_entry_key = KeyGenerator::generateRouteKey(src.vrf_id, src.route_prefix); + auto src = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId1); P4RouteEntry ret = {}; EXPECT_FALSE(MergeRouteEntry(dest, src, &ret)); VerifyRouteEntriesEq(dest, ret); // Source has different nexthop ID. 
- src = {}; - src.vrf_id = gVrfName; - src.route_prefix = swss_ipv4_route_prefix; - src.nexthop_id = kNexthopId2; - src.route_entry_key = KeyGenerator::generateRouteKey(src.vrf_id, src.route_prefix); + src = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId2); EXPECT_TRUE(MergeRouteEntry(dest, src, &ret)); P4RouteEntry expect_entry = dest; expect_entry.nexthop_id = kNexthopId2; VerifyRouteEntriesEq(expect_entry, ret); + // Source has set nexthop ID and metadata action and dest has set nexthop ID + // action. + src = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopIdAndMetadata, kNexthopId1, + kMetadata1); + EXPECT_TRUE(MergeRouteEntry(dest, src, &ret)); + expect_entry = dest; + expect_entry.action = p4orch::kSetNexthopIdAndMetadata; + expect_entry.route_metadata = kMetadata1; + VerifyRouteEntriesEq(expect_entry, ret); + // Source has wcmp group action and dest has nexhop ID action. - src = {}; - src.vrf_id = gVrfName; - src.route_prefix = swss_ipv4_route_prefix; - src.action = p4orch::kSetWcmpGroupId; - src.wcmp_group = kWcmpGroup1; - src.route_entry_key = KeyGenerator::generateRouteKey(src.vrf_id, src.route_prefix); + src = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetWcmpGroupId, kWcmpGroup1); EXPECT_TRUE(MergeRouteEntry(dest, src, &ret)); expect_entry = dest; expect_entry.nexthop_id = ""; @@ -320,60 +455,101 @@ TEST_F(RouteManagerTest, MergeRouteEntryWithNexthopIdActionDestTest) VerifyRouteEntriesEq(expect_entry, ret); // Source has drop action and dest has nexhop ID action. 
- src = {}; - src.vrf_id = gVrfName; - src.route_prefix = swss_ipv4_route_prefix; - src.action = p4orch::kDrop; - src.route_entry_key = KeyGenerator::generateRouteKey(src.vrf_id, src.route_prefix); + src = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kDrop, ""); + EXPECT_TRUE(MergeRouteEntry(dest, src, &ret)); + expect_entry = dest; + expect_entry.nexthop_id = ""; + expect_entry.action = p4orch::kDrop; + VerifyRouteEntriesEq(expect_entry, ret); +} + +TEST_F(RouteManagerTest, MergeRouteEntryWithNexthopIdAndMetadataActionDestTest) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + auto dest = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopIdAndMetadata, kNexthopId1, + kMetadata1); + dest.sai_route_entry.vr_id = gVrfOid; + dest.sai_route_entry.switch_id = gSwitchId; + copy(dest.sai_route_entry.destination, swss_ipv4_route_prefix); + + // Source is identical to destination. + auto src = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopIdAndMetadata, kNexthopId1, + kMetadata1); + P4RouteEntry ret = {}; + EXPECT_FALSE(MergeRouteEntry(dest, src, &ret)); + VerifyRouteEntriesEq(dest, ret); + + // Source has different metadata. + src.route_metadata = kMetadata2; + EXPECT_TRUE(MergeRouteEntry(dest, src, &ret)); + P4RouteEntry expect_entry = dest; + expect_entry.route_metadata = kMetadata2; + VerifyRouteEntriesEq(expect_entry, ret); + + // Source has different nexthop ID and metadata. + src = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopIdAndMetadata, kNexthopId2, + kMetadata2); + EXPECT_TRUE(MergeRouteEntry(dest, src, &ret)); + expect_entry = dest; + expect_entry.nexthop_id = kNexthopId2; + expect_entry.route_metadata = kMetadata2; + VerifyRouteEntriesEq(expect_entry, ret); + + // Source has wcmp group action and dest has nexhop ID and metadata action. 
+ src = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetWcmpGroupId, kWcmpGroup1); + EXPECT_TRUE(MergeRouteEntry(dest, src, &ret)); + expect_entry = dest; + expect_entry.nexthop_id = ""; + expect_entry.action = p4orch::kSetWcmpGroupId; + expect_entry.wcmp_group = kWcmpGroup1; + expect_entry.route_metadata = ""; + VerifyRouteEntriesEq(expect_entry, ret); + + // Source has drop action and dest has nexhop ID and metadata action. + src = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kDrop, ""); EXPECT_TRUE(MergeRouteEntry(dest, src, &ret)); expect_entry = dest; expect_entry.nexthop_id = ""; expect_entry.action = p4orch::kDrop; + expect_entry.route_metadata = ""; + VerifyRouteEntriesEq(expect_entry, ret); + + // Source has wcmp group and metadata action and dest has nexhop ID and + // metadata action. + src = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetWcmpGroupIdAndMetadata, kWcmpGroup1, + kMetadata2); + EXPECT_TRUE(MergeRouteEntry(dest, src, &ret)); + expect_entry = dest; + expect_entry.nexthop_id = ""; + expect_entry.action = p4orch::kSetWcmpGroupIdAndMetadata; + expect_entry.wcmp_group = kWcmpGroup1; + expect_entry.route_metadata = kMetadata2; VerifyRouteEntriesEq(expect_entry, ret); } TEST_F(RouteManagerTest, MergeRouteEntryWithWcmpGroupActionDestTest) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); - P4RouteEntry dest = {}; - dest.vrf_id = gVrfName; - dest.route_prefix = swss_ipv4_route_prefix; - dest.action = p4orch::kSetWcmpGroupId; - dest.wcmp_group = kWcmpGroup1; - dest.route_entry_key = KeyGenerator::generateRouteKey(dest.vrf_id, dest.route_prefix); + auto dest = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetWcmpGroupId, kWcmpGroup1); dest.sai_route_entry.vr_id = gVrfOid; dest.sai_route_entry.switch_id = gSwitchId; copy(dest.sai_route_entry.destination, swss_ipv4_route_prefix); // Source is identical to destination. 
- P4RouteEntry src = {}; - src.vrf_id = gVrfName; - src.route_prefix = swss_ipv4_route_prefix; - src.action = p4orch::kSetWcmpGroupId; - src.wcmp_group = kWcmpGroup1; - src.route_entry_key = KeyGenerator::generateRouteKey(src.vrf_id, src.route_prefix); + auto src = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetWcmpGroupId, kWcmpGroup1); P4RouteEntry ret = {}; EXPECT_FALSE(MergeRouteEntry(dest, src, &ret)); VerifyRouteEntriesEq(dest, ret); // Source has different wcmp group. - src = {}; - src.vrf_id = gVrfName; - src.route_prefix = swss_ipv4_route_prefix; - src.wcmp_group = kWcmpGroup2; - src.route_entry_key = KeyGenerator::generateRouteKey(dest.vrf_id, dest.route_prefix); + src = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetWcmpGroupId, kWcmpGroup2); EXPECT_TRUE(MergeRouteEntry(dest, src, &ret)); P4RouteEntry expect_entry = dest; expect_entry.wcmp_group = kWcmpGroup2; VerifyRouteEntriesEq(expect_entry, ret); // Source has nexthop ID action and dest has wcmp group action. - src = {}; - src.vrf_id = gVrfName; - src.route_prefix = swss_ipv4_route_prefix; - src.action = p4orch::kSetNexthopId; - src.nexthop_id = kNexthopId1; - src.route_entry_key = KeyGenerator::generateRouteKey(src.vrf_id, src.route_prefix); + src = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId1); EXPECT_TRUE(MergeRouteEntry(dest, src, &ret)); expect_entry = dest; expect_entry.wcmp_group = ""; @@ -382,11 +558,7 @@ TEST_F(RouteManagerTest, MergeRouteEntryWithWcmpGroupActionDestTest) VerifyRouteEntriesEq(expect_entry, ret); // Source has drop action and dest has wcmp group action. 
- src = {}; - src.vrf_id = gVrfName; - src.route_prefix = swss_ipv4_route_prefix; - src.action = p4orch::kDrop; - src.route_entry_key = KeyGenerator::generateRouteKey(src.vrf_id, src.route_prefix); + src = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kDrop, ""); EXPECT_TRUE(MergeRouteEntry(dest, src, &ret)); expect_entry = dest; expect_entry.wcmp_group = ""; @@ -397,45 +569,61 @@ TEST_F(RouteManagerTest, MergeRouteEntryWithWcmpGroupActionDestTest) TEST_F(RouteManagerTest, MergeRouteEntryWithDropActionDestTest) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); - P4RouteEntry dest = {}; - dest.vrf_id = gVrfName; - dest.route_prefix = swss_ipv4_route_prefix; - dest.action = p4orch::kDrop; - dest.route_entry_key = KeyGenerator::generateRouteKey(dest.vrf_id, dest.route_prefix); + auto dest = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kDrop, ""); dest.sai_route_entry.vr_id = gVrfOid; dest.sai_route_entry.switch_id = gSwitchId; copy(dest.sai_route_entry.destination, swss_ipv4_route_prefix); // Source is identical to destination. - P4RouteEntry src = {}; - src.vrf_id = gVrfName; - src.route_prefix = swss_ipv4_route_prefix; - src.action = p4orch::kDrop; - src.route_entry_key = KeyGenerator::generateRouteKey(src.vrf_id, src.route_prefix); + auto src = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kDrop, ""); P4RouteEntry ret = {}; EXPECT_FALSE(MergeRouteEntry(dest, src, &ret)); VerifyRouteEntriesEq(dest, ret); // Source has nexthop ID action and dest has drop action. 
- src = {}; - src.vrf_id = gVrfName; - src.route_prefix = swss_ipv4_route_prefix; - src.action = p4orch::kSetNexthopId; - src.nexthop_id = kNexthopId1; - src.route_entry_key = KeyGenerator::generateRouteKey(src.vrf_id, src.route_prefix); + src = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId1); EXPECT_TRUE(MergeRouteEntry(dest, src, &ret)); P4RouteEntry expect_entry = dest; expect_entry.action = p4orch::kSetNexthopId; expect_entry.nexthop_id = kNexthopId1; VerifyRouteEntriesEq(expect_entry, ret); - // Source has wcmp group action and dest has drop action. - src = {}; - src.vrf_id = gVrfName; - src.route_prefix = swss_ipv4_route_prefix; - src.action = p4orch::kSetWcmpGroupId; - src.wcmp_group = kWcmpGroup1; - src.route_entry_key = KeyGenerator::generateRouteKey(src.vrf_id, src.route_prefix); + // Source has wcmp group and metadata action and dest has drop action. + src = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetWcmpGroupIdAndMetadata, kWcmpGroup1, + kMetadata1); + EXPECT_TRUE(MergeRouteEntry(dest, src, &ret)); + expect_entry = dest; + expect_entry.action = p4orch::kSetWcmpGroupIdAndMetadata; + expect_entry.nexthop_id = ""; + expect_entry.wcmp_group = kWcmpGroup1; + expect_entry.route_metadata = kMetadata1; + VerifyRouteEntriesEq(expect_entry, ret); +} + +TEST_F(RouteManagerTest, MergeRouteEntryWithTrapActionDestTest) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + auto dest = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kTrap, ""); + dest.sai_route_entry.vr_id = gVrfOid; + dest.sai_route_entry.switch_id = gSwitchId; + copy(dest.sai_route_entry.destination, swss_ipv4_route_prefix); + + // Source is identical to destination. 
+ auto src = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kTrap, ""); + P4RouteEntry ret = {}; + EXPECT_FALSE(MergeRouteEntry(dest, src, &ret)); + VerifyRouteEntriesEq(dest, ret); + + // Source has nexthop ID action and dest has trap action. + src = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId1); + EXPECT_TRUE(MergeRouteEntry(dest, src, &ret)); + P4RouteEntry expect_entry = dest; + expect_entry.action = p4orch::kSetNexthopId; + expect_entry.nexthop_id = kNexthopId1; + VerifyRouteEntriesEq(expect_entry, ret); + + // Source has wcmp group action and dest has trap action. + src = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetWcmpGroupId, kWcmpGroup1); EXPECT_TRUE(MergeRouteEntry(dest, src, &ret)); expect_entry = dest; expect_entry.action = p4orch::kSetWcmpGroupId; @@ -452,12 +640,8 @@ TEST_F(RouteManagerTest, DeserializeRouteEntryWithNexthopIdActionTest) auto route_entry_or = DeserializeRouteEntry(key, attributes, APP_P4RT_IPV4_TABLE_NAME); EXPECT_TRUE(route_entry_or.ok()); auto &route_entry = *route_entry_or; - P4RouteEntry expect_entry = {}; - expect_entry.vrf_id = "b4-traffic"; - expect_entry.route_prefix = swss::IpPrefix("10.11.12.0/24"); - expect_entry.action = p4orch::kSetNexthopId; - expect_entry.nexthop_id = kNexthopId1; - expect_entry.route_entry_key = KeyGenerator::generateRouteKey(expect_entry.vrf_id, expect_entry.route_prefix); + auto expect_entry = + GenerateP4RouteEntry("b4-traffic", swss::IpPrefix("10.11.12.0/24"), p4orch::kSetNexthopId, kNexthopId1); VerifyRouteEntriesEq(expect_entry, route_entry); } @@ -470,12 +654,38 @@ TEST_F(RouteManagerTest, DeserializeRouteEntryWithWcmpGroupActionTest) auto route_entry_or = DeserializeRouteEntry(key, attributes, APP_P4RT_IPV4_TABLE_NAME); EXPECT_TRUE(route_entry_or.ok()); auto &route_entry = *route_entry_or; - P4RouteEntry expect_entry = {}; - expect_entry.vrf_id = "b4-traffic"; - expect_entry.route_prefix = 
swss::IpPrefix("10.11.12.0/24"); - expect_entry.action = p4orch::kSetWcmpGroupId; - expect_entry.wcmp_group = kWcmpGroup1; - expect_entry.route_entry_key = KeyGenerator::generateRouteKey(expect_entry.vrf_id, expect_entry.route_prefix); + auto expect_entry = + GenerateP4RouteEntry("b4-traffic", swss::IpPrefix("10.11.12.0/24"), p4orch::kSetWcmpGroupId, kWcmpGroup1); + VerifyRouteEntriesEq(expect_entry, route_entry); +} + +TEST_F(RouteManagerTest, DeserializeRouteEntryWithNexthopIdAdnMetadataActionTest) +{ + std::string key = R"({"match/vrf_id":"b4-traffic","match/ipv4_dst":"10.11.12.0/24"})"; + std::vector attributes; + attributes.push_back(swss::FieldValueTuple{p4orch::kAction, p4orch::kSetNexthopIdAndMetadata}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kNexthopId), kNexthopId1}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kRouteMetadata), kMetadata1}); + auto route_entry_or = DeserializeRouteEntry(key, attributes, APP_P4RT_IPV4_TABLE_NAME); + EXPECT_TRUE(route_entry_or.ok()); + auto &route_entry = *route_entry_or; + auto expect_entry = GenerateP4RouteEntry("b4-traffic", swss::IpPrefix("10.11.12.0/24"), + p4orch::kSetNexthopIdAndMetadata, kNexthopId1, kMetadata1); + VerifyRouteEntriesEq(expect_entry, route_entry); +} + +TEST_F(RouteManagerTest, DeserializeRouteEntryWithWcmpGroupAndMetadataActionTest) +{ + std::string key = R"({"match/vrf_id":"b4-traffic","match/ipv4_dst":"10.11.12.0/24"})"; + std::vector attributes; + attributes.push_back(swss::FieldValueTuple{p4orch::kAction, p4orch::kSetWcmpGroupIdAndMetadata}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kWcmpGroupId), kWcmpGroup1}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kRouteMetadata), kMetadata1}); + auto route_entry_or = DeserializeRouteEntry(key, attributes, APP_P4RT_IPV4_TABLE_NAME); + EXPECT_TRUE(route_entry_or.ok()); + auto &route_entry = *route_entry_or; + auto expect_entry = 
GenerateP4RouteEntry("b4-traffic", swss::IpPrefix("10.11.12.0/24"), + p4orch::kSetWcmpGroupIdAndMetadata, kWcmpGroup1, kMetadata1); VerifyRouteEntriesEq(expect_entry, route_entry); } @@ -487,11 +697,19 @@ TEST_F(RouteManagerTest, DeserializeRouteEntryWithDropActionTest) auto route_entry_or = DeserializeRouteEntry(key, attributes, APP_P4RT_IPV6_TABLE_NAME); EXPECT_TRUE(route_entry_or.ok()); auto &route_entry = *route_entry_or; - P4RouteEntry expect_entry = {}; - expect_entry.vrf_id = "b4-traffic"; - expect_entry.route_prefix = swss::IpPrefix("2001:db8:1::/32"); - expect_entry.action = p4orch::kDrop; - expect_entry.route_entry_key = KeyGenerator::generateRouteKey(expect_entry.vrf_id, expect_entry.route_prefix); + auto expect_entry = GenerateP4RouteEntry("b4-traffic", swss::IpPrefix("2001:db8:1::/32"), p4orch::kDrop, ""); + VerifyRouteEntriesEq(expect_entry, route_entry); +} + +TEST_F(RouteManagerTest, DeserializeRouteEntryWithTrapActionTest) +{ + std::string key = R"({"match/vrf_id":"b4-traffic","match/ipv6_dst":"2001:db8:1::/32"})"; + std::vector attributes; + attributes.push_back(swss::FieldValueTuple{p4orch::kAction, p4orch::kTrap}); + auto route_entry_or = DeserializeRouteEntry(key, attributes, APP_P4RT_IPV6_TABLE_NAME); + EXPECT_TRUE(route_entry_or.ok()); + auto &route_entry = *route_entry_or; + auto expect_entry = GenerateP4RouteEntry("b4-traffic", swss::IpPrefix("2001:db8:1::/32"), p4orch::kTrap, ""); VerifyRouteEntriesEq(expect_entry, route_entry); } @@ -530,11 +748,7 @@ TEST_F(RouteManagerTest, DeserializeRouteEntryWithoutIpv4WildcardLpmMatchShouldS auto route_entry_or = DeserializeRouteEntry(key, attributes, APP_P4RT_IPV4_TABLE_NAME); EXPECT_TRUE(route_entry_or.ok()); auto &route_entry = *route_entry_or; - P4RouteEntry expect_entry = {}; - expect_entry.vrf_id = "b4-traffic"; - expect_entry.route_prefix = swss::IpPrefix("0.0.0.0/0"); - expect_entry.action = p4orch::kDrop; - expect_entry.route_entry_key = KeyGenerator::generateRouteKey(expect_entry.vrf_id, 
expect_entry.route_prefix); + auto expect_entry = GenerateP4RouteEntry("b4-traffic", swss::IpPrefix("0.0.0.0/0"), p4orch::kDrop, ""); VerifyRouteEntriesEq(expect_entry, route_entry); } @@ -546,279 +760,293 @@ TEST_F(RouteManagerTest, DeserializeRouteEntryWithoutIpv6WildcardLpmMatchShouldS auto route_entry_or = DeserializeRouteEntry(key, attributes, APP_P4RT_IPV6_TABLE_NAME); EXPECT_TRUE(route_entry_or.ok()); auto &route_entry = *route_entry_or; - P4RouteEntry expect_entry = {}; - expect_entry.vrf_id = "b4-traffic"; - expect_entry.route_prefix = swss::IpPrefix("::/0"); - expect_entry.action = p4orch::kDrop; - expect_entry.route_entry_key = KeyGenerator::generateRouteKey(expect_entry.vrf_id, expect_entry.route_prefix); + auto expect_entry = GenerateP4RouteEntry("b4-traffic", swss::IpPrefix("::/0"), p4orch::kDrop, ""); VerifyRouteEntriesEq(expect_entry, route_entry); } -TEST_F(RouteManagerTest, ValidateRouteEntryTest) +TEST_F(RouteManagerTest, ValidateRouteEntryNexthopActionWithInvalidNexthopShouldFail) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId1); + EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, ValidateRouteEntry(route_entry, SET_COMMAND)); +} - // ValidateRouteEntry should fail when the nexthop does not exist in - // centralized map. 
- P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.action = p4orch::kSetNexthopId; - route_entry.nexthop_id = kNexthopId1; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); - EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, ValidateRouteEntry(route_entry)); +TEST_F(RouteManagerTest, ValidateRouteEntryNexthopActionWithValidNexthopShouldSucceed) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId1); p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry.nexthop_id), kNexthopOid1); - EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ValidateRouteEntry(route_entry)); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ValidateRouteEntry(route_entry, SET_COMMAND)); } TEST_F(RouteManagerTest, ValidateRouteEntryWcmpGroupActionWithInvalidWcmpGroupShouldFail) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.action = p4orch::kSetWcmpGroupId; - route_entry.wcmp_group = kWcmpGroup1; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); - EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, ValidateRouteEntry(route_entry)); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetWcmpGroupId, kWcmpGroup1); + EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, ValidateRouteEntry(route_entry, SET_COMMAND)); } TEST_F(RouteManagerTest, ValidateRouteEntryWcmpGroupActionWithValidWcmpGroupShouldSucceed) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.action = 
p4orch::kSetWcmpGroupId; - route_entry.wcmp_group = kWcmpGroup1; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetWcmpGroupId, kWcmpGroup1); p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, KeyGenerator::generateWcmpGroupKey(route_entry.wcmp_group), kWcmpGroupOid1); - EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ValidateRouteEntry(route_entry)); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ValidateRouteEntry(route_entry, SET_COMMAND)); +} + +TEST_F(RouteManagerTest, ValidateRouteEntryWithInvalidCommandShouldFail) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kDrop, ""); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateRouteEntry(route_entry, "invalid")); } TEST_F(RouteManagerTest, ValidateSetRouteEntryDoesNotExistInManagerShouldFail) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.nexthop_id = kNexthopId1; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); - EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateSetRouteEntry(route_entry)); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId1); + route_entry.action = ""; + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry.nexthop_id), + kNexthopOid1); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateRouteEntry(route_entry, SET_COMMAND)); } TEST_F(RouteManagerTest, ValidateSetRouteEntryExistsInMapperDoesNotExistInManagerShouldFail) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - 
route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.action = p4orch::kSetNexthopId; - route_entry.nexthop_id = kNexthopId1; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId1); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry.nexthop_id), + kNexthopOid1); p4_oid_mapper_.setDummyOID(SAI_OBJECT_TYPE_ROUTE_ENTRY, route_entry.route_entry_key); - EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, ValidateSetRouteEntry(route_entry)); + EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, ValidateRouteEntry(route_entry, SET_COMMAND)); } TEST_F(RouteManagerTest, ValidateSetRouteEntryExistsInManagerDoesNotExistInMapperShouldFail) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.nexthop_id = kNexthopId2; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId1); p4_oid_mapper_.eraseOID(SAI_OBJECT_TYPE_ROUTE_ENTRY, route_entry.route_entry_key); - EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, ValidateSetRouteEntry(route_entry)); + EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, ValidateRouteEntry(route_entry, SET_COMMAND)); } TEST_F(RouteManagerTest, ValidateSetRouteEntryNexthopIdActionWithoutNexthopIdShouldFail) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.action = p4orch::kSetNexthopId; - route_entry.route_entry_key = 
KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); - EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateSetRouteEntry(route_entry)); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, ""); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateRouteEntry(route_entry, SET_COMMAND)); } TEST_F(RouteManagerTest, ValidateSetRouteEntryNexthopIdActionWithWcmpGroupShouldFail) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.action = p4orch::kSetNexthopId; - route_entry.nexthop_id = kNexthopId1; + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId1); route_entry.wcmp_group = kWcmpGroup1; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); - EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateSetRouteEntry(route_entry)); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry.nexthop_id), + kNexthopOid1); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, KeyGenerator::generateWcmpGroupKey(route_entry.wcmp_group), + kWcmpGroupOid1); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateRouteEntry(route_entry, SET_COMMAND)); } TEST_F(RouteManagerTest, ValidateSetRouteEntryWcmpGroupActionWithoutWcmpGroupShouldFail) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.action = p4orch::kSetWcmpGroupId; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); - EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateSetRouteEntry(route_entry)); + auto route_entry = GenerateP4RouteEntry(gVrfName, 
swss_ipv4_route_prefix, p4orch::kSetWcmpGroupId, ""); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateRouteEntry(route_entry, SET_COMMAND)); } TEST_F(RouteManagerTest, ValidateSetRouteEntryWcmpGroupActionWithNexthopIdShouldFail) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.action = p4orch::kSetWcmpGroupId; + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetWcmpGroupId, kWcmpGroup1); route_entry.nexthop_id = kNexthopId1; - route_entry.wcmp_group = kWcmpGroup1; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); - EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateSetRouteEntry(route_entry)); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry.nexthop_id), + kNexthopOid1); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, KeyGenerator::generateWcmpGroupKey(route_entry.wcmp_group), + kWcmpGroupOid1); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateRouteEntry(route_entry, SET_COMMAND)); } TEST_F(RouteManagerTest, ValidateSetRouteEntryDropActionWithNexthopIdShouldFail) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.action = p4orch::kDrop; + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kDrop, ""); route_entry.nexthop_id = kNexthopId1; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); - EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateSetRouteEntry(route_entry)); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry.nexthop_id), + kNexthopOid1); + 
EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateRouteEntry(route_entry, SET_COMMAND)); +} + +TEST_F(RouteManagerTest, ValidateSetRouteEntryWcmpGroupActionWithNonemptyMetadataShouldFail) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + auto route_entry = + GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetWcmpGroupId, kWcmpGroup1, kMetadata1); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, KeyGenerator::generateWcmpGroupKey(route_entry.wcmp_group), + kWcmpGroupOid1); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateRouteEntry(route_entry, SET_COMMAND)); +} + +TEST_F(RouteManagerTest, ValidateSetRouteEntryNexthopIdAndMetadataActionWithEmptyMetadataShouldFail) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + auto route_entry = + GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopIdAndMetadata, kNexthopId1); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry.nexthop_id), + kNexthopOid1); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateRouteEntry(route_entry, SET_COMMAND)); +} + +TEST_F(RouteManagerTest, ValidateSetRouteEntryNexthopIdAndMetadataActionWithInvalidMetadataShouldFail) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopIdAndMetadata, + kNexthopId1, "invalid_int"); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry.nexthop_id), + kNexthopOid1); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateRouteEntry(route_entry, SET_COMMAND)); } TEST_F(RouteManagerTest, ValidateSetRouteEntryDropActionWithWcmpGroupShouldFail) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.action = p4orch::kDrop; + auto route_entry = 
GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kDrop, ""); + route_entry.wcmp_group = kWcmpGroup1; + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, KeyGenerator::generateWcmpGroupKey(route_entry.wcmp_group), + kWcmpGroupOid1); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateRouteEntry(route_entry, SET_COMMAND)); +} + +TEST_F(RouteManagerTest, ValidateSetRouteEntryTrapActionWithNexthopIdShouldFail) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kTrap, ""); + route_entry.nexthop_id = kNexthopId1; + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry.nexthop_id), + kNexthopOid1); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateRouteEntry(route_entry, SET_COMMAND)); +} + +TEST_F(RouteManagerTest, ValidateSetRouteEntryTrapActionWithWcmpGroupShouldFail) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kTrap, ""); route_entry.wcmp_group = kWcmpGroup1; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); - EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateSetRouteEntry(route_entry)); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, KeyGenerator::generateWcmpGroupKey(route_entry.wcmp_group), + kWcmpGroupOid1); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateRouteEntry(route_entry, SET_COMMAND)); } TEST_F(RouteManagerTest, ValidateSetRouteEntryInvalidActionShouldFail) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kDrop, ""); route_entry.action = "invalid"; - route_entry.route_entry_key = 
KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); - EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateSetRouteEntry(route_entry)); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateRouteEntry(route_entry, SET_COMMAND)); } TEST_F(RouteManagerTest, ValidateSetRouteEntrySucceeds) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.nexthop_id = kNexthopId2; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); - EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ValidateSetRouteEntry(route_entry)); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId2); + route_entry.action = ""; + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry.nexthop_id), + kNexthopOid2); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ValidateRouteEntry(route_entry, SET_COMMAND)); } TEST_F(RouteManagerTest, ValidateDelRouteEntryDoesNotExistInManagerShouldFail) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); - EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, ValidateDelRouteEntry(route_entry)); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, "", ""); + EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, ValidateRouteEntry(route_entry, DEL_COMMAND)); } TEST_F(RouteManagerTest, ValidateDelRouteEntryDoesNotExistInMapperShouldFail) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, 
kNexthopId1, kNexthopOid1); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, "", ""); p4_oid_mapper_.eraseOID(SAI_OBJECT_TYPE_ROUTE_ENTRY, route_entry.route_entry_key); - // (TODO): Expect critical state. - EXPECT_EQ(StatusCode::SWSS_RC_INTERNAL, ValidateDelRouteEntry(route_entry)); + // TODO: Expect critical state. + EXPECT_EQ(StatusCode::SWSS_RC_INTERNAL, ValidateRouteEntry(route_entry, DEL_COMMAND)); } TEST_F(RouteManagerTest, ValidateDelRouteEntryHasActionShouldFail) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.action = p4orch::kSetNexthopId; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); - EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateDelRouteEntry(route_entry)); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, ""); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateRouteEntry(route_entry, DEL_COMMAND)); } TEST_F(RouteManagerTest, ValidateDelRouteEntryHasNexthopIdShouldFail) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.nexthop_id = kNexthopId1; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); - EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, 
ValidateDelRouteEntry(route_entry)); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId1); + route_entry.action = ""; + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry.nexthop_id), + kNexthopOid1); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateRouteEntry(route_entry, DEL_COMMAND)); } TEST_F(RouteManagerTest, ValidateDelRouteEntryHasWcmpGroupShouldFail) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.wcmp_group = kWcmpGroup1; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); - EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateDelRouteEntry(route_entry)); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetWcmpGroupId, kWcmpGroup1); + route_entry.action = ""; + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, KeyGenerator::generateWcmpGroupKey(route_entry.wcmp_group), + kWcmpGroupOid1); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateRouteEntry(route_entry, DEL_COMMAND)); +} + +TEST_F(RouteManagerTest, ValidateDelRouteEntryHasMetadataShouldFail) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, "", "", kMetadata1); + EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ValidateRouteEntry(route_entry, DEL_COMMAND)); } TEST_F(RouteManagerTest, ValidateDelRouteEntrySucceeds) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); - P4RouteEntry route_entry = {}; - 
route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); - EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ValidateDelRouteEntry(route_entry)); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, "", ""); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ValidateRouteEntry(route_entry, DEL_COMMAND)); } TEST_F(RouteManagerTest, CreateRouteEntryWithSaiErrorShouldFail) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); - P4RouteEntry route_entry; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.action = p4orch::kDrop; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kDrop, ""); - EXPECT_CALL(mock_sai_route_, create_route_entry(_, _, _)).Times(3).WillRepeatedly(Return(SAI_STATUS_FAILURE)); - EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, CreateRouteEntry(route_entry)); + std::vector exp_status{SAI_STATUS_FAILURE}; + EXPECT_CALL(mock_sai_route_, create_route_entries(_, _, _, _, _, _)) + .Times(3) + .WillRepeatedly(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_FAILURE))); + EXPECT_THAT(CreateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); route_entry.action = p4orch::kSetNexthopId; route_entry.nexthop_id = kNexthopId1; - EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, CreateRouteEntry(route_entry)); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry.nexthop_id), + kNexthopOid1); + EXPECT_THAT(CreateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); route_entry.action = p4orch::kSetWcmpGroupId; route_entry.wcmp_group = kWcmpGroup1; - EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, 
CreateRouteEntry(route_entry)); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, KeyGenerator::generateWcmpGroupKey(route_entry.wcmp_group), + kWcmpGroupOid1); + EXPECT_THAT(CreateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); } TEST_F(RouteManagerTest, CreateNexthopIdIpv4RouteSucceeds) @@ -826,21 +1054,28 @@ TEST_F(RouteManagerTest, CreateNexthopIdIpv4RouteSucceeds) auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); sai_ip_prefix_t sai_ipv4_route_prefix; copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); - P4RouteEntry route_entry; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.action = p4orch::kSetNexthopId; - route_entry.nexthop_id = kNexthopId1; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId1); p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry.nexthop_id), kNexthopOid1); - EXPECT_CALL( - mock_sai_route_, - create_route_entry(Truly(std::bind(MatchSaiRouteEntry, sai_ipv4_route_prefix, std::placeholders::_1, gVrfOid)), - Eq(1), Truly(std::bind(MatchSaiAttributeNexthopId, kNexthopOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, CreateRouteEntry(route_entry)); + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv4_route_prefix; + + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr.value.oid = kNexthopOid1; + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, + create_route_entries(Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + ArrayEq(std::vector{1}), + 
AttrArrayArrayEq(std::vector>{{exp_sai_attr}}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(CreateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); uint32_t ref_cnt; EXPECT_TRUE( @@ -853,45 +1088,135 @@ TEST_F(RouteManagerTest, CreateNexthopIdIpv6RouteSucceeds) auto swss_ipv6_route_prefix = swss::IpPrefix(kIpv6Prefix); sai_ip_prefix_t sai_ipv6_route_prefix; copy(sai_ipv6_route_prefix, swss_ipv6_route_prefix); - P4RouteEntry route_entry; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv6_route_prefix; - route_entry.action = p4orch::kSetNexthopId; - route_entry.nexthop_id = kNexthopId1; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv6_route_prefix, p4orch::kSetNexthopId, kNexthopId1); p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry.nexthop_id), kNexthopOid1); - EXPECT_CALL( - mock_sai_route_, - create_route_entry(Truly(std::bind(MatchSaiRouteEntry, sai_ipv6_route_prefix, std::placeholders::_1, gVrfOid)), - Eq(1), Truly(std::bind(MatchSaiAttributeNexthopId, kNexthopOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, CreateRouteEntry(route_entry)); - VerifyRouteEntry(route_entry, sai_ipv6_route_prefix, gVrfOid); + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv6_route_prefix; + + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr.value.oid = kNexthopOid1; + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, 
+ create_route_entries(Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + ArrayEq(std::vector{1}), + AttrArrayArrayEq(std::vector>{{exp_sai_attr}}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(CreateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); + VerifyRouteEntry(route_entry, sai_ipv6_route_prefix, gVrfOid); + uint32_t ref_cnt; + EXPECT_TRUE( + p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), &ref_cnt)); + EXPECT_EQ(1, ref_cnt); +} + +TEST_F(RouteManagerTest, CreateNexthopIdWithMetadataIpv4RouteSucceeds) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + sai_ip_prefix_t sai_ipv4_route_prefix; + copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopIdAndMetadata, + kNexthopId1, kMetadata1); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry.nexthop_id), + kNexthopOid1); + + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv4_route_prefix; + + std::vector exp_sai_attrs; + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr.value.oid = kNexthopOid1; + exp_sai_attrs.push_back(exp_sai_attr); + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_META_DATA; + exp_sai_attr.value.u32 = kMetadataInt1; + exp_sai_attrs.push_back(exp_sai_attr); + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, + create_route_entries(Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + ArrayEq(std::vector{static_cast(exp_sai_attrs.size())}), + AttrArrayArrayEq(std::vector>{exp_sai_attrs}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), 
_)) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(CreateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); + VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); uint32_t ref_cnt; EXPECT_TRUE( p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), &ref_cnt)); EXPECT_EQ(1, ref_cnt); } +TEST_F(RouteManagerTest, CreateDropSetMetadataRouteSucceeds) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + sai_ip_prefix_t sai_ipv4_route_prefix; + copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); + auto route_entry = + GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetMetadataAndDrop, "", kMetadata1); + + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv4_route_prefix; + + std::vector exp_sai_attrs; + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + exp_sai_attr.value.s32 = SAI_PACKET_ACTION_DROP; + exp_sai_attrs.push_back(exp_sai_attr); + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_META_DATA; + exp_sai_attr.value.u32 = kMetadataInt1; + exp_sai_attrs.push_back(exp_sai_attr); + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, + create_route_entries(Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + ArrayEq(std::vector{static_cast(exp_sai_attrs.size())}), + AttrArrayArrayEq(std::vector>{exp_sai_attrs}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + + EXPECT_THAT(CreateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); + VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); +} + TEST_F(RouteManagerTest, CreateDropIpv4RouteSucceeds) 
{ auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); sai_ip_prefix_t sai_ipv4_route_prefix; copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); - P4RouteEntry route_entry; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.action = p4orch::kDrop; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kDrop, ""); + + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv4_route_prefix; + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + exp_sai_attr.value.s32 = SAI_PACKET_ACTION_DROP; + + std::vector exp_status{SAI_STATUS_SUCCESS}; EXPECT_CALL(mock_sai_route_, - create_route_entry( - Truly(std::bind(MatchSaiRouteEntry, sai_ipv4_route_prefix, std::placeholders::_1, gVrfOid)), Eq(1), - Truly(std::bind(MatchSaiAttributeAction, SAI_PACKET_ACTION_DROP, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, CreateRouteEntry(route_entry)); + create_route_entries(Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + ArrayEq(std::vector{1}), + AttrArrayArrayEq(std::vector>{{exp_sai_attr}}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(CreateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); } @@ -900,18 +1225,82 @@ TEST_F(RouteManagerTest, CreateDropIpv6RouteSucceeds) auto swss_ipv6_route_prefix = swss::IpPrefix(kIpv6Prefix); sai_ip_prefix_t sai_ipv6_route_prefix; copy(sai_ipv6_route_prefix, swss_ipv6_route_prefix); - P4RouteEntry route_entry; - 
route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv6_route_prefix; - route_entry.action = p4orch::kDrop; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv6_route_prefix, p4orch::kDrop, ""); + + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv6_route_prefix; + + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + exp_sai_attr.value.s32 = SAI_PACKET_ACTION_DROP; + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, + create_route_entries(Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + ArrayEq(std::vector{1}), + AttrArrayArrayEq(std::vector>{{exp_sai_attr}}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(CreateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); + VerifyRouteEntry(route_entry, sai_ipv6_route_prefix, gVrfOid); +} + +TEST_F(RouteManagerTest, CreateTrapIpv4RouteSucceeds) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + sai_ip_prefix_t sai_ipv4_route_prefix; + copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kTrap, ""); + + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv4_route_prefix; + + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + exp_sai_attr.value.s32 = SAI_PACKET_ACTION_TRAP; + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, + create_route_entries(Eq(1), 
RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + ArrayEq(std::vector{1}), + AttrArrayArrayEq(std::vector>{{exp_sai_attr}}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(CreateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); + VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); +} + +TEST_F(RouteManagerTest, CreateTrapIpv6RouteSucceeds) +{ + auto swss_ipv6_route_prefix = swss::IpPrefix(kIpv6Prefix); + sai_ip_prefix_t sai_ipv6_route_prefix; + copy(sai_ipv6_route_prefix, swss_ipv6_route_prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv6_route_prefix, p4orch::kTrap, ""); + + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv6_route_prefix; + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + exp_sai_attr.value.s32 = SAI_PACKET_ACTION_TRAP; + + std::vector exp_status{SAI_STATUS_SUCCESS}; EXPECT_CALL(mock_sai_route_, - create_route_entry( - Truly(std::bind(MatchSaiRouteEntry, sai_ipv6_route_prefix, std::placeholders::_1, gVrfOid)), Eq(1), - Truly(std::bind(MatchSaiAttributeAction, SAI_PACKET_ACTION_DROP, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, CreateRouteEntry(route_entry)); + create_route_entries(Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + ArrayEq(std::vector{1}), + AttrArrayArrayEq(std::vector>{{exp_sai_attr}}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(CreateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); VerifyRouteEntry(route_entry, sai_ipv6_route_prefix, 
gVrfOid); } @@ -920,21 +1309,28 @@ TEST_F(RouteManagerTest, CreateWcmpIpv4RouteSucceeds) auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); sai_ip_prefix_t sai_ipv4_route_prefix; copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); - P4RouteEntry route_entry; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.action = p4orch::kSetWcmpGroupId; - route_entry.wcmp_group = kWcmpGroup1; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetWcmpGroupId, kWcmpGroup1); p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, KeyGenerator::generateWcmpGroupKey(route_entry.wcmp_group), kWcmpGroupOid1); - EXPECT_CALL( - mock_sai_route_, - create_route_entry(Truly(std::bind(MatchSaiRouteEntry, sai_ipv4_route_prefix, std::placeholders::_1, gVrfOid)), - Eq(1), Truly(std::bind(MatchSaiAttributeNexthopId, kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, CreateRouteEntry(route_entry)); + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv4_route_prefix; + + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr.value.oid = kWcmpGroupOid1; + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, + create_route_entries(Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + ArrayEq(std::vector{1}), + AttrArrayArrayEq(std::vector>{{exp_sai_attr}}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(CreateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); 
VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); uint32_t ref_cnt; EXPECT_TRUE(p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, @@ -947,21 +1343,68 @@ TEST_F(RouteManagerTest, CreateWcmpIpv6RouteSucceeds) auto swss_ipv6_route_prefix = swss::IpPrefix(kIpv6Prefix); sai_ip_prefix_t sai_ipv6_route_prefix; copy(sai_ipv6_route_prefix, swss_ipv6_route_prefix); - P4RouteEntry route_entry; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv6_route_prefix; - route_entry.action = p4orch::kSetWcmpGroupId; - route_entry.wcmp_group = kWcmpGroup1; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv6_route_prefix, p4orch::kSetWcmpGroupId, kWcmpGroup1); p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, KeyGenerator::generateWcmpGroupKey(route_entry.wcmp_group), kWcmpGroupOid1); - EXPECT_CALL( - mock_sai_route_, - create_route_entry(Truly(std::bind(MatchSaiRouteEntry, sai_ipv6_route_prefix, std::placeholders::_1, gVrfOid)), - Eq(1), Truly(std::bind(MatchSaiAttributeNexthopId, kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, CreateRouteEntry(route_entry)); + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv6_route_prefix; + + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr.value.oid = kWcmpGroupOid1; + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, + create_route_entries(Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + ArrayEq(std::vector{1}), + AttrArrayArrayEq(std::vector>{{exp_sai_attr}}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), 
Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(CreateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); + VerifyRouteEntry(route_entry, sai_ipv6_route_prefix, gVrfOid); + uint32_t ref_cnt; + EXPECT_TRUE(p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, + KeyGenerator::generateWcmpGroupKey(kWcmpGroup1), &ref_cnt)); + EXPECT_EQ(1, ref_cnt); +} + +TEST_F(RouteManagerTest, CreateWcmpWithMetadataIpv6RouteSucceeds) +{ + auto swss_ipv6_route_prefix = swss::IpPrefix(kIpv6Prefix); + sai_ip_prefix_t sai_ipv6_route_prefix; + copy(sai_ipv6_route_prefix, swss_ipv6_route_prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv6_route_prefix, p4orch::kSetWcmpGroupIdAndMetadata, + kWcmpGroup1, kMetadata1); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, KeyGenerator::generateWcmpGroupKey(route_entry.wcmp_group), + kWcmpGroupOid1); + + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv6_route_prefix; + + std::vector exp_sai_attrs; + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr.value.oid = kWcmpGroupOid1; + exp_sai_attrs.push_back(exp_sai_attr); + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_META_DATA; + exp_sai_attr.value.u32 = kMetadataInt1; + exp_sai_attrs.push_back(exp_sai_attr); + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, + create_route_entries(Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + ArrayEq(std::vector{static_cast(exp_sai_attrs.size())}), + AttrArrayArrayEq(std::vector>{exp_sai_attrs}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(CreateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); VerifyRouteEntry(route_entry, 
sai_ipv6_route_prefix, gVrfOid); uint32_t ref_cnt; EXPECT_TRUE(p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, @@ -976,39 +1419,30 @@ TEST_F(RouteManagerTest, UpdateRouteEntryWcmpWithSaiErrorShouldFail) copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); SetupWcmpGroupRouteEntry(gVrfName, swss_ipv4_route_prefix, kWcmpGroup1, kWcmpGroupOid1); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.action = p4orch::kSetWcmpGroupId; - route_entry.wcmp_group = kWcmpGroup2; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetWcmpGroupId, kWcmpGroup2); p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, KeyGenerator::generateWcmpGroupKey(route_entry.wcmp_group), kWcmpGroupOid2); - EXPECT_CALL(mock_sai_route_, set_route_entry_attribute(_, _)).WillOnce(Return(SAI_STATUS_FAILURE)); - EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, UpdateRouteEntry(route_entry)); - EXPECT_CALL(mock_sai_route_, set_route_entry_attribute(_, _)) - .WillOnce(Return(SAI_STATUS_SUCCESS)) - .WillOnce(Return(SAI_STATUS_FAILURE)) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, UpdateRouteEntry(route_entry)); + std::vector exp_status{SAI_STATUS_FAILURE}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_FAILURE))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); } -TEST_F(RouteManagerTest, UpdateRouteEntryWcmpNotExistInMapperShouldFail) +TEST_F(RouteManagerTest, UpdateRouteEntryWcmpNotExistInMapperShouldRaiseCriticalState) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); sai_ip_prefix_t sai_ipv4_route_prefix; copy(sai_ipv4_route_prefix, 
swss_ipv4_route_prefix); SetupWcmpGroupRouteEntry(gVrfName, swss_ipv4_route_prefix, kWcmpGroup1, kWcmpGroupOid1); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.action = p4orch::kSetWcmpGroupId; - route_entry.wcmp_group = kWcmpGroup2; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); - - // (TODO): Expect critical state. - EXPECT_EQ(StatusCode::SWSS_RC_INTERNAL, UpdateRouteEntry(route_entry)); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetWcmpGroupId, kWcmpGroup2); + std::vector exp_status{SAI_STATUS_FAILURE}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_FAILURE))); + // TODO: Expect critical state. + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); } TEST_F(RouteManagerTest, UpdateRouteFromSetWcmpToSetNextHopSucceeds) @@ -1018,25 +1452,75 @@ TEST_F(RouteManagerTest, UpdateRouteFromSetWcmpToSetNextHopSucceeds) copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); SetupWcmpGroupRouteEntry(gVrfName, swss_ipv4_route_prefix, kWcmpGroup1, kWcmpGroupOid1); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.action = p4orch::kSetNexthopId; - route_entry.nexthop_id = kNexthopId2; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId2); p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry.nexthop_id), kNexthopOid2); - EXPECT_CALL(mock_sai_route_, - set_route_entry_attribute( - Truly(std::bind(MatchSaiRouteEntry, 
sai_ipv4_route_prefix, std::placeholders::_1, gVrfOid)), - Truly(std::bind(MatchSaiAttributeNexthopId, kNexthopOid2, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_route_, - set_route_entry_attribute( - Truly(std::bind(MatchSaiRouteEntry, sai_ipv4_route_prefix, std::placeholders::_1, gVrfOid)), - Truly(std::bind(MatchSaiAttributeAction, SAI_PACKET_ACTION_FORWARD, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, UpdateRouteEntry(route_entry)); + + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv4_route_prefix; + + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr.value.oid = kNexthopOid2; + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); + VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); + uint32_t ref_cnt; + EXPECT_TRUE(p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, + KeyGenerator::generateWcmpGroupKey(kWcmpGroup1), &ref_cnt)); + EXPECT_EQ(0, ref_cnt); + EXPECT_TRUE( + p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId2), &ref_cnt)); + EXPECT_EQ(1, ref_cnt); +} + +TEST_F(RouteManagerTest, UpdateRouteFromSetWcmpToSetNextHopAndMetadataSucceeds) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + sai_ip_prefix_t sai_ipv4_route_prefix; + copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); + 
SetupWcmpGroupRouteEntry(gVrfName, swss_ipv4_route_prefix, kWcmpGroup1, kWcmpGroupOid1); + + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopIdAndMetadata, + kNexthopId2, kMetadata2); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry.nexthop_id), + kNexthopOid2); + + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv4_route_prefix; + + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_META_DATA; + exp_sai_attr.value.u32 = kMetadataInt2; + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr.value.oid = kNexthopOid2; + + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); uint32_t ref_cnt; EXPECT_TRUE(p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, @@ -1054,25 +1538,27 @@ TEST_F(RouteManagerTest, UpdateRouteFromSetNexthopIdToSetWcmpSucceeds) copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); - P4RouteEntry route_entry = {}; - 
route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.action = p4orch::kSetWcmpGroupId; - route_entry.wcmp_group = kWcmpGroup2; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetWcmpGroupId, kWcmpGroup2); p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, KeyGenerator::generateWcmpGroupKey(route_entry.wcmp_group), kWcmpGroupOid2); - EXPECT_CALL(mock_sai_route_, - set_route_entry_attribute( - Truly(std::bind(MatchSaiRouteEntry, sai_ipv4_route_prefix, std::placeholders::_1, gVrfOid)), - Truly(std::bind(MatchSaiAttributeNexthopId, kWcmpGroupOid2, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_route_, - set_route_entry_attribute( - Truly(std::bind(MatchSaiRouteEntry, sai_ipv4_route_prefix, std::placeholders::_1, gVrfOid)), - Truly(std::bind(MatchSaiAttributeAction, SAI_PACKET_ACTION_FORWARD, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, UpdateRouteEntry(route_entry)); + + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv4_route_prefix; + + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr.value.oid = kWcmpGroupOid2; + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); route_entry.action = 
p4orch::kSetWcmpGroupId; VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); uint32_t ref_cnt; @@ -1084,40 +1570,82 @@ TEST_F(RouteManagerTest, UpdateRouteFromSetNexthopIdToSetWcmpSucceeds) EXPECT_EQ(1, ref_cnt); } +TEST_F(RouteManagerTest, UpdateRouteFromSetNexthopIdAndMetadataToSetWcmpSucceeds) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + sai_ip_prefix_t sai_ipv4_route_prefix; + copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); + SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1, kMetadata2); + + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetWcmpGroupId, kWcmpGroup2); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, KeyGenerator::generateWcmpGroupKey(route_entry.wcmp_group), + kWcmpGroupOid2); + + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv4_route_prefix; + + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_META_DATA; + exp_sai_attr.value.u32 = 0; + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr.value.oid = kWcmpGroupOid2; + + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + 
ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); + VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); + uint32_t ref_cnt; + EXPECT_TRUE( + p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), &ref_cnt)); + EXPECT_EQ(0, ref_cnt); + EXPECT_TRUE(p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, + KeyGenerator::generateWcmpGroupKey(kWcmpGroup2), &ref_cnt)); + EXPECT_EQ(1, ref_cnt); +} + TEST_F(RouteManagerTest, UpdateRouteEntryNexthopIdWithSaiErrorShouldFail) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.nexthop_id = kNexthopId2; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId2); p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry.nexthop_id), kNexthopOid2); - EXPECT_CALL(mock_sai_route_, set_route_entry_attribute(_, _)).WillOnce(Return(SAI_STATUS_FAILURE)); - EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, UpdateRouteEntry(route_entry)); - EXPECT_CALL(mock_sai_route_, set_route_entry_attribute(_, _)) - .WillOnce(Return(SAI_STATUS_SUCCESS)) - .WillOnce(Return(SAI_STATUS_FAILURE)) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, UpdateRouteEntry(route_entry)); + std::vector exp_failure_status{SAI_STATUS_FAILURE}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), + Return(SAI_STATUS_FAILURE))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); } 
-TEST_F(RouteManagerTest, UpdateRouteEntryNexthopIdNotExistInMapperShouldFail) +TEST_F(RouteManagerTest, UpdateRouteEntryNexthopIdNotExistInMapperShouldRaiseCriticalState) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.nexthop_id = kNexthopId2; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); - - // (TODO): Expect critical state. - EXPECT_EQ(StatusCode::SWSS_RC_INTERNAL, UpdateRouteEntry(route_entry)); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId2); + std::vector exp_failure_status{SAI_STATUS_FAILURE}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), + Return(SAI_STATUS_FAILURE))); + // TODO: Expect critical state. 
+ EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); } TEST_F(RouteManagerTest, UpdateRouteEntryDropWithSaiErrorShouldFail) @@ -1125,18 +1653,67 @@ TEST_F(RouteManagerTest, UpdateRouteEntryDropWithSaiErrorShouldFail) auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.action = p4orch::kDrop; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); - EXPECT_CALL(mock_sai_route_, set_route_entry_attribute(_, _)).WillOnce(Return(SAI_STATUS_FAILURE)); - EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, UpdateRouteEntry(route_entry)); - EXPECT_CALL(mock_sai_route_, set_route_entry_attribute(_, _)) - .WillOnce(Return(SAI_STATUS_SUCCESS)) - .WillOnce(Return(SAI_STATUS_FAILURE)) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, UpdateRouteEntry(route_entry)); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kDrop, ""); + std::vector exp_failure_status{SAI_STATUS_FAILURE}; + std::vector exp_success_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), + Return(SAI_STATUS_FAILURE))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), + Return(SAI_STATUS_SUCCESS))) + .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), + Return(SAI_STATUS_FAILURE))) + 
.WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), + Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), + Return(SAI_STATUS_SUCCESS))) + .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), + Return(SAI_STATUS_FAILURE))) + .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), + Return(SAI_STATUS_FAILURE))); + // TODO: Expect critical state. + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); +} + +TEST_F(RouteManagerTest, UpdateRouteEntryTrapWithSaiErrorShouldFail) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); + + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kTrap, ""); + std::vector exp_failure_status{SAI_STATUS_FAILURE}; + std::vector exp_success_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), + Return(SAI_STATUS_FAILURE))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), + Return(SAI_STATUS_SUCCESS))) + .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), + Return(SAI_STATUS_FAILURE))) + .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), + 
Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), + Return(SAI_STATUS_SUCCESS))) + .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), + Return(SAI_STATUS_FAILURE))) + .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), + Return(SAI_STATUS_FAILURE))); + // TODO: Expect critical state. + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); } TEST_F(RouteManagerTest, UpdateRouteWithDifferentNexthopIdsSucceeds) @@ -1146,24 +1723,27 @@ TEST_F(RouteManagerTest, UpdateRouteWithDifferentNexthopIdsSucceeds) copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.nexthop_id = kNexthopId2; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId2); p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry.nexthop_id), kNexthopOid2); - EXPECT_CALL(mock_sai_route_, - set_route_entry_attribute( - Truly(std::bind(MatchSaiRouteEntry, sai_ipv4_route_prefix, std::placeholders::_1, gVrfOid)), - Truly(std::bind(MatchSaiAttributeNexthopId, kNexthopOid2, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_route_, - set_route_entry_attribute( - Truly(std::bind(MatchSaiRouteEntry, sai_ipv4_route_prefix, std::placeholders::_1, gVrfOid)), - 
Truly(std::bind(MatchSaiAttributeAction, SAI_PACKET_ACTION_FORWARD, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, UpdateRouteEntry(route_entry)); + + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv4_route_prefix; + + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr.value.oid = kNexthopOid2; + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); route_entry.action = p4orch::kSetNexthopId; VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); uint32_t ref_cnt; @@ -1175,6 +1755,54 @@ TEST_F(RouteManagerTest, UpdateRouteWithDifferentNexthopIdsSucceeds) EXPECT_EQ(1, ref_cnt); } +TEST_F(RouteManagerTest, UpdateRouteWithDifferentNexthopIdsAndMetadatasSucceeds) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + sai_ip_prefix_t sai_ipv4_route_prefix; + copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); + SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1, kMetadata1); + + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopIdAndMetadata, + kNexthopId2, kMetadata2); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry.nexthop_id), + kNexthopOid2); + + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + 
exp_sai_route_entry.destination = sai_ipv4_route_prefix; + + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_META_DATA; + exp_sai_attr.value.u32 = kMetadataInt2; + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr.value.oid = kNexthopOid2; + + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); + VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); + uint32_t ref_cnt; + EXPECT_TRUE( + p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), &ref_cnt)); + EXPECT_EQ(0, ref_cnt); + EXPECT_TRUE( + p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId2), &ref_cnt)); + EXPECT_EQ(1, ref_cnt); +} + TEST_F(RouteManagerTest, UpdateRouteFromNexthopIdToDropSucceeds) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); @@ -1182,22 +1810,34 @@ TEST_F(RouteManagerTest, UpdateRouteFromNexthopIdToDropSucceeds) copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.action = p4orch::kDrop; - 
route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); - EXPECT_CALL(mock_sai_route_, - set_route_entry_attribute( - Truly(std::bind(MatchSaiRouteEntry, sai_ipv4_route_prefix, std::placeholders::_1, gVrfOid)), - Truly(std::bind(MatchSaiAttributeAction, SAI_PACKET_ACTION_DROP, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_route_, - set_route_entry_attribute( - Truly(std::bind(MatchSaiRouteEntry, sai_ipv4_route_prefix, std::placeholders::_1, gVrfOid)), - Truly(std::bind(MatchSaiAttributeNexthopId, SAI_NULL_OBJECT_ID, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, UpdateRouteEntry(route_entry)); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kDrop, ""); + + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv4_route_prefix; + + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + exp_sai_attr.value.s32 = SAI_PACKET_ACTION_DROP; + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr.value.oid = SAI_NULL_OBJECT_ID; + + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + 
EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); uint32_t ref_cnt; EXPECT_TRUE( @@ -1205,32 +1845,147 @@ TEST_F(RouteManagerTest, UpdateRouteFromNexthopIdToDropSucceeds) EXPECT_EQ(0, ref_cnt); } -TEST_F(RouteManagerTest, UpdateRouteFromDropToNexthopIdSucceeds) +TEST_F(RouteManagerTest, UpdateRouteFromNexthopIdToRouteMetadataSucceeds) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); sai_ip_prefix_t sai_ipv4_route_prefix; copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.action = p4orch::kSetNexthopId; - route_entry.nexthop_id = kNexthopId2; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); + auto route_entry = + GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetMetadataAndDrop, "", kMetadata1); + + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv4_route_prefix; + + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_META_DATA; + exp_sai_attr.value.s32 = kMetadataInt1; + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr.value.oid = SAI_NULL_OBJECT_ID; + + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), 
RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + exp_sai_attr.value.s32 = SAI_PACKET_ACTION_DROP; + + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); + VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); + uint32_t ref_cnt; + EXPECT_TRUE( + p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), &ref_cnt)); + EXPECT_EQ(0, ref_cnt); +} + +TEST_F(RouteManagerTest, UpdateRouteFromNexthopIdAndMetadataToDropSucceeds) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + sai_ip_prefix_t sai_ipv4_route_prefix; + copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); + SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1, kMetadata2); + + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kDrop, ""); + + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv4_route_prefix; + + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + exp_sai_attr.value.s32 = SAI_PACKET_ACTION_DROP; + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + 
AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr.value.oid = SAI_NULL_OBJECT_ID; + + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_META_DATA; + exp_sai_attr.value.u32 = 0; + + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); + VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); + uint32_t ref_cnt; + EXPECT_TRUE( + p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), &ref_cnt)); + EXPECT_EQ(0, ref_cnt); +} + +TEST_F(RouteManagerTest, UpdateRouteFromDropToNexthopIdSucceeds) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + sai_ip_prefix_t sai_ipv4_route_prefix; + copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); + SetupDropRouteEntry(gVrfName, swss_ipv4_route_prefix); + + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId2); p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry.nexthop_id), kNexthopOid2); - EXPECT_CALL(mock_sai_route_, - set_route_entry_attribute( - Truly(std::bind(MatchSaiRouteEntry, 
sai_ipv4_route_prefix, std::placeholders::_1, gVrfOid)), - Truly(std::bind(MatchSaiAttributeNexthopId, kNexthopOid2, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_route_, - set_route_entry_attribute( - Truly(std::bind(MatchSaiRouteEntry, sai_ipv4_route_prefix, std::placeholders::_1, gVrfOid)), - Truly(std::bind(MatchSaiAttributeAction, SAI_PACKET_ACTION_FORWARD, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, UpdateRouteEntry(route_entry)); + + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv4_route_prefix; + + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr.value.oid = kNexthopOid2; + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + exp_sai_attr.value.s32 = SAI_PACKET_ACTION_FORWARD; + + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); uint32_t ref_cnt; EXPECT_TRUE( @@ -1238,174 +1993,507 @@ TEST_F(RouteManagerTest, UpdateRouteFromDropToNexthopIdSucceeds) EXPECT_EQ(1, ref_cnt); } 
-TEST_F(RouteManagerTest, UpdateRouteWithDifferentWcmpGroupsSucceeds) +TEST_F(RouteManagerTest, UpdateRouteFromDropToWcmpWithMetadataSucceeds) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); sai_ip_prefix_t sai_ipv4_route_prefix; copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); - SetupWcmpGroupRouteEntry(gVrfName, swss_ipv4_route_prefix, kWcmpGroup1, kWcmpGroupOid1); + SetupDropRouteEntry(gVrfName, swss_ipv4_route_prefix); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.action = p4orch::kSetWcmpGroupId; - route_entry.wcmp_group = kWcmpGroup2; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetWcmpGroupIdAndMetadata, + kWcmpGroup1, kMetadata2); p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, KeyGenerator::generateWcmpGroupKey(route_entry.wcmp_group), - kWcmpGroupOid2); - EXPECT_CALL(mock_sai_route_, - set_route_entry_attribute( - Truly(std::bind(MatchSaiRouteEntry, sai_ipv4_route_prefix, std::placeholders::_1, gVrfOid)), - Truly(std::bind(MatchSaiAttributeNexthopId, kWcmpGroupOid2, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_route_, - set_route_entry_attribute( - Truly(std::bind(MatchSaiRouteEntry, sai_ipv4_route_prefix, std::placeholders::_1, gVrfOid)), - Truly(std::bind(MatchSaiAttributeAction, SAI_PACKET_ACTION_FORWARD, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, UpdateRouteEntry(route_entry)); - route_entry.action = p4orch::kSetWcmpGroupId; + kWcmpGroupOid1); + + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv4_route_prefix; + + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = 
SAI_ROUTE_ENTRY_ATTR_META_DATA; + exp_sai_attr.value.u32 = kMetadataInt2; + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr.value.oid = kWcmpGroupOid1; + + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + exp_sai_attr.value.s32 = SAI_PACKET_ACTION_FORWARD; + + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); uint32_t ref_cnt; EXPECT_TRUE(p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, KeyGenerator::generateWcmpGroupKey(kWcmpGroup1), &ref_cnt)); - EXPECT_EQ(0, ref_cnt); - EXPECT_TRUE(p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, - KeyGenerator::generateWcmpGroupKey(kWcmpGroup2), &ref_cnt)); EXPECT_EQ(1, ref_cnt); } -TEST_F(RouteManagerTest, UpdateNexthopIdRouteWithNoChangeSucceeds) +TEST_F(RouteManagerTest, UpdateRouteFromTrapToDropAndSetMetadataSucceeds) { auto swss_ipv4_route_prefix = 
swss::IpPrefix(kIpv4Prefix); sai_ip_prefix_t sai_ipv4_route_prefix; copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); - SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); - - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.action = p4orch::kSetNexthopId; - route_entry.nexthop_id = kNexthopId1; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); - EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, UpdateRouteEntry(route_entry)); + SetupTrapRouteEntry(gVrfName, swss_ipv4_route_prefix); + + auto route_entry = + GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetMetadataAndDrop, "", kMetadata2); + + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv4_route_prefix; + + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + exp_sai_attr.value.s32 = SAI_PACKET_ACTION_DROP; + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_META_DATA; + exp_sai_attr.value.u32 = kMetadataInt2; + + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + 
ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); - uint32_t ref_cnt; - EXPECT_TRUE( - p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), &ref_cnt)); - EXPECT_EQ(1, ref_cnt); } -TEST_F(RouteManagerTest, UpdateRouteEntryRecoverFailureShouldRaiseCriticalState) +TEST_F(RouteManagerTest, UpdateRouteFromTrapToDropSucceeds) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); - SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); - - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.action = p4orch::kDrop; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); - EXPECT_CALL(mock_sai_route_, set_route_entry_attribute(_, _)).WillOnce(Return(SAI_STATUS_FAILURE)); - EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, UpdateRouteEntry(route_entry)); - EXPECT_CALL(mock_sai_route_, set_route_entry_attribute(_, _)) - .WillOnce(Return(SAI_STATUS_SUCCESS)) - .WillOnce(Return(SAI_STATUS_FAILURE)) - .WillOnce(Return(SAI_STATUS_FAILURE)); - // (TODO): Expect critical state. 
- EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, UpdateRouteEntry(route_entry)); + sai_ip_prefix_t sai_ipv4_route_prefix; + copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); + SetupTrapRouteEntry(gVrfName, swss_ipv4_route_prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kDrop, ""); + + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv4_route_prefix; + + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + exp_sai_attr.value.s32 = SAI_PACKET_ACTION_DROP; + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); + VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); } -TEST_F(RouteManagerTest, DeleteRouteEntryWithSaiErrorShouldFail) +TEST_F(RouteManagerTest, UpdateRouteFromDropToTrapSucceeds) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); - SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); - - EXPECT_CALL(mock_sai_route_, remove_route_entry(_)).WillOnce(Return(SAI_STATUS_FAILURE)); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); - EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, DeleteRouteEntry(route_entry)); + sai_ip_prefix_t sai_ipv4_route_prefix; + copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); + SetupDropRouteEntry(gVrfName, 
swss_ipv4_route_prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kTrap, ""); + + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv4_route_prefix; + + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + exp_sai_attr.value.s32 = SAI_PACKET_ACTION_TRAP; + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); + VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); } -TEST_F(RouteManagerTest, DeleteIpv4RouteSucceeds) +TEST_F(RouteManagerTest, UpdateRouteFromNexthopIdToTrapSucceeds) { auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); sai_ip_prefix_t sai_ipv4_route_prefix; copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); - - EXPECT_CALL(mock_sai_route_, remove_route_entry(Truly(std::bind(MatchSaiRouteEntry, sai_ipv4_route_prefix, - std::placeholders::_1, gVrfOid)))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); - EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, DeleteRouteEntry(route_entry)); - auto *route_entry_ptr = GetRouteEntry(route_entry.route_entry_key); - EXPECT_EQ(nullptr, route_entry_ptr); - 
EXPECT_FALSE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_ROUTE_ENTRY, route_entry.route_entry_key)); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kTrap, ""); + + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv4_route_prefix; + + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + exp_sai_attr.value.s32 = SAI_PACKET_ACTION_TRAP; + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr.value.oid = SAI_NULL_OBJECT_ID; + + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); + VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); uint32_t ref_cnt; EXPECT_TRUE( p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), &ref_cnt)); EXPECT_EQ(0, ref_cnt); } -TEST_F(RouteManagerTest, DeleteIpv6RouteSucceeds) +TEST_F(RouteManagerTest, UpdateRouteFromTrapToNexthopIdSucceeds) { - auto swss_ipv6_route_prefix = swss::IpPrefix(kIpv6Prefix); - sai_ip_prefix_t sai_ipv6_route_prefix; - copy(sai_ipv6_route_prefix, swss_ipv6_route_prefix); - SetupWcmpGroupRouteEntry(gVrfName, swss_ipv6_route_prefix, 
kWcmpGroup1, kWcmpGroupOid1); + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + sai_ip_prefix_t sai_ipv4_route_prefix; + copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); + SetupTrapRouteEntry(gVrfName, swss_ipv4_route_prefix); - EXPECT_CALL(mock_sai_route_, remove_route_entry(Truly(std::bind(MatchSaiRouteEntry, sai_ipv6_route_prefix, - std::placeholders::_1, gVrfOid)))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - P4RouteEntry route_entry = {}; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv6_route_prefix; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); - EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, DeleteRouteEntry(route_entry)); - auto *route_entry_ptr = GetRouteEntry(route_entry.route_entry_key); - EXPECT_EQ(nullptr, route_entry_ptr); - EXPECT_FALSE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_ROUTE_ENTRY, route_entry.route_entry_key)); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId2); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry.nexthop_id), + kNexthopOid2); + + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv4_route_prefix; + + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr.value.oid = kNexthopOid2; + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + exp_sai_attr.value.s32 = SAI_PACKET_ACTION_FORWARD; + + 
EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); + VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); uint32_t ref_cnt; - EXPECT_TRUE(p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, - KeyGenerator::generateWcmpGroupKey(kWcmpGroup1), &ref_cnt)); - EXPECT_EQ(0, ref_cnt); + EXPECT_TRUE( + p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId2), &ref_cnt)); + EXPECT_EQ(1, ref_cnt); } -TEST_F(RouteManagerTest, RouteCreateAndUpdateInDrainSucceeds) +TEST_F(RouteManagerTest, UpdateRouteFromTrapToNexthopIdAndMetadataRecoverFailureShouldRaiseCriticalState) { - const std::string kKeyPrefix = std::string(APP_P4RT_IPV4_TABLE_NAME) + kTableKeyDelimiter; - p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), kNexthopOid1); - p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId2), kNexthopOid2); - nlohmann::json j; - j[prependMatchField(p4orch::kVrfId)] = gVrfName; - j[prependMatchField(p4orch::kIpv4Dst)] = kIpv4Prefix; - std::vector attributes; - attributes.push_back(swss::FieldValueTuple{p4orch::kAction, p4orch::kSetNexthopId}); - attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kNexthopId), kNexthopId1}); - Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), SET_COMMAND, attributes)); - attributes.clear(); - attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kNexthopId), kNexthopId2}); - Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), SET_COMMAND, attributes)); + auto swss_ipv4_route_prefix = 
swss::IpPrefix(kIpv4Prefix); + sai_ip_prefix_t sai_ipv4_route_prefix; + copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); + SetupTrapRouteEntry(gVrfName, swss_ipv4_route_prefix); - EXPECT_CALL(mock_sai_route_, create_route_entry(_, _, _)).WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_route_, set_route_entry_attribute(_, _)).Times(2).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopIdAndMetadata, + kNexthopId2, kMetadata1); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry.nexthop_id), + kNexthopOid2); + std::vector exp_failure_status{SAI_STATUS_FAILURE}; + std::vector exp_success_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), + Return(SAI_STATUS_SUCCESS))) + .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), + Return(SAI_STATUS_SUCCESS))) + .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), + Return(SAI_STATUS_FAILURE))) + .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), + Return(SAI_STATUS_SUCCESS))) + .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), + Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); + + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), + Return(SAI_STATUS_SUCCESS))) + .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), + Return(SAI_STATUS_SUCCESS))) + .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), + 
Return(SAI_STATUS_FAILURE))) + .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), + Return(SAI_STATUS_SUCCESS))) + .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), + Return(SAI_STATUS_FAILURE))); + // TODO: Expect critical state. + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); +} - Drain(); +TEST_F(RouteManagerTest, UpdateRouteWithDifferentWcmpGroupsSucceeds) +{ auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); sai_ip_prefix_t sai_ipv4_route_prefix; copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); - P4RouteEntry route_entry; - route_entry.vrf_id = gVrfName; - route_entry.route_prefix = swss_ipv4_route_prefix; - route_entry.action = p4orch::kSetNexthopId; - route_entry.nexthop_id = kNexthopId2; - route_entry.route_entry_key = KeyGenerator::generateRouteKey(route_entry.vrf_id, route_entry.route_prefix); + SetupWcmpGroupRouteEntry(gVrfName, swss_ipv4_route_prefix, kWcmpGroup1, kWcmpGroupOid1); + + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetWcmpGroupId, kWcmpGroup2); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, KeyGenerator::generateWcmpGroupKey(route_entry.wcmp_group), + kWcmpGroupOid2); + + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv4_route_prefix; + + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr.value.oid = kWcmpGroupOid2; + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + AttrArrayEq(std::vector{exp_sai_attr}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); 
+ EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); + route_entry.action = p4orch::kSetWcmpGroupId; + VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); + uint32_t ref_cnt; + EXPECT_TRUE(p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, + KeyGenerator::generateWcmpGroupKey(kWcmpGroup1), &ref_cnt)); + EXPECT_EQ(0, ref_cnt); + EXPECT_TRUE(p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, + KeyGenerator::generateWcmpGroupKey(kWcmpGroup2), &ref_cnt)); + EXPECT_EQ(1, ref_cnt); +} + +TEST_F(RouteManagerTest, UpdateNexthopIdRouteWithNoChangeSucceeds) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + sai_ip_prefix_t sai_ipv4_route_prefix; + copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); + SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); + + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId1); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); + VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); + uint32_t ref_cnt; + EXPECT_TRUE( + p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), &ref_cnt)); + EXPECT_EQ(1, ref_cnt); +} + +TEST_F(RouteManagerTest, UpdateRouteFromNexthopIdAndMetadataToDropRecoverFailureShouldRaiseCriticalState) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + sai_ip_prefix_t sai_ipv4_route_prefix; + copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); + SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1, kMetadata2); + + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kDrop, ""); + + std::vector exp_failure_status{SAI_STATUS_FAILURE}; + std::vector exp_success_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, 
set_route_entries_attribute(_, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), + Return(SAI_STATUS_FAILURE))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); + + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), + Return(SAI_STATUS_SUCCESS))) + .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), + Return(SAI_STATUS_SUCCESS))) + .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), + Return(SAI_STATUS_FAILURE))) + .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), + Return(SAI_STATUS_SUCCESS))) + .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), + Return(SAI_STATUS_FAILURE))); + // TODO: Expect critical state. 
+ EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); +} + +TEST_F(RouteManagerTest, UpdateRouteFromDifferentNexthopIdAndMetadataRecoverFailureShouldRaiseCriticalState) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + sai_ip_prefix_t sai_ipv4_route_prefix; + copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); + SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1, kMetadata1); + + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopIdAndMetadata, + kNexthopId2, kMetadata2); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry.nexthop_id), + kNexthopOid2); + + std::vector exp_failure_status{SAI_STATUS_FAILURE}; + std::vector exp_success_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), + Return(SAI_STATUS_FAILURE))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); + + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_success_status.begin(), exp_success_status.end()), + Return(SAI_STATUS_SUCCESS))) + .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), + Return(SAI_STATUS_FAILURE))) + .WillOnce(DoAll(SetArrayArgument<4>(exp_failure_status.begin(), exp_failure_status.end()), + Return(SAI_STATUS_FAILURE))); + // TODO: Expect critical state. 
+ EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); +} + +TEST_F(RouteManagerTest, DeleteRouteEntryWithSaiErrorShouldFail) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); + + std::vector exp_status{SAI_STATUS_FAILURE}; + EXPECT_CALL(mock_sai_route_, remove_route_entries(_, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<3>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_FAILURE))); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, "", ""); + EXPECT_THAT(DeleteRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_UNKNOWN})); +} + +TEST_F(RouteManagerTest, DeleteIpv4RouteSucceeds) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + sai_ip_prefix_t sai_ipv4_route_prefix; + copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); + SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); + + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv4_route_prefix; + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, + remove_route_entries(Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<3>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, "", ""); + EXPECT_THAT(DeleteRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); + auto *route_entry_ptr = GetRouteEntry(route_entry.route_entry_key); + EXPECT_EQ(nullptr, route_entry_ptr); + EXPECT_FALSE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_ROUTE_ENTRY, route_entry.route_entry_key)); + uint32_t ref_cnt; + 
EXPECT_TRUE( + p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), &ref_cnt)); + EXPECT_EQ(0, ref_cnt); +} + +TEST_F(RouteManagerTest, DeleteIpv6RouteSucceeds) +{ + auto swss_ipv6_route_prefix = swss::IpPrefix(kIpv6Prefix); + sai_ip_prefix_t sai_ipv6_route_prefix; + copy(sai_ipv6_route_prefix, swss_ipv6_route_prefix); + SetupWcmpGroupRouteEntry(gVrfName, swss_ipv6_route_prefix, kWcmpGroup1, kWcmpGroupOid1); + + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVrfOid; + exp_sai_route_entry.destination = sai_ipv6_route_prefix; + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, + remove_route_entries(Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<3>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv6_route_prefix, "", ""); + EXPECT_THAT(DeleteRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); + auto *route_entry_ptr = GetRouteEntry(route_entry.route_entry_key); + EXPECT_EQ(nullptr, route_entry_ptr); + EXPECT_FALSE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_ROUTE_ENTRY, route_entry.route_entry_key)); + uint32_t ref_cnt; + EXPECT_TRUE(p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, + KeyGenerator::generateWcmpGroupKey(kWcmpGroup1), &ref_cnt)); + EXPECT_EQ(0, ref_cnt); +} + +TEST_F(RouteManagerTest, RouteCreateAndUpdateInDrainSucceeds) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), kNexthopOid1); + auto key_op_fvs_1 = GenerateKeyOpFieldsValuesTuple(gVrfName, swss_ipv4_route_prefix, SET_COMMAND, + p4orch::kSetNexthopId, kNexthopId1); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs_1); + std::vector 
exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, create_route_entries(_, _, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs_1)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_1)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))) + .Times(1); + Drain(); + + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId2), kNexthopOid2); + auto key_op_fvs_2 = GenerateKeyOpFieldsValuesTuple(gVrfName, swss_ipv4_route_prefix, SET_COMMAND, + p4orch::kSetNexthopId, kNexthopId2); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs_2); + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs_2)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_2)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))) + .Times(1); + Drain(); + + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId2); + sai_ip_prefix_t sai_ipv4_route_prefix; + copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); uint32_t ref_cnt; EXPECT_TRUE( @@ -1414,26 +2502,55 @@ TEST_F(RouteManagerTest, RouteCreateAndUpdateInDrainSucceeds) EXPECT_TRUE( p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId2), &ref_cnt)); EXPECT_EQ(1, ref_cnt); + + auto key_op_fvs_3 = GenerateKeyOpFieldsValuesTuple(gVrfName, swss_ipv4_route_prefix, SET_COMMAND, + p4orch::kSetMetadataAndDrop, "", kMetadata1); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs_3); + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute(_, _, _, _, _)) + .WillRepeatedly(DoAll(SetArrayArgument<4>(exp_status.begin(), 
exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs_3)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_3)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))) + .Times(1); + Drain(); + + route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetMetadataAndDrop, "", kMetadata1); + VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); + EXPECT_TRUE( + p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), &ref_cnt)); + EXPECT_EQ(0, ref_cnt); + EXPECT_TRUE( + p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId2), &ref_cnt)); + EXPECT_EQ(0, ref_cnt); } TEST_F(RouteManagerTest, RouteCreateAndDeleteInDrainSucceeds) { - const std::string kKeyPrefix = std::string(APP_P4RT_IPV4_TABLE_NAME) + kTableKeyDelimiter; + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), kNexthopOid1); - nlohmann::json j; - j[prependMatchField(p4orch::kVrfId)] = gVrfName; - j[prependMatchField(p4orch::kIpv4Dst)] = kIpv4Prefix; - std::vector attributes; - attributes.push_back(swss::FieldValueTuple{p4orch::kAction, p4orch::kSetNexthopId}); - attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kNexthopId), kNexthopId1}); - Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), SET_COMMAND, attributes)); - attributes.clear(); - Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), DEL_COMMAND, attributes)); + auto key_op_fvs_1 = GenerateKeyOpFieldsValuesTuple(gVrfName, swss_ipv4_route_prefix, SET_COMMAND, + p4orch::kSetNexthopId, kNexthopId1); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs_1); + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, create_route_entries(_, _, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), 
exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs_1)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_1)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))) + .Times(1); + Drain(); - EXPECT_CALL(mock_sai_route_, create_route_entry(_, _, _)).WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_route_, remove_route_entry(_)).WillOnce(Return(SAI_STATUS_SUCCESS)); + auto key_op_fvs_2 = GenerateKeyOpFieldsValuesTuple(gVrfName, swss_ipv4_route_prefix, DEL_COMMAND, "", ""); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs_2); + EXPECT_CALL(mock_sai_route_, remove_route_entries(_, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<3>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs_2)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_2)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))) + .Times(1); Drain(); - std::string key = KeyGenerator::generateRouteKey(gVrfName, swss::IpPrefix(kIpv4Prefix)); + + std::string key = KeyGenerator::generateRouteKey(gVrfName, swss_ipv4_route_prefix); auto *route_entry_ptr = GetRouteEntry(key); EXPECT_EQ(nullptr, route_entry_ptr); EXPECT_FALSE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_ROUTE_ENTRY, key)); @@ -1443,28 +2560,107 @@ TEST_F(RouteManagerTest, RouteCreateAndDeleteInDrainSucceeds) EXPECT_EQ(0, ref_cnt); } +TEST_F(RouteManagerTest, UpdateFailsWhenCreateAndUpdateTheSameRouteInDrain) +{ + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), kNexthopOid1); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId2), kNexthopOid2); + auto key_op_fvs_1 = GenerateKeyOpFieldsValuesTuple(gVrfName, swss::IpPrefix(kIpv4Prefix), SET_COMMAND, + p4orch::kSetNexthopId, kNexthopId1); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs_1); + auto key_op_fvs_2 = 
GenerateKeyOpFieldsValuesTuple(gVrfName, swss::IpPrefix(kIpv4Prefix), SET_COMMAND, + p4orch::kSetNexthopId, kNexthopId2); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs_2); + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, create_route_entries(_, _, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs_1)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_1)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))) + .Times(1); + EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs_2)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_2)), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))) + .Times(1); + + Drain(); + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + sai_ip_prefix_t sai_ipv4_route_prefix; + copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId1); + VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); + uint32_t ref_cnt; + EXPECT_TRUE( + p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), &ref_cnt)); + EXPECT_EQ(1, ref_cnt); + EXPECT_TRUE( + p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId2), &ref_cnt)); + EXPECT_EQ(0, ref_cnt); +} + +TEST_F(RouteManagerTest, DeleteFailsWhenCreateAndDeleteTheSameRouteInDrain) +{ + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), kNexthopOid1); + auto key_op_fvs_1 = GenerateKeyOpFieldsValuesTuple(gVrfName, swss::IpPrefix(kIpv4Prefix), SET_COMMAND, + p4orch::kSetNexthopId, kNexthopId1); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs_1); + auto key_op_fvs_2 = GenerateKeyOpFieldsValuesTuple(gVrfName, swss::IpPrefix(kIpv4Prefix), DEL_COMMAND, "", ""); + 
Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs_2); + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, create_route_entries(_, _, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs_1)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_1)), + Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))) + .Times(1); + EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs_2)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs_2)), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))) + .Times(1); + Drain(); + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + sai_ip_prefix_t sai_ipv4_route_prefix; + copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId1); + VerifyRouteEntry(route_entry, sai_ipv4_route_prefix, gVrfOid); + uint32_t ref_cnt; + EXPECT_TRUE( + p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), &ref_cnt)); + EXPECT_EQ(1, ref_cnt); +} + TEST_F(RouteManagerTest, RouteCreateInDrainSucceedsWhenVrfIsEmpty) { - const std::string kKeyPrefix = std::string(APP_P4RT_IPV4_TABLE_NAME) + kTableKeyDelimiter; const std::string kDefaultVrfName = ""; // Default Vrf auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); sai_ip_prefix_t sai_ipv4_route_prefix; copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), kNexthopOid1); - nlohmann::json j; - j[prependMatchField(p4orch::kVrfId)] = kDefaultVrfName; - j[prependMatchField(p4orch::kIpv4Dst)] = kIpv4Prefix; - std::vector attributes; - attributes.push_back(swss::FieldValueTuple{p4orch::kAction, p4orch::kSetNexthopId}); - 
attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kNexthopId), kNexthopId1}); - Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), SET_COMMAND, attributes)); + auto key_op_fvs = GenerateKeyOpFieldsValuesTuple(kDefaultVrfName, swss::IpPrefix(kIpv4Prefix), SET_COMMAND, + p4orch::kSetNexthopId, kNexthopId1); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs); - EXPECT_CALL( - mock_sai_route_, - create_route_entry( - Truly(std::bind(MatchSaiRouteEntry, sai_ipv4_route_prefix, std::placeholders::_1, gVirtualRouterId)), Eq(1), - Truly(std::bind(MatchSaiAttributeNexthopId, kNexthopOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); + sai_route_entry_t exp_sai_route_entry; + exp_sai_route_entry.switch_id = gSwitchId; + exp_sai_route_entry.vr_id = gVirtualRouterId; + exp_sai_route_entry.destination = sai_ipv4_route_prefix; + + sai_attribute_t exp_sai_attr; + exp_sai_attr.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr.value.oid = kNexthopOid1; + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, + create_route_entries(Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry}), + ArrayEq(std::vector{1}), + AttrArrayArrayEq(std::vector>{{exp_sai_attr}}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(publisher_, + publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs)), Eq(StatusCode::SWSS_RC_SUCCESS), Eq(true))) + .Times(1); Drain(); std::string key = KeyGenerator::generateRouteKey(kDefaultVrfName, swss::IpPrefix(kIpv4Prefix)); @@ -1480,55 +2676,69 @@ TEST_F(RouteManagerTest, RouteCreateInDrainSucceedsWhenVrfIsEmpty) TEST_F(RouteManagerTest, DeserializeRouteEntryInDrainFails) { const std::string kKeyPrefix = std::string(APP_P4RT_IPV4_TABLE_NAME) + kTableKeyDelimiter; - Enqueue( - swss::KeyOpFieldsValuesTuple(kKeyPrefix + 
"{{{{{{{{{{{{", SET_COMMAND, std::vector{})); + auto key_op_fvs = + swss::KeyOpFieldsValuesTuple(kKeyPrefix + "{{{{{{{{{{{{", SET_COMMAND, std::vector{}); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs); + EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs)), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))) + .Times(1); Drain(); } TEST_F(RouteManagerTest, ValidateRouteEntryInDrainFailsWhenVrfDoesNotExist) { - const std::string kKeyPrefix = std::string(APP_P4RT_IPV4_TABLE_NAME) + kTableKeyDelimiter; p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), kNexthopOid1); - nlohmann::json j; - j[prependMatchField(p4orch::kVrfId)] = "Invalid-Vrf"; - j[prependMatchField(p4orch::kIpv4Dst)] = kIpv4Prefix; - std::vector attributes; - attributes.push_back(swss::FieldValueTuple{p4orch::kAction, p4orch::kSetNexthopId}); - // Vrf does not exist. - attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kNexthopId), kNexthopId1}); - Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), SET_COMMAND, attributes)); + auto key_op_fvs = GenerateKeyOpFieldsValuesTuple("Invalid-Vrf", swss::IpPrefix(kIpv4Prefix), SET_COMMAND, + p4orch::kSetNexthopId, kNexthopId1); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs); + EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs)), + Eq(StatusCode::SWSS_RC_NOT_FOUND), Eq(true))) + .Times(1); Drain(); } TEST_F(RouteManagerTest, ValidateRouteEntryInDrainFailsWhenNexthopDoesNotExist) { - const std::string kKeyPrefix = std::string(APP_P4RT_IPV4_TABLE_NAME) + kTableKeyDelimiter; - nlohmann::json j; - j[prependMatchField(p4orch::kVrfId)] = gVrfName; - j[prependMatchField(p4orch::kIpv4Dst)] = kIpv4Prefix; - std::vector attributes; - attributes.push_back(swss::FieldValueTuple{p4orch::kAction, p4orch::kSetNexthopId}); - // Nexthop ID does 
not exist. - attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kNexthopId), kNexthopId1}); - Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), SET_COMMAND, attributes)); + auto key_op_fvs = GenerateKeyOpFieldsValuesTuple(gVrfName, swss::IpPrefix(kIpv4Prefix), SET_COMMAND, + p4orch::kSetNexthopId, kNexthopId1); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs); + EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs)), + Eq(StatusCode::SWSS_RC_NOT_FOUND), Eq(true))) + .Times(1); Drain(); } -TEST_F(RouteManagerTest, ValidateSetRouteEntryInDrainFails) +TEST_F(RouteManagerTest, InvalidateSetRouteEntryInDrainFails) { - const std::string kKeyPrefix = std::string(APP_P4RT_IPV4_TABLE_NAME) + kTableKeyDelimiter; p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), kNexthopOid1); - nlohmann::json j; - j[prependMatchField(p4orch::kVrfId)] = gVrfName; - j[prependMatchField(p4orch::kIpv4Dst)] = kIpv4Prefix; - std::vector attributes; - attributes.push_back(swss::FieldValueTuple{p4orch::kAction, p4orch::kSetNexthopId}); // No nexthop ID with kSetNexthopId action. - Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), SET_COMMAND, attributes)); + auto key_op_fvs = + GenerateKeyOpFieldsValuesTuple(gVrfName, swss::IpPrefix(kIpv4Prefix), SET_COMMAND, p4orch::kSetNexthopId, ""); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs); + EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs)), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))) + .Times(1); Drain(); } -TEST_F(RouteManagerTest, ValidateDelRouteEntryInDrainFails) +TEST_F(RouteManagerTest, InvalidateDelRouteEntryInDrainFails) +{ + // Route does not exist. 
+ auto key_op_fvs = GenerateKeyOpFieldsValuesTuple(gVrfName, swss::IpPrefix(kIpv4Prefix), DEL_COMMAND, + p4orch::kSetNexthopId, kNexthopId1); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs); + EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs)), + Eq(StatusCode::SWSS_RC_NOT_FOUND), Eq(true))) + .Times(1); + Drain(); +} + +TEST_F(RouteManagerTest, InvalidCommandInDrainFails) { const std::string kKeyPrefix = std::string(APP_P4RT_IPV4_TABLE_NAME) + kTableKeyDelimiter; p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), kNexthopOid1); @@ -1536,23 +2746,543 @@ TEST_F(RouteManagerTest, ValidateDelRouteEntryInDrainFails) j[prependMatchField(p4orch::kVrfId)] = gVrfName; j[prependMatchField(p4orch::kIpv4Dst)] = kIpv4Prefix; std::vector attributes; - // Fields are non-empty for DEl. attributes.push_back(swss::FieldValueTuple{p4orch::kAction, p4orch::kSetNexthopId}); attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kNexthopId), kNexthopId1}); - Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), DEL_COMMAND, attributes)); + auto key_op_fvs = swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), "INVALID_COMMAND", attributes); + Enqueue(APP_P4RT_IPV4_TABLE_NAME, key_op_fvs); + EXPECT_CALL(publisher_, publish(Eq(APP_P4RT_TABLE_NAME), Eq(kfvKey(key_op_fvs)), + FieldValueTupleArrayEq(kfvFieldsValues(key_op_fvs)), + Eq(StatusCode::SWSS_RC_INVALID_PARAM), Eq(true))) + .Times(1); Drain(); } -TEST_F(RouteManagerTest, InvalidCommandInDrainFails) +TEST_F(RouteManagerTest, BatchedCreateSucceeds) { - const std::string kKeyPrefix = std::string(APP_P4RT_IPV4_TABLE_NAME) + kTableKeyDelimiter; - p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), kNexthopOid1); + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + sai_ip_prefix_t sai_ipv4_route_prefix; + copy(sai_ipv4_route_prefix, 
swss_ipv4_route_prefix); + auto route_entry_ipv4 = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId1); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry_ipv4.nexthop_id), + kNexthopOid1); + + auto swss_ipv6_route_prefix = swss::IpPrefix(kIpv6Prefix); + sai_ip_prefix_t sai_ipv6_route_prefix; + copy(sai_ipv6_route_prefix, swss_ipv6_route_prefix); + auto route_entry_ipv6 = + GenerateP4RouteEntry(gVrfName, swss_ipv6_route_prefix, p4orch::kSetWcmpGroupId, kWcmpGroup1); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, + KeyGenerator::generateWcmpGroupKey(route_entry_ipv6.wcmp_group), kWcmpGroupOid1); + + sai_route_entry_t exp_sai_route_entry_ipv4; + exp_sai_route_entry_ipv4.switch_id = gSwitchId; + exp_sai_route_entry_ipv4.vr_id = gVrfOid; + exp_sai_route_entry_ipv4.destination = sai_ipv4_route_prefix; + + sai_attribute_t exp_sai_attr_ipv4; + exp_sai_attr_ipv4.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr_ipv4.value.oid = kNexthopOid1; + + sai_route_entry_t exp_sai_route_entry_ipv6; + exp_sai_route_entry_ipv6.switch_id = gSwitchId; + exp_sai_route_entry_ipv6.vr_id = gVrfOid; + exp_sai_route_entry_ipv6.destination = sai_ipv6_route_prefix; + + sai_attribute_t exp_sai_attr_ipv6; + exp_sai_attr_ipv6.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr_ipv6.value.oid = kWcmpGroupOid1; + + std::vector exp_status{SAI_STATUS_SUCCESS, SAI_STATUS_SUCCESS}; + EXPECT_CALL( + mock_sai_route_, + create_route_entries( + Eq(2), + RouteEntryArrayEq(std::vector{exp_sai_route_entry_ipv6, exp_sai_route_entry_ipv4}), + ArrayEq(std::vector{1, 1}), + AttrArrayArrayEq(std::vector>{{exp_sai_attr_ipv6}, {exp_sai_attr_ipv4}}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(CreateRouteEntries(std::vector{route_entry_ipv4, route_entry_ipv6}), + 
ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS, StatusCode::SWSS_RC_SUCCESS})); + VerifyRouteEntry(route_entry_ipv4, sai_ipv4_route_prefix, gVrfOid); + VerifyRouteEntry(route_entry_ipv6, sai_ipv6_route_prefix, gVrfOid); + uint32_t ref_cnt; + EXPECT_TRUE( + p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), &ref_cnt)); + EXPECT_EQ(1, ref_cnt); + EXPECT_TRUE(p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, + KeyGenerator::generateWcmpGroupKey(kWcmpGroup1), &ref_cnt)); + EXPECT_EQ(1, ref_cnt); +} + +TEST_F(RouteManagerTest, BatchedCreatePartiallySucceeds) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + sai_ip_prefix_t sai_ipv4_route_prefix; + copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); + auto route_entry_ipv4 = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId1); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(route_entry_ipv4.nexthop_id), + kNexthopOid1); + + auto swss_ipv6_route_prefix = swss::IpPrefix(kIpv6Prefix); + sai_ip_prefix_t sai_ipv6_route_prefix; + copy(sai_ipv6_route_prefix, swss_ipv6_route_prefix); + auto route_entry_ipv6 = + GenerateP4RouteEntry(gVrfName, swss_ipv6_route_prefix, p4orch::kSetWcmpGroupId, kWcmpGroup1); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, + KeyGenerator::generateWcmpGroupKey(route_entry_ipv6.wcmp_group), kWcmpGroupOid1); + + sai_route_entry_t exp_sai_route_entry_ipv4; + exp_sai_route_entry_ipv4.switch_id = gSwitchId; + exp_sai_route_entry_ipv4.vr_id = gVrfOid; + exp_sai_route_entry_ipv4.destination = sai_ipv4_route_prefix; + + sai_attribute_t exp_sai_attr_ipv4; + exp_sai_attr_ipv4.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr_ipv4.value.oid = kNexthopOid1; + + sai_route_entry_t exp_sai_route_entry_ipv6; + exp_sai_route_entry_ipv6.switch_id = gSwitchId; + exp_sai_route_entry_ipv6.vr_id = gVrfOid; + exp_sai_route_entry_ipv6.destination = 
sai_ipv6_route_prefix; + + sai_attribute_t exp_sai_attr_ipv6; + exp_sai_attr_ipv6.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr_ipv6.value.oid = kWcmpGroupOid1; + + std::vector exp_status{SAI_STATUS_FAILURE, SAI_STATUS_SUCCESS}; + EXPECT_CALL( + mock_sai_route_, + create_route_entries( + Eq(2), + RouteEntryArrayEq(std::vector{exp_sai_route_entry_ipv6, exp_sai_route_entry_ipv4}), + ArrayEq(std::vector{1, 1}), + AttrArrayArrayEq(std::vector>{{exp_sai_attr_ipv6}, {exp_sai_attr_ipv4}}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_FAILURE))); + EXPECT_THAT(CreateRouteEntries(std::vector{route_entry_ipv4, route_entry_ipv6}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS, StatusCode::SWSS_RC_UNKNOWN})); + VerifyRouteEntry(route_entry_ipv4, sai_ipv4_route_prefix, gVrfOid); + auto *route_entry_ptr_ipv6 = GetRouteEntry(route_entry_ipv6.route_entry_key); + EXPECT_EQ(nullptr, route_entry_ptr_ipv6); + EXPECT_FALSE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_ROUTE_ENTRY, route_entry_ipv6.route_entry_key)); + uint32_t ref_cnt; + EXPECT_TRUE( + p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), &ref_cnt)); + EXPECT_EQ(1, ref_cnt); + EXPECT_TRUE(p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, + KeyGenerator::generateWcmpGroupKey(kWcmpGroup1), &ref_cnt)); + EXPECT_EQ(0, ref_cnt); +} + +TEST_F(RouteManagerTest, BatchedUpdateSucceeds) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + sai_ip_prefix_t sai_ipv4_route_prefix; + copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); + auto route_entry_ipv4 = + GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetWcmpGroupId, kWcmpGroup1); + SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, KeyGenerator::generateWcmpGroupKey(kWcmpGroup1), + kWcmpGroupOid1); 
+ + auto swss_ipv6_route_prefix = swss::IpPrefix(kIpv6Prefix); + sai_ip_prefix_t sai_ipv6_route_prefix; + copy(sai_ipv6_route_prefix, swss_ipv6_route_prefix); + auto route_entry_ipv6 = + GenerateP4RouteEntry(gVrfName, swss_ipv6_route_prefix, p4orch::kSetWcmpGroupId, kWcmpGroup2); + SetupDropRouteEntry(gVrfName, swss_ipv6_route_prefix); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, KeyGenerator::generateWcmpGroupKey(kWcmpGroup2), + kWcmpGroupOid2); + + sai_route_entry_t exp_sai_route_entry_ipv4; + exp_sai_route_entry_ipv4.switch_id = gSwitchId; + exp_sai_route_entry_ipv4.vr_id = gVrfOid; + exp_sai_route_entry_ipv4.destination = sai_ipv4_route_prefix; + + sai_attribute_t exp_sai_attr_ipv4; + exp_sai_attr_ipv4.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr_ipv4.value.oid = kWcmpGroupOid1; + + sai_route_entry_t exp_sai_route_entry_ipv6; + exp_sai_route_entry_ipv6.switch_id = gSwitchId; + exp_sai_route_entry_ipv6.vr_id = gVrfOid; + exp_sai_route_entry_ipv6.destination = sai_ipv6_route_prefix; + + sai_attribute_t exp_sai_attr_ipv6; + exp_sai_attr_ipv6.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr_ipv6.value.oid = kWcmpGroupOid2; + + std::vector exp_status_1{SAI_STATUS_SUCCESS, SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(2), + RouteEntryArrayEq(std::vector{exp_sai_route_entry_ipv6, + exp_sai_route_entry_ipv4}), + AttrArrayEq(std::vector{exp_sai_attr_ipv6, exp_sai_attr_ipv4}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status_1.begin(), exp_status_1.end()), Return(SAI_STATUS_SUCCESS))); + + sai_attribute_t exp_sai_attr_ipv6_2; + exp_sai_attr_ipv6_2.id = SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION; + exp_sai_attr_ipv6_2.value.s32 = SAI_PACKET_ACTION_FORWARD; + + std::vector exp_status_2{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(1), RouteEntryArrayEq(std::vector{exp_sai_route_entry_ipv6}), + 
AttrArrayEq(std::vector{exp_sai_attr_ipv6_2}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status_2.begin(), exp_status_2.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry_ipv4, route_entry_ipv6}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS, StatusCode::SWSS_RC_SUCCESS})); + VerifyRouteEntry(route_entry_ipv4, sai_ipv4_route_prefix, gVrfOid); + VerifyRouteEntry(route_entry_ipv6, sai_ipv6_route_prefix, gVrfOid); + uint32_t ref_cnt; + EXPECT_TRUE( + p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), &ref_cnt)); + EXPECT_EQ(0, ref_cnt); + EXPECT_TRUE(p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, + KeyGenerator::generateWcmpGroupKey(kWcmpGroup1), &ref_cnt)); + EXPECT_EQ(1, ref_cnt); + EXPECT_TRUE(p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, + KeyGenerator::generateWcmpGroupKey(kWcmpGroup2), &ref_cnt)); + EXPECT_EQ(1, ref_cnt); +} + +TEST_F(RouteManagerTest, BatchedUpdatePartiallySucceeds) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + sai_ip_prefix_t sai_ipv4_route_prefix; + copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); + auto route_entry_ipv4 = + GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetWcmpGroupId, kWcmpGroup1); + SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, KeyGenerator::generateWcmpGroupKey(kWcmpGroup1), + kWcmpGroupOid1); + + auto swss_ipv6_route_prefix = swss::IpPrefix(kIpv6Prefix); + sai_ip_prefix_t sai_ipv6_route_prefix; + copy(sai_ipv6_route_prefix, swss_ipv6_route_prefix); + auto route_entry_ipv6 = + GenerateP4RouteEntry(gVrfName, swss_ipv6_route_prefix, p4orch::kSetWcmpGroupId, kWcmpGroup2); + SetupDropRouteEntry(gVrfName, swss_ipv6_route_prefix); + p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, 
KeyGenerator::generateWcmpGroupKey(kWcmpGroup2), + kWcmpGroupOid2); + + sai_route_entry_t exp_sai_route_entry_ipv4; + exp_sai_route_entry_ipv4.switch_id = gSwitchId; + exp_sai_route_entry_ipv4.vr_id = gVrfOid; + exp_sai_route_entry_ipv4.destination = sai_ipv4_route_prefix; + + sai_attribute_t exp_sai_attr_ipv4; + exp_sai_attr_ipv4.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr_ipv4.value.oid = kWcmpGroupOid1; + + sai_route_entry_t exp_sai_route_entry_ipv6; + exp_sai_route_entry_ipv6.switch_id = gSwitchId; + exp_sai_route_entry_ipv6.vr_id = gVrfOid; + exp_sai_route_entry_ipv6.destination = sai_ipv6_route_prefix; + + sai_attribute_t exp_sai_attr_ipv6; + exp_sai_attr_ipv6.id = SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID; + exp_sai_attr_ipv6.value.oid = kWcmpGroupOid2; + + std::vector exp_status_1{SAI_STATUS_FAILURE, SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, set_route_entries_attribute( + Eq(2), + RouteEntryArrayEq(std::vector{exp_sai_route_entry_ipv6, + exp_sai_route_entry_ipv4}), + AttrArrayEq(std::vector{exp_sai_attr_ipv6, exp_sai_attr_ipv4}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<4>(exp_status_1.begin(), exp_status_1.end()), Return(SAI_STATUS_FAILURE))); + EXPECT_THAT(UpdateRouteEntries(std::vector{route_entry_ipv4, route_entry_ipv6}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS, StatusCode::SWSS_RC_UNKNOWN})); + VerifyRouteEntry(route_entry_ipv4, sai_ipv4_route_prefix, gVrfOid); + route_entry_ipv6 = GenerateP4RouteEntry(gVrfName, swss_ipv6_route_prefix, p4orch::kDrop, ""); + VerifyRouteEntry(route_entry_ipv6, sai_ipv6_route_prefix, gVrfOid); + uint32_t ref_cnt; + EXPECT_TRUE( + p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), &ref_cnt)); + EXPECT_EQ(0, ref_cnt); + EXPECT_TRUE(p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, + KeyGenerator::generateWcmpGroupKey(kWcmpGroup1), &ref_cnt)); + EXPECT_EQ(1, ref_cnt); + 
EXPECT_TRUE(p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, + KeyGenerator::generateWcmpGroupKey(kWcmpGroup2), &ref_cnt)); + EXPECT_EQ(0, ref_cnt); +} + +TEST_F(RouteManagerTest, BatchedDeleteSucceeds) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + sai_ip_prefix_t sai_ipv4_route_prefix; + copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); + auto route_entry_ipv4 = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId1); + SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); + + auto swss_ipv6_route_prefix = swss::IpPrefix(kIpv6Prefix); + sai_ip_prefix_t sai_ipv6_route_prefix; + copy(sai_ipv6_route_prefix, swss_ipv6_route_prefix); + auto route_entry_ipv6 = GenerateP4RouteEntry(gVrfName, swss_ipv6_route_prefix, p4orch::kDrop, ""); + SetupDropRouteEntry(gVrfName, swss_ipv6_route_prefix); + + sai_route_entry_t exp_sai_route_entry_ipv4; + exp_sai_route_entry_ipv4.switch_id = gSwitchId; + exp_sai_route_entry_ipv4.vr_id = gVrfOid; + exp_sai_route_entry_ipv4.destination = sai_ipv4_route_prefix; + + sai_route_entry_t exp_sai_route_entry_ipv6; + exp_sai_route_entry_ipv6.switch_id = gSwitchId; + exp_sai_route_entry_ipv6.vr_id = gVrfOid; + exp_sai_route_entry_ipv6.destination = sai_ipv6_route_prefix; + + std::vector exp_status{SAI_STATUS_SUCCESS, SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, remove_route_entries(Eq(2), + RouteEntryArrayEq(std::vector{ + exp_sai_route_entry_ipv6, exp_sai_route_entry_ipv4}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<3>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(DeleteRouteEntries(std::vector{route_entry_ipv4, route_entry_ipv6}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS, StatusCode::SWSS_RC_SUCCESS})); + auto *route_entry_ptr_ipv4 = GetRouteEntry(route_entry_ipv4.route_entry_key); + EXPECT_EQ(nullptr, route_entry_ptr_ipv4); + 
EXPECT_FALSE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_ROUTE_ENTRY, route_entry_ipv4.route_entry_key)); + auto *route_entry_ptr_ipv6 = GetRouteEntry(route_entry_ipv6.route_entry_key); + EXPECT_EQ(nullptr, route_entry_ptr_ipv6); + EXPECT_FALSE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_ROUTE_ENTRY, route_entry_ipv6.route_entry_key)); + uint32_t ref_cnt; + EXPECT_TRUE( + p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), &ref_cnt)); + EXPECT_EQ(0, ref_cnt); +} + +TEST_F(RouteManagerTest, BatchedDeletePartiallySucceeds) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + sai_ip_prefix_t sai_ipv4_route_prefix; + copy(sai_ipv4_route_prefix, swss_ipv4_route_prefix); + auto route_entry_ipv4 = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId1); + SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); + + auto swss_ipv6_route_prefix = swss::IpPrefix(kIpv6Prefix); + sai_ip_prefix_t sai_ipv6_route_prefix; + copy(sai_ipv6_route_prefix, swss_ipv6_route_prefix); + auto route_entry_ipv6 = GenerateP4RouteEntry(gVrfName, swss_ipv6_route_prefix, p4orch::kDrop, ""); + SetupDropRouteEntry(gVrfName, swss_ipv6_route_prefix); + + sai_route_entry_t exp_sai_route_entry_ipv4; + exp_sai_route_entry_ipv4.switch_id = gSwitchId; + exp_sai_route_entry_ipv4.vr_id = gVrfOid; + exp_sai_route_entry_ipv4.destination = sai_ipv4_route_prefix; + + sai_route_entry_t exp_sai_route_entry_ipv6; + exp_sai_route_entry_ipv6.switch_id = gSwitchId; + exp_sai_route_entry_ipv6.vr_id = gVrfOid; + exp_sai_route_entry_ipv6.destination = sai_ipv6_route_prefix; + + std::vector exp_status{SAI_STATUS_FAILURE, SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, remove_route_entries(Eq(2), + RouteEntryArrayEq(std::vector{ + exp_sai_route_entry_ipv6, exp_sai_route_entry_ipv4}), + Eq(SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<3>(exp_status.begin(), 
exp_status.end()), Return(SAI_STATUS_FAILURE))); + EXPECT_THAT(DeleteRouteEntries(std::vector{route_entry_ipv4, route_entry_ipv6}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS, StatusCode::SWSS_RC_UNKNOWN})); + auto *route_entry_ptr_ipv4 = GetRouteEntry(route_entry_ipv4.route_entry_key); + EXPECT_EQ(nullptr, route_entry_ptr_ipv4); + EXPECT_FALSE(p4_oid_mapper_.existsOID(SAI_OBJECT_TYPE_ROUTE_ENTRY, route_entry_ipv4.route_entry_key)); + VerifyRouteEntry(route_entry_ipv6, sai_ipv6_route_prefix, gVrfOid); + uint32_t ref_cnt; + EXPECT_TRUE( + p4_oid_mapper_.getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(kNexthopId1), &ref_cnt)); + EXPECT_EQ(0, ref_cnt); +} + +TEST_F(RouteManagerTest, VerifyStateTest) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + SetupNexthopIdRouteEntry(gVrfName, swss_ipv4_route_prefix, kNexthopId1, kNexthopOid1); + auto route_entry = GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix, p4orch::kSetNexthopId, kNexthopId1); + + // Setup ASIC DB. + swss::Table table(nullptr, "ASIC_STATE"); + table.set("SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest\":\"10.11.12.0/" + "24\",\"switch_id\":\"oid:0x0\",\"vr\":\"oid:0x6f\"}", + std::vector{swss::FieldValueTuple{"SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID", "oid:0x1"}}); + nlohmann::json j; j[prependMatchField(p4orch::kVrfId)] = gVrfName; j[prependMatchField(p4orch::kIpv4Dst)] = kIpv4Prefix; + const std::string db_key = std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + APP_P4RT_IPV4_TABLE_NAME + + kTableKeyDelimiter + j.dump(); std::vector attributes; + + // Verification should succeed with vaild key and value. 
attributes.push_back(swss::FieldValueTuple{p4orch::kAction, p4orch::kSetNexthopId}); attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kNexthopId), kNexthopId1}); - Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), "INVALID_COMMAND", attributes)); - Drain(); + EXPECT_EQ(VerifyState(db_key, attributes), ""); + + // TODO: Expect critical state. + + // Invalid key should fail verification. + EXPECT_FALSE(VerifyState("invalid", attributes).empty()); + EXPECT_FALSE(VerifyState("invalid:invalid", attributes).empty()); + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + ":invalid", attributes).empty()); + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + ":invalid:invalid", attributes).empty()); + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + ":FIXED_IPV4_TABLE:invalid", attributes).empty()); + + // Verification should fail if nexthop ID does not exist. + attributes.clear(); + attributes.push_back(swss::FieldValueTuple{p4orch::kAction, p4orch::kSetNexthopId}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kNexthopId), kNexthopId2}); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + attributes.clear(); + attributes.push_back(swss::FieldValueTuple{p4orch::kAction, p4orch::kSetNexthopId}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kNexthopId), kNexthopId1}); + + // Verification should fail if entry does not exist. + j[prependMatchField(p4orch::kIpv4Dst)] = "1.1.1.0/24"; + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + APP_P4RT_IPV4_TABLE_NAME + + kTableKeyDelimiter + j.dump(), + attributes) + .empty()); + + auto *route_entry_ptr = GetRouteEntry(KeyGenerator::generateRouteKey(gVrfName, swss_ipv4_route_prefix)); + EXPECT_NE(route_entry_ptr, nullptr); + + // Verification should fail if route entry key mismatches. 
+ auto saved_route_entry_key = route_entry_ptr->route_entry_key; + route_entry_ptr->route_entry_key = "invalid"; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + route_entry_ptr->route_entry_key = saved_route_entry_key; + + // Verification should fail if VRF ID mismatches. + auto saved_vrf_id = route_entry_ptr->vrf_id; + route_entry_ptr->vrf_id = "invalid"; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + route_entry_ptr->vrf_id = saved_vrf_id; + + // Verification should fail if route prefix mismatches. + auto saved_route_prefix = route_entry_ptr->route_prefix; + route_entry_ptr->route_prefix = swss::IpPrefix(kIpv6Prefix); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + route_entry_ptr->route_prefix = saved_route_prefix; + + // Verification should fail if action mismatches. + auto saved_action = route_entry_ptr->action; + route_entry_ptr->action = p4orch::kSetWcmpGroupId; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + route_entry_ptr->action = saved_action; + + // Verification should fail if nexthop ID mismatches. + auto saved_nexthop_id = route_entry_ptr->nexthop_id; + route_entry_ptr->nexthop_id = kNexthopId2; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + route_entry_ptr->nexthop_id = saved_nexthop_id; + + // Verification should fail if WCMP group mismatches. + auto saved_wcmp_group = route_entry_ptr->wcmp_group; + route_entry_ptr->wcmp_group = kWcmpGroup1; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + route_entry_ptr->wcmp_group = saved_wcmp_group; + + // Verification should fail if WCMP group mismatches. 
+ auto saved_route_metadata = route_entry_ptr->route_metadata; + route_entry_ptr->route_metadata = kMetadata1; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + route_entry_ptr->route_metadata = saved_route_metadata; +} + +TEST_F(RouteManagerTest, VerifyStateAsicDbTest) +{ + auto swss_ipv4_route_prefix = swss::IpPrefix(kIpv4Prefix); + SetupDropRouteEntry(gVrfName, swss_ipv4_route_prefix); + auto swss_ipv6_route_prefix = swss::IpPrefix(kIpv6Prefix); + SetupNexthopIdRouteEntry(gVrfName, swss_ipv6_route_prefix, kNexthopId1, kNexthopOid1, kMetadata1); + + auto swss_ipv4_route_prefix2 = swss::IpPrefix(kIpv4Prefix2); + auto route_entry = + GenerateP4RouteEntry(gVrfName, swss_ipv4_route_prefix2, p4orch::kSetMetadataAndDrop, "", kMetadata2); + + std::vector exp_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_route_, create_route_entries(_, _, _, _, _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_THAT(CreateRouteEntries(std::vector{route_entry}), + ArrayEq(std::vector{StatusCode::SWSS_RC_SUCCESS})); + + // Setup ASIC DB. 
+ swss::Table table(nullptr, "ASIC_STATE"); + table.set("SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest\":\"10.11.12.0/" + "24\",\"switch_id\":\"oid:0x0\",\"vr\":\"oid:0x6f\"}", + std::vector{ + swss::FieldValueTuple{"SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION", "SAI_PACKET_ACTION_DROP"}, + swss::FieldValueTuple{"SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID", "oid:0x0"}}); + table.set("SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest\":\"2001:db8:1::/" + "32\",\"switch_id\":\"oid:0x0\",\"vr\":\"oid:0x6f\"}", + std::vector{swss::FieldValueTuple{"SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID", "oid:0x1"}, + swss::FieldValueTuple{"SAI_ROUTE_ENTRY_ATTR_META_DATA", "1"}}); + + table.set("SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest\":\"10.12.12.0/" + "24\",\"switch_id\":\"oid:0x0\",\"vr\":\"oid:0x6f\"}", + std::vector{ + swss::FieldValueTuple{"SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION", "SAI_PACKET_ACTION_DROP"}, + swss::FieldValueTuple{"SAI_ROUTE_ENTRY_ATTR_META_DATA", "2"}}); + + nlohmann::json j_1; + j_1[prependMatchField(p4orch::kVrfId)] = gVrfName; + j_1[prependMatchField(p4orch::kIpv4Dst)] = kIpv4Prefix; + const std::string db_key_1 = std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + APP_P4RT_IPV4_TABLE_NAME + + kTableKeyDelimiter + j_1.dump(); + std::vector attributes_1; + attributes_1.push_back(swss::FieldValueTuple{p4orch::kAction, p4orch::kDrop}); + nlohmann::json j_2; + j_2[prependMatchField(p4orch::kVrfId)] = gVrfName; + j_2[prependMatchField(p4orch::kIpv6Dst)] = kIpv6Prefix; + const std::string db_key_2 = std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + APP_P4RT_IPV6_TABLE_NAME + + kTableKeyDelimiter + j_2.dump(); + std::vector attributes_2; + attributes_2.push_back(swss::FieldValueTuple{p4orch::kAction, p4orch::kSetNexthopIdAndMetadata}); + attributes_2.push_back(swss::FieldValueTuple{prependParamField(p4orch::kNexthopId), kNexthopId1}); + attributes_2.push_back(swss::FieldValueTuple{prependParamField(p4orch::kRouteMetadata), kMetadata1}); + + nlohmann::json j_3; + j_3[prependMatchField(p4orch::kVrfId)] = gVrfName; 
+ j_3[prependMatchField(p4orch::kIpv6Dst)] = kIpv4Prefix2; + const std::string db_key_3 = std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + APP_P4RT_IPV6_TABLE_NAME + + kTableKeyDelimiter + j_3.dump(); + std::vector attributes_3; + attributes_3.push_back(swss::FieldValueTuple{p4orch::kAction, p4orch::kSetMetadataAndDrop}); + attributes_3.push_back(swss::FieldValueTuple{prependParamField(p4orch::kRouteMetadata), kMetadata2}); + + // Verification should succeed with correct ASIC DB values. + EXPECT_EQ(VerifyState(db_key_1, attributes_1), ""); + EXPECT_EQ(VerifyState(db_key_2, attributes_2), ""); + EXPECT_EQ(VerifyState(db_key_3, attributes_3), ""); + + // Verification should fail if ASIC DB values mismatch. + table.set("SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest\":\"10.11.12.0/" + "24\",\"switch_id\":\"oid:0x0\",\"vr\":\"oid:0x6f\"}", + std::vector{ + swss::FieldValueTuple{"SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION", "SAI_PACKET_ACTION_FORWARD"}}); + table.set("SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest\":\"2001:db8:1::/" + "32\",\"switch_id\":\"oid:0x0\",\"vr\":\"oid:0x6f\"}", + std::vector{swss::FieldValueTuple{"SAI_ROUTE_ENTRY_ATTR_META_DATA", "2"}}); + EXPECT_FALSE(VerifyState(db_key_1, attributes_1).empty()); + EXPECT_FALSE(VerifyState(db_key_2, attributes_2).empty()); + + // Verification should fail if ASIC DB table is missing. 
+ table.del("SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest\":\"10.11.12.0/" + "24\",\"switch_id\":\"oid:0x0\",\"vr\":\"oid:0x6f\"}"); + table.del("SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest\":\"2001:db8:1::/" + "32\",\"switch_id\":\"oid:0x0\",\"vr\":\"oid:0x6f\"}"); + EXPECT_FALSE(VerifyState(db_key_1, attributes_1).empty()); + EXPECT_FALSE(VerifyState(db_key_2, attributes_2).empty()); + table.set("SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest\":\"10.11.12.0/" + "24\",\"switch_id\":\"oid:0x0\",\"vr\":\"oid:0x6f\"}", + std::vector{ + swss::FieldValueTuple{"SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION", "SAI_PACKET_ACTION_DROP"}, + swss::FieldValueTuple{"SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID", "oid:0x0"}}); + table.set("SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest\":\"2001:db8:1::/" + "32\",\"switch_id\":\"oid:0x0\",\"vr\":\"oid:0x6f\"}", + std::vector{swss::FieldValueTuple{"SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID", "oid:0x1"}, + swss::FieldValueTuple{"SAI_ROUTE_ENTRY_ATTR_META_DATA", "1"}}); } diff --git a/orchagent/p4orch/tests/router_interface_manager_test.cpp b/orchagent/p4orch/tests/router_interface_manager_test.cpp index 661fe33efa..d1c7330cc7 100644 --- a/orchagent/p4orch/tests/router_interface_manager_test.cpp +++ b/orchagent/p4orch/tests/router_interface_manager_test.cpp @@ -164,7 +164,7 @@ class RouterInterfaceManagerTest : public ::testing::Test void Enqueue(const swss::KeyOpFieldsValuesTuple &entry) { - router_intf_manager_.enqueue(entry); + router_intf_manager_.enqueue(APP_P4RT_ROUTER_INTERFACE_TABLE_NAME, entry); } void Drain() @@ -172,6 +172,11 @@ class RouterInterfaceManagerTest : public ::testing::Test router_intf_manager_.drain(); } + std::string VerifyState(const std::string &key, const std::vector &tuple) + { + return router_intf_manager_.verifyState(key, tuple); + } + ReturnCodeOr DeserializeRouterIntfEntry( const std::string &key, const std::vector &attributes) { @@ -290,7 +295,7 @@ TEST_F(RouterInterfaceManagerTest, CreateRouterInterfaceEntryExistsInP4OidMapper 
p4_oid_mapper_.setOID(SAI_OBJECT_TYPE_ROUTER_INTERFACE, router_intf_key, kRouterInterfaceOid2); P4RouterInterfaceEntry router_intf_entry(kRouterInterfaceId2, kPortName2, kMacAddress2); - // (TODO): Expect critical state. + // TODO: Expect critical state. EXPECT_EQ(StatusCode::SWSS_RC_INTERNAL, CreateRouterInterface(router_intf_key, router_intf_entry)); auto current_entry = GetRouterInterfaceEntry(router_intf_key); @@ -622,7 +627,7 @@ TEST_F(RouterInterfaceManagerTest, ProcessDeleteRequestInterfaceNotExistInMapper p4_oid_mapper_.eraseOID(SAI_OBJECT_TYPE_ROUTER_INTERFACE, KeyGenerator::generateRouterInterfaceKey(kRouterInterfaceId1)); - // (TODO): Expect critical state. + // TODO: Expect critical state. EXPECT_EQ(StatusCode::SWSS_RC_INTERNAL, ProcessDeleteRequest(KeyGenerator::generateRouterInterfaceKey(router_intf_entry.router_interface_id))); } @@ -841,3 +846,123 @@ TEST_F(RouterInterfaceManagerTest, DrainInvalidOperation) ValidateRouterInterfaceEntryNotPresent(kRouterInterfaceId1); } + +TEST_F(RouterInterfaceManagerTest, VerifyStateTest) +{ + P4RouterInterfaceEntry router_intf_entry(kRouterInterfaceId1, kPortName1, kMacAddress1); + router_intf_entry.router_interface_oid = kRouterInterfaceOid1; + AddRouterInterfaceEntry(router_intf_entry, kPortOid1, kMtu1); + + // Setup ASIC DB. 
+ swss::Table table(nullptr, "ASIC_STATE"); + table.set("SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x295100", + std::vector{ + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID", "oid:0x0"}, + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS", "00:01:02:03:04:05"}, + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_TYPE", "SAI_ROUTER_INTERFACE_TYPE_PORT"}, + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_PORT_ID", "oid:0x112233"}, + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_MTU", "1500"}}); + + const std::string db_key = std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + + APP_P4RT_ROUTER_INTERFACE_TABLE_NAME + kTableKeyDelimiter + kRouterIntfAppDbKey; + std::vector attributes; + + // Verification should succeed with vaild key and value. + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kPort), kPortName1}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kSrcMac), kMacAddress1.to_string()}); + EXPECT_EQ(VerifyState(db_key, attributes), ""); + + // Invalid key should fail verification. + EXPECT_FALSE(VerifyState("invalid", attributes).empty()); + EXPECT_FALSE(VerifyState("invalid:invalid", attributes).empty()); + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + ":invalid", attributes).empty()); + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + ":invalid:invalid", attributes).empty()); + EXPECT_FALSE( + VerifyState(std::string(APP_P4RT_TABLE_NAME) + ":FIXED_ROUTER_INTERFACE_TABLE:invalid", attributes).empty()); + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + ":FIXED_ROUTER_INTERFACE_TABLE:{\"match/" + "router_interface_id\":\"invalid\"}", + attributes) + .empty()); + + // Invalid attributes should fail verification. 
+ attributes.clear(); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kPort), kPortName2}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kSrcMac), kMacAddress1.to_string()}); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + + attributes.clear(); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kPort), kPortName1}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kSrcMac), kMacAddress2.to_string()}); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + + // Invalid port should fail verification. + attributes.clear(); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kPort), "invalid"}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kSrcMac), kMacAddress1.to_string()}); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + + // Verification should fail if interface IDs mismatch. + attributes.clear(); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kPort), kPortName1}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kSrcMac), kMacAddress1.to_string()}); + auto *router_intf_entry_ptr = + GetRouterInterfaceEntry(KeyGenerator::generateRouterInterfaceKey(router_intf_entry.router_interface_id)); + auto saved_ritf_id = router_intf_entry_ptr->router_interface_id; + router_intf_entry_ptr->router_interface_id = "invalid"; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + router_intf_entry_ptr->router_interface_id = saved_ritf_id; + + // Verification should fail if OID mapper mismatches. 
+ p4_oid_mapper_.eraseOID(SAI_OBJECT_TYPE_ROUTER_INTERFACE, + KeyGenerator::generateRouterInterfaceKey(router_intf_entry.router_interface_id)); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); +} + +TEST_F(RouterInterfaceManagerTest, VerifyStateAsicDbTest) +{ + P4RouterInterfaceEntry router_intf_entry(kRouterInterfaceId1, "Ethernet7", kMacAddress1); + router_intf_entry.router_interface_oid = kRouterInterfaceOid1; + AddRouterInterfaceEntry(router_intf_entry, 0x1234, 9100); + + // Setup ASIC DB. + swss::Table table(nullptr, "ASIC_STATE"); + table.set("SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x295100", + std::vector{ + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID", "oid:0x0"}, + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS", "00:01:02:03:04:05"}, + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_TYPE", "SAI_ROUTER_INTERFACE_TYPE_PORT"}, + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_PORT_ID", "oid:0x1234"}, + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_MTU", "9100"}}); + + const std::string db_key = std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + + APP_P4RT_ROUTER_INTERFACE_TABLE_NAME + kTableKeyDelimiter + kRouterIntfAppDbKey; + std::vector attributes; + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kPort), "Ethernet7"}); + attributes.push_back(swss::FieldValueTuple{prependParamField(p4orch::kSrcMac), kMacAddress1.to_string()}); + + // Verification should succeed with correct ASIC DB values. + EXPECT_EQ(VerifyState(db_key, attributes), ""); + + // Verification should fail if ASIC DB values mismatch. + table.set("SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x295100", + std::vector{swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_MTU", "1500"}}); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + + // Verification should fail if ASIC DB table is missing. 
+ table.del("SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x295100"); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + table.set("SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x295100", + std::vector{ + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID", "oid:0x0"}, + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS", "00:01:02:03:04:05"}, + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_TYPE", "SAI_ROUTER_INTERFACE_TYPE_PORT"}, + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_PORT_ID", "oid:0x1234"}, + swss::FieldValueTuple{"SAI_ROUTER_INTERFACE_ATTR_MTU", "9100"}}); + + // Verification should fail if SAI attr cannot be constructed. + auto *router_intf_entry_ptr = + GetRouterInterfaceEntry(KeyGenerator::generateRouterInterfaceKey(router_intf_entry.router_interface_id)); + router_intf_entry_ptr->port_name = "Ethernet8"; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + router_intf_entry_ptr->port_name = "Ethernet7"; +} diff --git a/orchagent/p4orch/tests/test_main.cpp b/orchagent/p4orch/tests/test_main.cpp index 23cf37d8e1..787e0622f4 100644 --- a/orchagent/p4orch/tests/test_main.cpp +++ b/orchagent/p4orch/tests/test_main.cpp @@ -11,6 +11,7 @@ extern "C" #include "crmorch.h" #include "dbconnector.h" #include "directory.h" +#include "flowcounterrouteorch.h" #include "mock_sai_virtual_router.h" #include "p4orch.h" #include "portsorch.h" @@ -36,13 +37,9 @@ sai_object_id_t kMirrorSessionOid2 = 9002; sai_object_id_t gUnderlayIfId; #define DEFAULT_BATCH_SIZE 128 -int gBatchSize = DEFAULT_BATCH_SIZE; -bool gSairedisRecord = true; -bool gSwssRecord = true; -bool gLogRotate = false; -bool gSaiRedisLogRotate = false; -bool gResponsePublisherRecord = false; -bool gResponsePublisherLogRotate = false; +#define DEFAULT_MAX_BULK_SIZE 1000 +extern int gBatchSize; +size_t gMaxBulkSize = DEFAULT_MAX_BULK_SIZE; bool gSyncMode = false; bool gIsNatSupported = false; @@ -50,10 +47,9 @@ PortsOrch *gPortsOrch; CrmOrch *gCrmOrch; P4Orch *gP4Orch; 
VRFOrch *gVrfOrch; +FlowCounterRouteOrch *gFlowCounterRouteOrch; SwitchOrch *gSwitchOrch; Directory gDirectory; -ofstream gRecordOfs; -string gRecordFile; swss::DBConnector *gAppDb; swss::DBConnector *gStateDb; swss::DBConnector *gConfigDb; @@ -69,10 +65,40 @@ sai_acl_api_t *sai_acl_api; sai_policer_api_t *sai_policer_api; sai_virtual_router_api_t *sai_virtual_router_api; sai_hostif_api_t *sai_hostif_api; +sai_hash_api_t *sai_hash_api; sai_switch_api_t *sai_switch_api; sai_mirror_api_t *sai_mirror_api; sai_udf_api_t *sai_udf_api; sai_tunnel_api_t *sai_tunnel_api; +sai_my_mac_api_t *sai_my_mac_api; +sai_counter_api_t *sai_counter_api; +sai_generic_programmable_api_t *sai_generic_programmable_api; + +task_process_status handleSaiCreateStatus(sai_api_t api, sai_status_t status, void *context) +{ + return task_success; +} + +task_process_status handleSaiSetStatus(sai_api_t api, sai_status_t status, void *context) +{ + return task_success; +} + +task_process_status handleSaiRemoveStatus(sai_api_t api, sai_status_t status, void *context) +{ + return task_success; +} + +task_process_status handleSaiGetStatus(sai_api_t api, sai_status_t status, void *context) +{ + return task_success; +} + +bool parseHandleSaiStatusFailure(task_process_status status) +{ + return true; +} + namespace { @@ -147,7 +173,8 @@ void AddVrf() } // namespace int main(int argc, char *argv[]) -{ +{ + gBatchSize = DEFAULT_BATCH_SIZE; testing::InitGoogleTest(&argc, argv); sai_router_interface_api_t router_intfs_api; @@ -159,9 +186,14 @@ int main(int argc, char *argv[]) sai_policer_api_t policer_api; sai_virtual_router_api_t virtual_router_api; sai_hostif_api_t hostif_api; + sai_hash_api_t hash_api; sai_switch_api_t switch_api; sai_mirror_api_t mirror_api; sai_udf_api_t udf_api; + sai_my_mac_api_t my_mac_api; + sai_tunnel_api_t tunnel_api; + sai_counter_api_t counter_api; + sai_generic_programmable_api_t generic_programmable_api; sai_router_intfs_api = &router_intfs_api; sai_neighbor_api = 
&neighbor_api; sai_next_hop_api = &next_hop_api; @@ -171,9 +203,14 @@ int main(int argc, char *argv[]) sai_policer_api = &policer_api; sai_virtual_router_api = &virtual_router_api; sai_hostif_api = &hostif_api; + sai_hash_api = &hash_api; sai_switch_api = &switch_api; sai_mirror_api = &mirror_api; sai_udf_api = &udf_api; + sai_my_mac_api = &my_mac_api; + sai_tunnel_api = &tunnel_api; + sai_counter_api = &counter_api; + sai_generic_programmable_api = &generic_programmable_api; swss::DBConnector appl_db("APPL_DB", 0); swss::DBConnector state_db("STATE_DB", 0); @@ -193,6 +230,10 @@ int main(int argc, char *argv[]) gVrfOrch = &vrf_orch; gDirectory.set(static_cast(&vrf_orch)); + FlowCounterRouteOrch flow_counter_route_orch(gConfigDb, std::vector{}); + gFlowCounterRouteOrch = &flow_counter_route_orch; + gDirectory.set(static_cast(&flow_counter_route_orch)); + // Setup ports for all tests. SetupPorts(); AddVrf(); diff --git a/orchagent/p4orch/tests/wcmp_manager_test.cpp b/orchagent/p4orch/tests/wcmp_manager_test.cpp index 73cf34be25..95e8ecb70f 100644 --- a/orchagent/p4orch/tests/wcmp_manager_test.cpp +++ b/orchagent/p4orch/tests/wcmp_manager_test.cpp @@ -12,7 +12,6 @@ #include "mock_sai_next_hop_group.h" #include "mock_sai_serialize.h" #include "mock_sai_switch.h" -#include "mock_sai_udf.h" #include "p4oidmapper.h" #include "p4orch.h" #include "p4orch/p4orch_util.h" @@ -24,6 +23,8 @@ extern "C" #include "sai.h" } +using ::p4orch::kTableKeyDelimiter; + extern P4Orch *gP4Orch; extern VRFOrch *gVrfOrch; extern swss::DBConnector *gAppDb; @@ -31,7 +32,6 @@ extern sai_object_id_t gSwitchId; extern sai_next_hop_group_api_t *sai_next_hop_group_api; extern sai_hostif_api_t *sai_hostif_api; extern sai_switch_api_t *sai_switch_api; -extern sai_udf_api_t *sai_udf_api; extern sai_object_id_t gSwitchId; extern sai_acl_api_t *sai_acl_api; @@ -45,6 +45,7 @@ using ::testing::DoAll; using ::testing::Eq; using ::testing::Return; using ::testing::SetArgPointee; +using 
::testing::SetArrayArgument; using ::testing::StrictMock; using ::testing::Truly; @@ -52,6 +53,7 @@ namespace { constexpr char *kWcmpGroupId1 = "group-1"; +constexpr char *kWcmpGroupId2 = "group-2"; constexpr sai_object_id_t kWcmpGroupOid1 = 10; constexpr char *kNexthopId1 = "ju1u32m1.atl11:qe-3/7"; constexpr sai_object_id_t kNexthopOid1 = 1; @@ -68,7 +70,60 @@ const std::string kWcmpGroupKey1 = KeyGenerator::generateWcmpGroupKey(kWcmpGroup const std::string kNexthopKey1 = KeyGenerator::generateNextHopKey(kNexthopId1); const std::string kNexthopKey2 = KeyGenerator::generateNextHopKey(kNexthopId2); const std::string kNexthopKey3 = KeyGenerator::generateNextHopKey(kNexthopId3); -constexpr sai_object_id_t kUdfMatchOid1 = 5001; + +// Matches two SAI attributes. +bool MatchSaiAttribute(const sai_attribute_t &attr, const sai_attribute_t &exp_attr) +{ + if (exp_attr.id == SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID) + { + if (attr.id != SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID || attr.value.oid != exp_attr.value.oid) + { + return false; + } + } + if (exp_attr.id == SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID) + { + if (attr.id != SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID || attr.value.oid != exp_attr.value.oid) + { + return false; + } + } + if (exp_attr.id == SAI_NEXT_HOP_GROUP_MEMBER_ATTR_WEIGHT) + { + if (attr.id != SAI_NEXT_HOP_GROUP_MEMBER_ATTR_WEIGHT || attr.value.u32 != exp_attr.value.u32) + { + return false; + } + } + return true; +} + +MATCHER_P(ArrayEq, array, "") +{ + for (size_t i = 0; i < array.size(); ++i) + { + if (arg[i] != array[i]) + { + return false; + } + } + return true; +} + +MATCHER_P(AttrArrayArrayEq, array, "") +{ + for (size_t i = 0; i < array.size(); ++i) + { + for (size_t j = 0; j < array[i].size(); j++) + { + if (!MatchSaiAttribute(arg[i][j], array[i][j])) + { + return false; + } + } + } + return true; +} // Matches the next hop group type sai_attribute_t argument. 
bool MatchSaiNextHopGroupAttribute(const sai_attribute_t *attr) @@ -118,6 +173,26 @@ bool MatchSaiNextHopGroupMemberAttribute(const sai_object_id_t expected_next_hop return true; } +std::vector GetSaiNextHopGroupMemberAttribute(sai_object_id_t next_hop_oid, uint32_t weight, + sai_object_id_t group_oid) +{ + std::vector attrs; + sai_attribute_t attr; + attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID; + attr.value.oid = group_oid; + attrs.push_back(attr); + + attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID; + attr.value.oid = next_hop_oid; + attrs.push_back(attr); + + attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_WEIGHT; + attr.value.u32 = weight; + attrs.push_back(attr); + + return attrs; +} + void VerifyWcmpGroupMemberEntry(const std::string &expected_next_hop_id, const int expected_weight, std::shared_ptr wcmp_gm_entry) { @@ -154,7 +229,6 @@ class WcmpManagerTest : public ::testing::Test EXPECT_CALL(mock_sai_switch_, set_switch_attribute(Eq(gSwitchId), _)) .WillRepeatedly(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_acl_, remove_acl_table_group(_)).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_udf_, remove_udf_match(_)).WillRepeatedly(Return(SAI_STATUS_SUCCESS)); delete gP4Orch; delete copp_orch_; } @@ -167,13 +241,14 @@ class WcmpManagerTest : public ::testing::Test mock_sai_hostif = &mock_sai_hostif_; mock_sai_serialize = &mock_sai_serialize_; mock_sai_acl = &mock_sai_acl_; - mock_sai_udf = &mock_sai_udf_; sai_next_hop_group_api->create_next_hop_group = create_next_hop_group; sai_next_hop_group_api->remove_next_hop_group = remove_next_hop_group; sai_next_hop_group_api->create_next_hop_group_member = create_next_hop_group_member; sai_next_hop_group_api->remove_next_hop_group_member = remove_next_hop_group_member; sai_next_hop_group_api->set_next_hop_group_member_attribute = set_next_hop_group_member_attribute; + sai_next_hop_group_api->create_next_hop_group_members = create_next_hop_group_members; + 
sai_next_hop_group_api->remove_next_hop_group_members = remove_next_hop_group_members; sai_hostif_api->create_hostif_table_entry = mock_create_hostif_table_entry; sai_hostif_api->create_hostif_trap = mock_create_hostif_trap; @@ -181,8 +256,6 @@ class WcmpManagerTest : public ::testing::Test sai_switch_api->set_switch_attribute = mock_set_switch_attribute; sai_acl_api->create_acl_table_group = create_acl_table_group; sai_acl_api->remove_acl_table_group = remove_acl_table_group; - sai_udf_api->create_udf_match = create_udf_match; - sai_udf_api->remove_udf_match = remove_udf_match; } void setUpP4Orch() @@ -194,16 +267,13 @@ class WcmpManagerTest : public ::testing::Test copp_orch_ = new CoppOrch(gAppDb, APP_COPP_TABLE_NAME); // init P4 orch - EXPECT_CALL(mock_sai_udf_, create_udf_match(_, _, _, _)) - .WillOnce(DoAll(SetArgPointee<0>(kUdfMatchOid1), Return(SAI_STATUS_SUCCESS))); - std::vector p4_tables; gP4Orch = new P4Orch(gAppDb, p4_tables, gVrfOrch, copp_orch_); } void Enqueue(const swss::KeyOpFieldsValuesTuple &entry) { - wcmp_group_manager_->enqueue(entry); + wcmp_group_manager_->enqueue(APP_P4RT_WCMP_GROUP_TABLE_NAME, entry); } void Drain() @@ -211,6 +281,11 @@ class WcmpManagerTest : public ::testing::Test wcmp_group_manager_->drain(); } + std::string VerifyState(const std::string &key, const std::vector &tuple) + { + return wcmp_group_manager_->verifyState(key, tuple); + } + ReturnCode ProcessAddRequest(P4WcmpGroupEntry *app_db_entry) { return wcmp_group_manager_->processAddRequest(app_db_entry); @@ -231,18 +306,6 @@ class WcmpManagerTest : public ::testing::Test wcmp_group_manager_->restorePrunedNextHops(port); } - bool VerifyWcmpGroupMemberInPrunedSet(std::shared_ptr gm, bool expected_member_present, - long unsigned int expected_set_size) - { - if (wcmp_group_manager_->pruned_wcmp_members_set.size() != expected_set_size) - return false; - - return expected_member_present ? 
(wcmp_group_manager_->pruned_wcmp_members_set.find(gm) != - wcmp_group_manager_->pruned_wcmp_members_set.end()) - : (wcmp_group_manager_->pruned_wcmp_members_set.find(gm) == - wcmp_group_manager_->pruned_wcmp_members_set.end()); - } - bool VerifyWcmpGroupMemberInPortMap(std::shared_ptr gm, bool expected_member_present, long unsigned int expected_set_size) { @@ -301,7 +364,6 @@ class WcmpManagerTest : public ::testing::Test StrictMock mock_sai_hostif_; StrictMock mock_sai_serialize_; StrictMock mock_sai_acl_; - StrictMock mock_sai_udf_; P4OidMapper *p4_oid_mapper_; WcmpManager *wcmp_group_manager_; CoppOrch *copp_orch_; @@ -312,10 +374,12 @@ P4WcmpGroupEntry WcmpManagerTest::getDefaultWcmpGroupEntryForTest() P4WcmpGroupEntry app_db_entry; app_db_entry.wcmp_group_id = kWcmpGroupId1; std::shared_ptr gm1 = std::make_shared(); + gm1->wcmp_group_id = kWcmpGroupId1; gm1->next_hop_id = kNexthopId1; gm1->weight = 2; app_db_entry.wcmp_group_members.push_back(gm1); std::shared_ptr gm2 = std::make_shared(); + gm2->wcmp_group_id = kWcmpGroupId1; gm2->next_hop_id = kNexthopId2; gm2->weight = 1; app_db_entry.wcmp_group_members.push_back(gm2); @@ -339,13 +403,18 @@ P4WcmpGroupEntry WcmpManagerTest::AddWcmpGroupEntryWithWatchport(const std::stri .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupOid1), Return(SAI_STATUS_SUCCESS))); // For members with non empty watchport field, member creation in SAI happens // for operationally up ports only.. 
+ std::vector return_oids{kWcmpGroupMemberOid1}; + std::vector exp_status{SAI_STATUS_SUCCESS}; if (oper_up) { - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL( + mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 2, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); } EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessAddRequest(&app_db_entry)); EXPECT_NE(nullptr, GetWcmpGroupEntry(kWcmpGroupId1)); @@ -362,16 +431,16 @@ P4WcmpGroupEntry WcmpManagerTest::AddWcmpGroupEntry1() create_next_hop_group(_, Eq(gSwitchId), Eq(1), Truly(std::bind(MatchSaiNextHopGroupAttribute, std::placeholders::_1)))) .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid2), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids{kWcmpGroupMemberOid1, kWcmpGroupMemberOid2}; + std::vector exp_status{SAI_STATUS_SUCCESS, 
SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(2), ArrayEq(std::vector{3, 3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 2, kWcmpGroupOid1), + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_status.begin(), exp_status.end()), Return(SAI_STATUS_SUCCESS))); EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessAddRequest(&app_db_entry)); EXPECT_NE(nullptr, GetWcmpGroupEntry(kWcmpGroupId1)); return app_db_entry; @@ -421,21 +490,26 @@ TEST_F(WcmpManagerTest, CreateWcmpGroupFailsWhenCreateGroupMemberSaiCallFails) // WCMP group creation fails when one of the group member creation fails EXPECT_CALL(mock_sai_next_hop_group_, create_next_hop_group(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); + std::vector return_oids{kWcmpGroupMemberOid1, SAI_NULL_OBJECT_ID}; + std::vector exp_create_status{SAI_STATUS_SUCCESS, SAI_STATUS_ITEM_NOT_FOUND}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(2), ArrayEq(std::vector{3, 3}), + 
AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 2, kWcmpGroupOid1), + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_ITEM_NOT_FOUND))); + std::vector exp_remove_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group(Eq(kWcmpGroupOid1))) .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, ProcessAddRequest(&app_db_entry)); + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessAddRequest(&app_db_entry)); std::string key = KeyGenerator::generateWcmpGroupKey(kWcmpGroupId1); auto *wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); EXPECT_EQ(nullptr, wcmp_group_entry_ptr); @@ -455,23 +529,28 @@ TEST_F(WcmpManagerTest, CreateWcmpGroupFailsWhenCreateGroupMemberSaiCallFailsPlu // WCMP group creation fails when one of the group member creation fails EXPECT_CALL(mock_sai_next_hop_group_, create_next_hop_group(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, 
kNexthopOid2, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_FAILURE)); + std::vector return_oids{kWcmpGroupMemberOid1, SAI_NULL_OBJECT_ID}; + std::vector exp_create_status{SAI_STATUS_SUCCESS, SAI_STATUS_ITEM_NOT_FOUND}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(2), ArrayEq(std::vector{3, 3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 2, kWcmpGroupOid1), + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_ITEM_NOT_FOUND))); + std::vector exp_remove_status{SAI_STATUS_FAILURE}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_FAILURE))); EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group(Eq(kWcmpGroupOid1))) .WillOnce(Return(SAI_STATUS_SUCCESS)); - // (TODO): Expect critical state. - EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, ProcessAddRequest(&app_db_entry)); + // TODO: Expect critical state. 
+ EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessAddRequest(&app_db_entry)); } TEST_F(WcmpManagerTest, CreateWcmpGroupFailsWhenCreateGroupMemberSaiCallFailsPlusGroupRecoveryFails) @@ -482,23 +561,28 @@ TEST_F(WcmpManagerTest, CreateWcmpGroupFailsWhenCreateGroupMemberSaiCallFailsPlu // WCMP group creation fails when one of the group member creation fails EXPECT_CALL(mock_sai_next_hop_group_, create_next_hop_group(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); + std::vector return_oids{kWcmpGroupMemberOid1, SAI_NULL_OBJECT_ID}; + std::vector exp_create_status{SAI_STATUS_SUCCESS, SAI_STATUS_ITEM_NOT_FOUND}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(2), ArrayEq(std::vector{3, 3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 2, kWcmpGroupOid1), + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_ITEM_NOT_FOUND))); + std::vector exp_remove_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + 
remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group(Eq(kWcmpGroupOid1))) .WillOnce(Return(SAI_STATUS_FAILURE)); - // (TODO): Expect critical state. - EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, ProcessAddRequest(&app_db_entry)); + // TODO: Expect critical state. + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessAddRequest(&app_db_entry)); } TEST_F(WcmpManagerTest, CreateWcmpGroupFailsWhenCreateGroupSaiCallFails) @@ -535,53 +619,73 @@ TEST_F(WcmpManagerTest, RemoveWcmpGroupFailsWhenNotExist) TEST_F(WcmpManagerTest, RemoveWcmpGroupFailsWhenSaiCallFails) { P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntry1(); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid2))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); + std::vector exp_remove_status{SAI_STATUS_SUCCESS, SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members( + Eq(2), ArrayEq(std::vector{kWcmpGroupMemberOid2, kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group(Eq(kWcmpGroupOid1))) .WillOnce(Return(SAI_STATUS_FAILURE)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - 
create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid2), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids{kWcmpGroupMemberOid1, kWcmpGroupMemberOid2}; + std::vector exp_create_status{SAI_STATUS_SUCCESS, SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(2), ArrayEq(std::vector{3, 3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 2, kWcmpGroupOid1), + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, RemoveWcmpGroup(kWcmpGroupId1)); } TEST_F(WcmpManagerTest, RemoveWcmpGroupFailsWhenMemberRemovalFails) { P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntry1(); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid2))) - .WillOnce(Return(SAI_STATUS_FAILURE)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); + std::vector exp_remove_status{SAI_STATUS_FAILURE, SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members( + Eq(2), ArrayEq(std::vector{kWcmpGroupMemberOid2, kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + 
DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_FAILURE))); + std::vector return_oids{kWcmpGroupMemberOid1}; + std::vector exp_create_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 2, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, RemoveWcmpGroup(kWcmpGroupId1)); } TEST_F(WcmpManagerTest, RemoveWcmpGroupFailsWhenMemberRemovalFailsPlusRecoveryFails) { P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntry1(); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid2))) - .WillOnce(Return(SAI_STATUS_FAILURE)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_FAILURE))); - // (TODO): Expect critical state. 
+ std::vector exp_remove_status{SAI_STATUS_FAILURE, SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members( + Eq(2), ArrayEq(std::vector{kWcmpGroupMemberOid2, kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_FAILURE))); + std::vector return_oids{SAI_NULL_OBJECT_ID}; + std::vector exp_create_status{SAI_STATUS_FAILURE}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 2, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_FAILURE))); + // TODO: Expect critical state. EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, RemoveWcmpGroup(kWcmpGroupId1)); } @@ -595,20 +699,36 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupMembersSucceed) std::shared_ptr gm2 = createWcmpGroupMemberEntry(kNexthopId2, 15); wcmp_group.wcmp_group_members.push_back(gm1); wcmp_group.wcmp_group_members.push_back(gm2); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid2))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 3, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid4), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - 
Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 15, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid5), Return(SAI_STATUS_SUCCESS))); + std::vector exp_remove_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid2}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids_4{kWcmpGroupMemberOid4}; + std::vector return_oids_5{kWcmpGroupMemberOid5}; + std::vector exp_create_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 3, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_4.begin(), return_oids_4.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 15, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_5.begin(), return_oids_5.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_TRUE(ProcessUpdateRequest(&wcmp_group).ok()); 
VerifyWcmpGroupEntry(wcmp_group, *GetWcmpGroupEntry(kWcmpGroupId1)); uint32_t wcmp_group_refcount = 0; @@ -623,15 +743,27 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupMembersSucceed) wcmp_group.wcmp_group_members.clear(); gm2 = createWcmpGroupMemberEntry(kNexthopId2, 15); wcmp_group.wcmp_group_members.push_back(gm2); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid4))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid5))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 15, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid2), Return(SAI_STATUS_SUCCESS))); + exp_remove_status = {SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid4}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid5}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids_2{kWcmpGroupMemberOid2}; + exp_create_status = {SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 15, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_2.begin(), return_oids_2.end()), + 
SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_TRUE(ProcessUpdateRequest(&wcmp_group).ok()); VerifyWcmpGroupEntry(wcmp_group, *GetWcmpGroupEntry(kWcmpGroupId1)); ASSERT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, kWcmpGroupKey1, &wcmp_group_refcount)); @@ -646,18 +778,30 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupMembersSucceed) std::shared_ptr updated_gm1 = createWcmpGroupMemberEntry(kNexthopId1, 20); wcmp_group.wcmp_group_members.push_back(updated_gm1); wcmp_group.wcmp_group_members.push_back(updated_gm2); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 20, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid2))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 15, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid5), Return(SAI_STATUS_SUCCESS))); + exp_remove_status = {SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid2}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids_1{kWcmpGroupMemberOid1}; + exp_create_status = {SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 20, kWcmpGroupOid1)}), + 
Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_1.begin(), return_oids_1.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 15, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_5.begin(), return_oids_5.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_TRUE(ProcessUpdateRequest(&wcmp_group).ok()); VerifyWcmpGroupEntry(wcmp_group, *GetWcmpGroupEntry(kWcmpGroupId1)); ASSERT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, kWcmpGroupKey1, &wcmp_group_refcount)); @@ -669,10 +813,17 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupMembersSucceed) // Update WCMP without group members wcmp_group.wcmp_group_members.clear(); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid5))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); + exp_remove_status = {SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid5}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); 
EXPECT_TRUE(ProcessUpdateRequest(&wcmp_group).ok()); VerifyWcmpGroupEntry(wcmp_group, *GetWcmpGroupEntry(kWcmpGroupId1)); ASSERT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, kWcmpGroupKey1, &wcmp_group_refcount)); @@ -697,25 +848,38 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupFailsWhenRemoveGroupMemberSaiCallFails) wcmp_group.wcmp_group_members.push_back(gm1); wcmp_group.wcmp_group_members.push_back(gm2); wcmp_group.wcmp_group_members.push_back(gm3); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 3, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid4), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 10, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid5), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid3, 30, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid3), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid2))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); + std::vector return_oids_4{kWcmpGroupMemberOid4}; + std::vector return_oids_5_6{kWcmpGroupMemberOid5, kWcmpGroupMemberOid3}; + std::vector exp_create_status_1{SAI_STATUS_SUCCESS}; + std::vector exp_create_status_2{SAI_STATUS_SUCCESS, SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), 
ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 3, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_4.begin(), return_oids_4.end()), + SetArrayArgument<6>(exp_create_status_1.begin(), exp_create_status_1.end()), + Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(2), ArrayEq(std::vector{3, 3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 10, kWcmpGroupOid1), + GetSaiNextHopGroupMemberAttribute(kNexthopOid3, 30, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_5_6.begin(), return_oids_5_6.end()), + SetArrayArgument<6>(exp_create_status_2.begin(), exp_create_status_2.end()), + Return(SAI_STATUS_SUCCESS))); + std::vector exp_remove_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid2}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); EXPECT_TRUE(ProcessUpdateRequest(&wcmp_group).ok()); VerifyWcmpGroupEntry(wcmp_group, *GetWcmpGroupEntry(kWcmpGroupId1)); uint32_t wcmp_group_refcount = 0; @@ -733,16 +897,23 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupFailsWhenRemoveGroupMemberSaiCallFails) wcmp_group.wcmp_group_members.clear(); wcmp_group.wcmp_group_members.push_back(gm1); wcmp_group.wcmp_group_members.push_back(gm3); - EXPECT_CALL(mock_sai_next_hop_group_, 
remove_next_hop_group_member(Eq(kWcmpGroupMemberOid5))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid3))) - .WillOnce(Return(SAI_STATUS_OBJECT_IN_USE)); - // Clean up - revert deletions -success + exp_remove_status = {SAI_STATUS_OBJECT_IN_USE, SAI_STATUS_SUCCESS}; EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 10, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid5), Return(SAI_STATUS_SUCCESS))); + remove_next_hop_group_members( + Eq(2), ArrayEq(std::vector{kWcmpGroupMemberOid3, kWcmpGroupMemberOid5}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), + Return(SAI_STATUS_OBJECT_IN_USE))); + // Clean up - revert deletions -success + std::vector return_oids_5{kWcmpGroupMemberOid5}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 10, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_5.begin(), return_oids_5.end()), + SetArrayArgument<6>(exp_create_status_1.begin(), exp_create_status_1.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_EQ(StatusCode::SWSS_RC_IN_USE, ProcessUpdateRequest(&wcmp_group)); P4WcmpGroupEntry expected_wcmp_group = {.wcmp_group_id = kWcmpGroupId1, .wcmp_group_members = {}}; expected_wcmp_group.wcmp_group_members.push_back(gm1); @@ -761,19 +932,26 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupFailsWhenRemoveGroupMemberSaiCallFails) // Remove WCMP group member with nexthop_id=kNexthopId1 and // nexthop_id=kNexthopId3(fail) - fail to clean up - EXPECT_CALL(mock_sai_next_hop_group_, 
remove_next_hop_group_member(Eq(kWcmpGroupMemberOid5))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid3))) - .WillOnce(Return(SAI_STATUS_OBJECT_IN_USE)); - // Clean up - revert deletions -failure + exp_remove_status = {SAI_STATUS_OBJECT_IN_USE, SAI_STATUS_SUCCESS}; EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 10, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_TABLE_FULL)); - // (TODO): Expect critical state. - EXPECT_EQ("Failed to remove WCMP group member with nexthop id " - "'ju1u32m3.atl11:qe-3/7'", + remove_next_hop_group_members( + Eq(2), ArrayEq(std::vector{kWcmpGroupMemberOid3, kWcmpGroupMemberOid5}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), + Return(SAI_STATUS_OBJECT_IN_USE))); + // Clean up - revert deletions -failure + std::vector return_oids{SAI_NULL_OBJECT_ID}; + std::vector exp_create_status{SAI_STATUS_TABLE_FULL}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 10, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_TABLE_FULL))); + // TODO: Expect critical state. 
+ EXPECT_EQ("Failed to delete WCMP group member: 'ju1u32m3.atl11:qe-3/7'", ProcessUpdateRequest(&wcmp_group).message()); // WCMP group is as expected, but refcounts are not VerifyWcmpGroupEntry(expected_wcmp_group, *GetWcmpGroupEntry(kWcmpGroupId1)); @@ -797,15 +975,27 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupFailsWhenCreateNewGroupMemberSaiCallFails wcmp_group.wcmp_group_members.clear(); std::shared_ptr gm = createWcmpGroupMemberEntry(kNexthopId2, 15); wcmp_group.wcmp_group_members.push_back(gm); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid2))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 15, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid5), Return(SAI_STATUS_SUCCESS))); + std::vector exp_remove_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid2}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids_5{kWcmpGroupMemberOid5}; + std::vector exp_create_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + 
GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 15, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_5.begin(), return_oids_5.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_TRUE(ProcessUpdateRequest(&wcmp_group).ok()); VerifyWcmpGroupEntry(wcmp_group, *GetWcmpGroupEntry(kWcmpGroupId1)); uint32_t wcmp_group_refcount = 0; @@ -828,33 +1018,47 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupFailsWhenCreateNewGroupMemberSaiCallFails updated_wcmp_group.wcmp_group_members.push_back(updated_gm1); updated_wcmp_group.wcmp_group_members.push_back(updated_gm2); updated_wcmp_group.wcmp_group_members.push_back(updated_gm3); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid5))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 3, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 20, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid2), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid3, 30, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_TABLE_FULL)); + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid5}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), 
Return(SAI_STATUS_SUCCESS))); + std::vector return_oids_1{kWcmpGroupMemberOid1}; + std::vector exp_create_status_fail{SAI_STATUS_SUCCESS, SAI_STATUS_TABLE_FULL}; + std::vector return_oids_2_null{kWcmpGroupMemberOid2, SAI_NULL_OBJECT_ID}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 3, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_1.begin(), return_oids_1.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(2), ArrayEq(std::vector{3, 3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 20, kWcmpGroupOid1), + GetSaiNextHopGroupMemberAttribute(kNexthopOid3, 30, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_2_null.begin(), return_oids_2_null.end()), + SetArrayArgument<6>(exp_create_status_fail.begin(), exp_create_status_fail.end()), + Return(SAI_STATUS_TABLE_FULL))); // Clean up - success - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid2))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 15, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid5), Return(SAI_STATUS_SUCCESS))); + std::vector exp_remove_status_2{SAI_STATUS_SUCCESS, SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + 
remove_next_hop_group_members( + Eq(2), ArrayEq(std::vector{kWcmpGroupMemberOid2, kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<3>(exp_remove_status_2.begin(), exp_remove_status_2.end()), + Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 15, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_5.begin(), return_oids_5.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_FALSE(ProcessUpdateRequest(&updated_wcmp_group).ok()); P4WcmpGroupEntry expected_wcmp_group = {.wcmp_group_id = kWcmpGroupId1, .wcmp_group_members = {}}; std::shared_ptr expected_gm = createWcmpGroupMemberEntry(kNexthopId2, 15); @@ -870,44 +1074,48 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupFailsWhenCreateNewGroupMemberSaiCallFails EXPECT_EQ(0, nexthop_refcount); // Try again, but this time clean up failed to remove created group member - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid5))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 3, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 20, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid2), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - 
create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid3, 30, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_TABLE_FULL)); + exp_remove_status = {SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid5}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 3, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_1.begin(), return_oids_1.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(2), ArrayEq(std::vector{3, 3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 20, kWcmpGroupOid1), + GetSaiNextHopGroupMemberAttribute(kNexthopOid3, 30, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_2_null.begin(), return_oids_2_null.end()), + SetArrayArgument<6>(exp_create_status_fail.begin(), exp_create_status_fail.end()), + Return(SAI_STATUS_TABLE_FULL))); // Clean up - revert creation - failure - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 15, - kWcmpGroupOid1, std::placeholders::_1)))) - 
.WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid5), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid2))) - .WillOnce(Return(SAI_STATUS_OBJECT_IN_USE)); - // (TODO): Expect critical state. - EXPECT_EQ("Failed to create next hop group member 'ju1u32m3.atl11:qe-3/7'", + std::vector exp_remove_status_fail{SAI_STATUS_OBJECT_IN_USE, SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members( + Eq(2), ArrayEq(std::vector{kWcmpGroupMemberOid2, kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce(DoAll(SetArrayArgument<3>(exp_remove_status_fail.begin(), exp_remove_status_fail.end()), + Return(SAI_STATUS_OBJECT_IN_USE))); + // TODO: Expect critical state. + EXPECT_EQ("Fail to create wcmp group member: 'ju1u32m3.atl11:qe-3/7'", ProcessUpdateRequest(&updated_wcmp_group).message()); // WCMP group is as expected, but refcounts are not VerifyWcmpGroupEntry(expected_wcmp_group, *GetWcmpGroupEntry(kWcmpGroupId1)); ASSERT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, kWcmpGroupKey1, &wcmp_group_refcount)); - EXPECT_EQ(2, wcmp_group_refcount); // Corrupt status due to clean up failure + EXPECT_EQ(1, wcmp_group_refcount); ASSERT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, &nexthop_refcount)); EXPECT_EQ(0, nexthop_refcount); // Corrupt status due to clean up failure ASSERT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey2, &nexthop_refcount)); - EXPECT_EQ(2, nexthop_refcount); + EXPECT_EQ(1, nexthop_refcount); ASSERT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey3, &nexthop_refcount)); EXPECT_EQ(0, nexthop_refcount); } @@ -923,18 +1131,32 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupFailsWhenReduceGroupMemberWeightSaiCallFa std::shared_ptr gm2 = createWcmpGroupMemberEntry(kNexthopId2, 10); wcmp_group.wcmp_group_members.push_back(gm1); 
wcmp_group.wcmp_group_members.push_back(gm2); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_NOT_SUPPORTED)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); + std::vector exp_remove_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids_1{kWcmpGroupMemberOid1}; + std::vector return_oids_null{SAI_NULL_OBJECT_ID}; + std::vector exp_create_status{SAI_STATUS_SUCCESS}; + std::vector exp_create_status_fail{SAI_STATUS_NOT_SUPPORTED}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_null.begin(), return_oids_null.end()), + SetArrayArgument<6>(exp_create_status_fail.begin(), exp_create_status_fail.end()), + Return(SAI_STATUS_NOT_SUPPORTED))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 
2, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_1.begin(), return_oids_1.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_NOT_SUPPORTED))); EXPECT_FALSE(ProcessUpdateRequest(&wcmp_group).ok()); P4WcmpGroupEntry expected_wcmp_group = {.wcmp_group_id = kWcmpGroupId1, .wcmp_group_members = {}}; std::shared_ptr expected_gm1 = createWcmpGroupMemberEntry(kNexthopId1, 2); @@ -963,33 +1185,54 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupFailsWhenIncreaseGroupMemberWeightSaiCall std::shared_ptr gm2 = createWcmpGroupMemberEntry(kNexthopId2, 10); wcmp_group.wcmp_group_members.push_back(gm1); wcmp_group.wcmp_group_members.push_back(gm2); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid2))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid4), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 10, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_NOT_SUPPORTED)); + std::vector exp_remove_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + 
remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid2}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids_4{kWcmpGroupMemberOid4}; + std::vector return_oids_null{SAI_NULL_OBJECT_ID}; + std::vector exp_create_status{SAI_STATUS_SUCCESS}; + std::vector exp_create_status_fail{SAI_STATUS_NOT_SUPPORTED}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_4.begin(), return_oids_4.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 10, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_null.begin(), return_oids_null.end()), + SetArrayArgument<6>(exp_create_status_fail.begin(), exp_create_status_fail.end()), + Return(SAI_STATUS_NOT_SUPPORTED))); // Clean up modified members - success - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid4))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - 
Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid2), Return(SAI_STATUS_SUCCESS))); + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid4}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids_1_2{kWcmpGroupMemberOid1, kWcmpGroupMemberOid2}; + std::vector exp_create_status_2{SAI_STATUS_SUCCESS, SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(2), ArrayEq(std::vector{3, 3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 2, kWcmpGroupOid1), + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_1_2.begin(), return_oids_1_2.end()), + SetArrayArgument<6>(exp_create_status_2.begin(), exp_create_status_2.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_FALSE(ProcessUpdateRequest(&wcmp_group).ok()); P4WcmpGroupEntry expected_wcmp_group = {.wcmp_group_id = kWcmpGroupId1, .wcmp_group_members = {}}; std::shared_ptr expected_gm1 = createWcmpGroupMemberEntry(kNexthopId1, 2); @@ -1006,38 +1249,52 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupFailsWhenIncreaseGroupMemberWeightSaiCall ASSERT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey2, &nexthop_refcount)); EXPECT_EQ(1, nexthop_refcount); // Try again, the same error happens when update and new error during clean up - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid2))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); 
EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid4), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 10, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_NOT_SUPPORTED)); + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid2}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_4.begin(), return_oids_4.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 10, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_null.begin(), return_oids_null.end()), + SetArrayArgument<6>(exp_create_status_fail.begin(), 
exp_create_status_fail.end()), + Return(SAI_STATUS_NOT_SUPPORTED))); // Clean up modified members - failure - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid4))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid2), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_NOT_SUPPORTED))); - - // (TODO): Expect critical state. - EXPECT_EQ("Failed to create next hop group member " - "'ju1u32m2.atl11:qe-3/7'", - ProcessUpdateRequest(&wcmp_group).message()); + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid4}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids_2_null{SAI_NULL_OBJECT_ID, kWcmpGroupMemberOid2}; + std::vector exp_create_status_2_fail{SAI_STATUS_NOT_SUPPORTED, SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(2), ArrayEq(std::vector{3, 3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 2, kWcmpGroupOid1), + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_2_null.begin(), return_oids_2_null.end()), + SetArrayArgument<6>(exp_create_status_2_fail.begin(), exp_create_status_2_fail.end()), + 
Return(SAI_STATUS_NOT_SUPPORTED))); + + // TODO: Expect critical state. + EXPECT_EQ("Fail to create wcmp group member: 'ju1u32m2.atl11:qe-3/7'", ProcessUpdateRequest(&wcmp_group).message()); // weight of wcmp_group_members[kNexthopId1] unable to revert // SAI object in ASIC DB: missing group member with // next_hop_id=kNexthopId1 @@ -1053,19 +1310,58 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupFailsWhenIncreaseGroupMemberWeightSaiCall TEST_F(WcmpManagerTest, ValidateWcmpGroupEntryFailsWhenNextHopDoesNotExist) { - P4WcmpGroupEntry app_db_entry = {.wcmp_group_id = kWcmpGroupId1, .wcmp_group_members = {}}; - std::shared_ptr gm = createWcmpGroupMemberEntry("Unregistered-Nexthop", 1); - app_db_entry.wcmp_group_members.push_back(gm); - EXPECT_EQ(StatusCode::SWSS_RC_NOT_FOUND, ProcessAddRequest(&app_db_entry)); + const std::string kKeyPrefix = std::string(APP_P4RT_WCMP_GROUP_TABLE_NAME) + kTableKeyDelimiter; + p4_oid_mapper_->setOID(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, kNexthopOid1); + nlohmann::json j; + j[prependMatchField(p4orch::kWcmpGroupId)] = kWcmpGroupId1; + std::vector attributes; + nlohmann::json actions; + nlohmann::json action; + action[p4orch::kAction] = p4orch::kSetNexthopId; + action[prependParamField(p4orch::kNexthopId)] = kNexthopId1; + actions.push_back(action); + action[prependParamField(p4orch::kNexthopId)] = kNexthopId2; + actions.push_back(action); + attributes.push_back(swss::FieldValueTuple{p4orch::kActions, actions.dump()}); + Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), SET_COMMAND, attributes)); + Drain(); + std::string key = KeyGenerator::generateWcmpGroupKey(kWcmpGroupId1); + auto *wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); + EXPECT_EQ(nullptr, wcmp_group_entry_ptr); + EXPECT_FALSE(p4_oid_mapper_->existsOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, key)); + uint32_t ref_cnt; + EXPECT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, &ref_cnt)); + EXPECT_EQ(0, ref_cnt); } TEST_F(WcmpManagerTest, 
ValidateWcmpGroupEntryFailsWhenWeightLessThanOne) { - P4WcmpGroupEntry app_db_entry = {.wcmp_group_id = kWcmpGroupId1, .wcmp_group_members = {}}; - std::shared_ptr gm = createWcmpGroupMemberEntry(kNexthopId1, 0); - app_db_entry.wcmp_group_members.push_back(gm); + const std::string kKeyPrefix = std::string(APP_P4RT_WCMP_GROUP_TABLE_NAME) + kTableKeyDelimiter; p4_oid_mapper_->setOID(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, kNexthopOid1); - EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ProcessAddRequest(&app_db_entry)); + p4_oid_mapper_->setOID(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey2, kNexthopOid2); + nlohmann::json j; + j[prependMatchField(p4orch::kWcmpGroupId)] = kWcmpGroupId1; + std::vector attributes; + nlohmann::json actions; + nlohmann::json action; + action[p4orch::kAction] = p4orch::kSetNexthopId; + action[prependParamField(p4orch::kNexthopId)] = kNexthopId1; + actions.push_back(action); + action[p4orch::kWeight] = -1; + action[prependParamField(p4orch::kNexthopId)] = kNexthopId2; + actions.push_back(action); + attributes.push_back(swss::FieldValueTuple{p4orch::kActions, actions.dump()}); + Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), SET_COMMAND, attributes)); + Drain(); + std::string key = KeyGenerator::generateWcmpGroupKey(kWcmpGroupId1); + auto *wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); + EXPECT_EQ(nullptr, wcmp_group_entry_ptr); + EXPECT_FALSE(p4_oid_mapper_->existsOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, key)); + uint32_t ref_cnt; + EXPECT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, &ref_cnt)); + EXPECT_EQ(0, ref_cnt); + EXPECT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey2, &ref_cnt)); + EXPECT_EQ(0, ref_cnt); } TEST_F(WcmpManagerTest, WcmpGroupInvalidOperationInDrainFails) @@ -1138,16 +1434,17 @@ TEST_F(WcmpManagerTest, WcmpGroupCreateAndDeleteInDrainSucceeds) EXPECT_CALL(mock_sai_next_hop_group_, create_next_hop_group(_, _, _, _)) 
.WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid2), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids{kWcmpGroupMemberOid1, kWcmpGroupMemberOid2}; + std::vector exp_create_status{SAI_STATUS_SUCCESS, SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(2), ArrayEq(std::vector{3, 3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 1, kWcmpGroupOid1), + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); Drain(); std::string key = KeyGenerator::generateWcmpGroupKey(kWcmpGroupId1); auto *wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); @@ -1159,10 +1456,13 @@ TEST_F(WcmpManagerTest, WcmpGroupCreateAndDeleteInDrainSucceeds) EXPECT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey2, &ref_cnt)); EXPECT_EQ(1, ref_cnt); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid2))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); + std::vector 
exp_remove_status{SAI_STATUS_SUCCESS, SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members( + Eq(2), ArrayEq(std::vector{kWcmpGroupMemberOid2, kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group(Eq(kWcmpGroupOid1))) .WillOnce(Return(SAI_STATUS_SUCCESS)); attributes.clear(); @@ -1196,11 +1496,16 @@ TEST_F(WcmpManagerTest, WcmpGroupCreateAndUpdateInDrainSucceeds) Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), SET_COMMAND, attributes)); EXPECT_CALL(mock_sai_next_hop_group_, create_next_hop_group(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids{kWcmpGroupMemberOid1}; + std::vector exp_create_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); Drain(); std::string key = KeyGenerator::generateWcmpGroupKey(kWcmpGroupId1); auto *wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); @@ -1216,13 +1521,22 @@ TEST_F(WcmpManagerTest, WcmpGroupCreateAndUpdateInDrainSucceeds) // Update WCMP group with exact same members, the same 
entry will be removed // and created again Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), SET_COMMAND, attributes)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid3), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); + return_oids = {kWcmpGroupMemberOid3}; + exp_create_status = {SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); + std::vector exp_remove_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); Drain(); wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); EXPECT_NE(nullptr, wcmp_group_entry_ptr); @@ -1240,13 +1554,22 @@ TEST_F(WcmpManagerTest, WcmpGroupCreateAndUpdateInDrainSucceeds) attributes.clear(); attributes.push_back(swss::FieldValueTuple{p4orch::kActions, actions.dump()}); Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), SET_COMMAND, attributes)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, 
kNexthopOid2, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid2), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid3))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); + return_oids = {kWcmpGroupMemberOid2}; + exp_create_status = {SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); + exp_remove_status = {SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid3}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); Drain(); wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); EXPECT_NE(nullptr, wcmp_group_entry_ptr); @@ -1265,13 +1588,22 @@ TEST_F(WcmpManagerTest, WcmpGroupCreateAndUpdateInDrainSucceeds) attributes.clear(); attributes.push_back(swss::FieldValueTuple{p4orch::kActions, actions.dump()}); Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), SET_COMMAND, attributes)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 2, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid4), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid2))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); + 
return_oids = {kWcmpGroupMemberOid4}; + exp_create_status = {SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 2, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); + exp_remove_status = {SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid2}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); Drain(); wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); EXPECT_NE(nullptr, wcmp_group_entry_ptr); @@ -1403,13 +1735,27 @@ TEST_F(WcmpManagerTest, DeserializeWcmpGroupFailsWithUndefinedAttributes) TEST_F(WcmpManagerTest, ValidateWcmpGroupEntryWithInvalidWatchportAttributeFails) { - P4WcmpGroupEntry app_db_entry = {.wcmp_group_id = kWcmpGroupId1, .wcmp_group_members = {}}; - std::shared_ptr gm = - createWcmpGroupMemberEntryWithWatchport(kNexthopId1, 1, "EthernetXX", kWcmpGroupId1, kNexthopOid1); - app_db_entry.wcmp_group_members.push_back(gm); - EXPECT_EQ(StatusCode::SWSS_RC_INVALID_PARAM, ProcessAddRequest(&app_db_entry)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(gm, false, 0)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(gm, false, 0)); + const std::string kKeyPrefix = std::string(APP_P4RT_WCMP_GROUP_TABLE_NAME) + kTableKeyDelimiter; + p4_oid_mapper_->setOID(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, kNexthopOid1); + nlohmann::json j; + j[prependMatchField(p4orch::kWcmpGroupId)] = kWcmpGroupId1; + std::vector attributes; + nlohmann::json actions; + nlohmann::json action; + 
action[p4orch::kAction] = p4orch::kSetNexthopId; + action[p4orch::kWatchPort] = "EthernetXX"; + action[prependParamField(p4orch::kNexthopId)] = kNexthopId1; + actions.push_back(action); + attributes.push_back(swss::FieldValueTuple{p4orch::kActions, actions.dump()}); + Enqueue(swss::KeyOpFieldsValuesTuple(kKeyPrefix + j.dump(), SET_COMMAND, attributes)); + Drain(); + std::string key = KeyGenerator::generateWcmpGroupKey(kWcmpGroupId1); + auto *wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); + EXPECT_EQ(nullptr, wcmp_group_entry_ptr); + EXPECT_FALSE(p4_oid_mapper_->existsOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, key)); + uint32_t ref_cnt; + EXPECT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, &ref_cnt)); + EXPECT_EQ(0, ref_cnt); } TEST_F(WcmpManagerTest, PruneNextHopSucceeds) @@ -1418,13 +1764,13 @@ TEST_F(WcmpManagerTest, PruneNextHopSucceeds) std::string port_name = "Ethernet6"; P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name, true); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) .WillOnce(Return(SAI_STATUS_SUCCESS)); // Prune next hops associated with port PruneNextHops(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); } TEST_F(WcmpManagerTest, PruneNextHopFailsWithNextHopRemovalFailure) @@ -1433,13 +1779,14 @@ TEST_F(WcmpManagerTest, PruneNextHopFailsWithNextHopRemovalFailure) std::string port_name = "Ethernet6"; P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name, true); 
EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) .WillOnce(Return(SAI_STATUS_FAILURE)); + // TODO: Expect critical state. // Prune next hops associated with port (fails) PruneNextHops(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); } TEST_F(WcmpManagerTest, RestorePrunedNextHopSucceeds) @@ -1450,7 +1797,7 @@ TEST_F(WcmpManagerTest, RestorePrunedNextHopSucceeds) std::string port_name = "Ethernet1"; P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); EXPECT_CALL(mock_sai_next_hop_group_, create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, @@ -1460,7 +1807,7 @@ TEST_F(WcmpManagerTest, RestorePrunedNextHopSucceeds) // Restore next hops associated with port RestorePrunedNextHops(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); } TEST_F(WcmpManagerTest, RestorePrunedNextHopFailsWithNoOidMappingForWcmpGroup) @@ -1471,12 +1818,12 @@ TEST_F(WcmpManagerTest, RestorePrunedNextHopFailsWithNoOidMappingForWcmpGroup) std::string port_name = "Ethernet1"; 
P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); p4_oid_mapper_->eraseOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, KeyGenerator::generateWcmpGroupKey(kWcmpGroupId1)); - // (TODO): Expect critical state. + // TODO: Expect critical state. RestorePrunedNextHops(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); } TEST_F(WcmpManagerTest, RestorePrunedNextHopFailsWithNextHopCreationFailure) @@ -1487,16 +1834,16 @@ TEST_F(WcmpManagerTest, RestorePrunedNextHopFailsWithNextHopCreationFailure) std::string port_name = "Ethernet1"; P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); EXPECT_CALL(mock_sai_next_hop_group_, create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, kWcmpGroupOid1, std::placeholders::_1)))) .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_FAILURE))); - // (TODO): Expect critical state. + // TODO: Expect critical state. 
RestorePrunedNextHops(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); } TEST_F(WcmpManagerTest, CreateGroupWithWatchportFailsWithNextHopCreationFailure) @@ -1515,26 +1862,31 @@ TEST_F(WcmpManagerTest, CreateGroupWithWatchportFailsWithNextHopCreationFailure) create_next_hop_group(_, Eq(gSwitchId), Eq(1), Truly(std::bind(MatchSaiNextHopGroupAttribute, std::placeholders::_1)))) .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_FAILURE)); + std::vector return_oids{kWcmpGroupMemberOid1, SAI_NULL_OBJECT_ID}; + std::vector exp_create_status{SAI_STATUS_SUCCESS, SAI_STATUS_FAILURE}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(2), ArrayEq(std::vector{3, 3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 1, kWcmpGroupOid1), + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_FAILURE))); // Clean up created members - EXPECT_CALL(mock_sai_next_hop_group_, 
remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); + std::vector exp_remove_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group(Eq(kWcmpGroupOid1))) .WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessAddRequest(&app_db_entry)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(gm1, false, 0)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(gm2, false, 0)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(gm1, false, 0)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(gm2, false, 0)); + EXPECT_FALSE(gm1->pruned); + EXPECT_FALSE(gm2->pruned); } TEST_F(WcmpManagerTest, RemoveWcmpGroupAfterPruningSucceeds) @@ -1543,12 +1895,12 @@ TEST_F(WcmpManagerTest, RemoveWcmpGroupAfterPruningSucceeds) std::string port_name = "Ethernet6"; P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name, true); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) .WillOnce(Return(SAI_STATUS_SUCCESS)); PruneNextHops(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); // Remove Wcmp group. No SAI call for member removal is expected as it is // already pruned. 
@@ -1556,7 +1908,6 @@ TEST_F(WcmpManagerTest, RemoveWcmpGroupAfterPruningSucceeds) .WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, RemoveWcmpGroup(kWcmpGroupId1)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], false, 0)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); } TEST_F(WcmpManagerTest, RemoveWcmpGroupWithOperationallyDownWatchportSucceeds) @@ -1566,7 +1917,7 @@ TEST_F(WcmpManagerTest, RemoveWcmpGroupWithOperationallyDownWatchportSucceeds) // directly added to the pruned set of WCMP group members. P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport("Ethernet1"); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); // Remove Wcmp group. No SAI call for member removal is expected as it is // already pruned. @@ -1574,7 +1925,103 @@ TEST_F(WcmpManagerTest, RemoveWcmpGroupWithOperationallyDownWatchportSucceeds) .WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, RemoveWcmpGroup(kWcmpGroupId1)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], false, 0)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); +} + +TEST_F(WcmpManagerTest, RemoveNextHopWithPrunedMember) +{ + // Add member with operationally down watch port. Since associated watchport + // is operationally down, member will not be created in SAI but will be + // directly added to the pruned set of WCMP group members. 
+ P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport("Ethernet1"); + EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); + + // Verify that next hop reference count is incremented due to the member. + uint32_t ref_cnt; + EXPECT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, &ref_cnt)); + EXPECT_EQ(1, ref_cnt); + + // Remove Wcmp group. No SAI call for member removal is expected as it is + // already pruned. + EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group(Eq(kWcmpGroupOid1))) + .WillOnce(Return(SAI_STATUS_SUCCESS)); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, RemoveWcmpGroup(kWcmpGroupId1)); + EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], false, 0)); + + // Verify that the next hop reference count is now 0. + EXPECT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, &ref_cnt)); + EXPECT_EQ(0, ref_cnt); +} + +TEST_F(WcmpManagerTest, RemoveNextHopWithRestoredPrunedMember) +{ + // Add member with operationally down watch port. Since associated watchport + // is operationally down, member will not be created in SAI but will be + // directly added to the pruned set of WCMP group members. + std::string port_name = "Ethernet1"; + P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name); + EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); + + // Verify that next hop reference count is incremented due to the member. + uint32_t ref_cnt; + EXPECT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, &ref_cnt)); + EXPECT_EQ(1, ref_cnt); + + // Restore member associated with port. 
+ EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), + Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, + kWcmpGroupOid1, std::placeholders::_1)))) + .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); + RestorePrunedNextHops(port_name); + EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); + + // Verify that next hop reference count remains the same after restore. + EXPECT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, &ref_cnt)); + EXPECT_EQ(1, ref_cnt); + + // Remove Wcmp group. + std::vector exp_remove_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group(Eq(kWcmpGroupOid1))) + .WillOnce(Return(SAI_STATUS_SUCCESS)); + EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, RemoveWcmpGroup(kWcmpGroupId1)); + EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], false, 0)); + + // Verify that the next hop reference count is now 0. + EXPECT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, &ref_cnt)); + EXPECT_EQ(0, ref_cnt); +} + +TEST_F(WcmpManagerTest, VerifyNextHopRefCountWhenMemberPruned) +{ + // Add member with operationally up watch port + std::string port_name = "Ethernet6"; + P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name, true); + EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); + + // Verify that next hop reference count is incremented due to the member. 
+ uint32_t ref_cnt; + EXPECT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, &ref_cnt)); + EXPECT_EQ(1, ref_cnt); + + // Prune member associated with port. + EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) + .WillOnce(Return(SAI_STATUS_SUCCESS)); + PruneNextHops(port_name); + EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); + + // Verify that next hop reference count does not change on pruning. + EXPECT_TRUE(p4_oid_mapper_->getRefCount(SAI_OBJECT_TYPE_NEXT_HOP, kNexthopKey1, &ref_cnt)); + EXPECT_EQ(1, ref_cnt); } TEST_F(WcmpManagerTest, UpdateWcmpGroupWithOperationallyUpWatchportMemberSucceeds) @@ -1583,7 +2030,7 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupWithOperationallyUpWatchportMemberSucceed std::string port_name = "Ethernet6"; P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name, true); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); // Update WCMP group to remove kNexthopId1 and add kNexthopId2 P4WcmpGroupEntry updated_app_db_entry; @@ -1591,18 +2038,27 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupWithOperationallyUpWatchportMemberSucceed std::shared_ptr updated_gm = createWcmpGroupMemberEntryWithWatchport(kNexthopId2, 1, port_name, kWcmpGroupId1, kNexthopOid2); updated_app_db_entry.wcmp_group_members.push_back(updated_gm); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid2), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, 
remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); + std::vector return_oids{kWcmpGroupMemberOid2}; + std::vector exp_create_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); + std::vector exp_remove_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessUpdateRequest(&updated_app_db_entry)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], false, 1)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(updated_gm, true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(updated_gm, false, 0)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); + EXPECT_FALSE(updated_gm->pruned); } TEST_F(WcmpManagerTest, UpdateWcmpGroupWithOperationallyDownWatchportMemberSucceeds) @@ -1613,7 +2069,7 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupWithOperationallyDownWatchportMemberSucce std::string port_name = "Ethernet1"; P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + 
EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); // Update WCMP group to remove kNexthopId1 and add kNexthopId2. No SAI calls // are expected as the associated watch port is operationally down. @@ -1625,8 +2081,7 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupWithOperationallyDownWatchportMemberSucce EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessUpdateRequest(&updated_app_db_entry)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], false, 1)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(updated_gm, true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(updated_gm, true, 1)); + EXPECT_TRUE(updated_gm->pruned); } TEST_F(WcmpManagerTest, PruneAfterWcmpGroupUpdateSucceeds) @@ -1635,7 +2090,7 @@ TEST_F(WcmpManagerTest, PruneAfterWcmpGroupUpdateSucceeds) std::string port_name = "Ethernet6"; P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name, true); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); // Update WCMP group to modify weight of kNexthopId1. 
P4WcmpGroupEntry updated_app_db_entry; @@ -1643,25 +2098,33 @@ TEST_F(WcmpManagerTest, PruneAfterWcmpGroupUpdateSucceeds) std::shared_ptr updated_gm = createWcmpGroupMemberEntryWithWatchport(kNexthopId1, 10, port_name, kWcmpGroupId1, kNexthopOid1); updated_app_db_entry.wcmp_group_members.push_back(updated_gm); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 10, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); + std::vector exp_remove_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids{kWcmpGroupMemberOid1}; + std::vector exp_create_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 10, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids.begin(), return_oids.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessUpdateRequest(&updated_app_db_entry)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], false, 1)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(updated_app_db_entry.wcmp_group_members[0], true, 1)); - 
EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(updated_app_db_entry.wcmp_group_members[0], false, 0)); + EXPECT_FALSE(updated_app_db_entry.wcmp_group_members[0]->pruned); // Prune members associated with port. EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) .WillOnce(Return(SAI_STATUS_SUCCESS)); PruneNextHops(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(updated_app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(updated_app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(updated_app_db_entry.wcmp_group_members[0]->pruned); // Remove Wcmp group. No SAI call for member removal is expected as it is // already pruned. @@ -1672,7 +2135,6 @@ TEST_F(WcmpManagerTest, PruneAfterWcmpGroupUpdateSucceeds) .WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, RemoveWcmpGroup(kWcmpGroupId1)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(updated_app_db_entry.wcmp_group_members[0], false, 0)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(updated_app_db_entry.wcmp_group_members[0], false, 0)); } TEST_F(WcmpManagerTest, PrunedMemberUpdateOnRestoreSucceeds) @@ -1683,7 +2145,7 @@ TEST_F(WcmpManagerTest, PrunedMemberUpdateOnRestoreSucceeds) std::string port_name = "Ethernet1"; P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); // Update WCMP group to modify weight of kNexthopId1. 
P4WcmpGroupEntry updated_app_db_entry; @@ -1694,8 +2156,7 @@ TEST_F(WcmpManagerTest, PrunedMemberUpdateOnRestoreSucceeds) EXPECT_EQ(StatusCode::SWSS_RC_SUCCESS, ProcessUpdateRequest(&updated_app_db_entry)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], false, 1)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(updated_app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(updated_app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(updated_app_db_entry.wcmp_group_members[0]->pruned); // Restore members associated with port. // Verify that the weight of the restored member is updated. @@ -1706,7 +2167,7 @@ TEST_F(WcmpManagerTest, PrunedMemberUpdateOnRestoreSucceeds) .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); RestorePrunedNextHops(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(updated_app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(updated_app_db_entry.wcmp_group_members[0], false, 0)); + EXPECT_FALSE(updated_app_db_entry.wcmp_group_members[0]->pruned); } TEST_F(WcmpManagerTest, UpdateWcmpGroupWithOperationallyUpWatchportMemberFailsWithMemberRemovalFailure) @@ -1715,7 +2176,7 @@ TEST_F(WcmpManagerTest, UpdateWcmpGroupWithOperationallyUpWatchportMemberFailsWi std::string port_name = "Ethernet6"; P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name, true); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); // Update WCMP group to remove kNexthopId1(fails) and add kNexthopId2 P4WcmpGroupEntry updated_app_db_entry; @@ -1726,61 +2187,96 @@ TEST_F(WcmpManagerTest, 
UpdateWcmpGroupWithOperationallyUpWatchportMemberFailsWi createWcmpGroupMemberEntryWithWatchport(kNexthopId1, 1, port_name, kWcmpGroupId1, kNexthopOid1); updated_app_db_entry.wcmp_group_members.push_back(updated_gm1); updated_app_db_entry.wcmp_group_members.push_back(updated_gm2); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid4), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 10, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_INSUFFICIENT_RESOURCES)); + std::vector exp_remove_status{SAI_STATUS_SUCCESS}; + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + std::vector return_oids_4{kWcmpGroupMemberOid4}; + std::vector return_oids_null{SAI_NULL_OBJECT_ID}; + std::vector exp_create_status{SAI_STATUS_SUCCESS}; + std::vector exp_create_status_fail{SAI_STATUS_INSUFFICIENT_RESOURCES}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_4.begin(), return_oids_4.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + 
Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 10, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_null.begin(), return_oids_null.end()), + SetArrayArgument<6>(exp_create_status_fail.begin(), exp_create_status_fail.end()), + Return(SAI_STATUS_INSUFFICIENT_RESOURCES))); // Clean up created member-succeeds - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid4))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); - EXPECT_EQ(StatusCode::SWSS_RC_FULL, ProcessUpdateRequest(&updated_app_db_entry)); + std::vector return_oids_1{kWcmpGroupMemberOid1}; + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 2, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_1.begin(), return_oids_1.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid4}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_EQ(StatusCode::SWSS_RC_UNKNOWN, ProcessUpdateRequest(&updated_app_db_entry)); 
EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(updated_gm2, false, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(updated_gm2, false, 0)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); + EXPECT_FALSE(updated_gm2->pruned); // Update again, this time clean up fails - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid1))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 1, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid4), Return(SAI_STATUS_SUCCESS))); - EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid2, 10, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(Return(SAI_STATUS_INSUFFICIENT_RESOURCES)); + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid1}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 1, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_4.begin(), return_oids_4.end()), + SetArrayArgument<6>(exp_create_status.begin(), exp_create_status.end()), + Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), 
ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid2, 10, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_null.begin(), return_oids_null.end()), + SetArrayArgument<6>(exp_create_status_fail.begin(), exp_create_status_fail.end()), + Return(SAI_STATUS_INSUFFICIENT_RESOURCES))); // Clean up created member(fails) - EXPECT_CALL(mock_sai_next_hop_group_, remove_next_hop_group_member(Eq(kWcmpGroupMemberOid4))) - .WillOnce(Return(SAI_STATUS_SUCCESS)); EXPECT_CALL(mock_sai_next_hop_group_, - create_next_hop_group_member(_, Eq(gSwitchId), Eq(3), - Truly(std::bind(MatchSaiNextHopGroupMemberAttribute, kNexthopOid1, 2, - kWcmpGroupOid1, std::placeholders::_1)))) - .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_INSUFFICIENT_RESOURCES))); - // (TODO): Expect critical state. - EXPECT_EQ("Failed to create next hop group member " - "'ju1u32m2.atl11:qe-3/7'", + remove_next_hop_group_members(Eq(1), ArrayEq(std::vector{kWcmpGroupMemberOid4}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _)) + .WillOnce( + DoAll(SetArrayArgument<3>(exp_remove_status.begin(), exp_remove_status.end()), Return(SAI_STATUS_SUCCESS))); + EXPECT_CALL(mock_sai_next_hop_group_, + create_next_hop_group_members(Eq(gSwitchId), Eq(1), ArrayEq(std::vector{3}), + AttrArrayArrayEq(std::vector>{ + GetSaiNextHopGroupMemberAttribute(kNexthopOid1, 2, kWcmpGroupOid1)}), + Eq(SAI_BULK_OP_ERROR_MODE_STOP_ON_ERROR), _, _)) + .WillOnce(DoAll(SetArrayArgument<5>(return_oids_null.begin(), return_oids_null.end()), + SetArrayArgument<6>(exp_create_status_fail.begin(), exp_create_status_fail.end()), + Return(SAI_STATUS_INSUFFICIENT_RESOURCES))); + // TODO: Expect critical state. 
+ EXPECT_EQ("Fail to create wcmp group member: 'ju1u32m2.atl11:qe-3/7'", ProcessUpdateRequest(&updated_app_db_entry).message()); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], false, 0)); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(updated_gm2, false, 0)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(updated_gm2, false, 0)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); + EXPECT_FALSE(updated_gm2->pruned); } TEST_F(WcmpManagerTest, WatchportStateChangetoOperDownSucceeds) @@ -1789,7 +2285,7 @@ TEST_F(WcmpManagerTest, WatchportStateChangetoOperDownSucceeds) std::string port_name = "Ethernet6"; P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name, true); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); // Send port down signal // Verify that the next hop member associated with the port is pruned. 
@@ -1800,7 +2296,7 @@ TEST_F(WcmpManagerTest, WatchportStateChangetoOperDownSucceeds) .WillOnce(Return(SAI_STATUS_SUCCESS)); HandlePortStatusChangeNotification(op, data); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); } TEST_F(WcmpManagerTest, WatchportStateChangeToOperUpSucceeds) @@ -1811,7 +2307,7 @@ TEST_F(WcmpManagerTest, WatchportStateChangeToOperUpSucceeds) std::string port_name = "Ethernet1"; P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); // Send port up signal. // Verify that the pruned next hop member associated with the port is @@ -1826,7 +2322,7 @@ TEST_F(WcmpManagerTest, WatchportStateChangeToOperUpSucceeds) .WillOnce(DoAll(SetArgPointee<0>(kWcmpGroupMemberOid1), Return(SAI_STATUS_SUCCESS))); HandlePortStatusChangeNotification(op, data); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], false, 0)); + EXPECT_FALSE(app_db_entry.wcmp_group_members[0]->pruned); } TEST_F(WcmpManagerTest, WatchportStateChangeFromOperUnknownToDownPrunesMemberOnlyOnceSuceeds) @@ -1837,7 +2333,7 @@ TEST_F(WcmpManagerTest, WatchportStateChangeFromOperUnknownToDownPrunesMemberOnl std::string port_name = "Ethernet1"; P4WcmpGroupEntry app_db_entry = AddWcmpGroupEntryWithWatchport(port_name); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + 
EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); // Send port down signal. // Verify that the pruned next hop member is not pruned again. @@ -1846,7 +2342,180 @@ TEST_F(WcmpManagerTest, WatchportStateChangeFromOperUnknownToDownPrunesMemberOnl "STATUS_DOWN\"}]"; HandlePortStatusChangeNotification(op, data); EXPECT_TRUE(VerifyWcmpGroupMemberInPortMap(app_db_entry.wcmp_group_members[0], true, 1)); - EXPECT_TRUE(VerifyWcmpGroupMemberInPrunedSet(app_db_entry.wcmp_group_members[0], true, 1)); + EXPECT_TRUE(app_db_entry.wcmp_group_members[0]->pruned); +} + +TEST_F(WcmpManagerTest, VerifyStateTest) +{ + AddWcmpGroupEntryWithWatchport("Ethernet6", true); + nlohmann::json j; + j[prependMatchField(p4orch::kWcmpGroupId)] = kWcmpGroupId1; + const std::string db_key = std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + APP_P4RT_WCMP_GROUP_TABLE_NAME + + kTableKeyDelimiter + j.dump(); + std::vector attributes; + + // Setup ASIC DB. + swss::Table table(nullptr, "ASIC_STATE"); + table.set("SAI_OBJECT_TYPE_NEXT_HOP_GROUP:oid:0xa", + std::vector{swss::FieldValueTuple{ + "SAI_NEXT_HOP_GROUP_ATTR_TYPE", "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP"}}); + table.set("SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER:oid:0xb", + std::vector{ + swss::FieldValueTuple{"SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID", "oid:0xa"}, + swss::FieldValueTuple{"SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID", "oid:0x1"}, + swss::FieldValueTuple{"SAI_NEXT_HOP_GROUP_MEMBER_ATTR_WEIGHT", "2"}}); + + // Verification should succeed with vaild key and value. + nlohmann::json actions; + nlohmann::json action; + action[p4orch::kAction] = p4orch::kSetNexthopId; + action[p4orch::kWeight] = 2; + action[p4orch::kWatchPort] = "Ethernet6"; + action[prependParamField(p4orch::kNexthopId)] = kNexthopId1; + actions.push_back(action); + attributes.push_back(swss::FieldValueTuple{p4orch::kActions, actions.dump()}); + EXPECT_EQ(VerifyState(db_key, attributes), ""); + + // Invalid key should fail verification. 
+ EXPECT_FALSE(VerifyState("invalid", attributes).empty()); + EXPECT_FALSE(VerifyState("invalid:invalid", attributes).empty()); + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + ":invalid", attributes).empty()); + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + ":invalid:invalid", attributes).empty()); + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + ":FIXED_WCMP_GROUP_TABLE:invalid", attributes).empty()); + + // Non-existing entry should fail verification. + j[prependMatchField(p4orch::kWcmpGroupId)] = kWcmpGroupId2; + EXPECT_FALSE(VerifyState(std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + APP_P4RT_WCMP_GROUP_TABLE_NAME + + kTableKeyDelimiter + j.dump(), + attributes) + .empty()); + + // Non-existing nexthop should fail verification. + actions.clear(); + attributes.clear(); + action[prependParamField(p4orch::kNexthopId)] = "invalid"; + actions.push_back(action); + attributes.push_back(swss::FieldValueTuple{p4orch::kActions, actions.dump()}); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + actions.clear(); + attributes.clear(); + action[p4orch::kAction] = p4orch::kSetNexthopId; + action[p4orch::kWeight] = 2; + action[p4orch::kWatchPort] = "Ethernet6"; + action[prependParamField(p4orch::kNexthopId)] = kNexthopId1; + actions.push_back(action); + attributes.push_back(swss::FieldValueTuple{p4orch::kActions, actions.dump()}); + + auto *wcmp_group_entry_ptr = GetWcmpGroupEntry(kWcmpGroupId1); + EXPECT_NE(nullptr, wcmp_group_entry_ptr); + + // Verification should fail if WCMP group ID mismatches. + auto saved_wcmp_group_id = wcmp_group_entry_ptr->wcmp_group_id; + wcmp_group_entry_ptr->wcmp_group_id = kWcmpGroupId2; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + wcmp_group_entry_ptr->wcmp_group_id = saved_wcmp_group_id; + + // Verification should fail if WCMP group ID mismatches. 
+ auto saved_wcmp_group_oid = wcmp_group_entry_ptr->wcmp_group_oid; + wcmp_group_entry_ptr->wcmp_group_oid = 1111; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + wcmp_group_entry_ptr->wcmp_group_oid = saved_wcmp_group_oid; + + // Verification should fail if group size mismatches. + wcmp_group_entry_ptr->wcmp_group_members.push_back(std::make_shared()); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + wcmp_group_entry_ptr->wcmp_group_members.pop_back(); + + // Verification should fail if member nexthop ID mismatches. + auto saved_next_hop_id = wcmp_group_entry_ptr->wcmp_group_members[0]->next_hop_id; + wcmp_group_entry_ptr->wcmp_group_members[0]->next_hop_id = kNexthopId3; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + wcmp_group_entry_ptr->wcmp_group_members[0]->next_hop_id = saved_next_hop_id; + + // Verification should fail if member weight mismatches. + auto saved_weight = wcmp_group_entry_ptr->wcmp_group_members[0]->weight; + wcmp_group_entry_ptr->wcmp_group_members[0]->weight = 3; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + wcmp_group_entry_ptr->wcmp_group_members[0]->weight = saved_weight; + + // Verification should fail if member watch port mismatches. + auto saved_watch_port = wcmp_group_entry_ptr->wcmp_group_members[0]->watch_port; + wcmp_group_entry_ptr->wcmp_group_members[0]->watch_port = "invalid"; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + wcmp_group_entry_ptr->wcmp_group_members[0]->watch_port = saved_watch_port; + + // Verification should fail if member WCMP group ID mismatches. + auto saved_member_wcmp_group_id = wcmp_group_entry_ptr->wcmp_group_members[0]->wcmp_group_id; + wcmp_group_entry_ptr->wcmp_group_members[0]->wcmp_group_id = kWcmpGroupId2; + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + wcmp_group_entry_ptr->wcmp_group_members[0]->wcmp_group_id = saved_member_wcmp_group_id; + + // Verification should fail if member OID mapper mismatches. 
+ p4_oid_mapper_->eraseOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER, + kWcmpGroupKey1 + kTableKeyDelimiter + sai_serialize_object_id(kWcmpGroupMemberOid1)); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + p4_oid_mapper_->setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER, + kWcmpGroupKey1 + kTableKeyDelimiter + sai_serialize_object_id(kWcmpGroupMemberOid1), + kWcmpGroupMemberOid1); +} + +TEST_F(WcmpManagerTest, VerifyStateAsicDbTest) +{ + AddWcmpGroupEntryWithWatchport("Ethernet6", true); + nlohmann::json j; + j[prependMatchField(p4orch::kWcmpGroupId)] = kWcmpGroupId1; + const std::string db_key = std::string(APP_P4RT_TABLE_NAME) + kTableKeyDelimiter + APP_P4RT_WCMP_GROUP_TABLE_NAME + + kTableKeyDelimiter + j.dump(); + std::vector attributes; + nlohmann::json actions; + nlohmann::json action; + action[p4orch::kAction] = p4orch::kSetNexthopId; + action[p4orch::kWeight] = 2; + action[p4orch::kWatchPort] = "Ethernet6"; + action[prependParamField(p4orch::kNexthopId)] = kNexthopId1; + actions.push_back(action); + attributes.push_back(swss::FieldValueTuple{p4orch::kActions, actions.dump()}); + + // Setup ASIC DB. + swss::Table table(nullptr, "ASIC_STATE"); + table.set("SAI_OBJECT_TYPE_NEXT_HOP_GROUP:oid:0xa", + std::vector{swss::FieldValueTuple{ + "SAI_NEXT_HOP_GROUP_ATTR_TYPE", "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP"}}); + table.set("SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER:oid:0xb", + std::vector{ + swss::FieldValueTuple{"SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID", "oid:0xa"}, + swss::FieldValueTuple{"SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID", "oid:0x1"}, + swss::FieldValueTuple{"SAI_NEXT_HOP_GROUP_MEMBER_ATTR_WEIGHT", "2"}}); + + // Verification should succeed with correct ASIC DB values. + EXPECT_EQ(VerifyState(db_key, attributes), ""); + + // Verification should fail if group values mismatch. 
+ table.set("SAI_OBJECT_TYPE_NEXT_HOP_GROUP:oid:0xa", + std::vector{swss::FieldValueTuple{"SAI_NEXT_HOP_GROUP_ATTR_TYPE", "invalid"}}); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + + // Verification should fail if group table is missing. + table.del("SAI_OBJECT_TYPE_NEXT_HOP_GROUP:oid:0xa"); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + table.set("SAI_OBJECT_TYPE_NEXT_HOP_GROUP:oid:0xa", + std::vector{swss::FieldValueTuple{ + "SAI_NEXT_HOP_GROUP_ATTR_TYPE", "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP"}}); + + // Verification should fail if member values mismatch. + table.set("SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER:oid:0xb", + std::vector{swss::FieldValueTuple{"SAI_NEXT_HOP_GROUP_MEMBER_ATTR_WEIGHT", "1"}}); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + + // Verification should fail if member table is missing. + table.del("SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER:oid:0xb"); + EXPECT_FALSE(VerifyState(db_key, attributes).empty()); + table.set("SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER:oid:0xb", + std::vector{ + swss::FieldValueTuple{"SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID", "oid:0xa"}, + swss::FieldValueTuple{"SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID", "oid:0x1"}, + swss::FieldValueTuple{"SAI_NEXT_HOP_GROUP_MEMBER_ATTR_WEIGHT", "2"}}); } + } // namespace test } // namespace p4orch diff --git a/orchagent/p4orch/wcmp_manager.cpp b/orchagent/p4orch/wcmp_manager.cpp index 6078f92221..257dd7d2a1 100644 --- a/orchagent/p4orch/wcmp_manager.cpp +++ b/orchagent/p4orch/wcmp_manager.cpp @@ -4,21 +4,27 @@ #include #include +#include "SaiAttributeList.h" #include "crmorch.h" +#include "dbconnector.h" #include "json.hpp" #include "logger.h" #include "p4orch/p4orch_util.h" #include "portsorch.h" #include "sai_serialize.h" +#include "table.h" extern "C" { #include "sai.h" } +using ::p4orch::kTableKeyDelimiter; + extern sai_object_id_t gSwitchId; extern sai_next_hop_group_api_t *sai_next_hop_group_api; extern CrmOrch *gCrmOrch; extern 
PortsOrch *gPortsOrch; +extern size_t gMaxBulkSize; namespace p4orch { @@ -31,8 +37,56 @@ std::string getWcmpGroupMemberKey(const std::string &wcmp_group_key, const sai_o return wcmp_group_key + kTableKeyDelimiter + sai_serialize_object_id(wcmp_member_oid); } +std::vector getSaiGroupAttrs(const P4WcmpGroupEntry &wcmp_group_entry) +{ + std::vector attrs; + sai_attribute_t attr; + + // TODO: Update type to WCMP when SAI supports it. + attr.id = SAI_NEXT_HOP_GROUP_ATTR_TYPE; + attr.value.s32 = SAI_NEXT_HOP_GROUP_TYPE_ECMP; + attrs.push_back(attr); + + return attrs; +} + } // namespace +WcmpManager::WcmpManager(P4OidMapper *p4oidMapper, ResponsePublisherInterface *publisher) + : gNextHopGroupMemberBulker(sai_next_hop_group_api, gSwitchId, gMaxBulkSize) +{ + SWSS_LOG_ENTER(); + + assert(p4oidMapper != nullptr); + m_p4OidMapper = p4oidMapper; + assert(publisher != nullptr); + m_publisher = publisher; +} + +std::vector WcmpManager::getSaiMemberAttrs(const P4WcmpGroupMemberEntry &wcmp_member_entry, + const sai_object_id_t group_oid) +{ + std::vector attrs; + sai_attribute_t attr; + sai_object_id_t next_hop_oid = SAI_NULL_OBJECT_ID; + m_p4OidMapper->getOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(wcmp_member_entry.next_hop_id), + &next_hop_oid); + + attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID; + attr.value.oid = group_oid; + attrs.push_back(attr); + + attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID; + attr.value.oid = next_hop_oid; + attrs.push_back(attr); + + attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_WEIGHT; + attr.value.u32 = (uint32_t)wcmp_member_entry.weight; + attrs.push_back(attr); + + return attrs; +} + ReturnCode WcmpManager::validateWcmpGroupEntry(const P4WcmpGroupEntry &app_db_entry) { for (auto &wcmp_group_member : app_db_entry.wcmp_group_members) @@ -123,6 +177,7 @@ ReturnCodeOr WcmpManager::deserializeP4WcmpGroupAppDbEntry( wcmp_group_member->watch_port = action_item[kWatchPort]; } wcmp_group_member->wcmp_group_id = 
app_db_entry.wcmp_group_id; + wcmp_group_member->pruned = false; app_db_entry.wcmp_group_members.push_back(wcmp_group_member); } } @@ -154,14 +209,7 @@ P4WcmpGroupEntry *WcmpManager::getWcmpGroupEntry(const std::string &wcmp_group_i ReturnCode WcmpManager::processAddRequest(P4WcmpGroupEntry *app_db_entry) { SWSS_LOG_ENTER(); - auto status = validateWcmpGroupEntry(*app_db_entry); - if (!status.ok()) - { - SWSS_LOG_ERROR("Invalid WCMP group with id %s: %s", QuotedVar(app_db_entry->wcmp_group_id).c_str(), - status.message().c_str()); - return status; - } - status = createWcmpGroup(app_db_entry); + auto status = createWcmpGroup(app_db_entry); if (!status.ok()) { SWSS_LOG_ERROR("Failed to create WCMP group with id %s: %s", QuotedVar(app_db_entry->wcmp_group_id).c_str(), @@ -173,38 +221,18 @@ ReturnCode WcmpManager::processAddRequest(P4WcmpGroupEntry *app_db_entry) ReturnCode WcmpManager::createWcmpGroupMember(std::shared_ptr wcmp_group_member, const sai_object_id_t group_oid, const std::string &wcmp_group_key) { - std::vector nhgm_attrs; - sai_attribute_t nhgm_attr; - sai_object_id_t next_hop_oid = SAI_NULL_OBJECT_ID; - m_p4OidMapper->getOID(SAI_OBJECT_TYPE_NEXT_HOP, KeyGenerator::generateNextHopKey(wcmp_group_member->next_hop_id), - &next_hop_oid); - - nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID; - nhgm_attr.value.oid = group_oid; - nhgm_attrs.push_back(nhgm_attr); - - nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID; - nhgm_attr.value.oid = next_hop_oid; - nhgm_attrs.push_back(nhgm_attr); - - nhgm_attr.id = SAI_NEXT_HOP_GROUP_MEMBER_ATTR_WEIGHT; - nhgm_attr.value.u32 = (uint32_t)wcmp_group_member->weight; - nhgm_attrs.push_back(nhgm_attr); + auto attrs = getSaiMemberAttrs(*wcmp_group_member, group_oid); CHECK_ERROR_AND_LOG_AND_RETURN( sai_next_hop_group_api->create_next_hop_group_member(&wcmp_group_member->member_oid, gSwitchId, - (uint32_t)nhgm_attrs.size(), nhgm_attrs.data()), + (uint32_t)attrs.size(), attrs.data()), "Failed to create 
next hop group member " << QuotedVar(wcmp_group_member->next_hop_id)); // Update reference count - const auto &next_hop_key = KeyGenerator::generateNextHopKey(wcmp_group_member->next_hop_id); m_p4OidMapper->setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER, getWcmpGroupMemberKey(wcmp_group_key, wcmp_group_member->member_oid), wcmp_group_member->member_oid); - gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER); - m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP, next_hop_key); m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, wcmp_group_key); - return ReturnCode(); } @@ -251,104 +279,90 @@ ReturnCode WcmpManager::fetchPortOperStatus(const std::string &port_name, sai_po return ReturnCode(); } -ReturnCode WcmpManager::createWcmpGroupMemberWithWatchport(P4WcmpGroupEntry *wcmp_group, - std::shared_ptr member, - const std::string &wcmp_group_key) +ReturnCode WcmpManager::processWcmpGroupMembersAddition( + const std::vector> &members, const std::string &wcmp_group_key, + sai_object_id_t wcmp_group_oid, std::vector> &created_wcmp_group_members) { - // Create member in SAI only for operationally up ports - sai_port_oper_status_t oper_status = SAI_PORT_OPER_STATUS_DOWN; - auto status = fetchPortOperStatus(member->watch_port, &oper_status); - if (!status.ok()) - { - return status; - } - - if (oper_status == SAI_PORT_OPER_STATUS_UP) + SWSS_LOG_ENTER(); + ReturnCode status; + vector nhgm_ids(members.size(), SAI_NULL_OBJECT_ID); + for (size_t i = 0; i < members.size(); ++i) { - auto status = createWcmpGroupMember(member, wcmp_group->wcmp_group_oid, wcmp_group_key); - if (!status.ok()) + bool insert_member = true; + auto &member = members[i]; + if (!member->watch_port.empty()) { - SWSS_LOG_ERROR("Failed to create next hop member %s with watch_port %s", member->next_hop_id.c_str(), - member->watch_port.c_str()); - return status; - } - } - else - { - pruned_wcmp_members_set.emplace(member); - SWSS_LOG_NOTICE("Member %s in group %s not created 
in asic as the associated watchport " - "(%s) is not operationally up", - member->next_hop_id.c_str(), member->wcmp_group_id.c_str(), member->watch_port.c_str()); - } - // Add member to port_name_to_wcmp_group_member_map - insertMemberInPortNameToWcmpGroupMemberMap(member); - return ReturnCode(); -} + // Create member in SAI only for operationally up ports + sai_port_oper_status_t oper_status = SAI_PORT_OPER_STATUS_DOWN; + status = fetchPortOperStatus(member->watch_port, &oper_status); + if (!status.ok()) + { + break; + } -ReturnCode WcmpManager::processWcmpGroupMemberAddition(std::shared_ptr member, - P4WcmpGroupEntry *wcmp_group, const std::string &wcmp_group_key) -{ - ReturnCode status = ReturnCode(); - if (!member->watch_port.empty()) - { - status = createWcmpGroupMemberWithWatchport(wcmp_group, member, wcmp_group_key); - if (!status.ok()) - { - SWSS_LOG_ERROR("Failed to create WCMP group member %s with watch_port %s", member->next_hop_id.c_str(), - member->watch_port.c_str()); + if (oper_status != SAI_PORT_OPER_STATUS_UP) + { + insert_member = false; + member->pruned = true; + SWSS_LOG_NOTICE("Member %s in group %s not created in asic as the associated " + "watchport " + "(%s) is not operationally up", + member->next_hop_id.c_str(), member->wcmp_group_id.c_str(), member->watch_port.c_str()); + } } - } - else - { - status = createWcmpGroupMember(member, wcmp_group->wcmp_group_oid, wcmp_group_key); - if (!status.ok()) + if (insert_member) { - SWSS_LOG_ERROR("Failed to create WCMP group member %s", member->next_hop_id.c_str()); + auto attrs = getSaiMemberAttrs(*(member.get()), wcmp_group_oid); + gNextHopGroupMemberBulker.create_entry(&nhgm_ids[i], (uint32_t)attrs.size(), attrs.data()); } } - return status; -} - -ReturnCode WcmpManager::processWcmpGroupMemberRemoval(std::shared_ptr member, - const std::string &wcmp_group_key) -{ - // If member exists in pruned_wcmp_members_set, remove from set. Else, remove - // member using SAI. 
- auto it = pruned_wcmp_members_set.find(member); - if (it != pruned_wcmp_members_set.end()) - { - pruned_wcmp_members_set.erase(it); - SWSS_LOG_NOTICE("Removed pruned member %s from group %s", member->next_hop_id.c_str(), - member->wcmp_group_id.c_str()); - } - else + if (status.ok()) { - auto status = removeWcmpGroupMember(member, wcmp_group_key); - if (!status.ok()) + gNextHopGroupMemberBulker.flush(); + for (size_t i = 0; i < members.size(); ++i) { - return status; + auto &member = members[i]; + if (!member->pruned) + { + if (nhgm_ids[i] == SAI_NULL_OBJECT_ID) + { + if (status.ok()) + { + status = ReturnCode(StatusCode::SWSS_RC_UNKNOWN) + << "Fail to create wcmp group member: " << QuotedVar(member->next_hop_id); + } + else + { + status << "; Fail to create wcmp group member: " << QuotedVar(member->next_hop_id); + } + continue; + } + member->member_oid = nhgm_ids[i]; + m_p4OidMapper->setOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER, + getWcmpGroupMemberKey(wcmp_group_key, member->member_oid), member->member_oid); + m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, wcmp_group_key); + } + if (!member->watch_port.empty()) + { + // Add member to port_name_to_wcmp_group_member_map + insertMemberInPortNameToWcmpGroupMemberMap(member); + } + const std::string &next_hop_key = KeyGenerator::generateNextHopKey(member->next_hop_id); + gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER); + m_p4OidMapper->increaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP, next_hop_key); + created_wcmp_group_members.push_back(member); } } - // Remove member from port_name_to_wcmp_group_member_map - removeMemberFromPortNameToWcmpGroupMemberMap(member); - return ReturnCode(); + return status; } ReturnCode WcmpManager::createWcmpGroup(P4WcmpGroupEntry *wcmp_group) { SWSS_LOG_ENTER(); - // Create SAI next hop group - sai_attribute_t nhg_attr; - std::vector nhg_attrs; - - // TODO: Update type to WCMP when SAI supports it. 
- nhg_attr.id = SAI_NEXT_HOP_GROUP_ATTR_TYPE; - nhg_attr.value.s32 = SAI_NEXT_HOP_GROUP_TYPE_ECMP; - nhg_attrs.push_back(nhg_attr); + auto attrs = getSaiGroupAttrs(*wcmp_group); CHECK_ERROR_AND_LOG_AND_RETURN(sai_next_hop_group_api->create_next_hop_group(&wcmp_group->wcmp_group_oid, gSwitchId, - (uint32_t)nhg_attrs.size(), - nhg_attrs.data()), + (uint32_t)attrs.size(), attrs.data()), "Failed to create next hop group " << QuotedVar(wcmp_group->wcmp_group_id)); // Update reference count const auto &wcmp_group_key = KeyGenerator::generateWcmpGroupKey(wcmp_group->wcmp_group_id); @@ -357,16 +371,8 @@ ReturnCode WcmpManager::createWcmpGroup(P4WcmpGroupEntry *wcmp_group) // Create next hop group members std::vector> created_wcmp_group_members; - ReturnCode status; - for (auto &wcmp_group_member : wcmp_group->wcmp_group_members) - { - status = processWcmpGroupMemberAddition(wcmp_group_member, wcmp_group, wcmp_group_key); - if (!status.ok()) - { - break; - } - created_wcmp_group_members.push_back(wcmp_group_member); - } + ReturnCode status = processWcmpGroupMembersAddition(wcmp_group->wcmp_group_members, wcmp_group_key, + wcmp_group->wcmp_group_oid, created_wcmp_group_members); if (!status.ok()) { // Clean up created group members and the group @@ -383,6 +389,7 @@ ReturnCode WcmpManager::createWcmpGroup(P4WcmpGroupEntry *wcmp_group) m_p4OidMapper->eraseOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, wcmp_group_key); return status; } + m_wcmpGroupTable[wcmp_group->wcmp_group_id] = *wcmp_group; return ReturnCode(); } @@ -392,33 +399,28 @@ void WcmpManager::recoverGroupMembers( const std::vector> &created_wcmp_group_members, const std::vector> &removed_wcmp_group_members) { - // Keep track of recovery status during clean up + SWSS_LOG_ENTER(); + std::vector> members; ReturnCode recovery_status; // Clean up created group members - remove created new members - for (const auto &new_member : created_wcmp_group_members) + if (created_wcmp_group_members.size() != 0) { - auto status = 
processWcmpGroupMemberRemoval(new_member, wcmp_group_key); - if (!status.ok()) - { - SWSS_LOG_ERROR("Failed to remove created next hop group member %s in " - "processUpdateRequest().", - QuotedVar(new_member->next_hop_id).c_str()); - recovery_status.ok() ? recovery_status = status.prepend("Error during recovery: ") - : recovery_status << "; Error during recovery: " << status.message(); - } + recovery_status = processWcmpGroupMembersRemoval(created_wcmp_group_members, wcmp_group_key, members) + .prepend("Error during recovery: "); } + // Clean up removed group members - create removed old members - for (auto &old_member : removed_wcmp_group_members) + if (recovery_status.ok() && removed_wcmp_group_members.size() != 0) { - auto status = processWcmpGroupMemberAddition(old_member, wcmp_group_entry, wcmp_group_key); - if (!status.ok()) - { - recovery_status.ok() ? recovery_status = status.prepend("Error during recovery: ") - : recovery_status << "; Error during recovery: " << status.message(); - } + recovery_status = processWcmpGroupMembersAddition(removed_wcmp_group_members, wcmp_group_key, + wcmp_group_entry->wcmp_group_oid, members) + .prepend("Error during recovery: "); } + if (!recovery_status.ok()) + { SWSS_RAISE_CRITICAL_STATE(recovery_status.message()); + } } ReturnCode WcmpManager::processUpdateRequest(P4WcmpGroupEntry *wcmp_group_entry) @@ -441,93 +443,88 @@ ReturnCode WcmpManager::processUpdateRequest(P4WcmpGroupEntry *wcmp_group_entry) // 5. Make SAI call to remove the reserved old member // 6. 
Make SAI calls to create remaining new members ReturnCode update_request_status; - auto find_smallest_index = [&](p4orch::P4WcmpGroupEntry *wcmp) { + auto find_smallest_index = [&](p4orch::P4WcmpGroupEntry *wcmp, + std::vector> &other_members) -> int { + other_members.clear(); if (wcmp->wcmp_group_members.empty()) + { return -1; + } int reserved_idx = 0; for (int i = 1; i < (int)wcmp->wcmp_group_members.size(); i++) { if (wcmp->wcmp_group_members[i]->weight < wcmp->wcmp_group_members[reserved_idx]->weight) { + other_members.push_back(wcmp->wcmp_group_members[reserved_idx]); reserved_idx = i; } + else + { + other_members.push_back(wcmp->wcmp_group_members[i]); + } } return reserved_idx; }; // Find the old member who has the smallest weight, -1 if the member list is // empty - int reserved_old_member_index = find_smallest_index(old_wcmp); + std::vector> other_old_members; + int reserved_old_member_index = find_smallest_index(old_wcmp, other_old_members); // Find the new member who has the smallest weight, -1 if the member list is // empty - int reserved_new_member_index = find_smallest_index(wcmp_group_entry); + std::vector> other_new_members; + int reserved_new_member_index = find_smallest_index(wcmp_group_entry, other_new_members); // Remove stale group members except the member with the smallest weight - for (int i = 0; i < (int)old_wcmp->wcmp_group_members.size(); i++) + if (other_old_members.size() != 0) { - // Reserve the old member with smallest weight - if (i == reserved_old_member_index) - continue; - auto &stale_member = old_wcmp->wcmp_group_members[i]; - update_request_status = processWcmpGroupMemberRemoval(stale_member, wcmp_group_key); + update_request_status = + processWcmpGroupMembersRemoval(other_old_members, wcmp_group_key, removed_wcmp_group_members); if (!update_request_status.ok()) { - SWSS_LOG_ERROR("Failed to remove stale next hop group member %s in " - "processUpdateRequest().", - 
QuotedVar(sai_serialize_object_id(stale_member->member_oid)).c_str()); recoverGroupMembers(wcmp_group_entry, wcmp_group_key, created_wcmp_group_members, removed_wcmp_group_members); return update_request_status; } - removed_wcmp_group_members.push_back(stale_member); } // Create the new member with the smallest weight if member list is nonempty - if (!wcmp_group_entry->wcmp_group_members.empty()) + if (reserved_new_member_index != -1) { - auto &member = wcmp_group_entry->wcmp_group_members[reserved_new_member_index]; - update_request_status = processWcmpGroupMemberAddition(member, wcmp_group_entry, wcmp_group_key); + update_request_status = processWcmpGroupMembersAddition( + {wcmp_group_entry->wcmp_group_members[reserved_new_member_index]}, wcmp_group_key, + wcmp_group_entry->wcmp_group_oid, created_wcmp_group_members); if (!update_request_status.ok()) { recoverGroupMembers(wcmp_group_entry, wcmp_group_key, created_wcmp_group_members, removed_wcmp_group_members); return update_request_status; } - created_wcmp_group_members.push_back(member); } // Remove the old member with the smallest weight if member list is nonempty - if (!old_wcmp->wcmp_group_members.empty()) + if (reserved_old_member_index != -1) { - auto &stale_member = old_wcmp->wcmp_group_members[reserved_old_member_index]; - update_request_status = processWcmpGroupMemberRemoval(stale_member, wcmp_group_key); + update_request_status = processWcmpGroupMembersRemoval( + {old_wcmp->wcmp_group_members[reserved_old_member_index]}, wcmp_group_key, removed_wcmp_group_members); if (!update_request_status.ok()) { - SWSS_LOG_ERROR("Failed to remove stale next hop group member %s in " - "processUpdateRequest().", - QuotedVar(sai_serialize_object_id(stale_member->member_oid)).c_str()); recoverGroupMembers(wcmp_group_entry, wcmp_group_key, created_wcmp_group_members, removed_wcmp_group_members); return update_request_status; } - removed_wcmp_group_members.push_back(stale_member); } // Create new group members - for (int 
i = 0; i < (int)wcmp_group_entry->wcmp_group_members.size(); i++) + if (other_new_members.size() != 0) { - // Skip the new member with the lowest weight as it is already created - if (i == reserved_new_member_index) - continue; - auto &member = wcmp_group_entry->wcmp_group_members[i]; - // Create new group member - update_request_status = processWcmpGroupMemberAddition(member, wcmp_group_entry, wcmp_group_key); + update_request_status = processWcmpGroupMembersAddition( + other_new_members, wcmp_group_key, wcmp_group_entry->wcmp_group_oid, created_wcmp_group_members); if (!update_request_status.ok()) { recoverGroupMembers(wcmp_group_entry, wcmp_group_key, created_wcmp_group_members, removed_wcmp_group_members); return update_request_status; } - created_wcmp_group_members.push_back(member); } m_wcmpGroupTable[wcmp_group_entry->wcmp_group_id] = *wcmp_group_entry; @@ -538,19 +535,72 @@ ReturnCode WcmpManager::removeWcmpGroupMember(const std::shared_ptrnext_hop_id); CHECK_ERROR_AND_LOG_AND_RETURN(sai_next_hop_group_api->remove_next_hop_group_member(wcmp_group_member->member_oid), "Failed to remove WCMP group member with nexthop id " << QuotedVar(wcmp_group_member->next_hop_id)); m_p4OidMapper->eraseOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER, getWcmpGroupMemberKey(wcmp_group_key, wcmp_group_member->member_oid)); - gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER); - m_p4OidMapper->decreaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP, next_hop_key); m_p4OidMapper->decreaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, wcmp_group_key); return ReturnCode(); } +ReturnCode WcmpManager::processWcmpGroupMembersRemoval( + const std::vector> &members, const std::string &wcmp_group_key, + std::vector> &removed_wcmp_group_members) +{ + SWSS_LOG_ENTER(); + ReturnCode status; + std::vector statuses(members.size(), SAI_STATUS_FAILURE); + for (size_t i = 0; i < members.size(); ++i) + { + auto &member = members[i]; + if (!member->pruned) + { + 
gNextHopGroupMemberBulker.remove_entry(&statuses[i], member->member_oid); + } + } + gNextHopGroupMemberBulker.flush(); + for (size_t i = 0; i < members.size(); ++i) + { + auto &member = members[i]; + if (member->pruned) + { + SWSS_LOG_NOTICE("Removed pruned member %s from group %s", member->next_hop_id.c_str(), + member->wcmp_group_id.c_str()); + member->pruned = false; + } + else + { + if (statuses[i] != SAI_STATUS_SUCCESS) + { + if (status.ok()) + { + status = ReturnCode(statuses[i]) + << "Failed to delete WCMP group member: " << QuotedVar(member->next_hop_id); + } + else + { + status << "; Failed to delete WCMP group member: " << QuotedVar(member->next_hop_id); + } + continue; + } + else + { + m_p4OidMapper->eraseOID(SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER, + getWcmpGroupMemberKey(wcmp_group_key, member->member_oid)); + m_p4OidMapper->decreaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, wcmp_group_key); + } + } + const std::string &next_hop_key = KeyGenerator::generateNextHopKey(member->next_hop_id); + gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_NEXTHOP_GROUP_MEMBER); + m_p4OidMapper->decreaseRefCount(SAI_OBJECT_TYPE_NEXT_HOP, next_hop_key); + removeMemberFromPortNameToWcmpGroupMemberMap(member); + removed_wcmp_group_members.push_back(member); + } + return status; +} + ReturnCode WcmpManager::removeWcmpGroup(const std::string &wcmp_group_id) { SWSS_LOG_ENTER(); @@ -572,18 +622,12 @@ ReturnCode WcmpManager::removeWcmpGroup(const std::string &wcmp_group_id) << wcmp_group_refcount - expected_refcount << " more objects than its group members (size=" << expected_refcount << ") referencing it."); } - std::vector> removed_wcmp_group_members; - ReturnCode status; + // Delete group members - for (const auto &member : wcmp_group->wcmp_group_members) - { - status = processWcmpGroupMemberRemoval(member, wcmp_group_key); - if (!status.ok()) - { - break; - } - removed_wcmp_group_members.push_back(member); - } + std::vector> removed_wcmp_group_members; + ReturnCode status = 
+ processWcmpGroupMembersRemoval(wcmp_group->wcmp_group_members, wcmp_group_key, removed_wcmp_group_members); + // Delete group if (status.ok()) { @@ -613,24 +657,22 @@ void WcmpManager::pruneNextHops(const std::string &port) { for (const auto &member : port_name_to_wcmp_group_member_map[port]) { - auto it = pruned_wcmp_members_set.find(member); // Prune a member if it is not already pruned. - if (it == pruned_wcmp_members_set.end()) + if (!member->pruned) { const auto &wcmp_group_key = KeyGenerator::generateWcmpGroupKey(member->wcmp_group_id); auto status = removeWcmpGroupMember(member, wcmp_group_key); if (!status.ok()) { - SWSS_LOG_NOTICE("Failed to remove member %s from group %s, rv: %s", member->next_hop_id.c_str(), - member->wcmp_group_id.c_str(), status.message().c_str()); - } - else - { - // Add pruned member to pruned set - pruned_wcmp_members_set.emplace(member); - SWSS_LOG_NOTICE("Pruned member %s from group %s", member->next_hop_id.c_str(), - member->wcmp_group_id.c_str()); + std::stringstream msg; + msg << "Failed to prune member " << member->next_hop_id << " from group " << member->wcmp_group_id + << ": " << status.message(); + SWSS_RAISE_CRITICAL_STATE(msg.str()); + return; } + member->pruned = true; + SWSS_LOG_NOTICE("Pruned member %s from group %s", member->next_hop_id.c_str(), + member->wcmp_group_id.c_str()); } } } @@ -647,8 +689,7 @@ void WcmpManager::restorePrunedNextHops(const std::string &port) ReturnCode status; for (auto member : port_name_to_wcmp_group_member_map[port]) { - auto it = pruned_wcmp_members_set.find(member); - if (it != pruned_wcmp_members_set.end()) + if (member->pruned) { const auto &wcmp_group_key = KeyGenerator::generateWcmpGroupKey(member->wcmp_group_id); sai_object_id_t wcmp_group_oid = SAI_NULL_OBJECT_ID; @@ -670,7 +711,7 @@ void WcmpManager::restorePrunedNextHops(const std::string &port) SWSS_RAISE_CRITICAL_STATE(status.message()); return; } - pruned_wcmp_members_set.erase(it); + member->pruned = false; 
SWSS_LOG_NOTICE("Restored pruned member %s in group %s", member->next_hop_id.c_str(), member->wcmp_group_id.c_str()); } @@ -693,7 +734,34 @@ void WcmpManager::updatePortOperStatusMap(const std::string &port, const sai_por port_oper_status_map[port] = status; } -void WcmpManager::enqueue(const swss::KeyOpFieldsValuesTuple &entry) +ReturnCode WcmpManager::getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) +{ + std::string value; + + try + { + nlohmann::json j = nlohmann::json::parse(json_key); + if (j.find(prependMatchField(p4orch::kWcmpGroupId)) != j.end()) + { + value = j.at(prependMatchField(p4orch::kWcmpGroupId)).get(); + object_key = KeyGenerator::generateWcmpGroupKey(value); + object_type = SAI_OBJECT_TYPE_NEXT_HOP_GROUP; + return ReturnCode(); + } + else + { + SWSS_LOG_ERROR("%s match parameter absent: required for dependent object query", p4orch::kWcmpGroupId); + } + } + catch (std::exception &ex) + { + SWSS_LOG_ERROR("json_key parse error"); + } + + return StatusCode::SWSS_RC_INVALID_PARAM; +} + +void WcmpManager::enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) { m_entries.push_back(entry); } @@ -726,6 +794,16 @@ void WcmpManager::drain() const std::string &operation = kfvOp(key_op_fvs_tuple); if (operation == SET_COMMAND) { + status = validateWcmpGroupEntry(app_db_entry); + if (!status.ok()) + { + SWSS_LOG_ERROR("Invalid WCMP group with id %s: %s", QuotedVar(app_db_entry.wcmp_group_id).c_str(), + status.message().c_str()); + m_publisher->publish(APP_P4RT_TABLE_NAME, kfvKey(key_op_fvs_tuple), kfvFieldsValues(key_op_fvs_tuple), + status, + /*replace=*/true); + continue; + } auto *wcmp_group_entry = getWcmpGroupEntry(app_db_entry.wcmp_group_id); if (wcmp_group_entry == nullptr) { @@ -757,4 +835,197 @@ void WcmpManager::drain() m_entries.clear(); } +std::string WcmpManager::verifyState(const std::string &key, const std::vector &tuple) +{ + SWSS_LOG_ENTER(); + + auto pos = 
key.find_first_of(kTableKeyDelimiter); + if (pos == std::string::npos) + { + return std::string("Invalid key: ") + key; + } + std::string p4rt_table = key.substr(0, pos); + std::string p4rt_key = key.substr(pos + 1); + if (p4rt_table != APP_P4RT_TABLE_NAME) + { + return std::string("Invalid key: ") + key; + } + std::string table_name; + std::string key_content; + parseP4RTKey(p4rt_key, &table_name, &key_content); + if (table_name != APP_P4RT_WCMP_GROUP_TABLE_NAME) + { + return std::string("Invalid key: ") + key; + } + + ReturnCode status; + auto app_db_entry_or = deserializeP4WcmpGroupAppDbEntry(key_content, tuple); + if (!app_db_entry_or.ok()) + { + status = app_db_entry_or.status(); + std::stringstream msg; + msg << "Unable to deserialize key " << QuotedVar(key) << ": " << status.message(); + return msg.str(); + } + auto &app_db_entry = *app_db_entry_or; + + auto *wcmp_group_entry = getWcmpGroupEntry(app_db_entry.wcmp_group_id); + if (wcmp_group_entry == nullptr) + { + std::stringstream msg; + msg << "No entry found with key " << QuotedVar(key); + return msg.str(); + } + + std::string cache_result = verifyStateCache(app_db_entry, wcmp_group_entry); + std::string asic_db_result = verifyStateAsicDb(wcmp_group_entry); + if (cache_result.empty()) + { + return asic_db_result; + } + if (asic_db_result.empty()) + { + return cache_result; + } + return cache_result + "; " + asic_db_result; +} + +std::string WcmpManager::verifyStateCache(const P4WcmpGroupEntry &app_db_entry, + const P4WcmpGroupEntry *wcmp_group_entry) +{ + const std::string &wcmp_group_key = KeyGenerator::generateWcmpGroupKey(app_db_entry.wcmp_group_id); + ReturnCode status = validateWcmpGroupEntry(app_db_entry); + if (!status.ok()) + { + std::stringstream msg; + msg << "Validation failed for WCMP group DB entry with key " << QuotedVar(wcmp_group_key) << ": " + << status.message(); + return msg.str(); + } + + if (wcmp_group_entry->wcmp_group_id != app_db_entry.wcmp_group_id) + { + std::stringstream msg; + 
msg << "WCMP group ID " << QuotedVar(app_db_entry.wcmp_group_id) << " does not match internal cache " + << QuotedVar(wcmp_group_entry->wcmp_group_id) << " in wcmp manager."; + return msg.str(); + } + + std::string err_msg = m_p4OidMapper->verifyOIDMapping(SAI_OBJECT_TYPE_NEXT_HOP_GROUP, wcmp_group_key, + wcmp_group_entry->wcmp_group_oid); + if (!err_msg.empty()) + { + return err_msg; + } + + if (wcmp_group_entry->wcmp_group_members.size() != app_db_entry.wcmp_group_members.size()) + { + std::stringstream msg; + msg << "WCMP group with ID " << QuotedVar(app_db_entry.wcmp_group_id) << " has member size " + << app_db_entry.wcmp_group_members.size() << " non-matching internal cache " + << wcmp_group_entry->wcmp_group_members.size(); + return msg.str(); + } + + for (size_t i = 0; i < wcmp_group_entry->wcmp_group_members.size(); ++i) + { + if (wcmp_group_entry->wcmp_group_members[i]->next_hop_id != app_db_entry.wcmp_group_members[i]->next_hop_id) + { + std::stringstream msg; + msg << "WCMP group member " << QuotedVar(app_db_entry.wcmp_group_members[i]->next_hop_id) + << " does not match internal cache " << QuotedVar(wcmp_group_entry->wcmp_group_members[i]->next_hop_id) + << " in wcmp manager."; + return msg.str(); + } + if (wcmp_group_entry->wcmp_group_members[i]->weight != app_db_entry.wcmp_group_members[i]->weight) + { + std::stringstream msg; + msg << "WCMP group member " << QuotedVar(app_db_entry.wcmp_group_members[i]->next_hop_id) << " weight " + << app_db_entry.wcmp_group_members[i]->weight << " does not match internal cache " + << wcmp_group_entry->wcmp_group_members[i]->weight << " in wcmp manager."; + return msg.str(); + } + if (wcmp_group_entry->wcmp_group_members[i]->watch_port != app_db_entry.wcmp_group_members[i]->watch_port) + { + std::stringstream msg; + msg << "WCMP group member " << QuotedVar(app_db_entry.wcmp_group_members[i]->next_hop_id) << " watch port " + << QuotedVar(app_db_entry.wcmp_group_members[i]->watch_port) << " does not match internal cache 
" + << QuotedVar(wcmp_group_entry->wcmp_group_members[i]->watch_port) << " in wcmp manager."; + return msg.str(); + } + if (wcmp_group_entry->wcmp_group_members[i]->wcmp_group_id != app_db_entry.wcmp_group_members[i]->wcmp_group_id) + { + std::stringstream msg; + msg << "WCMP group member " << QuotedVar(app_db_entry.wcmp_group_members[i]->next_hop_id) << " group ID " + << QuotedVar(app_db_entry.wcmp_group_members[i]->wcmp_group_id) << " does not match internal cache " + << QuotedVar(wcmp_group_entry->wcmp_group_members[i]->wcmp_group_id) << " in wcmp manager."; + return msg.str(); + } + if (!app_db_entry.wcmp_group_members[i]->watch_port.empty() && wcmp_group_entry->wcmp_group_members[i]->pruned) + { + continue; + } + err_msg = m_p4OidMapper->verifyOIDMapping( + SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER, + getWcmpGroupMemberKey(wcmp_group_key, wcmp_group_entry->wcmp_group_members[i]->member_oid), + wcmp_group_entry->wcmp_group_members[i]->member_oid); + if (!err_msg.empty()) + { + return err_msg; + } + } + + return ""; +} + +std::string WcmpManager::verifyStateAsicDb(const P4WcmpGroupEntry *wcmp_group_entry) +{ + swss::DBConnector db("ASIC_DB", 0); + swss::Table table(&db, "ASIC_STATE"); + + auto group_attrs = getSaiGroupAttrs(*wcmp_group_entry); + std::vector exp = saimeta::SaiAttributeList::serialize_attr_list( + SAI_OBJECT_TYPE_NEXT_HOP_GROUP, (uint32_t)group_attrs.size(), group_attrs.data(), /*countOnly=*/false); + std::string key = sai_serialize_object_type(SAI_OBJECT_TYPE_NEXT_HOP_GROUP) + ":" + + sai_serialize_object_id(wcmp_group_entry->wcmp_group_oid); + std::vector values; + if (!table.get(key, values)) + { + return std::string("ASIC DB key not found ") + key; + } + auto group_result = verifyAttrs(values, exp, std::vector{}, + /*allow_unknown=*/false); + if (!group_result.empty()) + { + return group_result; + } + + for (const auto &member : wcmp_group_entry->wcmp_group_members) + { + if (!member->watch_port.empty() && member->pruned) + { + continue; + } + 
auto member_attrs = getSaiMemberAttrs(*member, wcmp_group_entry->wcmp_group_oid); + std::vector exp = saimeta::SaiAttributeList::serialize_attr_list( + SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER, (uint32_t)member_attrs.size(), member_attrs.data(), + /*countOnly=*/false); + std::string key = sai_serialize_object_type(SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER) + ":" + + sai_serialize_object_id(member->member_oid); + std::vector values; + if (!table.get(key, values)) + { + return std::string("ASIC DB key not found ") + key; + } + auto member_result = verifyAttrs(values, exp, std::vector{}, + /*allow_unknown=*/false); + if (!member_result.empty()) + { + return member_result; + } + } + + return ""; +} + } // namespace p4orch diff --git a/orchagent/p4orch/wcmp_manager.h b/orchagent/p4orch/wcmp_manager.h index 4c6629a398..64fd4283e4 100644 --- a/orchagent/p4orch/wcmp_manager.h +++ b/orchagent/p4orch/wcmp_manager.h @@ -4,6 +4,7 @@ #include #include +#include "bulker.h" #include "notificationconsumer.h" #include "orch.h" #include "p4orch/object_manager_interface.h" @@ -28,6 +29,7 @@ struct P4WcmpGroupMemberEntry // Default ECMP(weight=1) int weight = 1; std::string watch_port; + bool pruned; sai_object_id_t member_oid = SAI_NULL_OBJECT_ID; std::string wcmp_group_id; }; @@ -63,20 +65,14 @@ struct P4WcmpGroupEntry class WcmpManager : public ObjectManagerInterface { public: - WcmpManager(P4OidMapper *p4oidMapper, ResponsePublisherInterface *publisher) - { - SWSS_LOG_ENTER(); - - assert(p4oidMapper != nullptr); - m_p4OidMapper = p4oidMapper; - assert(publisher != nullptr); - m_publisher = publisher; - } + WcmpManager(P4OidMapper *p4oidMapper, ResponsePublisherInterface *publisher); virtual ~WcmpManager() = default; - void enqueue(const swss::KeyOpFieldsValuesTuple &entry) override; + void enqueue(const std::string &table_name, const swss::KeyOpFieldsValuesTuple &entry) override; void drain() override; + std::string verifyState(const std::string &key, const std::vector &tuple) override; 
+ ReturnCode getSaiObject(const std::string &json_key, sai_object_type_t &object_type, std::string &object_key) override; // Prunes next hop members egressing through the given port. void pruneNextHops(const std::string &port); @@ -111,20 +107,18 @@ class WcmpManager : public ObjectManagerInterface ReturnCode createWcmpGroupMember(std::shared_ptr wcmp_group_member, const sai_object_id_t group_oid, const std::string &wcmp_group_key); - // Creates WCMP group member with an associated watch_port. - ReturnCode createWcmpGroupMemberWithWatchport(P4WcmpGroupEntry *wcmp_group, - std::shared_ptr member, - const std::string &wcmp_group_key); - // Performs watchport related addition operations and creates WCMP group - // member. - ReturnCode processWcmpGroupMemberAddition(std::shared_ptr member, - P4WcmpGroupEntry *wcmp_group, const std::string &wcmp_group_key); + // members. + ReturnCode processWcmpGroupMembersAddition( + const std::vector> &members, const std::string &wcmp_group_key, + sai_object_id_t wcmp_group_oid, + std::vector> &created_wcmp_group_members); // Performs watchport related removal operations and removes WCMP group - // member. - ReturnCode processWcmpGroupMemberRemoval(std::shared_ptr member, - const std::string &wcmp_group_key); + // members. + ReturnCode processWcmpGroupMembersRemoval( + const std::vector> &members, const std::string &wcmp_group_key, + std::vector> &removed_wcmp_group_members); // Processes update operation for a WCMP group entry. ReturnCode processUpdateRequest(P4WcmpGroupEntry *wcmp_group_entry); @@ -154,6 +148,16 @@ class WcmpManager : public ObjectManagerInterface // Gets port oper-status from port_oper_status_map if present bool getPortOperStatusFromMap(const std::string &port, sai_port_oper_status_t *status); + // Verifies the internal cache for an entry. + std::string verifyStateCache(const P4WcmpGroupEntry &app_db_entry, const P4WcmpGroupEntry *wcmp_group_entry); + + // Verifies the ASIC DB for an entry. 
+ std::string verifyStateAsicDb(const P4WcmpGroupEntry *wcmp_group_entry); + + // Returns the SAI attributes for a group member. + std::vector getSaiMemberAttrs(const P4WcmpGroupMemberEntry &wcmp_member_entry, + const sai_object_id_t group_oid); + // Maps wcmp_group_id to P4WcmpGroupEntry std::unordered_map m_wcmpGroupTable; @@ -161,9 +165,6 @@ class WcmpManager : public ObjectManagerInterface std::unordered_map>> port_name_to_wcmp_group_member_map; - // Set of pruned P4WcmpGroupMemberEntry entries - std::unordered_set> pruned_wcmp_members_set; - // Maps port name to oper-status std::unordered_map port_oper_status_map; @@ -171,6 +172,7 @@ class WcmpManager : public ObjectManagerInterface P4OidMapper *m_p4OidMapper; std::deque m_entries; ResponsePublisherInterface *m_publisher; + ObjectBulker gNextHopGroupMemberBulker; friend class p4orch::test::WcmpManagerTest; }; diff --git a/orchagent/pbh/pbhcap.cpp b/orchagent/pbh/pbhcap.cpp new file mode 100644 index 0000000000..46a3a49e19 --- /dev/null +++ b/orchagent/pbh/pbhcap.cpp @@ -0,0 +1,688 @@ +// includes ----------------------------------------------------------------------------------------------------------- + +#include +#include +#include +#include +#include +#include + +#include "pbhschema.h" +#include "schema.h" +#include "logger.h" + +#include "pbhcap.h" + +using namespace swss; + +// defines ------------------------------------------------------------------------------------------------------------ + +#define PBH_PLATFORM_ENV_VAR "ASIC_VENDOR" +#define PBH_PLATFORM_GENERIC "generic" +#define PBH_PLATFORM_MELLANOX "mellanox" +#define PBH_PLATFORM_UNKN "unknown" + +#define PBH_TABLE_CAPABILITIES_KEY "table" +#define PBH_RULE_CAPABILITIES_KEY "rule" +#define PBH_HASH_CAPABILITIES_KEY "hash" +#define PBH_HASH_FIELD_CAPABILITIES_KEY "hash-field" + +#define PBH_FIELD_CAPABILITY_ADD "ADD" +#define PBH_FIELD_CAPABILITY_UPDATE "UPDATE" +#define PBH_FIELD_CAPABILITY_REMOVE "REMOVE" +#define PBH_FIELD_CAPABILITY_UNKN 
"UNKNOWN" + +#define PBH_STATE_DB_NAME "STATE_DB" +#define PBH_STATE_DB_TIMEOUT 0 + +// constants ---------------------------------------------------------------------------------------------------------- + +static const std::map pbhAsicVendorMap = +{ + { PbhAsicVendor::GENERIC, PBH_PLATFORM_GENERIC }, + { PbhAsicVendor::MELLANOX, PBH_PLATFORM_MELLANOX } +}; + +static const std::map pbhFieldCapabilityMap = +{ + { PbhFieldCapability::ADD, PBH_FIELD_CAPABILITY_ADD }, + { PbhFieldCapability::UPDATE, PBH_FIELD_CAPABILITY_UPDATE }, + { PbhFieldCapability::REMOVE, PBH_FIELD_CAPABILITY_REMOVE } +}; + +// functions ---------------------------------------------------------------------------------------------------------- + +static std::string toStr(PbhAsicVendor value) noexcept +{ + const auto &cit = pbhAsicVendorMap.find(value); + if (cit != pbhAsicVendorMap.cend()) + { + return cit->second; + } + + return PBH_PLATFORM_UNKN; +} + +static std::string toStr(PbhFieldCapability value) noexcept +{ + const auto &cit = pbhFieldCapabilityMap.find(value); + if (cit != pbhFieldCapabilityMap.cend()) + { + return cit->second; + } + + return PBH_FIELD_CAPABILITY_UNKN; +} + +static std::string toStr(const std::set &value) noexcept +{ + std::stringstream ss; + bool separator = false; + + for (const auto &cit : value) + { + if (!separator) + { + ss << toStr(cit); + separator = true; + } + else + { + ss << "," << toStr(cit); + } + } + + return ss.str(); +} + +// PBH field capabilities --------------------------------------------------------------------------------------------- + +void PbhVendorFieldCapabilities::setPbhDefaults(std::set &fieldCap) noexcept +{ + fieldCap.insert(PbhFieldCapability::ADD); + fieldCap.insert(PbhFieldCapability::UPDATE); + fieldCap.insert(PbhFieldCapability::REMOVE); +} + +PbhGenericFieldCapabilities::PbhGenericFieldCapabilities() noexcept +{ + this->table.interface_list.insert(PbhFieldCapability::UPDATE); + 
this->table.description.insert(PbhFieldCapability::UPDATE); + + this->rule.priority.insert(PbhFieldCapability::UPDATE); + this->setPbhDefaults(this->rule.gre_key); + this->setPbhDefaults(this->rule.ether_type); + this->setPbhDefaults(this->rule.ip_protocol); + this->setPbhDefaults(this->rule.ipv6_next_header); + this->setPbhDefaults(this->rule.l4_dst_port); + this->setPbhDefaults(this->rule.inner_ether_type); + this->rule.hash.insert(PbhFieldCapability::UPDATE); + this->setPbhDefaults(this->rule.packet_action); + this->setPbhDefaults(this->rule.flow_counter); + + this->hash.hash_field_list.insert(PbhFieldCapability::UPDATE); +} + +PbhMellanoxFieldCapabilities::PbhMellanoxFieldCapabilities() noexcept +{ + this->table.interface_list.insert(PbhFieldCapability::UPDATE); + this->table.description.insert(PbhFieldCapability::UPDATE); + + this->rule.priority.insert(PbhFieldCapability::UPDATE); + this->setPbhDefaults(this->rule.gre_key); + this->setPbhDefaults(this->rule.ether_type); + this->setPbhDefaults(this->rule.ip_protocol); + this->setPbhDefaults(this->rule.ipv6_next_header); + this->setPbhDefaults(this->rule.l4_dst_port); + this->setPbhDefaults(this->rule.inner_ether_type); + this->rule.hash.insert(PbhFieldCapability::UPDATE); + this->setPbhDefaults(this->rule.packet_action); + this->setPbhDefaults(this->rule.flow_counter); +} + +// PBH entity capabilities -------------------------------------------------------------------------------------------- + +PbhEntityCapabilities::PbhEntityCapabilities(const std::shared_ptr &fieldCap) noexcept : + fieldCap(fieldCap) +{ + +} + +bool PbhEntityCapabilities::validate(const std::set &fieldCap, PbhFieldCapability value) const +{ + const auto &cit = fieldCap.find(value); + if (cit == fieldCap.cend()) + { + return false; + } + + return true; +} + +PbhTableCapabilities::PbhTableCapabilities(const std::shared_ptr &fieldCap) noexcept : + PbhEntityCapabilities(fieldCap) +{ + +} + +bool 
PbhTableCapabilities::validatePbhInterfaceList(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().interface_list, value); +} + +bool PbhTableCapabilities::validatePbhDescription(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().description, value); +} + +auto PbhTableCapabilities::getPbhCap() const -> const decltype(PbhVendorFieldCapabilities::table) & +{ + return this->fieldCap->table; +} + +PbhRuleCapabilities::PbhRuleCapabilities(const std::shared_ptr &fieldCap) noexcept : + PbhEntityCapabilities(fieldCap) +{ + +} + +bool PbhRuleCapabilities::validatePbhPriority(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().priority, value); +} + +bool PbhRuleCapabilities::validatePbhGreKey(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().gre_key, value); +} + +bool PbhRuleCapabilities::validatePbhEtherType(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().ether_type, value); +} + +bool PbhRuleCapabilities::validatePbhIpProtocol(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().ip_protocol, value); +} + +bool PbhRuleCapabilities::validatePbhIpv6NextHeader(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().ipv6_next_header, value); +} + +bool PbhRuleCapabilities::validatePbhL4DstPort(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().l4_dst_port, value); +} + +bool PbhRuleCapabilities::validatePbhInnerEtherType(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().inner_ether_type, value); +} + +bool PbhRuleCapabilities::validatePbhHash(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().hash, value); +} + +bool PbhRuleCapabilities::validatePbhPacketAction(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().packet_action, value); +} + +bool 
PbhRuleCapabilities::validatePbhFlowCounter(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().flow_counter, value); +} + +auto PbhRuleCapabilities::getPbhCap() const -> const decltype(PbhVendorFieldCapabilities::rule) & +{ + return this->fieldCap->rule; +} + +PbhHashCapabilities::PbhHashCapabilities(const std::shared_ptr &fieldCap) noexcept : + PbhEntityCapabilities(fieldCap) +{ + +} + +bool PbhHashCapabilities::validatePbhHashFieldList(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().hash_field_list, value); +} + +auto PbhHashCapabilities::getPbhCap() const -> const decltype(PbhVendorFieldCapabilities::hash) & +{ + return this->fieldCap->hash; +} + +PbhHashFieldCapabilities::PbhHashFieldCapabilities(const std::shared_ptr &fieldCap) noexcept : + PbhEntityCapabilities(fieldCap) +{ + +} + +bool PbhHashFieldCapabilities::validatePbhHashField(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().hash_field, value); +} + +bool PbhHashFieldCapabilities::validatePbhIpMask(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().ip_mask, value); +} + +bool PbhHashFieldCapabilities::validatePbhSequenceId(PbhFieldCapability value) const +{ + return this->validate(this->getPbhCap().sequence_id, value); +} + +auto PbhHashFieldCapabilities::getPbhCap() const -> const decltype(PbhVendorFieldCapabilities::hashField) & +{ + return this->fieldCap->hashField; +} + +// PBH capabilities --------------------------------------------------------------------------------------------------- + +DBConnector PbhCapabilities::stateDb(PBH_STATE_DB_NAME, PBH_STATE_DB_TIMEOUT); +Table PbhCapabilities::capTable(&stateDb, STATE_PBH_CAPABILITIES_TABLE_NAME); + +PbhCapabilities::PbhCapabilities() noexcept +{ + SWSS_LOG_ENTER(); + + if (!this->parsePbhAsicVendor()) + { + SWSS_LOG_WARN("Failed to parse ASIC vendor: fallback to %s platform", PBH_PLATFORM_GENERIC); + this->asicVendor = PbhAsicVendor::GENERIC; 
+ } + + this->initPbhVendorCapabilities(); + this->writePbhVendorCapabilitiesToDb(); +} + +PbhAsicVendor PbhCapabilities::getAsicVendor() const noexcept +{ + return this->asicVendor; +} + +bool PbhCapabilities::parsePbhAsicVendor() +{ + SWSS_LOG_ENTER(); + + const auto *envVar = std::getenv(PBH_PLATFORM_ENV_VAR); + if (envVar == nullptr) + { + SWSS_LOG_WARN("Failed to detect ASIC vendor: environmental variable(%s) is not found", PBH_PLATFORM_ENV_VAR); + return false; + } + + std::string platform(envVar); + + if (platform == PBH_PLATFORM_MELLANOX) + { + this->asicVendor = PbhAsicVendor::MELLANOX; + } + else + { + this->asicVendor = PbhAsicVendor::GENERIC; + } + + SWSS_LOG_NOTICE("Parsed PBH ASIC vendor: %s", toStr(this->asicVendor).c_str()); + + return true; +} + +void PbhCapabilities::initPbhVendorCapabilities() +{ + std::shared_ptr fieldCap; + + switch (this->asicVendor) + { + case PbhAsicVendor::GENERIC: + fieldCap = std::make_shared(); + break; + + case PbhAsicVendor::MELLANOX: + fieldCap = std::make_shared(); + break; + + default: + SWSS_LOG_WARN("Unknown ASIC vendor: skipping ..."); + break; + } + + if (!fieldCap) + { + SWSS_LOG_ERROR("Failed to initialize PBH capabilities: unknown ASIC vendor"); + return; + } + + this->table = std::make_shared(fieldCap); + this->rule = std::make_shared(fieldCap); + this->hash = std::make_shared(fieldCap); + this->hashField = std::make_shared(fieldCap); + + SWSS_LOG_NOTICE("Initialized PBH capabilities: %s platform", toStr(this->asicVendor).c_str()); +} + +template<> +void PbhCapabilities::writePbhEntityCapabilitiesToDb(const std::shared_ptr &entityCap) +{ + SWSS_LOG_ENTER(); + + auto key = PbhCapabilities::capTable.getKeyName(PBH_TABLE_CAPABILITIES_KEY); + std::vector fvList; + + fvList.push_back(FieldValueTuple(PBH_TABLE_INTERFACE_LIST, toStr(entityCap->getPbhCap().interface_list))); + fvList.push_back(FieldValueTuple(PBH_TABLE_DESCRIPTION, toStr(entityCap->getPbhCap().description))); + + 
PbhCapabilities::capTable.set(PBH_TABLE_CAPABILITIES_KEY, fvList); + + SWSS_LOG_NOTICE("Wrote PBH table capabilities to State DB: %s key", key.c_str()); +} + +template<> +void PbhCapabilities::writePbhEntityCapabilitiesToDb(const std::shared_ptr &entityCap) +{ + SWSS_LOG_ENTER(); + + auto key = PbhCapabilities::capTable.getKeyName(PBH_RULE_CAPABILITIES_KEY); + std::vector fvList; + + fvList.push_back(FieldValueTuple(PBH_RULE_PRIORITY, toStr(entityCap->getPbhCap().priority))); + fvList.push_back(FieldValueTuple(PBH_RULE_GRE_KEY, toStr(entityCap->getPbhCap().gre_key))); + fvList.push_back(FieldValueTuple(PBH_RULE_ETHER_TYPE, toStr(entityCap->getPbhCap().ether_type))); + fvList.push_back(FieldValueTuple(PBH_RULE_IP_PROTOCOL, toStr(entityCap->getPbhCap().ip_protocol))); + fvList.push_back(FieldValueTuple(PBH_RULE_IPV6_NEXT_HEADER, toStr(entityCap->getPbhCap().ipv6_next_header))); + fvList.push_back(FieldValueTuple(PBH_RULE_L4_DST_PORT, toStr(entityCap->getPbhCap().l4_dst_port))); + fvList.push_back(FieldValueTuple(PBH_RULE_INNER_ETHER_TYPE, toStr(entityCap->getPbhCap().inner_ether_type))); + fvList.push_back(FieldValueTuple(PBH_RULE_HASH, toStr(entityCap->getPbhCap().hash))); + fvList.push_back(FieldValueTuple(PBH_RULE_PACKET_ACTION, toStr(entityCap->getPbhCap().packet_action))); + fvList.push_back(FieldValueTuple(PBH_RULE_FLOW_COUNTER, toStr(entityCap->getPbhCap().flow_counter))); + + PbhCapabilities::capTable.set(PBH_RULE_CAPABILITIES_KEY, fvList); + + SWSS_LOG_NOTICE("Wrote PBH rule capabilities to State DB: %s key", key.c_str()); +} + +template<> +void PbhCapabilities::writePbhEntityCapabilitiesToDb(const std::shared_ptr &entityCap) +{ + SWSS_LOG_ENTER(); + + auto key = PbhCapabilities::capTable.getKeyName(PBH_HASH_CAPABILITIES_KEY); + std::vector fvList; + + fvList.push_back(FieldValueTuple(PBH_HASH_HASH_FIELD_LIST, toStr(entityCap->getPbhCap().hash_field_list))); + + PbhCapabilities::capTable.set(PBH_HASH_CAPABILITIES_KEY, fvList); + + SWSS_LOG_NOTICE("Wrote PBH 
hash capabilities to State DB: %s key", key.c_str()); +} + +template<> +void PbhCapabilities::writePbhEntityCapabilitiesToDb(const std::shared_ptr &entityCap) +{ + SWSS_LOG_ENTER(); + + auto key = PbhCapabilities::capTable.getKeyName(PBH_HASH_FIELD_CAPABILITIES_KEY); + std::vector fvList; + + fvList.push_back(FieldValueTuple(PBH_HASH_FIELD_HASH_FIELD, toStr(entityCap->getPbhCap().hash_field))); + fvList.push_back(FieldValueTuple(PBH_HASH_FIELD_IP_MASK, toStr(entityCap->getPbhCap().ip_mask))); + fvList.push_back(FieldValueTuple(PBH_HASH_FIELD_SEQUENCE_ID, toStr(entityCap->getPbhCap().sequence_id))); + + PbhCapabilities::capTable.set(PBH_HASH_FIELD_CAPABILITIES_KEY, fvList); + + SWSS_LOG_NOTICE("Wrote PBH hash field capabilities to State DB: %s key", key.c_str()); +} + +void PbhCapabilities::writePbhVendorCapabilitiesToDb() +{ + SWSS_LOG_ENTER(); + + this->writePbhEntityCapabilitiesToDb(this->table); + this->writePbhEntityCapabilitiesToDb(this->rule); + this->writePbhEntityCapabilitiesToDb(this->hash); + this->writePbhEntityCapabilitiesToDb(this->hashField); + + SWSS_LOG_NOTICE("Wrote PBH capabilities to State DB: %s table", STATE_PBH_CAPABILITIES_TABLE_NAME); +} + +bool PbhCapabilities::validatePbhTableCap(const std::vector &fieldList, PbhFieldCapability value) const +{ + SWSS_LOG_ENTER(); + + for (const auto &cit : fieldList) + { + if (cit == PBH_TABLE_INTERFACE_LIST) + { + if (!this->table->validatePbhInterfaceList(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else if (cit == PBH_TABLE_DESCRIPTION) + { + if (!this->table->validatePbhDescription(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else + { + SWSS_LOG_WARN("Unknown field(%s): skipping ...", cit.c_str()); + } + } + + return true; +} + +bool 
PbhCapabilities::validatePbhRuleCap(const std::vector &fieldList, PbhFieldCapability value) const +{ + SWSS_LOG_ENTER(); + + for (const auto &cit : fieldList) + { + if (cit == PBH_RULE_PRIORITY) + { + if (!this->rule->validatePbhPriority(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else if (cit == PBH_RULE_GRE_KEY) + { + if (!this->rule->validatePbhGreKey(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else if (cit == PBH_RULE_ETHER_TYPE) + { + if (!this->rule->validatePbhEtherType(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else if (cit == PBH_RULE_IP_PROTOCOL) + { + if (!this->rule->validatePbhIpProtocol(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else if (cit == PBH_RULE_IPV6_NEXT_HEADER) + { + if (!this->rule->validatePbhIpv6NextHeader(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else if (cit == PBH_RULE_L4_DST_PORT) + { + if (!this->rule->validatePbhL4DstPort(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else if (cit == PBH_RULE_INNER_ETHER_TYPE) + { + if (!this->rule->validatePbhInnerEtherType(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else if (cit == PBH_RULE_HASH) + { + if (!this->rule->validatePbhHash(value)) + { + SWSS_LOG_ERROR("Failed to 
validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else if (cit == PBH_RULE_PACKET_ACTION) + { + if (!this->rule->validatePbhPacketAction(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else if (cit == PBH_RULE_FLOW_COUNTER) + { + if (!this->rule->validatePbhFlowCounter(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else + { + SWSS_LOG_WARN("Unknown field(%s): skipping ...", cit.c_str()); + } + } + + return true; +} + +bool PbhCapabilities::validatePbhHashCap(const std::vector &fieldList, PbhFieldCapability value) const +{ + SWSS_LOG_ENTER(); + + for (const auto &cit : fieldList) + { + if (cit == PBH_HASH_HASH_FIELD_LIST) + { + if (!this->hash->validatePbhHashFieldList(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else + { + SWSS_LOG_WARN("Unknown field(%s): skipping ...", cit.c_str()); + } + } + + return true; +} + +bool PbhCapabilities::validatePbhHashFieldCap(const std::vector &fieldList, PbhFieldCapability value) const +{ + SWSS_LOG_ENTER(); + + for (const auto &cit : fieldList) + { + if (cit == PBH_HASH_FIELD_HASH_FIELD) + { + if (!this->hashField->validatePbhHashField(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else if (cit == PBH_HASH_FIELD_IP_MASK) + { + if (!this->hashField->validatePbhIpMask(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else if (cit == PBH_HASH_FIELD_SEQUENCE_ID) + { + if 
(!this->hashField->validatePbhSequenceId(value)) + { + SWSS_LOG_ERROR("Failed to validate field(%s): capability(%s) is not supported", + cit.c_str(), + toStr(value).c_str() + ); + return false; + } + } + else + { + SWSS_LOG_WARN("Unknown field(%s): skipping ...", cit.c_str()); + } + } + + return true; +} diff --git a/orchagent/pbh/pbhcap.h b/orchagent/pbh/pbhcap.h new file mode 100644 index 0000000000..adc2a4c9e6 --- /dev/null +++ b/orchagent/pbh/pbhcap.h @@ -0,0 +1,186 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include "dbconnector.h" +#include "table.h" + +enum class PbhAsicVendor : std::int32_t +{ + GENERIC, + MELLANOX +}; + +enum class PbhFieldCapability : std::int32_t +{ + ADD, + UPDATE, + REMOVE +}; + +class PbhVendorFieldCapabilities +{ +public: + PbhVendorFieldCapabilities() = default; + virtual ~PbhVendorFieldCapabilities() = default; + +protected: + void setPbhDefaults(std::set &fieldCap) noexcept; + +public: + struct { + std::set interface_list; + std::set description; + } table; + + struct { + std::set priority; + std::set gre_key; + std::set ether_type; + std::set ip_protocol; + std::set ipv6_next_header; + std::set l4_dst_port; + std::set inner_ether_type; + std::set hash; + std::set packet_action; + std::set flow_counter; + } rule; + + struct { + std::set hash_field_list; + } hash; + + struct { + std::set hash_field; + std::set ip_mask; + std::set sequence_id; + } hashField; +}; + +class PbhGenericFieldCapabilities final : public PbhVendorFieldCapabilities +{ +public: + PbhGenericFieldCapabilities() noexcept; + ~PbhGenericFieldCapabilities() = default; +}; + +class PbhMellanoxFieldCapabilities final : public PbhVendorFieldCapabilities +{ +public: + PbhMellanoxFieldCapabilities() noexcept; + ~PbhMellanoxFieldCapabilities() = default; +}; + +class PbhEntityCapabilities +{ +public: + PbhEntityCapabilities() = delete; + virtual ~PbhEntityCapabilities() = default; + + PbhEntityCapabilities(const std::shared_ptr &fieldCap) 
noexcept; + +protected: + bool validate(const std::set &fieldCap, PbhFieldCapability value) const; + + std::shared_ptr fieldCap; +}; + +class PbhTableCapabilities final : public PbhEntityCapabilities +{ +public: + PbhTableCapabilities() = delete; + ~PbhTableCapabilities() = default; + + PbhTableCapabilities(const std::shared_ptr &fieldCap) noexcept; + + bool validatePbhInterfaceList(PbhFieldCapability value) const; + bool validatePbhDescription(PbhFieldCapability value) const; + + auto getPbhCap() const -> const decltype(PbhVendorFieldCapabilities::table) &; +}; + +class PbhRuleCapabilities final : public PbhEntityCapabilities +{ +public: + PbhRuleCapabilities() = delete; + ~PbhRuleCapabilities() = default; + + PbhRuleCapabilities(const std::shared_ptr &fieldCap) noexcept; + + bool validatePbhPriority(PbhFieldCapability value) const; + bool validatePbhGreKey(PbhFieldCapability value) const; + bool validatePbhEtherType(PbhFieldCapability value) const; + bool validatePbhIpProtocol(PbhFieldCapability value) const; + bool validatePbhIpv6NextHeader(PbhFieldCapability value) const; + bool validatePbhL4DstPort(PbhFieldCapability value) const; + bool validatePbhInnerEtherType(PbhFieldCapability value) const; + bool validatePbhHash(PbhFieldCapability value) const; + bool validatePbhPacketAction(PbhFieldCapability value) const; + bool validatePbhFlowCounter(PbhFieldCapability value) const; + + auto getPbhCap() const -> const decltype(PbhVendorFieldCapabilities::rule) &; +}; + +class PbhHashCapabilities final : public PbhEntityCapabilities +{ +public: + PbhHashCapabilities() = delete; + ~PbhHashCapabilities() = default; + + PbhHashCapabilities(const std::shared_ptr &fieldCap) noexcept; + + bool validatePbhHashFieldList(PbhFieldCapability value) const; + + auto getPbhCap() const -> const decltype(PbhVendorFieldCapabilities::hash) &; +}; + +class PbhHashFieldCapabilities final : public PbhEntityCapabilities +{ +public: + PbhHashFieldCapabilities() = delete; + 
~PbhHashFieldCapabilities() = default; + + PbhHashFieldCapabilities(const std::shared_ptr &fieldCap) noexcept; + + bool validatePbhHashField(PbhFieldCapability value) const; + bool validatePbhIpMask(PbhFieldCapability value) const; + bool validatePbhSequenceId(PbhFieldCapability value) const; + + auto getPbhCap() const -> const decltype(PbhVendorFieldCapabilities::hashField) &; +}; + +class PbhCapabilities final +{ +public: + PbhCapabilities() noexcept; + ~PbhCapabilities() = default; + + bool validatePbhTableCap(const std::vector &fieldList, PbhFieldCapability value) const; + bool validatePbhRuleCap(const std::vector &fieldList, PbhFieldCapability value) const; + bool validatePbhHashCap(const std::vector &fieldList, PbhFieldCapability value) const; + bool validatePbhHashFieldCap(const std::vector &fieldList, PbhFieldCapability value) const; + + PbhAsicVendor getAsicVendor() const noexcept; + +private: + template + void writePbhEntityCapabilitiesToDb(const std::shared_ptr &entityCap); + + bool parsePbhAsicVendor(); + void initPbhVendorCapabilities(); + void writePbhVendorCapabilitiesToDb(); + + PbhAsicVendor asicVendor; + + std::shared_ptr table; + std::shared_ptr rule; + std::shared_ptr hash; + std::shared_ptr hashField; + + static swss::DBConnector stateDb; + static swss::Table capTable; +}; diff --git a/orchagent/pbh/pbhcnt.h b/orchagent/pbh/pbhcnt.h index 787d91b63c..90c40bb681 100644 --- a/orchagent/pbh/pbhcnt.h +++ b/orchagent/pbh/pbhcnt.h @@ -105,19 +105,22 @@ class PbhRule final : public PbhContainer } inner_ether_type; struct { + struct { + std::string name; + } meta; std::string value; bool is_set = false; } hash; struct { + struct { + std::string name; + } meta; sai_acl_entry_attr_t value; bool is_set = false; } packet_action; struct { - struct { - std::string name; - } meta; bool value; bool is_set = false; } flow_counter; diff --git a/orchagent/pbh/pbhmgr.cpp b/orchagent/pbh/pbhmgr.cpp index ed10ff756c..8dfb8e09f8 100644 --- a/orchagent/pbh/pbhmgr.cpp 
+++ b/orchagent/pbh/pbhmgr.cpp @@ -7,6 +7,7 @@ #include #include +#include "pbhschema.h" #include "ipaddress.h" #include "converter.h" #include "tokenize.h" @@ -16,42 +17,6 @@ using namespace swss; -// defines ------------------------------------------------------------------------------------------------------------ - -#define PBH_TABLE_INTERFACE_LIST "interface_list" -#define PBH_TABLE_DESCRIPTION "description" - -#define PBH_RULE_PACKET_ACTION_SET_ECMP_HASH "SET_ECMP_HASH" -#define PBH_RULE_PACKET_ACTION_SET_LAG_HASH "SET_LAG_HASH" - -#define PBH_RULE_FLOW_COUNTER_ENABLED "ENABLED" -#define PBH_RULE_FLOW_COUNTER_DISABLED "DISABLED" - -#define PBH_RULE_PRIORITY "priority" -#define PBH_RULE_GRE_KEY "gre_key" -#define PBH_RULE_ETHER_TYPE "ether_type" -#define PBH_RULE_IP_PROTOCOL "ip_protocol" -#define PBH_RULE_IPV6_NEXT_HEADER "ipv6_next_header" -#define PBH_RULE_L4_DST_PORT "l4_dst_port" -#define PBH_RULE_INNER_ETHER_TYPE "inner_ether_type" -#define PBH_RULE_HASH "hash" -#define PBH_RULE_PACKET_ACTION "packet_action" -#define PBH_RULE_FLOW_COUNTER "flow_counter" - -#define PBH_HASH_HASH_FIELD_LIST "hash_field_list" - -#define PBH_HASH_FIELD_HASH_FIELD_INNER_IP_PROTOCOL "INNER_IP_PROTOCOL" -#define PBH_HASH_FIELD_HASH_FIELD_INNER_L4_DST_PORT "INNER_L4_DST_PORT" -#define PBH_HASH_FIELD_HASH_FIELD_INNER_L4_SRC_PORT "INNER_L4_SRC_PORT" -#define PBH_HASH_FIELD_HASH_FIELD_INNER_DST_IPV4 "INNER_DST_IPV4" -#define PBH_HASH_FIELD_HASH_FIELD_INNER_SRC_IPV4 "INNER_SRC_IPV4" -#define PBH_HASH_FIELD_HASH_FIELD_INNER_DST_IPV6 "INNER_DST_IPV6" -#define PBH_HASH_FIELD_HASH_FIELD_INNER_SRC_IPV6 "INNER_SRC_IPV6" - -#define PBH_HASH_FIELD_HASH_FIELD "hash_field" -#define PBH_HASH_FIELD_IP_MASK "ip_mask" -#define PBH_HASH_FIELD_SEQUENCE_ID "sequence_id" - // constants ---------------------------------------------------------------------------------------------------------- static const std::unordered_map pbhRulePacketActionMap = @@ -712,6 +677,7 @@ bool 
PbhHelper::parsePbhRuleHash(PbhRule &rule, const std::string &field, const return false; } + rule.hash.meta.name = field; rule.hash.value = value; rule.hash.is_set = true; @@ -729,6 +695,7 @@ bool PbhHelper::parsePbhRulePacketAction(PbhRule &rule, const std::string &field return false; } + rule.packet_action.meta.name = field; rule.packet_action.value = cit->second; rule.packet_action.is_set = true; @@ -746,7 +713,6 @@ bool PbhHelper::parsePbhRuleFlowCounter(PbhRule &rule, const std::string &field, return false; } - rule.flow_counter.meta.name = field; rule.flow_counter.value = cit->second; rule.flow_counter.is_set = true; @@ -1036,6 +1002,7 @@ bool PbhHelper::validatePbhRule(PbhRule &rule) const PBH_RULE_PACKET_ACTION_SET_ECMP_HASH ); + rule.packet_action.meta.name = PBH_RULE_PACKET_ACTION; rule.packet_action.value = SAI_ACL_ENTRY_ATTR_ACTION_SET_ECMP_HASH_ID; rule.packet_action.is_set = true; @@ -1049,7 +1016,7 @@ bool PbhHelper::validatePbhRule(PbhRule &rule) const PBH_RULE_FLOW_COUNTER, PBH_RULE_FLOW_COUNTER_DISABLED ); - rule.flow_counter.meta.name = PBH_RULE_FLOW_COUNTER; + rule.flow_counter.value = false; rule.flow_counter.is_set = true; diff --git a/orchagent/pbh/pbhrule.cpp b/orchagent/pbh/pbhrule.cpp index 7d35f4bb8f..0b00e40e44 100644 --- a/orchagent/pbh/pbhrule.cpp +++ b/orchagent/pbh/pbhrule.cpp @@ -98,3 +98,50 @@ bool AclRulePbh::validateAddAction(string attr_name, string attr_value) { SWSS_LOG_THROW("This API should not be used on PbhRule"); } + +bool AclRulePbh::disableAction() +{ + const auto &cit1 = m_actions.find(SAI_ACL_ENTRY_ATTR_ACTION_SET_ECMP_HASH_ID); + if (cit1 != m_actions.cend()) + { + const auto &attr1 = cit1->second.getSaiAttr(); + if (attr1.value.aclaction.enable) + { + sai_attribute_t attr; + + attr.id = attr1.id; + attr.value.aclaction.enable = false; + attr.value.aclaction.parameter.oid = SAI_NULL_OBJECT_ID; + + if (!setAttribute(attr)) + { + return false; + } + + m_actions.erase(cit1); + } + } + + const auto &cit2 = 
m_actions.find(SAI_ACL_ENTRY_ATTR_ACTION_SET_LAG_HASH_ID); + if (cit2 != m_actions.cend()) + { + const auto &attr2 = cit2->second.getSaiAttr(); + if (attr2.value.aclaction.enable) + { + sai_attribute_t attr; + + attr.id = attr2.id; + attr.value.aclaction.enable = false; + attr.value.aclaction.parameter.oid = SAI_NULL_OBJECT_ID; + + if (!setAttribute(attr)) + { + return false; + } + + m_actions.erase(cit2); + } + } + + return true; +} diff --git a/orchagent/pbh/pbhrule.h b/orchagent/pbh/pbhrule.h index 9e661761c4..5fa5ddf1fc 100644 --- a/orchagent/pbh/pbhrule.h +++ b/orchagent/pbh/pbhrule.h @@ -13,4 +13,5 @@ class AclRulePbh: public AclRule bool validate() override; void onUpdate(SubjectType, void *) override; bool validateAddAction(string attr_name, string attr_value) override; + bool disableAction(); }; diff --git a/orchagent/pbh/pbhschema.h b/orchagent/pbh/pbhschema.h new file mode 100644 index 0000000000..3ea280f769 --- /dev/null +++ b/orchagent/pbh/pbhschema.h @@ -0,0 +1,37 @@ +#pragma once + +// defines ------------------------------------------------------------------------------------------------------------ + +#define PBH_TABLE_INTERFACE_LIST "interface_list" +#define PBH_TABLE_DESCRIPTION "description" + +#define PBH_RULE_PACKET_ACTION_SET_ECMP_HASH "SET_ECMP_HASH" +#define PBH_RULE_PACKET_ACTION_SET_LAG_HASH "SET_LAG_HASH" + +#define PBH_RULE_FLOW_COUNTER_ENABLED "ENABLED" +#define PBH_RULE_FLOW_COUNTER_DISABLED "DISABLED" + +#define PBH_RULE_PRIORITY "priority" +#define PBH_RULE_GRE_KEY "gre_key" +#define PBH_RULE_ETHER_TYPE "ether_type" +#define PBH_RULE_IP_PROTOCOL "ip_protocol" +#define PBH_RULE_IPV6_NEXT_HEADER "ipv6_next_header" +#define PBH_RULE_L4_DST_PORT "l4_dst_port" +#define PBH_RULE_INNER_ETHER_TYPE "inner_ether_type" +#define PBH_RULE_HASH "hash" +#define PBH_RULE_PACKET_ACTION "packet_action" +#define PBH_RULE_FLOW_COUNTER "flow_counter" + +#define PBH_HASH_HASH_FIELD_LIST "hash_field_list" + +#define 
PBH_HASH_FIELD_HASH_FIELD_INNER_IP_PROTOCOL "INNER_IP_PROTOCOL" +#define PBH_HASH_FIELD_HASH_FIELD_INNER_L4_DST_PORT "INNER_L4_DST_PORT" +#define PBH_HASH_FIELD_HASH_FIELD_INNER_L4_SRC_PORT "INNER_L4_SRC_PORT" +#define PBH_HASH_FIELD_HASH_FIELD_INNER_DST_IPV4 "INNER_DST_IPV4" +#define PBH_HASH_FIELD_HASH_FIELD_INNER_SRC_IPV4 "INNER_SRC_IPV4" +#define PBH_HASH_FIELD_HASH_FIELD_INNER_DST_IPV6 "INNER_DST_IPV6" +#define PBH_HASH_FIELD_HASH_FIELD_INNER_SRC_IPV6 "INNER_SRC_IPV6" + +#define PBH_HASH_FIELD_HASH_FIELD "hash_field" +#define PBH_HASH_FIELD_IP_MASK "ip_mask" +#define PBH_HASH_FIELD_SEQUENCE_ID "sequence_id" diff --git a/orchagent/pbhorch.cpp b/orchagent/pbhorch.cpp index 83a1e80bd0..e2146cb362 100644 --- a/orchagent/pbhorch.cpp +++ b/orchagent/pbhorch.cpp @@ -53,7 +53,26 @@ static inline std::vector uMapDiffByKey(const umap_t &uMap1, const umap const auto &s1 = uMapToKeySet(uMap1); const auto &s2 = uMapToKeySet(uMap2); - std::set_symmetric_difference( + std::set_difference( + s1.cbegin(), + s1.cend(), + s2.cbegin(), + s2.cend(), + std::back_inserter(v) + ); + + return v; +} + +template +static inline std::vector uMapIntersectByKey(const umap_t &uMap1, const umap_t &uMap2) +{ + std::vector v; + + const auto &s1 = uMapToKeySet(uMap1); + const auto &s2 = uMapToKeySet(uMap2); + + std::set_intersection( s1.cbegin(), s1.cend(), s2.cbegin(), @@ -76,11 +95,52 @@ PbhOrch::PbhOrch( this->portsOrch = portsOrch; } -PbhOrch::~PbhOrch() +template +std::vector PbhOrch::getPbhAddedFields(const T &obj, const T &nObj) const +{ + return uMapDiffByKey(nObj.fieldValueMap, obj.fieldValueMap); +} + +template std::vector PbhOrch::getPbhAddedFields(const PbhTable &obj, const PbhTable &nObj) const; +template std::vector PbhOrch::getPbhAddedFields(const PbhRule &obj, const PbhRule &nObj) const; +template std::vector PbhOrch::getPbhAddedFields(const PbhHash &obj, const PbhHash &nObj) const; +template std::vector PbhOrch::getPbhAddedFields(const PbhHashField &obj, const PbhHashField 
&nObj) const; + +template +std::vector PbhOrch::getPbhUpdatedFields(const T &obj, const T &nObj) const { + std::vector v; + + const auto &iv = uMapIntersectByKey(obj.fieldValueMap, nObj.fieldValueMap); + std::copy_if( + iv.cbegin(), + iv.cend(), + std::back_inserter(v), + [&obj, &nObj](const auto &f) { + return obj.fieldValueMap.at(f) != nObj.fieldValueMap.at(f); + } + ); + + return v; } +template std::vector PbhOrch::getPbhUpdatedFields(const PbhTable &obj, const PbhTable &nObj) const; +template std::vector PbhOrch::getPbhUpdatedFields(const PbhRule &obj, const PbhRule &nObj) const; +template std::vector PbhOrch::getPbhUpdatedFields(const PbhHash &obj, const PbhHash &nObj) const; +template std::vector PbhOrch::getPbhUpdatedFields(const PbhHashField &obj, const PbhHashField &nObj) const; + +template +std::vector PbhOrch::getPbhRemovedFields(const T &obj, const T &nObj) const +{ + return uMapDiffByKey(obj.fieldValueMap, nObj.fieldValueMap); +} + +template std::vector PbhOrch::getPbhRemovedFields(const PbhTable &obj, const PbhTable &nObj) const; +template std::vector PbhOrch::getPbhRemovedFields(const PbhRule &obj, const PbhRule &nObj) const; +template std::vector PbhOrch::getPbhRemovedFields(const PbhHash &obj, const PbhHash &nObj) const; +template std::vector PbhOrch::getPbhRemovedFields(const PbhHashField &obj, const PbhHashField &nObj) const; + template<> auto PbhOrch::getPbhSetupTaskMap() const -> const std::unordered_map& { @@ -252,6 +312,34 @@ bool PbhOrch::updatePbhTable(const PbhTable &table) return false; } + const auto &aFields = this->getPbhAddedFields(tObj, table); + const auto &uFields = this->getPbhUpdatedFields(tObj, table); + const auto &rFields = this->getPbhRemovedFields(tObj, table); + + if (aFields.empty() && uFields.empty() && rFields.empty()) + { + SWSS_LOG_NOTICE("PBH table(%s) in SAI is up-to-date", table.key.c_str()); + return true; + } + + if (!this->pbhCap.validatePbhTableCap(aFields, PbhFieldCapability::ADD)) + { + SWSS_LOG_ERROR("Failed 
to validate PBH table(%s) added fields: unsupported capabilities", table.key.c_str()); + return false; + } + + if (!this->pbhCap.validatePbhTableCap(uFields, PbhFieldCapability::UPDATE)) + { + SWSS_LOG_ERROR("Failed to validate PBH table(%s) updated fields: unsupported capabilities", table.key.c_str()); + return false; + } + + if (!this->pbhCap.validatePbhTableCap(rFields, PbhFieldCapability::REMOVE)) + { + SWSS_LOG_ERROR("Failed to validate PBH table(%s) removed fields: unsupported capabilities", table.key.c_str()); + return false; + } + AclTable pbhTable(this->aclOrch, table.name); if (table.interface_list.is_set) @@ -577,57 +665,227 @@ bool PbhOrch::updatePbhRule(const PbhRule &rule) return false; } - if (!uMapDiffByKey(rObj.fieldValueMap, rule.fieldValueMap).empty()) + const auto &aFields = this->getPbhAddedFields(rObj, rule); + const auto &uFields = this->getPbhUpdatedFields(rObj, rule); + const auto &rFields = this->getPbhRemovedFields(rObj, rule); + + if (aFields.empty() && uFields.empty() && rFields.empty()) + { + SWSS_LOG_NOTICE("PBH rule(%s) in SAI is up-to-date", rule.key.c_str()); + return true; + } + + if (!this->pbhCap.validatePbhRuleCap(aFields, PbhFieldCapability::ADD)) { - SWSS_LOG_ERROR("Failed to update PBH rule(%s) in SAI: fields add/remove is prohibited", rule.key.c_str()); + SWSS_LOG_ERROR("Failed to validate PBH rule(%s) added fields: unsupported capabilities", rule.key.c_str()); return false; } - bool flowCounterUpdate = false; + if (!this->pbhCap.validatePbhRuleCap(uFields, PbhFieldCapability::UPDATE)) + { + SWSS_LOG_ERROR("Failed to validate PBH rule(%s) updated fields: unsupported capabilities", rule.key.c_str()); + return false; + } - for (const auto &oCit : rObj.fieldValueMap) + if (!this->pbhCap.validatePbhRuleCap(rFields, PbhFieldCapability::REMOVE)) { - const auto &field = oCit.first; + SWSS_LOG_ERROR("Failed to validate PBH rule(%s) removed fields: unsupported capabilities", rule.key.c_str()); + return false; + } - const auto 
&oValue = oCit.second; - const auto &nValue = rule.fieldValueMap.at(field); + std::shared_ptr pbhRule; + + if (rule.flow_counter.is_set) + { + pbhRule = std::make_shared(this->aclOrch, rule.name, rule.table, rule.flow_counter.value); + } + else + { + pbhRule = std::make_shared(this->aclOrch, rule.name, rule.table); + } - if (oValue == nValue) + if (rule.priority.is_set) + { + if (!pbhRule->validateAddPriority(rule.priority.value)) { - continue; + SWSS_LOG_ERROR("Failed to configure PBH rule(%s) priority", rule.key.c_str()); + return false; } + } + + if (rule.gre_key.is_set) + { + sai_attribute_t attr; - if (field != rule.flow_counter.meta.name) + attr.id = SAI_ACL_ENTRY_ATTR_FIELD_GRE_KEY; + attr.value.aclfield.enable = true; + attr.value.aclfield.data.u32 = rule.gre_key.value; + attr.value.aclfield.mask.u32 = rule.gre_key.mask; + + if (!pbhRule->validateAddMatch(attr)) { - SWSS_LOG_ERROR( - "Failed to update PBH rule(%s) in SAI: field(%s) update is prohibited", - rule.key.c_str(), - field.c_str() - ); + SWSS_LOG_ERROR("Failed to configure PBH rule(%s) match: GRE_KEY", rule.key.c_str()); return false; } + } - flowCounterUpdate = true; + if (rule.ether_type.is_set) + { + sai_attribute_t attr; + + attr.id = SAI_ACL_ENTRY_ATTR_FIELD_ETHER_TYPE; + attr.value.aclfield.enable = true; + attr.value.aclfield.data.u16 = rule.ether_type.value; + attr.value.aclfield.mask.u16 = rule.ether_type.mask; + + if (!pbhRule->validateAddMatch(attr)) + { + SWSS_LOG_ERROR("Failed to configure PBH rule(%s) match: ETHER_TYPE", rule.key.c_str()); + return false; + } } - if (!flowCounterUpdate) + if (rule.ip_protocol.is_set) { - SWSS_LOG_NOTICE("PBH rule(%s) in SAI is up-to-date", rule.key.c_str()); - return true; + sai_attribute_t attr; + + attr.id = SAI_ACL_ENTRY_ATTR_FIELD_IP_PROTOCOL; + attr.value.aclfield.enable = true; + attr.value.aclfield.data.u8 = rule.ip_protocol.value; + attr.value.aclfield.mask.u8 = rule.ip_protocol.mask; + + if (!pbhRule->validateAddMatch(attr)) + { + 
SWSS_LOG_ERROR("Failed to configure PBH rule(%s) match: IP_PROTOCOL", rule.key.c_str()); + return false; + } + } + + if (rule.ipv6_next_header.is_set) + { + sai_attribute_t attr; + + attr.id = SAI_ACL_ENTRY_ATTR_FIELD_IPV6_NEXT_HEADER; + attr.value.aclfield.enable = true; + attr.value.aclfield.data.u8 = rule.ipv6_next_header.value; + attr.value.aclfield.mask.u8 = rule.ipv6_next_header.mask; + + if (!pbhRule->validateAddMatch(attr)) + { + SWSS_LOG_ERROR("Failed to configure PBH rule(%s) match: IPV6_NEXT_HEADER", rule.key.c_str()); + return false; + } + } + + if (rule.l4_dst_port.is_set) + { + sai_attribute_t attr; + + attr.id = SAI_ACL_ENTRY_ATTR_FIELD_L4_DST_PORT; + attr.value.aclfield.enable = true; + attr.value.aclfield.data.u16 = rule.l4_dst_port.value; + attr.value.aclfield.mask.u16 = rule.l4_dst_port.mask; + + if (!pbhRule->validateAddMatch(attr)) + { + SWSS_LOG_ERROR("Failed to configure PBH rule(%s) match: L4_DST_PORT", rule.key.c_str()); + return false; + } + } + + if (rule.inner_ether_type.is_set) + { + sai_attribute_t attr; + + attr.id = SAI_ACL_ENTRY_ATTR_FIELD_INNER_ETHER_TYPE; + attr.value.aclfield.enable = true; + attr.value.aclfield.data.u16 = rule.inner_ether_type.value; + attr.value.aclfield.mask.u16 = rule.inner_ether_type.mask; + + if (!pbhRule->validateAddMatch(attr)) + { + SWSS_LOG_ERROR("Failed to configure PBH rule(%s) match: INNER_ETHER_TYPE", rule.key.c_str()); + return false; + } } - if (!this->aclOrch->updateAclRule(rule.table, rule.name, rule.flow_counter.value)) + if (rule.hash.is_set && rule.packet_action.is_set) + { + PbhHash hObj; + + if (this->pbhHlpr.getPbhHash(hObj, rule.hash.value)) + { + sai_attribute_t attr; + + attr.id = rule.packet_action.value; + attr.value.aclaction.enable = true; + attr.value.aclaction.parameter.oid = hObj.getOid(); + + if (!pbhRule->validateAddAction(attr)) + { + SWSS_LOG_ERROR("Failed to configure PBH rule(%s) action", rule.key.c_str()); + return false; + } + } + } + + if (!pbhRule->validate()) + { + 
SWSS_LOG_ERROR("Failed to validate PBH rule(%s)", rule.key.c_str()); + return false; + } + + // Mellanox W/A + if (this->pbhCap.getAsicVendor() == PbhAsicVendor::MELLANOX) + { + const auto &hMeta = rule.hash.meta; + const auto &paMeta = rule.packet_action.meta; + + auto cond1 = std::find(uFields.cbegin(), uFields.cend(), hMeta.name) != uFields.cend(); + auto cond2 = std::find(uFields.cbegin(), uFields.cend(), paMeta.name) != uFields.cend(); + + if (cond1 || cond2) + { + auto pbhRulePtr = dynamic_cast(this->aclOrch->getAclRule(rule.table, rule.name)); + + if (pbhRulePtr == nullptr) + { + SWSS_LOG_ERROR("Failed to update PBH rule(%s) in SAI: invalid object type", rule.key.c_str()); + return false; + } + + if (!pbhRulePtr->disableAction()) + { + SWSS_LOG_ERROR("Failed to disable PBH rule(%s) action", rule.key.c_str()); + return false; + } + } + } + + if (!this->aclOrch->updateAclRule(pbhRule)) { SWSS_LOG_ERROR("Failed to update PBH rule(%s) in SAI", rule.key.c_str()); return false; } + if (!this->pbhHlpr.decRefCount(rObj)) + { + SWSS_LOG_ERROR("Failed to remove PBH rule(%s) dependencies", rObj.key.c_str()); + return false; + } + if (!this->pbhHlpr.updatePbhRule(rule)) { SWSS_LOG_ERROR("Failed to update PBH rule(%s) in internal cache", rule.key.c_str()); return false; } + if (!this->pbhHlpr.incRefCount(rule)) + { + SWSS_LOG_ERROR("Failed to add PBH rule(%s) dependencies", rule.key.c_str()); + return false; + } + SWSS_LOG_NOTICE("Updated PBH rule(%s) in SAI", rule.key.c_str()); return true; @@ -832,31 +1090,98 @@ bool PbhOrch::updatePbhHash(const PbhHash &hash) return false; } - if (!uMapDiffByKey(hObj.fieldValueMap, hash.fieldValueMap).empty()) + const auto &aFields = this->getPbhAddedFields(hObj, hash); + const auto &uFields = this->getPbhUpdatedFields(hObj, hash); + const auto &rFields = this->getPbhRemovedFields(hObj, hash); + + if (aFields.empty() && uFields.empty() && rFields.empty()) { - SWSS_LOG_ERROR("Failed to update PBH hash(%s) in SAI: fields add/remove is 
prohibited", hash.key.c_str()); + SWSS_LOG_NOTICE("PBH hash(%s) in SAI is up-to-date", hash.key.c_str()); + return true; + } + + if (!this->pbhCap.validatePbhHashCap(aFields, PbhFieldCapability::ADD)) + { + SWSS_LOG_ERROR("Failed to validate PBH hash(%s) added fields: unsupported capabilities", hash.key.c_str()); + return false; + } + + if (!this->pbhCap.validatePbhHashCap(uFields, PbhFieldCapability::UPDATE)) + { + SWSS_LOG_ERROR("Failed to validate PBH hash(%s) updated fields: unsupported capabilities", hash.key.c_str()); return false; } - for (const auto &oCit : hObj.fieldValueMap) + if (!this->pbhCap.validatePbhHashCap(rFields, PbhFieldCapability::REMOVE)) { - const auto &field = oCit.first; + SWSS_LOG_ERROR("Failed to validate PBH hash(%s) removed fields: unsupported capabilities", hash.key.c_str()); + return false; + } - const auto &oValue = oCit.second; - const auto &nValue = hash.fieldValueMap.at(field); + std::vector hashFieldOidList; - if (oValue != nValue) + if (hash.hash_field_list.is_set) + { + for (const auto &cit : hash.hash_field_list.value) { - SWSS_LOG_ERROR( - "Failed to update PBH hash(%s) in SAI: field(%s) update is prohibited", - hash.key.c_str(), - field.c_str() - ); - return false; + PbhHashField hfObj; + + if (!this->pbhHlpr.getPbhHashField(hfObj, cit)) + { + SWSS_LOG_ERROR( + "Failed to update PBH hash(%s) in SAI: missing hash field(%s)", + hash.key.c_str(), + cit.c_str() + ); + return false; + } + + hashFieldOidList.push_back(hfObj.getOid()); } } - SWSS_LOG_NOTICE("PBH hash(%s) in SAI is up-to-date", hash.key.c_str()); + if (hashFieldOidList.empty()) + { + SWSS_LOG_ERROR("Failed to update PBH hash(%s) in SAI: missing hash fields", hash.key.c_str()); + return false; + } + + sai_attribute_t attr; + + attr.id = SAI_HASH_ATTR_FINE_GRAINED_HASH_FIELD_LIST; + attr.value.objlist.count = static_cast(hashFieldOidList.size()); + attr.value.objlist.list = hashFieldOidList.data(); + + sai_status_t status; + + status = 
sai_hash_api->set_hash_attribute(hObj.getOid(), &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to update PBH hash(%s) in SAI", hash.key.c_str()); + return false; + } + + if (!this->pbhHlpr.decRefCount(hObj)) + { + SWSS_LOG_ERROR("Failed to remove PBH hash(%s) dependencies", hObj.key.c_str()); + return false; + } + + hObj.hash_field_list = hash.hash_field_list; + + if (!this->pbhHlpr.updatePbhHash(hObj)) + { + SWSS_LOG_ERROR("Failed to update PBH hash(%s) in internal cache", hObj.key.c_str()); + return false; + } + + if (!this->pbhHlpr.incRefCount(hObj)) + { + SWSS_LOG_ERROR("Failed to add PBH hash(%s) dependencies", hObj.key.c_str()); + return false; + } + + SWSS_LOG_NOTICE("Updated PBH hash(%s) in SAI", hObj.key.c_str()); return true; } @@ -1072,33 +1397,37 @@ bool PbhOrch::updatePbhHashField(const PbhHashField &hashField) return false; } - if (!uMapDiffByKey(hfObj.fieldValueMap, hashField.fieldValueMap).empty()) + const auto &aFields = this->getPbhAddedFields(hfObj, hashField); + const auto &uFields = this->getPbhUpdatedFields(hfObj, hashField); + const auto &rFields = this->getPbhRemovedFields(hfObj, hashField); + + if (aFields.empty() && uFields.empty() && rFields.empty()) { - SWSS_LOG_ERROR("Failed to update PBH hash field(%s) in SAI: fields add/remove is prohibited", hashField.key.c_str()); - return false; + SWSS_LOG_NOTICE("PBH hash field(%s) in SAI is up-to-date", hashField.key.c_str()); + return true; } - for (const auto &oCit : hfObj.fieldValueMap) + if (!this->pbhCap.validatePbhHashFieldCap(aFields, PbhFieldCapability::ADD)) { - const auto &field = oCit.first; + SWSS_LOG_ERROR("Failed to validate PBH hash field(%s) added fields: unsupported capabilities", hashField.key.c_str()); + return false; + } - const auto &oValue = oCit.second; - const auto &nValue = hashField.fieldValueMap.at(field); + if (!this->pbhCap.validatePbhHashFieldCap(uFields, PbhFieldCapability::UPDATE)) + { + SWSS_LOG_ERROR("Failed to validate PBH hash 
field(%s) updated fields: unsupported capabilities", hashField.key.c_str()); + return false; + } - if (oValue != nValue) - { - SWSS_LOG_ERROR( - "Failed to update PBH hash field(%s) in SAI: field(%s) update is prohibited", - hashField.key.c_str(), - field.c_str() - ); - return false; - } + if (!this->pbhCap.validatePbhHashFieldCap(rFields, PbhFieldCapability::REMOVE)) + { + SWSS_LOG_ERROR("Failed to validate PBH hash field(%s) removed fields: unsupported capabilities", hashField.key.c_str()); + return false; } - SWSS_LOG_NOTICE("PBH hash field(%s) in SAI is up-to-date", hashField.key.c_str()); + SWSS_LOG_ERROR("Failed to update PBH hash field(%s) in SAI: update is prohibited", hfObj.key.c_str()); - return true; + return false; } bool PbhOrch::removePbhHashField(const PbhHashField &hashField) diff --git a/orchagent/pbhorch.h b/orchagent/pbhorch.h index 1aa49e1d26..250963f54a 100644 --- a/orchagent/pbhorch.h +++ b/orchagent/pbhorch.h @@ -8,20 +8,30 @@ #include "pbh/pbhrule.h" #include "pbh/pbhmgr.h" +#include "pbh/pbhcap.h" class PbhOrch final : public Orch { public: + PbhOrch() = delete; + ~PbhOrch() = default; + PbhOrch( std::vector &connectorList, AclOrch *aclOrch, PortsOrch *portsOrch ); - ~PbhOrch(); using Orch::doTask; // Allow access to the basic doTask private: + template + std::vector getPbhAddedFields(const T &obj, const T &nObj) const; + template + std::vector getPbhUpdatedFields(const T &obj, const T &nObj) const; + template + std::vector getPbhRemovedFields(const T &obj, const T &nObj) const; + template auto getPbhSetupTaskMap() const -> const std::unordered_map&; template @@ -75,4 +85,5 @@ class PbhOrch final : public Orch PortsOrch *portsOrch; PbhHelper pbhHlpr; + PbhCapabilities pbhCap; }; diff --git a/orchagent/pfc_detect_innovium.lua b/orchagent/pfc_detect_innovium.lua index 8deedeaa4f..a948bd6fad 100644 --- a/orchagent/pfc_detect_innovium.lua +++ b/orchagent/pfc_detect_innovium.lua @@ -79,7 +79,7 @@ for i = n, 1, -1 do -- redis.call('HSET', 
counters_table_name .. ':' .. KEYS[i], 'K7_debug_3', 'YES') - if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0) or + if (occupancy_bytes > 0 and packets - packets_last == 0 and pfc_rx_packets - pfc_rx_packets_last > 0 and (pfc_duration - pfc_duration_last) > poll_time * 0.8) or -- DEBUG CODE START. Uncomment to enable (debug_storm == "enabled") or -- DEBUG CODE END. diff --git a/orchagent/pfcactionhandler.cpp b/orchagent/pfcactionhandler.cpp index 05f2d6ef56..305ed4421d 100644 --- a/orchagent/pfcactionhandler.cpp +++ b/orchagent/pfcactionhandler.cpp @@ -25,6 +25,7 @@ extern sai_object_id_t gSwitchId; extern PortsOrch *gPortsOrch; +extern SwitchOrch *gSwitchOrch; extern AclOrch * gAclOrch; extern sai_port_api_t *sai_port_api; extern sai_queue_api_t *sai_queue_api; @@ -262,6 +263,49 @@ PfcWdSaiDlrInitHandler::~PfcWdSaiDlrInitHandler(void) } } +PfcWdDlrHandler::PfcWdDlrHandler(sai_object_id_t port, sai_object_id_t queue, + uint8_t queueId, shared_ptr
countersTable): + PfcWdLossyHandler(port, queue, queueId, countersTable) +{ + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + attr.id = SAI_QUEUE_ATTR_PFC_DLR_INIT; + attr.value.booldata = true; + + // Set DLR init to true to start PFC deadlock recovery + sai_status_t status = sai_queue_api->set_queue_attribute(queue, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set PFC DLR INIT on port 0x%" PRIx64 " queue 0x%" PRIx64 + " queueId %d : %d", + port, queue, queueId, status); + return; + } +} + +PfcWdDlrHandler::~PfcWdDlrHandler(void) +{ + SWSS_LOG_ENTER(); + + sai_object_id_t port = getPort(); + sai_object_id_t queue = getQueue(); + uint8_t queueId = getQueueId(); + + sai_attribute_t attr; + attr.id = SAI_QUEUE_ATTR_PFC_DLR_INIT; + attr.value.booldata = false; + + // Set DLR init to false to stop PFC deadlock recovery + sai_status_t status = sai_queue_api->set_queue_attribute(getQueue(), &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to clear PFC DLR INIT on port 0x%" PRIx64 " queue 0x%" PRIx64 + " queueId %d : %d", port, queue, queueId, status); + return; + } +} + PfcWdAclHandler::PfcWdAclHandler(sai_object_id_t port, sai_object_id_t queue, uint8_t queueId, shared_ptr
countersTable): PfcWdLossyHandler(port, queue, queueId, countersTable) @@ -440,7 +484,7 @@ PfcWdLossyHandler::PfcWdLossyHandler(sai_object_id_t port, sai_object_id_t queue SWSS_LOG_ENTER(); string platform = getenv("platform") ? getenv("platform") : ""; - if (platform == CISCO_8000_PLATFORM_SUBSTRING) + if (platform == CISCO_8000_PLATFORM_SUBSTRING || ((platform == BRCM_PLATFORM_SUBSTRING) && (gSwitchOrch->checkPfcDlrInitEnable()))) { SWSS_LOG_DEBUG("Skipping in constructor PfcWdLossyHandler for platform %s on port 0x%" PRIx64, platform.c_str(), port); @@ -467,7 +511,7 @@ PfcWdLossyHandler::~PfcWdLossyHandler(void) SWSS_LOG_ENTER(); string platform = getenv("platform") ? getenv("platform") : ""; - if (platform == CISCO_8000_PLATFORM_SUBSTRING) + if (platform == CISCO_8000_PLATFORM_SUBSTRING || ((platform == BRCM_PLATFORM_SUBSTRING) && (gSwitchOrch->checkPfcDlrInitEnable()))) { SWSS_LOG_DEBUG("Skipping in destructor PfcWdLossyHandler for platform %s on port 0x%" PRIx64, platform.c_str(), getPort()); @@ -565,7 +609,7 @@ PfcWdZeroBufferHandler::PfcWdZeroBufferHandler(sai_object_id_t port, return; } - setPriorityGroupAndQueueLockFlag(portInstance, true); + setQueueLockFlag(portInstance, true); sai_attribute_t attr; attr.id = SAI_QUEUE_ATTR_BUFFER_PROFILE_ID; @@ -581,7 +625,7 @@ PfcWdZeroBufferHandler::PfcWdZeroBufferHandler(sai_object_id_t port, sai_object_id_t oldQueueProfileId = attr.value.oid; attr.id = SAI_QUEUE_ATTR_BUFFER_PROFILE_ID; - attr.value.oid = ZeroBufferProfile::getZeroBufferProfile(false); + attr.value.oid = ZeroBufferProfile::getZeroBufferProfile(); // Set our zero buffer profile status = sai_queue_api->set_queue_attribute(queue, &attr); @@ -593,35 +637,6 @@ PfcWdZeroBufferHandler::PfcWdZeroBufferHandler(sai_object_id_t port, // Save original buffer profile m_originalQueueBufferProfile = oldQueueProfileId; - - // Get PG - sai_object_id_t pg = portInstance.m_priority_group_ids[static_cast (queueId)]; - - attr.id = 
SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE; - - // Get PG's buffer profile - status = sai_buffer_api->get_ingress_priority_group_attribute(pg, 1, &attr); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to get buffer profile ID on PG 0x%" PRIx64 ": %d", pg, status); - return; - } - - // Set zero profile to PG - sai_object_id_t oldPgProfileId = attr.value.oid; - - attr.id = SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE; - attr.value.oid = ZeroBufferProfile::getZeroBufferProfile(true); - - status = sai_buffer_api->set_ingress_priority_group_attribute(pg, &attr); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to set buffer profile ID on pg 0x%" PRIx64 ": %d", pg, status); - return; - } - - // Save original buffer profile - m_originalPgBufferProfile = oldPgProfileId; } PfcWdZeroBufferHandler::~PfcWdZeroBufferHandler(void) @@ -647,41 +662,12 @@ PfcWdZeroBufferHandler::~PfcWdZeroBufferHandler(void) return; } - auto idx = size_t(getQueueId()); - sai_object_id_t pg = portInstance.m_priority_group_ids[idx]; - sai_object_id_t pending_profile_id = portInstance.m_priority_group_pending_profile[idx]; - - attr.id = SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE; - - if (pending_profile_id != SAI_NULL_OBJECT_ID) - { - attr.value.oid = pending_profile_id; - SWSS_LOG_NOTICE("Priority group %zd on port %s has been restored to pending profile 0x%" PRIx64, - idx, portInstance.m_alias.c_str(), pending_profile_id); - portInstance.m_priority_group_pending_profile[idx] = SAI_NULL_OBJECT_ID; - } - else - { - attr.value.oid = m_originalPgBufferProfile; - SWSS_LOG_NOTICE("Priority group %zd on port %s has been restored to original profile 0x%" PRIx64, - idx, portInstance.m_alias.c_str(), m_originalPgBufferProfile); - } - - // Set our zero buffer profile - status = sai_buffer_api->set_ingress_priority_group_attribute(pg, &attr); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to set buffer profile ID on queue 0x%" PRIx64 ": %d", 
getQueue(), status); - return; - } - - setPriorityGroupAndQueueLockFlag(portInstance, false); + setQueueLockFlag(portInstance, false); } -void PfcWdZeroBufferHandler::setPriorityGroupAndQueueLockFlag(Port& port, bool isLocked) const +void PfcWdZeroBufferHandler::setQueueLockFlag(Port& port, bool isLocked) const { - // set lock bits on PG and queue - port.m_priority_group_lock[static_cast(getQueueId())] = isLocked; + // set lock bits on queue for (size_t i = 0; i < port.m_queue_ids.size(); ++i) { if (port.m_queue_ids[i] == getQueue()) @@ -701,9 +687,8 @@ PfcWdZeroBufferHandler::ZeroBufferProfile::~ZeroBufferProfile(void) { SWSS_LOG_ENTER(); - // Destroy ingress and egress profiles and pools - destroyZeroBufferProfile(true); - destroyZeroBufferProfile(false); + // Destroy egress profiles and pools + destroyZeroBufferProfile(); } PfcWdZeroBufferHandler::ZeroBufferProfile &PfcWdZeroBufferHandler::ZeroBufferProfile::getInstance(void) @@ -715,24 +700,25 @@ PfcWdZeroBufferHandler::ZeroBufferProfile &PfcWdZeroBufferHandler::ZeroBufferPro return instance; } -sai_object_id_t PfcWdZeroBufferHandler::ZeroBufferProfile::getZeroBufferProfile(bool ingress) +sai_object_id_t PfcWdZeroBufferHandler::ZeroBufferProfile::getZeroBufferProfile() { SWSS_LOG_ENTER(); - if (getInstance().getProfile(ingress) == SAI_NULL_OBJECT_ID) + if (getInstance().getProfile() == SAI_NULL_OBJECT_ID) { - getInstance().createZeroBufferProfile(ingress); + getInstance().createZeroBufferProfile(); } - return getInstance().getProfile(ingress); + return getInstance().getProfile(); } -void PfcWdZeroBufferHandler::ZeroBufferProfile::createZeroBufferProfile(bool ingress) +void PfcWdZeroBufferHandler::ZeroBufferProfile::createZeroBufferProfile() { SWSS_LOG_ENTER(); sai_attribute_t attr; vector attribs; + sai_status_t status; // Create zero pool attr.id = SAI_BUFFER_POOL_ATTR_SIZE; @@ -740,18 +726,18 @@ void PfcWdZeroBufferHandler::ZeroBufferProfile::createZeroBufferProfile(bool ing attribs.push_back(attr); attr.id = 
SAI_BUFFER_POOL_ATTR_TYPE; - attr.value.u32 = ingress ? SAI_BUFFER_POOL_TYPE_INGRESS : SAI_BUFFER_POOL_TYPE_EGRESS; + attr.value.u32 = SAI_BUFFER_POOL_TYPE_EGRESS; attribs.push_back(attr); attr.id = SAI_BUFFER_POOL_ATTR_THRESHOLD_MODE; attr.value.u32 = SAI_BUFFER_POOL_THRESHOLD_MODE_DYNAMIC; attribs.push_back(attr); - sai_status_t status = sai_buffer_api->create_buffer_pool( - &getPool(ingress), - gSwitchId, - static_cast(attribs.size()), - attribs.data()); + status = sai_buffer_api->create_buffer_pool( + &getPool(), + gSwitchId, + static_cast(attribs.size()), + attribs.data()); if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to create dynamic zero buffer pool for PFC WD: %d", status); @@ -762,7 +748,7 @@ void PfcWdZeroBufferHandler::ZeroBufferProfile::createZeroBufferProfile(bool ing attribs.clear(); attr.id = SAI_BUFFER_PROFILE_ATTR_POOL_ID; - attr.value.oid = getPool(ingress); + attr.value.oid = getPool(); attribs.push_back(attr); attr.id = SAI_BUFFER_PROFILE_ATTR_THRESHOLD_MODE; @@ -774,11 +760,11 @@ void PfcWdZeroBufferHandler::ZeroBufferProfile::createZeroBufferProfile(bool ing attribs.push_back(attr); attr.id = SAI_BUFFER_PROFILE_ATTR_SHARED_DYNAMIC_TH; - attr.value.s8 = -8; // ALPHA_0 + attr.value.s8 = -8; attribs.push_back(attr); status = sai_buffer_api->create_buffer_profile( - &getProfile(ingress), + &getProfile(), gSwitchId, static_cast(attribs.size()), attribs.data()); @@ -789,18 +775,18 @@ void PfcWdZeroBufferHandler::ZeroBufferProfile::createZeroBufferProfile(bool ing } } -void PfcWdZeroBufferHandler::ZeroBufferProfile::destroyZeroBufferProfile(bool ingress) +void PfcWdZeroBufferHandler::ZeroBufferProfile::destroyZeroBufferProfile() { SWSS_LOG_ENTER(); - sai_status_t status = sai_buffer_api->remove_buffer_profile(getProfile(ingress)); + sai_status_t status = sai_buffer_api->remove_buffer_profile(getProfile()); if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to remove static zero buffer profile for PFC WD: %d", status); return; } 
- status = sai_buffer_api->remove_buffer_pool(getPool(ingress)); + status = sai_buffer_api->remove_buffer_pool(getPool()); if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to remove static zero buffer pool for PFC WD: %d", status); diff --git a/orchagent/pfcactionhandler.h b/orchagent/pfcactionhandler.h index a55ac3b4a4..acfc923423 100644 --- a/orchagent/pfcactionhandler.h +++ b/orchagent/pfcactionhandler.h @@ -115,6 +115,14 @@ class PfcWdAclHandler: public PfcWdLossyHandler void updatePfcAclRule(shared_ptr rule, uint8_t queueId, string strTable, vector port); }; +class PfcWdDlrHandler: public PfcWdLossyHandler +{ + public: + PfcWdDlrHandler(sai_object_id_t port, sai_object_id_t queue, + uint8_t queueId, shared_ptr
countersTable); + virtual ~PfcWdDlrHandler(void); +}; + // PFC queue that implements drop action by draining queue with buffer of zero size class PfcWdZeroBufferHandler: public PfcWdLossyHandler { @@ -125,42 +133,39 @@ class PfcWdZeroBufferHandler: public PfcWdLossyHandler private: /* - * Sets lock bits on port's priority group and queue - * to protect them from being changed by other Orch's - */ - void setPriorityGroupAndQueueLockFlag(Port& port, bool isLocked) const; + * Sets lock bits on port's queue + * to protect it from being changed by other Orch's + */ + void setQueueLockFlag(Port& port, bool isLocked) const; // Singletone class for keeping shared data - zero buffer profiles class ZeroBufferProfile { public: ~ZeroBufferProfile(void); - static sai_object_id_t getZeroBufferProfile(bool ingress); + static sai_object_id_t getZeroBufferProfile(); private: ZeroBufferProfile(void); static ZeroBufferProfile &getInstance(void); - void createZeroBufferProfile(bool ingress); - void destroyZeroBufferProfile(bool ingress); + void createZeroBufferProfile(); + void destroyZeroBufferProfile(); - sai_object_id_t& getProfile(bool ingress) + sai_object_id_t& getProfile() { - return ingress ? m_zeroIngressBufferProfile : m_zeroEgressBufferProfile; + return m_zeroEgressBufferProfile; } - sai_object_id_t& getPool(bool ingress) + sai_object_id_t& getPool() { - return ingress ? 
m_zeroIngressBufferPool : m_zeroEgressBufferPool; + return m_zeroEgressBufferPool; } - sai_object_id_t m_zeroIngressBufferPool = SAI_NULL_OBJECT_ID; sai_object_id_t m_zeroEgressBufferPool = SAI_NULL_OBJECT_ID; - sai_object_id_t m_zeroIngressBufferProfile = SAI_NULL_OBJECT_ID; sai_object_id_t m_zeroEgressBufferProfile = SAI_NULL_OBJECT_ID; }; sai_object_id_t m_originalQueueBufferProfile = SAI_NULL_OBJECT_ID; - sai_object_id_t m_originalPgBufferProfile = SAI_NULL_OBJECT_ID; }; // PFC queue that implements drop action by draining queue via SAI diff --git a/orchagent/pfcwdorch.cpp b/orchagent/pfcwdorch.cpp index be4c1e51c4..1d662e97f4 100644 --- a/orchagent/pfcwdorch.cpp +++ b/orchagent/pfcwdorch.cpp @@ -27,9 +27,14 @@ #define PFC_WD_TC_MAX 8 #define COUNTER_CHECK_POLL_TIMEOUT_SEC 1 +extern sai_object_id_t gSwitchId; +extern sai_switch_api_t* sai_switch_api; extern sai_port_api_t *sai_port_api; extern sai_queue_api_t *sai_queue_api; +extern event_handle_t g_events_handle; + +extern SwitchOrch *gSwitchOrch; extern PortsOrch *gPortsOrch; template @@ -229,6 +234,36 @@ task_process_status PfcWdOrch::createEntry(const st SWSS_LOG_ERROR("Unsupported action %s for platform %s", value.c_str(), m_platform.c_str()); return task_process_status::task_invalid_entry; } + if(m_platform == BRCM_PLATFORM_SUBSTRING) + { + if(gSwitchOrch->checkPfcDlrInitEnable()) + { + if(m_pfcwd_ports.empty()) + { + sai_attribute_t attr; + attr.id = SAI_SWITCH_ATTR_PFC_DLR_PACKET_ACTION; + attr.value.u32 = packet_action_map.at(value); + + sai_status_t status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + if(status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set switch level PFC DLR packet action rv : %d", status); + return task_process_status::task_invalid_entry; + } + setPfcDlrPacketAction(action); + } + else + { + if(getPfcDlrPacketAction() != action) + { + string DlrPacketAction = serializeAction(getPfcDlrPacketAction()); + SWSS_LOG_ERROR("Invalid PFC Watchdog action %s as 
switch level action %s is set", + value.c_str(), DlrPacketAction.c_str()); + return task_process_status::task_invalid_entry; + } + } + } + } } else { @@ -272,6 +307,7 @@ task_process_status PfcWdOrch::createEntry(const st } SWSS_LOG_NOTICE("Started PFC Watchdog on port %s", port.m_alias.c_str()); + m_pfcwd_ports.insert(port.m_alias); return task_process_status::task_success; } @@ -290,6 +326,7 @@ task_process_status PfcWdOrch::deleteEntry(const st } SWSS_LOG_NOTICE("Stopped PFC Watchdog on port %s", name.c_str()); + m_pfcwd_ports.erase(port.m_alias); return task_process_status::task_success; } @@ -399,9 +436,9 @@ void PfcWdSwOrch::enableBigRedSwitchMode() continue; } - if (!gPortsOrch->getPortPfc(port.m_port_id, &pfcMask)) + if (!gPortsOrch->getPortPfcWatchdogStatus(port.m_port_id, &pfcMask)) { - SWSS_LOG_ERROR("Failed to get PFC mask on port %s", port.m_alias.c_str()); + SWSS_LOG_ERROR("Failed to get PFC watchdog mask on port %s", port.m_alias.c_str()); return; } @@ -443,9 +480,9 @@ void PfcWdSwOrch::enableBigRedSwitchMode() continue; } - if (!gPortsOrch->getPortPfc(port.m_port_id, &pfcMask)) + if (!gPortsOrch->getPortPfcWatchdogStatus(port.m_port_id, &pfcMask)) { - SWSS_LOG_ERROR("Failed to get PFC mask on port %s", port.m_alias.c_str()); + SWSS_LOG_ERROR("Failed to get PFC watchdog mask on port %s", port.m_alias.c_str()); return; } @@ -489,7 +526,7 @@ bool PfcWdSwOrch::registerInWdDb(const Port& port, uint8_t pfcMask = 0; - if (!gPortsOrch->getPortPfc(port.m_port_id, &pfcMask)) + if (!gPortsOrch->getPortPfcWatchdogStatus(port.m_port_id, &pfcMask)) { SWSS_LOG_ERROR("Failed to get PFC mask on port %s", port.m_alias.c_str()); return false; @@ -900,6 +937,26 @@ void PfcWdSwOrch::doTask(SelectableTimer &timer) } +template +void PfcWdSwOrch::report_pfc_storm( + sai_object_id_t id, const PfcWdQueueEntry *entry) +{ + event_params_t params = { + { "ifname", entry->portAlias }, + { "queue_index", to_string(entry->index) }, + { "queue_id", to_string(id) }, + { "port_id", 
to_string(entry->portId) }}; + + SWSS_LOG_NOTICE( + "PFC Watchdog detected PFC storm on port %s, queue index %d, queue id 0x%" PRIx64 " and port id 0x%" PRIx64 ".", + entry->portAlias.c_str(), + entry->index, + id, + entry->portId); + + event_publish(g_events_handle, "pfc-storm", ¶ms); +} + template bool PfcWdSwOrch::startWdActionOnQueue(const string &event, sai_object_id_t queueId) { @@ -922,12 +979,7 @@ bool PfcWdSwOrch::startWdActionOnQueue(const string { if (entry->second.handler == nullptr) { - SWSS_LOG_NOTICE( - "PFC Watchdog detected PFC storm on port %s, queue index %d, queue id 0x%" PRIx64 " and port id 0x%" PRIx64 ".", - entry->second.portAlias.c_str(), - entry->second.index, - entry->first, - entry->second.portId); + report_pfc_storm(entry->first, &entry->second); entry->second.handler = make_shared( entry->second.portId, @@ -944,12 +996,7 @@ bool PfcWdSwOrch::startWdActionOnQueue(const string { if (entry->second.handler == nullptr) { - SWSS_LOG_NOTICE( - "PFC Watchdog detected PFC storm on port %s, queue index %d, queue id 0x%" PRIx64 " and port id 0x%" PRIx64 ".", - entry->second.portAlias.c_str(), - entry->second.index, - entry->first, - entry->second.portId); + report_pfc_storm(entry->first, &entry->second); entry->second.handler = make_shared( entry->second.portId, @@ -966,12 +1013,7 @@ bool PfcWdSwOrch::startWdActionOnQueue(const string { if (entry->second.handler == nullptr) { - SWSS_LOG_NOTICE( - "PFC Watchdog detected PFC storm on port %s, queue index %d, queue id 0x%" PRIx64 " and port id 0x%" PRIx64 ".", - entry->second.portAlias.c_str(), - entry->second.index, - entry->first, - entry->second.portId); + report_pfc_storm(entry->first, &entry->second); entry->second.handler = make_shared( entry->second.portId, @@ -1064,4 +1106,5 @@ bool PfcWdSwOrch::bake() // Trick to keep member functions in a separate file template class PfcWdSwOrch; template class PfcWdSwOrch; +template class PfcWdSwOrch; template class PfcWdSwOrch; diff --git 
a/orchagent/pfcwdorch.h b/orchagent/pfcwdorch.h index 4013ab9ad5..8c30d7068e 100644 --- a/orchagent/pfcwdorch.h +++ b/orchagent/pfcwdorch.h @@ -7,6 +7,7 @@ #include "producertable.h" #include "notificationconsumer.h" #include "timer.h" +#include "events.h" extern "C" { #include "sai.h" @@ -22,6 +23,12 @@ enum class PfcWdAction PFC_WD_ACTION_ALERT, }; +static const map packet_action_map = { + {"drop", SAI_PACKET_ACTION_DROP}, + {"forward", SAI_PACKET_ACTION_FORWARD}, + {"alert", SAI_PACKET_ACTION_FORWARD} +}; + template class PfcWdOrch: public Orch { @@ -49,15 +56,18 @@ class PfcWdOrch: public Orch virtual task_process_status createEntry(const string& key, const vector& data); task_process_status deleteEntry(const string& name); + PfcWdAction getPfcDlrPacketAction() { return PfcDlrPacketAction; } + void setPfcDlrPacketAction(PfcWdAction action) { PfcDlrPacketAction = action; } protected: virtual bool startWdActionOnQueue(const string &event, sai_object_id_t queueId) = 0; string m_platform = ""; - private: shared_ptr m_countersDb = nullptr; shared_ptr
m_countersTable = nullptr; + PfcWdAction PfcDlrPacketAction = PfcWdAction::PFC_WD_ACTION_UNKNOWN; + std::set m_pfcwd_ports; }; template @@ -118,6 +128,8 @@ class PfcWdSwOrch: public PfcWdOrch void enableBigRedSwitchMode(); void setBigRedSwitchMode(string value); + void report_pfc_storm(sai_object_id_t id, const PfcWdQueueEntry *); + map m_entryMap; map m_brsEntryMap; diff --git a/orchagent/policerorch.cpp b/orchagent/policerorch.cpp index c4528f6330..68dfffe898 100644 --- a/orchagent/policerorch.cpp +++ b/orchagent/policerorch.cpp @@ -8,10 +8,13 @@ using namespace std; using namespace swss; extern sai_policer_api_t* sai_policer_api; +extern sai_port_api_t *sai_port_api; extern sai_object_id_t gSwitchId; extern PortsOrch* gPortsOrch; +#define ETHERNET_PREFIX "Ethernet" + static const string meter_type_field = "METER_TYPE"; static const string mode_field = "MODE"; static const string color_source_field = "COLOR_SOURCE"; @@ -23,6 +26,11 @@ static const string green_packet_action_field = "GREEN_PACKET_ACTION"; static const string red_packet_action_field = "RED_PACKET_ACTION"; static const string yellow_packet_action_field = "YELLOW_PACKET_ACTION"; +static const string storm_control_kbps = "KBPS"; +static const string storm_broadcast = "broadcast"; +static const string storm_unknown_unicast = "unknown-unicast"; +static const string storm_unknown_mcast = "unknown-multicast"; + static const map meter_type_map = { {"PACKETS", SAI_METER_TYPE_PACKETS}, {"BYTES", SAI_METER_TYPE_BYTES} @@ -105,15 +113,268 @@ bool PolicerOrch::decreaseRefCount(const string &name) return true; } -PolicerOrch::PolicerOrch(DBConnector* db, string tableName) : - Orch(db, tableName) +PolicerOrch::PolicerOrch(vector &tableNames, PortsOrch *portOrch) : Orch(tableNames), m_portsOrch(portOrch) { SWSS_LOG_ENTER(); } +task_process_status PolicerOrch::handlePortStormControlTable(swss::KeyOpFieldsValuesTuple tuple) +{ + auto key = kfvKey(tuple); + auto op = kfvOp(tuple); + string storm_key = key; + auto 
tokens = tokenize(storm_key, config_db_key_delimiter); + auto interface_name = tokens[0]; + auto storm_type = tokens[1]; + Port port; + + /*Only proceed for Ethernet interfaces*/ + if (strncmp(interface_name.c_str(), ETHERNET_PREFIX, strlen(ETHERNET_PREFIX))) + { + SWSS_LOG_ERROR("%s: Unsupported / Invalid interface %s", + storm_type.c_str(), interface_name.c_str()); + return task_process_status::task_success; + } + if (!gPortsOrch->getPort(interface_name, port)) + { + SWSS_LOG_ERROR("Failed to apply storm-control %s to port %s. Port not found", + storm_type.c_str(), interface_name.c_str()); + /*continue here as there can be more interfaces*/ + return task_process_status::task_success; + } + /*Policer Name: __*/ + const auto storm_policer_name = "_"+interface_name+"_"+storm_type; + + if (op == SET_COMMAND) + { + // Mark the operation as an 'update', if the policer exists. + bool update = m_syncdPolicers.find(storm_policer_name) != m_syncdPolicers.end(); + vector attrs; + bool cir = false; + sai_attribute_t attr; + + /*Meter type hardcoded to BYTES*/ + attr.id = SAI_POLICER_ATTR_METER_TYPE; + attr.value.s32 = (sai_meter_type_t) meter_type_map.at("BYTES"); + attrs.push_back(attr); + + /*Policer mode hardcoded to STORM_CONTROL*/ + attr.id = SAI_POLICER_ATTR_MODE; + attr.value.s32 = (sai_policer_mode_t) policer_mode_map.at("STORM_CONTROL"); + attrs.push_back(attr); + + /*Red Packet Action hardcoded to DROP*/ + attr.id = SAI_POLICER_ATTR_RED_PACKET_ACTION; + attr.value.s32 = packet_action_map.at("DROP"); + attrs.push_back(attr); + + for (auto i = kfvFieldsValues(tuple).begin(); + i != kfvFieldsValues(tuple).end(); ++i) + { + auto field = to_upper(fvField(*i)); + auto value = to_upper(fvValue(*i)); + + /*BPS value is used as CIR*/ + if (field == storm_control_kbps) + { + attr.id = SAI_POLICER_ATTR_CIR; + /*convert kbps to bps*/ + attr.value.u64 = (stoul(value)*1000/8); + cir = true; + attrs.push_back(attr); + SWSS_LOG_DEBUG("CIR %s",value.c_str()); + } + else + { + 
SWSS_LOG_ERROR("Unknown storm control attribute %s specified", + field.c_str()); + continue; + } + } + /*CIR is mandatory parameter*/ + if (!cir) + { + SWSS_LOG_ERROR("Failed to create storm control policer %s,\ + missing mandatory fields", storm_policer_name.c_str()); + return task_process_status::task_failed; + } + + /*Enabling storm-control on port*/ + sai_attribute_t port_attr; + if (storm_type == storm_broadcast) + { + port_attr.id = SAI_PORT_ATTR_BROADCAST_STORM_CONTROL_POLICER_ID; + } + else if (storm_type == storm_unknown_unicast) + { + port_attr.id = SAI_PORT_ATTR_FLOOD_STORM_CONTROL_POLICER_ID; + } + else if (storm_type == storm_unknown_mcast) + { + port_attr.id = SAI_PORT_ATTR_MULTICAST_STORM_CONTROL_POLICER_ID; + } + else + { + SWSS_LOG_ERROR("Unknown storm_type %s", storm_type.c_str()); + return task_process_status::task_failed; + } + + sai_object_id_t policer_id; + // Create a new policer + if (!update) + { + sai_status_t status = sai_policer_api->create_policer( + &policer_id, gSwitchId, (uint32_t)attrs.size(), attrs.data()); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create policer %s, rv:%d", + storm_policer_name.c_str(), status); + if (handleSaiCreateStatus(SAI_API_POLICER, status) == task_need_retry) + { + return task_process_status::task_need_retry; + } + } + + SWSS_LOG_DEBUG("Created storm-control policer %s", storm_policer_name.c_str()); + m_syncdPolicers[storm_policer_name] = policer_id; + m_policerRefCounts[storm_policer_name] = 0; + } + // Update an existing policer + else + { + policer_id = m_syncdPolicers[storm_policer_name]; + + // The update operation has limitations that it could only update + // the rate and the size accordingly. 
+ // STORM_CONTROL: CIR, CBS + for (auto & attr: attrs) + { + if (attr.id != SAI_POLICER_ATTR_CIR) + { + continue; + } + + sai_status_t status = sai_policer_api->set_policer_attribute( + policer_id, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to update policer %s attribute, rv:%d", + storm_policer_name.c_str(), status); + if (handleSaiSetStatus(SAI_API_POLICER, status) == task_need_retry) + { + return task_process_status::task_need_retry; + } + + } + } + } + policer_id = m_syncdPolicers[storm_policer_name]; + + if (update) + { + SWSS_LOG_NOTICE("update storm-control policer %s", storm_policer_name.c_str()); + port_attr.value.oid = SAI_NULL_OBJECT_ID; + /*Remove and re-apply policer*/ + sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &port_attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove storm-control %s from port %s, rv:%d", + storm_type.c_str(), interface_name.c_str(), status); + if (handleSaiSetStatus(SAI_API_POLICER, status) == task_need_retry) + { + return task_process_status::task_need_retry; + } + } + } + port_attr.value.oid = policer_id; + + sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &port_attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to apply storm-control %s to port %s, rv:%d", + storm_type.c_str(), interface_name.c_str(),status); + + /*TODO: Do the below policer cleanup in an API*/ + /*Remove the already created policer*/ + if (SAI_STATUS_SUCCESS != sai_policer_api->remove_policer( + m_syncdPolicers[storm_policer_name])) + { + SWSS_LOG_ERROR("Failed to remove policer %s, rv:%d", + storm_policer_name.c_str(), status); + /*TODO: Just doing a syslog. 
*/ + } + + SWSS_LOG_NOTICE("Removed policer %s as set_port_attribute for %s failed", + storm_policer_name.c_str(),interface_name.c_str()); + m_syncdPolicers.erase(storm_policer_name); + m_policerRefCounts.erase(storm_policer_name); + + return task_process_status::task_need_retry; + } + } + else if (op == DEL_COMMAND) + { + if (m_syncdPolicers.find(storm_policer_name) == m_syncdPolicers.end()) + { + SWSS_LOG_ERROR("Policer %s not configured", storm_policer_name.c_str()); + return task_process_status::task_success; + } + + sai_attribute_t port_attr; + if (storm_type == storm_broadcast) + { + port_attr.id = SAI_PORT_ATTR_BROADCAST_STORM_CONTROL_POLICER_ID; + } + else if (storm_type == storm_unknown_unicast) + { + port_attr.id = SAI_PORT_ATTR_FLOOD_STORM_CONTROL_POLICER_ID; + } + else if (storm_type == storm_unknown_mcast) + { + port_attr.id = SAI_PORT_ATTR_MULTICAST_STORM_CONTROL_POLICER_ID; + } + else + { + SWSS_LOG_ERROR("Unknown storm_type %s", storm_type.c_str()); + return task_process_status::task_failed; + } + + port_attr.value.oid = SAI_NULL_OBJECT_ID; + + sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &port_attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove storm-control %s from port %s, rv:%d", + storm_type.c_str(), interface_name.c_str(), status); + if (handleSaiRemoveStatus(SAI_API_POLICER, status) == task_need_retry) + { + return task_process_status::task_need_retry; + } + } + + status = sai_policer_api->remove_policer( + m_syncdPolicers[storm_policer_name]); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove policer %s, rv:%d", + storm_policer_name.c_str(), status); + if (handleSaiRemoveStatus(SAI_API_POLICER, status) == task_need_retry) + { + return task_process_status::task_need_retry; + } + } + + SWSS_LOG_NOTICE("Removed policer %s", storm_policer_name.c_str()); + m_syncdPolicers.erase(storm_policer_name); + m_policerRefCounts.erase(storm_policer_name); + } + return 
task_process_status::task_success; +} + void PolicerOrch::doTask(Consumer &consumer) { SWSS_LOG_ENTER(); + task_process_status storm_status = task_success; if (!gPortsOrch->allPortsReady()) { @@ -127,7 +388,23 @@ void PolicerOrch::doTask(Consumer &consumer) auto key = kfvKey(tuple); auto op = kfvOp(tuple); + auto table_name = consumer.getTableName(); + // Special handling for storm-control configuration. + if (table_name == CFG_PORT_STORM_CONTROL_TABLE_NAME) + { + storm_status = handlePortStormControlTable(tuple); + if ((storm_status == task_process_status::task_success) || + (storm_status == task_process_status::task_failed)) + { + it = consumer.m_toSync.erase(it); + } + else + { + it++; + } + continue; + } if (op == SET_COMMAND) { // Mark the operation as an 'update', if the policer exists. diff --git a/orchagent/policerorch.h b/orchagent/policerorch.h index d735da03b7..9814179958 100644 --- a/orchagent/policerorch.h +++ b/orchagent/policerorch.h @@ -14,16 +14,20 @@ typedef map PolicerRefCountTable; class PolicerOrch : public Orch { public: - PolicerOrch(DBConnector* db, string tableName); + PolicerOrch(vector &tableNames, PortsOrch *portOrch); bool policerExists(const string &name); bool getPolicerOid(const string &name, sai_object_id_t &oid); bool increaseRefCount(const string &name); bool decreaseRefCount(const string &name); + task_process_status handlePortStormControlTable(swss::KeyOpFieldsValuesTuple tuple); private: + PortsOrch *m_portsOrch; virtual void doTask(Consumer& consumer); PolicerTable m_syncdPolicers; PolicerRefCountTable m_policerRefCounts; }; + + diff --git a/orchagent/port.h b/orchagent/port.h index 83f61e1b1c..cf79267243 100644 --- a/orchagent/port.h +++ b/orchagent/port.h @@ -2,7 +2,7 @@ #define SWSS_PORT_H extern "C" { -#include "sai.h" +#include } #include @@ -12,6 +12,8 @@ extern "C" { #include #include +#include + #define DEFAULT_PORT_VLAN_ID 1 /* * Default MTU is derived from SAI_PORT_ATTR_MTU (1514) @@ -73,6 +75,9 @@ struct 
SystemLagInfo class Port { +public: + typedef sai_bridge_port_fdb_learning_mode_t port_learn_mode_t; + public: enum Type { CPU, @@ -85,8 +90,21 @@ class Port SUBPORT, SYSTEM, UNKNOWN - } ; + }; + + enum Role + { + Ext, // external + Int, // internal + Inb, // inband + Rec // recirculation + }; +public: + static constexpr std::size_t max_lanes = 8; // Max HW lanes + static constexpr std::size_t max_fec_modes = 3; // Max FEC modes (sync with SAI) + +public: Port() {}; Port(std::string alias, Type type) : m_alias(alias), m_type(type) {}; @@ -107,12 +125,13 @@ class Port } std::string m_alias; - Type m_type; - int m_index = 0; // PHY_PORT: index + Type m_type = UNKNOWN; + uint16_t m_index = 0; // PHY_PORT: index uint32_t m_mtu = DEFAULT_MTU; uint32_t m_speed = 0; // Mbps - std::string m_learn_mode = "hardware"; - int m_autoneg = -1; // -1 means not set, 0 = disabled, 1 = enabled + port_learn_mode_t m_learn_mode = SAI_BRIDGE_PORT_FDB_LEARNING_MODE_HW; + bool m_autoneg = false; + bool m_link_training = false; bool m_admin_state_up = false; bool m_init = false; bool m_l3_vni = false; @@ -139,28 +158,26 @@ class Port std::vector m_queue_ids; std::vector m_priority_group_ids; sai_port_priority_flow_control_mode_t m_pfc_asym = SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_COMBINED; - uint8_t m_pfc_bitmask = 0; + uint8_t m_pfc_bitmask = 0; // PFC enable bit mask + uint8_t m_pfcwd_sw_bitmask = 0; // PFC software watchdog enable uint16_t m_tpid = DEFAULT_TPID; uint32_t m_nat_zone_id = 0; uint32_t m_vnid = VNID_NONE; uint32_t m_fdb_count = 0; uint32_t m_up_member_count = 0; uint32_t m_maximum_headroom = 0; - std::vector m_adv_speeds; - sai_port_interface_type_t m_interface_type; - std::vector m_adv_interface_types; + std::set m_adv_speeds; + sai_port_interface_type_t m_interface_type = SAI_PORT_INTERFACE_TYPE_NONE; + std::set m_adv_interface_types; bool m_mpls = false; - /* - * Following two bit vectors are used to lock - * the PG/queue from being changed in BufferOrch. 
+ * Following bit vector is used to lock + * the queue from being changed in BufferOrch. * The use case scenario is when PfcWdZeroBufferHandler - * sets zero buffer profile it should protect PG/queue + * sets zero buffer profile it should protect queue * from being overwritten in BufferOrch. */ std::vector m_queue_lock; - std::vector m_priority_group_lock; - std::vector m_priority_group_pending_profile; std::unordered_set m_ingress_acl_tables_uset; std::unordered_set m_egress_acl_tables_uset; @@ -170,10 +187,24 @@ class Port SystemLagInfo m_system_lag_info; sai_object_id_t m_switch_id = 0; + sai_object_id_t m_system_side_id = 0; sai_object_id_t m_line_side_id = 0; - bool m_fec_cfg = false; - bool m_an_cfg = false; + /* pre-emphasis */ + std::map> m_preemphasis; + + /* Force initial parameter configuration flags */ + bool m_an_cfg = false; // Auto-negotiation (AN) + bool m_adv_speed_cfg = false; // Advertised speed + bool m_intf_cfg = false; // Interface type + bool m_adv_intf_cfg = false; // Advertised interface type + bool m_fec_cfg = false; // Forward Error Correction (FEC) + bool m_pfc_asym_cfg = false; // Asymmetric Priority Flow Control (PFC) + bool m_lm_cfg = false; // Forwarding Database (FDB) Learning Mode (LM) + bool m_lt_cfg = false; // Link Training (LT) + + int m_cap_an = -1; /* Capability - AutoNeg, -1 means not set */ + int m_cap_lt = -1; /* Capability - LinkTraining, -1 means not set */ }; } diff --git a/orchagent/port/portcnt.h b/orchagent/port/portcnt.h new file mode 100644 index 0000000000..c0c3ea359e --- /dev/null +++ b/orchagent/port/portcnt.h @@ -0,0 +1,178 @@ +#pragma once + +extern "C" { +#include +#include +} + +#include +#include + +#include +#include +#include +#include + +#include "../port.h" + +class PortConfig final +{ +public: + PortConfig() = default; + ~PortConfig() = default; + + PortConfig(const std::string &key, const std::string &op) noexcept + { + this->key = key; + this->op = op; + } + + struct { + std::string value; + bool 
is_set = false; + } alias; // Port alias + + struct { + std::uint16_t value; + bool is_set = false; + } index; // Interface index + + struct { + std::set value; + bool is_set = false; + } lanes; // Lane information of a physical port + + struct { + std::uint32_t value; + bool is_set = false; + } speed; // Port speed + + struct { + bool value; + bool is_set = false; + } autoneg; // Port autoneg + + struct { + std::set value; + bool is_set = false; + } adv_speeds; // Port advertised speeds + + struct { + sai_port_interface_type_t value; + bool is_set = false; + } interface_type; // Port interface type + + struct { + std::set value; + bool is_set = false; + } adv_interface_types; // Port advertised interface types + + struct { + sai_port_fec_mode_t value; + bool is_set = false; + } fec; // Port FEC + + struct { + std::uint32_t value; + bool is_set = false; + } mtu; // Port MTU + + struct { + std::uint16_t value; + bool is_set = false; + } tpid; // Port TPID + + struct { + sai_port_priority_flow_control_mode_t value; + bool is_set = false; + } pfc_asym; // Port asymmetric PFC + + struct { + sai_bridge_port_fdb_learning_mode_t value; + bool is_set = false; + } learn_mode; // Port FDB learn mode + + struct { + bool value; + bool is_set = false; + } link_training; // Port link training + + struct { + + struct { + std::vector value; + bool is_set = false; + } preemphasis; // Port serdes pre-emphasis + + struct { + std::vector value; + bool is_set = false; + } idriver; // Port serdes idriver + + struct { + std::vector value; + bool is_set = false; + } ipredriver; // Port serdes ipredriver + + struct { + std::vector value; + bool is_set = false; + } pre1; // Port serdes pre1 + + struct { + std::vector value; + bool is_set = false; + } pre2; // Port serdes pre2 + + struct { + std::vector value; + bool is_set = false; + } pre3; // Port serdes pre3 + + struct { + std::vector value; + bool is_set = false; + } main; // Port serdes main + + struct { + std::vector value; + bool 
is_set = false; + } post1; // Port serdes post1 + + struct { + std::vector value; + bool is_set = false; + } post2; // Port serdes post2 + + struct { + std::vector value; + bool is_set = false; + } post3; // Port serdes post3 + + struct { + std::vector value; + bool is_set = false; + } attn; // Port serdes attn + + } serdes; // Port serdes + + struct { + swss::Port::Role value; + bool is_set = false; + } role; // Port role + + struct { + bool value; + bool is_set = false; + } admin_status; // Port admin status + + struct { + std::string value; + bool is_set = false; + } description; // Port description + + std::string key; + std::string op; + + std::unordered_map fieldValueMap; +}; diff --git a/orchagent/port/porthlpr.cpp b/orchagent/port/porthlpr.cpp new file mode 100644 index 0000000000..95914c3e36 --- /dev/null +++ b/orchagent/port/porthlpr.cpp @@ -0,0 +1,947 @@ +// includes ----------------------------------------------------------------------------------------------------------- + +#include +#include + +#include +#include +#include + +#include + +#include "portschema.h" +#include "converter.h" +#include "tokenize.h" +#include "logger.h" + +#include "porthlpr.h" + +using namespace swss; + +// types -------------------------------------------------------------------------------------------------------------- + +typedef decltype(PortConfig::serdes) PortSerdes_t; + +// constants ---------------------------------------------------------------------------------------------------------- + +static const std::uint32_t minPortSpeed = 1; +static const std::uint32_t maxPortSpeed = 800000; + +static const std::uint32_t minPortMtu = 68; +static const std::uint32_t maxPortMtu = 9216; + +static const std::unordered_map portModeMap = +{ + { PORT_MODE_ON, true }, + { PORT_MODE_OFF, false } +}; + +static const std::unordered_map portStatusMap = +{ + { PORT_STATUS_UP, true }, + { PORT_STATUS_DOWN, false } +}; + +static const std::unordered_map portInterfaceTypeMap = +{ + { 
PORT_INTERFACE_TYPE_NONE, SAI_PORT_INTERFACE_TYPE_NONE }, + { PORT_INTERFACE_TYPE_CR, SAI_PORT_INTERFACE_TYPE_CR }, + { PORT_INTERFACE_TYPE_CR2, SAI_PORT_INTERFACE_TYPE_CR2 }, + { PORT_INTERFACE_TYPE_CR4, SAI_PORT_INTERFACE_TYPE_CR4 }, + { PORT_INTERFACE_TYPE_CR8, SAI_PORT_INTERFACE_TYPE_CR8 }, + { PORT_INTERFACE_TYPE_SR, SAI_PORT_INTERFACE_TYPE_SR }, + { PORT_INTERFACE_TYPE_SR2, SAI_PORT_INTERFACE_TYPE_SR2 }, + { PORT_INTERFACE_TYPE_SR4, SAI_PORT_INTERFACE_TYPE_SR4 }, + { PORT_INTERFACE_TYPE_SR8, SAI_PORT_INTERFACE_TYPE_SR8 }, + { PORT_INTERFACE_TYPE_LR, SAI_PORT_INTERFACE_TYPE_LR }, + { PORT_INTERFACE_TYPE_LR4, SAI_PORT_INTERFACE_TYPE_LR4 }, + { PORT_INTERFACE_TYPE_LR8, SAI_PORT_INTERFACE_TYPE_LR8 }, + { PORT_INTERFACE_TYPE_KR, SAI_PORT_INTERFACE_TYPE_KR }, + { PORT_INTERFACE_TYPE_KR4, SAI_PORT_INTERFACE_TYPE_KR4 }, + { PORT_INTERFACE_TYPE_KR8, SAI_PORT_INTERFACE_TYPE_KR8 }, + { PORT_INTERFACE_TYPE_CAUI, SAI_PORT_INTERFACE_TYPE_CAUI }, + { PORT_INTERFACE_TYPE_GMII, SAI_PORT_INTERFACE_TYPE_GMII }, + { PORT_INTERFACE_TYPE_SFI, SAI_PORT_INTERFACE_TYPE_SFI }, + { PORT_INTERFACE_TYPE_XLAUI, SAI_PORT_INTERFACE_TYPE_XLAUI }, + { PORT_INTERFACE_TYPE_KR2, SAI_PORT_INTERFACE_TYPE_KR2 }, + { PORT_INTERFACE_TYPE_CAUI4, SAI_PORT_INTERFACE_TYPE_CAUI4 }, + { PORT_INTERFACE_TYPE_XAUI, SAI_PORT_INTERFACE_TYPE_XAUI }, + { PORT_INTERFACE_TYPE_XFI, SAI_PORT_INTERFACE_TYPE_XFI }, + { PORT_INTERFACE_TYPE_XGMII, SAI_PORT_INTERFACE_TYPE_XGMII } +}; + +static const std::unordered_map portFecMap = +{ + { PORT_FEC_NONE, SAI_PORT_FEC_MODE_NONE }, + { PORT_FEC_RS, SAI_PORT_FEC_MODE_RS }, + { PORT_FEC_FC, SAI_PORT_FEC_MODE_FC } +}; + +static const std::unordered_map portFecRevMap = +{ + { SAI_PORT_FEC_MODE_NONE, PORT_FEC_NONE }, + { SAI_PORT_FEC_MODE_RS, PORT_FEC_RS }, + { SAI_PORT_FEC_MODE_FC, PORT_FEC_FC } +}; + +static const std::unordered_map portPfcAsymMap = +{ + { PORT_MODE_ON, SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_SEPARATE }, + { PORT_MODE_OFF, 
SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_COMBINED } +}; + +static const std::unordered_map portLearnModeMap = +{ + { PORT_LEARN_MODE_DROP, SAI_BRIDGE_PORT_FDB_LEARNING_MODE_DROP }, + { PORT_LEARN_MODE_DISABLE, SAI_BRIDGE_PORT_FDB_LEARNING_MODE_DISABLE }, + { PORT_LEARN_MODE_HARDWARE, SAI_BRIDGE_PORT_FDB_LEARNING_MODE_HW }, + { PORT_LEARN_MODE_CPU_TRAP, SAI_BRIDGE_PORT_FDB_LEARNING_MODE_CPU_TRAP }, + { PORT_LEARN_MODE_CPU_LOG, SAI_BRIDGE_PORT_FDB_LEARNING_MODE_CPU_LOG }, + { PORT_LEARN_MODE_NOTIFICATION, SAI_BRIDGE_PORT_FDB_LEARNING_MODE_FDB_NOTIFICATION } +}; + +static const std::unordered_map portRoleMap = +{ + { PORT_ROLE_EXT, Port::Role::Ext }, + { PORT_ROLE_INT, Port::Role::Int }, + { PORT_ROLE_INB, Port::Role::Inb }, + { PORT_ROLE_REC, Port::Role::Rec } +}; + +// functions ---------------------------------------------------------------------------------------------------------- + +template +static inline T toUInt(const std::string &hexStr) +{ + if (hexStr.substr(0, 2) != "0x") + { + throw std::invalid_argument("Invalid argument: '" + hexStr + "'"); + } + + return to_uint(hexStr); +} + +static inline std::uint16_t toUInt16(const std::string &hexStr) +{ + return toUInt(hexStr); +} + +static inline std::uint32_t toUInt32(const std::string &hexStr) +{ + return toUInt(hexStr); +} + +// Port helper -------------------------------------------------------------------------------------------------------- + +bool PortHelper::fecToStr(std::string &str, sai_port_fec_mode_t value) const +{ + const auto &cit = portFecRevMap.find(value); + if (cit == portFecRevMap.cend()) + { + return false; + } + + str = cit->second; + + return true; +} + +std::string PortHelper::getFieldValueStr(const PortConfig &port, const std::string &field) const +{ + static std::string str; + + const auto &cit = port.fieldValueMap.find(field); + if (cit != port.fieldValueMap.cend()) + { + return cit->second; + } + + return str; +} + +std::string PortHelper::getAutonegStr(const PortConfig &port) const +{ + 
return this->getFieldValueStr(port, PORT_AUTONEG); +} + +std::string PortHelper::getPortInterfaceTypeStr(const PortConfig &port) const +{ + return this->getFieldValueStr(port, PORT_INTERFACE_TYPE); +} + +std::string PortHelper::getAdvInterfaceTypesStr(const PortConfig &port) const +{ + return this->getFieldValueStr(port, PORT_ADV_INTERFACE_TYPES); +} + +std::string PortHelper::getFecStr(const PortConfig &port) const +{ + return this->getFieldValueStr(port, PORT_FEC); +} + +std::string PortHelper::getPfcAsymStr(const PortConfig &port) const +{ + return this->getFieldValueStr(port, PORT_PFC_ASYM); +} + +std::string PortHelper::getLearnModeStr(const PortConfig &port) const +{ + return this->getFieldValueStr(port, PORT_LEARN_MODE); +} + +std::string PortHelper::getLinkTrainingStr(const PortConfig &port) const +{ + return this->getFieldValueStr(port, PORT_LINK_TRAINING); +} + +std::string PortHelper::getAdminStatusStr(const PortConfig &port) const +{ + return this->getFieldValueStr(port, PORT_ADMIN_STATUS); +} + +bool PortHelper::parsePortAlias(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty string is prohibited", field.c_str()); + return false; + } + + port.alias.value = value; + port.alias.is_set = true; + + return true; +} + +bool PortHelper::parsePortIndex(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + try + { + port.index.value = to_uint(value); + port.index.is_set = true; + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Failed to parse field(%s): %s", field.c_str(), e.what()); + return false; + } + + return true; +} + +bool PortHelper::parsePortLanes(PortConfig &port, const std::string &field, const std::string &value) const +{ + 
SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty string is prohibited", field.c_str()); + return false; + } + + const auto &laneList = tokenize(value, ','); + + try + { + for (const auto &cit : laneList) + { + port.lanes.value.insert(to_uint(cit)); + } + + port.lanes.is_set = true; + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Failed to parse field(%s): %s", field.c_str(), e.what()); + return false; + } + + if (port.lanes.value.size() != laneList.size()) + { + SWSS_LOG_WARN("Duplicate lanes in field(%s): unexpected value(%s)", field.c_str(), value.c_str()); + } + + return true; +} + +bool PortHelper::parsePortSpeed(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + try + { + port.speed.value = to_uint(value); + port.speed.is_set = true; + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Failed to parse field(%s): %s", field.c_str(), e.what()); + return false; + } + + if (!((minPortSpeed <= port.speed.value) && (port.speed.value <= maxPortSpeed))) + { + SWSS_LOG_ERROR( + "Failed to parse field(%s): value(%s) is out of range: %u <= speed <= %u", + field.c_str(), value.c_str(), minPortSpeed, maxPortSpeed + ); + return false; + } + + return true; +} + +bool PortHelper::parsePortAutoneg(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + const auto &cit = portModeMap.find(value); + if (cit == portModeMap.cend()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s)", field.c_str(), value.c_str()); + return false; + } + + port.autoneg.value = cit->second; + port.autoneg.is_set = true; + + return true; +} + +bool 
PortHelper::parsePortAdvSpeeds(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + auto nValue = boost::algorithm::to_lower_copy(value); + + if (nValue == PORT_ADV_ALL) + { + port.adv_speeds.is_set = true; + return true; + } + + const auto &speedList = tokenize(nValue, ','); + + try + { + for (const auto &cit : speedList) + { + auto speed = to_uint(cit); + + if (!((minPortSpeed <= speed) && (speed <= maxPortSpeed))) + { + SWSS_LOG_ERROR( + "Failed to parse field(%s): value(%s) is out of range: %u <= speed <= %u", + field.c_str(), value.c_str(), minPortSpeed, maxPortSpeed + ); + return false; + } + + port.adv_speeds.value.insert(speed); + } + + port.adv_speeds.is_set = true; + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Failed to parse field(%s): %s", field.c_str(), e.what()); + return false; + } + + if (port.adv_speeds.value.size() != speedList.size()) + { + SWSS_LOG_WARN("Duplicate speeds in field(%s): unexpected value(%s)", field.c_str(), value.c_str()); + } + + return true; +} + +bool PortHelper::parsePortInterfaceType(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + auto nValue = boost::algorithm::to_lower_copy(value); + + const auto &cit = portInterfaceTypeMap.find(nValue); + if (cit == portInterfaceTypeMap.cend()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s)", field.c_str(), value.c_str()); + return false; + } + + port.interface_type.value = cit->second; + port.interface_type.is_set = true; + + return true; +} + +bool PortHelper::parsePortAdvInterfaceTypes(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); 
+ + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + auto nValue = boost::algorithm::to_lower_copy(value); + + if (nValue == PORT_ADV_ALL) + { + port.adv_interface_types.is_set = true; + return true; + } + + const auto &intfTypeList = tokenize(nValue, ','); + + for (const auto &cit1 : intfTypeList) + { + const auto &cit2 = portInterfaceTypeMap.find(cit1); + if (cit2 == portInterfaceTypeMap.cend()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s)", field.c_str(), value.c_str()); + return false; + } + + port.adv_interface_types.value.insert(cit2->second); + } + + port.adv_interface_types.is_set = true; + + if (port.adv_interface_types.value.size() != intfTypeList.size()) + { + SWSS_LOG_WARN("Duplicate interface types in field(%s): unexpected value(%s)", field.c_str(), value.c_str()); + } + + return true; +} + +bool PortHelper::parsePortFec(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + const auto &cit = portFecMap.find(value); + if (cit == portFecMap.cend()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s)", field.c_str(), value.c_str()); + return false; + } + + port.fec.value = cit->second; + port.fec.is_set = true; + + return true; +} + +bool PortHelper::parsePortMtu(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + try + { + port.mtu.value = to_uint(value); + port.mtu.is_set = true; + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Failed to parse field(%s): %s", field.c_str(), e.what()); + return false; + } + + if (!((minPortMtu <= port.mtu.value) && 
(port.mtu.value <= maxPortMtu))) + { + SWSS_LOG_ERROR( + "Failed to parse field(%s): value(%s) is out of range: %u <= mtu <= %u", + field.c_str(), value.c_str(), minPortMtu, maxPortMtu + ); + return false; + } + + return true; +} + +bool PortHelper::parsePortTpid(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + try + { + port.tpid.value = toUInt16(value); + port.tpid.is_set = true; + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Failed to parse field(%s): %s", field.c_str(), e.what()); + return false; + } + + return true; +} + +bool PortHelper::parsePortPfcAsym(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + const auto &cit = portPfcAsymMap.find(value); + if (cit == portPfcAsymMap.cend()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s)", field.c_str(), value.c_str()); + return false; + } + + port.pfc_asym.value = cit->second; + port.pfc_asym.is_set = true; + + return true; +} + +bool PortHelper::parsePortLearnMode(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + const auto &cit = portLearnModeMap.find(value); + if (cit == portLearnModeMap.cend()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s)", field.c_str(), value.c_str()); + return false; + } + + port.learn_mode.value = cit->second; + port.learn_mode.is_set = true; + + return true; +} + +bool PortHelper::parsePortLinkTraining(PortConfig &port, const std::string &field, const std::string &value) const +{ 
+ SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + const auto &cit = portModeMap.find(value); + if (cit == portModeMap.cend()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s)", field.c_str(), value.c_str()); + return false; + } + + port.link_training.value = cit->second; + port.link_training.is_set = true; + + return true; +} + +template +bool PortHelper::parsePortSerdes(T &serdes, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty string is prohibited", field.c_str()); + return false; + } + + const auto &serdesList = tokenize(value, ','); + + try + { + for (const auto &cit : serdesList) + { + serdes.value.push_back(toUInt32(cit)); + } + } + catch (const std::exception &e) + { + SWSS_LOG_ERROR("Failed to parse field(%s): %s", field.c_str(), e.what()); + return false; + } + + serdes.is_set = true; + + return true; +} + +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::preemphasis) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::idriver) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::ipredriver) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::pre1) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::pre2) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::pre3) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::main) 
&serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::post1) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::post2) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::post3) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::attn) &serdes, const std::string &field, const std::string &value) const; + +bool PortHelper::parsePortRole(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + const auto &cit = portRoleMap.find(value); + if (cit == portRoleMap.cend()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s)", field.c_str(), value.c_str()); + return false; + } + + port.role.value = cit->second; + port.role.is_set = true; + + return true; +} + +bool PortHelper::parsePortAdminStatus(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + const auto &cit = portStatusMap.find(value); + if (cit == portStatusMap.cend()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s)", field.c_str(), value.c_str()); + return false; + } + + port.admin_status.value = cit->second; + port.admin_status.is_set = true; + + return true; +} + +bool PortHelper::parsePortDescription(PortConfig &port, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + port.description.value = value; + port.description.is_set = true; + + return true; +} + +bool 
PortHelper::parsePortConfig(PortConfig &port) const +{ + SWSS_LOG_ENTER(); + + for (const auto &cit : port.fieldValueMap) + { + const auto &field = cit.first; + const auto &value = cit.second; + + if (field == PORT_ALIAS) + { + if (!this->parsePortAlias(port, field, value)) + { + return false; + } + } + else if (field == PORT_INDEX) + { + if (!this->parsePortIndex(port, field, value)) + { + return false; + } + } + else if (field == PORT_LANES) + { + if (!this->parsePortLanes(port, field, value)) + { + return false; + } + } + else if (field == PORT_SPEED) + { + if (!this->parsePortSpeed(port, field, value)) + { + return false; + } + } + else if (field == PORT_AUTONEG) + { + if (!this->parsePortAutoneg(port, field, value)) + { + return false; + } + } + else if (field == PORT_ADV_SPEEDS) + { + if (!this->parsePortAdvSpeeds(port, field, value)) + { + return false; + } + } + else if (field == PORT_INTERFACE_TYPE) + { + if (!this->parsePortInterfaceType(port, field, value)) + { + return false; + } + } + else if (field == PORT_ADV_INTERFACE_TYPES) + { + if (!this->parsePortAdvInterfaceTypes(port, field, value)) + { + return false; + } + } + else if (field == PORT_FEC) + { + if (!this->parsePortFec(port, field, value)) + { + return false; + } + } + else if (field == PORT_MTU) + { + if (!this->parsePortMtu(port, field, value)) + { + return false; + } + } + else if (field == PORT_TPID) + { + if (!this->parsePortTpid(port, field, value)) + { + return false; + } + } + else if (field == PORT_PFC_ASYM) + { + if (!this->parsePortPfcAsym(port, field, value)) + { + return false; + } + } + else if (field == PORT_LEARN_MODE) + { + if (!this->parsePortLearnMode(port, field, value)) + { + return false; + } + } + else if (field == PORT_LINK_TRAINING) + { + if (!this->parsePortLinkTraining(port, field, value)) + { + return false; + } + } + else if (field == PORT_PREEMPHASIS) + { + if (!this->parsePortSerdes(port.serdes.preemphasis, field, value)) + { + return false; + } + } + else if 
(field == PORT_IDRIVER) + { + if (!this->parsePortSerdes(port.serdes.idriver, field, value)) + { + return false; + } + } + else if (field == PORT_IPREDRIVER) + { + if (!this->parsePortSerdes(port.serdes.ipredriver, field, value)) + { + return false; + } + } + else if (field == PORT_PRE1) + { + if (!this->parsePortSerdes(port.serdes.pre1, field, value)) + { + return false; + } + } + else if (field == PORT_PRE2) + { + if (!this->parsePortSerdes(port.serdes.pre2, field, value)) + { + return false; + } + } + else if (field == PORT_PRE3) + { + if (!this->parsePortSerdes(port.serdes.pre3, field, value)) + { + return false; + } + } + else if (field == PORT_MAIN) + { + if (!this->parsePortSerdes(port.serdes.main, field, value)) + { + return false; + } + } + else if (field == PORT_POST1) + { + if (!this->parsePortSerdes(port.serdes.post1, field, value)) + { + return false; + } + } + else if (field == PORT_POST2) + { + if (!this->parsePortSerdes(port.serdes.post2, field, value)) + { + return false; + } + } + else if (field == PORT_POST3) + { + if (!this->parsePortSerdes(port.serdes.post3, field, value)) + { + return false; + } + } + else if (field == PORT_ATTN) + { + if (!this->parsePortSerdes(port.serdes.attn, field, value)) + { + return false; + } + } + else if (field == PORT_ROLE) + { + if (!this->parsePortRole(port, field, value)) + { + return false; + } + } + else if (field == PORT_ADMIN_STATUS) + { + if (!this->parsePortAdminStatus(port, field, value)) + { + return false; + } + } + else if (field == PORT_DESCRIPTION) + { + if (!this->parsePortDescription(port, field, value)) + { + return false; + } + } + else + { + SWSS_LOG_WARN("Unknown field(%s): skipping ...", field.c_str()); + } + } + + return this->validatePortConfig(port); +} + +bool PortHelper::validatePortConfig(PortConfig &port) const +{ + SWSS_LOG_ENTER(); + + if (!port.lanes.is_set) + { + SWSS_LOG_ERROR("Validation error: missing mandatory field(%s)", PORT_LANES); + return false; + } + + if 
(!port.speed.is_set) + { + SWSS_LOG_ERROR("Validation error: missing mandatory field(%s)", PORT_SPEED); + return false; + } + + if (!port.admin_status.is_set) + { + SWSS_LOG_INFO( + "Missing non mandatory field(%s): setting default value(%s)", + PORT_ADMIN_STATUS, + PORT_STATUS_DOWN + ); + + port.admin_status.value = false; + port.admin_status.is_set = true; + + port.fieldValueMap[PORT_ADMIN_STATUS] = PORT_STATUS_DOWN; + } + + return true; +} diff --git a/orchagent/port/porthlpr.h b/orchagent/port/porthlpr.h new file mode 100644 index 0000000000..f3a86e7054 --- /dev/null +++ b/orchagent/port/porthlpr.h @@ -0,0 +1,55 @@ +#pragma once + +#include + +#include +#include + +#include "portcnt.h" + +class PortHelper final +{ +public: + PortHelper() = default; + ~PortHelper() = default; + +public: + bool fecToStr(std::string &str, sai_port_fec_mode_t value) const; + + std::string getAutonegStr(const PortConfig &port) const; + std::string getPortInterfaceTypeStr(const PortConfig &port) const; + std::string getAdvInterfaceTypesStr(const PortConfig &port) const; + std::string getFecStr(const PortConfig &port) const; + std::string getPfcAsymStr(const PortConfig &port) const; + std::string getLearnModeStr(const PortConfig &port) const; + std::string getLinkTrainingStr(const PortConfig &port) const; + std::string getAdminStatusStr(const PortConfig &port) const; + + bool parsePortConfig(PortConfig &port) const; + +private: + std::string getFieldValueStr(const PortConfig &port, const std::string &field) const; + + template + bool parsePortSerdes(T &serdes, const std::string &field, const std::string &value) const; + + bool parsePortAlias(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortIndex(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortLanes(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortSpeed(PortConfig &port, const std::string &field, const 
std::string &value) const; + bool parsePortAutoneg(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortAdvSpeeds(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortInterfaceType(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortAdvInterfaceTypes(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortFec(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortMtu(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortTpid(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortPfcAsym(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortLearnMode(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortLinkTraining(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortRole(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortAdminStatus(PortConfig &port, const std::string &field, const std::string &value) const; + bool parsePortDescription(PortConfig &port, const std::string &field, const std::string &value) const; + + bool validatePortConfig(PortConfig &port) const; +}; diff --git a/orchagent/port/portschema.h b/orchagent/port/portschema.h new file mode 100644 index 0000000000..a01ea7271c --- /dev/null +++ b/orchagent/port/portschema.h @@ -0,0 +1,81 @@ +#pragma once + +// defines ------------------------------------------------------------------------------------------------------------ + +#define PORT_MODE_OFF "off" +#define PORT_MODE_ON "on" + +#define PORT_STATUS_DOWN "down" +#define PORT_STATUS_UP "up" + +#define PORT_ADV_ALL "all" + +#define PORT_INTERFACE_TYPE_NONE "none" +#define PORT_INTERFACE_TYPE_CR "cr" +#define 
PORT_INTERFACE_TYPE_CR2 "cr2" +#define PORT_INTERFACE_TYPE_CR4 "cr4" +#define PORT_INTERFACE_TYPE_CR8 "cr8" +#define PORT_INTERFACE_TYPE_SR "sr" +#define PORT_INTERFACE_TYPE_SR2 "sr2" +#define PORT_INTERFACE_TYPE_SR4 "sr4" +#define PORT_INTERFACE_TYPE_SR8 "sr8" +#define PORT_INTERFACE_TYPE_LR "lr" +#define PORT_INTERFACE_TYPE_LR4 "lr4" +#define PORT_INTERFACE_TYPE_LR8 "lr8" +#define PORT_INTERFACE_TYPE_KR "kr" +#define PORT_INTERFACE_TYPE_KR4 "kr4" +#define PORT_INTERFACE_TYPE_KR8 "kr8" +#define PORT_INTERFACE_TYPE_CAUI "caui" +#define PORT_INTERFACE_TYPE_GMII "gmii" +#define PORT_INTERFACE_TYPE_SFI "sfi" +#define PORT_INTERFACE_TYPE_XLAUI "xlaui" +#define PORT_INTERFACE_TYPE_KR2 "kr2" +#define PORT_INTERFACE_TYPE_CAUI4 "caui4" +#define PORT_INTERFACE_TYPE_XAUI "xaui" +#define PORT_INTERFACE_TYPE_XFI "xfi" +#define PORT_INTERFACE_TYPE_XGMII "xgmii" + +#define PORT_FEC_NONE "none" +#define PORT_FEC_RS "rs" +#define PORT_FEC_FC "fc" + +#define PORT_LEARN_MODE_DROP "drop" +#define PORT_LEARN_MODE_DISABLE "disable" +#define PORT_LEARN_MODE_HARDWARE "hardware" +#define PORT_LEARN_MODE_CPU_TRAP "cpu_trap" +#define PORT_LEARN_MODE_CPU_LOG "cpu_log" +#define PORT_LEARN_MODE_NOTIFICATION "notification" + +#define PORT_ROLE_EXT "Ext" +#define PORT_ROLE_INT "Int" +#define PORT_ROLE_INB "Inb" +#define PORT_ROLE_REC "Rec" + +#define PORT_ALIAS "alias" +#define PORT_INDEX "index" +#define PORT_LANES "lanes" +#define PORT_SPEED "speed" +#define PORT_AUTONEG "autoneg" +#define PORT_ADV_SPEEDS "adv_speeds" +#define PORT_INTERFACE_TYPE "interface_type" +#define PORT_ADV_INTERFACE_TYPES "adv_interface_types" +#define PORT_FEC "fec" +#define PORT_MTU "mtu" +#define PORT_TPID "tpid" +#define PORT_PFC_ASYM "pfc_asym" +#define PORT_LEARN_MODE "learn_mode" +#define PORT_LINK_TRAINING "link_training" +#define PORT_PREEMPHASIS "preemphasis" +#define PORT_IDRIVER "idriver" +#define PORT_IPREDRIVER "ipredriver" +#define PORT_PRE1 "pre1" +#define PORT_PRE2 "pre2" +#define PORT_PRE3 "pre3" 
+#define PORT_MAIN "main" +#define PORT_POST1 "post1" +#define PORT_POST2 "post2" +#define PORT_POST3 "post3" +#define PORT_ATTN "attn" +#define PORT_ROLE "role" +#define PORT_ADMIN_STATUS "admin_status" +#define PORT_DESCRIPTION "description" diff --git a/orchagent/port_rates.lua b/orchagent/port_rates.lua index 1d3d3f24f1..c29977d153 100644 --- a/orchagent/port_rates.lua +++ b/orchagent/port_rates.lua @@ -29,28 +29,33 @@ logit(alpha) logit(one_minus_alpha) logit(delta) -local n = table.getn(KEYS) -for i = 1, n do - local state_table = rates_table_name .. ':' .. KEYS[i] .. ':' .. 'PORT' +local function compute_rate(port) + local state_table = rates_table_name .. ':' .. port .. ':' .. 'PORT' local initialized = redis.call('HGET', state_table, 'INIT_DONE') logit(initialized) -- Get new COUNTERS values - local in_ucast_pkts = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_IN_UCAST_PKTS') - local in_non_ucast_pkts = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_IN_NON_UCAST_PKTS') - local out_ucast_pkts = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_OUT_UCAST_PKTS') - local out_non_ucast_pkts = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_OUT_NON_UCAST_PKTS') - local in_octets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_IN_OCTETS') - local out_octets = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_OUT_OCTETS') + local in_ucast_pkts = redis.call('HGET', counters_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_UCAST_PKTS') + local in_non_ucast_pkts = redis.call('HGET', counters_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_NON_UCAST_PKTS') + local out_ucast_pkts = redis.call('HGET', counters_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_OUT_UCAST_PKTS') + local out_non_ucast_pkts = redis.call('HGET', counters_table_name .. ':' .. 
port, 'SAI_PORT_STAT_IF_OUT_NON_UCAST_PKTS') + local in_octets = redis.call('HGET', counters_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_OCTETS') + local out_octets = redis.call('HGET', counters_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_OUT_OCTETS') + + if not in_ucast_pkts or not in_non_ucast_pkts or not out_ucast_pkts or + not out_non_ucast_pkts or not in_octets or not out_octets then + logit("Not found some counters on " .. port) + return + end if initialized == 'DONE' or initialized == 'COUNTERS_LAST' then -- Get old COUNTERS values - local in_ucast_pkts_last = redis.call('HGET', rates_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_IN_UCAST_PKTS_last') - local in_non_ucast_pkts_last = redis.call('HGET', rates_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_IN_NON_UCAST_PKTS_last') - local out_ucast_pkts_last = redis.call('HGET', rates_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_OUT_UCAST_PKTS_last') - local out_non_ucast_pkts_last = redis.call('HGET', rates_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_OUT_NON_UCAST_PKTS_last') - local in_octets_last = redis.call('HGET', rates_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_IN_OCTETS_last') - local out_octets_last = redis.call('HGET', rates_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_OUT_OCTETS_last') + local in_ucast_pkts_last = redis.call('HGET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_UCAST_PKTS_last') + local in_non_ucast_pkts_last = redis.call('HGET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_NON_UCAST_PKTS_last') + local out_ucast_pkts_last = redis.call('HGET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_OUT_UCAST_PKTS_last') + local out_non_ucast_pkts_last = redis.call('HGET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_OUT_NON_UCAST_PKTS_last') + local in_octets_last = redis.call('HGET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_OCTETS_last') + local out_octets_last = redis.call('HGET', rates_table_name .. ':' .. 
port, 'SAI_PORT_STAT_IF_OUT_OCTETS_last') -- Calculate new rates values local rx_bps_new = (in_octets - in_octets_last) / delta * 1000 @@ -60,22 +65,22 @@ for i = 1, n do if initialized == "DONE" then -- Get old rates values - local rx_bps_old = redis.call('HGET', rates_table_name .. ':' .. KEYS[i], 'RX_BPS') - local rx_pps_old = redis.call('HGET', rates_table_name .. ':' .. KEYS[i], 'RX_PPS') - local tx_bps_old = redis.call('HGET', rates_table_name .. ':' .. KEYS[i], 'TX_BPS') - local tx_pps_old = redis.call('HGET', rates_table_name .. ':' .. KEYS[i], 'TX_PPS') + local rx_bps_old = redis.call('HGET', rates_table_name .. ':' .. port, 'RX_BPS') + local rx_pps_old = redis.call('HGET', rates_table_name .. ':' .. port, 'RX_PPS') + local tx_bps_old = redis.call('HGET', rates_table_name .. ':' .. port, 'TX_BPS') + local tx_pps_old = redis.call('HGET', rates_table_name .. ':' .. port, 'TX_PPS') -- Smooth the rates values and store them in DB - redis.call('HSET', rates_table_name .. ':' .. KEYS[i], 'RX_BPS', alpha*rx_bps_new + one_minus_alpha*rx_bps_old) - redis.call('HSET', rates_table_name .. ':' .. KEYS[i], 'RX_PPS', alpha*rx_pps_new + one_minus_alpha*rx_pps_old) - redis.call('HSET', rates_table_name .. ':' .. KEYS[i], 'TX_BPS', alpha*tx_bps_new + one_minus_alpha*tx_bps_old) - redis.call('HSET', rates_table_name .. ':' .. KEYS[i], 'TX_PPS', alpha*tx_pps_new + one_minus_alpha*tx_pps_old) + redis.call('HSET', rates_table_name .. ':' .. port, 'RX_BPS', alpha*rx_bps_new + one_minus_alpha*rx_bps_old) + redis.call('HSET', rates_table_name .. ':' .. port, 'RX_PPS', alpha*rx_pps_new + one_minus_alpha*rx_pps_old) + redis.call('HSET', rates_table_name .. ':' .. port, 'TX_BPS', alpha*tx_bps_new + one_minus_alpha*tx_bps_old) + redis.call('HSET', rates_table_name .. ':' .. port, 'TX_PPS', alpha*tx_pps_new + one_minus_alpha*tx_pps_old) else -- Store unsmoothed initial rates values in DB - redis.call('HSET', rates_table_name .. ':' .. 
KEYS[i], 'RX_BPS', rx_bps_new) - redis.call('HSET', rates_table_name .. ':' .. KEYS[i], 'RX_PPS', rx_pps_new) - redis.call('HSET', rates_table_name .. ':' .. KEYS[i], 'TX_BPS', tx_bps_new) - redis.call('HSET', rates_table_name .. ':' .. KEYS[i], 'TX_PPS', tx_pps_new) + redis.call('HSET', rates_table_name .. ':' .. port, 'RX_BPS', rx_bps_new) + redis.call('HSET', rates_table_name .. ':' .. port, 'RX_PPS', rx_pps_new) + redis.call('HSET', rates_table_name .. ':' .. port, 'TX_BPS', tx_bps_new) + redis.call('HSET', rates_table_name .. ':' .. port, 'TX_PPS', tx_pps_new) redis.call('HSET', state_table, 'INIT_DONE', 'DONE') end else @@ -83,12 +88,17 @@ for i = 1, n do end -- Set old COUNTERS values - redis.call('HSET', rates_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_IN_UCAST_PKTS_last', in_ucast_pkts) - redis.call('HSET', rates_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_IN_NON_UCAST_PKTS_last', in_non_ucast_pkts) - redis.call('HSET', rates_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_OUT_UCAST_PKTS_last', out_ucast_pkts) - redis.call('HSET', rates_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_OUT_NON_UCAST_PKTS_last', out_non_ucast_pkts) - redis.call('HSET', rates_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_IN_OCTETS_last', in_octets) - redis.call('HSET', rates_table_name .. ':' .. KEYS[i], 'SAI_PORT_STAT_IF_OUT_OCTETS_last', out_octets) + redis.call('HSET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_UCAST_PKTS_last', in_ucast_pkts) + redis.call('HSET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_NON_UCAST_PKTS_last', in_non_ucast_pkts) + redis.call('HSET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_OUT_UCAST_PKTS_last', out_ucast_pkts) + redis.call('HSET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_OUT_NON_UCAST_PKTS_last', out_non_ucast_pkts) + redis.call('HSET', rates_table_name .. ':' .. port, 'SAI_PORT_STAT_IF_IN_OCTETS_last', in_octets) + redis.call('HSET', rates_table_name .. ':' .. 
port, 'SAI_PORT_STAT_IF_OUT_OCTETS_last', out_octets) +end + +local n = table.getn(KEYS) +for i = 1, n do + compute_rate(KEYS[i]) end return logtable diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index 52a35c7a39..bd1efff7b4 100755 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -16,7 +16,6 @@ #include #include #include -#include #include #include "net/if.h" @@ -30,6 +29,7 @@ #include "countercheckorch.h" #include "notifier.h" #include "fdborch.h" +#include "switchorch.h" #include "stringutility.h" #include "subscriberstatetable.h" @@ -49,18 +49,25 @@ extern NeighOrch *gNeighOrch; extern CrmOrch *gCrmOrch; extern BufferOrch *gBufferOrch; extern FdbOrch *gFdbOrch; +extern SwitchOrch *gSwitchOrch; extern Directory gDirectory; extern sai_system_port_api_t *sai_system_port_api; extern string gMySwitchType; extern int32_t gVoqMySwitchId; extern string gMyHostName; extern string gMyAsicName; +extern event_handle_t g_events_handle; + +// defines ------------------------------------------------------------------------------------------------------------ #define DEFAULT_SYSTEM_PORT_MTU 9100 #define VLAN_PREFIX "Vlan" #define DEFAULT_VLAN_ID 1 #define MAX_VALID_VLAN_ID 4094 +#define DEFAULT_HOSTIF_TX_QUEUE 7 +#define PORT_SPEED_LIST_DEFAULT_SIZE 16 +#define PORT_STATE_POLLING_SEC 5 #define PORT_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 1000 #define PORT_BUFFER_DROP_STAT_POLLING_INTERVAL_MS 60000 #define QUEUE_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS 10000 @@ -69,6 +76,17 @@ extern string gMyAsicName; #define PG_DROP_FLEX_STAT_COUNTER_POLL_MSECS "10000" #define PORT_RATE_FLEX_COUNTER_POLLING_INTERVAL_MS "1000" +// types -------------------------------------------------------------------------------------------------------------- + +struct PortAttrValue +{ + std::vector lanes; +}; + +typedef PortAttrValue PortAttrValue_t; +typedef std::map> PortSerdesAttrMap_t; + +// constants 
---------------------------------------------------------------------------------------------------------- static map fec_mode_map = { @@ -77,10 +95,11 @@ static map fec_mode_map = { "fc", SAI_PORT_FEC_MODE_FC } }; -static map pfc_asym_map = +static map fec_mode_reverse_map = { - { "on", SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_SEPARATE }, - { "off", SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_COMBINED } + { SAI_PORT_FEC_MODE_NONE, "none" }, + { SAI_PORT_FEC_MODE_RS, "rs" }, + { SAI_PORT_FEC_MODE_FC, "fc" } }; static map learn_mode_map = @@ -112,62 +131,38 @@ static map autoneg_mode_map = { "off", 0 } }; +static map link_training_failure_map = +{ + { SAI_PORT_LINK_TRAINING_FAILURE_STATUS_NO_ERROR, "none" }, + { SAI_PORT_LINK_TRAINING_FAILURE_STATUS_FRAME_LOCK_ERROR, "frame_lock"}, + { SAI_PORT_LINK_TRAINING_FAILURE_STATUS_SNR_LOWER_THRESHOLD, "snr_low"}, + { SAI_PORT_LINK_TRAINING_FAILURE_STATUS_TIME_OUT, "timeout"} +}; + +static map link_training_rx_status_map = +{ + { SAI_PORT_LINK_TRAINING_RX_STATUS_NOT_TRAINED, "not_trained" }, + { SAI_PORT_LINK_TRAINING_RX_STATUS_TRAINED, "trained"} +}; + // Interface type map used for gearbox static map interface_type_map = { { "none", SAI_PORT_INTERFACE_TYPE_NONE }, { "cr", SAI_PORT_INTERFACE_TYPE_CR }, { "cr4", SAI_PORT_INTERFACE_TYPE_CR4 }, + { "cr8", SAI_PORT_INTERFACE_TYPE_CR8 }, { "sr", SAI_PORT_INTERFACE_TYPE_SR }, { "sr4", SAI_PORT_INTERFACE_TYPE_SR4 }, + { "sr8", SAI_PORT_INTERFACE_TYPE_SR8 }, { "lr", SAI_PORT_INTERFACE_TYPE_LR }, { "lr4", SAI_PORT_INTERFACE_TYPE_LR4 }, + { "lr8", SAI_PORT_INTERFACE_TYPE_LR8 }, { "kr", SAI_PORT_INTERFACE_TYPE_KR }, - { "kr4", SAI_PORT_INTERFACE_TYPE_KR4 } + { "kr4", SAI_PORT_INTERFACE_TYPE_KR4 }, + { "kr8", SAI_PORT_INTERFACE_TYPE_KR8 } }; -// Interface type map used for auto negotiation -static map interface_type_map_for_an = -{ - { "none", SAI_PORT_INTERFACE_TYPE_NONE }, - { "cr", SAI_PORT_INTERFACE_TYPE_CR }, - { "cr2", SAI_PORT_INTERFACE_TYPE_CR2 }, - { "cr4", SAI_PORT_INTERFACE_TYPE_CR4 }, - 
{ "sr", SAI_PORT_INTERFACE_TYPE_SR }, - { "sr2", SAI_PORT_INTERFACE_TYPE_SR2 }, - { "sr4", SAI_PORT_INTERFACE_TYPE_SR4 }, - { "lr", SAI_PORT_INTERFACE_TYPE_LR }, - { "lr4", SAI_PORT_INTERFACE_TYPE_LR4 }, - { "kr", SAI_PORT_INTERFACE_TYPE_KR }, - { "kr4", SAI_PORT_INTERFACE_TYPE_KR4 }, - { "caui", SAI_PORT_INTERFACE_TYPE_CAUI }, - { "gmii", SAI_PORT_INTERFACE_TYPE_GMII }, - { "sfi", SAI_PORT_INTERFACE_TYPE_SFI }, - { "xlaui", SAI_PORT_INTERFACE_TYPE_XLAUI }, - { "kr2", SAI_PORT_INTERFACE_TYPE_KR2 }, - { "caui4", SAI_PORT_INTERFACE_TYPE_CAUI4 }, - { "xaui", SAI_PORT_INTERFACE_TYPE_XAUI }, - { "xfi", SAI_PORT_INTERFACE_TYPE_XFI }, - { "xgmii", SAI_PORT_INTERFACE_TYPE_XGMII } -}; - -static const std::string& getValidInterfaceTypes() -{ - static std::string validInterfaceTypes; - if (validInterfaceTypes.empty()) - { - std::ostringstream oss; - for (auto &iter : interface_type_map_for_an) - { - oss << iter.first << " "; - } - validInterfaceTypes = oss.str(); - boost::to_upper(validInterfaceTypes); - } - - return validInterfaceTypes; -} - const vector port_stat_ids = { SAI_PORT_STAT_IF_IN_OCTETS, @@ -237,6 +232,28 @@ const vector port_stat_ids = SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS }; +const vector gbport_stat_ids = +{ + SAI_PORT_STAT_IF_IN_OCTETS, + SAI_PORT_STAT_IF_IN_UCAST_PKTS, + SAI_PORT_STAT_IF_IN_NON_UCAST_PKTS, + SAI_PORT_STAT_IF_OUT_OCTETS, + SAI_PORT_STAT_IF_OUT_UCAST_PKTS, + SAI_PORT_STAT_IF_OUT_NON_UCAST_PKTS, + SAI_PORT_STAT_IF_IN_DISCARDS, + SAI_PORT_STAT_IF_OUT_DISCARDS, + SAI_PORT_STAT_IF_IN_ERRORS, + SAI_PORT_STAT_IF_OUT_ERRORS, + SAI_PORT_STAT_ETHER_RX_OVERSIZE_PKTS, + SAI_PORT_STAT_ETHER_TX_OVERSIZE_PKTS, + SAI_PORT_STAT_ETHER_STATS_UNDERSIZE_PKTS, + SAI_PORT_STAT_ETHER_STATS_JABBERS, + SAI_PORT_STAT_ETHER_STATS_FRAGMENTS, + SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES, + SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES, + SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS +}; + const vector port_buffer_drop_stat_ids = { SAI_PORT_STAT_IN_DROPPED_PKTS, @@ -273,11 
+290,73 @@ static char* hostif_vlan_tag[] = { [SAI_HOSTIF_VLAN_TAG_ORIGINAL] = "SAI_HOSTIF_VLAN_TAG_ORIGINAL" }; +// functions ---------------------------------------------------------------------------------------------------------- + static bool isValidPortTypeForLagMember(const Port& port) { return (port.m_type == Port::Type::PHY || port.m_type == Port::Type::SYSTEM); } +static void getPortSerdesAttr(PortSerdesAttrMap_t &map, const PortConfig &port) +{ + if (port.serdes.preemphasis.is_set) + { + map[SAI_PORT_SERDES_ATTR_PREEMPHASIS] = port.serdes.preemphasis.value; + } + + if (port.serdes.idriver.is_set) + { + map[SAI_PORT_SERDES_ATTR_IDRIVER] = port.serdes.idriver.value; + } + + if (port.serdes.ipredriver.is_set) + { + map[SAI_PORT_SERDES_ATTR_IPREDRIVER] = port.serdes.ipredriver.value; + } + + if (port.serdes.pre1.is_set) + { + map[SAI_PORT_SERDES_ATTR_TX_FIR_PRE1] = port.serdes.pre1.value; + } + + if (port.serdes.pre2.is_set) + { + map[SAI_PORT_SERDES_ATTR_TX_FIR_PRE2] = port.serdes.pre2.value; + } + + if (port.serdes.pre3.is_set) + { + map[SAI_PORT_SERDES_ATTR_TX_FIR_PRE3] = port.serdes.pre3.value; + } + + if (port.serdes.main.is_set) + { + map[SAI_PORT_SERDES_ATTR_TX_FIR_MAIN] = port.serdes.main.value; + } + + if (port.serdes.post1.is_set) + { + map[SAI_PORT_SERDES_ATTR_TX_FIR_POST1] = port.serdes.post1.value; + } + + if (port.serdes.post2.is_set) + { + map[SAI_PORT_SERDES_ATTR_TX_FIR_POST2] = port.serdes.post2.value; + } + + if (port.serdes.post3.is_set) + { + map[SAI_PORT_SERDES_ATTR_TX_FIR_POST3] = port.serdes.post3.value; + } + + if (port.serdes.attn.is_set) + { + map[SAI_PORT_SERDES_ATTR_TX_FIR_ATTN] = port.serdes.attn.value; + } +} + +// Port OA ------------------------------------------------------------------------------------------------------------ + /* * Initialize PortsOrch * 0) If Gearbox is enabled, then initialize the external PHYs as defined in @@ -293,18 +372,28 @@ static bool isValidPortTypeForLagMember(const Port& port) * bridge. 
By design, SONiC switch starts with all bridge ports removed from * default VLAN and all ports removed from .1Q bridge. */ + + +// PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vector &tables, DBConnector *chassisAppDb) : +// Orch(tables), PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vector &tableNames, DBConnector *chassisAppDb) : Orch(db, tableNames), m_portStateTable(stateDb, STATE_PORT_TABLE_NAME), port_stat_manager(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, PORT_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, false), + gb_port_stat_manager("GB_FLEX_COUNTER_DB", + PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, + PORT_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, false), port_buffer_drop_stat_manager(PORT_BUFFER_DROP_STAT_FLEX_COUNTER_GROUP, StatsMode::READ, PORT_BUFFER_DROP_STAT_POLLING_INTERVAL_MS, false), - queue_stat_manager(QUEUE_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, QUEUE_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, false) + queue_stat_manager(QUEUE_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ, QUEUE_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, false), + m_port_state_poller(new SelectableTimer(timespec { .tv_sec = PORT_STATE_POLLING_SEC, .tv_nsec = 0 })) { SWSS_LOG_ENTER(); /* Initialize counter table */ m_counter_db = shared_ptr(new DBConnector("COUNTERS_DB", 0)); m_counterTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_PORT_NAME_MAP)); + m_counterSysPortTable = unique_ptr
( + new Table(m_counter_db.get(), COUNTERS_SYSTEM_PORT_NAME_MAP)); m_counterLagTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_LAG_NAME_MAP)); FieldValueTuple tuple("", ""); vector defaultLagFv; @@ -319,6 +408,7 @@ PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vector(new Table(m_counter_db.get(), COUNTERS_QUEUE_NAME_MAP)); + m_voqTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_VOQ_NAME_MAP)); m_queuePortTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_QUEUE_PORT_MAP)); m_queueIndexTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_QUEUE_INDEX_MAP)); m_queueTypeTable = unique_ptr
(new Table(m_counter_db.get(), COUNTERS_QUEUE_TYPE_MAP)); @@ -381,100 +471,11 @@ PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vectorget_switch_attribute(gSwitchId, 1, &attr); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to get CPU port, rv:%d", status); - task_process_status handle_status = handleSaiGetStatus(SAI_API_SWITCH, status); - if (handle_status != task_process_status::task_success) - { - throw runtime_error("PortsOrch initialization failure"); - } - } - - m_cpuPort = Port("CPU", Port::CPU); - m_cpuPort.m_port_id = attr.value.oid; - m_portList[m_cpuPort.m_alias] = m_cpuPort; - m_port_ref_count[m_cpuPort.m_alias] = 0; - - /* Get port number */ - attr.id = SAI_SWITCH_ATTR_PORT_NUMBER; - - status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to get port number, rv:%d", status); - task_process_status handle_status = handleSaiGetStatus(SAI_API_SWITCH, status); - if (handle_status != task_process_status::task_success) - { - throw runtime_error("PortsOrch initialization failure"); - } - } - - m_portCount = attr.value.u32; - SWSS_LOG_NOTICE("Get %d ports", m_portCount); - - /* Get port list */ - vector port_list; - port_list.resize(m_portCount); - - attr.id = SAI_SWITCH_ATTR_PORT_LIST; - attr.value.objlist.count = (uint32_t)port_list.size(); - attr.value.objlist.list = port_list.data(); - - status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to get port list, rv:%d", status); - task_process_status handle_status = handleSaiGetStatus(SAI_API_SWITCH, status); - if (handle_status != task_process_status::task_success) - { - throw runtime_error("PortsOrch initialization failure"); - } - } - - /* Get port hardware lane info */ - for (i = 0; i < m_portCount; i++) - { - sai_uint32_t lanes[8] = { 0,0,0,0,0,0,0,0 }; - attr.id = SAI_PORT_ATTR_HW_LANE_LIST; - attr.value.u32list.count = 
8; - attr.value.u32list.list = lanes; - - status = sai_port_api->get_port_attribute(port_list[i], 1, &attr); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to get hardware lane list pid:%" PRIx64, port_list[i]); - task_process_status handle_status = handleSaiGetStatus(SAI_API_PORT, status); - if (handle_status != task_process_status::task_success) - { - throw runtime_error("PortsOrch initialization failure"); - } - } - - set tmp_lane_set; - for (j = 0; j < attr.value.u32list.count; j++) - { - tmp_lane_set.insert(attr.value.u32list.list[j]); - } + this->initializeCpuPort(); - string tmp_lane_str = ""; - for (auto s : tmp_lane_set) - { - tmp_lane_str += to_string(s) + " "; - } - tmp_lane_str = tmp_lane_str.substr(0, tmp_lane_str.size()-1); - - SWSS_LOG_NOTICE("Get port with lanes pid:%" PRIx64 " lanes:%s", port_list[i], tmp_lane_str.c_str()); - m_portListLaneMap[tmp_lane_set] = port_list[i]; - } + /* Get ports */ + this->initializePorts(); /* Get the flood control types and check if combined mode is supported */ vector supported_flood_control_types(max_flood_control_types, 0); @@ -515,7 +516,49 @@ PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vector attrs; attr.id = SAI_SWITCH_ATTR_DEFAULT_1Q_BRIDGE_ID; attrs.push_back(attr); @@ -548,6 +591,14 @@ PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vector (new LagIdAllocator(chassisAppDb)); } + + auto executor = new ExecutableTimer(m_port_state_poller, this, "PORT_STATE_POLLER"); + Orch::addExecutor(executor); } -void PortsOrch::removeDefaultVlanMembers() +void PortsOrch::initializeCpuPort() { - /* Get VLAN members in default VLAN */ - vector vlan_member_list(m_portCount + m_systemPortCount); + SWSS_LOG_ENTER(); sai_attribute_t attr; - attr.id = SAI_VLAN_ATTR_MEMBER_LIST; - attr.value.objlist.count = (uint32_t)vlan_member_list.size(); - attr.value.objlist.list = vlan_member_list.data(); + attr.id = SAI_SWITCH_ATTR_CPU_PORT; - sai_status_t status = 
sai_vlan_api->get_vlan_attribute(m_defaultVlan, 1, &attr); + auto status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to get VLAN member list in default VLAN, rv:%d", status); - task_process_status handle_status = handleSaiGetStatus(SAI_API_VLAN, status); + SWSS_LOG_ERROR("Failed to get CPU port, rv:%d", status); + auto handle_status = handleSaiGetStatus(SAI_API_SWITCH, status); if (handle_status != task_process_status::task_success) { - throw runtime_error("PortsOrch initialization failure"); + SWSS_LOG_THROW("PortsOrch initialization failure"); } } - /* Remove VLAN members in default VLAN */ - for (uint32_t i = 0; i < attr.value.objlist.count; i++) - { - status = sai_vlan_api->remove_vlan_member(vlan_member_list[i]); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to remove VLAN member, rv:%d", status); - throw runtime_error("PortsOrch initialization failure"); - } - } + this->m_cpuPort = Port("CPU", Port::CPU); + this->m_cpuPort.m_port_id = attr.value.oid; + this->m_portList[m_cpuPort.m_alias] = m_cpuPort; + this->m_port_ref_count[m_cpuPort.m_alias] = 0; - SWSS_LOG_NOTICE("Remove %d VLAN members from default VLAN", attr.value.objlist.count); + SWSS_LOG_NOTICE("Get CPU port pid:%" PRIx64, this->m_cpuPort.m_port_id); } -void PortsOrch::removeDefaultBridgePorts() +void PortsOrch::initializePorts() { - /* Get bridge ports in default 1Q bridge - * By default, there will be (m_portCount + m_systemPortCount) number of SAI_BRIDGE_PORT_TYPE_PORT - * ports and one SAI_BRIDGE_PORT_TYPE_1Q_ROUTER port. The former type of - * ports will be removed. 
*/ - vector bridge_port_list(m_portCount + m_systemPortCount + 1); + SWSS_LOG_ENTER(); + sai_status_t status; sai_attribute_t attr; - attr.id = SAI_BRIDGE_ATTR_PORT_LIST; - attr.value.objlist.count = (uint32_t)bridge_port_list.size(); - attr.value.objlist.list = bridge_port_list.data(); - sai_status_t status = sai_bridge_api->get_bridge_attribute(m_default1QBridge, 1, &attr); + // Get port number + attr.id = SAI_SWITCH_ATTR_PORT_NUMBER; + + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to get bridge port list in default 1Q bridge, rv:%d", status); - task_process_status handle_status = handleSaiGetStatus(SAI_API_BRIDGE, status); + SWSS_LOG_ERROR("Failed to get port number, rv:%d", status); + auto handle_status = handleSaiGetStatus(SAI_API_SWITCH, status); if (handle_status != task_process_status::task_success) { - throw runtime_error("PortsOrch initialization failure"); + SWSS_LOG_THROW("PortsOrch initialization failure"); } } - auto bridge_port_count = attr.value.objlist.count; + this->m_portCount = attr.value.u32; - /* Remove SAI_BRIDGE_PORT_TYPE_PORT bridge ports in default 1Q bridge */ - for (uint32_t i = 0; i < bridge_port_count; i++) + SWSS_LOG_NOTICE("Get %d ports", this->m_portCount); + + // Get port list + std::vector portList(this->m_portCount, SAI_NULL_OBJECT_ID); + + attr.id = SAI_SWITCH_ATTR_PORT_LIST; + attr.value.objlist.count = static_cast(portList.size()); + attr.value.objlist.list = portList.data(); + + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + if (status != SAI_STATUS_SUCCESS) { - attr.id = SAI_BRIDGE_PORT_ATTR_TYPE; - attr.value.s32 = SAI_NULL_OBJECT_ID; + SWSS_LOG_ERROR("Failed to get port list, rv:%d", status); + auto handle_status = handleSaiGetStatus(SAI_API_SWITCH, status); + if (handle_status != task_process_status::task_success) + { + SWSS_LOG_THROW("PortsOrch initialization failure"); + } + } - status = 
sai_bridge_api->get_bridge_port_attribute(bridge_port_list[i], 1, &attr); + // Get port hardware lane info + for (const auto &portId : portList) + { + std::vector laneList(Port::max_lanes, 0); + + attr.id = SAI_PORT_ATTR_HW_LANE_LIST; + attr.value.u32list.count = static_cast(laneList.size()); + attr.value.u32list.list = laneList.data(); + + status = sai_port_api->get_port_attribute(portId, 1, &attr); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to get bridge port type, rv:%d", status); - task_process_status handle_status = handleSaiGetStatus(SAI_API_BRIDGE, status); + SWSS_LOG_ERROR("Failed to get hardware lane list pid:%" PRIx64, portId); + auto handle_status = handleSaiGetStatus(SAI_API_PORT, status); if (handle_status != task_process_status::task_success) { - throw runtime_error("PortsOrch initialization failure"); + SWSS_LOG_THROW("PortsOrch initialization failure"); } } - if (attr.value.s32 == SAI_BRIDGE_PORT_TYPE_PORT) + + std::set laneSet; + for (sai_uint32_t i = 0; i < attr.value.u32list.count; i++) { - status = sai_bridge_api->remove_bridge_port(bridge_port_list[i]); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to remove bridge port, rv:%d", status); - throw runtime_error("PortsOrch initialization failure"); - } + laneSet.insert(attr.value.u32list.list[i]); } - } - SWSS_LOG_NOTICE("Remove bridge ports from default 1Q bridge"); + this->m_portListLaneMap[laneSet] = portId; + + SWSS_LOG_NOTICE( + "Get port with lanes pid:%" PRIx64 " lanes:%s", + portId, swss::join(" ", laneSet.cbegin(), laneSet.cend()).c_str() + ); + } } -bool PortsOrch::allPortsReady() +auto PortsOrch::getPortConfigState() const -> port_config_state_t { - return m_initDone && m_pendingPortSet.empty(); + return this->m_portConfigState; } -/* Upon receiving PortInitDone, all the configured ports have been created in both hardware and kernel*/ -bool PortsOrch::isInitDone() +void PortsOrch::setPortConfigState(port_config_state_t value) { - return m_initDone; + 
this->m_portConfigState = value; } -// Upon m_portConfigState transiting to PORT_CONFIG_DONE state, all physical ports have been "created" in hardware. -// Because of the asynchronous nature of sairedis calls, "create" in the strict sense means that the SAI create_port() -// function is called and the create port event has been pushed to the sairedis pipeline. Because sairedis pipeline -// preserves the order of the events received, any event that depends on the physical port being created first, e.g., -// buffer profile apply, will be popped in the FIFO fashion, processed in the right order after the physical port is -// physically created in the ASIC, and thus can be issued safely when this function call returns true. -bool PortsOrch::isConfigDone() +bool PortsOrch::addPortBulk(const std::vector &portList) { - return m_portConfigState == PORT_CONFIG_DONE; -} + // The method is used to create ports in a bulk mode. + // The action takes place when: + // 1. Ports are being initialized at system start + // 2. 
Ports are being added/removed by a user at runtime + + SWSS_LOG_ENTER(); + + if (portList.empty()) + { + return true; + } + + std::vector attrValueList; + std::vector> attrDataList; + std::vector attrCountList; + std::vector attrPtrList; + + auto portCount = static_cast(portList.size()); + std::vector oidList(portCount, SAI_NULL_OBJECT_ID); + std::vector statusList(portCount, SAI_STATUS_SUCCESS); + + for (const auto &cit : portList) + { + sai_attribute_t attr; + std::vector attrList; + + if (cit.lanes.is_set) + { + PortAttrValue_t attrValue; + auto &outList = attrValue.lanes; + auto &inList = cit.lanes.value; + outList.insert(outList.begin(), inList.begin(), inList.end()); + attrValueList.push_back(attrValue); + + attr.id = SAI_PORT_ATTR_HW_LANE_LIST; + attr.value.u32list.count = static_cast(attrValueList.back().lanes.size()); + attr.value.u32list.list = attrValueList.back().lanes.data(); + attrList.push_back(attr); + } + + if (cit.speed.is_set) + { + attr.id = SAI_PORT_ATTR_SPEED; + attr.value.u32 = cit.speed.value; + attrList.push_back(attr); + } + + if (cit.autoneg.is_set) + { + attr.id = SAI_PORT_ATTR_AUTO_NEG_MODE; + attr.value.booldata = cit.autoneg.value; + attrList.push_back(attr); + } + + if (cit.fec.is_set) + { + attr.id = SAI_PORT_ATTR_FEC_MODE; + attr.value.s32 = cit.fec.value; + attrList.push_back(attr); + } + + attrDataList.push_back(attrList); + attrCountList.push_back(static_cast(attrDataList.back().size())); + attrPtrList.push_back(attrDataList.back().data()); + } + + auto status = sai_port_api->create_ports( + gSwitchId, portCount, attrCountList.data(), attrPtrList.data(), + SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR, + oidList.data(), statusList.data() + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create ports with bulk operation, rv:%d", status); + + auto handle_status = handleSaiCreateStatus(SAI_API_PORT, status); + if (handle_status != task_process_status::task_success) + { + SWSS_LOG_THROW("PortsOrch bulk create failure"); 
+ } + + return false; + } + + for (std::uint32_t i = 0; i < portCount; i++) + { + if (statusList.at(i) != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR( + "Failed to create port %s with bulk operation, rv:%d", + portList.at(i).key.c_str(), statusList.at(i) + ); + + auto handle_status = handleSaiCreateStatus(SAI_API_PORT, statusList.at(i)); + if (handle_status != task_process_status::task_success) + { + SWSS_LOG_THROW("PortsOrch bulk create failure"); + } + + return false; + } + + m_portListLaneMap[portList.at(i).lanes.value] = oidList.at(i); + m_portCount++; + } + + // newly created ports might be put in the default vlan so remove all ports from + // the default vlan. + if (gMySwitchType == "voq") { + removeDefaultVlanMembers(); + removeDefaultBridgePorts(); + } + + SWSS_LOG_NOTICE("Created ports: %s", swss::join(',', oidList.begin(), oidList.end()).c_str()); + + return true; +} + +bool PortsOrch::removePortBulk(const std::vector &portList) +{ + SWSS_LOG_ENTER(); + + if (portList.empty()) + { + return true; + } + + for (const auto &cit : portList) + { + Port p; + + // Make sure to bring down admin state + if (getPort(cit, p)) + { + setPortAdminStatus(p, false); + } + // else : port is in default state or not yet created + + // Remove port serdes (if exists) before removing port since this reference is dependency + removePortSerdesAttribute(cit); + } + + auto portCount = static_cast(portList.size()); + std::vector statusList(portCount, SAI_STATUS_SUCCESS); + + auto status = sai_port_api->remove_ports( + portCount, portList.data(), + SAI_BULK_OP_ERROR_MODE_IGNORE_ERROR, + statusList.data() + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove ports with bulk operation, rv:%d", status); + + auto handle_status = handleSaiRemoveStatus(SAI_API_PORT, status); + if (handle_status != task_process_status::task_success) + { + SWSS_LOG_THROW("PortsOrch bulk remove failure"); + } + + return false; + } + + for (std::uint32_t i = 0; i < portCount; i++) + { + 
if (statusList.at(i) != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR( + "Failed to remove port %" PRIx64 " with bulk operation, rv:%d", + portList.at(i), statusList.at(i) + ); + + auto handle_status = handleSaiRemoveStatus(SAI_API_PORT, statusList.at(i)); + if (handle_status != task_process_status::task_success) + { + SWSS_LOG_THROW("PortsOrch bulk remove failure"); + } + + return false; + } + + m_portSupportedSpeeds.erase(portList.at(i)); + m_portCount--; + } + + SWSS_LOG_NOTICE("Removed ports: %s", swss::join(',', portList.begin(), portList.end()).c_str()); + + return true; +} + +void PortsOrch::removeDefaultVlanMembers() +{ + /* Get VLAN members in default VLAN */ + vector vlan_member_list(m_portCount + m_systemPortCount); + + sai_attribute_t attr; + attr.id = SAI_VLAN_ATTR_MEMBER_LIST; + attr.value.objlist.count = (uint32_t)vlan_member_list.size(); + attr.value.objlist.list = vlan_member_list.data(); + + sai_status_t status = sai_vlan_api->get_vlan_attribute(m_defaultVlan, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get VLAN member list in default VLAN, rv:%d", status); + task_process_status handle_status = handleSaiGetStatus(SAI_API_VLAN, status); + if (handle_status != task_process_status::task_success) + { + throw runtime_error("PortsOrch initialization failure"); + } + } + + /* Remove VLAN members in default VLAN */ + for (uint32_t i = 0; i < attr.value.objlist.count; i++) + { + status = sai_vlan_api->remove_vlan_member(vlan_member_list[i]); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove VLAN member, rv:%d", status); + throw runtime_error("PortsOrch initialization failure"); + } + } + + SWSS_LOG_NOTICE("Remove %d VLAN members from default VLAN", attr.value.objlist.count); +} + +void PortsOrch::removeDefaultBridgePorts() +{ + /* Get bridge ports in default 1Q bridge + * By default, there will be (m_portCount + m_systemPortCount) number of SAI_BRIDGE_PORT_TYPE_PORT + * ports and one 
SAI_BRIDGE_PORT_TYPE_1Q_ROUTER port. The former type of + * ports will be removed. */ + vector bridge_port_list(m_portCount + m_systemPortCount + 1); + + sai_attribute_t attr; + attr.id = SAI_BRIDGE_ATTR_PORT_LIST; + attr.value.objlist.count = (uint32_t)bridge_port_list.size(); + attr.value.objlist.list = bridge_port_list.data(); + + sai_status_t status = sai_bridge_api->get_bridge_attribute(m_default1QBridge, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get bridge port list in default 1Q bridge, rv:%d", status); + task_process_status handle_status = handleSaiGetStatus(SAI_API_BRIDGE, status); + if (handle_status != task_process_status::task_success) + { + throw runtime_error("PortsOrch initialization failure"); + } + } + + auto bridge_port_count = attr.value.objlist.count; + + /* Remove SAI_BRIDGE_PORT_TYPE_PORT bridge ports in default 1Q bridge */ + for (uint32_t i = 0; i < bridge_port_count; i++) + { + attr.id = SAI_BRIDGE_PORT_ATTR_TYPE; + attr.value.s32 = SAI_NULL_OBJECT_ID; + + status = sai_bridge_api->get_bridge_port_attribute(bridge_port_list[i], 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get bridge port type, rv:%d", status); + task_process_status handle_status = handleSaiGetStatus(SAI_API_BRIDGE, status); + if (handle_status != task_process_status::task_success) + { + throw runtime_error("PortsOrch initialization failure"); + } + } + if (attr.value.s32 == SAI_BRIDGE_PORT_TYPE_PORT) + { + status = sai_bridge_api->remove_bridge_port(bridge_port_list[i]); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove bridge port, rv:%d", status); + throw runtime_error("PortsOrch initialization failure"); + } + } + } + + SWSS_LOG_NOTICE("Remove bridge ports from default 1Q bridge"); +} + +bool PortsOrch::allPortsReady() +{ + return m_initDone && m_pendingPortSet.empty(); +} + +/* Upon receiving PortInitDone, all the configured ports have been created in both hardware and 
kernel*/ +bool PortsOrch::isInitDone() +{ + return m_initDone; +} + +// Upon m_portConfigState transiting to PORT_CONFIG_DONE state, all physical ports have been "created" in hardware. +// Because of the asynchronous nature of sairedis calls, "create" in the strict sense means that the SAI create_port() +// function is called and the create port event has been pushed to the sairedis pipeline. Because sairedis pipeline +// preserves the order of the events received, any event that depends on the physical port being created first, e.g., +// buffer profile apply, will be popped in the FIFO fashion, processed in the right order after the physical port is +// physically created in the ASIC, and thus can be issued safely when this function call returns true. +bool PortsOrch::isConfigDone() +{ + return m_portConfigState == PORT_CONFIG_DONE; +} + +bool PortsOrch::isGearboxEnabled() +{ + return m_gearboxEnabled; +} /* Use this method to retrieve the desired port if the destination port is a Gearbox port. * For example, if Gearbox is enabled on a specific physical interface, @@ -753,7 +1118,10 @@ bool PortsOrch::getPort(sai_object_id_t id, Port &port) } else { - getPort(itr->second, port); + if (!getPort(itr->second, port)) + { + SWSS_LOG_THROW("Inconsistent saiOidToAlias map and m_portList map: oid=%" PRIx64, id); + } return true; } @@ -994,6 +1362,38 @@ void PortsOrch::getCpuPort(Port &port) port = m_cpuPort; } +/* + * Create host_tx_ready field in PORT_TABLE of STATE-DB + * and set the field to false by default for the + * front port. 
+ */ +void PortsOrch::initHostTxReadyState(Port &port) +{ + SWSS_LOG_ENTER(); + + vector tuples; + bool exist = m_portStateTable.get(port.m_alias, tuples); + string hostTxReady; + + if (exist) + { + for (auto i : tuples) + { + if (fvField(i) == "host_tx_ready") + { + hostTxReady = fvValue(i); + } + } + } + + if (hostTxReady.empty()) + { + m_portStateTable.hset(port.m_alias, "host_tx_ready", "false"); + SWSS_LOG_NOTICE("initialize host_tx_ready as false for port %s", + port.m_alias.c_str()); + } +} + bool PortsOrch::setPortAdminStatus(Port &port, bool state) { SWSS_LOG_ENTER(); @@ -1002,11 +1402,27 @@ bool PortsOrch::setPortAdminStatus(Port &port, bool state) attr.id = SAI_PORT_ATTR_ADMIN_STATE; attr.value.booldata = state; + // if sync between cmis module configuration and asic is supported, + // do not change host_tx_ready value in STATE DB when admin status is changed. + + /* Update the host_tx_ready to false before setting admin_state, when admin state is false */ + if (!state && !m_cmisModuleAsicSyncSupported) + { + m_portStateTable.hset(port.m_alias, "host_tx_ready", "false"); + SWSS_LOG_NOTICE("Set admin status DOWN host_tx_ready to false for port %s", + port.m_alias.c_str()); + } + sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &attr); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to set admin status %s to port pid:%" PRIx64, - state ? "UP" : "DOWN", port.m_port_id); + SWSS_LOG_ERROR("Failed to set admin status %s for port %s." + " Setting host_tx_ready as false", + state ? "UP" : "DOWN", port.m_alias.c_str()); + if (!m_cmisModuleAsicSyncSupported) + { + m_portStateTable.hset(port.m_alias, "host_tx_ready", "false"); + } task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); if (handle_status != task_success) { @@ -1014,22 +1430,47 @@ bool PortsOrch::setPortAdminStatus(Port &port, bool state) } } - SWSS_LOG_INFO("Set admin status %s to port pid:%" PRIx64, - state ? 
"UP" : "DOWN", port.m_port_id); + bool gbstatus = setGearboxPortsAttr(port, SAI_PORT_ATTR_ADMIN_STATE, &state); + if (gbstatus != true && !m_cmisModuleAsicSyncSupported) + { + m_portStateTable.hset(port.m_alias, "host_tx_ready", "false"); + SWSS_LOG_NOTICE("Set host_tx_ready to false as gbstatus is false " + "for port %s", port.m_alias.c_str()); + } - setGearboxPortsAttr(port, SAI_PORT_ATTR_ADMIN_STATE, &state); + /* Update the state table for host_tx_ready*/ + if (state && (gbstatus == true) && (status == SAI_STATUS_SUCCESS) && !m_cmisModuleAsicSyncSupported) + { + m_portStateTable.hset(port.m_alias, "host_tx_ready", "true"); + SWSS_LOG_NOTICE("Set admin status UP host_tx_ready to true for port %s", + port.m_alias.c_str()); + } return true; } -bool PortsOrch::getPortAdminStatus(sai_object_id_t id, bool &up) +void PortsOrch::setHostTxReady(sai_object_id_t portId, std::string status) { - SWSS_LOG_ENTER(); - - getDestPortId(id, LINE_PORT_TYPE, id); - - sai_attribute_t attr; - attr.id = SAI_PORT_ATTR_ADMIN_STATE; + Port p; + SWSS_LOG_ERROR("NOA inside setHostTxReady function. 
status = %s", status.c_str()); + + if (!getPort(portId, p)) + { + SWSS_LOG_ERROR("Failed to get port object for port id 0x%" PRIx64, portId); + return; + } + + m_portStateTable.hset(p.m_alias, "host_tx_ready", status); +} + +bool PortsOrch::getPortAdminStatus(sai_object_id_t id, bool &up) +{ + SWSS_LOG_ENTER(); + + getDestPortId(id, LINE_PORT_TYPE, id); + + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_ADMIN_STATE; sai_status_t status = sai_port_api->get_port_attribute(id, 1, &attr); if (status != SAI_STATUS_SUCCESS) @@ -1047,72 +1488,122 @@ bool PortsOrch::getPortAdminStatus(sai_object_id_t id, bool &up) return true; } -bool PortsOrch::setPortMtu(sai_object_id_t id, sai_uint32_t mtu) +bool PortsOrch::getPortHostTxReady(const Port& port, bool &hostTxReadyVal) +{ + SWSS_LOG_ENTER(); + SWSS_LOG_ERROR("NOA inside getPortHostTxReady function"); + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_HOST_TX_READY_STATUS; + + sai_status_t status = sai_port_api->get_port_attribute(port.m_port_id, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + return false; + } + + hostTxReadyVal = attr.value.s32; + + return true; +} + +bool PortsOrch::getPortMtu(const Port& port, sai_uint32_t &mtu) +{ + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_MTU; + + sai_status_t status = sai_port_api->get_port_attribute(port.m_port_id, 1, &attr); + + if (status != SAI_STATUS_SUCCESS) + { + return false; + } + + mtu = attr.value.u32 - (uint32_t)(sizeof(struct ether_header) + FCS_LEN + VLAN_TAG_LEN); + + /* Reduce the default MTU got from ASIC by MAX_MACSEC_SECTAG_SIZE */ + if (mtu > MAX_MACSEC_SECTAG_SIZE) + { + mtu -= MAX_MACSEC_SECTAG_SIZE; + } + + return true; +} + +bool PortsOrch::setPortMtu(const Port& port, sai_uint32_t mtu) { SWSS_LOG_ENTER(); sai_attribute_t attr; attr.id = SAI_PORT_ATTR_MTU; /* mtu + 14 + 4 + 4 = 22 bytes */ - attr.value.u32 = (uint32_t)(mtu + sizeof(struct ether_header) + FCS_LEN + VLAN_TAG_LEN); + mtu += (uint32_t)(sizeof(struct ether_header) + 
FCS_LEN + VLAN_TAG_LEN); + attr.value.u32 = mtu; + + if (isMACsecPort(port.m_port_id)) + { + attr.value.u32 += MAX_MACSEC_SECTAG_SIZE; + } - sai_status_t status = sai_port_api->set_port_attribute(id, &attr); + sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &attr); if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to set MTU %u to port pid:%" PRIx64 ", rv:%d", - attr.value.u32, id, status); + attr.value.u32, port.m_port_id, status); task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); if (handle_status != task_success) { return parseHandleSaiStatusFailure(handle_status); } } - SWSS_LOG_INFO("Set MTU %u to port pid:%" PRIx64, attr.value.u32, id); + + if (m_gearboxEnabled) + { + setGearboxPortsAttr(port, SAI_PORT_ATTR_MTU, &mtu); + } + SWSS_LOG_INFO("Set MTU %u to port pid:%" PRIx64, attr.value.u32, port.m_port_id); return true; } -bool PortsOrch::setPortTpid(sai_object_id_t id, sai_uint16_t tpid) +bool PortsOrch::setPortTpid(Port &port, sai_uint16_t tpid) { SWSS_LOG_ENTER(); - sai_status_t status = SAI_STATUS_SUCCESS; - sai_attribute_t attr; + sai_attribute_t attr; attr.id = SAI_PORT_ATTR_TPID; + attr.value.u16 = tpid; - attr.value.u16 = (uint16_t)tpid; - - status = sai_port_api->set_port_attribute(id, &attr); + auto status = sai_port_api->set_port_attribute(port.m_port_id, &attr); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to set TPID 0x%x to port pid:%" PRIx64 ", rv:%d", - attr.value.u16, id, status); + SWSS_LOG_ERROR("Failed to set TPID 0x%x to port %s, rv:%d", + attr.value.u16, port.m_alias.c_str(), status); task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); if (handle_status != task_success) { return parseHandleSaiStatusFailure(handle_status); } } - else - { - SWSS_LOG_NOTICE("Set TPID 0x%x to port pid:%" PRIx64, attr.value.u16, id); - } + + SWSS_LOG_NOTICE("Set TPID 0x%x to port %s", attr.value.u16, port.m_alias.c_str()); + return true; } - -bool 
PortsOrch::setPortFec(Port &port, sai_port_fec_mode_t mode) +bool PortsOrch::setPortFec(Port &port, sai_port_fec_mode_t fec_mode) { SWSS_LOG_ENTER(); sai_attribute_t attr; attr.id = SAI_PORT_ATTR_FEC_MODE; - attr.value.s32 = mode; + attr.value.s32 = fec_mode; sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &attr); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to set fec mode %d to port pid:%" PRIx64, mode, port.m_port_id); + SWSS_LOG_ERROR("Failed to set FEC mode %d to port %s", fec_mode, port.m_alias.c_str()); task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); if (handle_status != task_success) { @@ -1120,9 +1611,9 @@ bool PortsOrch::setPortFec(Port &port, sai_port_fec_mode_t mode) } } - SWSS_LOG_INFO("Set fec mode %d to port pid:%" PRIx64, mode, port.m_port_id); + setGearboxPortsAttr(port, SAI_PORT_ATTR_FEC_MODE, &fec_mode); - setGearboxPortsAttr(port, SAI_PORT_ATTR_FEC_MODE, &mode); + SWSS_LOG_NOTICE("Set port %s FEC mode %d", port.m_alias.c_str(), fec_mode); return true; } @@ -1193,42 +1684,64 @@ bool PortsOrch::setPortPfc(sai_object_id_t portId, uint8_t pfc_bitmask) return true; } -bool PortsOrch::setPortPfcAsym(Port &port, string pfc_asym) +bool PortsOrch::setPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t pfcwd_bitmask) { SWSS_LOG_ENTER(); - sai_attribute_t attr; - uint8_t pfc = 0; + Port p; - if (!getPortPfc(port.m_port_id, &pfc)) + if (!getPort(portId, p)) { + SWSS_LOG_ERROR("Failed to get port object for port id 0x%" PRIx64, portId); return false; } - auto found = pfc_asym_map.find(pfc_asym); - if (found == pfc_asym_map.end()) + p.m_pfcwd_sw_bitmask = pfcwd_bitmask; + + m_portList[p.m_alias] = p; + + SWSS_LOG_INFO("Set PFC watchdog port id=0x%" PRIx64 ", bitmast=0x%x", portId, pfcwd_bitmask); + return true; +} + +bool PortsOrch::getPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t *pfcwd_bitmask) +{ + SWSS_LOG_ENTER(); + + Port p; + + if (!pfcwd_bitmask || !getPort(portId, p)) { - 
SWSS_LOG_ERROR("Incorrect asymmetric PFC mode: %s", pfc_asym.c_str()); + SWSS_LOG_ERROR("Failed to get port object for port id 0x%" PRIx64, portId); return false; } - auto new_pfc_asym = found->second; - if (port.m_pfc_asym == new_pfc_asym) + *pfcwd_bitmask = p.m_pfcwd_sw_bitmask; + + return true; +} + +bool PortsOrch::setPortPfcAsym(Port &port, sai_port_priority_flow_control_mode_t pfc_asym) +{ + SWSS_LOG_ENTER(); + + uint8_t pfc = 0; + if (!getPortPfc(port.m_port_id, &pfc)) { - SWSS_LOG_NOTICE("Already set asymmetric PFC mode: %s", pfc_asym.c_str()); - return true; + return false; } - port.m_pfc_asym = new_pfc_asym; + port.m_pfc_asym = pfc_asym; m_portList[port.m_alias] = port; + sai_attribute_t attr; attr.id = SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL_MODE; - attr.value.s32 = (int32_t) port.m_pfc_asym; + attr.value.s32 = pfc_asym; sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &attr); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to set PFC mode %d to port id 0x%" PRIx64 " (rc:%d)", port.m_pfc_asym, port.m_port_id, status); + SWSS_LOG_ERROR("Failed to set PFC mode %d to port id 0x%" PRIx64 " (rc:%d)", pfc_asym, port.m_port_id, status); task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); if (handle_status != task_success) { @@ -1241,7 +1754,7 @@ bool PortsOrch::setPortPfcAsym(Port &port, string pfc_asym) return false; } - if (port.m_pfc_asym == SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_SEPARATE) + if (pfc_asym == SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_SEPARATE) { attr.id = SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL_RX; attr.value.u8 = static_cast(0xff); @@ -1252,13 +1765,13 @@ bool PortsOrch::setPortPfcAsym(Port &port, string pfc_asym) SWSS_LOG_ERROR("Failed to set RX PFC 0x%x to port id 0x%" PRIx64 " (rc:%d)", attr.value.u8, port.m_port_id, status); task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); if (handle_status != task_success) - { - return parseHandleSaiStatusFailure(handle_status); - } 
+ { + return parseHandleSaiStatusFailure(handle_status); + } } } - SWSS_LOG_INFO("Set asymmetric PFC %s to port id 0x%" PRIx64, pfc_asym.c_str(), port.m_port_id); + SWSS_LOG_INFO("Set asymmetric PFC %d to port id 0x%" PRIx64, pfc_asym, port.m_port_id); return true; } @@ -1610,7 +2123,7 @@ bool PortsOrch::bindAclTable(sai_object_id_t port_oid, member_attrs.push_back(member_attr); member_attr.id = SAI_ACL_TABLE_GROUP_MEMBER_ATTR_PRIORITY; - member_attr.value.u32 = 100; // TODO: double check! + member_attr.value.u32 = 100; member_attrs.push_back(member_attr); status = sai_acl_api->create_acl_table_group_member(&group_member_oid, gSwitchId, (uint32_t)member_attrs.size(), member_attrs.data()); @@ -1860,12 +2373,156 @@ void PortsOrch::initPortSupportedSpeeds(const std::string& alias, sai_object_id_ m_portStateTable.set(alias, v); } + +void PortsOrch::initPortCapAutoNeg(Port &port) +{ + sai_status_t status; + sai_attribute_t attr; + + attr.id = SAI_PORT_ATTR_SUPPORTED_AUTO_NEG_MODE; + status = sai_port_api->get_port_attribute(port.m_port_id, 1, &attr); + if (status == SAI_STATUS_SUCCESS) + { + port.m_cap_an = attr.value.booldata ? 1 : 0; + } + else + { + // To avoid breakage on the existing platforms, AN should be 1 by default + port.m_cap_an = 1; + SWSS_LOG_WARN("Unable to get %s AN support capability", + port.m_alias.c_str()); + } +} + +void PortsOrch::initPortCapLinkTraining(Port &port) +{ + // TODO: + // Add SAI_PORT_ATTR_SUPPORTED_LINK_TRAINING_MODE query when it is + // available in the saiport.h of SAI. 
+ port.m_cap_lt = 1; + SWSS_LOG_WARN("Unable to get %s LT support capability", port.m_alias.c_str()); +} + +bool PortsOrch::isFecModeSupported(const Port &port, sai_port_fec_mode_t fec_mode) +{ + initPortSupportedFecModes(port.m_alias, port.m_port_id); + + const auto &obj = m_portSupportedFecModes.at(port.m_port_id); + + if (!obj.supported) + { + return true; + } + + if (obj.data.empty()) + { + return false; + } + + return std::find(obj.data.cbegin(), obj.data.cend(), fec_mode) != obj.data.cend(); +} + +sai_status_t PortsOrch::getPortSupportedFecModes(PortSupportedFecModes &supported_fecmodes, sai_object_id_t port_id) +{ + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + std::vector fecModes(Port::max_fec_modes); + attr.id = SAI_PORT_ATTR_SUPPORTED_FEC_MODE; + attr.value.s32list.count = static_cast(fecModes.size()); + attr.value.s32list.list = fecModes.data(); + + auto status = sai_port_api->get_port_attribute(port_id, 1, &attr); + if (status == SAI_STATUS_SUCCESS) + { + for (std::uint32_t i = 0; i < attr.value.s32list.count; i++) + { + supported_fecmodes.insert(static_cast(attr.value.s32list.list[i])); + } + } + else + { + if (SAI_STATUS_IS_ATTR_NOT_SUPPORTED(status) || + SAI_STATUS_IS_ATTR_NOT_IMPLEMENTED(status) || + (status == SAI_STATUS_NOT_SUPPORTED) || + (status == SAI_STATUS_NOT_IMPLEMENTED)) + { + // unable to validate FEC mode if attribute is not supported on platform + SWSS_LOG_NOTICE( + "Unable to validate FEC mode for port id=%" PRIx64 " due to unsupported by platform", port_id + ); + } + else + { + SWSS_LOG_ERROR( + "Failed to get a list of supported FEC modes for port id=%" PRIx64 ". 
Error=%d", port_id, status + ); + } + } + + return status; +} + +void PortsOrch::initPortSupportedFecModes(const std::string& alias, sai_object_id_t port_id) +{ + SWSS_LOG_ENTER(); + + // If port supported speeds map already contains the information, save the SAI call + if (m_portSupportedFecModes.count(port_id) > 0) + { + return; + } + + auto &obj = m_portSupportedFecModes[port_id]; + auto &supported_fec_modes = obj.data; + + auto status = getPortSupportedFecModes(supported_fec_modes, port_id); + if (status != SAI_STATUS_SUCCESS) + { + // Do not expose "supported_fecs" in case fetching FEC modes is not supported by the vendor + SWSS_LOG_INFO("No supported_fecs exposed to STATE_DB for port %s since fetching supported FEC modes is not supported by the vendor", + alias.c_str()); + return; + } + + obj.supported = true; + + std::vector fecModeList; + if (supported_fec_modes.empty()) + { + fecModeList.push_back("N/A"); + } + else + { + for (const auto &cit : supported_fec_modes) + { + std::string fecMode; + if (!m_portHlpr.fecToStr(fecMode, cit)) + { + SWSS_LOG_ERROR( + "Failed to convert FEC mode for port %s: unknown value %d", + alias.c_str(), static_cast(cit) + ); + continue; + } + + fecModeList.push_back(fecMode); + } + } + + std::vector v; + std::string supported_fec_modes_str = swss::join(',', fecModeList.begin(), fecModeList.end()); + v.emplace_back(std::make_pair("supported_fecs", supported_fec_modes_str)); + + m_portStateTable.set(alias, v); +} + /* * If Gearbox is enabled and this is a Gearbox port then set the attributes accordingly. 
*/ -bool PortsOrch::setGearboxPortsAttr(Port &port, sai_port_attr_t id, void *value) +bool PortsOrch::setGearboxPortsAttr(const Port &port, sai_port_attr_t id, void *value) { - bool status; + bool status = false; status = setGearboxPortAttr(port, PHY_PORT_TYPE, id, value); @@ -1881,7 +2538,7 @@ bool PortsOrch::setGearboxPortsAttr(Port &port, sai_port_attr_t id, void *value) * If Gearbox is enabled and this is a Gearbox port then set the specific lane attribute. * Note: the appl_db is also updated (Gearbox config_db tables are TBA). */ -bool PortsOrch::setGearboxPortAttr(Port &port, dest_port_type_t port_type, sai_port_attr_t id, void *value) +bool PortsOrch::setGearboxPortAttr(const Port &port, dest_port_type_t port_type, sai_port_attr_t id, void *value) { sai_status_t status = SAI_STATUS_SUCCESS; sai_object_id_t dest_port_id; @@ -1935,6 +2592,15 @@ bool PortsOrch::setGearboxPortAttr(Port &port, dest_port_type_t port_type, sai_p } SWSS_LOG_NOTICE("BOX: Set %s lane %s %d", port.m_alias.c_str(), speed_attr.c_str(), speed); break; + case SAI_PORT_ATTR_MTU: + attr.id = id; + attr.value.u32 = *static_cast(value); + if (LINE_PORT_TYPE == port_type && isMACsecPort(dest_port_id)) + { + attr.value.u32 += MAX_MACSEC_SECTAG_SIZE; + } + SWSS_LOG_NOTICE("BOX: Set %s MTU %d", port.m_alias.c_str(), attr.value.u32); + break; default: return false; } @@ -2014,17 +2680,56 @@ bool PortsOrch::getPortSpeed(sai_object_id_t id, sai_uint32_t &speed) return true; } -task_process_status PortsOrch::setPortAdvSpeeds(sai_object_id_t port_id, std::vector& speed_list) +bool PortsOrch::getPortAdvSpeeds(const Port& port, bool remote, std::vector& speed_list) { - SWSS_LOG_ENTER(); + sai_object_id_t port_id = port.m_port_id; + sai_object_id_t line_port_id; sai_attribute_t attr; sai_status_t status; + std::vector speeds(PORT_SPEED_LIST_DEFAULT_SIZE); + + attr.id = remote ? 
SAI_PORT_ATTR_REMOTE_ADVERTISED_SPEED : SAI_PORT_ATTR_ADVERTISED_SPEED; + attr.value.u32list.count = static_cast(speeds.size()); + attr.value.u32list.list = speeds.data(); + + if (getDestPortId(port_id, LINE_PORT_TYPE, line_port_id)) + { + status = sai_port_api->get_port_attribute(line_port_id, 1, &attr); + } + else + { + status = sai_port_api->get_port_attribute(port_id, 1, &attr); + } + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_WARN("Unable to get advertised speed for %s", port.m_alias.c_str()); + return false; + } + speeds.resize(attr.value.u32list.count); + speed_list.swap(speeds); + return true; +} + +bool PortsOrch::getPortAdvSpeeds(const Port& port, bool remote, string& adv_speeds) +{ + std::vector speed_list; + bool rc = getPortAdvSpeeds(port, remote, speed_list); + + adv_speeds = rc ? swss::join(',', speed_list.begin(), speed_list.end()) : ""; + return rc; +} + +task_process_status PortsOrch::setPortAdvSpeeds(Port &port, std::set &speed_list) +{ + SWSS_LOG_ENTER(); + sai_attribute_t attr; + std::vector speedList(speed_list.begin(), speed_list.end()); attr.id = SAI_PORT_ATTR_ADVERTISED_SPEED; - attr.value.u32list.list = speed_list.data(); - attr.value.u32list.count = static_cast(speed_list.size()); + attr.value.u32list.list = speedList.data(); + attr.value.u32list.count = static_cast(speedList.size()); - status = sai_port_api->set_port_attribute(port_id, &attr); + auto status = sai_port_api->set_port_attribute(port.m_port_id, &attr); if (status != SAI_STATUS_SUCCESS) { return handleSaiSetStatus(SAI_API_PORT, status); @@ -2033,16 +2738,15 @@ task_process_status PortsOrch::setPortAdvSpeeds(sai_object_id_t port_id, std::ve return task_success; } -task_process_status PortsOrch::setPortInterfaceType(sai_object_id_t port_id, sai_port_interface_type_t interface_type) +task_process_status PortsOrch::setPortInterfaceType(Port &port, sai_port_interface_type_t interface_type) { SWSS_LOG_ENTER(); - sai_attribute_t attr; - sai_status_t status; + sai_attribute_t 
attr; attr.id = SAI_PORT_ATTR_INTERFACE_TYPE; - attr.value.u32 = static_cast(interface_type); + attr.value.s32 = interface_type; - status = sai_port_api->set_port_attribute(port_id, &attr); + auto status = sai_port_api->set_port_attribute(port.m_port_id, &attr); if (status != SAI_STATUS_SUCCESS) { return handleSaiSetStatus(SAI_API_PORT, status); @@ -2051,17 +2755,17 @@ task_process_status PortsOrch::setPortInterfaceType(sai_object_id_t port_id, sai return task_success; } -task_process_status PortsOrch::setPortAdvInterfaceTypes(sai_object_id_t port_id, std::vector &interface_types) +task_process_status PortsOrch::setPortAdvInterfaceTypes(Port &port, std::set &interface_types) { SWSS_LOG_ENTER(); - sai_attribute_t attr; - sai_status_t status; + sai_attribute_t attr; + std::vector interfaceTypeList(interface_types.begin(), interface_types.end()); attr.id = SAI_PORT_ATTR_ADVERTISED_INTERFACE_TYPE; - attr.value.u32list.list = interface_types.data(); - attr.value.u32list.count = static_cast(interface_types.size()); + attr.value.s32list.list = interfaceTypeList.data(); + attr.value.s32list.count = static_cast(interfaceTypeList.size()); - status = sai_port_api->set_port_attribute(port_id, &attr); + auto status = sai_port_api->set_port_attribute(port.m_port_id, &attr); if (status != SAI_STATUS_SUCCESS) { return handleSaiSetStatus(SAI_API_PORT, status); @@ -2074,19 +2778,36 @@ bool PortsOrch::getQueueTypeAndIndex(sai_object_id_t queue_id, string &type, uin { SWSS_LOG_ENTER(); - sai_attribute_t attr[2]; - attr[0].id = SAI_QUEUE_ATTR_TYPE; - attr[1].id = SAI_QUEUE_ATTR_INDEX; + auto const &queueInfoRef = m_queueInfo.find(queue_id); - sai_status_t status = sai_queue_api->get_queue_attribute(queue_id, 2, attr); - if (status != SAI_STATUS_SUCCESS) + sai_attribute_t attr[2]; + if (queueInfoRef == m_queueInfo.end()) { - SWSS_LOG_ERROR("Failed to get queue type and index for queue %" PRIu64 " rv:%d", queue_id, status); - task_process_status handle_status = 
handleSaiGetStatus(SAI_API_QUEUE, status); - if (handle_status != task_process_status::task_success) + attr[0].id = SAI_QUEUE_ATTR_TYPE; + attr[1].id = SAI_QUEUE_ATTR_INDEX; + + sai_status_t status = sai_queue_api->get_queue_attribute(queue_id, 2, attr); + if (status != SAI_STATUS_SUCCESS) { - return false; + SWSS_LOG_ERROR("Failed to get queue type and index for queue %" PRIu64 " rv:%d", queue_id, status); + task_process_status handle_status = handleSaiGetStatus(SAI_API_QUEUE, status); + if (handle_status != task_process_status::task_success) + { + return false; + } } + + SWSS_LOG_INFO("Caching information (index %d type %d) for queue %" PRIx64, attr[1].value.u8, attr[0].value.s32, queue_id); + + m_queueInfo[queue_id].type = static_cast(attr[0].value.s32); + m_queueInfo[queue_id].index = attr[1].value.u8; + } + else + { + attr[0].value.s32 = m_queueInfo[queue_id].type; + attr[1].value.u8 = m_queueInfo[queue_id].index; + + SWSS_LOG_INFO("Fetched cached information (index %d type %d) for queue %" PRIx64, attr[1].value.u8, attr[0].value.s32, queue_id); } switch (attr[0].value.s32) @@ -2100,8 +2821,11 @@ bool PortsOrch::getQueueTypeAndIndex(sai_object_id_t queue_id, string &type, uin case SAI_QUEUE_TYPE_MULTICAST: type = "SAI_QUEUE_TYPE_MULTICAST"; break; + case SAI_QUEUE_TYPE_UNICAST_VOQ: + type = "SAI_QUEUE_TYPE_UNICAST_VOQ"; + break; default: - SWSS_LOG_ERROR("Got unsupported queue type %d for %" PRIu64 " queue", attr[0].value.s32, queue_id); + SWSS_LOG_ERROR("Got unsupported queue type %d for %" PRIx64 " queue", attr[0].value.s32, queue_id); throw runtime_error("Got unsupported queue type"); } @@ -2110,43 +2834,88 @@ bool PortsOrch::getQueueTypeAndIndex(sai_object_id_t queue_id, string &type, uin return true; } -task_process_status PortsOrch::setPortAutoNeg(sai_object_id_t id, int an) +bool PortsOrch::isAutoNegEnabled(sai_object_id_t id) { SWSS_LOG_ENTER(); sai_attribute_t attr; attr.id = SAI_PORT_ATTR_AUTO_NEG_MODE; - attr.value.booldata = (an == 1 ? 
true : false); - sai_status_t status = sai_port_api->set_port_attribute(id, &attr); + sai_status_t status = sai_port_api->get_port_attribute(id, 1, &attr); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to set AutoNeg %u to port pid:%" PRIx64, attr.value.booldata, id); - return handleSaiSetStatus(SAI_API_PORT, status); + SWSS_LOG_ERROR("Failed to get port AutoNeg status for port pid:%" PRIx64, id); + return false; } - SWSS_LOG_INFO("Set AutoNeg %u to port pid:%" PRIx64, attr.value.booldata, id); - return task_success; + + return attr.value.booldata; } -bool PortsOrch::setHostIntfsOperStatus(const Port& port, bool isUp) const +task_process_status PortsOrch::setPortAutoNeg(Port &port, bool autoneg) { SWSS_LOG_ENTER(); sai_attribute_t attr; - attr.id = SAI_HOSTIF_ATTR_OPER_STATUS; - attr.value.booldata = isUp; + attr.id = SAI_PORT_ATTR_AUTO_NEG_MODE; + attr.value.booldata = autoneg; - sai_status_t status = sai_hostif_api->set_hostif_attribute(port.m_hif_id, &attr); + sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &attr); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_WARN("Failed to set operation status %s to host interface %s", - isUp ? "UP" : "DOWN", port.m_alias.c_str()); - return false; + SWSS_LOG_ERROR("Failed to set AutoNeg %u to port %s", attr.value.booldata, port.m_alias.c_str()); + return handleSaiSetStatus(SAI_API_PORT, status); + } + SWSS_LOG_INFO("Set AutoNeg %u to port %s", attr.value.booldata, port.m_alias.c_str()); + return task_success; +} + +task_process_status PortsOrch::setPortLinkTraining(const Port &port, bool state) +{ + SWSS_LOG_ENTER(); + + if (port.m_type != Port::PHY) + { + return task_failed; + } + + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_LINK_TRAINING_ENABLE; + attr.value.booldata = state; + + string op = state ? 
"on" : "off"; + sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set LT %s to port %s", op.c_str(), port.m_alias.c_str()); + return handleSaiSetStatus(SAI_API_PORT, status); + } + + SWSS_LOG_INFO("Set LT %s to port %s", op.c_str(), port.m_alias.c_str()); + + return task_success; +} + +bool PortsOrch::setHostIntfsOperStatus(const Port& port, bool isUp) const +{ + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + attr.id = SAI_HOSTIF_ATTR_OPER_STATUS; + attr.value.booldata = isUp; + + sai_status_t status = sai_hostif_api->set_hostif_attribute(port.m_hif_id, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_WARN("Failed to set operation status %s to host interface %s", + isUp ? "UP" : "DOWN", port.m_alias.c_str()); + return false; } SWSS_LOG_NOTICE("Set operation status %s to host interface %s", isUp ? "UP" : "DOWN", port.m_alias.c_str()); + event_params_t params = {{"ifname",port.m_alias},{"status",isUp ? 
"up" : "down"}}; + event_publish(g_events_handle, "if-state", ¶ms); return true; } @@ -2180,6 +2949,23 @@ bool PortsOrch::createVlanHostIntf(Port& vl, string hostif_name) attr.value.chardata[SAI_HOSTIF_NAME_SIZE - 1] = '\0'; attrs.push_back(attr); + bool set_hostif_tx_queue = false; + if (gSwitchOrch->querySwitchCapability(SAI_OBJECT_TYPE_HOSTIF, SAI_HOSTIF_ATTR_QUEUE)) + { + set_hostif_tx_queue = true; + } + else + { + SWSS_LOG_WARN("Hostif queue attribute not supported"); + } + + if (set_hostif_tx_queue) + { + attr.id = SAI_HOSTIF_ATTR_QUEUE; + attr.value.u32 = DEFAULT_HOSTIF_TX_QUEUE; + attrs.push_back(attr); + } + sai_status_t status = sai_hostif_api->create_hostif(&vl.m_vlan_info.host_intf_id, gSwitchId, (uint32_t)attrs.size(), attrs.data()); if (status != SAI_STATUS_SUCCESS) { @@ -2221,17 +3007,26 @@ void PortsOrch::updateDbPortOperStatus(const Port& port, sai_port_oper_status_t m_portTable->set(port.m_alias, tuples); } -bool PortsOrch::addPort(const set &lane_set, uint32_t speed, int an, string fec_mode) +bool PortsOrch::addPort(const PortConfig &port) { SWSS_LOG_ENTER(); - vector lanes(lane_set.begin(), lane_set.end()); + if (!port.speed.is_set || !port.lanes.is_set) + { + /* + speed and lane list are mandatory attributes for the initial create_port call + This check is required because the incoming notifs may not be atomic + */ + return true; + } + + vector lanes(port.lanes.value.begin(), port.lanes.value.end()); sai_attribute_t attr; vector attrs; attr.id = SAI_PORT_ATTR_SPEED; - attr.value.u32 = speed; + attr.value.u32 = port.speed.value; attrs.push_back(attr); attr.id = SAI_PORT_ATTR_HW_LANE_LIST; @@ -2239,17 +3034,17 @@ bool PortsOrch::addPort(const set &lane_set, uint32_t speed, int an, string attr.value.u32list.count = static_cast(lanes.size()); attrs.push_back(attr); - if (an == true) + if (port.autoneg.is_set) { attr.id = SAI_PORT_ATTR_AUTO_NEG_MODE; - attr.value.booldata = true; + attr.value.booldata = port.autoneg.value; attrs.push_back(attr); } 
- if (!fec_mode.empty()) + if (port.fec.is_set) { attr.id = SAI_PORT_ATTR_FEC_MODE; - attr.value.u32 = fec_mode_map[fec_mode]; + attr.value.s32 = port.fec.value; attrs.push_back(attr); } @@ -2257,7 +3052,7 @@ bool PortsOrch::addPort(const set &lane_set, uint32_t speed, int an, string sai_status_t status = sai_port_api->create_port(&port_id, gSwitchId, static_cast(attrs.size()), attrs.data()); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to create port with the speed %u, rv:%d", speed, status); + SWSS_LOG_ERROR("Failed to create port with the speed %u, rv:%d", port.speed.value, status); task_process_status handle_status = handleSaiCreateStatus(SAI_API_PORT, status); if (handle_status != task_success) { @@ -2265,10 +3060,17 @@ bool PortsOrch::addPort(const set &lane_set, uint32_t speed, int an, string } } - m_portListLaneMap[lane_set] = port_id; + m_portListLaneMap[port.lanes.value] = port_id; m_portCount++; - SWSS_LOG_NOTICE("Create port %" PRIx64 " with the speed %u", port_id, speed); + // newly created ports might be put in the default vlan so remove all ports from + // the default vlan. + if (gMySwitchType == "voq") { + removeDefaultVlanMembers(); + removeDefaultBridgePorts(); + } + + SWSS_LOG_NOTICE("Create port %" PRIx64 " with the speed %u", port_id, port.speed.value); return true; } @@ -2285,10 +3087,27 @@ sai_status_t PortsOrch::removePort(sai_object_id_t port_id) */ if (getPort(port_id, port)) { - setPortAdminStatus(port, false); + /* Bring port down before removing port */ + if (!setPortAdminStatus(port, false)) + { + SWSS_LOG_ERROR("Failed to set admin status to DOWN to remove port %" PRIx64, port_id); + } } /* else : port is in default state or not yet created */ + /* + * Remove port serdes (if exists) before removing port since this + * reference is dependency. 
+ */ + + removePortSerdesAttribute(port_id); + + for (auto queue_id : port.m_queue_ids) + { + SWSS_LOG_INFO("Removing cached information for queue %" PRIx64, queue_id); + m_queueInfo.erase(queue_id); + } + sai_status_t status = sai_port_api->remove_port(port_id); if (status != SAI_STATUS_SUCCESS) { @@ -2317,10 +3136,15 @@ string PortsOrch::getPriorityGroupDropPacketsFlexCounterTableKey(string key) return string(PG_DROP_STAT_COUNTER_FLEX_COUNTER_GROUP) + ":" + key; } -bool PortsOrch::initPort(const string &alias, const string &role, const int index, const set &lane_set) +bool PortsOrch::initPort(const PortConfig &port) { SWSS_LOG_ENTER(); + const auto &alias = port.key; + const auto &role = port.role.value; + const auto &index = port.index.value; + const auto &lane_set = port.lanes.value; + /* Determine if the lane combination exists in switch */ if (m_portListLaneMap.find(lane_set) != m_portListLaneMap.end()) { @@ -2363,7 +3187,15 @@ bool PortsOrch::initPort(const string &alias, const string &role, const int inde if (flex_counters_orch->getPortCountersState()) { auto port_counter_stats = generateCounterStats(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP); - port_stat_manager.setCounterIdList(p.m_port_id, CounterType::PORT, port_counter_stats); + port_stat_manager.setCounterIdList(p.m_port_id, + CounterType::PORT, port_counter_stats); + auto gbport_counter_stats = generateCounterStats(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, true); + if (p.m_system_side_id) + gb_port_stat_manager.setCounterIdList(p.m_system_side_id, + CounterType::PORT, gbport_counter_stats); + if (p.m_line_side_id) + gb_port_stat_manager.setCounterIdList(p.m_line_side_id, + CounterType::PORT, gbport_counter_stats); } if (flex_counters_orch->getPortBufferDropCountersState()) { @@ -2376,7 +3208,7 @@ bool PortsOrch::initPort(const string &alias, const string &role, const int inde m_portList[alias].m_init = true; - if (role == "Rec" || role == "Inb") + if (role == Port::Role::Rec || role == Port::Role::Inb) { 
m_recircPortRole[alias] = role; } @@ -2403,8 +3235,13 @@ void PortsOrch::deInitPort(string alias, sai_object_id_t port_id) { SWSS_LOG_ENTER(); - Port p(alias, Port::PHY); - p.m_port_id = port_id; + Port p; + + if (!getPort(port_id, p)) + { + SWSS_LOG_ERROR("Failed to get port object for port id 0x%" PRIx64, port_id); + return; + } /* remove port from flex_counter_table for updating counters */ auto flex_counters_orch = gDirectory.get(); @@ -2418,9 +3255,8 @@ void PortsOrch::deInitPort(string alias, sai_object_id_t port_id) port_buffer_drop_stat_manager.clearCounterIdList(p.m_port_id); } - /* remove port name map from counter table */ - m_counter_db->hdel(COUNTERS_PORT_NAME_MAP, alias); + m_counterTable->hdel("", alias); /* Remove the associated port serdes attribute */ removePortSerdesAttribute(p.m_port_id); @@ -2429,7 +3265,6 @@ void PortsOrch::deInitPort(string alias, sai_object_id_t port_id) SWSS_LOG_NOTICE("De-Initialized port %s", alias.c_str()); } - bool PortsOrch::bake() { SWSS_LOG_ENTER(); @@ -2498,10 +3333,9 @@ void PortsOrch::cleanPortTable(const vector& keys) void PortsOrch::removePortFromLanesMap(string alias) { - for (auto it = m_lanesAliasSpeedMap.begin(); it != m_lanesAliasSpeedMap.end(); it++) { - if (get<0>(it->second) == alias) + if (it->second.key == alias) { SWSS_LOG_NOTICE("Removing port %s from lanes map", alias.c_str()); it = m_lanesAliasSpeedMap.erase(it); @@ -2529,41 +3363,69 @@ void PortsOrch::doPortTask(Consumer &consumer) { SWSS_LOG_ENTER(); - auto it = consumer.m_toSync.begin(); - while (it != consumer.m_toSync.end()) + auto &taskMap = consumer.m_toSync; + auto it = taskMap.begin(); + + while (it != taskMap.end()) { - auto &t = it->second; + auto keyOpFieldsValues = it->second; + auto key = kfvKey(keyOpFieldsValues); + auto op = kfvOp(keyOpFieldsValues); - string alias = kfvKey(t); - string op = kfvOp(t); + SWSS_LOG_INFO("KEY: %s, OP: %s", key.c_str(), op.c_str()); + + if (key.empty()) + { + SWSS_LOG_ERROR("Failed to parse port key: 
empty string"); + it = taskMap.erase(it); + continue; + } - if (alias == "PortConfigDone") + /* Got notification from portsyncd application: + * + * When portsorch receives 'PortConfigDone' message, it indicates port configuration + * procedure is done. Port configuration assumes all data has been read from config db + * and pushed to application db. + * + * Before port configuration procedure, none of other tasks are executed. + */ + if (key == "PortConfigDone") { - if (m_portConfigState != PORT_CONFIG_MISSING) + it = taskMap.erase(it); + + /* portsyncd restarting case: + * When portsyncd restarts, duplicate notifications may be received. + */ + if (getPortConfigState() != PORT_CONFIG_MISSING) { // Already received, ignore this task - it = consumer.m_toSync.erase(it); continue; } - m_portConfigState = PORT_CONFIG_RECEIVED; + setPortConfigState(PORT_CONFIG_RECEIVED); - for (auto i : kfvFieldsValues(t)) + for (const auto &cit : kfvFieldsValues(keyOpFieldsValues)) { - if (fvField(i) == "count") + if (fvField(cit) == "count") { - m_portCount = to_uint(fvValue(i)); + m_portCount = to_uint(fvValue(cit)); } } + + SWSS_LOG_INFO("Got PortConfigDone notification from portsyncd"); + + it = taskMap.begin(); + continue; } - /* Get notification from application */ - /* portsyncd application: + /* Got notification from portsyncd application: + * * When portsorch receives 'PortInitDone' message, it indicates port initialization - * procedure is done. Before port initialization procedure, none of other tasks - * are executed. + * procedure is done. Port initialization assumes all netdevs have been created. + * + * Before port initialization procedure, none of other tasks are executed. */ - if (alias == "PortInitDone") + if (key == "PortInitDone") { /* portsyncd restarting case: * When portsyncd restarts, duplicate notifications may be received. 
@@ -2572,198 +3434,40 @@ void PortsOrch::doPortTask(Consumer &consumer) { addSystemPorts(); m_initDone = true; - SWSS_LOG_INFO("Get PortInitDone notification from portsyncd."); + SWSS_LOG_INFO("Got PortInitDone notification from portsyncd"); } - it = consumer.m_toSync.erase(it); - return; - + it = taskMap.erase(it); + continue; } + PortConfig pCfg(key, op); + if (op == SET_COMMAND) { - set lane_set; - vector attr_val; - map> serdes_attr; - typedef pair> serdes_attr_pair; - string admin_status; - string fec_mode; - string pfc_asym; - uint32_t mtu = 0; - uint32_t speed = 0; - string learn_mode; - string an_str; - int an = -1; - int index = -1; - string role; - string adv_speeds_str; - string interface_type_str; - string adv_interface_types_str; - vector adv_speeds; - sai_port_interface_type_t interface_type; - vector adv_interface_types; - string tpid_string; - uint16_t tpid = 0; + auto &fvMap = m_portConfigMap[key]; - for (auto i : kfvFieldsValues(t)) + for (const auto &cit : kfvFieldsValues(keyOpFieldsValues)) { - attr_val.clear(); - /* Set interface index */ - if (fvField(i) == "index") - { - index = (int)stoul(fvValue(i)); - } - /* Get lane information of a physical port and initialize the port */ - else if (fvField(i) == "lanes") - { - string lane_str; - istringstream iss(fvValue(i)); + auto fieldName = fvField(cit); + auto fieldValue = fvValue(cit); - while (getline(iss, lane_str, ',')) - { - int lane = stoi(lane_str); - lane_set.insert(lane); - } - } - /* Set port admin status */ - else if (fvField(i) == "admin_status") - { - admin_status = fvValue(i); - } - /* Set port MTU */ - else if (fvField(i) == "mtu") - { - mtu = (uint32_t)stoul(fvValue(i)); - } - /* Set port TPID */ - if (fvField(i) == "tpid") - { - tpid_string = fvValue(i); - // Need to get rid of the leading 0x - tpid_string.erase(0,2); - tpid = (uint16_t)stoi(tpid_string, 0, 16); - SWSS_LOG_DEBUG("Handling TPID to 0x%x, string value:%s", tpid, tpid_string.c_str()); - } - /* Set port speed */ - else 
if (fvField(i) == "speed") - { - speed = (uint32_t)stoul(fvValue(i)); - } - /* Set port fec */ - else if (fvField(i) == "fec") - { - fec_mode = fvValue(i); - } - /* Get port fdb learn mode*/ - else if (fvField(i) == "learn_mode") - { - learn_mode = fvValue(i); - } - /* Set port asymmetric PFC */ - else if (fvField(i) == "pfc_asym") - { - pfc_asym = fvValue(i); - } - /* Set autoneg and ignore the port speed setting */ - else if (fvField(i) == "autoneg") - { - an_str = fvValue(i); - } - /* Set advertised speeds */ - else if (fvField(i) == "adv_speeds") - { - adv_speeds_str = fvValue(i); - } - /* Set interface type */ - else if (fvField(i) == "interface_type") - { - interface_type_str = fvValue(i); - } - /* Set advertised interface type */ - else if (fvField(i) == "adv_interface_types") - { - adv_interface_types_str = fvValue(i); - } - /* Set port serdes Pre-emphasis */ - else if (fvField(i) == "preemphasis") - { - getPortSerdesVal(fvValue(i), attr_val); - serdes_attr.insert(serdes_attr_pair(SAI_PORT_SERDES_ATTR_PREEMPHASIS, attr_val)); - } - /* Set port serdes idriver */ - else if (fvField(i) == "idriver") - { - getPortSerdesVal(fvValue(i), attr_val); - serdes_attr.insert(serdes_attr_pair(SAI_PORT_SERDES_ATTR_IDRIVER, attr_val)); - } - /* Set port serdes ipredriver */ - else if (fvField(i) == "ipredriver") - { - getPortSerdesVal(fvValue(i), attr_val); - serdes_attr.insert(serdes_attr_pair(SAI_PORT_SERDES_ATTR_IPREDRIVER, attr_val)); - } - /* Set port serdes pre1 */ - else if (fvField(i) == "pre1") - { - getPortSerdesVal(fvValue(i), attr_val); - serdes_attr.insert(serdes_attr_pair(SAI_PORT_SERDES_ATTR_TX_FIR_PRE1, attr_val)); - } - /* Set port serdes pre2 */ - else if (fvField(i) == "pre2") - { - getPortSerdesVal(fvValue(i), attr_val); - serdes_attr.insert(serdes_attr_pair(SAI_PORT_SERDES_ATTR_TX_FIR_PRE2, attr_val)); - } - /* Set port serdes pre3 */ - else if (fvField(i) == "pre3") - { - getPortSerdesVal(fvValue(i), attr_val); - 
serdes_attr.insert(serdes_attr_pair(SAI_PORT_SERDES_ATTR_TX_FIR_PRE3, attr_val)); - } - /* Set port serdes main */ - else if (fvField(i) == "main") - { - getPortSerdesVal(fvValue(i), attr_val); - serdes_attr.insert(serdes_attr_pair(SAI_PORT_SERDES_ATTR_TX_FIR_MAIN, attr_val)); - } - /* Set port serdes post1 */ - else if (fvField(i) == "post1") - { - getPortSerdesVal(fvValue(i), attr_val); - serdes_attr.insert(serdes_attr_pair(SAI_PORT_SERDES_ATTR_TX_FIR_POST1, attr_val)); - } - /* Set port serdes post2 */ - else if (fvField(i) == "post2") - { - getPortSerdesVal(fvValue(i), attr_val); - serdes_attr.insert(serdes_attr_pair(SAI_PORT_SERDES_ATTR_TX_FIR_POST2, attr_val)); - } - /* Set port serdes post3 */ - else if (fvField(i) == "post3") - { - getPortSerdesVal(fvValue(i), attr_val); - serdes_attr.insert(serdes_attr_pair(SAI_PORT_SERDES_ATTR_TX_FIR_POST3, attr_val)); - } - /* Set port serdes attn */ - else if (fvField(i) == "attn") - { - getPortSerdesVal(fvValue(i), attr_val); - serdes_attr.insert(serdes_attr_pair(SAI_PORT_SERDES_ATTR_TX_FIR_ATTN, attr_val)); - } + SWSS_LOG_INFO("FIELD: %s, VALUE: %s", fieldName.c_str(), fieldValue.c_str()); - /* Get port role */ - if (fvField(i) == "role") - { - role = fvValue(i); - } + fvMap[fieldName] = fieldValue; } - /* Collect information about all received ports */ - if (lane_set.size()) + pCfg.fieldValueMap = fvMap; + + if (!m_portHlpr.parsePortConfig(pCfg)) { - m_lanesAliasSpeedMap[lane_set] = make_tuple(alias, speed, an, fec_mode, index, role); + it = taskMap.erase(it); + continue; } + /* Collect information about all received ports */ + m_lanesAliasSpeedMap[pCfg.lanes.value] = pCfg; + // TODO: // Fix the issue below // After PortConfigDone, while waiting for "PortInitDone" and the first gBufferOrch->isPortReady(alias), @@ -2775,497 +3479,669 @@ void PortsOrch::doPortTask(Consumer &consumer) * 2. Create new ports * 3. 
Initialize all ports */ - if (m_portConfigState == PORT_CONFIG_RECEIVED || m_portConfigState == PORT_CONFIG_DONE) + if (getPortConfigState() != PORT_CONFIG_MISSING) { + std::vector portsToAddList; + std::vector portsToRemoveList; + + // Port remove comparison logic for (auto it = m_portListLaneMap.begin(); it != m_portListLaneMap.end();) { if (m_lanesAliasSpeedMap.find(it->first) == m_lanesAliasSpeedMap.end()) { - if (SAI_STATUS_SUCCESS != removePort(it->second)) - { - throw runtime_error("PortsOrch initialization failure."); - } + portsToRemoveList.push_back(it->second); it = m_portListLaneMap.erase(it); + continue; } - else + + it++; + } + + // Bulk port remove + if (!portsToRemoveList.empty()) + { + if (!removePortBulk(portsToRemoveList)) { - it++; + SWSS_LOG_THROW("PortsOrch initialization failure"); } } + // Port add comparison logic for (auto it = m_lanesAliasSpeedMap.begin(); it != m_lanesAliasSpeedMap.end();) { if (m_portListLaneMap.find(it->first) == m_portListLaneMap.end()) { - if (!addPort(it->first, get<1>(it->second), get<2>(it->second), get<3>(it->second))) - { - throw runtime_error("PortsOrch initialization failure."); - } + portsToAddList.push_back(it->second); + it++; + continue; } - if (!initPort(get<0>(it->second), get<5>(it->second), get<4>(it->second), it->first)) + if (!initPort(it->second)) { // Failure has been recorded in initPort it++; continue; } - initPortSupportedSpeeds(get<0>(it->second), m_portListLaneMap[it->first]); + initPortSupportedSpeeds(it->second.key, m_portListLaneMap[it->first]); + initPortSupportedFecModes(it->second.key, m_portListLaneMap[it->first]); + it++; } - m_portConfigState = PORT_CONFIG_DONE; + // Bulk port add + if (!portsToAddList.empty()) + { + if (!addPortBulk(portsToAddList)) + { + SWSS_LOG_THROW("PortsOrch initialization failure"); + } + + for (const auto &cit : portsToAddList) + { + if (!initPort(cit)) + { + // Failure has been recorded in initPort + continue; + } + + initPortSupportedSpeeds(cit.key, 
m_portListLaneMap[cit.lanes.value]); + initPortSupportedFecModes(cit.key, m_portListLaneMap[cit.lanes.value]); + } + } + + setPortConfigState(PORT_CONFIG_DONE); } - if (m_portConfigState != PORT_CONFIG_DONE) + if (getPortConfigState() != PORT_CONFIG_DONE) { // Not yet receive PortConfigDone. Save it for future retry it++; continue; } - if (alias == "PortConfigDone") - { - it = consumer.m_toSync.erase(it); - continue; - } - - if (!gBufferOrch->isPortReady(alias)) + if (!gBufferOrch->isPortReady(pCfg.key)) { // buffer configuration hasn't been applied yet. save it for future retry - m_pendingPortSet.emplace(alias); + m_pendingPortSet.emplace(pCfg.key); it++; continue; } else { - m_pendingPortSet.erase(alias); + m_pendingPortSet.erase(pCfg.key); } Port p; - if (!getPort(alias, p)) + if (!getPort(pCfg.key, p)) { - SWSS_LOG_ERROR("Failed to get port id by alias:%s", alias.c_str()); + SWSS_LOG_ERROR("Failed to get port id by alias: %s", pCfg.key.c_str()); } else { - if (!an_str.empty()) - { - if (autoneg_mode_map.find(an_str) == autoneg_mode_map.end()) - { - SWSS_LOG_ERROR("Failed to parse autoneg value: %s", an_str.c_str()); - // Invalid auto negotiation mode configured, don't retry - it = consumer.m_toSync.erase(it); - continue; - } + PortSerdesAttrMap_t serdes_attr; + getPortSerdesAttr(serdes_attr, pCfg); - an = autoneg_mode_map[an_str]; - if (an != p.m_autoneg) + if (pCfg.autoneg.is_set) + { + if (!p.m_an_cfg || p.m_autoneg != pCfg.autoneg.value) { + if (p.m_cap_an < 0) + { + initPortCapAutoNeg(p); + m_portList[p.m_alias] = p; + } + if (p.m_cap_an < 1) + { + SWSS_LOG_ERROR("%s: autoneg is not supported (cap=%d)", p.m_alias.c_str(), p.m_cap_an); + // autoneg is not supported, don't retry + it = taskMap.erase(it); + continue; + } if (p.m_admin_state_up) { /* Bring port down before applying speed */ if (!setPortAdminStatus(p, false)) { - SWSS_LOG_ERROR("Failed to set port %s admin status DOWN to set port autoneg mode", alias.c_str()); + SWSS_LOG_ERROR( + "Failed to set 
port %s admin status DOWN to set port autoneg mode", + p.m_alias.c_str() + ); it++; continue; } p.m_admin_state_up = false; - m_portList[alias] = p; + m_portList[p.m_alias] = p; + } + + auto status = setPortAutoNeg(p, pCfg.autoneg.value); + if (status != task_success) + { + SWSS_LOG_ERROR( + "Failed to set port %s AN from %d to %d", + p.m_alias.c_str(), p.m_autoneg, pCfg.autoneg.value + ); + if (status == task_need_retry) + { + it++; + } + else + { + it = taskMap.erase(it); + } + continue; + } + + p.m_autoneg = pCfg.autoneg.value; + p.m_an_cfg = true; + m_portList[p.m_alias] = p; + m_portStateTable.hdel(p.m_alias, "rmt_adv_speeds"); + updatePortStatePoll(p, PORT_STATE_POLL_AN, pCfg.autoneg.value); + + SWSS_LOG_NOTICE( + "Set port %s autoneg to %s", + p.m_alias.c_str(), m_portHlpr.getAutonegStr(pCfg).c_str() + ); + } + } + + if (pCfg.link_training.is_set) + { + if (!p.m_lt_cfg || ((p.m_link_training != pCfg.link_training.value) && (p.m_type == Port::PHY))) + { + if (p.m_cap_lt < 0) + { + initPortCapLinkTraining(p); + m_portList[p.m_alias] = p; + } + if (p.m_cap_lt < 1) + { + SWSS_LOG_WARN("%s: LT is not supported(cap=%d)", p.m_alias.c_str(), p.m_cap_lt); + // Don't retry + it = taskMap.erase(it); + continue; } - auto status = setPortAutoNeg(p.m_port_id, an); + auto status = setPortLinkTraining(p, pCfg.link_training.value); if (status != task_success) { - SWSS_LOG_ERROR("Failed to set port %s AN from %d to %d", alias.c_str(), p.m_autoneg, an); + SWSS_LOG_ERROR( + "Failed to set port %s LT from %d to %d", + p.m_alias.c_str(), p.m_link_training, pCfg.link_training.value + ); if (status == task_need_retry) { it++; } else { - it = consumer.m_toSync.erase(it); + it = taskMap.erase(it); } continue; } - SWSS_LOG_NOTICE("Set port %s AutoNeg from %d to %d", alias.c_str(), p.m_autoneg, an); - p.m_autoneg = an; - m_portList[alias] = p; + + m_portStateTable.hset(p.m_alias, "link_training_status", m_portHlpr.getLinkTrainingStr(pCfg)); + p.m_link_training = 
pCfg.link_training.value; + p.m_lt_cfg = true; + m_portList[p.m_alias] = p; + updatePortStatePoll(p, PORT_STATE_POLL_LT, pCfg.link_training.value); + + // Restore pre-emphasis when LT is transitioned from ON to OFF + if (!p.m_link_training && serdes_attr.empty()) + { + serdes_attr = p.m_preemphasis; + } + + SWSS_LOG_NOTICE( + "Set port %s link training to %s", + p.m_alias.c_str(), m_portHlpr.getLinkTrainingStr(pCfg).c_str() + ); } } - if (speed != 0) + if (pCfg.speed.is_set) { - if (speed != p.m_speed) + if (p.m_speed != pCfg.speed.value) { - if (!isSpeedSupported(alias, p.m_port_id, speed)) + if (!isSpeedSupported(p.m_alias, p.m_port_id, pCfg.speed.value)) { - SWSS_LOG_ERROR("Unsupported port speed %u", speed); + SWSS_LOG_ERROR( + "Unsupported port %s speed %u", + p.m_alias.c_str(), pCfg.speed.value + ); // Speed not supported, dont retry - it = consumer.m_toSync.erase(it); + it = taskMap.erase(it); continue; } - // for backward compatible, if p.m_autoneg != 1, toggle admin status - if (p.m_admin_state_up && p.m_autoneg != 1) + // for backward compatible, if autoneg is off, toggle admin status + if (p.m_admin_state_up && !p.m_autoneg) { /* Bring port down before applying speed */ if (!setPortAdminStatus(p, false)) { - SWSS_LOG_ERROR("Failed to set port %s admin status DOWN to set speed", alias.c_str()); + SWSS_LOG_ERROR( + "Failed to set port %s admin status DOWN to set speed", + p.m_alias.c_str() + ); it++; continue; } p.m_admin_state_up = false; - m_portList[alias] = p; + m_portList[p.m_alias] = p; } - auto status = setPortSpeed(p, speed); + auto status = setPortSpeed(p, pCfg.speed.value); if (status != task_success) { - SWSS_LOG_ERROR("Failed to set port %s speed from %u to %u", alias.c_str(), p.m_speed, speed); + SWSS_LOG_ERROR( + "Failed to set port %s speed from %u to %u", + p.m_alias.c_str(), p.m_speed, pCfg.speed.value + ); if (status == task_need_retry) { it++; } else { - it = consumer.m_toSync.erase(it); + it = taskMap.erase(it); } continue; } - 
SWSS_LOG_NOTICE("Set port %s speed from %u to %u", alias.c_str(), p.m_speed, speed); - p.m_speed = speed; - m_portList[alias] = p; + p.m_speed = pCfg.speed.value; + m_portList[p.m_alias] = p; + + SWSS_LOG_NOTICE( + "Set port %s speed to %u", + p.m_alias.c_str(), pCfg.speed.value + ); } else { /* Always update Gearbox speed on Gearbox ports */ - setGearboxPortsAttr(p, SAI_PORT_ATTR_SPEED, &speed); + setGearboxPortsAttr(p, SAI_PORT_ATTR_SPEED, &pCfg.speed.value); } } - if (!adv_speeds_str.empty()) + if (pCfg.adv_speeds.is_set) { - boost::to_lower(adv_speeds_str); - if (!getPortAdvSpeedsVal(adv_speeds_str, adv_speeds)) - { - // Invalid advertised speeds configured, dont retry - it = consumer.m_toSync.erase(it); - continue; - } - - if (adv_speeds != p.m_adv_speeds) + if (!p.m_adv_speed_cfg || p.m_adv_speeds != pCfg.adv_speeds.value) { - if (p.m_admin_state_up && p.m_autoneg == 1) + if (p.m_admin_state_up && p.m_autoneg) { /* Bring port down before applying speed */ if (!setPortAdminStatus(p, false)) { - SWSS_LOG_ERROR("Failed to set port %s admin status DOWN to set interface type", alias.c_str()); + SWSS_LOG_ERROR( + "Failed to set port %s admin status DOWN to set interface type", + p.m_alias.c_str() + ); it++; continue; } p.m_admin_state_up = false; - m_portList[alias] = p; + m_portList[p.m_alias] = p; } + auto adv_speeds = swss::join(',', pCfg.adv_speeds.value.begin(), pCfg.adv_speeds.value.end()); auto ori_adv_speeds = swss::join(',', p.m_adv_speeds.begin(), p.m_adv_speeds.end()); - auto status = setPortAdvSpeeds(p.m_port_id, adv_speeds); + auto status = setPortAdvSpeeds(p, pCfg.adv_speeds.value); if (status != task_success) { - - SWSS_LOG_ERROR("Failed to set port %s advertised speed from %s to %s", alias.c_str(), - ori_adv_speeds.c_str(), - adv_speeds_str.c_str()); + SWSS_LOG_ERROR( + "Failed to set port %s advertised speed from %s to %s", + p.m_alias.c_str(), ori_adv_speeds.c_str(), adv_speeds.c_str() + ); if (status == task_need_retry) { it++; } else { - it = 
consumer.m_toSync.erase(it); + it = taskMap.erase(it); } continue; } - SWSS_LOG_NOTICE("Set port %s advertised speed from %s to %s", alias.c_str(), - ori_adv_speeds.c_str(), - adv_speeds_str.c_str()); - p.m_adv_speeds.swap(adv_speeds); - m_portList[alias] = p; + + p.m_adv_speeds = pCfg.adv_speeds.value; + p.m_adv_speed_cfg = true; + m_portList[p.m_alias] = p; + + SWSS_LOG_NOTICE( + "Set port %s advertised speed from %s to %s", + p.m_alias.c_str(), ori_adv_speeds.c_str(), adv_speeds.c_str() + ); } } - if (!interface_type_str.empty()) + if (pCfg.interface_type.is_set) { - boost::to_lower(interface_type_str); - if (!getPortInterfaceTypeVal(interface_type_str, interface_type)) + if (!p.m_intf_cfg || p.m_interface_type != pCfg.interface_type.value) { - // Invalid interface type configured, dont retry - it = consumer.m_toSync.erase(it); - continue; - } - - if (interface_type != p.m_interface_type) - { - if (p.m_admin_state_up && p.m_autoneg == 0) + if (p.m_admin_state_up && !p.m_autoneg) { /* Bring port down before applying speed */ if (!setPortAdminStatus(p, false)) { - SWSS_LOG_ERROR("Failed to set port %s admin status DOWN to set interface type", alias.c_str()); + SWSS_LOG_ERROR( + "Failed to set port %s admin status DOWN to set interface type", + p.m_alias.c_str() + ); it++; continue; } p.m_admin_state_up = false; - m_portList[alias] = p; + m_portList[p.m_alias] = p; } - auto status = setPortInterfaceType(p.m_port_id, interface_type); + auto status = setPortInterfaceType(p, pCfg.interface_type.value); if (status != task_success) { - SWSS_LOG_ERROR("Failed to set port %s interface type to %s", alias.c_str(), interface_type_str.c_str()); + SWSS_LOG_ERROR( + "Failed to set port %s interface type to %s", + p.m_alias.c_str(), m_portHlpr.getPortInterfaceTypeStr(pCfg).c_str() + ); if (status == task_need_retry) { it++; } else { - it = consumer.m_toSync.erase(it); + it = taskMap.erase(it); } continue; } - SWSS_LOG_NOTICE("Set port %s interface type to %s", alias.c_str(), 
interface_type_str.c_str()); - p.m_interface_type = interface_type; - m_portList[alias] = p; + p.m_interface_type = pCfg.interface_type.value; + p.m_intf_cfg = true; + m_portList[p.m_alias] = p; + + SWSS_LOG_NOTICE( + "Set port %s interface type to %s", + p.m_alias.c_str(), m_portHlpr.getPortInterfaceTypeStr(pCfg).c_str() + ); } } - if (!adv_interface_types_str.empty()) + if (pCfg.adv_interface_types.is_set) { - boost::to_lower(adv_interface_types_str); - if (!getPortAdvInterfaceTypesVal(adv_interface_types_str, adv_interface_types)) - { - // Invalid advertised interface types configured, dont retry - it = consumer.m_toSync.erase(it); - continue; - } - - if (adv_interface_types != p.m_adv_interface_types && p.m_autoneg == 1) + if (!p.m_adv_intf_cfg || p.m_adv_interface_types != pCfg.adv_interface_types.value) { - if (p.m_admin_state_up) + if (p.m_admin_state_up && p.m_autoneg) { /* Bring port down before applying speed */ if (!setPortAdminStatus(p, false)) { - SWSS_LOG_ERROR("Failed to set port %s admin status DOWN to set interface type", alias.c_str()); + SWSS_LOG_ERROR( + "Failed to set port %s admin status DOWN to set interface type", + p.m_alias.c_str() + ); it++; continue; } p.m_admin_state_up = false; - m_portList[alias] = p; + m_portList[p.m_alias] = p; } - auto status = setPortAdvInterfaceTypes(p.m_port_id, adv_interface_types); + auto status = setPortAdvInterfaceTypes(p, pCfg.adv_interface_types.value); if (status != task_success) { - SWSS_LOG_ERROR("Failed to set port %s advertised interface type to %s", alias.c_str(), adv_interface_types_str.c_str()); + SWSS_LOG_ERROR( + "Failed to set port %s advertised interface types to %s", + p.m_alias.c_str(), m_portHlpr.getAdvInterfaceTypesStr(pCfg).c_str() + ); if (status == task_need_retry) { it++; } else { - it = consumer.m_toSync.erase(it); + it = taskMap.erase(it); } continue; } - SWSS_LOG_NOTICE("Set port %s advertised interface type to %s", alias.c_str(), adv_interface_types_str.c_str()); - 
p.m_adv_interface_types.swap(adv_interface_types); - m_portList[alias] = p; + p.m_adv_interface_types = pCfg.adv_interface_types.value; + p.m_adv_intf_cfg = true; + m_portList[p.m_alias] = p; + + SWSS_LOG_NOTICE( + "Set port %s advertised interface type to %s", + p.m_alias.c_str(), m_portHlpr.getAdvInterfaceTypesStr(pCfg).c_str() + ); } } - if (mtu != 0 && mtu != p.m_mtu) + if (pCfg.mtu.is_set) { - if (setPortMtu(p.m_port_id, mtu)) + if (p.m_mtu != pCfg.mtu.value) { - p.m_mtu = mtu; - m_portList[alias] = p; - SWSS_LOG_NOTICE("Set port %s MTU to %u", alias.c_str(), mtu); + if (!setPortMtu(p, pCfg.mtu.value)) + { + SWSS_LOG_ERROR( + "Failed to set port %s MTU to %u", + p.m_alias.c_str(), pCfg.mtu.value + ); + it++; + continue; + } + + p.m_mtu = pCfg.mtu.value; + m_portList[p.m_alias] = p; + if (p.m_rif_id) { gIntfsOrch->setRouterIntfsMtu(p); } + // Sub interfaces inherit parent physical port mtu - updateChildPortsMtu(p, mtu); - } - else - { - SWSS_LOG_ERROR("Failed to set port %s MTU to %u", alias.c_str(), mtu); - it++; - continue; + updateChildPortsMtu(p, pCfg.mtu.value); + + SWSS_LOG_NOTICE( + "Set port %s MTU to %u", + p.m_alias.c_str(), pCfg.mtu.value + ); } } - if (tpid != 0 && tpid != p.m_tpid) + if (pCfg.tpid.is_set) { - SWSS_LOG_DEBUG("Set port %s TPID to 0x%x", alias.c_str(), tpid); - if (setPortTpid(p.m_port_id, tpid)) - { - p.m_tpid = tpid; - m_portList[alias] = p; - } - else + if (p.m_tpid != pCfg.tpid.value) { - SWSS_LOG_ERROR("Failed to set port %s TPID to 0x%x", alias.c_str(), tpid); - it++; - continue; + if (!setPortTpid(p, pCfg.tpid.value)) + { + SWSS_LOG_ERROR( + "Failed to set port %s TPID to 0x%x", + p.m_alias.c_str(), pCfg.tpid.value + ); + it++; + continue; + } + + p.m_tpid = pCfg.tpid.value; + m_portList[p.m_alias] = p; + + SWSS_LOG_NOTICE( + "Set port %s TPID to 0x%x", + p.m_alias.c_str(), pCfg.tpid.value + ); } } - if (!fec_mode.empty()) + if (pCfg.fec.is_set) { - if (fec_mode_map.find(fec_mode) != fec_mode_map.end()) + /* reset fec mode upon 
mode change */ + if (!p.m_fec_cfg || p.m_fec_mode != pCfg.fec.value) { - /* reset fec mode upon mode change */ - if (!p.m_fec_cfg || p.m_fec_mode != fec_mode_map[fec_mode]) + if (!isFecModeSupported(p, pCfg.fec.value)) { - if (p.m_admin_state_up) - { - /* Bring port down before applying fec mode*/ - if (!setPortAdminStatus(p, false)) - { - SWSS_LOG_ERROR("Failed to set port %s admin status DOWN to set fec mode", alias.c_str()); - it++; - continue; - } - - p.m_admin_state_up = false; - p.m_fec_mode = fec_mode_map[fec_mode]; - p.m_fec_cfg = true; - - if (setPortFec(p, p.m_fec_mode)) - { - m_portList[alias] = p; - SWSS_LOG_NOTICE("Set port %s fec to %s", alias.c_str(), fec_mode.c_str()); - } - else - { - SWSS_LOG_ERROR("Failed to set port %s fec to %s", alias.c_str(), fec_mode.c_str()); - it++; - continue; - } - } - else + SWSS_LOG_ERROR( + "Unsupported port %s FEC mode %s", + p.m_alias.c_str(), m_portHlpr.getFecStr(pCfg).c_str() + ); + // FEC mode is not supported, don't retry + it = taskMap.erase(it); + continue; + } + + if (p.m_admin_state_up) + { + /* Bring port down before applying fec mode*/ + if (!setPortAdminStatus(p, false)) { - /* Port is already down, setting fec mode*/ - p.m_fec_mode = fec_mode_map[fec_mode]; - p.m_fec_cfg = true; - if (setPortFec(p, p.m_fec_mode)) - { - m_portList[alias] = p; - SWSS_LOG_NOTICE("Set port %s fec to %s", alias.c_str(), fec_mode.c_str()); - } - else - { - SWSS_LOG_ERROR("Failed to set port %s fec to %s", alias.c_str(), fec_mode.c_str()); - it++; - continue; - } + SWSS_LOG_ERROR( + "Failed to set port %s admin status DOWN to set fec mode", + p.m_alias.c_str() + ); + it++; + continue; } + + p.m_admin_state_up = false; + m_portList[p.m_alias] = p; } - } - else - { - SWSS_LOG_ERROR("Unknown fec mode %s", fec_mode.c_str()); + + if (!setPortFec(p, pCfg.fec.value)) + { + SWSS_LOG_ERROR( + "Failed to set port %s FEC mode %s", + p.m_alias.c_str(), m_portHlpr.getFecStr(pCfg).c_str() + ); + it++; + continue; + } + + p.m_fec_mode = 
pCfg.fec.value; + p.m_fec_cfg = true; + m_portList[p.m_alias] = p; + + SWSS_LOG_NOTICE( + "Set port %s FEC mode to %s", + p.m_alias.c_str(), m_portHlpr.getFecStr(pCfg).c_str() + ); } } - if (!learn_mode.empty() && (p.m_learn_mode != learn_mode)) + if (pCfg.learn_mode.is_set) { - if (p.m_bridge_port_id != SAI_NULL_OBJECT_ID) + if (!p.m_lm_cfg || p.m_learn_mode != pCfg.learn_mode.value) { - if(setBridgePortLearnMode(p, learn_mode)) + if(!setBridgePortLearnMode(p, pCfg.learn_mode.value)) { - p.m_learn_mode = learn_mode; - m_portList[alias] = p; - SWSS_LOG_NOTICE("Set port %s learn mode to %s", alias.c_str(), learn_mode.c_str()); - } - else - { - SWSS_LOG_ERROR("Failed to set port %s learn mode to %s", alias.c_str(), learn_mode.c_str()); + SWSS_LOG_ERROR( + "Failed to set port %s learn mode to %s", + p.m_alias.c_str(), m_portHlpr.getLearnModeStr(pCfg).c_str() + ); it++; continue; } - } - else - { - p.m_learn_mode = learn_mode; - m_portList[alias] = p; - SWSS_LOG_NOTICE("Saved to set port %s learn mode %s", alias.c_str(), learn_mode.c_str()); + p.m_learn_mode = pCfg.learn_mode.value; + p.m_lm_cfg = true; + m_portList[p.m_alias] = p; + + SWSS_LOG_NOTICE( + "Set port %s learn mode to %s", + p.m_alias.c_str(), m_portHlpr.getLearnModeStr(pCfg).c_str() + ); } } - if (pfc_asym != "") + if (pCfg.pfc_asym.is_set) { - if (setPortPfcAsym(p, pfc_asym)) - { - SWSS_LOG_NOTICE("Set port %s asymmetric PFC to %s", alias.c_str(), pfc_asym.c_str()); - } - else + if (!p.m_pfc_asym_cfg || p.m_pfc_asym != pCfg.pfc_asym.value) { - SWSS_LOG_ERROR("Failed to set port %s asymmetric PFC to %s", alias.c_str(), pfc_asym.c_str()); - it++; - continue; + if (!setPortPfcAsym(p, pCfg.pfc_asym.value)) + { + SWSS_LOG_ERROR( + "Failed to set port %s asymmetric PFC to %s", + p.m_alias.c_str(), m_portHlpr.getPfcAsymStr(pCfg).c_str() + ); + it++; + continue; + } + + p.m_pfc_asym = pCfg.pfc_asym.value; + p.m_pfc_asym_cfg = true; + m_portList[p.m_alias] = p; + + SWSS_LOG_NOTICE( + "Set port %s asymmetric PFC 
to %s", + p.m_alias.c_str(), m_portHlpr.getPfcAsymStr(pCfg).c_str() + ); } } - if (serdes_attr.size() != 0) + if (!serdes_attr.empty()) { - if (setPortSerdesAttribute(p.m_port_id, serdes_attr)) + if (p.m_link_training) + { + SWSS_LOG_NOTICE("Save port %s preemphasis for LT", p.m_alias.c_str()); + p.m_preemphasis = serdes_attr; + m_portList[p.m_alias] = p; + } + else if (setPortSerdesAttribute(p.m_port_id, gSwitchId, serdes_attr)) { - SWSS_LOG_NOTICE("Set port %s preemphasis is success", alias.c_str()); + SWSS_LOG_NOTICE("Set port %s preemphasis is success", p.m_alias.c_str()); + p.m_preemphasis = serdes_attr; + m_portList[p.m_alias] = p; } else { - SWSS_LOG_ERROR("Failed to set port %s pre-emphasis", alias.c_str()); + SWSS_LOG_ERROR("Failed to set port %s pre-emphasis", p.m_alias.c_str()); it++; continue; } - } + /* create host_tx_ready field in state-db */ + initHostTxReadyState(p); + /* Last step set port admin status */ - if (!admin_status.empty() && (p.m_admin_state_up != (admin_status == "up"))) + if (pCfg.admin_status.is_set) { - if (setPortAdminStatus(p, admin_status == "up")) - { - p.m_admin_state_up = (admin_status == "up"); - m_portList[alias] = p; - SWSS_LOG_NOTICE("Set port %s admin status to %s", alias.c_str(), admin_status.c_str()); - } - else + if (p.m_admin_state_up != pCfg.admin_status.value) { - SWSS_LOG_ERROR("Failed to set port %s admin status to %s", alias.c_str(), admin_status.c_str()); - it++; - continue; + if (!setPortAdminStatus(p, pCfg.admin_status.value)) + { + SWSS_LOG_ERROR( + "Failed to set port %s admin status to %s", + p.m_alias.c_str(), m_portHlpr.getAdminStatusStr(pCfg).c_str() + ); + it++; + continue; + } + + p.m_admin_state_up = pCfg.admin_status.value; + m_portList[p.m_alias] = p; + + SWSS_LOG_NOTICE( + "Set port %s admin status to %s", + p.m_alias.c_str(), m_portHlpr.getAdminStatusStr(pCfg).c_str() + ); } } } } else if (op == DEL_COMMAND) { + Port p; + if (!getPort(pCfg.key, p)) + { + SWSS_LOG_ERROR("Failed to remove port: 
alias %s doesn't exist", pCfg.key.c_str()); + m_portConfigMap.erase(pCfg.key); + it = taskMap.erase(it); + continue; + } + + const auto &alias = pCfg.key; + if (m_port_ref_count[alias] > 0) { SWSS_LOG_WARN("Unable to remove port %s: ref count %u", alias.c_str(), m_port_ref_count[alias]); @@ -3322,7 +4198,11 @@ void PortsOrch::doPortTask(Consumer &consumer) removePortFromPortListMap(port_id); /* Delete port from port list */ + m_portConfigMap.erase(alias); m_portList.erase(alias); + saiOidToAlias.erase(port_id); + + SWSS_LOG_NOTICE("Removed port %s", alias.c_str()); } else { @@ -3570,6 +4450,65 @@ void PortsOrch::doVlanMemberTask(Consumer &consumer) } } +void PortsOrch::doTransceiverInfoTableTask(Consumer &consumer) +{ + /* + the idea is to listen to transceiver info table, and also maintain an internal list of plugged modules. + + */ + SWSS_LOG_ENTER(); + SWSS_LOG_ERROR("NOA inside doTransceiverInfoTableTask"); + string table_name = consumer.getTableName(); + + auto it = consumer.m_toSync.begin(); + while(it != consumer.m_toSync.end()) + { + auto t = it->second; + string alias = kfvKey(t); + string op = kfvOp(t); + + if (op == SET_COMMAND) + { + SWSS_LOG_ERROR("NOA doTransceiverInfoTableTask set command"); + if (m_pluggedModulesPort.find(alias) == m_pluggedModulesPort.end()) + { + m_pluggedModulesPort[alias] = m_portList[alias]; + SWSS_LOG_ERROR("NOA suppose to set host tx signal = true"); // TODO + setSaiHostTxSignal(m_pluggedModulesPort[alias], true); + SWSS_LOG_ERROR("NOA after setSaiHostTxSignal"); + } + } + else if (op == DEL_COMMAND) + { + SWSS_LOG_ERROR("NOA doTransceiverInfoTableTask del command"); + Port p; + if (m_pluggedModulesPort.find(alias) != m_pluggedModulesPort.end()) + { + p = m_pluggedModulesPort[alias]; + m_pluggedModulesPort.erase(alias); + } + setSaiHostTxSignal(p, false); + } + it = consumer.m_toSync.erase(it); + } +} + +bool PortsOrch::setSaiHostTxSignal(Port port, bool enable) +{ + sai_attribute_t attr; + attr.id = 
SAI_PORT_ATTR_HOST_TX_SIGNAL_ENABLE; + attr.value.booldata = enable; + sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &attr); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Could not set port {} attribute {}"); + return false; + } + + return true; +} + void PortsOrch::doLagTask(Consumer &consumer) { SWSS_LOG_ENTER(); @@ -3588,7 +4527,8 @@ void PortsOrch::doLagTask(Consumer &consumer) { // Retrieve attributes uint32_t mtu = 0; - string learn_mode; + string learn_mode_str; + sai_bridge_port_fdb_learning_mode_t learn_mode = SAI_BRIDGE_PORT_FDB_LEARNING_MODE_HW; string operation_status; uint32_t lag_id = 0; int32_t switch_id = -1; @@ -3603,7 +4543,17 @@ void PortsOrch::doLagTask(Consumer &consumer) } else if (fvField(i) == "learn_mode") { - learn_mode = fvValue(i); + learn_mode_str = fvValue(i); + + const auto &cit = learn_mode_map.find(learn_mode_str); + if (cit == learn_mode_map.cend()) + { + SWSS_LOG_ERROR("Invalid MAC learn mode: %s", learn_mode_str.c_str()); + it++; + continue; + } + + learn_mode = cit->second; } else if (fvField(i) == "oper_status") { @@ -3703,7 +4653,7 @@ void PortsOrch::doLagTask(Consumer &consumer) } } - if (!learn_mode.empty() && (l.m_learn_mode != learn_mode)) + if (!learn_mode_str.empty() && (l.m_learn_mode != learn_mode)) { if (l.m_bridge_port_id != SAI_NULL_OBJECT_ID) { @@ -3711,11 +4661,11 @@ void PortsOrch::doLagTask(Consumer &consumer) { l.m_learn_mode = learn_mode; m_portList[alias] = l; - SWSS_LOG_NOTICE("Set port %s learn mode to %s", alias.c_str(), learn_mode.c_str()); + SWSS_LOG_NOTICE("Set port %s learn mode to %s", alias.c_str(), learn_mode_str.c_str()); } else { - SWSS_LOG_ERROR("Failed to set port %s learn mode to %s", alias.c_str(), learn_mode.c_str()); + SWSS_LOG_ERROR("Failed to set port %s learn mode to %s", alias.c_str(), learn_mode_str.c_str()); it++; continue; } @@ -3725,7 +4675,7 @@ void PortsOrch::doLagTask(Consumer &consumer) l.m_learn_mode = learn_mode; m_portList[alias] = l; - 
SWSS_LOG_NOTICE("Saved to set port %s learn mode %s", alias.c_str(), learn_mode.c_str()); + SWSS_LOG_NOTICE("Saved to set port %s learn mode %s", alias.c_str(), learn_mode_str.c_str()); } } } @@ -3844,13 +4794,19 @@ void PortsOrch::doLagMemberTask(Consumer &consumer) continue; } - if (!addLagMember(lag, port, (status == "enabled"))) + if (!addLagMember(lag, port, status)) { it++; continue; } } + if ((gMySwitchType == "voq") && (port.m_type != Port::SYSTEM)) + { + //Sync to SYSTEM_LAG_MEMBER_TABLE of CHASSIS_APP_DB + voqSyncAddLagMember(lag, port, status); + } + /* Sync an enabled member */ if (status == "enabled") { @@ -3924,7 +4880,7 @@ void PortsOrch::doTask() APP_LAG_TABLE_NAME, APP_LAG_MEMBER_TABLE_NAME, APP_VLAN_TABLE_NAME, - APP_VLAN_MEMBER_TABLE_NAME, + APP_VLAN_MEMBER_TABLE_NAME }; for (auto tableName: tableOrder) @@ -3951,7 +4907,12 @@ void PortsOrch::doTask(Consumer &consumer) string table_name = consumer.getTableName(); - if (table_name == APP_PORT_TABLE_NAME) + if (table_name == STATE_TRANSCEIVER_INFO_TABLE_NAME && m_cmisModuleAsicSyncSupported) + { + SWSS_LOG_ERROR("NOA doTask getting into doTransceiverInfoTableTask"); + doTransceiverInfoTableTask(consumer); + } + else if (table_name == APP_PORT_TABLE_NAME) { doPortTask(consumer); } @@ -3982,6 +4943,51 @@ void PortsOrch::doTask(Consumer &consumer) } } +void PortsOrch::initializeVoqs(Port &port) +{ + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + attr.id = SAI_SYSTEM_PORT_ATTR_QOS_NUMBER_OF_VOQS; + sai_status_t status = sai_system_port_api->get_system_port_attribute( + port.m_system_port_oid, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get number of voqs for port %s rv:%d", port.m_alias.c_str(), status); + task_process_status handle_status = handleSaiGetStatus(SAI_API_PORT, status); + if (handle_status != task_process_status::task_success) + { + throw runtime_error("PortsOrch initialization failure."); + } + } + SWSS_LOG_INFO("Get %d voq for port %s", attr.value.u32, 
port.m_alias.c_str()); + + m_port_voq_ids[port.m_alias] = std::vector( attr.value.u32 ); + + if (attr.value.u32 == 0) + { + return; + } + + attr.id = SAI_SYSTEM_PORT_ATTR_QOS_VOQ_LIST; + attr.value.objlist.count = (uint32_t) m_port_voq_ids[port.m_alias].size(); + attr.value.objlist.list = m_port_voq_ids[port.m_alias].data(); + + status = sai_system_port_api->get_system_port_attribute( + port.m_system_port_oid, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get voq list for port %s rv:%d", port.m_alias.c_str(), status); + task_process_status handle_status = handleSaiGetStatus(SAI_API_PORT, status); + if (handle_status != task_process_status::task_success) + { + throw runtime_error("PortsOrch initialization failure."); + } + } + + SWSS_LOG_INFO("Get voqs for port %s", port.m_alias.c_str()); +} + void PortsOrch::initializeQueues(Port &port) { SWSS_LOG_ENTER(); @@ -4026,6 +5032,50 @@ void PortsOrch::initializeQueues(Port &port) SWSS_LOG_INFO("Get queues for port %s", port.m_alias.c_str()); } +void PortsOrch::initializeSchedulerGroups(Port &port) +{ + std::vector scheduler_group_ids; + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_QOS_NUMBER_OF_SCHEDULER_GROUPS; + sai_status_t status = sai_port_api->get_port_attribute(port.m_port_id, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get number of scheduler groups for port:%s", port.m_alias.c_str()); + task_process_status handle_status = handleSaiGetStatus(SAI_API_PORT, status); + if (handle_status != task_process_status::task_success) + { + throw runtime_error("PortsOrch initialization failure."); + } + } + SWSS_LOG_INFO("Got %d number of scheduler groups for port %s", attr.value.u32, port.m_alias.c_str()); + + scheduler_group_ids.resize(attr.value.u32); + + if (attr.value.u32 == 0) + { + return; + } + + attr.id = SAI_PORT_ATTR_QOS_SCHEDULER_GROUP_LIST; + attr.value.objlist.count = (uint32_t)scheduler_group_ids.size(); + 
attr.value.objlist.list = scheduler_group_ids.data(); + + status = sai_port_api->get_port_attribute(port.m_port_id, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get scheduler group list for port %s rv:%d", port.m_alias.c_str(), status); + task_process_status handle_status = handleSaiGetStatus(SAI_API_PORT, status); + if (handle_status != task_process_status::task_success) + { + throw runtime_error("PortsOrch initialization failure."); + } + } + + SWSS_LOG_INFO("Got scheduler groups for port %s", port.m_alias.c_str()); +} + void PortsOrch::initializePriorityGroups(Port &port) { SWSS_LOG_ENTER(); @@ -4045,8 +5095,6 @@ void PortsOrch::initializePriorityGroups(Port &port) SWSS_LOG_INFO("Get %d priority groups for port %s", attr.value.u32, port.m_alias.c_str()); port.m_priority_group_ids.resize(attr.value.u32); - port.m_priority_group_lock.resize(attr.value.u32); - port.m_priority_group_pending_profile.resize(attr.value.u32); if (attr.value.u32 == 0) { @@ -4102,6 +5150,7 @@ bool PortsOrch::initializePort(Port &port) initializePriorityGroups(port); initializeQueues(port); + initializeSchedulerGroups(port); initializePortBufferMaximumParameters(port); /* Create host interface */ @@ -4146,6 +5195,7 @@ bool PortsOrch::initializePort(Port &port) port.m_oper_status = SAI_PORT_OPER_STATUS_DOWN; } + /* initialize port admin status */ if (!getPortAdminStatus(port.m_port_id, port.m_admin_state_up)) { @@ -4154,12 +5204,42 @@ bool PortsOrch::initializePort(Port &port) } /* initialize port admin speed */ - if (!getPortSpeed(port.m_port_id, port.m_speed)) + if (!isAutoNegEnabled(port.m_port_id) && !getPortSpeed(port.m_port_id, port.m_speed)) { SWSS_LOG_ERROR("Failed to get initial port admin speed %d", port.m_speed); return false; } + /* initialize port mtu */ + if (!getPortMtu(port, port.m_mtu)) + { + SWSS_LOG_ERROR("Failed to get initial port mtu %d", port.m_mtu); + } + + /* initialize port host_tx_ready value (only for supporting systems) */ + if 
(m_cmisModuleAsicSyncSupported) + { + bool hostTxReadyVal; + if (!getPortHostTxReady(port, hostTxReadyVal)) + { + // SWSS_LOG_ERROR("Failed to get host_tx_ready value from SAI to Port %d" PRIx64 , port.m_port_id); + SWSS_LOG_ERROR("NOA fail to get host_tx_ready"); + } + /* set value to state DB */ + + string hostTxReadyStr = "false"; + if (hostTxReadyVal) + { + SWSS_LOG_ERROR("NOA host tx ready value from getPortHostTxReady is true"); + hostTxReadyStr = "true"; + } + else + { + SWSS_LOG_ERROR("NOA host tx ready value from getPortHostTxReady is false"); + } + m_portStateTable.hset(port.m_alias, "host_tx_ready", hostTxReadyStr); + } + /* * always initialize Port SAI_HOSTIF_ATTR_OPER_STATUS based on oper_status value in appDB. */ @@ -4198,6 +5278,23 @@ bool PortsOrch::addHostIntfs(Port &port, string alias, sai_object_id_t &host_int attr.value.chardata[SAI_HOSTIF_NAME_SIZE - 1] = '\0'; attrs.push_back(attr); + bool set_hostif_tx_queue = false; + if (gSwitchOrch->querySwitchCapability(SAI_OBJECT_TYPE_HOSTIF, SAI_HOSTIF_ATTR_QUEUE)) + { + set_hostif_tx_queue = true; + } + else + { + SWSS_LOG_WARN("Hostif queue attribute not supported"); + } + + if (set_hostif_tx_queue) + { + attr.id = SAI_HOSTIF_ATTR_QUEUE; + attr.value.u32 = DEFAULT_HOSTIF_TX_QUEUE; + attrs.push_back(attr); + } + sai_status_t status = sai_hostif_api->create_hostif(&host_intfs_id, gSwitchId, (uint32_t)attrs.size(), attrs.data()); if (status != SAI_STATUS_SUCCESS) { @@ -4248,6 +5345,12 @@ bool PortsOrch::addBridgePort(Port &port) return true; } + if (port.m_rif_id != 0) + { + SWSS_LOG_NOTICE("Cannot create bridge port, interface %s is a router port", port.m_alias.c_str()); + return false; + } + sai_attribute_t attr; vector attrs; @@ -4299,15 +5402,7 @@ bool PortsOrch::addBridgePort(Port &port) /* And with hardware FDB learning mode set to HW (explicit default value) */ attr.id = SAI_BRIDGE_PORT_ATTR_FDB_LEARNING_MODE; - auto found = learn_mode_map.find(port.m_learn_mode); - if (found == 
learn_mode_map.end()) - { - attr.value.s32 = SAI_BRIDGE_PORT_FDB_LEARNING_MODE_HW; - } - else - { - attr.value.s32 = found->second; - } + attr.value.s32 = port.m_learn_mode; attrs.push_back(attr); sai_status_t status = sai_bridge_api->create_bridge_port(&port.m_bridge_port_id, gSwitchId, (uint32_t)attrs.size(), attrs.data()); @@ -4399,7 +5494,7 @@ bool PortsOrch::removeBridgePort(Port &port) return true; } -bool PortsOrch::setBridgePortLearnMode(Port &port, string learn_mode) +bool PortsOrch::setBridgePortLearnMode(Port &port, sai_bridge_port_fdb_learning_mode_t learn_mode) { SWSS_LOG_ENTER(); @@ -4408,17 +5503,10 @@ bool PortsOrch::setBridgePortLearnMode(Port &port, string learn_mode) return true; } - auto found = learn_mode_map.find(learn_mode); - if (found == learn_mode_map.end()) - { - SWSS_LOG_ERROR("Incorrect MAC learn mode: %s", learn_mode.c_str()); - return false; - } - /* Set bridge port learning mode */ sai_attribute_t attr; attr.id = SAI_BRIDGE_PORT_ATTR_FDB_LEARNING_MODE; - attr.value.s32 = found->second; + attr.value.s32 = learn_mode; sai_status_t status = sai_bridge_api->set_bridge_port_attribute(port.m_bridge_port_id, &attr); if (status != SAI_STATUS_SUCCESS) @@ -4432,7 +5520,7 @@ bool PortsOrch::setBridgePortLearnMode(Port &port, string learn_mode) } } - SWSS_LOG_NOTICE("Set bridge port %s learning mode %s", port.m_alias.c_str(), learn_mode.c_str()); + SWSS_LOG_NOTICE("Set bridge port %s learning mode %d", port.m_alias.c_str(), learn_mode); return true; } @@ -4483,9 +5571,10 @@ bool PortsOrch::removeVlan(Port vlan) return false for retry */ if (vlan.m_fdb_count > 0) { - SWSS_LOG_NOTICE("VLAN %s still has assiciated FDB entries", vlan.m_alias.c_str()); + SWSS_LOG_NOTICE("VLAN %s still has %d FDB entries", vlan.m_alias.c_str(), vlan.m_fdb_count); return false; } + if (m_port_ref_count[vlan.m_alias] > 0) { SWSS_LOG_ERROR("Failed to remove ref count %d VLAN %s", @@ -4658,7 +5747,11 @@ bool PortsOrch::addVlanFloodGroups(Port &vlan, Port &port, string 
end_point_ip) { SWSS_LOG_ERROR("Failed to set l2mc flood type combined " " to vlan %hu for unknown unicast flooding", vlan.m_vlan_info.vlan_id); - return false; + task_process_status handle_status = handleSaiSetStatus(SAI_API_VLAN, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } } vlan.m_vlan_info.uuc_flood_type = SAI_VLAN_FLOOD_CONTROL_TYPE_COMBINED; } @@ -4673,10 +5766,15 @@ bool PortsOrch::addVlanFloodGroups(Port &vlan, Port &port, string end_point_ip) { SWSS_LOG_ERROR("Failed to set l2mc flood type combined " " to vlan %hu for broadcast flooding", vlan.m_vlan_info.vlan_id); - return false; - } - vlan.m_vlan_info.bc_flood_type = SAI_VLAN_FLOOD_CONTROL_TYPE_COMBINED; - } + task_process_status handle_status = handleSaiSetStatus(SAI_API_VLAN, status); + if (handle_status != task_success) + { + m_portList[vlan.m_alias] = vlan; + return parseHandleSaiStatusFailure(handle_status); + } + } + vlan.m_vlan_info.bc_flood_type = SAI_VLAN_FLOOD_CONTROL_TYPE_COMBINED; + } if (vlan.m_vlan_info.l2mc_group_id == SAI_NULL_OBJECT_ID) { @@ -4684,7 +5782,12 @@ bool PortsOrch::addVlanFloodGroups(Port &vlan, Port &port, string end_point_ip) if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to create l2mc flood group"); - return false; + task_process_status handle_status = handleSaiCreateStatus(SAI_API_L2MC_GROUP, status); + if (handle_status != task_success) + { + m_portList[vlan.m_alias] = vlan; + return parseHandleSaiStatusFailure(handle_status); + } } if (vlan.m_vlan_info.uuc_flood_type == SAI_VLAN_FLOOD_CONTROL_TYPE_COMBINED) @@ -4698,7 +5801,12 @@ bool PortsOrch::addVlanFloodGroups(Port &vlan, Port &port, string end_point_ip) SWSS_LOG_ERROR("Failed to set l2mc group %" PRIx64 " to vlan %hu for unknown unicast flooding", l2mc_group_id, vlan.m_vlan_info.vlan_id); - return false; + task_process_status handle_status = handleSaiSetStatus(SAI_API_VLAN, status); + if (handle_status != task_success) + { + 
m_portList[vlan.m_alias] = vlan; + return parseHandleSaiStatusFailure(handle_status); + } } } if (vlan.m_vlan_info.bc_flood_type == SAI_VLAN_FLOOD_CONTROL_TYPE_COMBINED) @@ -4712,7 +5820,12 @@ bool PortsOrch::addVlanFloodGroups(Port &vlan, Port &port, string end_point_ip) SWSS_LOG_ERROR("Failed to set l2mc group %" PRIx64 " to vlan %hu for broadcast flooding", l2mc_group_id, vlan.m_vlan_info.vlan_id); - return false; + task_process_status handle_status = handleSaiSetStatus(SAI_API_VLAN, status); + if (handle_status != task_success) + { + m_portList[vlan.m_alias] = vlan; + return parseHandleSaiStatusFailure(handle_status); + } } } vlan.m_vlan_info.l2mc_group_id = l2mc_group_id; @@ -4752,11 +5865,19 @@ bool PortsOrch::addVlanFloodGroups(Port &vlan, Port &port, string end_point_ip) { SWSS_LOG_ERROR("Failed to create l2mc group member for adding tunnel %s to vlan %hu", end_point_ip.c_str(), vlan.m_vlan_info.vlan_id); - return false; + task_process_status handle_status = handleSaiCreateStatus(SAI_API_L2MC_GROUP, status); + if (handle_status != task_success) + { + m_portList[vlan.m_alias] = vlan; + return parseHandleSaiStatusFailure(handle_status); + } } vlan.m_vlan_info.l2mc_members[end_point_ip] = l2mc_group_member; m_portList[vlan.m_alias] = vlan; increaseBridgePortRefCount(port); + + VlanMemberUpdate update = { vlan, port, true }; + notify(SUBJECT_TYPE_VLAN_MEMBER_CHANGE, static_cast(&update)); return true; } @@ -4779,7 +5900,11 @@ bool PortsOrch::removeVlanEndPointIp(Port &vlan, Port &port, string end_point_ip { SWSS_LOG_ERROR("Failed to remove end point ip %s from vlan %hu", end_point_ip.c_str(), vlan.m_vlan_info.vlan_id); - return false; + task_process_status handle_status = handleSaiRemoveStatus(SAI_API_L2MC_GROUP, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } } decreaseBridgePortRefCount(port); vlan.m_vlan_info.l2mc_members.erase(end_point_ip); @@ -4799,7 +5924,12 @@ bool 
PortsOrch::removeVlanEndPointIp(Port &vlan, Port &port, string end_point_ip SWSS_LOG_ERROR("Failed to set null l2mc group " " to vlan %hu for unknown unicast flooding", vlan.m_vlan_info.vlan_id); - return false; + task_process_status handle_status = handleSaiSetStatus(SAI_API_VLAN, status); + if (handle_status != task_success) + { + m_portList[vlan.m_alias] = vlan; + return parseHandleSaiStatusFailure(handle_status); + } } attr.id = SAI_VLAN_ATTR_UNKNOWN_UNICAST_FLOOD_CONTROL_TYPE; attr.value.s32 = SAI_VLAN_FLOOD_CONTROL_TYPE_ALL; @@ -4809,7 +5939,12 @@ bool PortsOrch::removeVlanEndPointIp(Port &vlan, Port &port, string end_point_ip SWSS_LOG_ERROR("Failed to set flood control type all" " to vlan %hu for unknown unicast flooding", vlan.m_vlan_info.vlan_id); - return false; + task_process_status handle_status = handleSaiSetStatus(SAI_API_VLAN, status); + if (handle_status != task_success) + { + m_portList[vlan.m_alias] = vlan; + return parseHandleSaiStatusFailure(handle_status); + } } vlan.m_vlan_info.uuc_flood_type = SAI_VLAN_FLOOD_CONTROL_TYPE_ALL; } @@ -4824,7 +5959,12 @@ bool PortsOrch::removeVlanEndPointIp(Port &vlan, Port &port, string end_point_ip SWSS_LOG_ERROR("Failed to set null l2mc group " " to vlan %hu for broadcast flooding", vlan.m_vlan_info.vlan_id); - return false; + task_process_status handle_status = handleSaiSetStatus(SAI_API_VLAN, status); + if (handle_status != task_success) + { + m_portList[vlan.m_alias] = vlan; + return parseHandleSaiStatusFailure(handle_status); + } } attr.id = SAI_VLAN_ATTR_BROADCAST_FLOOD_CONTROL_TYPE; attr.value.s32 = SAI_VLAN_FLOOD_CONTROL_TYPE_ALL; @@ -4834,7 +5974,12 @@ bool PortsOrch::removeVlanEndPointIp(Port &vlan, Port &port, string end_point_ip SWSS_LOG_ERROR("Failed to set flood control type all" " to vlan %hu for broadcast flooding", vlan.m_vlan_info.vlan_id); - return false; + task_process_status handle_status = handleSaiSetStatus(SAI_API_VLAN, status); + if (handle_status != task_success) + { + 
m_portList[vlan.m_alias] = vlan; + return parseHandleSaiStatusFailure(handle_status); + } } vlan.m_vlan_info.bc_flood_type = SAI_VLAN_FLOOD_CONTROL_TYPE_ALL; } @@ -4842,10 +5987,16 @@ bool PortsOrch::removeVlanEndPointIp(Port &vlan, Port &port, string end_point_ip if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to remove l2mc group %" PRIx64, l2mc_group_id); - return false; + task_process_status handle_status = handleSaiRemoveStatus(SAI_API_L2MC_GROUP, status); + if (handle_status != task_success) + { + m_portList[vlan.m_alias] = vlan; + return parseHandleSaiStatusFailure(handle_status); + } } vlan.m_vlan_info.l2mc_group_id = SAI_NULL_OBJECT_ID; } + m_portList[vlan.m_alias] = vlan; return true; } @@ -4927,7 +6078,7 @@ bool PortsOrch::addLag(string lag_alias, uint32_t spa_id, int32_t switch_id) auto lagport = m_portList.find(lag_alias); if (lagport != m_portList.end()) { - /* The deletion of bridgeport attached to the lag may still be + /* The deletion of bridgeport attached to the lag may still be * pending due to fdb entries still present on the lag. Wait * until the cleanup is done. 
*/ @@ -5112,9 +6263,10 @@ void PortsOrch::getLagMember(Port &lag, vector &portv) } } -bool PortsOrch::addLagMember(Port &lag, Port &port, bool enableForwarding) +bool PortsOrch::addLagMember(Port &lag, Port &port, string member_status) { SWSS_LOG_ENTER(); + bool enableForwarding = (member_status == "enabled"); sai_uint32_t pvid; if (getPortPvid(lag, pvid)) @@ -5186,7 +6338,7 @@ bool PortsOrch::addLagMember(Port &lag, Port &port, bool enableForwarding) if (gMySwitchType == "voq") { //Sync to SYSTEM_LAG_MEMBER_TABLE of CHASSIS_APP_DB - voqSyncAddLagMember(lag, port); + voqSyncAddLagMember(lag, port, member_status); } return true; @@ -5274,12 +6426,6 @@ bool PortsOrch::setCollectionOnLagMember(Port &lagMember, bool enableCollection) /* Port must be LAG member */ assert(lagMember.m_lag_member_id); - // Collection is not applicable for system port lag members (i.e, members of remote LAGs) - if (lagMember.m_type == Port::SYSTEM) - { - return true; - } - sai_status_t status = SAI_STATUS_FAILURE; sai_attribute_t attr {}; @@ -5311,12 +6457,6 @@ bool PortsOrch::setDistributionOnLagMember(Port &lagMember, bool enableDistribut /* Port must be LAG member */ assert(lagMember.m_lag_member_id); - // Distribution is not applicable for system port lag members (i.e, members of remote LAGs) - if (lagMember.m_type == Port::SYSTEM) - { - return true; - } - sai_status_t status = SAI_STATUS_FAILURE; sai_attribute_t attr {}; @@ -5351,11 +6491,11 @@ bool PortsOrch::addTunnel(string tunnel_alias, sai_object_id_t tunnel_id, bool h tunnel.m_tunnel_id = tunnel_id; if (hwlearning) { - tunnel.m_learn_mode = "hardware"; + tunnel.m_learn_mode = SAI_BRIDGE_PORT_FDB_LEARNING_MODE_HW; } else { - tunnel.m_learn_mode = "disable"; + tunnel.m_learn_mode = SAI_BRIDGE_PORT_FDB_LEARNING_MODE_DISABLE; } m_portList[tunnel_alias] = tunnel; @@ -5373,7 +6513,7 @@ bool PortsOrch::removeTunnel(Port tunnel) return true; } -void PortsOrch::generateQueueMap() +void PortsOrch::generateQueueMap(map queuesStateVector) { 
if (m_isQueueMapGenerated) { @@ -5384,63 +6524,297 @@ void PortsOrch::generateQueueMap() { if (it.second.m_type == Port::PHY) { - generateQueueMapPerPort(it.second); + if (!queuesStateVector.count(it.second.m_alias)) + { + auto maxQueueNumber = getNumberOfPortSupportedQueueCounters(it.second.m_alias); + FlexCounterQueueStates flexCounterQueueState(maxQueueNumber); + queuesStateVector.insert(make_pair(it.second.m_alias, flexCounterQueueState)); + } + generateQueueMapPerPort(it.second, queuesStateVector.at(it.second.m_alias), false); + if (gMySwitchType == "voq") + { + generateQueueMapPerPort(it.second, queuesStateVector.at(it.second.m_alias), true); + } + } + + if (it.second.m_type == Port::SYSTEM) + { + if (!queuesStateVector.count(it.second.m_alias)) + { + auto maxQueueNumber = getNumberOfPortSupportedQueueCounters(it.second.m_alias); + FlexCounterQueueStates flexCounterQueueState(maxQueueNumber); + queuesStateVector.insert(make_pair(it.second.m_alias, flexCounterQueueState)); + } + generateQueueMapPerPort(it.second, queuesStateVector.at(it.second.m_alias), true); } } m_isQueueMapGenerated = true; } -void PortsOrch::generateQueueMapPerPort(const Port& port) +void PortsOrch::generateQueueMapPerPort(const Port& port, FlexCounterQueueStates& queuesState, bool voq) { /* Create the Queue map in the Counter DB */ - /* Add stat counters to flex_counter */ vector queueVector; vector queuePortVector; vector queueIndexVector; vector queueTypeVector; + std::vector queue_ids; - for (size_t queueIndex = 0; queueIndex < port.m_queue_ids.size(); ++queueIndex) + if (voq) + { + queue_ids = m_port_voq_ids[port.m_alias]; + } + else + { + queue_ids = port.m_queue_ids; + } + + for (size_t queueIndex = 0; queueIndex < queue_ids.size(); ++queueIndex) { std::ostringstream name; - name << port.m_alias << ":" << queueIndex; - const auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]); + if (voq) + { + name << port.m_system_port_info.alias << ":" << queueIndex; + } + else + { + 
name << port.m_alias << ":" << queueIndex; + } - queueVector.emplace_back(name.str(), id); - queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); + const auto id = sai_serialize_object_id(queue_ids[queueIndex]); string queueType; uint8_t queueRealIndex = 0; - if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex)) + if (getQueueTypeAndIndex(queue_ids[queueIndex], queueType, queueRealIndex)) { + /* voq counters are always enabled. There is no mechanism to disable voq + * counters in a voq system. */ + if ((gMySwitchType != "voq") && !queuesState.isQueueCounterEnabled(queueRealIndex)) + { + continue; + } queueTypeVector.emplace_back(id, queueType); queueIndexVector.emplace_back(id, to_string(queueRealIndex)); } - // Install a flex counter for this queue to track stats - std::unordered_set counter_stats; - for (const auto& it: queue_stat_ids) + queueVector.emplace_back(name.str(), id); + if (voq) + { + // Install a flex counter for this voq to track stats. Voq counters do + // not have buffer queue config. So it does not get enabled through the + // flexcounter orch logic. Always enabled voq counters. + addQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, true); + queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_system_port_oid)); + } + else + { + // In voq systems, always install a flex counter for this egress queue + // to track stats. In voq systems, the buffer profiles are defined on + // sysports. So the phy ports do not have buffer queue config. Hence + // queuesStateVector built by getQueueConfigurations in flexcounterorch + // never has phy ports in voq systems. So always enabled egress queue + // counter on voq systems. 
+ if (gMySwitchType == "voq") + { + addQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, false); + } + queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); + } + } + + if (voq) + { + m_voqTable->set("", queueVector); + } + else + { + m_queueTable->set("", queueVector); + CounterCheckOrch::getInstance().addPort(port); + } + m_queuePortTable->set("", queuePortVector); + m_queueIndexTable->set("", queueIndexVector); + m_queueTypeTable->set("", queueTypeVector); + +} + +void PortsOrch::addQueueFlexCounters(map queuesStateVector) +{ + if (m_isQueueFlexCountersAdded) + { + return; + } + + for (const auto& it: m_portList) + { + if (it.second.m_type == Port::PHY) { - counter_stats.emplace(sai_serialize_queue_stat(it)); + if (!queuesStateVector.count(it.second.m_alias)) + { + auto maxQueueNumber = getNumberOfPortSupportedQueueCounters(it.second.m_alias); + FlexCounterQueueStates flexCounterQueueState(maxQueueNumber); + queuesStateVector.insert(make_pair(it.second.m_alias, flexCounterQueueState)); + } + addQueueFlexCountersPerPort(it.second, queuesStateVector.at(it.second.m_alias)); } - queue_stat_manager.setCounterIdList(port.m_queue_ids[queueIndex], CounterType::QUEUE, counter_stats); + } + + m_isQueueFlexCountersAdded = true; +} - /* add watermark queue counters */ - string key = getQueueWatermarkFlexCounterTableKey(id); - string delimiter(""); - std::ostringstream counters_stream; - for (const auto& it: queueWatermarkStatIds) +void PortsOrch::addQueueFlexCountersPerPort(const Port& port, FlexCounterQueueStates& queuesState) +{ + for (size_t queueIndex = 0; queueIndex < port.m_queue_ids.size(); ++queueIndex) + { + string queueType; + uint8_t queueRealIndex = 0; + if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex)) { - counters_stream << delimiter << sai_serialize_queue_stat(it); - delimiter = comma; + if (!queuesState.isQueueCounterEnabled(queueRealIndex)) + { + continue; + } + // Install a flex counter for this 
queue to track stats + addQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, false); } + } +} - vector fieldValues; - fieldValues.emplace_back(QUEUE_COUNTER_ID_LIST, counters_stream.str()); +void PortsOrch::addQueueFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex, bool voq) +{ + std::unordered_set counter_stats; + std::vector queue_ids; + + for (const auto& it: queue_stat_ids) + { + counter_stats.emplace(sai_serialize_queue_stat(it)); + } + if (voq) + { + queue_ids = m_port_voq_ids[port.m_alias]; + } + else + { + queue_ids = port.m_queue_ids; + } + + queue_stat_manager.setCounterIdList(queue_ids[queueIndex], CounterType::QUEUE, counter_stats); +} + + +void PortsOrch::addQueueWatermarkFlexCounters(map queuesStateVector) +{ + if (m_isQueueWatermarkFlexCountersAdded) + { + return; + } + + for (const auto& it: m_portList) + { + if (it.second.m_type == Port::PHY) + { + if (!queuesStateVector.count(it.second.m_alias)) + { + auto maxQueueNumber = getNumberOfPortSupportedQueueCounters(it.second.m_alias); + FlexCounterQueueStates flexCounterQueueState(maxQueueNumber); + queuesStateVector.insert(make_pair(it.second.m_alias, flexCounterQueueState)); + } + addQueueWatermarkFlexCountersPerPort(it.second, queuesStateVector.at(it.second.m_alias)); + } + } + + m_isQueueWatermarkFlexCountersAdded = true; +} + +void PortsOrch::addQueueWatermarkFlexCountersPerPort(const Port& port, FlexCounterQueueStates& queuesState) +{ + /* Add stat counters to flex_counter */ + + for (size_t queueIndex = 0; queueIndex < port.m_queue_ids.size(); ++queueIndex) + { + string queueType; + uint8_t queueRealIndex = 0; + if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex)) + { + if (!queuesState.isQueueCounterEnabled(queueRealIndex)) + { + continue; + } + addQueueWatermarkFlexCountersPerPortPerQueueIndex(port, queueIndex); + } + } +} + +void PortsOrch::addQueueWatermarkFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex) +{ + const 
auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]); + + /* add watermark queue counters */ + string key = getQueueWatermarkFlexCounterTableKey(id); - m_flexCounterTable->set(key, fieldValues); + string delimiter(""); + std::ostringstream counters_stream; + for (const auto& it: queueWatermarkStatIds) + { + counters_stream << delimiter << sai_serialize_queue_stat(it); + delimiter = comma; + } + + vector fieldValues; + fieldValues.emplace_back(QUEUE_COUNTER_ID_LIST, counters_stream.str()); + + m_flexCounterTable->set(key, fieldValues); +} + +void PortsOrch::createPortBufferQueueCounters(const Port &port, string queues) +{ + SWSS_LOG_ENTER(); + + /* Create the Queue map in the Counter DB */ + vector queueVector; + vector queuePortVector; + vector queueIndexVector; + vector queueTypeVector; + + auto toks = tokenize(queues, '-'); + auto startIndex = to_uint(toks[0]); + auto endIndex = startIndex; + if (toks.size() > 1) + { + endIndex = to_uint(toks[1]); + } + + for (auto queueIndex = startIndex; queueIndex <= endIndex; queueIndex++) + { + std::ostringstream name; + name << port.m_alias << ":" << queueIndex; + + const auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]); + + string queueType; + uint8_t queueRealIndex = 0; + if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex)) + { + queueTypeVector.emplace_back(id, queueType); + queueIndexVector.emplace_back(id, to_string(queueRealIndex)); + } + + queueVector.emplace_back(name.str(), id); + queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); + + auto flexCounterOrch = gDirectory.get(); + if (flexCounterOrch->getQueueCountersState()) + { + // Install a flex counter for this queue to track stats + addQueueFlexCountersPerPortPerQueueIndex(port, queueIndex, false); + } + if (flexCounterOrch->getQueueWatermarkCountersState()) + { + /* add watermark queue counters */ + addQueueWatermarkFlexCountersPerPortPerQueueIndex(port, queueIndex); + } } 
m_queueTable->set("", queueVector); @@ -5451,7 +6825,57 @@ void PortsOrch::generateQueueMapPerPort(const Port& port) CounterCheckOrch::getInstance().addPort(port); } -void PortsOrch::generatePriorityGroupMap() +void PortsOrch::removePortBufferQueueCounters(const Port &port, string queues) +{ + SWSS_LOG_ENTER(); + + /* Remove the Queues maps in the Counter DB */ + /* Remove stat counters from flex_counter DB */ + auto toks = tokenize(queues, '-'); + auto startIndex = to_uint(toks[0]); + auto endIndex = startIndex; + if (toks.size() > 1) + { + endIndex = to_uint(toks[1]); + } + + for (auto queueIndex = startIndex; queueIndex <= endIndex; queueIndex++) + { + std::ostringstream name; + name << port.m_alias << ":" << queueIndex; + const auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]); + + // Remove the queue counter from counters DB maps + m_queueTable->hdel("", name.str()); + m_queuePortTable->hdel("", id); + + string queueType; + uint8_t queueRealIndex = 0; + if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex)) + { + m_queueTypeTable->hdel("", id); + m_queueIndexTable->hdel("", id); + } + + auto flexCounterOrch = gDirectory.get(); + if (flexCounterOrch->getQueueCountersState()) + { + // Remove the flex counter for this queue + queue_stat_manager.clearCounterIdList(port.m_queue_ids[queueIndex]); + } + + if (flexCounterOrch->getQueueWatermarkCountersState()) + { + // Remove watermark queue counters + string key = getQueueWatermarkFlexCounterTableKey(id); + m_flexCounterTable->del(key); + } + } + + CounterCheckOrch::getInstance().removePort(port); +} + +void PortsOrch::generatePriorityGroupMap(map pgsStateVector) { if (m_isPriorityGroupMapGenerated) { @@ -5462,23 +6886,32 @@ void PortsOrch::generatePriorityGroupMap() { if (it.second.m_type == Port::PHY) { - generatePriorityGroupMapPerPort(it.second); + if (!pgsStateVector.count(it.second.m_alias)) + { + auto maxPgNumber = getNumberOfPortSupportedPgCounters(it.second.m_alias); 
+ FlexCounterPgStates flexCounterPgState(maxPgNumber); + pgsStateVector.insert(make_pair(it.second.m_alias, flexCounterPgState)); + } + generatePriorityGroupMapPerPort(it.second, pgsStateVector.at(it.second.m_alias)); } } m_isPriorityGroupMapGenerated = true; } -void PortsOrch::generatePriorityGroupMapPerPort(const Port& port) +void PortsOrch::generatePriorityGroupMapPerPort(const Port& port, FlexCounterPgStates& pgsState) { /* Create the PG map in the Counter DB */ - /* Add stat counters to flex_counter */ vector pgVector; vector pgPortVector; vector pgIndexVector; for (size_t pgIndex = 0; pgIndex < port.m_priority_group_ids.size(); ++pgIndex) { + if (!pgsState.isPgCounterEnabled(static_cast(pgIndex))) + { + continue; + } std::ostringstream name; name << port.m_alias << ":" << pgIndex; @@ -5488,43 +6921,221 @@ void PortsOrch::generatePriorityGroupMapPerPort(const Port& port) pgPortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id)); pgIndexVector.emplace_back(id, to_string(pgIndex)); - string key = getPriorityGroupWatermarkFlexCounterTableKey(id); + } + + m_pgTable->set("", pgVector); + m_pgPortTable->set("", pgPortVector); + m_pgIndexTable->set("", pgIndexVector); + + CounterCheckOrch::getInstance().addPort(port); +} + +void PortsOrch::createPortBufferPgCounters(const Port& port, string pgs) +{ + SWSS_LOG_ENTER(); + + /* Create the PG map in the Counter DB */ + /* Add stat counters to flex_counter */ + vector pgVector; + vector pgPortVector; + vector pgIndexVector; + + auto toks = tokenize(pgs, '-'); + auto startIndex = to_uint(toks[0]); + auto endIndex = startIndex; + if (toks.size() > 1) + { + endIndex = to_uint(toks[1]); + } + + for (auto pgIndex = startIndex; pgIndex <= endIndex; pgIndex++) + { + std::ostringstream name; + name << port.m_alias << ":" << pgIndex; + + const auto id = sai_serialize_object_id(port.m_priority_group_ids[pgIndex]); + + pgVector.emplace_back(name.str(), id); + pgPortVector.emplace_back(id, 
sai_serialize_object_id(port.m_port_id)); + pgIndexVector.emplace_back(id, to_string(pgIndex)); - std::string delimiter = ""; - std::ostringstream counters_stream; - /* Add watermark counters to flex_counter */ - for (const auto& it: ingressPriorityGroupWatermarkStatIds) + auto flexCounterOrch = gDirectory.get(); + if (flexCounterOrch->getPgCountersState()) + { + /* Add dropped packets counters to flex_counter */ + addPriorityGroupFlexCountersPerPortPerPgIndex(port, pgIndex); + } + if (flexCounterOrch->getPgWatermarkCountersState()) + { + /* Add watermark counters to flex_counter */ + addPriorityGroupWatermarkFlexCountersPerPortPerPgIndex(port, pgIndex); + } + } + + m_pgTable->set("", pgVector); + m_pgPortTable->set("", pgPortVector); + m_pgIndexTable->set("", pgIndexVector); + + CounterCheckOrch::getInstance().addPort(port); +} + +void PortsOrch::addPriorityGroupFlexCounters(map pgsStateVector) +{ + if (m_isPriorityGroupFlexCountersAdded) + { + return; + } + + for (const auto& it: m_portList) + { + if (it.second.m_type == Port::PHY) + { + if (!pgsStateVector.count(it.second.m_alias)) + { + auto maxPgNumber = getNumberOfPortSupportedPgCounters(it.second.m_alias); + FlexCounterPgStates flexCounterPgState(maxPgNumber); + pgsStateVector.insert(make_pair(it.second.m_alias, flexCounterPgState)); + } + addPriorityGroupFlexCountersPerPort(it.second, pgsStateVector.at(it.second.m_alias)); + } + } + + m_isPriorityGroupFlexCountersAdded = true; +} + +void PortsOrch::addPriorityGroupFlexCountersPerPort(const Port& port, FlexCounterPgStates& pgsState) +{ + for (size_t pgIndex = 0; pgIndex < port.m_priority_group_ids.size(); ++pgIndex) + { + if (!pgsState.isPgCounterEnabled(static_cast(pgIndex))) + { + continue; + } + addPriorityGroupFlexCountersPerPortPerPgIndex(port, pgIndex); + } +} + +void PortsOrch::addPriorityGroupFlexCountersPerPortPerPgIndex(const Port& port, size_t pgIndex) +{ + const auto id = sai_serialize_object_id(port.m_priority_group_ids[pgIndex]); + + string 
delimiter = ""; + std::ostringstream ingress_pg_drop_packets_counters_stream; + string key = getPriorityGroupDropPacketsFlexCounterTableKey(id); + /* Add dropped packets counters to flex_counter */ + for (const auto& it: ingressPriorityGroupDropStatIds) + { + ingress_pg_drop_packets_counters_stream << delimiter << sai_serialize_ingress_priority_group_stat(it); + if (delimiter.empty()) { - counters_stream << delimiter << sai_serialize_ingress_priority_group_stat(it); delimiter = comma; } + } + vector fieldValues; + fieldValues.emplace_back(PG_COUNTER_ID_LIST, ingress_pg_drop_packets_counters_stream.str()); + m_flexCounterTable->set(key, fieldValues); +} - vector fieldValues; - fieldValues.emplace_back(PG_COUNTER_ID_LIST, counters_stream.str()); - m_flexCounterTable->set(key, fieldValues); +void PortsOrch::addPriorityGroupWatermarkFlexCounters(map pgsStateVector) +{ + if (m_isPriorityGroupWatermarkFlexCountersAdded) + { + return; + } - delimiter = ""; - std::ostringstream ingress_pg_drop_packets_counters_stream; - key = getPriorityGroupDropPacketsFlexCounterTableKey(id); - /* Add dropped packets counters to flex_counter */ - for (const auto& it: ingressPriorityGroupDropStatIds) + for (const auto& it: m_portList) + { + if (it.second.m_type == Port::PHY) { - ingress_pg_drop_packets_counters_stream << delimiter << sai_serialize_ingress_priority_group_stat(it); - if (delimiter.empty()) + if (!pgsStateVector.count(it.second.m_alias)) { - delimiter = comma; + auto maxPgNumber = getNumberOfPortSupportedPgCounters(it.second.m_alias); + FlexCounterPgStates flexCounterPgState(maxPgNumber); + pgsStateVector.insert(make_pair(it.second.m_alias, flexCounterPgState)); } + addPriorityGroupWatermarkFlexCountersPerPort(it.second, pgsStateVector.at(it.second.m_alias)); } - fieldValues.clear(); - fieldValues.emplace_back(PG_COUNTER_ID_LIST, ingress_pg_drop_packets_counters_stream.str()); - m_flexCounterTable->set(key, fieldValues); } - m_pgTable->set("", pgVector); - 
m_pgPortTable->set("", pgPortVector); - m_pgIndexTable->set("", pgIndexVector); + m_isPriorityGroupWatermarkFlexCountersAdded = true; +} - CounterCheckOrch::getInstance().addPort(port); +void PortsOrch::addPriorityGroupWatermarkFlexCountersPerPort(const Port& port, FlexCounterPgStates& pgsState) +{ + /* Add stat counters to flex_counter */ + + for (size_t pgIndex = 0; pgIndex < port.m_priority_group_ids.size(); ++pgIndex) + { + if (!pgsState.isPgCounterEnabled(static_cast(pgIndex))) + { + continue; + } + addPriorityGroupWatermarkFlexCountersPerPortPerPgIndex(port, pgIndex); + } +} + +void PortsOrch::addPriorityGroupWatermarkFlexCountersPerPortPerPgIndex(const Port& port, size_t pgIndex) +{ + const auto id = sai_serialize_object_id(port.m_priority_group_ids[pgIndex]); + + string key = getPriorityGroupWatermarkFlexCounterTableKey(id); + + std::string delimiter = ""; + std::ostringstream counters_stream; + /* Add watermark counters to flex_counter */ + for (const auto& it: ingressPriorityGroupWatermarkStatIds) + { + counters_stream << delimiter << sai_serialize_ingress_priority_group_stat(it); + delimiter = comma; + } + + vector fieldValues; + fieldValues.emplace_back(PG_COUNTER_ID_LIST, counters_stream.str()); + m_flexCounterTable->set(key, fieldValues); +} + +void PortsOrch::removePortBufferPgCounters(const Port& port, string pgs) +{ + SWSS_LOG_ENTER(); + + /* Remove the Pgs maps in the Counter DB */ + /* Remove stat counters from flex_counter DB */ + auto toks = tokenize(pgs, '-'); + auto startIndex = to_uint(toks[0]); + auto endIndex = startIndex; + if (toks.size() > 1) + { + endIndex = to_uint(toks[1]); + } + + for (auto pgIndex = startIndex; pgIndex <= endIndex; pgIndex++) + { + std::ostringstream name; + name << port.m_alias << ":" << pgIndex; + const auto id = sai_serialize_object_id(port.m_priority_group_ids[pgIndex]); + + // Remove the pg counter from counters DB maps + m_pgTable->hdel("", name.str()); + m_pgPortTable->hdel("", id); + 
m_pgIndexTable->hdel("", id); + + auto flexCounterOrch = gDirectory.get(); + if (flexCounterOrch->getPgCountersState()) + { + // Remove dropped packets counters from flex_counter + string key = getPriorityGroupDropPacketsFlexCounterTableKey(id); + m_flexCounterTable->del(key); + } + + if (flexCounterOrch->getPgWatermarkCountersState()) + { + // Remove watermark counters from flex_counter + string key = getPriorityGroupWatermarkFlexCounterTableKey(id); + m_flexCounterTable->del(key); + } + } + + CounterCheckOrch::getInstance().removePort(port); } void PortsOrch::generatePortCounterMap() @@ -5535,6 +7146,7 @@ void PortsOrch::generatePortCounterMap() } auto port_counter_stats = generateCounterStats(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP); + auto gbport_counter_stats = generateCounterStats(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, true); for (const auto& it: m_portList) { // Set counter stats only for PHY ports to ensure syncd will not try to query the counter statistics from the HW for non-PHY ports. 
@@ -5542,7 +7154,14 @@ void PortsOrch::generatePortCounterMap() { continue; } - port_stat_manager.setCounterIdList(it.second.m_port_id, CounterType::PORT, port_counter_stats); + port_stat_manager.setCounterIdList(it.second.m_port_id, + CounterType::PORT, port_counter_stats); + if (it.second.m_system_side_id) + gb_port_stat_manager.setCounterIdList(it.second.m_system_side_id, + CounterType::PORT, gbport_counter_stats); + if (it.second.m_line_side_id) + gb_port_stat_manager.setCounterIdList(it.second.m_line_side_id, + CounterType::PORT, gbport_counter_stats); } m_isPortCounterMapGenerated = true; @@ -5569,13 +7188,25 @@ void PortsOrch::generatePortBufferDropCounterMap() m_isPortBufferDropCounterMapGenerated = true; } +uint32_t PortsOrch::getNumberOfPortSupportedPgCounters(string port) +{ + return static_cast(m_portList[port].m_priority_group_ids.size()); +} + +uint32_t PortsOrch::getNumberOfPortSupportedQueueCounters(string port) +{ + return static_cast(m_portList[port].m_queue_ids.size()); +} + void PortsOrch::doTask(NotificationConsumer &consumer) { SWSS_LOG_ENTER(); + SWSS_LOG_ERROR("NOA inside doTask of notification consumer"); /* Wait for all ports to be initialized */ if (!allPortsReady()) { + SWSS_LOG_ERROR("NOA inside doTask of notification consumer - reutrn because ports are not ready"); return; } @@ -5585,13 +7216,15 @@ void PortsOrch::doTask(NotificationConsumer &consumer) consumer.pop(op, data, values); - if (&consumer != m_portStatusNotificationConsumer) + if (&consumer != m_portStatusNotificationConsumer && &consumer != m_portHostTxReadyNotificationConsumer) { + SWSS_LOG_ERROR("NOA inside doTask of notification - consumer is not valid"); return; } - if (op == "port_state_change") + if (&consumer == m_portStatusNotificationConsumer && op == "port_state_change") { + SWSS_LOG_ERROR("NOA inside doTask of notification - port state change notification"); uint32_t count; sai_port_oper_status_notification_t *portoperstatus = nullptr; @@ -5608,7 +7241,7 @@ void 
PortsOrch::doTask(NotificationConsumer &consumer) if (!getPort(id, port)) { - SWSS_LOG_ERROR("Failed to get port object for port id 0x%" PRIx64, id); + SWSS_LOG_NOTICE("Got port state change for port id 0x%" PRIx64 " which does not exist, possibly outdated event", id); continue; } @@ -5621,6 +7254,10 @@ void PortsOrch::doTask(NotificationConsumer &consumer) SWSS_LOG_NOTICE("%s oper speed is %d", port.m_alias.c_str(), speed); updateDbPortOperSpeed(port, speed); } + else + { + updateDbPortOperSpeed(port, 0); + } } /* update m_portList */ @@ -5629,6 +7266,28 @@ void PortsOrch::doTask(NotificationConsumer &consumer) sai_deserialize_free_port_oper_status_ntf(count, portoperstatus); } + else if (&consumer == m_portHostTxReadyNotificationConsumer && op == "port_host_tx_ready") + { + SWSS_LOG_ERROR("NOA inside doTask of notification - port host tx ready change notification!!!!!!!!!!!!!!!!!!"); + sai_object_id_t port_id; + sai_object_id_t switch_id; + // sai_port_host_tx_ready_status_t *host_tx_ready_status = SAI_NULL_OBJECT_ID; + sai_port_host_tx_ready_status_t host_tx_ready_status; + + sai_deserialize_port_host_tx_ready_ntf(data, port_id, switch_id, host_tx_ready_status); + + if (host_tx_ready_status == SAI_PORT_HOST_TX_READY_STATUS_READY) + { + setHostTxReady(port_id, "true"); + } + else if (host_tx_ready_status == SAI_PORT_HOST_TX_READY_STATUS_NOT_READY) + { + setHostTxReady(port_id, "false"); + } + + sai_deserialize_free_port_host_tx_ready_ntf(host_tx_ready_status); + } + } void PortsOrch::updatePortOperStatus(Port &port, sai_port_oper_status_t status) @@ -5644,6 +7303,19 @@ void PortsOrch::updatePortOperStatus(Port &port, sai_port_oper_status_t status) if (port.m_type == Port::PHY) { updateDbPortOperStatus(port, status); + updateGearboxPortOperStatus(port); + + /* Refresh the port states and reschedule the poller tasks */ + if (port.m_autoneg > 0) + { + refreshPortStateAutoNeg(port); + updatePortStatePoll(port, PORT_STATE_POLL_AN, !(status == 
SAI_PORT_OPER_STATUS_UP)); + } + if (port.m_link_training > 0) + { + refreshPortStateLinkTraining(port); + updatePortStatePoll(port, PORT_STATE_POLL_LT, !(status == SAI_PORT_OPER_STATUS_UP)); + } } port.m_oper_status = status; @@ -5682,9 +7354,9 @@ void PortsOrch::updateDbPortOperSpeed(Port &port, sai_uint32_t speed) SWSS_LOG_ENTER(); vector tuples; - FieldValueTuple tuple("speed", to_string(speed)); - tuples.push_back(tuple); - m_portTable->set(port.m_alias, tuples); + string speedStr = speed != 0 ? to_string(speed) : "N/A"; + tuples.emplace_back(std::make_pair("speed", speedStr)); + m_portStateTable.set(port.m_alias, tuples); // We don't set port.m_speed = speed here, because CONFIG_DB still hold the old // value. If we set it here, next time configure any attributes related port will @@ -5731,6 +7403,10 @@ void PortsOrch::refreshPortStatus() SWSS_LOG_INFO("%s oper speed is %d", port.m_alias.c_str(), speed); updateDbPortOperSpeed(port, speed); } + else + { + updateDbPortOperSpeed(port, 0); + } } } } @@ -5795,6 +7471,50 @@ bool PortsOrch::getPortOperSpeed(const Port& port, sai_uint32_t& speed) const return true; } +bool PortsOrch::getPortLinkTrainingRxStatus(const Port &port, sai_port_link_training_rx_status_t &rx_status) +{ + SWSS_LOG_ENTER(); + + if (port.m_type != Port::PHY) + { + return false; + } + + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_LINK_TRAINING_RX_STATUS; + sai_status_t ret = sai_port_api->get_port_attribute(port.m_port_id, 1, &attr); + if (ret != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get LT rx status for %s", port.m_alias.c_str()); + return false; + } + + rx_status = static_cast(attr.value.u32); + return true; +} + +bool PortsOrch::getPortLinkTrainingFailure(const Port &port, sai_port_link_training_failure_status_t &failure) +{ + SWSS_LOG_ENTER(); + + if (port.m_type != Port::PHY) + { + return false; + } + + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_LINK_TRAINING_FAILURE_STATUS; + sai_status_t ret = 
sai_port_api->get_port_attribute(port.m_port_id, 1, &attr); + if (ret != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to get LT failure status for %s", port.m_alias.c_str()); + return false; + } + + failure = static_cast(attr.value.u32); + return true; +} + bool PortsOrch::getSaiAclBindPointType(Port::Type type, sai_acl_bind_point_type_t &sai_acl_bind_type) { @@ -5858,7 +7578,7 @@ bool PortsOrch::removeAclTableGroup(const Port &p) return true; } -bool PortsOrch::setPortSerdesAttribute(sai_object_id_t port_id, +bool PortsOrch::setPortSerdesAttribute(sai_object_id_t port_id, sai_object_id_t switch_id, map> &serdes_attr) { SWSS_LOG_ENTER(); @@ -5910,7 +7630,7 @@ bool PortsOrch::setPortSerdesAttribute(sai_object_id_t port_id, port_serdes_attr.value.u32list.list = it->second.data(); attr_list.emplace_back(port_serdes_attr); } - status = sai_port_api->create_port_serdes(&port_serdes_id, gSwitchId, + status = sai_port_api->create_port_serdes(&port_serdes_id, switch_id, static_cast(serdes_attr.size()+1), attr_list.data()); @@ -5948,6 +7668,7 @@ void PortsOrch::removePortSerdesAttribute(sai_object_id_t port_id) if (port_attr.value.oid != SAI_NULL_OBJECT_ID) { + // SWSS_LOG_ERROR("NOA suppose to do remove_port_serdes now, but removed it"); status = sai_port_api->remove_port_serdes(port_attr.value.oid); if (status != SAI_STATUS_SUCCESS) { @@ -5961,7 +7682,8 @@ void PortsOrch::removePortSerdesAttribute(sai_object_id_t port_id) } void PortsOrch::getPortSerdesVal(const std::string& val_str, - std::vector &lane_values) + std::vector &lane_values, + int base) { SWSS_LOG_ENTER(); @@ -5971,91 +7693,11 @@ void PortsOrch::getPortSerdesVal(const std::string& val_str, while (std::getline(iss, lane_str, ',')) { - lane_val = (uint32_t)std::stoul(lane_str, NULL, 16); + lane_val = (uint32_t)std::stoul(lane_str, NULL, base); lane_values.push_back(lane_val); } } -bool PortsOrch::getPortAdvSpeedsVal(const std::string &val_str, - std::vector &speed_values) -{ - SWSS_LOG_ENTER(); - - if 
(val_str == "all") - { - return true; - } - - uint32_t speed_val; - std::string speed_str; - std::istringstream iss(val_str); - - try - { - while (std::getline(iss, speed_str, ',')) - { - speed_val = (uint32_t)std::stoul(speed_str); - speed_values.push_back(speed_val); - } - } - catch (const std::invalid_argument &e) - { - SWSS_LOG_ERROR("Failed to parse adv_speeds value: %s", val_str.c_str()); - return false; - } - std::sort(speed_values.begin(), speed_values.end()); - return true; -} - -bool PortsOrch::getPortInterfaceTypeVal(const std::string &s, - sai_port_interface_type_t &interface_type) -{ - SWSS_LOG_ENTER(); - - auto iter = interface_type_map_for_an.find(s); - if (iter != interface_type_map_for_an.end()) - { - interface_type = interface_type_map_for_an[s]; - return true; - } - else - { - const std::string &validInterfaceTypes = getValidInterfaceTypes(); - SWSS_LOG_ERROR("Failed to parse interface_type value %s, valid interface type includes: %s", - s.c_str(), validInterfaceTypes.c_str()); - return false; - } -} - -bool PortsOrch::getPortAdvInterfaceTypesVal(const std::string &val_str, - std::vector &type_values) -{ - SWSS_LOG_ENTER(); - if (val_str == "all") - { - return true; - } - - sai_port_interface_type_t interface_type ; - std::string type_str; - std::istringstream iss(val_str); - bool valid; - - while (std::getline(iss, type_str, ',')) - { - valid = getPortInterfaceTypeVal(type_str, interface_type); - if (!valid) { - const std::string &validInterfaceTypes = getValidInterfaceTypes(); - SWSS_LOG_ERROR("Failed to parse adv_interface_types value %s, valid interface type includes: %s", - val_str.c_str(), validInterfaceTypes.c_str()); - return false; - } - type_values.push_back(static_cast(interface_type)); - } - std::sort(type_values.begin(), type_values.end()); - return true; -} - /* Bring up/down Vlan interface associated with L3 VNI*/ bool PortsOrch::updateL3VniStatus(uint16_t vlan_id, bool isUp) { @@ -6122,6 +7764,9 @@ void PortsOrch::initGearbox() 
SWSS_LOG_NOTICE("BOX: m_gearboxInterfaceMap size = %d.", (int) m_gearboxInterfaceMap.size()); SWSS_LOG_NOTICE("BOX: m_gearboxLaneMap size = %d.", (int) m_gearboxLaneMap.size()); SWSS_LOG_NOTICE("BOX: m_gearboxPortMap size = %d.", (int) m_gearboxPortMap.size()); + + m_gb_counter_db = shared_ptr(new DBConnector("GB_COUNTERS_DB", 0)); + m_gbcounterTable = unique_ptr
(new Table(m_gb_counter_db.get(), COUNTERS_PORT_NAME_MAP)); } } @@ -6220,6 +7865,7 @@ bool PortsOrch::initGearboxPort(Port &port) } SWSS_LOG_NOTICE("BOX: Created Gearbox system-side port 0x%" PRIx64 " for alias:%s index:%d", systemPort, port.m_alias.c_str(), port.m_index); + port.m_system_side_id = systemPort; /* Create LINE-SIDE port */ attrs.clear(); @@ -6332,6 +7978,61 @@ bool PortsOrch::initGearboxPort(Port &port) SWSS_LOG_NOTICE("BOX: Connected Gearbox ports; system-side:0x%" PRIx64 " to line-side:0x%" PRIx64, systemPort, linePort); m_gearboxPortListLaneMap[port.m_port_id] = make_tuple(systemPort, linePort); port.m_line_side_id = linePort; + saiOidToAlias[systemPort] = port.m_alias; + saiOidToAlias[linePort] = port.m_alias; + + /* Add gearbox system/line port name map to counter table */ + FieldValueTuple tuple(port.m_alias + "_system", sai_serialize_object_id(systemPort)); + vector fields; + fields.push_back(tuple); + m_gbcounterTable->set("", fields); + + fields[0] = FieldValueTuple(port.m_alias + "_line", sai_serialize_object_id(linePort)); + m_gbcounterTable->set("", fields); + + /* Set serdes tx taps on system and line side */ + map> serdes_attr; + typedef pair> serdes_attr_pair; + vector attr_val; + for (auto pair: tx_fir_strings_system_side) { + if (m_gearboxInterfaceMap[port.m_index].tx_firs.find(pair.first) != m_gearboxInterfaceMap[port.m_index].tx_firs.end() ) { + attr_val.clear(); + getPortSerdesVal(m_gearboxInterfaceMap[port.m_index].tx_firs[pair.first], attr_val, 10); + serdes_attr.insert(serdes_attr_pair(pair.second, attr_val)); + } + } + if (serdes_attr.size() != 0) + { + if (setPortSerdesAttribute(systemPort, phyOid, serdes_attr)) + { + SWSS_LOG_NOTICE("Set port %s system side preemphasis is success", port.m_alias.c_str()); + } + else + { + SWSS_LOG_ERROR("Failed to set port %s system side pre-emphasis", port.m_alias.c_str()); + return false; + } + } + serdes_attr.clear(); + for (auto pair: tx_fir_strings_line_side) { + if 
(m_gearboxInterfaceMap[port.m_index].tx_firs.find(pair.first) != m_gearboxInterfaceMap[port.m_index].tx_firs.end() ) { + attr_val.clear(); + getPortSerdesVal(m_gearboxInterfaceMap[port.m_index].tx_firs[pair.first], attr_val, 10); + serdes_attr.insert(serdes_attr_pair(pair.second, attr_val)); + } + } + if (serdes_attr.size() != 0) + { + if (setPortSerdesAttribute(linePort, phyOid, serdes_attr)) + { + SWSS_LOG_NOTICE("Set port %s line side preemphasis is success", port.m_alias.c_str()); + } + else + { + SWSS_LOG_ERROR("Failed to set port %s line side pre-emphasis", port.m_alias.c_str()); + return false; + } + } } } @@ -6475,7 +8176,7 @@ bool PortsOrch::getSystemPorts() return true; } -bool PortsOrch::getRecircPort(Port &port, string role) +bool PortsOrch::getRecircPort(Port &port, Port::Role role) { for (auto it = m_recircPortRole.begin(); it != m_recircPortRole.end(); it++) { @@ -6484,7 +8185,12 @@ bool PortsOrch::getRecircPort(Port &port, string role) return getPort(it->first, port); } } - SWSS_LOG_ERROR("Failed to find recirc port with role %s", role.c_str()); + + SWSS_LOG_ERROR( + "Failed to find recirc port %s with role %d", + port.m_alias.c_str(), static_cast(role) + ); + return false; } @@ -6612,7 +8318,14 @@ bool PortsOrch::addSystemPorts() port.m_system_port_info.speed = attrs[1].value.sysportconfig.speed; port.m_system_port_info.num_voq = attrs[1].value.sysportconfig.num_voq; + initializeVoqs( port ); setPort(port.m_alias, port); + /* Add system port name map to counter table */ + FieldValueTuple tuple(port.m_system_port_info.alias, + sai_serialize_object_id(system_port_oid)); + vector fields; + fields.push_back(tuple); + m_counterSysPortTable->set("", fields); if(m_port_ref_count.find(port.m_alias) == m_port_ref_count.end()) { m_port_ref_count[port.m_alias] = 0; @@ -6719,7 +8432,7 @@ void PortsOrch::voqSyncDelLag(Port &lag) m_tableVoqSystemLagTable->del(key); } -void PortsOrch::voqSyncAddLagMember(Port &lag, Port &port) +void 
PortsOrch::voqSyncAddLagMember(Port &lag, Port &port, string status) { // Sync only local lag's member add to CHASSIS_APP_DB if (lag.m_system_lag_info.switch_id != gVoqMySwitchId) @@ -6728,8 +8441,8 @@ void PortsOrch::voqSyncAddLagMember(Port &lag, Port &port) } vector attrs; - FieldValueTuple nullFv ("NULL", "NULL"); - attrs.push_back(nullFv); + FieldValueTuple statusFv ("status", status); + attrs.push_back(statusFv); string key = lag.m_system_lag_info.alias + ":" + port.m_system_port_info.alias; m_tableVoqSystemLagMemberTable->set(key, attrs); @@ -6747,12 +8460,13 @@ void PortsOrch::voqSyncDelLagMember(Port &lag, Port &port) m_tableVoqSystemLagMemberTable->del(key); } -std::unordered_set PortsOrch::generateCounterStats(const string& type) +std::unordered_set PortsOrch::generateCounterStats(const string& type, bool gearbox) { std::unordered_set counter_stats; if (type == PORT_STAT_COUNTER_FLEX_COUNTER_GROUP) { - for (const auto& it: port_stat_ids) + auto& stat_ids = gearbox ? gbport_stat_ids : port_stat_ids; + for (const auto& it: stat_ids) { counter_stats.emplace(sai_serialize_port_stat(it)); } @@ -6766,3 +8480,216 @@ std::unordered_set PortsOrch::generateCounterStats(const string& ty } return counter_stats; } + +void PortsOrch::updateGearboxPortOperStatus(const Port& port) +{ + if (!isGearboxEnabled()) + return; + + SWSS_LOG_NOTICE("BOX: port %s, system_side_id:0x%" PRIx64 "line_side_id:0x%" PRIx64, + port.m_alias.c_str(), port.m_system_side_id, port.m_line_side_id); + + if (!port.m_system_side_id || !port.m_line_side_id) + return; + + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_OPER_STATUS; + sai_status_t ret = sai_port_api->get_port_attribute(port.m_system_side_id, 1, &attr); + if (ret != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("BOX: Failed to get system_oper_status for %s", port.m_alias.c_str()); + } + else + { + sai_port_oper_status_t oper = static_cast(attr.value.u32); + vector tuples; + FieldValueTuple tuple("system_oper_status", 
oper_status_strings.at(oper)); + tuples.push_back(tuple); + m_portTable->set(port.m_alias, tuples); + } + + attr.id = SAI_PORT_ATTR_OPER_STATUS; + ret = sai_port_api->get_port_attribute(port.m_line_side_id, 1, &attr); + if (ret != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("BOX: Failed to get line_oper_status for %s", port.m_alias.c_str()); + } + else + { + sai_port_oper_status_t oper = static_cast(attr.value.u32); + vector tuples; + FieldValueTuple tuple("line_oper_status", oper_status_strings.at(oper)); + tuples.push_back(tuple); + m_portTable->set(port.m_alias, tuples); + } +} + +bool PortsOrch::decrFdbCount(const std::string& alias, int count) +{ + auto itr = m_portList.find(alias); + if (itr == m_portList.end()) + { + return false; + } + else + { + itr->second.m_fdb_count -= count; + } + return true; +} + +void PortsOrch::setMACsecEnabledState(sai_object_id_t port_id, bool enabled) +{ + SWSS_LOG_ENTER(); + + Port p; + if (!getPort(port_id, p)) + { + SWSS_LOG_ERROR("Failed to get port object for port id 0x%" PRIx64, port_id); + return; + } + + if (enabled) + { + m_macsecEnabledPorts.insert(port_id); + } + else + { + m_macsecEnabledPorts.erase(port_id); + } + + if (p.m_mtu) + { + setPortMtu(p, p.m_mtu); + } +} + +bool PortsOrch::isMACsecPort(sai_object_id_t port_id) const +{ + SWSS_LOG_ENTER(); + + return m_macsecEnabledPorts.find(port_id) != m_macsecEnabledPorts.end(); +} + +vector PortsOrch::getPortVoQIds(Port& port) +{ + SWSS_LOG_ENTER(); + + return m_port_voq_ids[port.m_alias]; +} + +/* Refresh the per-port Auto-Negotiation operational states */ +void PortsOrch::refreshPortStateAutoNeg(const Port &port) +{ + SWSS_LOG_ENTER(); + + if (port.m_type != Port::Type::PHY) + { + return; + } + + string adv_speeds = "N/A"; + + if (port.m_admin_state_up) + { + if (!getPortAdvSpeeds(port, true, adv_speeds)) + { + adv_speeds = "N/A"; + updatePortStatePoll(port, PORT_STATE_POLL_AN, false); + } + } + + m_portStateTable.hset(port.m_alias, "rmt_adv_speeds", adv_speeds); +} + 
+/* Refresh the per-port Link-Training operational states */ +void PortsOrch::refreshPortStateLinkTraining(const Port &port) +{ + SWSS_LOG_ENTER(); + + if (port.m_type != Port::Type::PHY) + { + return; + } + + string status = "off"; + + if (port.m_admin_state_up && port.m_link_training > 0 && port.m_cap_lt > 0) + { + sai_port_link_training_rx_status_t rx_status; + sai_port_link_training_failure_status_t failure; + + if (!getPortLinkTrainingRxStatus(port, rx_status)) + { + status = "on"; // LT is enabled, while the rx status is unavailable + } + else if (rx_status == SAI_PORT_LINK_TRAINING_RX_STATUS_TRAINED) + { + status = link_training_rx_status_map.at(rx_status); + } + else + { + if (getPortLinkTrainingFailure(port, failure) && + failure != SAI_PORT_LINK_TRAINING_FAILURE_STATUS_NO_ERROR) + { + status = link_training_failure_map.at(failure); + } + else + { + status = link_training_rx_status_map.at(rx_status); + } + } + } + + m_portStateTable.hset(port.m_alias, "link_training_status", status); +} + +/* Activate/De-activate a specific port state poller task */ +void PortsOrch::updatePortStatePoll(const Port &port, port_state_poll_t type, bool active) +{ + if (type == PORT_STATE_POLL_NONE) + { + return; + } + if (active) + { + m_port_state_poll[port.m_alias] |= type; + m_port_state_poller->start(); + } + else + { + m_port_state_poll[port.m_alias] &= ~type; + } +} + +void PortsOrch::doTask(swss::SelectableTimer &timer) +{ + Port port; + + for (auto it = m_port_state_poll.begin(); it != m_port_state_poll.end(); ) + { + if ((it->second == PORT_STATE_POLL_NONE) || !getPort(it->first, port)) + { + it = m_port_state_poll.erase(it); + continue; + } + if (!port.m_admin_state_up) + { + ++it; + continue; + } + if (it->second & PORT_STATE_POLL_AN) + { + refreshPortStateAutoNeg(port); + } + if (it->second & PORT_STATE_POLL_LT) + { + refreshPortStateLinkTraining(port); + } + ++it; + } + if (m_port_state_poll.size() == 0) + { + m_port_state_poller->stop(); + } +} diff --git 
a/orchagent/portsorch.h b/orchagent/portsorch.h index 843ffbd7f1..0556007c48 100755 --- a/orchagent/portsorch.h +++ b/orchagent/portsorch.h @@ -14,10 +14,13 @@ #include "saihelper.h" #include "lagid.h" #include "flexcounterorch.h" +#include "events.h" +#include "port/porthlpr.h" #define FCS_LEN 4 #define VLAN_TAG_LEN 4 +#define MAX_MACSEC_SECTAG_SIZE 32 #define PORT_STAT_COUNTER_FLEX_COUNTER_GROUP "PORT_STAT_COUNTER" #define PORT_RATE_COUNTER_FLEX_COUNTER_GROUP "PORT_RATE_COUNTER" #define PORT_BUFFER_DROP_STAT_FLEX_COUNTER_GROUP "PORT_BUFFER_DROP_STAT" @@ -27,6 +30,7 @@ #define PG_DROP_STAT_COUNTER_FLEX_COUNTER_GROUP "PG_DROP_STAT_COUNTER" typedef std::vector PortSupportedSpeeds; +typedef std::set PortSupportedFecModes; static const map oper_status_strings = { @@ -46,6 +50,28 @@ static const unordered_map string_oper_status = { "not present", SAI_PORT_OPER_STATUS_NOT_PRESENT } }; +static const std::map tx_fir_strings_system_side = +{ + {"system_tx_fir_pre1", SAI_PORT_SERDES_ATTR_TX_FIR_PRE1}, + {"system_tx_fir_pre2", SAI_PORT_SERDES_ATTR_TX_FIR_PRE2}, + {"system_tx_fir_pre3", SAI_PORT_SERDES_ATTR_TX_FIR_PRE3}, + {"system_tx_fir_post1", SAI_PORT_SERDES_ATTR_TX_FIR_POST1}, + {"system_tx_fir_post2", SAI_PORT_SERDES_ATTR_TX_FIR_POST2}, + {"system_tx_fir_post3", SAI_PORT_SERDES_ATTR_TX_FIR_POST3}, + {"system_tx_fir_main", SAI_PORT_SERDES_ATTR_TX_FIR_MAIN} +}; + +static const std::map tx_fir_strings_line_side = +{ + {"line_tx_fir_pre1", SAI_PORT_SERDES_ATTR_TX_FIR_PRE1}, + {"line_tx_fir_pre2", SAI_PORT_SERDES_ATTR_TX_FIR_PRE2}, + {"line_tx_fir_pre3", SAI_PORT_SERDES_ATTR_TX_FIR_PRE3}, + {"line_tx_fir_post1", SAI_PORT_SERDES_ATTR_TX_FIR_POST1}, + {"line_tx_fir_post2", SAI_PORT_SERDES_ATTR_TX_FIR_POST2}, + {"line_tx_fir_post3", SAI_PORT_SERDES_ATTR_TX_FIR_POST3}, + {"line_tx_fir_main", SAI_PORT_SERDES_ATTR_TX_FIR_MAIN} +}; + struct PortUpdate { Port port; @@ -72,14 +98,33 @@ struct VlanMemberUpdate bool add; }; +struct queueInfo +{ + // SAI_QUEUE_ATTR_TYPE + 
sai_queue_type_t type; + // SAI_QUEUE_ATTR_INDEX + sai_uint8_t index; +}; + +template +struct PortCapability +{ + bool supported = false; + T data; +}; + +typedef PortCapability PortFecModeCapability_t; + class PortsOrch : public Orch, public Subject { public: PortsOrch(DBConnector *db, DBConnector *stateDb, vector &tableNames, DBConnector *chassisAppDb); + // PortsOrch(DBConnector *db, DBConnector *stateDb, vector &tables, DBConnector *chassisAppDb); bool allPortsReady(); bool isInitDone(); bool isConfigDone(); + bool isGearboxEnabled(); bool isPortAdminUp(const string &alias); map& getAllPorts(); @@ -94,6 +139,7 @@ class PortsOrch : public Orch, public Subject bool getPortByBridgePortId(sai_object_id_t bridge_port_id, Port &port); void setPort(string alias, Port port); void getCpuPort(Port &port); + void initHostTxReadyState(Port &port); bool getInbandPort(Port &port); bool getVlanByVlanId(sai_vlan_id_t vlan_id, Port &vlan); @@ -124,8 +170,23 @@ class PortsOrch : public Orch, public Subject bool getPortPfc(sai_object_id_t portId, uint8_t *pfc_bitmask); bool setPortPfc(sai_object_id_t portId, uint8_t pfc_bitmask); - void generateQueueMap(); - void generatePriorityGroupMap(); + bool setPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t pfc_bitmask); + bool getPortPfcWatchdogStatus(sai_object_id_t portId, uint8_t *pfc_bitmask); + + void generateQueueMap(map queuesStateVector); + uint32_t getNumberOfPortSupportedQueueCounters(string port); + void createPortBufferQueueCounters(const Port &port, string queues); + void removePortBufferQueueCounters(const Port &port, string queues); + void addQueueFlexCounters(map queuesStateVector); + void addQueueWatermarkFlexCounters(map queuesStateVector); + + void generatePriorityGroupMap(map pgsStateVector); + uint32_t getNumberOfPortSupportedPgCounters(string port); + void createPortBufferPgCounters(const Port &port, string pgs); + void removePortBufferPgCounters(const Port& port, string pgs); + void 
addPriorityGroupFlexCounters(map pgsStateVector); + void addPriorityGroupWatermarkFlexCounters(map pgsStateVector); + void generatePortCounterMap(); void generatePortBufferDropCounterMap(); @@ -156,7 +217,7 @@ class PortsOrch : public Orch, public Subject bool setVoqInbandIntf(string &alias, string &type); bool getPortVlanMembers(Port &port, vlan_members_t &vlan_members); - bool getRecircPort(Port &p, string role); + bool getRecircPort(Port &p, Port::Role role); const gearbox_phy_t* getGearboxPhy(const Port &port); @@ -165,12 +226,22 @@ class PortsOrch : public Orch, public Subject bool getPortOperStatus(const Port& port, sai_port_oper_status_t& status) const; + void updateGearboxPortOperStatus(const Port& port); + + bool decrFdbCount(const string& alias, int count); + + void setMACsecEnabledState(sai_object_id_t port_id, bool enabled); + bool isMACsecPort(sai_object_id_t port_id) const; + vector getPortVoQIds(Port& port); + private: unique_ptr
m_counterTable; + unique_ptr
m_counterSysPortTable; unique_ptr
m_counterLagTable; unique_ptr
m_portTable; unique_ptr
m_gearboxTable; unique_ptr
m_queueTable; + unique_ptr
m_voqTable; unique_ptr
m_queuePortTable; unique_ptr
m_queueIndexTable; unique_ptr
m_queueTypeTable; @@ -195,7 +266,14 @@ class PortsOrch : public Orch, public Subject FlexCounterManager port_buffer_drop_stat_manager; FlexCounterManager queue_stat_manager; + FlexCounterManager gb_port_stat_manager; + shared_ptr m_gb_counter_db; + unique_ptr
m_gbcounterTable; + + // Supported speeds on the system side. std::map m_portSupportedSpeeds; + // Supported FEC modes on the system side. + std::map m_portSupportedFecModes; bool m_initDone = false; Port m_cpuPort; @@ -226,16 +304,18 @@ class PortsOrch : public Orch, public Subject port_config_state_t m_portConfigState = PORT_CONFIG_MISSING; sai_uint32_t m_portCount; - map, sai_object_id_t> m_portListLaneMap; - map, tuple> m_lanesAliasSpeedMap; + map, sai_object_id_t> m_portListLaneMap; + map, PortConfig> m_lanesAliasSpeedMap; map m_portList; + map m_pluggedModulesPort; map m_portVlanMember; + map> m_port_voq_ids; /* mapping from SAI object ID to Name for faster * retrieval of Port/VLAN from object ID for events * coming from SAI */ unordered_map saiOidToAlias; - unordered_map m_portOidToIndex; + unordered_map m_portOidToIndex; map m_port_ref_count; unordered_set m_pendingPortSet; const uint32_t max_flood_control_types = 4; @@ -244,6 +324,13 @@ class PortsOrch : public Orch, public Subject map m_bridge_port_ref_count; NotificationConsumer* m_portStatusNotificationConsumer; + NotificationConsumer* m_portHostTxReadyNotificationConsumer; + + swss::SelectableTimer *m_port_state_poller = nullptr; + + bool m_saiHwTxSignalSupported = false; + bool m_saiTxReadyNotifySupported = false; + bool m_cmisModuleAsicSyncSupported = false; void doTask() override; void doTask(Consumer &consumer); @@ -252,8 +339,10 @@ class PortsOrch : public Orch, public Subject void doVlanMemberTask(Consumer &consumer); void doLagTask(Consumer &consumer); void doLagMemberTask(Consumer &consumer); + void doTransceiverInfoTableTask(Consumer &consumer); void doTask(NotificationConsumer &consumer); + void doTask(swss::SelectableTimer &timer); void removePortFromLanesMap(string alias); void removePortFromPortListMap(sai_object_id_t port_id); @@ -264,11 +353,13 @@ class PortsOrch : public Orch, public Subject void initializePriorityGroups(Port &port); void initializePortBufferMaximumParameters(Port 
&port); void initializeQueues(Port &port); + void initializeSchedulerGroups(Port &port); + void initializeVoqs(Port &port); bool addHostIntfs(Port &port, string alias, sai_object_id_t &host_intfs_id); bool setHostIntfsStripTag(Port &port, sai_hostif_vlan_tag_t strip); - bool setBridgePortLearnMode(Port &port, string learn_mode); + bool setBridgePortLearnMode(Port &port, sai_bridge_port_fdb_learning_mode_t learn_mode); bool addVlan(string vlan); bool removeVlan(Port vlan); @@ -276,65 +367,105 @@ class PortsOrch : public Orch, public Subject bool addLag(string lag, uint32_t spa_id, int32_t switch_id); bool removeLag(Port lag); bool setLagTpid(sai_object_id_t id, sai_uint16_t tpid); - bool addLagMember(Port &lag, Port &port, bool enableForwarding); + bool addLagMember(Port &lag, Port &port, string status); bool removeLagMember(Port &lag, Port &port); bool setCollectionOnLagMember(Port &lagMember, bool enableCollection); bool setDistributionOnLagMember(Port &lagMember, bool enableDistribution); - bool addPort(const set &lane_set, uint32_t speed, int an=0, string fec=""); + bool addPort(const PortConfig &port); sai_status_t removePort(sai_object_id_t port_id); - bool initPort(const string &alias, const string &role, const int index, const set &lane_set); + bool initPort(const PortConfig &port); void deInitPort(string alias, sai_object_id_t port_id); + void initPortCapAutoNeg(Port &port); + void initPortCapLinkTraining(Port &port); + bool setPortAdminStatus(Port &port, bool up); bool getPortAdminStatus(sai_object_id_t id, bool& up); - bool setPortMtu(sai_object_id_t id, sai_uint32_t mtu); - bool setPortTpid(sai_object_id_t id, sai_uint16_t tpid); + bool getPortMtu(const Port& port, sai_uint32_t &mtu); + bool getPortHostTxReady(const Port& port, bool &hostTxReadyVal); + bool setPortMtu(const Port& port, sai_uint32_t mtu); + bool setPortTpid(Port &port, sai_uint16_t tpid); bool setPortPvid (Port &port, sai_uint32_t pvid); bool getPortPvid(Port &port, sai_uint32_t &pvid); - 
bool setPortFec(Port &port, sai_port_fec_mode_t mode); - bool setPortPfcAsym(Port &port, string pfc_asym); + bool setPortFec(Port &port, sai_port_fec_mode_t fec_mode); + bool setPortPfcAsym(Port &port, sai_port_priority_flow_control_mode_t pfc_asym); bool getDestPortId(sai_object_id_t src_port_id, dest_port_type_t port_type, sai_object_id_t &des_port_id); bool setBridgePortAdminStatus(sai_object_id_t id, bool up); + bool setSaiHostTxSignal(Port port, bool enable); + + void setHostTxReady(sai_object_id_t portId, std::string status); + // Get supported speeds on system side bool isSpeedSupported(const std::string& alias, sai_object_id_t port_id, sai_uint32_t speed); void getPortSupportedSpeeds(const std::string& alias, sai_object_id_t port_id, PortSupportedSpeeds &supported_speeds); void initPortSupportedSpeeds(const std::string& alias, sai_object_id_t port_id); + // Get supported FEC modes on system side + bool isFecModeSupported(const Port &port, sai_port_fec_mode_t fec_mode); + sai_status_t getPortSupportedFecModes(PortSupportedFecModes &supported_fecmodes, sai_object_id_t port_id); + void initPortSupportedFecModes(const std::string& alias, sai_object_id_t port_id); task_process_status setPortSpeed(Port &port, sai_uint32_t speed); bool getPortSpeed(sai_object_id_t id, sai_uint32_t &speed); - bool setGearboxPortsAttr(Port &port, sai_port_attr_t id, void *value); - bool setGearboxPortAttr(Port &port, dest_port_type_t port_type, sai_port_attr_t id, void *value); + bool setGearboxPortsAttr(const Port &port, sai_port_attr_t id, void *value); + bool setGearboxPortAttr(const Port &port, dest_port_type_t port_type, sai_port_attr_t id, void *value); - task_process_status setPortAdvSpeeds(sai_object_id_t port_id, std::vector& speed_list); + bool getPortAdvSpeeds(const Port& port, bool remote, std::vector& speed_list); + bool getPortAdvSpeeds(const Port& port, bool remote, string& adv_speeds); + task_process_status setPortAdvSpeeds(Port &port, std::set &speed_list); bool 
getQueueTypeAndIndex(sai_object_id_t queue_id, string &type, uint8_t &index); bool m_isQueueMapGenerated = false; - void generateQueueMapPerPort(const Port& port); + void generateQueueMapPerPort(const Port& port, FlexCounterQueueStates& queuesState, bool voq); + bool m_isQueueFlexCountersAdded = false; + void addQueueFlexCountersPerPort(const Port& port, FlexCounterQueueStates& queuesState); + void addQueueFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex, bool voq); + + bool m_isQueueWatermarkFlexCountersAdded = false; + void addQueueWatermarkFlexCountersPerPort(const Port& port, FlexCounterQueueStates& queuesState); + void addQueueWatermarkFlexCountersPerPortPerQueueIndex(const Port& port, size_t queueIndex); bool m_isPriorityGroupMapGenerated = false; - void generatePriorityGroupMapPerPort(const Port& port); + void generatePriorityGroupMapPerPort(const Port& port, FlexCounterPgStates& pgsState); + bool m_isPriorityGroupFlexCountersAdded = false; + void addPriorityGroupFlexCountersPerPort(const Port& port, FlexCounterPgStates& pgsState); + void addPriorityGroupFlexCountersPerPortPerPgIndex(const Port& port, size_t pgIndex); + + bool m_isPriorityGroupWatermarkFlexCountersAdded = false; + void addPriorityGroupWatermarkFlexCountersPerPort(const Port& port, FlexCounterPgStates& pgsState); + void addPriorityGroupWatermarkFlexCountersPerPortPerPgIndex(const Port& port, size_t pgIndex); bool m_isPortCounterMapGenerated = false; bool m_isPortBufferDropCounterMapGenerated = false; - task_process_status setPortAutoNeg(sai_object_id_t id, int an); - bool setPortFecMode(sai_object_id_t id, int fec); - task_process_status setPortInterfaceType(sai_object_id_t id, sai_port_interface_type_t interface_type); - task_process_status setPortAdvInterfaceTypes(sai_object_id_t id, std::vector &interface_types); + bool isAutoNegEnabled(sai_object_id_t id); + task_process_status setPortAutoNeg(Port &port, bool autoneg); + task_process_status setPortInterfaceType(Port 
&port, sai_port_interface_type_t interface_type); + task_process_status setPortAdvInterfaceTypes(Port &port, std::set &interface_types); + task_process_status setPortLinkTraining(const Port& port, bool state); void updatePortOperStatus(Port &port, sai_port_oper_status_t status); bool getPortOperSpeed(const Port& port, sai_uint32_t& speed) const; void updateDbPortOperSpeed(Port &port, sai_uint32_t speed); - void getPortSerdesVal(const std::string& s, std::vector &lane_values); - bool getPortAdvSpeedsVal(const std::string &s, std::vector &speed_values); - bool getPortInterfaceTypeVal(const std::string &s, sai_port_interface_type_t &interface_type); - bool getPortAdvInterfaceTypesVal(const std::string &s, std::vector &type_values); + bool getPortLinkTrainingRxStatus(const Port &port, sai_port_link_training_rx_status_t &rx_status); + bool getPortLinkTrainingFailure(const Port &port, sai_port_link_training_failure_status_t &failure); - bool setPortSerdesAttribute(sai_object_id_t port_id, + typedef enum { + PORT_STATE_POLL_NONE = 0, + PORT_STATE_POLL_AN = 0x00000001, /* Auto Negotiation */ + PORT_STATE_POLL_LT = 0x00000002 /* Link Trainig */ + } port_state_poll_t; + + map m_port_state_poll; + void updatePortStatePoll(const Port &port, port_state_poll_t type, bool active); + void refreshPortStateAutoNeg(const Port &port); + void refreshPortStateLinkTraining(const Port &port); + + void getPortSerdesVal(const std::string& s, std::vector &lane_values, int base = 16); + bool setPortSerdesAttribute(sai_object_id_t port_id, sai_object_id_t switch_id, std::map> &serdes_attr); @@ -345,7 +476,7 @@ class PortsOrch : public Orch, public Subject void initGearbox(); bool initGearboxPort(Port &port); - map m_recircPortRole; + map m_recircPortRole; //map key is tuple of map, sai_object_id_t> m_systemPortOidMap; @@ -356,11 +487,29 @@ class PortsOrch : public Orch, public Subject unique_ptr
m_tableVoqSystemLagMemberTable; void voqSyncAddLag(Port &lag); void voqSyncDelLag(Port &lag); - void voqSyncAddLagMember(Port &lag, Port &port); + void voqSyncAddLagMember(Port &lag, Port &port, string status); void voqSyncDelLagMember(Port &lag, Port &port); unique_ptr m_lagIdAllocator; + set m_macsecEnabledPorts; + + std::unordered_set generateCounterStats(const string& type, bool gearbox = false); + map m_queueInfo; + +private: + void initializeCpuPort(); + void initializePorts(); + + auto getPortConfigState() const -> port_config_state_t; + void setPortConfigState(port_config_state_t value); - std::unordered_set generateCounterStats(const string& type); + bool addPortBulk(const std::vector &portList); + bool removePortBulk(const std::vector &portList); + +private: + // Port config aggregator + std::unordered_map> m_portConfigMap; + // Port OA helper + PortHelper m_portHlpr; }; #endif /* SWSS_PORTSORCH_H */ diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp index 5a48b69969..568e89bcf2 100644 --- a/orchagent/qosorch.cpp +++ b/orchagent/qosorch.cpp @@ -25,8 +25,12 @@ extern sai_acl_api_t* sai_acl_api; extern SwitchOrch *gSwitchOrch; extern PortsOrch *gPortsOrch; +extern QosOrch *gQosOrch; extern sai_object_id_t gSwitchId; extern CrmOrch *gCrmOrch; +extern string gMySwitchType; +extern string gMyHostName; +extern string gMyAsicName; map ecn_map = { {"ecn_none", SAI_ECN_MARK_MODE_NONE}, @@ -45,12 +49,20 @@ enum { RED_DROP_PROBABILITY_SET = (1U << 2) }; +enum { + GREEN_WRED_ENABLED = (1U << 0), + YELLOW_WRED_ENABLED = (1U << 1), + RED_WRED_ENABLED = (1U << 2) +}; + // field_name is what is expected in CONFIG_DB PORT_QOS_MAP table map qos_to_attr_map = { {dscp_to_tc_field_name, SAI_PORT_ATTR_QOS_DSCP_TO_TC_MAP}, {mpls_tc_to_tc_field_name, SAI_PORT_ATTR_QOS_MPLS_EXP_TO_TC_MAP}, {dot1p_to_tc_field_name, SAI_PORT_ATTR_QOS_DOT1P_TO_TC_MAP}, {tc_to_queue_field_name, SAI_PORT_ATTR_QOS_TC_TO_QUEUE_MAP}, + {tc_to_dot1p_field_name, 
SAI_PORT_ATTR_QOS_TC_AND_COLOR_TO_DOT1P_MAP}, + {tc_to_dscp_field_name, SAI_PORT_ATTR_QOS_TC_AND_COLOR_TO_DSCP_MAP}, {tc_to_pg_map_field_name, SAI_PORT_ATTR_QOS_TC_TO_PRIORITY_GROUP_MAP}, {pfc_to_pg_map_name, SAI_PORT_ATTR_QOS_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP}, {pfc_to_queue_map_name, SAI_PORT_ATTR_QOS_PFC_PRIORITY_TO_QUEUE_MAP}, @@ -78,6 +90,9 @@ type_map QosOrch::m_qos_maps = { {CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, new object_reference_map()}, {CFG_DSCP_TO_FC_MAP_TABLE_NAME, new object_reference_map()}, {CFG_EXP_TO_FC_MAP_TABLE_NAME, new object_reference_map()}, + {CFG_TC_TO_DOT1P_MAP_TABLE_NAME, new object_reference_map()}, + {CFG_TC_TO_DSCP_MAP_TABLE_NAME, new object_reference_map()}, + {APP_TUNNEL_DECAP_TABLE_NAME, new object_reference_map()} }; map qos_to_ref_table_map = { @@ -85,25 +100,31 @@ map qos_to_ref_table_map = { {mpls_tc_to_tc_field_name, CFG_MPLS_TC_TO_TC_MAP_TABLE_NAME}, {dot1p_to_tc_field_name, CFG_DOT1P_TO_TC_MAP_TABLE_NAME}, {tc_to_queue_field_name, CFG_TC_TO_QUEUE_MAP_TABLE_NAME}, + {tc_to_dot1p_field_name, CFG_TC_TO_DOT1P_MAP_TABLE_NAME}, + {tc_to_dscp_field_name, CFG_TC_TO_DSCP_MAP_TABLE_NAME}, {tc_to_pg_map_field_name, CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME}, {pfc_to_pg_map_name, CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME}, {pfc_to_queue_map_name, CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME}, {scheduler_field_name, CFG_SCHEDULER_TABLE_NAME}, {wred_profile_field_name, CFG_WRED_PROFILE_TABLE_NAME}, {dscp_to_fc_field_name, CFG_DSCP_TO_FC_MAP_TABLE_NAME}, - {exp_to_fc_field_name, CFG_EXP_TO_FC_MAP_TABLE_NAME} + {exp_to_fc_field_name, CFG_EXP_TO_FC_MAP_TABLE_NAME}, + {decap_dscp_to_tc_field_name, CFG_DSCP_TO_TC_MAP_TABLE_NAME}, + {decap_tc_to_pg_field_name, CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME}, + {encap_tc_to_dscp_field_name, CFG_TC_TO_DSCP_MAP_TABLE_NAME}, + {encap_tc_to_queue_field_name, CFG_TC_TO_QUEUE_MAP_TABLE_NAME} }; #define DSCP_MAX_VAL 63 #define EXP_MAX_VAL 7 -task_process_status QosMapHandler::processWorkItem(Consumer& 
consumer) +#define PORT_NAME_GLOBAL "global" + +task_process_status QosMapHandler::processWorkItem(Consumer& consumer, KeyOpFieldsValuesTuple &tuple) { SWSS_LOG_ENTER(); sai_object_id_t sai_object = SAI_NULL_OBJECT_ID; - auto it = consumer.m_toSync.begin(); - KeyOpFieldsValuesTuple tuple = it->second; string qos_object_name = kfvKey(tuple); string qos_map_type_name = consumer.getTableName(); string op = kfvOp(tuple); @@ -111,6 +132,11 @@ task_process_status QosMapHandler::processWorkItem(Consumer& consumer) if (QosOrch::getTypeMap()[qos_map_type_name]->find(qos_object_name) != QosOrch::getTypeMap()[qos_map_type_name]->end()) { sai_object = (*(QosOrch::getTypeMap()[qos_map_type_name]))[qos_object_name].m_saiObjectId; + if ((*(QosOrch::getTypeMap()[qos_map_type_name]))[qos_object_name].m_pendingRemove && op == SET_COMMAND) + { + SWSS_LOG_NOTICE("Entry %s %s is pending remove, need retry", qos_map_type_name.c_str(), qos_object_name.c_str()); + return task_process_status::task_need_retry; + } } if (op == SET_COMMAND) { @@ -139,6 +165,7 @@ task_process_status QosMapHandler::processWorkItem(Consumer& consumer) return task_process_status::task_failed; } (*(QosOrch::getTypeMap()[qos_map_type_name]))[qos_object_name].m_saiObjectId = sai_object; + (*(QosOrch::getTypeMap()[qos_map_type_name]))[qos_object_name].m_pendingRemove = false; SWSS_LOG_NOTICE("Created [%s:%s]", qos_map_type_name.c_str(), qos_object_name.c_str()); } freeAttribResources(attributes); @@ -150,9 +177,16 @@ task_process_status QosMapHandler::processWorkItem(Consumer& consumer) SWSS_LOG_ERROR("Object with name:%s not found.", qos_object_name.c_str()); return task_process_status::task_invalid_entry; } + if (gQosOrch->isObjectBeingReferenced(QosOrch::getTypeMap(), qos_map_type_name, qos_object_name)) + { + auto hint = gQosOrch->objectReferenceInfo(QosOrch::getTypeMap(), qos_map_type_name, qos_object_name); + SWSS_LOG_NOTICE("Can't remove object %s due to being referenced (%s)", qos_object_name.c_str(), 
hint.c_str()); + (*(QosOrch::getTypeMap()[qos_map_type_name]))[qos_object_name].m_pendingRemove = true; + return task_process_status::task_need_retry; + } if (!removeQosItem(sai_object)) { - SWSS_LOG_ERROR("Failed to remove dscp_to_tc map. db name:%s sai object:%" PRIx64, qos_object_name.c_str(), sai_object); + SWSS_LOG_ERROR("Failed to remove QoS map. db name:%s sai object:%" PRIx64, qos_object_name.c_str(), sai_object); return task_process_status::task_failed; } auto it_to_delete = (QosOrch::getTypeMap()[qos_map_type_name])->find(qos_object_name); @@ -218,34 +252,6 @@ bool DscpToTcMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple & return true; } -void DscpToTcMapHandler::applyDscpToTcMapToSwitch(sai_attr_id_t attr_id, sai_object_id_t map_id) -{ - SWSS_LOG_ENTER(); - bool rv = true; - - /* Query DSCP_TO_TC QoS map at switch capability */ - rv = gSwitchOrch->querySwitchDscpToTcCapability(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP); - if (rv == false) - { - SWSS_LOG_ERROR("Switch level DSCP to TC QoS map configuration is not supported"); - return; - } - - /* Apply DSCP_TO_TC QoS map at switch */ - sai_attribute_t attr; - attr.id = attr_id; - attr.value.oid = map_id; - - sai_status_t status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_ERROR("Failed to apply DSCP_TO_TC QoS map to switch rv:%d", status); - return; - } - - SWSS_LOG_NOTICE("Applied DSCP_TO_TC QoS map to switch successfully"); -} - sai_object_id_t DscpToTcMapHandler::addQosItem(const vector &attributes) { SWSS_LOG_ENTER(); @@ -271,16 +277,28 @@ sai_object_id_t DscpToTcMapHandler::addQosItem(const vector &at } SWSS_LOG_DEBUG("created QosMap object:%" PRIx64, sai_object); - applyDscpToTcMapToSwitch(SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP, sai_object); - return sai_object; } -task_process_status QosOrch::handleDscpToTcTable(Consumer& consumer) +bool DscpToTcMapHandler::removeQosItem(sai_object_id_t sai_object) +{ + 
SWSS_LOG_ENTER(); + + SWSS_LOG_DEBUG("Removing DscpToTcMap object:%" PRIx64, sai_object); + sai_status_t sai_status = sai_qos_map_api->remove_qos_map(sai_object); + if (SAI_STATUS_SUCCESS != sai_status) + { + SWSS_LOG_ERROR("Failed to remove DSCP_TO_TC map, status:%d", sai_status); + return false; + } + return true; +} + +task_process_status QosOrch::handleDscpToTcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple) { SWSS_LOG_ENTER(); DscpToTcMapHandler dscp_tc_handler; - return dscp_tc_handler.processWorkItem(consumer); + return dscp_tc_handler.processWorkItem(consumer, tuple); } bool MplsTcToTcMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector &attributes) @@ -331,11 +349,11 @@ sai_object_id_t MplsTcToTcMapHandler::addQosItem(const vector & return sai_object; } -task_process_status QosOrch::handleMplsTcToTcTable(Consumer& consumer) +task_process_status QosOrch::handleMplsTcToTcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple) { SWSS_LOG_ENTER(); MplsTcToTcMapHandler mpls_tc_to_tc_handler; - return mpls_tc_to_tc_handler.processWorkItem(consumer); + return mpls_tc_to_tc_handler.processWorkItem(consumer, tuple); } bool Dot1pToTcMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector &attributes) @@ -400,11 +418,11 @@ sai_object_id_t Dot1pToTcMapHandler::addQosItem(const vector &a return object_id; } -task_process_status QosOrch::handleDot1pToTcTable(Consumer &consumer) +task_process_status QosOrch::handleDot1pToTcTable(Consumer &consumer, KeyOpFieldsValuesTuple &tuple) { SWSS_LOG_ENTER(); Dot1pToTcMapHandler dot1p_tc_handler; - return dot1p_tc_handler.processWorkItem(consumer); + return dot1p_tc_handler.processWorkItem(consumer, tuple); } bool TcToQueueMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector &attributes) @@ -453,11 +471,65 @@ sai_object_id_t TcToQueueMapHandler::addQosItem(const vector &a return sai_object; } -task_process_status 
QosOrch::handleTcToQueueTable(Consumer& consumer) +task_process_status QosOrch::handleTcToQueueTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple) { SWSS_LOG_ENTER(); TcToQueueMapHandler tc_queue_handler; - return tc_queue_handler.processWorkItem(consumer); + return tc_queue_handler.processWorkItem(consumer, tuple); +} + +//Functions for TC-to-DOT1P qos map handling +bool TcToDot1pMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector &attributes) +{ + SWSS_LOG_ENTER(); + sai_attribute_t list_attr; + sai_qos_map_list_t tc_map_list; + tc_map_list.count = (uint32_t)kfvFieldsValues(tuple).size(); + tc_map_list.list = new sai_qos_map_t[tc_map_list.count](); + uint32_t ind = 0; + for (auto i = kfvFieldsValues(tuple).begin(); i != kfvFieldsValues(tuple).end(); i++, ind++) + { + tc_map_list.list[ind].key.tc = (uint8_t)stoi(fvField(*i)); + tc_map_list.list[ind].value.dot1p = (uint8_t)stoi(fvValue(*i)); + } + list_attr.id = SAI_QOS_MAP_ATTR_MAP_TO_VALUE_LIST; + list_attr.value.qosmap.count = tc_map_list.count; + list_attr.value.qosmap.list = tc_map_list.list; + attributes.push_back(list_attr); + return true; +} + +sai_object_id_t TcToDot1pMapHandler::addQosItem(const vector &attributes) +{ + SWSS_LOG_ENTER(); + sai_status_t sai_status; + sai_object_id_t sai_object; + vector qos_map_attrs; + sai_attribute_t qos_map_attr; + + qos_map_attr.id = SAI_QOS_MAP_ATTR_TYPE; + qos_map_attr.value.s32 = SAI_QOS_MAP_TYPE_TC_AND_COLOR_TO_DOT1P; + qos_map_attrs.push_back(qos_map_attr); + + qos_map_attr.id = SAI_QOS_MAP_ATTR_MAP_TO_VALUE_LIST; + qos_map_attr.value.qosmap.count = attributes[0].value.qosmap.count; + qos_map_attr.value.qosmap.list = attributes[0].value.qosmap.list; + qos_map_attrs.push_back(qos_map_attr); + + sai_status = sai_qos_map_api->create_qos_map(&sai_object, gSwitchId, (uint32_t)qos_map_attrs.size(), qos_map_attrs.data()); + if (SAI_STATUS_SUCCESS != sai_status) + { + SWSS_LOG_ERROR("Failed to create tc_to_dot1p qos map. 
status:%d", sai_status); + return SAI_NULL_OBJECT_ID; + } + return sai_object; +} + +task_process_status QosOrch::handleTcToDot1pTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple) +{ + SWSS_LOG_ENTER(); + TcToDot1pMapHandler tc_dot1p_handler; + return tc_dot1p_handler.processWorkItem(consumer, tuple); } void WredMapHandler::freeAttribResources(vector &attributes) @@ -485,47 +557,140 @@ bool WredMapHandler::convertBool(string str, bool &val) return true; } +void WredMapHandler::appendThresholdToAttributeList(sai_attr_id_t type, + sai_uint32_t threshold, + bool needDefer, + vector &normalQueue, + vector &deferredQueue, + sai_uint32_t &newThreshold) +{ + sai_attribute_t attr; + + attr.id = type; + attr.value.u32 = threshold; + if (needDefer) + { + deferredQueue.push_back(attr); + } + else + { + normalQueue.push_back(attr); + } + newThreshold = threshold; +} + +WredMapHandler::qos_wred_thresholds_store_t WredMapHandler::m_wredProfiles; + bool WredMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector &attribs) { SWSS_LOG_ENTER(); sai_attribute_t attr; + vector deferred_attributes; + auto &key = kfvKey(tuple); + auto &storedProfile = WredMapHandler::m_wredProfiles[key]; + qos_wred_thresholds_t currentProfile = storedProfile; + sai_uint32_t threshold; + + /* + * Setting WRED profile can fail in case + * - the current min threshold is greater than the new max threshold + * - or the current max threshold is less than the new min threshold + * for any color at any time, on some vendor's platforms. + * + * The root cause + * There can be only one attribute in each SAI SET operation, which means + * the vendor SAI do not have a big picture regarding what attributes are being set + * and can only perform the sanity check against each SET operation. + * In the above case, the sanity check will fail. + * + * The fix + * The thresholds that have been applied to SAI will be stored in orchagent. 
+ * + * The original logic is to handle each attribute to be set and append it to an attribute list. + * To resolve the issue, a 2nd half attribute list is introduced and + * will be appended to the original attribute list after all the attributes have been handled. + * + * In the new logic, each threshold to be set will be checked against the stored data. + * In case it violates the condition, the violating attribute will be deferred, done via putting it into the 2nd half attributes list. + * + * For any color, there can be only 1 threshold violating the condition. + * Otherwise, it means both new min > old max and new max > old min, which means either old max < old min or new max < new min, + * which means either old or new data is illegal. + * This can not happen because illegal data can not be applied and stored. + * + * By doing so, the other threshold will be applied first, which extends the threshold range and breaks the violating condition. + * A logic is also introduced to guarantee the min threshold is always less than the max threshold in the new profile to be set. + * + * For example: + * Current min=1M, max=2M, new min=3M, new max=4M + * The min is set first, so current max (2M) < new min (3M), which violates the condition + * By the new logic, min threshold will be deferred so the new max will be applied first and then the new min is applied and no violating. 
+ * min = 1M, max = 2M + * => min = 1M, max = 4M + * => min = 3M, max = 4M + */ + for (auto i = kfvFieldsValues(tuple).begin(); i != kfvFieldsValues(tuple).end(); i++) { if (fvField(*i) == yellow_max_threshold_field_name) { - attr.id = SAI_WRED_ATTR_YELLOW_MAX_THRESHOLD; - attr.value.s32 = stoi(fvValue(*i)); - attribs.push_back(attr); + threshold = stoi(fvValue(*i)); + appendThresholdToAttributeList(SAI_WRED_ATTR_YELLOW_MAX_THRESHOLD, + threshold, + (storedProfile.yellow_min_threshold > threshold), + attribs, + deferred_attributes, + currentProfile.yellow_max_threshold); } else if (fvField(*i) == yellow_min_threshold_field_name) { - attr.id = SAI_WRED_ATTR_YELLOW_MIN_THRESHOLD; - attr.value.s32 = stoi(fvValue(*i)); - attribs.push_back(attr); + threshold = stoi(fvValue(*i)); + appendThresholdToAttributeList(SAI_WRED_ATTR_YELLOW_MIN_THRESHOLD, + threshold, + (storedProfile.yellow_max_threshold < threshold), + attribs, + deferred_attributes, + currentProfile.yellow_min_threshold); } else if (fvField(*i) == green_max_threshold_field_name) { - attr.id = SAI_WRED_ATTR_GREEN_MAX_THRESHOLD; - attr.value.s32 = stoi(fvValue(*i)); - attribs.push_back(attr); + threshold = stoi(fvValue(*i)); + appendThresholdToAttributeList(SAI_WRED_ATTR_GREEN_MAX_THRESHOLD, + threshold, + (storedProfile.green_min_threshold > threshold), + attribs, + deferred_attributes, + currentProfile.green_max_threshold); } else if (fvField(*i) == green_min_threshold_field_name) { - attr.id = SAI_WRED_ATTR_GREEN_MIN_THRESHOLD; - attr.value.s32 = stoi(fvValue(*i)); - attribs.push_back(attr); + threshold = stoi(fvValue(*i)); + appendThresholdToAttributeList(SAI_WRED_ATTR_GREEN_MIN_THRESHOLD, + threshold, + (storedProfile.green_max_threshold < threshold), + attribs, + deferred_attributes, + currentProfile.green_min_threshold); } else if (fvField(*i) == red_max_threshold_field_name) { - attr.id = SAI_WRED_ATTR_RED_MAX_THRESHOLD; - attr.value.s32 = stoi(fvValue(*i)); - attribs.push_back(attr); + threshold = 
stoi(fvValue(*i)); + appendThresholdToAttributeList(SAI_WRED_ATTR_RED_MAX_THRESHOLD, + threshold, + (storedProfile.red_min_threshold > threshold), + attribs, + deferred_attributes, + currentProfile.red_max_threshold); } else if (fvField(*i) == red_min_threshold_field_name) { - attr.id = SAI_WRED_ATTR_RED_MIN_THRESHOLD; - attr.value.s32 = stoi(fvValue(*i)); - attribs.push_back(attr); + threshold = stoi(fvValue(*i)); + appendThresholdToAttributeList(SAI_WRED_ATTR_RED_MIN_THRESHOLD, + threshold, + (storedProfile.red_max_threshold < threshold), + attribs, + deferred_attributes, + currentProfile.red_min_threshold); } else if (fvField(*i) == green_drop_probability_field_name) { @@ -584,6 +749,18 @@ bool WredMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tupl return false; } } + + if ((currentProfile.green_min_threshold > currentProfile.green_max_threshold) + || (currentProfile.yellow_min_threshold > currentProfile.yellow_max_threshold) + || (currentProfile.red_min_threshold > currentProfile.red_max_threshold)) + { + SWSS_LOG_ERROR("Wrong wred profile: min threshold is greater than max threshold"); + return false; + } + + attribs.insert(attribs.end(), deferred_attributes.begin(), deferred_attributes.end()); + storedProfile = currentProfile; + return true; } @@ -611,6 +788,7 @@ sai_object_id_t WredMapHandler::addQosItem(const vector &attrib sai_attribute_t attr; vector attrs; uint8_t drop_prob_set = 0; + uint8_t wred_enable_set = 0; attr.id = SAI_WRED_ATTR_WEIGHT; attr.value.s32 = 0; @@ -620,32 +798,53 @@ sai_object_id_t WredMapHandler::addQosItem(const vector &attrib { attrs.push_back(attrib); - if (attrib.id == SAI_WRED_ATTR_GREEN_DROP_PROBABILITY) + switch (attrib.id) { + case SAI_WRED_ATTR_GREEN_ENABLE: + if (attrib.value.booldata) + { + wred_enable_set |= GREEN_WRED_ENABLED; + } + break; + case SAI_WRED_ATTR_YELLOW_ENABLE: + if (attrib.value.booldata) + { + wred_enable_set |= YELLOW_WRED_ENABLED; + } + break; + case SAI_WRED_ATTR_RED_ENABLE: + if 
(attrib.value.booldata) + { + wred_enable_set |= RED_WRED_ENABLED; + } + break; + case SAI_WRED_ATTR_GREEN_DROP_PROBABILITY: drop_prob_set |= GREEN_DROP_PROBABILITY_SET; - } - else if (attrib.id == SAI_WRED_ATTR_YELLOW_DROP_PROBABILITY) - { + break; + case SAI_WRED_ATTR_YELLOW_DROP_PROBABILITY: drop_prob_set |= YELLOW_DROP_PROBABILITY_SET; - } - else if (attrib.id == SAI_WRED_ATTR_RED_DROP_PROBABILITY) - { + break; + case SAI_WRED_ATTR_RED_DROP_PROBABILITY: drop_prob_set |= RED_DROP_PROBABILITY_SET; + break; + default: + break; } } - if (!(drop_prob_set & GREEN_DROP_PROBABILITY_SET)) + + if (!(drop_prob_set & GREEN_DROP_PROBABILITY_SET) && (wred_enable_set & GREEN_WRED_ENABLED)) { attr.id = SAI_WRED_ATTR_GREEN_DROP_PROBABILITY; attr.value.s32 = 100; attrs.push_back(attr); } - if (!(drop_prob_set & YELLOW_DROP_PROBABILITY_SET)) + if (!(drop_prob_set & YELLOW_DROP_PROBABILITY_SET) && (wred_enable_set & YELLOW_WRED_ENABLED)) { attr.id = SAI_WRED_ATTR_YELLOW_DROP_PROBABILITY; attr.value.s32 = 100; attrs.push_back(attr); } - if (!(drop_prob_set & RED_DROP_PROBABILITY_SET)) + if (!(drop_prob_set & RED_DROP_PROBABILITY_SET) && (wred_enable_set & RED_WRED_ENABLED)) { attr.id = SAI_WRED_ATTR_RED_DROP_PROBABILITY; attr.value.s32 = 100; @@ -674,11 +873,11 @@ bool WredMapHandler::removeQosItem(sai_object_id_t sai_object) return true; } -task_process_status QosOrch::handleWredProfileTable(Consumer& consumer) +task_process_status QosOrch::handleWredProfileTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple) { SWSS_LOG_ENTER(); WredMapHandler wred_handler; - return wred_handler.processWorkItem(consumer); + return wred_handler.processWorkItem(consumer, tuple); } bool TcToPgHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector &attributes) @@ -720,18 +919,18 @@ sai_object_id_t TcToPgHandler::addQosItem(const vector &attribu sai_status = sai_qos_map_api->create_qos_map(&sai_object, gSwitchId, (uint32_t)qos_map_attrs.size(), qos_map_attrs.data()); if 
(SAI_STATUS_SUCCESS != sai_status) { - SWSS_LOG_ERROR("Failed to create tc_to_queue map. status:%d", sai_status); + SWSS_LOG_ERROR("Failed to create tc_to_pg map. status:%d", sai_status); return SAI_NULL_OBJECT_ID; } return sai_object; } -task_process_status QosOrch::handleTcToPgTable(Consumer& consumer) +task_process_status QosOrch::handleTcToPgTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple) { SWSS_LOG_ENTER(); TcToPgHandler tc_to_pg_handler; - return tc_to_pg_handler.processWorkItem(consumer); + return tc_to_pg_handler.processWorkItem(consumer, tuple); } bool PfcPrioToPgHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector &attributes) @@ -774,18 +973,18 @@ sai_object_id_t PfcPrioToPgHandler::addQosItem(const vector &at sai_status = sai_qos_map_api->create_qos_map(&sai_object, gSwitchId, (uint32_t)qos_map_attrs.size(), qos_map_attrs.data()); if (SAI_STATUS_SUCCESS != sai_status) { - SWSS_LOG_ERROR("Failed to create tc_to_queue map. status:%d", sai_status); + SWSS_LOG_ERROR("Failed to create pfc_priority_to_queue map. status:%d", sai_status); return SAI_NULL_OBJECT_ID; } return sai_object; } -task_process_status QosOrch::handlePfcPrioToPgTable(Consumer& consumer) +task_process_status QosOrch::handlePfcPrioToPgTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple) { SWSS_LOG_ENTER(); PfcPrioToPgHandler pfc_prio_to_pg_handler; - return pfc_prio_to_pg_handler.processWorkItem(consumer); + return pfc_prio_to_pg_handler.processWorkItem(consumer, tuple); } bool PfcToQueueHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector &attributes) @@ -829,7 +1028,7 @@ sai_object_id_t PfcToQueueHandler::addQosItem(const vector &att sai_status = sai_qos_map_api->create_qos_map(&sai_object, gSwitchId, (uint32_t)qos_map_attrs.size(), qos_map_attrs.data()); if (SAI_STATUS_SUCCESS != sai_status) { - SWSS_LOG_ERROR("Failed to create tc_to_queue map. 
status:%d", sai_status); + SWSS_LOG_ERROR("Failed to create pfc_priority_to_queue map. status:%d", sai_status); return SAI_NULL_OBJECT_ID; } return sai_object; @@ -922,11 +1121,11 @@ sai_object_id_t DscpToFcMapHandler::addQosItem(const vector &at return sai_object; } -task_process_status QosOrch::handleDscpToFcTable(Consumer& consumer) +task_process_status QosOrch::handleDscpToFcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple) { SWSS_LOG_ENTER(); DscpToFcMapHandler dscp_fc_handler; - return dscp_fc_handler.processWorkItem(consumer); + return dscp_fc_handler.processWorkItem(consumer, tuple); } bool ExpToFcMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, @@ -1013,18 +1212,101 @@ sai_object_id_t ExpToFcMapHandler::addQosItem(const vector &att return sai_object; } -task_process_status QosOrch::handleExpToFcTable(Consumer& consumer) +bool TcToDscpMapHandler::convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, + vector &attributes) +{ + SWSS_LOG_ENTER(); + + sai_attribute_t list_attr; + list_attr.id = SAI_QOS_MAP_ATTR_MAP_TO_VALUE_LIST; + list_attr.value.qosmap.count = (uint32_t)kfvFieldsValues(tuple).size(); + list_attr.value.qosmap.list = new sai_qos_map_t[list_attr.value.qosmap.count](); + uint32_t ind = 0; + + for (auto i = kfvFieldsValues(tuple).begin(); i != kfvFieldsValues(tuple).end(); i++, ind++) + { + try + { + auto value = stoi(fvValue(*i)); + if (value < 0) + { + SWSS_LOG_ERROR("DSCP value %d is negative", value); + delete[] list_attr.value.qosmap.list; + return false; + } + else if (value > DSCP_MAX_VAL) + { + SWSS_LOG_ERROR("DSCP value %d is greater than max value %d", value, DSCP_MAX_VAL); + delete[] list_attr.value.qosmap.list; + return false; + } + list_attr.value.qosmap.list[ind].key.tc = static_cast(stoi(fvField(*i))); + list_attr.value.qosmap.list[ind].value.dscp = static_cast(value); + + SWSS_LOG_DEBUG("key.tc:%d, value.dscp:%d", + list_attr.value.qosmap.list[ind].key.tc, + 
list_attr.value.qosmap.list[ind].value.dscp); + } + catch(const invalid_argument& e) + { + SWSS_LOG_ERROR("Got exception during conversion: %s", e.what()); + delete[] list_attr.value.qosmap.list; + return false; + } + } + attributes.push_back(list_attr); + return true; +} + +sai_object_id_t TcToDscpMapHandler::addQosItem(const vector &attributes) +{ + SWSS_LOG_ENTER(); + sai_status_t sai_status; + sai_object_id_t sai_object; + vector qos_map_attrs; + + sai_attribute_t qos_map_attr; + qos_map_attr.id = SAI_QOS_MAP_ATTR_TYPE; + qos_map_attr.value.u32 = SAI_QOS_MAP_TYPE_TC_AND_COLOR_TO_DSCP; + qos_map_attrs.push_back(qos_map_attr); + + qos_map_attr.id = SAI_QOS_MAP_ATTR_MAP_TO_VALUE_LIST; + qos_map_attr.value.qosmap.count = attributes[0].value.qosmap.count; + qos_map_attr.value.qosmap.list = attributes[0].value.qosmap.list; + qos_map_attrs.push_back(qos_map_attr); + + sai_status = sai_qos_map_api->create_qos_map(&sai_object, + gSwitchId, + (uint32_t)qos_map_attrs.size(), + qos_map_attrs.data()); + if (SAI_STATUS_SUCCESS != sai_status) + { + SWSS_LOG_ERROR("Failed to create tc_to_dscp map. 
status:%d", sai_status); + return SAI_NULL_OBJECT_ID; + } + SWSS_LOG_DEBUG("created QosMap object:%" PRIx64, sai_object); + return sai_object; +} + +task_process_status QosOrch::handleExpToFcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple) { SWSS_LOG_ENTER(); ExpToFcMapHandler exp_fc_handler; - return exp_fc_handler.processWorkItem(consumer); + return exp_fc_handler.processWorkItem(consumer, tuple); } -task_process_status QosOrch::handlePfcToQueueTable(Consumer& consumer) +task_process_status QosOrch::handlePfcToQueueTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple) { SWSS_LOG_ENTER(); PfcToQueueHandler pfc_to_queue_handler; - return pfc_to_queue_handler.processWorkItem(consumer); + return pfc_to_queue_handler.processWorkItem(consumer, tuple); +} + +task_process_status QosOrch::handleTcToDscpTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple) +{ + SWSS_LOG_ENTER(); + TcToDscpMapHandler tc_to_dscp_handler; + return tc_to_dscp_handler.processWorkItem(consumer, tuple); } QosOrch::QosOrch(DBConnector *db, vector &tableNames) : Orch(db, tableNames) @@ -1053,20 +1335,21 @@ void QosOrch::initTableHandlers() m_qos_handler_map.insert(qos_handler_pair(CFG_WRED_PROFILE_TABLE_NAME, &QosOrch::handleWredProfileTable)); m_qos_handler_map.insert(qos_handler_pair(CFG_DSCP_TO_FC_MAP_TABLE_NAME, &QosOrch::handleDscpToFcTable)); m_qos_handler_map.insert(qos_handler_pair(CFG_EXP_TO_FC_MAP_TABLE_NAME, &QosOrch::handleExpToFcTable)); + m_qos_handler_map.insert(qos_handler_pair(CFG_TC_TO_DSCP_MAP_TABLE_NAME, &QosOrch::handleTcToDscpTable)); + m_qos_handler_map.insert(qos_handler_pair(CFG_TC_TO_DOT1P_MAP_TABLE_NAME, &QosOrch::handleTcToDot1pTable)); m_qos_handler_map.insert(qos_handler_pair(CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, &QosOrch::handleTcToPgTable)); m_qos_handler_map.insert(qos_handler_pair(CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, &QosOrch::handlePfcPrioToPgTable)); m_qos_handler_map.insert(qos_handler_pair(CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, 
&QosOrch::handlePfcToQueueTable)); } -task_process_status QosOrch::handleSchedulerTable(Consumer& consumer) +task_process_status QosOrch::handleSchedulerTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple) { SWSS_LOG_ENTER(); sai_status_t sai_status; sai_object_id_t sai_object = SAI_NULL_OBJECT_ID; - KeyOpFieldsValuesTuple tuple = consumer.m_toSync.begin()->second; string qos_map_type_name = CFG_SCHEDULER_TABLE_NAME; string qos_object_name = kfvKey(tuple); string op = kfvOp(tuple); @@ -1079,6 +1362,11 @@ task_process_status QosOrch::handleSchedulerTable(Consumer& consumer) SWSS_LOG_ERROR("Error sai_object must exist for key %s", qos_object_name.c_str()); return task_process_status::task_invalid_entry; } + if ((*(m_qos_maps[qos_map_type_name]))[qos_object_name].m_pendingRemove && op == SET_COMMAND) + { + SWSS_LOG_NOTICE("Entry %s %s is pending remove, need retry", qos_map_type_name.c_str(), qos_object_name.c_str()); + return task_process_status::task_need_retry; + } } if (op == SET_COMMAND) { @@ -1113,11 +1401,6 @@ task_process_status QosOrch::handleSchedulerTable(Consumer& consumer) attr.value.u8 = (uint8_t)stoi(fvValue(*i)); sai_attr_list.push_back(attr); } - else if (fvField(*i) == scheduler_priority_field_name) - { - // TODO: The meaning is to be able to adjust priority of the given scheduler group. - // However currently SAI model does not provide such ability. 
- } else if (fvField(*i) == scheduler_meter_type_field_name) { sai_meter_type_t meter_value = scheduler_meter_map.at(fvValue(*i)); @@ -1186,6 +1469,7 @@ task_process_status QosOrch::handleSchedulerTable(Consumer& consumer) } SWSS_LOG_NOTICE("Created [%s:%s]", qos_map_type_name.c_str(), qos_object_name.c_str()); (*(m_qos_maps[qos_map_type_name]))[qos_object_name].m_saiObjectId = sai_object; + (*(m_qos_maps[qos_map_type_name]))[qos_object_name].m_pendingRemove = false; } } else if (op == DEL_COMMAND) @@ -1195,6 +1479,13 @@ task_process_status QosOrch::handleSchedulerTable(Consumer& consumer) SWSS_LOG_ERROR("Object with name:%s not found.", qos_object_name.c_str()); return task_process_status::task_invalid_entry; } + if (gQosOrch->isObjectBeingReferenced(QosOrch::getTypeMap(), qos_map_type_name, qos_object_name)) + { + auto hint = gQosOrch->objectReferenceInfo(QosOrch::getTypeMap(), qos_map_type_name, qos_object_name); + SWSS_LOG_NOTICE("Can't remove object %s due to being referenced (%s)", qos_object_name.c_str(), hint.c_str()); + (*(m_qos_maps[qos_map_type_name]))[qos_object_name].m_pendingRemove = true; + return task_process_status::task_need_retry; + } sai_status = sai_scheduler_api->remove_scheduler(sai_object); if (SAI_STATUS_SUCCESS != sai_status) { @@ -1338,22 +1629,58 @@ sai_object_id_t QosOrch::getSchedulerGroup(const Port &port, const sai_object_id bool QosOrch::applySchedulerToQueueSchedulerGroup(Port &port, size_t queue_ind, sai_object_id_t scheduler_profile_id) { SWSS_LOG_ENTER(); + sai_object_id_t queue_id; + Port input_port = port; + sai_object_id_t group_id = 0; - if (port.m_queue_ids.size() <= queue_ind) + if (gMySwitchType == "voq") { - SWSS_LOG_ERROR("Invalid queue index specified:%zd", queue_ind); - return false; - } + if(port.m_system_port_info.type == SAI_SYSTEM_PORT_TYPE_REMOTE) + { + return true; + } + + // Get local port from system port. 
port is pointing to local port now + if (!gPortsOrch->getPort(port.m_system_port_info.local_port_oid, port)) + { + SWSS_LOG_ERROR("Port with alias:%s not found", port.m_alias.c_str()); + return task_process_status::task_invalid_entry; + } - const sai_object_id_t queue_id = port.m_queue_ids[queue_ind]; + if (port.m_queue_ids.size() <= queue_ind) + { + SWSS_LOG_ERROR("Invalid queue index specified:%zd", queue_ind); + return false; + } + queue_id = port.m_queue_ids[queue_ind]; + + group_id = getSchedulerGroup(port, queue_id); + if(group_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_ERROR("Failed to find a scheduler group for port: %s queue: %zu", port.m_alias.c_str(), queue_ind); + return false; + } - const sai_object_id_t group_id = getSchedulerGroup(port, queue_id); - if(group_id == SAI_NULL_OBJECT_ID) + // port is set back to system port + port = input_port; + } + else { - SWSS_LOG_ERROR("Failed to find a scheduler group for port: %s queue: %zu", port.m_alias.c_str(), queue_ind); - return false; + if (port.m_queue_ids.size() <= queue_ind) + { + SWSS_LOG_ERROR("Invalid queue index specified:%zd", queue_ind); + return false; + } + queue_id = port.m_queue_ids[queue_ind]; + + group_id = getSchedulerGroup(port, queue_id); + if(group_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_ERROR("Failed to find a scheduler group for port: %s queue: %zu", port.m_alias.c_str(), queue_ind); + return false; + } } - + /* Apply scheduler profile to all port groups */ sai_attribute_t attr; sai_status_t sai_status; @@ -1384,12 +1711,25 @@ bool QosOrch::applyWredProfileToQueue(Port &port, size_t queue_ind, sai_object_i sai_status_t sai_status; sai_object_id_t queue_id; - if (port.m_queue_ids.size() <= queue_ind) + if (gMySwitchType == "voq") { - SWSS_LOG_ERROR("Invalid queue index specified:%zd", queue_ind); - return false; + std :: vector queue_ids = gPortsOrch->getPortVoQIds(port); + if (queue_ids.size() <= queue_ind) + { + SWSS_LOG_ERROR("Invalid voq index specified:%zd", queue_ind); + return 
task_process_status::task_invalid_entry; + } + queue_id = queue_ids[queue_ind]; + } + else + { + if (port.m_queue_ids.size() <= queue_ind) + { + SWSS_LOG_ERROR("Invalid queue index specified:%zd", queue_ind); + return false; + } + queue_id = port.m_queue_ids[queue_ind]; } - queue_id = port.m_queue_ids[queue_ind]; attr.id = SAI_QUEUE_ATTR_WRED_PROFILE_ID; attr.value.oid = sai_wred_profile; @@ -1406,39 +1746,162 @@ bool QosOrch::applyWredProfileToQueue(Port &port, size_t queue_ind, sai_object_i return true; } -task_process_status QosOrch::handleQueueTable(Consumer& consumer) +task_process_status QosOrch::handleQueueTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple) { SWSS_LOG_ENTER(); - auto it = consumer.m_toSync.begin(); - KeyOpFieldsValuesTuple tuple = it->second; Port port; bool result; string key = kfvKey(tuple); string op = kfvOp(tuple); size_t queue_ind = 0; vector tokens; + bool local_port = false; + string local_port_name; sai_uint32_t range_low, range_high; vector port_names; ref_resolve_status resolve_result; - // sample "QUEUE: {Ethernet4|0-1}" + /* + Input sample "QUEUE : {Ethernet4|0-1}" or + "QUEUE : {STG01-0101-0400-01T2-LC6|ASIC0|Ethernet4|0-1}" + */ tokens = tokenize(key, config_db_key_delimiter); - if (tokens.size() != 2) + + if (gMySwitchType == "voq") { - SWSS_LOG_ERROR("malformed key:%s. Must contain 2 tokens", key.c_str()); - return task_process_status::task_invalid_entry; + if (tokens.size() != 4) + { + SWSS_LOG_ERROR("malformed key:%s. 
Must contain 4 tokens", key.c_str()); + return task_process_status::task_invalid_entry; + } + + port_names = tokenize(tokens[0] + config_db_key_delimiter + tokens[1] + config_db_key_delimiter + tokens[2], list_item_delimiter); + if (!parseIndexRange(tokens[3], range_low, range_high)) + { + SWSS_LOG_ERROR("Failed to parse range:%s", tokens[3].c_str()); + return task_process_status::task_invalid_entry; + } + + if(tokens[0] == gMyHostName) + { + local_port = true; + local_port_name = tokens[2]; + SWSS_LOG_INFO("System port %s is local port %d local port name %s", port_names[0].c_str(), local_port, local_port_name.c_str()); + } + } + else + { + if (tokens.size() != 2) + { + SWSS_LOG_ERROR("malformed key:%s. Must contain 2 tokens", key.c_str()); + return task_process_status::task_invalid_entry; + } + port_names = tokenize(tokens[0], list_item_delimiter); + if (!parseIndexRange(tokens[1], range_low, range_high)) + { + SWSS_LOG_ERROR("Failed to parse range:%s", tokens[1].c_str()); + return task_process_status::task_invalid_entry; + } + } + + bool donotChangeScheduler = false; + bool donotChangeWredProfile = false; + sai_object_id_t sai_scheduler_profile; + sai_object_id_t sai_wred_profile; + + if (op == SET_COMMAND) + { + string scheduler_profile_name; + resolve_result = resolveFieldRefValue(m_qos_maps, scheduler_field_name, + qos_to_ref_table_map.at(scheduler_field_name), tuple, + sai_scheduler_profile, scheduler_profile_name); + if (ref_resolve_status::success != resolve_result) + { + if (resolve_result != ref_resolve_status::field_not_found) + { + if(ref_resolve_status::not_resolved == resolve_result) + { + SWSS_LOG_INFO("Missing or invalid scheduler reference"); + return task_process_status::task_need_retry; + } + SWSS_LOG_ERROR("Resolving scheduler reference failed"); + return task_process_status::task_failed; + } + + if (doesObjectExist(m_qos_maps, CFG_QUEUE_TABLE_NAME, key, scheduler_field_name, scheduler_profile_name)) + { + SWSS_LOG_NOTICE("QUEUE|%s %s was 
configured but is not any more. Remove it", key.c_str(), scheduler_field_name.c_str()); + removeMeFromObjsReferencedByMe(m_qos_maps, CFG_QUEUE_TABLE_NAME, key, scheduler_field_name, scheduler_profile_name); + sai_scheduler_profile = SAI_NULL_OBJECT_ID; + } + else + { + // Did not exist and do not exist. No action + donotChangeScheduler = true; + } + } + else + { + setObjectReference(m_qos_maps, CFG_QUEUE_TABLE_NAME, key, scheduler_field_name, scheduler_profile_name); + SWSS_LOG_INFO("QUEUE %s Field %s %s has been resolved to %" PRIx64 , key.c_str(), scheduler_field_name.c_str(), scheduler_profile_name.c_str(), sai_scheduler_profile); + } + + string wred_profile_name; + resolve_result = resolveFieldRefValue(m_qos_maps, wred_profile_field_name, + qos_to_ref_table_map.at(wred_profile_field_name), tuple, + sai_wred_profile, wred_profile_name); + if (ref_resolve_status::success != resolve_result) + { + if (resolve_result != ref_resolve_status::field_not_found) + { + if(ref_resolve_status::not_resolved == resolve_result) + { + SWSS_LOG_INFO("Missing or invalid wred profile reference"); + return task_process_status::task_need_retry; + } + SWSS_LOG_ERROR("Resolving wred profile reference failed"); + return task_process_status::task_failed; + } + + if (doesObjectExist(m_qos_maps, CFG_QUEUE_TABLE_NAME, key, wred_profile_field_name, wred_profile_name)) + { + SWSS_LOG_NOTICE("QUEUE|%s %s was configured but is not any more. 
Remove it", key.c_str(), wred_profile_field_name.c_str()); + removeMeFromObjsReferencedByMe(m_qos_maps, CFG_QUEUE_TABLE_NAME, key, wred_profile_field_name, wred_profile_name); + sai_wred_profile = SAI_NULL_OBJECT_ID; + } + else + { + donotChangeWredProfile = true; + } + } + else + { + setObjectReference(m_qos_maps, CFG_QUEUE_TABLE_NAME, key, wred_profile_field_name, wred_profile_name); + } + } + else if (op == DEL_COMMAND) + { + removeObject(QosOrch::getTypeMap(), CFG_QUEUE_TABLE_NAME, key); + sai_scheduler_profile = SAI_NULL_OBJECT_ID; + sai_wred_profile = SAI_NULL_OBJECT_ID; } - port_names = tokenize(tokens[0], list_item_delimiter); - if (!parseIndexRange(tokens[1], range_low, range_high)) + else { - SWSS_LOG_ERROR("Failed to parse range:%s", tokens[1].c_str()); + SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); return task_process_status::task_invalid_entry; } + for (string port_name : port_names) { Port port; SWSS_LOG_DEBUG("processing port:%s", port_name.c_str()); + + if(local_port == true) + { + port_name = local_port_name; + } + if (!gPortsOrch->getPort(port_name, port)) { SWSS_LOG_ERROR("Port with alias:%s not found", port_name.c_str()); @@ -1449,27 +1912,11 @@ task_process_status QosOrch::handleQueueTable(Consumer& consumer) { queue_ind = ind; SWSS_LOG_DEBUG("processing queue:%zd", queue_ind); - sai_object_id_t sai_scheduler_profile; - string scheduler_profile_name; - resolve_result = resolveFieldRefValue(m_qos_maps, scheduler_field_name, - qos_to_ref_table_map.at(scheduler_field_name), tuple, - sai_scheduler_profile, scheduler_profile_name); - if (ref_resolve_status::success == resolve_result) + + if (!donotChangeScheduler) { - if (op == SET_COMMAND) - { - result = applySchedulerToQueueSchedulerGroup(port, queue_ind, sai_scheduler_profile); - } - else if (op == DEL_COMMAND) - { - // NOTE: The map is un-bound from the port. But the map itself still exists. 
- result = applySchedulerToQueueSchedulerGroup(port, queue_ind, SAI_NULL_OBJECT_ID); - } - else - { - SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); - return task_process_status::task_invalid_entry; - } + result = applySchedulerToQueueSchedulerGroup(port, queue_ind, sai_scheduler_profile); + if (!result) { SWSS_LOG_ERROR("Failed setting field:%s to port:%s, queue:%zd, line:%d", scheduler_field_name.c_str(), port.m_alias.c_str(), queue_ind, __LINE__); @@ -1477,38 +1924,11 @@ task_process_status QosOrch::handleQueueTable(Consumer& consumer) } SWSS_LOG_DEBUG("Applied scheduler to port:%s", port_name.c_str()); } - else if (resolve_result != ref_resolve_status::field_not_found) - { - if(ref_resolve_status::not_resolved == resolve_result) - { - SWSS_LOG_INFO("Missing or invalid scheduler reference"); - return task_process_status::task_need_retry; - } - SWSS_LOG_ERROR("Resolving scheduler reference failed"); - return task_process_status::task_failed; - } - sai_object_id_t sai_wred_profile; - string wred_profile_name; - resolve_result = resolveFieldRefValue(m_qos_maps, wred_profile_field_name, - qos_to_ref_table_map.at(wred_profile_field_name), tuple, - sai_wred_profile, wred_profile_name); - if (ref_resolve_status::success == resolve_result) + if (!donotChangeWredProfile) { - if (op == SET_COMMAND) - { - result = applyWredProfileToQueue(port, queue_ind, sai_wred_profile); - } - else if (op == DEL_COMMAND) - { - // NOTE: The map is un-bound from the port. But the map itself still exists. 
- result = applyWredProfileToQueue(port, queue_ind, SAI_NULL_OBJECT_ID); - } - else - { - SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); - return task_process_status::task_invalid_entry; - } + result = applyWredProfileToQueue(port, queue_ind, sai_wred_profile); + if (!result) { SWSS_LOG_ERROR("Failed setting field:%s to port:%s, queue:%zd, line:%d", wred_profile_field_name.c_str(), port.m_alias.c_str(), queue_ind, __LINE__); @@ -1516,118 +1936,175 @@ task_process_status QosOrch::handleQueueTable(Consumer& consumer) } SWSS_LOG_DEBUG("Applied wred profile to port:%s", port_name.c_str()); } - else if (resolve_result != ref_resolve_status::field_not_found) - { - if (ref_resolve_status::empty == resolve_result) - { - SWSS_LOG_INFO("Missing wred reference. Unbind wred profile from queue"); - // NOTE: The wred profile is un-bound from the port. But the wred profile itself still exists - // and stays untouched. - result = applyWredProfileToQueue(port, queue_ind, SAI_NULL_OBJECT_ID); - if (!result) - { - SWSS_LOG_ERROR("Failed unbinding field:%s from port:%s, queue:%zd, line:%d", wred_profile_field_name.c_str(), port.m_alias.c_str(), queue_ind, __LINE__); - return task_process_status::task_failed; - } - } - else if (ref_resolve_status::not_resolved == resolve_result) - { - SWSS_LOG_INFO("Invalid wred reference"); - return task_process_status::task_need_retry; - } - else - { - SWSS_LOG_ERROR("Resolving wred reference failed"); - return task_process_status::task_failed; - } - } } } SWSS_LOG_DEBUG("finished"); return task_process_status::task_success; } -bool QosOrch::applyMapToPort(Port &port, sai_attr_id_t attr_id, sai_object_id_t map_id) +bool QosOrch::applyDscpToTcMapToSwitch(sai_attr_id_t attr_id, sai_object_id_t map_id) { SWSS_LOG_ENTER(); + /* Query DSCP_TO_TC QoS map at switch capability */ + bool rv = gSwitchOrch->querySwitchCapability(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP); + if (rv == false) + { + SWSS_LOG_ERROR("Switch level DSCP to 
TC QoS map configuration is not supported"); + return true; + } + + /* Apply DSCP_TO_TC QoS map at switch */ sai_attribute_t attr; attr.id = attr_id; attr.value.oid = map_id; - sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &attr); + sai_status_t status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed setting sai object:%" PRIx64 " for port:%s, status:%d", map_id, port.m_alias.c_str(), status); - task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); - if (handle_status != task_success) - { - return parseHandleSaiStatusFailure(handle_status); - } + SWSS_LOG_ERROR("Failed to apply DSCP_TO_TC QoS map to switch rv:%d", status); + return false; } + + SWSS_LOG_NOTICE("Applied DSCP_TO_TC QoS map to switch successfully"); return true; } -task_process_status QosOrch::ResolveMapAndApplyToPort( - Port &port, - sai_port_attr_t port_attr, - string field_name, - KeyOpFieldsValuesTuple &tuple, - string op) +task_process_status QosOrch::handleGlobalQosMap(const string &OP, KeyOpFieldsValuesTuple &tuple) { SWSS_LOG_ENTER(); - sai_object_id_t sai_object = SAI_NULL_OBJECT_ID; - string object_name; - bool result; - ref_resolve_status resolve_result = resolveFieldRefValue(m_qos_maps, field_name, - qos_to_ref_table_map.at(field_name), tuple, sai_object, object_name); - if (ref_resolve_status::success == resolve_result) + task_process_status task_status = task_process_status::task_success; + + if (OP == DEL_COMMAND) { - if (op == SET_COMMAND) + string referenced_obj; + if (!doesObjectExist(m_qos_maps, CFG_PORT_QOS_MAP_TABLE_NAME, PORT_NAME_GLOBAL, dscp_to_tc_field_name, referenced_obj)) { - result = applyMapToPort(port, port_attr, sai_object); + return task_status; } - else if (op == DEL_COMMAND) + // Set SAI_NULL_OBJECT_ID to switch level if PORT_QOS_MAP|global is removed + if (applyDscpToTcMapToSwitch(SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP, SAI_NULL_OBJECT_ID)) { - // NOTE: 
The map is un-bound from the port. But the map itself still exists. - result = applyMapToPort(port, port_attr, SAI_NULL_OBJECT_ID); + removeObject(m_qos_maps, CFG_PORT_QOS_MAP_TABLE_NAME, PORT_NAME_GLOBAL); + task_status = task_process_status::task_success; + SWSS_LOG_INFO("Global QoS map type %s is removed", dscp_to_tc_field_name.c_str()); } else { - SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); - return task_process_status::task_invalid_entry; - } - if (!result) - { - SWSS_LOG_ERROR("Failed setting field:%s to port:%s, line:%d", field_name.c_str(), port.m_alias.c_str(), __LINE__); - return task_process_status::task_failed; + task_status = task_process_status::task_failed; + SWSS_LOG_WARN("Failed to remove switch level QoS map type %s", dscp_to_tc_field_name.c_str()); } - SWSS_LOG_DEBUG("Applied field:%s to port:%s, line:%d", field_name.c_str(), port.m_alias.c_str(), __LINE__); - return task_process_status::task_success; + return task_status; } - else if (resolve_result != ref_resolve_status::field_not_found) + + for (auto it = kfvFieldsValues(tuple).begin(); it != kfvFieldsValues(tuple).end(); it++) { - if(ref_resolve_status::not_resolved == resolve_result) + string map_type_name = fvField(*it); + string map_name = fvValue(*it); + if (map_type_name != dscp_to_tc_field_name) { - SWSS_LOG_INFO("Missing or invalid %s reference", field_name.c_str()); - return task_process_status::task_need_retry; + SWSS_LOG_WARN("Qos map type %s is not supported at global level", map_type_name.c_str()); + continue; + } + + if (qos_to_attr_map.find(map_type_name) != qos_to_attr_map.end()) + { + sai_object_id_t id; + string object_name; + ref_resolve_status status = resolveFieldRefValue(m_qos_maps, map_type_name, qos_to_ref_table_map.at(map_type_name), tuple, id, object_name); + + if (status != ref_resolve_status::success) + { + SWSS_LOG_INFO("Global QoS map %s is not yet created", map_name.c_str()); + task_status = task_process_status::task_need_retry; + } + + if 
(applyDscpToTcMapToSwitch(SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP, id)) + { + setObjectReference(m_qos_maps, CFG_PORT_QOS_MAP_TABLE_NAME, PORT_NAME_GLOBAL, map_type_name, object_name); + task_status = task_process_status::task_success; + SWSS_LOG_INFO("Applied QoS map type %s name %s to switch level", map_type_name.c_str(), object_name.c_str()); + } + else + { + task_status = task_process_status::task_failed; + SWSS_LOG_INFO("Failed to apply QoS map type %s name %s to switch level", map_type_name.c_str(), object_name.c_str()); + } } - SWSS_LOG_ERROR("Resolving %s reference failed", field_name.c_str()); - return task_process_status::task_failed; } - return task_process_status::task_success; + return task_status; } -task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer) +task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple) { SWSS_LOG_ENTER(); - KeyOpFieldsValuesTuple tuple = consumer.m_toSync.begin()->second; string key = kfvKey(tuple); string op = kfvOp(tuple); + if (key == PORT_NAME_GLOBAL) + { + return handleGlobalQosMap(op, tuple); + } + + vector port_names = tokenize(key, list_item_delimiter); + + if (op == DEL_COMMAND) + { + /* Handle DEL command. Just set all the maps to oid:0x0 */ + for (string port_name : port_names) + { + Port port; + + /* Skip port which is not found */ + if (!gPortsOrch->getPort(port_name, port)) + { + SWSS_LOG_ERROR("Failed to apply QoS maps to port %s. 
Port is not found.", port_name.c_str()); + continue; + } + + for (auto &mapRef : qos_to_attr_map) + { + string referenced_obj; + if (!doesObjectExist(m_qos_maps, CFG_PORT_QOS_MAP_TABLE_NAME, key, mapRef.first, referenced_obj)) + { + continue; + } + + sai_attribute_t attr; + attr.id = mapRef.second; + attr.value.oid = SAI_NULL_OBJECT_ID; + + sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove %s on port %s, rv:%d", + mapRef.first.c_str(), port_name.c_str(), status); + task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); + if (handle_status != task_process_status::task_success) + { + return task_process_status::task_invalid_entry; + } + } + SWSS_LOG_INFO("Removed %s on port %s", mapRef.first.c_str(), port_name.c_str()); + } + + if (!gPortsOrch->setPortPfc(port.m_port_id, 0)) + { + SWSS_LOG_ERROR("Failed to disable PFC on port %s", port_name.c_str()); + } + + SWSS_LOG_INFO("Disabled PFC on port %s", port_name.c_str()); + } + + removeObject(m_qos_maps, CFG_PORT_QOS_MAP_TABLE_NAME, key); + + return task_process_status::task_success; + } + sai_uint8_t pfc_enable = 0; + sai_uint8_t pfcwd_sw_enable = 0; map> update_list; for (auto it = kfvFieldsValues(tuple).begin(); it != kfvFieldsValues(tuple).end(); it++) { @@ -1636,7 +2113,7 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer) { sai_object_id_t id; string object_name; - string map_type_name = fvField(*it), map_name = fvValue(*it); + string &map_type_name = fvField(*it), &map_name = fvValue(*it); ref_resolve_status status = resolveFieldRefValue(m_qos_maps, map_type_name, qos_to_ref_table_map.at(map_type_name), tuple, id, object_name); if (status != ref_resolve_status::success) @@ -1646,21 +2123,48 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer) } update_list[qos_to_attr_map[map_type_name]] = make_pair(map_name, id); + setObjectReference(m_qos_maps, 
CFG_PORT_QOS_MAP_TABLE_NAME, key, map_type_name, object_name); } - if (fvField(*it) == pfc_enable_name) + else if (fvField(*it) == pfc_enable_name || fvField(*it) == pfcwd_sw_enable_name) { + sai_uint8_t bitmask = 0; vector queue_indexes; queue_indexes = tokenize(fvValue(*it), list_item_delimiter); for(string q_ind : queue_indexes) { sai_uint8_t q_val = (uint8_t)stoi(q_ind); - pfc_enable |= (uint8_t)(1 << q_val); + bitmask |= (uint8_t)(1 << q_val); + } + + if (fvField(*it) == pfc_enable_name) + { + pfc_enable = bitmask; + } + else + { + pfcwd_sw_enable = bitmask; } } } - vector port_names = tokenize(key, list_item_delimiter); + /* Remove any map that was configured but isn't there any longer. */ + for (auto &mapRef : qos_to_attr_map) + { + auto &sai_attribute = mapRef.second; + if (update_list.find(sai_attribute) == update_list.end()) + { + string referenced_obj; + if (!doesObjectExist(m_qos_maps, CFG_PORT_QOS_MAP_TABLE_NAME, key, mapRef.first, referenced_obj)) + { + continue; + } + SWSS_LOG_NOTICE("PORT_QOS_MAP|%s %s was configured but is not any more. 
Remove it", key.c_str(), mapRef.first.c_str()); + removeMeFromObjsReferencedByMe(m_qos_maps, CFG_PORT_QOS_MAP_TABLE_NAME, key, mapRef.first, referenced_obj); + update_list[mapRef.second] = make_pair("NULL", SAI_NULL_OBJECT_ID); + } + } + for (string port_name : port_names) { Port port; @@ -1708,6 +2212,9 @@ task_process_status QosOrch::handlePortQosMapTable(Consumer& consumer) SWSS_LOG_INFO("Applied PFC bits 0x%x to port %s", pfc_enable, port_name.c_str()); } + + // Save pfd_wd bitmask unconditionally + gPortsOrch->setPortPfcWatchdogStatus(port.m_port_id, pfcwd_sw_enable); } SWSS_LOG_NOTICE("Applied QoS maps to ports"); @@ -1719,12 +2226,13 @@ void QosOrch::doTask() SWSS_LOG_ENTER(); auto *port_qos_map_cfg_exec = getExecutor(CFG_PORT_QOS_MAP_TABLE_NAME); + auto *queue_exec = getExecutor(CFG_QUEUE_TABLE_NAME); for (const auto &it : m_consumerMap) { auto *exec = it.second.get(); - if (exec == port_qos_map_cfg_exec) + if (exec == port_qos_map_cfg_exec || exec == queue_exec) { continue; } @@ -1733,6 +2241,7 @@ void QosOrch::doTask() } port_qos_map_cfg_exec->drain(); + queue_exec->drain(); } void QosOrch::doTask(Consumer &consumer) @@ -1756,7 +2265,7 @@ void QosOrch::doTask(Consumer &consumer) continue; } - auto task_status = (this->*(m_qos_handler_map[qos_map_type_name]))(consumer); + auto task_status = (this->*(m_qos_handler_map[qos_map_type_name]))(consumer, it->second); switch(task_status) { case task_process_status::task_success : @@ -1781,3 +2290,53 @@ void QosOrch::doTask(Consumer &consumer) } } } + +/** + * Function Description: + * @brief Resolve the id of QoS map that is referenced by tunnel + * + * Arguments: + * @param[in] referencing_table_name - The name of table that is referencing the QoS map + * @param[in] tunnle_name - The name of tunnel + * @param[in] map_type_name - The type of referenced QoS map + * @param[in] tuple - The KeyOpFieldsValuesTuple that contains keys - values + * + * Return Values: + * @return The sai_object_id of referenced map, or 
SAI_NULL_OBJECT_ID if there's an error + */ +sai_object_id_t QosOrch::resolveTunnelQosMap(std::string referencing_table_name, std::string tunnel_name, std::string map_type_name, KeyOpFieldsValuesTuple& tuple) +{ + sai_object_id_t id; + string object_name; + ref_resolve_status status = resolveFieldRefValue(m_qos_maps, map_type_name, qos_to_ref_table_map.at(map_type_name), tuple, id, object_name); + if (status == ref_resolve_status::success) + { + + setObjectReference(m_qos_maps, referencing_table_name, tunnel_name, map_type_name, object_name); + SWSS_LOG_INFO("Resolved QoS map for table %s tunnel %s type %s name %s", referencing_table_name.c_str(), tunnel_name.c_str(), map_type_name.c_str(), object_name.c_str()); + return id; + } + else + { + SWSS_LOG_ERROR("Failed to resolve QoS map for table %s tunnel %s type %s", referencing_table_name.c_str(), tunnel_name.c_str(), map_type_name.c_str()); + return SAI_NULL_OBJECT_ID; + } +} + +/** + * Function Description: + * @brief Remove the reference from tunnel object. 
Called after tunnel is removed + * + * Arguments: + * @param[in] referencing_table_name - The name of table that is referencing the QoS map + * @param[in] tunnle_name - The name of tunnel + * + * Return Values: + * @return no return + */ +void QosOrch::removeTunnelReference(std::string referencing_table_name, std::string tunnel_name) +{ + removeObject(m_qos_maps, referencing_table_name, tunnel_name); + SWSS_LOG_INFO("Freed QoS objects referenced by %s:%s", referencing_table_name.c_str(), tunnel_name.c_str()); +} + diff --git a/orchagent/qosorch.h b/orchagent/qosorch.h index cd265d59ec..8079e45bc0 100644 --- a/orchagent/qosorch.h +++ b/orchagent/qosorch.h @@ -14,8 +14,11 @@ const string dot1p_to_tc_field_name = "dot1p_to_tc_map"; const string pfc_to_pg_map_name = "pfc_to_pg_map"; const string pfc_to_queue_map_name = "pfc_to_queue_map"; const string pfc_enable_name = "pfc_enable"; +const string pfcwd_sw_enable_name = "pfcwd_sw_enable"; const string tc_to_pg_map_field_name = "tc_to_pg_map"; const string tc_to_queue_field_name = "tc_to_queue_map"; +const string tc_to_dot1p_field_name = "tc_to_dot1p_map"; +const string tc_to_dscp_field_name = "tc_to_dscp_map"; const string scheduler_field_name = "scheduler"; const string red_max_threshold_field_name = "red_max_threshold"; const string red_min_threshold_field_name = "red_min_threshold"; @@ -28,6 +31,10 @@ const string yellow_drop_probability_field_name = "yellow_drop_probability"; const string green_drop_probability_field_name = "green_drop_probability"; const string dscp_to_fc_field_name = "dscp_to_fc_map"; const string exp_to_fc_field_name = "exp_to_fc_map"; +const string decap_dscp_to_tc_field_name = "decap_dscp_to_tc_map"; +const string decap_tc_to_pg_field_name = "decap_tc_to_pg_map"; +const string encap_tc_to_queue_field_name = "encap_tc_to_queue_map"; +const string encap_tc_to_dscp_field_name = "encap_tc_to_dscp_map"; const string wred_profile_field_name = "wred_profile"; const string wred_red_enable_field_name = 
"wred_red_enable"; @@ -39,7 +46,6 @@ const string scheduler_algo_DWRR = "DWRR"; const string scheduler_algo_WRR = "WRR"; const string scheduler_algo_STRICT = "STRICT"; const string scheduler_weight_field_name = "weight"; -const string scheduler_priority_field_name = "priority"; const string scheduler_meter_type_field_name = "meter_type"; const string scheduler_min_bandwidth_rate_field_name = "cir";//Committed Information Rate const string scheduler_min_bandwidth_burst_rate_field_name = "cbs";//Committed Burst Size @@ -59,7 +65,7 @@ const string ecn_all = "ecn_all"; class QosMapHandler { public: - task_process_status processWorkItem(Consumer& consumer); + task_process_status processWorkItem(Consumer& consumer, KeyOpFieldsValuesTuple &tuple); virtual bool convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector &attributes) = 0; virtual void freeAttribResources(vector &attributes); virtual bool modifyQosItem(sai_object_id_t, vector &attributes); @@ -72,8 +78,7 @@ class DscpToTcMapHandler : public QosMapHandler public: bool convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector &attributes) override; sai_object_id_t addQosItem(const vector &attributes) override; -protected: - void applyDscpToTcMapToSwitch(sai_attr_id_t attr_id, sai_object_id_t sai_dscp_to_tc_map); + bool removeQosItem(sai_object_id_t sai_object); }; class MplsTcToTcMapHandler : public QosMapHandler @@ -108,6 +113,24 @@ class WredMapHandler : public QosMapHandler protected: bool convertEcnMode(string str, sai_ecn_mark_mode_t &ecn_val); bool convertBool(string str, bool &val); +private: + void appendThresholdToAttributeList(sai_attr_id_t type, + sai_uint32_t threshold, + bool needDefer, + vector &normalQueue, + vector &deferredQueue, + sai_uint32_t &newThreshold); + typedef struct { + sai_uint32_t green_max_threshold; + sai_uint32_t green_min_threshold; + sai_uint32_t yellow_max_threshold; + sai_uint32_t yellow_min_threshold; + sai_uint32_t red_max_threshold; + sai_uint32_t 
red_min_threshold; + } qos_wred_thresholds_t; + typedef map qos_wred_thresholds_store_t; + + static qos_wred_thresholds_store_t m_wredProfiles; }; @@ -146,6 +169,21 @@ class ExpToFcMapHandler : public QosMapHandler sai_object_id_t addQosItem(const vector &attributes) override; }; +// Handler for TC_TO_DSCP_MAP +class TcToDscpMapHandler : public QosMapHandler +{ +public: + bool convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector &attributes) override; + sai_object_id_t addQosItem(const vector &attributes) override; +}; + +class TcToDot1pMapHandler : public QosMapHandler +{ +public: + bool convertFieldValuesToAttributes(KeyOpFieldsValuesTuple &tuple, vector &attributes); + sai_object_id_t addQosItem(const vector &attributes); +}; + class QosOrch : public Orch { public: @@ -153,38 +191,42 @@ class QosOrch : public Orch static type_map& getTypeMap(); static type_map m_qos_maps; + + sai_object_id_t resolveTunnelQosMap(std::string referencing_table_name, std::string tunnel_name, std::string map_type_name, KeyOpFieldsValuesTuple& tuple); + void removeTunnelReference(std::string referencing_table_name, std::string tunnel_name); private: void doTask() override; virtual void doTask(Consumer& consumer); - typedef task_process_status (QosOrch::*qos_table_handler)(Consumer& consumer); + typedef task_process_status (QosOrch::*qos_table_handler)(Consumer& consumer, KeyOpFieldsValuesTuple &tuple); typedef map qos_table_handler_map; typedef pair qos_handler_pair; void initTableHandlers(); - task_process_status handleDscpToTcTable(Consumer& consumer); - task_process_status handleMplsTcToTcTable(Consumer& consumer); - task_process_status handleDot1pToTcTable(Consumer& consumer); - task_process_status handlePfcPrioToPgTable(Consumer& consumer); - task_process_status handlePfcToQueueTable(Consumer& consumer); - task_process_status handlePortQosMapTable(Consumer& consumer); - task_process_status handleTcToPgTable(Consumer& consumer); - task_process_status 
handleTcToQueueTable(Consumer& consumer); - task_process_status handleSchedulerTable(Consumer& consumer); - task_process_status handleQueueTable(Consumer& consumer); - task_process_status handleWredProfileTable(Consumer& consumer); - task_process_status handleDscpToFcTable(Consumer& consumer); - task_process_status handleExpToFcTable(Consumer& consumer); + task_process_status handleDscpToTcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple); + task_process_status handleMplsTcToTcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple); + task_process_status handleDot1pToTcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple); + task_process_status handlePfcPrioToPgTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple); + task_process_status handlePfcToQueueTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple); + task_process_status handlePortQosMapTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple); + task_process_status handleTcToPgTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple); + task_process_status handleTcToQueueTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple); + task_process_status handleSchedulerTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple); + task_process_status handleQueueTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple); + task_process_status handleWredProfileTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple); + task_process_status handleDscpToFcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple); + task_process_status handleExpToFcTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple); + task_process_status handleTcToDscpTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple); + task_process_status handleTcToDot1pTable(Consumer& consumer, KeyOpFieldsValuesTuple &tuple); + + task_process_status handleGlobalQosMap(const string &op, KeyOpFieldsValuesTuple &tuple); sai_object_id_t getSchedulerGroup(const Port &port, const sai_object_id_t queue_id); - bool applyMapToPort(Port &port, 
sai_attr_id_t attr_id, sai_object_id_t sai_dscp_to_tc_map); bool applySchedulerToQueueSchedulerGroup(Port &port, size_t queue_ind, sai_object_id_t scheduler_profile_id); bool applyWredProfileToQueue(Port &port, size_t queue_ind, sai_object_id_t sai_wred_profile); - task_process_status ResolveMapAndApplyToPort(Port &port,sai_port_attr_t port_attr, - string field_name, KeyOpFieldsValuesTuple &tuple, string op); - + bool applyDscpToTcMapToSwitch(sai_attr_id_t attr_id, sai_object_id_t sai_dscp_to_tc_map); private: qos_table_handler_map m_qos_handler_map; @@ -196,5 +238,8 @@ class QosOrch : public Orch }; std::unordered_map m_scheduler_group_port_info; + + friend QosMapHandler; + friend DscpToTcMapHandler; }; #endif /* SWSS_QOSORCH_H */ diff --git a/orchagent/response_publisher.cpp b/orchagent/response_publisher.cpp index 5d0490167c..031f1aefef 100644 --- a/orchagent/response_publisher.cpp +++ b/orchagent/response_publisher.cpp @@ -5,13 +5,6 @@ #include #include -#include "timestamp.h" - -extern bool gResponsePublisherRecord; -extern bool gResponsePublisherLogRotate; -extern std::ofstream gResponsePublisherRecordOfs; -extern std::string gResponsePublisherRecordFile; - namespace { @@ -35,27 +28,10 @@ std::string PrependedComponent(const ReturnCode &status) return kOrchagentComponent; } -void PerformLogRotate() -{ - if (!gResponsePublisherLogRotate) - { - return; - } - gResponsePublisherLogRotate = false; - - gResponsePublisherRecordOfs.close(); - gResponsePublisherRecordOfs.open(gResponsePublisherRecordFile); - if (!gResponsePublisherRecordOfs.is_open()) - { - SWSS_LOG_ERROR("Failed to reopen Response Publisher record file %s: %s", gResponsePublisherRecordFile.c_str(), - strerror(errno)); - } -} - void RecordDBWrite(const std::string &table, const std::string &key, const std::vector &attrs, const std::string &op) { - if (!gResponsePublisherRecord) + if (!swss::Recorder::Instance().respub.isRecord()) { return; } @@ -66,16 +42,15 @@ void RecordDBWrite(const std::string 
&table, const std::string &key, const std:: s += "|" + fvField(attr) + ":" + fvValue(attr); } - PerformLogRotate(); - gResponsePublisherRecordOfs << swss::getTimestamp() << "|" << s << std::endl; + swss::Recorder::Instance().respub.record(s); } void RecordResponse(const std::string &response_channel, const std::string &key, const std::vector &attrs, const std::string &status) { - if (!gResponsePublisherRecord) + if (!swss::Recorder::Instance().respub.isRecord()) { - return; + return; } std::string s = response_channel + ":" + key + "|" + status; @@ -84,13 +59,15 @@ void RecordResponse(const std::string &response_channel, const std::string &key, s += "|" + fvField(attr) + ":" + fvValue(attr); } - PerformLogRotate(); - gResponsePublisherRecordOfs << swss::getTimestamp() << "|" << s << std::endl; + swss::Recorder::Instance().respub.record(s); } } // namespace -ResponsePublisher::ResponsePublisher() : m_db("APPL_STATE_DB", 0) +ResponsePublisher::ResponsePublisher(bool buffered) : + m_db(std::make_unique("APPL_STATE_DB", 0)), + m_pipe(std::make_unique(m_db.get())), + m_buffered(buffered) { } @@ -107,17 +84,14 @@ void ResponsePublisher::publish(const std::string &table, const std::string &key } std::string response_channel = "APPL_DB_" + table + "_RESPONSE_CHANNEL"; - if (m_notifiers.find(table) == m_notifiers.end()) - { - m_notifiers[table] = std::make_unique(&m_db, response_channel); - } + swss::NotificationProducer notificationProducer{m_pipe.get(), response_channel, m_buffered}; auto intent_attrs_copy = intent_attrs; // Add error message as the first field-value-pair. swss::FieldValueTuple err_str("err_str", PrependedComponent(status) + status.message()); intent_attrs_copy.insert(intent_attrs_copy.begin(), err_str); // Sends the response to the notification channel. 
- m_notifiers[table]->send(status.codeStr(), key, intent_attrs_copy); + notificationProducer.send(status.codeStr(), key, intent_attrs_copy); RecordResponse(response_channel, key, intent_attrs_copy, status.codeStr()); } @@ -140,17 +114,14 @@ void ResponsePublisher::publish(const std::string &table, const std::string &key void ResponsePublisher::writeToDB(const std::string &table, const std::string &key, const std::vector &values, const std::string &op, bool replace) { - if (m_tables.find(table) == m_tables.end()) - { - m_tables[table] = std::make_unique(&m_db, table); - } + swss::Table applStateTable{m_pipe.get(), table, m_buffered}; auto attrs = values; if (op == SET_COMMAND) { if (replace) { - m_tables[table]->del(key); + applStateTable.del(key); } if (!values.size()) { @@ -160,9 +131,9 @@ void ResponsePublisher::writeToDB(const std::string &table, const std::string &k // Write to DB only if the key does not exist or non-NULL attributes are // being written to the entry. std::vector fv; - if (!m_tables[table]->get(key, fv)) + if (!applStateTable.get(key, fv)) { - m_tables[table]->set(key, attrs); + applStateTable.set(key, attrs); RecordDBWrite(table, key, attrs, op); return; } @@ -179,13 +150,23 @@ void ResponsePublisher::writeToDB(const std::string &table, const std::string &k } if (attrs.size()) { - m_tables[table]->set(key, attrs); + applStateTable.set(key, attrs); RecordDBWrite(table, key, attrs, op); } } else if (op == DEL_COMMAND) { - m_tables[table]->del(key); + applStateTable.del(key); RecordDBWrite(table, key, {}, op); } } + +void ResponsePublisher::flush() +{ + m_pipe->flush(); +} + +void ResponsePublisher::setBuffered(bool buffered) +{ + m_buffered = buffered; +} diff --git a/orchagent/response_publisher.h b/orchagent/response_publisher.h index cd688112e8..ff7bd291e4 100644 --- a/orchagent/response_publisher.h +++ b/orchagent/response_publisher.h @@ -9,6 +9,7 @@ #include "notificationproducer.h" #include "response_publisher_interface.h" #include 
"table.h" +#include "recorder.h" // This class performs two tasks when publish is called: // 1. Sends a notification into the redis channel. @@ -16,7 +17,8 @@ class ResponsePublisher : public ResponsePublisherInterface { public: - explicit ResponsePublisher(); + explicit ResponsePublisher(bool buffered = false); + virtual ~ResponsePublisher() = default; // Intent attributes are the attributes sent in the notification into the @@ -42,10 +44,21 @@ class ResponsePublisher : public ResponsePublisherInterface void writeToDB(const std::string &table, const std::string &key, const std::vector &values, const std::string &op, bool replace = false) override; + /** + * @brief Flush pending responses + */ + void flush(); + + /** + * @brief Set buffering mode + * + * @param buffered Flag whether responses are buffered + */ + void setBuffered(bool buffered); + private: - swss::DBConnector m_db; - // Maps table names to tables. - std::unordered_map> m_tables; - // Maps table names to notifiers. - std::unordered_map> m_notifiers; + std::unique_ptr m_db; + std::unique_ptr m_pipe; + + bool m_buffered{false}; }; diff --git a/orchagent/response_publisher_interface.h b/orchagent/response_publisher_interface.h index 92d364a500..094238b826 100644 --- a/orchagent/response_publisher_interface.h +++ b/orchagent/response_publisher_interface.h @@ -5,32 +5,31 @@ #include "return_code.h" #include "table.h" -class ResponsePublisherInterface { - public: - virtual ~ResponsePublisherInterface() = default; +class ResponsePublisherInterface +{ + public: + virtual ~ResponsePublisherInterface() = default; - // Publishes the response status. - // If intent attributes are empty, it is a delete operation. - // What "publish" needs to do is completely up to implementation. - // This API does not include redis DB namespace. So if implementation chooses - // to write to a redis DB, it will need to use a fixed namespace. - // The replace flag indicates the state attributes will replace the old ones. 
- virtual void publish(const std::string& table, const std::string& key, - const std::vector& intent_attrs, - const ReturnCode& status, - const std::vector& state_attrs, - bool replace = false) = 0; + // Publishes the response status. + // If intent attributes are empty, it is a delete operation. + // What "publish" needs to do is completely up to implementation. + // This API does not include redis DB namespace. So if implementation chooses + // to write to a redis DB, it will need to use a fixed namespace. + // The replace flag indicates the state attributes will replace the old ones. + virtual void publish(const std::string &table, const std::string &key, + const std::vector &intent_attrs, const ReturnCode &status, + const std::vector &state_attrs, bool replace = false) = 0; - // Publishes response status. If response status is OK then also writes the - // intent attributes into the DB. - // The replace flag indicates a replace operation. - virtual void publish(const std::string& table, const std::string& key, - const std::vector& intent_attrs, - const ReturnCode& status, bool replace = false) = 0; + // Publishes response status. If response status is OK then also writes the + // intent attributes into the DB. + // The replace flag indicates a replace operation. + virtual void publish(const std::string &table, const std::string &key, + const std::vector &intent_attrs, const ReturnCode &status, + bool replace = false) = 0; - // Write to DB only. This API does not send notification. - // The replace flag indicates the new attributes will replace the old ones. - virtual void writeToDB(const std::string& table, const std::string& key, - const std::vector& values, - const std::string& op, bool replace = false) = 0; + // Write to DB only. This API does not send notification. + // The replace flag indicates the new attributes will replace the old ones. 
+ virtual void writeToDB(const std::string &table, const std::string &key, + const std::vector &values, const std::string &op, + bool replace = false) = 0; }; diff --git a/orchagent/return_code.h b/orchagent/return_code.h index 87a1a761e1..ed154784b7 100644 --- a/orchagent/return_code.h +++ b/orchagent/return_code.h @@ -145,6 +145,21 @@ using swss::StatusCode; return RETURN_INTERNAL_ERROR_AND_RAISE_CRITICAL_RC_; \ } while (0) +#define SAI_RANGED_STATUS_IS_INVALID_ATTRIBUTE(x) \ + ((SAI_STATUS_CODE(x) & ~(0xFFFFL)) == SAI_STATUS_CODE(SAI_STATUS_INVALID_ATTRIBUTE_0)) + +#define SAI_RANGED_STATUS_IS_INVALID_ATTR_VALUE(x) \ + ((SAI_STATUS_CODE(x) & ~(0xFFFFL)) == SAI_STATUS_CODE(SAI_STATUS_INVALID_ATTR_VALUE_0)) + +#define SAI_RANGED_STATUS_IS_ATTR_NOT_IMPLEMENTED(x) \ + ((SAI_STATUS_CODE(x) & ~(0xFFFFL)) == SAI_STATUS_CODE(SAI_STATUS_ATTR_NOT_IMPLEMENTED_0)) + +#define SAI_RANGED_STATUS_IS_UNKNOWN_ATTRIBUTE(x) \ + ((SAI_STATUS_CODE(x) & ~(0xFFFFL)) == SAI_STATUS_CODE(SAI_STATUS_UNKNOWN_ATTRIBUTE_0)) + +#define SAI_RANGED_STATUS_IS_ATTR_NOT_SUPPORTED(x) \ + ((SAI_STATUS_CODE(x) & ~(0xFFFFL)) == SAI_STATUS_CODE(SAI_STATUS_ATTR_NOT_SUPPORTED_0)) + class ReturnCode { public: @@ -164,7 +179,31 @@ class ReturnCode { if (m_saiStatusCodeLookup.find(status) == m_saiStatusCodeLookup.end()) { - status_ = StatusCode::SWSS_RC_UNKNOWN; + // Check for ranged SAI codes. 
+ if (SAI_RANGED_STATUS_IS_INVALID_ATTRIBUTE(status)) + { + status_ = StatusCode::SWSS_RC_INVALID_PARAM; + } + else if (SAI_RANGED_STATUS_IS_INVALID_ATTR_VALUE(status)) + { + status_ = StatusCode::SWSS_RC_INVALID_PARAM; + } + else if (SAI_RANGED_STATUS_IS_ATTR_NOT_IMPLEMENTED(status)) + { + status_ = StatusCode::SWSS_RC_UNIMPLEMENTED; + } + else if (SAI_RANGED_STATUS_IS_UNKNOWN_ATTRIBUTE(status)) + { + status_ = StatusCode::SWSS_RC_INVALID_PARAM; + } + else if (SAI_RANGED_STATUS_IS_ATTR_NOT_SUPPORTED(status)) + { + status_ = StatusCode::SWSS_RC_UNIMPLEMENTED; + } + else + { + status_ = StatusCode::SWSS_RC_UNKNOWN; + } } else { @@ -259,7 +298,7 @@ class ReturnCode } private: - // SAI codes that are not included in this lookup map will map to + // Non-ranged SAI codes that are not included in this lookup map will map to // SWSS_RC_UNKNOWN. This includes the general SAI failure: SAI_STATUS_FAILURE. std::unordered_map m_saiStatusCodeLookup = { {SAI_STATUS_SUCCESS, StatusCode::SWSS_RC_SUCCESS}, diff --git a/orchagent/routeorch.cpp b/orchagent/routeorch.cpp index e3c27b9818..b8b9056439 100644 --- a/orchagent/routeorch.cpp +++ b/orchagent/routeorch.cpp @@ -5,6 +5,8 @@ #include "nhgorch.h" #include "cbf/cbfnhgorch.h" #include "logger.h" +#include "flowcounterrouteorch.h" +#include "muxorch.h" #include "swssnet.h" #include "crmorch.h" #include "directory.h" @@ -22,6 +24,7 @@ extern CrmOrch *gCrmOrch; extern Directory gDirectory; extern NhgOrch *gNhgOrch; extern CbfNhgOrch *gCbfNhgOrch; +extern FlowCounterRouteOrch *gFlowCounterRouteOrch; extern size_t gMaxBulkSize; @@ -45,6 +48,8 @@ RouteOrch::RouteOrch(DBConnector *db, vector &tableNames, { SWSS_LOG_ENTER(); + m_publisher.setBuffered(true); + sai_attribute_t attr; attr.id = SAI_SWITCH_ATTR_NUMBER_OF_ECMP_GROUPS; @@ -145,7 +150,6 @@ RouteOrch::RouteOrch(DBConnector *db, vector &tableNames, addLinkLocalRouteToMe(gVirtualRouterId, default_link_local_prefix); SWSS_LOG_NOTICE("Created link local ipv6 route %s to cpu", 
default_link_local_prefix.to_string().c_str()); - } std::string RouteOrch::getLinkLocalEui64Addr(void) @@ -212,6 +216,8 @@ void RouteOrch::addLinkLocalRouteToMe(sai_object_id_t vrf_id, IpPrefix linklocal gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV6_ROUTE); + gFlowCounterRouteOrch->onAddMiscRouteEntry(vrf_id, linklocal_prefix.getSubnet()); + SWSS_LOG_NOTICE("Created link local ipv6 route %s to cpu", linklocal_prefix.to_string().c_str()); } @@ -233,6 +239,8 @@ void RouteOrch::delLinkLocalRouteToMe(sai_object_id_t vrf_id, IpPrefix linklocal gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV6_ROUTE); + gFlowCounterRouteOrch->onRemoveMiscRouteEntry(vrf_id, linklocal_prefix.getSubnet()); + SWSS_LOG_NOTICE("Deleted link local ipv6 route %s to cpu", linklocal_prefix.to_string().c_str()); } @@ -494,7 +502,7 @@ void RouteOrch::doTask(Consumer& consumer) auto rc = toBulk.emplace(std::piecewise_construct, std::forward_as_tuple(key, op), - std::forward_as_tuple()); + std::forward_as_tuple(key, (op == SET_COMMAND))); bool inserted = rc.second; auto& ctx = rc.first->second; @@ -625,6 +633,11 @@ void RouteOrch::doTask(Consumer& consumer) if (fvField(i) == "seg_src") srv6_source = fvValue(i); + + if (fvField(i) == "protocol") + { + ctx.protocol = fvValue(i); + } } /* @@ -653,6 +666,8 @@ void RouteOrch::doTask(Consumer& consumer) NextHopGroupKey& nhg = ctx.nhg; vector srv6_segv; vector srv6_src; + bool l3Vni = true; + uint32_t vni = 0; /* Check if the next hop group is owned by the NhgOrch. 
*/ if (nhg_index.empty()) @@ -684,6 +699,23 @@ void RouteOrch::doTask(Consumer& consumer) ipv.resize(alsv.size()); } + for (auto &vni_str: vni_labelv) + { + vni = static_cast(std::stoul(vni_str)); + if (!m_vrfOrch->isL3VniVlan(vni)) + { + SWSS_LOG_WARN("Route %s is received on non L3 VNI %s", key.c_str(), vni_str.c_str()); + l3Vni = false; + break; + } + } + + if (!l3Vni) + { + it++; + continue; + } + /* Set the empty ip(s) to zero * as IpAddress("") will construct a incorrect ip. */ for (auto &ip : ipv) @@ -826,6 +858,10 @@ void RouteOrch::doTask(Consumer& consumer) /* fullmask subnet route is same as ip2me route */ else if (ip_prefix.isFullMask() && m_intfsOrch->isPrefixSubnet(ip_prefix, alsv[0])) { + /* The prefix is full mask (/32 or /128) and it is an interface subnet route, so IntfOrch has already + * created an IP2ME route for it and we skip programming such route here as it already exists. + * However, to keep APPL_DB and APPL_STATE_DB consistent we have to publish it. */ + publishRouteState(ctx); it = consumer.m_toSync.erase(it); } /* subnet route, vrf leaked route, etc */ @@ -855,7 +891,9 @@ void RouteOrch::doTask(Consumer& consumer) } else { - /* Duplicate entry */ + /* Duplicate entry. Publish route state anyway since there could be multiple DEL, SET operations + * consolidated by ConsumerStateTable leading to orchagent receiving only the last SET update. 
*/ + publishRouteState(ctx); it = consumer.m_toSync.erase(it); } @@ -1547,6 +1585,24 @@ bool RouteOrch::updateNextHopRoutes(const NextHopKey& nextHop, uint32_t& numRout auto rt = it->second.begin(); while(rt != it->second.end()) { + /* Check if route is mux multi-nexthop route + * we define this as a route present in + * mux_multi_active_nh_table + * These routes originally point to NHG and should be handled by updateRoute() + */ + MuxOrch* mux_orch = gDirectory.get(); + if (mux_orch->isMultiNexthopRoute((*rt).prefix)) + { + /* multiple mux nexthop case: + * skip for now, muxOrch::updateRoute() will handle route + */ + SWSS_LOG_INFO("Route %s is mux multi nexthop route, skipping.", + (*rt).prefix.to_string().c_str()); + + ++rt; + continue; + } + SWSS_LOG_INFO("Updating route %s", (*rt).prefix.to_string().c_str()); next_hop_id = m_neighOrch->getNextHopId(nextHop); @@ -1575,6 +1631,24 @@ bool RouteOrch::updateNextHopRoutes(const NextHopKey& nextHop, uint32_t& numRout return true; } +/** + * @brief returns a route prefix associated with nexthopkey + * @param routeKeys empty set of routekeys to populate + * @param nexthopKey nexthop key to lookup + * @return true if found, false if not found. 
+ */ +bool RouteOrch::getRoutesForNexthop(std::set& routeKeys, const NextHopKey& nexthopKey) +{ + auto it = m_nextHops.find(nexthopKey); + + if (it != m_nextHops.end()) + { + routeKeys = it->second; + } + + return it != m_nextHops.end(); +} + void RouteOrch::addTempRoute(RouteBulkContext& ctx, const NextHopGroupKey &nextHops) { SWSS_LOG_ENTER(); @@ -2140,15 +2214,22 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey { decreaseNextHopRefCount(it_route->second.nhg_key); auto ol_nextHops = it_route->second.nhg_key; - if (ol_nextHops.getSize() > 1 - && m_syncdNextHopGroups[ol_nextHops].ref_count == 0) + if (ol_nextHops.getSize() > 1) { - m_bulkNhgReducedRefCnt.emplace(ol_nextHops, 0); + if (m_syncdNextHopGroups[ol_nextHops].ref_count == 0) + { + SWSS_LOG_NOTICE("Update Nexthop Group %s", ol_nextHops.to_string().c_str()); + m_bulkNhgReducedRefCnt.emplace(ol_nextHops, 0); + } } else if (ol_nextHops.is_overlay_nexthop()) { - SWSS_LOG_NOTICE("Update overlay Nexthop %s", ol_nextHops.to_string().c_str()); - m_bulkNhgReducedRefCnt.emplace(ol_nextHops, vrf_id); + const NextHopKey& nexthop = *it_route->second.nhg_key.getNextHops().begin(); + if (m_neighOrch->getNextHopRefCount(nexthop) == 0) + { + SWSS_LOG_NOTICE("Update overlay Nexthop %s", ol_nextHops.to_string().c_str()); + m_bulkNhgReducedRefCnt.emplace(ol_nextHops, vrf_id); + } } else if (ol_nextHops.is_srv6_nexthop()) { @@ -2197,6 +2278,7 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey ipPrefix.to_string().c_str(), nextHops.to_string().c_str()); } + MuxOrch* mux_orch = gDirectory.get(); if (ctx.nhg_index.empty() && nextHops.getSize() == 1 && !nextHops.is_overlay_nexthop() && !nextHops.is_srv6_nexthop()) { RouteKey r_key = { vrf_id, ipPrefix }; @@ -2206,16 +2288,42 @@ bool RouteOrch::addRoutePost(const RouteBulkContext& ctx, const NextHopGroupKey addNextHopRoute(nexthop, r_key); } } + else if (mux_orch->isMuxNexthops(nextHops)) + { + RouteKey routekey = { 
vrf_id, ipPrefix }; + auto nexthop_list = nextHops.getNextHops(); + for (auto nh = nexthop_list.begin(); nh != nexthop_list.end(); nh++) + { + if (!nh->ip_address.isZero()) + { + addNextHopRoute(*nh, routekey); + } + } + } if (ipPrefix.isDefaultRoute()) { updateDefRouteState(ipPrefix.to_string(), true); } + if (it_route == m_syncdRoutes.at(vrf_id).end()) + { + gFlowCounterRouteOrch->handleRouteAdd(vrf_id, ipPrefix); + } + m_syncdRoutes[vrf_id][ipPrefix] = RouteNhg(nextHops, ctx.nhg_index); + // update routes to reflect mux state + if (mux_orch->isMuxNexthops(nextHops)) + { + mux_orch->updateRoute(ipPrefix, true); + } + notifyNextHopChangeObservers(vrf_id, ipPrefix, nextHops, true); + /* Publish and update APPL STATE DB route entry programming status */ + publishRouteState(ctx); + /* * If the route uses a temporary synced NHG owned by NhgOrch, return false * in order to keep trying to update the route in case the NHG is updated, @@ -2373,16 +2481,37 @@ bool RouteOrch::removeRoutePost(const RouteBulkContext& ctx) decreaseNextHopRefCount(it_route->second.nhg_key); auto ol_nextHops = it_route->second.nhg_key; - - if (it_route->second.nhg_key.getSize() > 1 - && m_syncdNextHopGroups[it_route->second.nhg_key].ref_count == 0) + MuxOrch* mux_orch = gDirectory.get(); + if (it_route->second.nhg_key.getSize() > 1) { - m_bulkNhgReducedRefCnt.emplace(it_route->second.nhg_key, 0); + if (m_syncdNextHopGroups[it_route->second.nhg_key].ref_count == 0) + { + SWSS_LOG_NOTICE("Remove Nexthop Group %s", ol_nextHops.to_string().c_str()); + m_bulkNhgReducedRefCnt.emplace(it_route->second.nhg_key, 0); + if (mux_orch->isMuxNexthops(ol_nextHops)) + { + SWSS_LOG_NOTICE("Remove mux Nexthop %s", ol_nextHops.to_string().c_str()); + RouteKey routekey = { vrf_id, ipPrefix }; + auto nexthop_list = ol_nextHops.getNextHops(); + for (auto nh = nexthop_list.begin(); nh != nexthop_list.end(); nh++) + { + if (!nh->ip_address.isZero()) + { + removeNextHopRoute(*nh, routekey); + } + } + 
mux_orch->updateRoute(ipPrefix, false); + } + } } else if (ol_nextHops.is_overlay_nexthop()) { - SWSS_LOG_NOTICE("Remove overlay Nexthop %s", ol_nextHops.to_string().c_str()); - m_bulkNhgReducedRefCnt.emplace(ol_nextHops, vrf_id); + const NextHopKey& nexthop = *it_route->second.nhg_key.getNextHops().begin(); + if (m_neighOrch->getNextHopRefCount(nexthop) == 0) + { + SWSS_LOG_NOTICE("Remove overlay Nexthop %s", ol_nextHops.to_string().c_str()); + m_bulkNhgReducedRefCnt.emplace(ol_nextHops, vrf_id); + } } /* * Additionally check if the NH has label and its ref count == 0, then @@ -2409,6 +2538,9 @@ bool RouteOrch::removeRoutePost(const RouteBulkContext& ctx) SWSS_LOG_INFO("Remove route %s with next hop(s) %s", ipPrefix.to_string().c_str(), it_route->second.nhg_key.to_string().c_str()); + + /* Publish removal status, removes route entry from APPL STATE DB */ + publishRouteState(ctx); if (ipPrefix.isDefaultRoute() && vrf_id == gVirtualRouterId) { @@ -2429,6 +2561,8 @@ bool RouteOrch::removeRoutePost(const RouteBulkContext& ctx) m_syncdRoutes.erase(vrf_id); m_vrfOrch->decreaseVrfRefCount(vrf_id); } + + gFlowCounterRouteOrch->handleRouteRemove(vrf_id, ipPrefix); } return true; @@ -2563,3 +2697,22 @@ void RouteOrch::decNhgRefCount(const std::string &nhg_index) gCbfNhgOrch->decNhgRefCount(nhg_index); } } + +void RouteOrch::publishRouteState(const RouteBulkContext& ctx, const ReturnCode& status) +{ + SWSS_LOG_ENTER(); + + std::vector fvs; + + /* Leave the fvs empty if the operation type is "DEL". 
+ * An empty fvs makes ResponsePublisher::publish() remove the state entry from APPL_STATE_DB + */ + if (ctx.is_set) + { + fvs.emplace_back("protocol", ctx.protocol); + } + + const bool replace = false; + + m_publisher.publish(APP_ROUTE_TABLE_NAME, ctx.key, fvs, status, replace); +} diff --git a/orchagent/routeorch.h b/orchagent/routeorch.h index 2c8826ecf7..b232137766 100644 --- a/orchagent/routeorch.h +++ b/orchagent/routeorch.h @@ -122,8 +122,12 @@ struct RouteBulkContext // using_temp_nhg will track if the NhgOrch's owned NHG is temporary or not bool using_temp_nhg; - RouteBulkContext() - : excp_intfs_flag(false), using_temp_nhg(false) + std::string key; // Key in database table + std::string protocol; // Protocol string + bool is_set; // True if set operation + + RouteBulkContext(const std::string& key, bool is_set) + : key(key), excp_intfs_flag(false), using_temp_nhg(false), is_set(is_set) { } @@ -139,6 +143,8 @@ struct RouteBulkContext excp_intfs_flag = false; vrf_id = SAI_NULL_OBJECT_ID; using_temp_nhg = false; + key.clear(); + protocol.clear(); } }; @@ -195,6 +201,7 @@ class RouteOrch : public Orch, public Subject void addNextHopRoute(const NextHopKey&, const RouteKey&); void removeNextHopRoute(const NextHopKey&, const RouteKey&); bool updateNextHopRoutes(const NextHopKey&, uint32_t&); + bool getRoutesForNexthop(std::set&, const NextHopKey&); bool validnexthopinNextHopGroup(const NextHopKey&, uint32_t&); bool invalidnexthopinNextHopGroup(const NextHopKey&, uint32_t&); @@ -214,10 +221,11 @@ class RouteOrch : public Orch, public Subject unsigned int getNhgCount() { return m_nextHopGroupCount; } unsigned int getMaxNhgCount() { return m_maxNextHopGroupCount; } - + void increaseNextHopGroupCount(); void decreaseNextHopGroupCount(); bool checkNextHopGroupCount(); + const RouteTables& getSyncdRoutes() const { return m_syncdRoutes; } private: SwitchOrch *m_switchOrch; @@ -268,6 +276,8 @@ class RouteOrch : public Orch, public Subject const NhgBase &getNhg(const 
std::string& nhg_index); void incNhgRefCount(const std::string& nhg_index); void decNhgRefCount(const std::string& nhg_index); + + void publishRouteState(const RouteBulkContext& ctx, const ReturnCode& status = ReturnCode(SAI_STATUS_SUCCESS)); }; #endif /* SWSS_ROUTEORCH_H */ diff --git a/orchagent/saiattr.cpp b/orchagent/saiattr.cpp index 1c24489ed5..fb1d320fe4 100644 --- a/orchagent/saiattr.cpp +++ b/orchagent/saiattr.cpp @@ -66,12 +66,10 @@ sai_attr_id_t SaiAttrWrapper::getAttrId() const void SaiAttrWrapper::swap(SaiAttrWrapper&& other) { - m_objectType = other.m_objectType; - m_meta = other.m_meta; - m_attr = other.m_attr; - m_serializedAttr = other.m_serializedAttr; - other.m_attr = sai_attribute_t{}; - other.m_serializedAttr.clear(); + std::swap(m_objectType, other.m_objectType); + std::swap(m_meta, other.m_meta); + std::swap(m_attr, other.m_attr); + std::swap(m_serializedAttr, other.m_serializedAttr); } void SaiAttrWrapper::init( diff --git a/orchagent/saihelper.cpp b/orchagent/saihelper.cpp index 3b409f7217..d1dc472d7d 100644 --- a/orchagent/saihelper.cpp +++ b/orchagent/saihelper.cpp @@ -71,12 +71,10 @@ sai_srv6_api_t** sai_srv6_api;; sai_l2mc_group_api_t* sai_l2mc_group_api; sai_counter_api_t* sai_counter_api; sai_bfd_api_t* sai_bfd_api; +sai_my_mac_api_t* sai_my_mac_api; +sai_generic_programmable_api_t* sai_generic_programmable_api; extern sai_object_id_t gSwitchId; -extern bool gSairedisRecord; -extern bool gSwssRecord; -extern ofstream gRecordOfs; -extern string gRecordFile; static map hardware_access_map = { @@ -199,6 +197,8 @@ void initSaiApi() sai_api_query(SAI_API_L2MC_GROUP, (void **)&sai_l2mc_group_api); sai_api_query(SAI_API_COUNTER, (void **)&sai_counter_api); sai_api_query(SAI_API_BFD, (void **)&sai_bfd_api); + sai_api_query(SAI_API_MY_MAC, (void **)&sai_my_mac_api); + sai_api_query(SAI_API_GENERIC_PROGRAMMABLE, (void **)&sai_generic_programmable_api); sai_log_set(SAI_API_SWITCH, SAI_LOG_LEVEL_NOTICE); sai_log_set(SAI_API_BRIDGE, 
SAI_LOG_LEVEL_NOTICE); @@ -236,9 +236,11 @@ void initSaiApi() sai_log_set(SAI_API_L2MC_GROUP, SAI_LOG_LEVEL_NOTICE); sai_log_set(SAI_API_COUNTER, SAI_LOG_LEVEL_NOTICE); sai_log_set(SAI_API_BFD, SAI_LOG_LEVEL_NOTICE); + sai_log_set(SAI_API_MY_MAC, SAI_LOG_LEVEL_NOTICE); + sai_log_set(SAI_API_GENERIC_PROGRAMMABLE, SAI_LOG_LEVEL_NOTICE); } -void initSaiRedis(const string &record_location, const std::string &record_filename) +void initSaiRedis() { /** * NOTE: Notice that all Redis attributes here are using SAI_NULL_OBJECT_ID @@ -249,9 +251,11 @@ void initSaiRedis(const string &record_location, const std::string &record_filen sai_attribute_t attr; sai_status_t status; - /* set recording dir before enable recording */ + auto record_filename = Recorder::Instance().sairedis.getFile(); + auto record_location = Recorder::Instance().sairedis.getLoc(); - if (gSairedisRecord) + /* set recording dir before enable recording */ + if (Recorder::Instance().sairedis.isRecord()) { attr.id = SAI_REDIS_SWITCH_ATTR_RECORDING_OUTPUT_DIR; attr.value.s8list.count = (uint32_t)record_location.size(); @@ -280,15 +284,14 @@ void initSaiRedis(const string &record_location, const std::string &record_filen } /* Disable/enable SAI Redis recording */ - attr.id = SAI_REDIS_SWITCH_ATTR_RECORD; - attr.value.booldata = gSairedisRecord; + attr.value.booldata = Recorder::Instance().sairedis.isRecord(); status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); if (status != SAI_STATUS_SUCCESS) { SWSS_LOG_ERROR("Failed to %s SAI Redis recording, rv:%d", - gSairedisRecord ? "enable" : "disable", status); + Recorder::Instance().sairedis.isRecord() ? 
"enable" : "disable", status); exit(EXIT_FAILURE); } @@ -304,7 +307,7 @@ void initSaiRedis(const string &record_location, const std::string &record_filen SWSS_LOG_NOTICE("Enable redis pipeline"); char *platform = getenv("platform"); - if (platform && strstr(platform, MLNX_PLATFORM_SUBSTRING)) + if (platform && (strstr(platform, MLNX_PLATFORM_SUBSTRING) || strstr(platform, XS_PLATFORM_SUBSTRING))) { /* We set this long timeout in order for Orchagent to wait enough time for * response from syncd. It is needed since in init, systemd syncd startup @@ -334,7 +337,7 @@ void initSaiRedis(const string &record_location, const std::string &record_filen } SWSS_LOG_NOTICE("Notify syncd INIT_VIEW"); - if (platform && strstr(platform, MLNX_PLATFORM_SUBSTRING)) + if (platform && (strstr(platform, MLNX_PLATFORM_SUBSTRING) || strstr(platform, XS_PLATFORM_SUBSTRING))) { /* Set timeout back to the default value */ attr.id = SAI_REDIS_SWITCH_ATTR_SYNC_OPERATION_RESPONSE_TIMEOUT; @@ -359,9 +362,6 @@ sai_status_t initSaiPhyApi(swss::gearbox_phy_t *phy) sai_status_t status; char fwPath[PATH_MAX]; char hwinfo[HWINFO_MAX_SIZE + 1]; - char hwinfoIntf[IFNAMSIZ + 1]; - unsigned int hwinfoPhyid; - int ret; SWSS_LOG_ENTER(); @@ -377,19 +377,11 @@ sai_status_t initSaiPhyApi(swss::gearbox_phy_t *phy) attr.value.u32 = 0; attrs.push_back(attr); - ret = sscanf(phy->hwinfo.c_str(), "%" STR(IFNAMSIZ) "[^/]/%u", hwinfoIntf, &hwinfoPhyid); - if (ret != 2) { - SWSS_LOG_ERROR("BOX: hardware info doesn't match the 'interface_name/phyid' " - "format"); - return SAI_STATUS_FAILURE; - } - - if (hwinfoPhyid > std::numeric_limits::max()) { - SWSS_LOG_ERROR("BOX: phyid is bigger than maximum limit"); - return SAI_STATUS_FAILURE; + if( phy->hwinfo.length() > HWINFO_MAX_SIZE ) { + SWSS_LOG_ERROR( "hwinfo string attribute is too long." 
); + return SAI_STATUS_FAILURE; } - - strcpy(hwinfo, phy->hwinfo.c_str()); + strncpy(hwinfo, phy->hwinfo.c_str(), phy->hwinfo.length()); attr.id = SAI_SWITCH_ATTR_SWITCH_HARDWARE_INFO; attr.value.s8list.count = (uint32_t) phy->hwinfo.length(); @@ -452,17 +444,327 @@ sai_status_t initSaiPhyApi(swss::gearbox_phy_t *phy) phy->phy_oid = sai_serialize_object_id(phyOid); - attr.id = SAI_SWITCH_ATTR_FIRMWARE_MAJOR_VERSION; - status = sai_switch_api->get_switch_attribute(phyOid, 1, &attr); - if (status != SAI_STATUS_SUCCESS) + if (phy->firmware.length() != 0) { - SWSS_LOG_ERROR("BOX: Failed to get firmware major version:%d rtn:%d", phy->phy_id, status); - return status; + attr.id = SAI_SWITCH_ATTR_FIRMWARE_MAJOR_VERSION; + status = sai_switch_api->get_switch_attribute(phyOid, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("BOX: Failed to get firmware major version for hwinfo:%s, phy:%d, rtn:%d", + phy->hwinfo.c_str(), phy->phy_id, status); + return status; + } + else + { + phy->firmware_major_version = string(attr.value.chardata); + } } - else + return status; +} + +task_process_status handleSaiCreateStatus(sai_api_t api, sai_status_t status, void *context) +{ + /* + * This function aims to provide coarse handling of failures in sairedis create + * operation (i.e., notify users by throwing excepions when failures happen). + * Return value: task_success - Handled the status successfully. No need to retry this SAI operation. + * task_need_retry - Cannot handle the status. Need to retry the SAI operation. + * task_failed - Failed to handle the status but another attempt is unlikely to resolve the failure. + * TODO: 1. Add general handling logic for specific statuses (e.g., SAI_STATUS_ITEM_ALREADY_EXISTS) + * 2. Develop fine-grain failure handling mechanisms and replace this coarse handling + * in each orch. + * 3. Take the type of sai api into consideration. 
+ */ + switch (api) { - phy->firmware_major_version = string(attr.value.chardata); + case SAI_API_FDB: + switch (status) + { + case SAI_STATUS_SUCCESS: + SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiCreateStatus"); + return task_success; + case SAI_STATUS_ITEM_ALREADY_EXISTS: + /* + * In FDB creation, there are scenarios where the hardware learns an FDB entry before orchagent. + * In such cases, the FDB SAI creation would report the status of SAI_STATUS_ITEM_ALREADY_EXISTS, + * and orchagent should ignore the error and treat it as entry was explicitly created. + */ + return task_success; + default: + SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + handleSaiFailure(true); + break; + } + break; + case SAI_API_HOSTIF: + switch (status) + { + case SAI_STATUS_SUCCESS: + return task_success; + case SAI_STATUS_FAILURE: + /* + * Host interface maybe failed due to lane not available. + * In some scenarios, like SONiC virtual machine, the invalid lane may be not enabled by VM configuration, + * So just ignore the failure and report an error log. + */ + return task_ignore; + default: + SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + handleSaiFailure(true); + break; + } + break; + case SAI_API_ROUTE: + switch (status) + { + case SAI_STATUS_SUCCESS: + SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiCreateStatus"); + return task_success; + case SAI_STATUS_ITEM_ALREADY_EXISTS: + case SAI_STATUS_NOT_EXECUTED: + /* With VNET routes, the same route can be learned via multiple + sources, like via BGP. 
Handle this gracefully */ + return task_success; + case SAI_STATUS_TABLE_FULL: + return task_need_retry; + default: + SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + handleSaiFailure(true); + break; + } + break; + case SAI_API_NEIGHBOR: + case SAI_API_NEXT_HOP: + case SAI_API_NEXT_HOP_GROUP: + switch(status) + { + case SAI_STATUS_SUCCESS: + SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiCreateStatus"); + return task_success; + case SAI_STATUS_ITEM_ALREADY_EXISTS: + return task_success; + case SAI_STATUS_TABLE_FULL: + return task_need_retry; + default: + SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + handleSaiFailure(true); + break; + } + break; + default: + switch (status) + { + case SAI_STATUS_SUCCESS: + SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiCreateStatus"); + return task_success; + default: + SWSS_LOG_ERROR("Encountered failure in create operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + handleSaiFailure(true); + break; + } } + return task_need_retry; +} - return status; +task_process_status handleSaiSetStatus(sai_api_t api, sai_status_t status, void *context) +{ + /* + * This function aims to provide coarse handling of failures in sairedis set + * operation (i.e., notify users by throwing excepions when failures happen). + * Return value: task_success - Handled the status successfully. No need to retry this SAI operation. + * task_need_retry - Cannot handle the status. Need to retry the SAI operation. + * task_failed - Failed to handle the status but another attempt is unlikely to resolve the failure. + * TODO: 1. Add general handling logic for specific statuses + * 2. 
Develop fine-grain failure handling mechanisms and replace this coarse handling + * in each orch. + * 3. Take the type of sai api into consideration. + */ + if (status == SAI_STATUS_SUCCESS) + { + SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiSetStatus"); + return task_success; + } + + switch (api) + { + case SAI_API_PORT: + switch (status) + { + case SAI_STATUS_INVALID_ATTR_VALUE_0: + /* + * If user gives an invalid attribute value, no need to retry or exit orchagent, just fail the current task + * and let user correct the configuration. + */ + SWSS_LOG_ERROR("Encountered SAI_STATUS_INVALID_ATTR_VALUE_0 in set operation, task failed, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + return task_failed; + default: + SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + handleSaiFailure(true); + break; + } + break; + case SAI_API_TUNNEL: + switch (status) + { + case SAI_STATUS_ATTR_NOT_SUPPORTED_0: + SWSS_LOG_ERROR("Encountered SAI_STATUS_ATTR_NOT_SUPPORTED_0 in set operation, task failed, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + return task_failed; + default: + SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + handleSaiFailure(true); + break; + } + break; + default: + SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + handleSaiFailure(true); + break; + } + + return task_need_retry; +} + +task_process_status handleSaiRemoveStatus(sai_api_t api, sai_status_t status, void *context) +{ + /* + * This function aims to provide coarse handling of failures in sairedis remove + * operation 
(i.e., notify users by throwing excepions when failures happen). + * Return value: task_success - Handled the status successfully. No need to retry this SAI operation. + * task_need_retry - Cannot handle the status. Need to retry the SAI operation. + * task_failed - Failed to handle the status but another attempt is unlikely to resolve the failure. + * TODO: 1. Add general handling logic for specific statuses (e.g., SAI_STATUS_OBJECT_IN_USE, + * SAI_STATUS_ITEM_NOT_FOUND) + * 2. Develop fine-grain failure handling mechanisms and replace this coarse handling + * in each orch. + * 3. Take the type of sai api into consideration. + */ + switch (api) + { + case SAI_API_ROUTE: + switch (status) + { + case SAI_STATUS_SUCCESS: + SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiRemoveStatus"); + return task_success; + case SAI_STATUS_ITEM_NOT_FOUND: + case SAI_STATUS_NOT_EXECUTED: + /* When the same route is learned via multiple sources, + there can be a duplicate remove operation. Handle this gracefully */ + return task_success; + default: + SWSS_LOG_ERROR("Encountered failure in remove operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + handleSaiFailure(true); + break; + } + break; + case SAI_API_NEIGHBOR: + case SAI_API_NEXT_HOP: + case SAI_API_NEXT_HOP_GROUP: + switch (status) + { + case SAI_STATUS_SUCCESS: + SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiRemoveStatus"); + return task_success; + case SAI_STATUS_ITEM_NOT_FOUND: + return task_success; + default: + SWSS_LOG_ERROR("Encountered failure in remove operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + handleSaiFailure(true); + break; + } + break; + default: + switch (status) + { + case SAI_STATUS_SUCCESS: + SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiRemoveStatus"); + return task_success; + default: + 
SWSS_LOG_ERROR("Encountered failure in remove operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + handleSaiFailure(true); + break; + } + } + return task_need_retry; +} + +task_process_status handleSaiGetStatus(sai_api_t api, sai_status_t status, void *context) +{ + /* + * This function aims to provide coarse handling of failures in sairedis get + * operation (i.e., notify users by throwing excepions when failures happen). + * Return value: task_success - Handled the status successfully. No need to retry this SAI operation. + * task_need_retry - Cannot handle the status. Need to retry the SAI operation. + * task_failed - Failed to handle the status but another attempt is unlikely to resolve the failure. + * TODO: 1. Add general handling logic for specific statuses + * 2. Develop fine-grain failure handling mechanisms and replace this coarse handling + * in each orch. + * 3. Take the type of sai api into consideration. + */ + switch (status) + { + case SAI_STATUS_SUCCESS: + SWSS_LOG_WARN("SAI_STATUS_SUCCESS is not expected in handleSaiGetStatus"); + return task_success; + case SAI_STATUS_NOT_IMPLEMENTED: + SWSS_LOG_ERROR("Encountered failure in get operation due to the function is not implemented, exiting orchagent, SAI API: %s", + sai_serialize_api(api).c_str()); + throw std::logic_error("SAI get function not implemented"); + default: + SWSS_LOG_ERROR("Encountered failure in get operation, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + } + return task_failed; +} + +bool parseHandleSaiStatusFailure(task_process_status status) +{ + /* + * This function parses task process status from SAI failure handling function to whether a retry is needed. + * Return value: true - no retry is needed. + * false - retry is needed. 
+ */ + switch (status) + { + case task_need_retry: + return false; + case task_failed: + return true; + default: + SWSS_LOG_WARN("task_process_status %d is not expected in parseHandleSaiStatusFailure", status); + } + return true; +} + +/* Handling SAI failure. Request redis to invoke SAI failure dump and abort if set*/ +void handleSaiFailure(bool abort_on_failure) +{ + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + + attr.id = SAI_REDIS_SWITCH_ATTR_NOTIFY_SYNCD; + attr.value.s32 = SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP; + sai_status_t status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to take sai failure dump %d", status); + } + if (abort_on_failure) + { + abort(); + } } diff --git a/orchagent/saihelper.h b/orchagent/saihelper.h index a0b2aa2fac..b83f894c2e 100644 --- a/orchagent/saihelper.h +++ b/orchagent/saihelper.h @@ -3,10 +3,19 @@ #include "gearboxutils.h" #include +#include "orch.h" #define IS_ATTR_ID_IN_RANGE(attrId, objectType, attrPrefix) \ ((attrId) >= SAI_ ## objectType ## _ATTR_ ## attrPrefix ## _START && (attrId) <= SAI_ ## objectType ## _ATTR_ ## attrPrefix ## _END) void initSaiApi(); -void initSaiRedis(const std::string &record_location, const std::string &record_filename); +void initSaiRedis(); sai_status_t initSaiPhyApi(swss::gearbox_phy_t *phy); + +/* Handling SAI status*/ +task_process_status handleSaiCreateStatus(sai_api_t api, sai_status_t status, void *context = nullptr); +task_process_status handleSaiSetStatus(sai_api_t api, sai_status_t status, void *context = nullptr); +task_process_status handleSaiRemoveStatus(sai_api_t api, sai_status_t status, void *context = nullptr); +task_process_status handleSaiGetStatus(sai_api_t api, sai_status_t status, void *context = nullptr); +bool parseHandleSaiStatusFailure(task_process_status status); +void handleSaiFailure(bool abort_on_failure); diff --git a/orchagent/sfloworch.cpp b/orchagent/sfloworch.cpp index 
ac76d23004..2ec367b412 100644 --- a/orchagent/sfloworch.cpp +++ b/orchagent/sfloworch.cpp @@ -83,7 +83,7 @@ bool SflowOrch::sflowUpdateRate(sai_object_id_t port_id, uint32_t rate) if (port_info->second.admin_state) { - if (!sflowAddPort(new_session.m_sample_id, port_id)) + if (!sflowAddPort(new_session.m_sample_id, port_id, port_info->second.m_sample_dir)) { return false; } @@ -107,49 +107,155 @@ bool SflowOrch::sflowUpdateRate(sai_object_id_t port_id, uint32_t rate) return true; } -bool SflowOrch::sflowAddPort(sai_object_id_t sample_id, sai_object_id_t port_id) +bool SflowOrch::sflowAddPort(sai_object_id_t sample_id, sai_object_id_t port_id, string direction) { sai_attribute_t attr; sai_status_t sai_rc; - attr.id = SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE; - attr.value.oid = sample_id; - sai_rc = sai_port_api->set_port_attribute(port_id, &attr); + SWSS_LOG_DEBUG("sflowAddPort %" PRIx64 " portOid %" PRIx64 " dir %s", + sample_id, port_id, direction.c_str()); - if (sai_rc != SAI_STATUS_SUCCESS) + if (direction == "both" || direction == "rx") { - SWSS_LOG_ERROR("Failed to set session %" PRIx64 " on port %" PRIx64 , sample_id, port_id); - task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, sai_rc); - if (handle_status != task_success) + attr.id = SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE; + attr.value.oid = sample_id; + sai_rc = sai_port_api->set_port_attribute(port_id, &attr); + + if (sai_rc != SAI_STATUS_SUCCESS) { - return parseHandleSaiStatusFailure(handle_status); + SWSS_LOG_ERROR("Failed to set session %" PRIx64 " on port %" PRIx64, sample_id, port_id); + task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, sai_rc); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + } + + if (direction == "both" || direction == "tx") + { + attr.id = SAI_PORT_ATTR_EGRESS_SAMPLEPACKET_ENABLE; + attr.value.oid = sample_id; + sai_rc = sai_port_api->set_port_attribute(port_id, &attr); + + if 
(sai_rc != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set session %" PRIx64 " on port %" PRIx64, sample_id, port_id); + task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, sai_rc); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + } + return true; +} + +bool SflowOrch::sflowDelPort(sai_object_id_t port_id, string direction) +{ + sai_attribute_t attr; + sai_status_t sai_rc; + + SWSS_LOG_DEBUG("sflowDelPort portOid %" PRIx64 " dir %s", + port_id, direction.c_str()); + + if (direction == "both" || direction == "rx") + { + attr.id = SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE; + attr.value.oid = SAI_NULL_OBJECT_ID; + sai_rc = sai_port_api->set_port_attribute(port_id, &attr); + + if (sai_rc != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to delete session on port %" PRIx64, port_id); + task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, sai_rc); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + } + + if (direction == "both" || direction == "tx") + { + attr.id = SAI_PORT_ATTR_EGRESS_SAMPLEPACKET_ENABLE; + attr.value.oid = SAI_NULL_OBJECT_ID; + sai_rc = sai_port_api->set_port_attribute(port_id, &attr); + + if (sai_rc != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to delete session on port %" PRIx64, port_id); + task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, sai_rc); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } } } return true; } -bool SflowOrch::sflowDelPort(sai_object_id_t port_id) +bool SflowOrch::sflowUpdateSampleDirection(sai_object_id_t port_id, string old_dir, string new_dir) { + sai_object_id_t ing_sample_oid = SAI_NULL_OBJECT_ID; + sai_object_id_t egr_sample_oid = SAI_NULL_OBJECT_ID; sai_attribute_t attr; sai_status_t sai_rc; + auto port_info = m_sflowPortInfoMap.find(port_id); + + SWSS_LOG_DEBUG("sflowUpdateSampleDirection portOid %" 
PRIx64 " old dir %s new dir %s", + port_id, old_dir.c_str(), new_dir.c_str()); + + if ((new_dir == "tx") && (old_dir == "rx" || old_dir == "both")) + { + ing_sample_oid = SAI_NULL_OBJECT_ID; + egr_sample_oid = port_info->second.m_sample_id; + } + + if ((new_dir == "rx") && (old_dir == "tx" || old_dir == "both")) + { + ing_sample_oid = port_info->second.m_sample_id; + egr_sample_oid = SAI_NULL_OBJECT_ID; + } + + if ((new_dir == "both") && (old_dir == "tx" || old_dir == "rx")) + { + ing_sample_oid = port_info->second.m_sample_id; + egr_sample_oid = port_info->second.m_sample_id; + } attr.id = SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE; - attr.value.oid = SAI_NULL_OBJECT_ID; + attr.value.oid = ing_sample_oid; sai_rc = sai_port_api->set_port_attribute(port_id, &attr); if (sai_rc != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to delete session on port %" PRIx64 , port_id); + SWSS_LOG_ERROR("Failed to Ingress session on port %" PRIx64, port_id); task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, sai_rc); if (handle_status != task_success) { return parseHandleSaiStatusFailure(handle_status); } } + + attr.id = SAI_PORT_ATTR_EGRESS_SAMPLEPACKET_ENABLE; + attr.value.oid = egr_sample_oid; + sai_rc = sai_port_api->set_port_attribute(port_id, &attr); + + if (sai_rc != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to Update Egress session on port %" PRIx64, port_id); + task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, sai_rc); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + return true; } -void SflowOrch::sflowExtractInfo(vector &fvs, bool &admin, uint32_t &rate) +void SflowOrch::sflowExtractInfo(vector &fvs, bool &admin, uint32_t &rate, string &dir) { for (auto i : fvs) { @@ -175,6 +281,13 @@ void SflowOrch::sflowExtractInfo(vector &fvs, bool &admin, uint rate = 0; } } + else if (fvField(i) == "sample_direction") + { + if (fvValue(i) != "error") + { + dir = fvValue(i); + } + } } } 
@@ -187,10 +300,11 @@ void SflowOrch::sflowStatusSet(Consumer &consumer) auto tuple = it->second; string op = kfvOp(tuple); uint32_t rate = 0; + string dir = ""; if (op == SET_COMMAND) { - sflowExtractInfo(kfvFieldsValues(tuple), m_sflowStatus, rate); + sflowExtractInfo(kfvFieldsValues(tuple), m_sflowStatus, rate, dir); } else if (op == DEL_COMMAND) { @@ -221,7 +335,7 @@ bool SflowOrch::handleSflowSessionDel(sai_object_id_t port_id) uint32_t rate = sflowSessionGetRate(sflowInfo->second.m_sample_id); if (sflowInfo->second.admin_state) { - if (!sflowDelPort(port_id)) + if (!sflowDelPort(port_id, sflowInfo->second.m_sample_dir)) { return false; } @@ -270,6 +384,7 @@ void SflowOrch::doTask(Consumer &consumer) { bool admin_state = m_sflowStatus; uint32_t rate = 0; + string dir = "rx"; if (!m_sflowStatus) { @@ -282,7 +397,15 @@ void SflowOrch::doTask(Consumer &consumer) admin_state = sflowInfo->second.admin_state; } - sflowExtractInfo(kfvFieldsValues(tuple), admin_state, rate); + SWSS_LOG_DEBUG(" Existing Cfg portOid %" PRIx64 " admin %d rate %d dir %s", + port.m_port_id, (unsigned int)admin_state, rate, + sflowInfo->second.m_sample_dir.c_str()); + + sflowExtractInfo(kfvFieldsValues(tuple), admin_state, rate, dir); + + SWSS_LOG_DEBUG("New Cfg portOid %" PRIx64 " admin %d rate %d dir %s", + port.m_port_id, (unsigned int)admin_state, rate, dir.c_str()); + if (sflowInfo == m_sflowPortInfoMap.end()) { if (rate == 0) @@ -308,9 +431,11 @@ void SflowOrch::doTask(Consumer &consumer) m_sflowRateSampleMap[rate] = session; port_info.m_sample_id = session.m_sample_id; } + port_info.m_sample_dir = dir; + if (admin_state) { - if (!sflowAddPort(port_info.m_sample_id, port.m_port_id)) + if (!sflowAddPort(port_info.m_sample_id, port.m_port_id, port_info.m_sample_dir)) { it++; continue; @@ -335,11 +460,12 @@ void SflowOrch::doTask(Consumer &consumer) bool ret = false; if (admin_state) { - ret = sflowAddPort(sflowInfo->second.m_sample_id, port.m_port_id); + ret = 
sflowAddPort(sflowInfo->second.m_sample_id, port.m_port_id, + sflowInfo->second.m_sample_dir); } else { - ret = sflowDelPort(port.m_port_id); + ret = sflowDelPort(port.m_port_id, sflowInfo->second.m_sample_dir); } if (!ret) { @@ -348,6 +474,17 @@ void SflowOrch::doTask(Consumer &consumer) } sflowInfo->second.admin_state = admin_state; } + + if (dir != sflowInfo->second.m_sample_dir) + { + string old_dir = sflowInfo->second.m_sample_dir; + if (!sflowUpdateSampleDirection(port.m_port_id, old_dir, dir)) + { + it++; + continue; + } + sflowInfo->second.m_sample_dir = dir; + } } } else if (op == DEL_COMMAND) diff --git a/orchagent/sfloworch.h b/orchagent/sfloworch.h index 04a5c9d650..508b22c0aa 100644 --- a/orchagent/sfloworch.h +++ b/orchagent/sfloworch.h @@ -10,6 +10,7 @@ struct SflowPortInfo { bool admin_state; + string m_sample_dir; sai_object_id_t m_sample_id; }; @@ -38,11 +39,12 @@ class SflowOrch : public Orch virtual void doTask(Consumer& consumer); bool sflowCreateSession(uint32_t rate, SflowSession &session); bool sflowDestroySession(SflowSession &session); - bool sflowAddPort(sai_object_id_t sample_id, sai_object_id_t port_id); - bool sflowDelPort(sai_object_id_t port_id); + bool sflowAddPort(sai_object_id_t sample_id, sai_object_id_t port_id, string direction); + bool sflowDelPort(sai_object_id_t port_id, string direction); void sflowStatusSet(Consumer &consumer); bool sflowUpdateRate(sai_object_id_t port_id, uint32_t rate); + bool sflowUpdateSampleDirection(sai_object_id_t port_id, string old_dir, string new_dir); uint32_t sflowSessionGetRate(sai_object_id_t sample_id); bool handleSflowSessionDel(sai_object_id_t port_id); - void sflowExtractInfo(std::vector &fvs, bool &admin, uint32_t &rate); + void sflowExtractInfo(std::vector &fvs, bool &admin, uint32_t &rate, string &dir); }; diff --git a/orchagent/srv6orch.cpp b/orchagent/srv6orch.cpp index 5081e06b6f..5a80576e3a 100644 --- a/orchagent/srv6orch.cpp +++ b/orchagent/srv6orch.cpp @@ -20,6 +20,46 @@ extern 
sai_next_hop_api_t* sai_next_hop_api; extern RouteOrch *gRouteOrch; extern CrmOrch *gCrmOrch; +const map end_behavior_map = +{ + {"end", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_E}, + {"end.x", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_X}, + {"end.t", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_T}, + {"end.dx6", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX6}, + {"end.dx4", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX4}, + {"end.dt4", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT4}, + {"end.dt6", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT6}, + {"end.dt46", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT46}, + {"end.b6.encaps", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_ENCAPS}, + {"end.b6.encaps.red", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_ENCAPS_RED}, + {"end.b6.insert", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_INSERT}, + {"end.b6.insert.red", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_INSERT_RED}, + {"udx6", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX6}, + {"udx4", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX4}, + {"udt6", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT6}, + {"udt4", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT4}, + {"udt46", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT46}, + {"un", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UN}, + {"ua", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UA} +}; + +const map end_flavor_map = +{ + {"end", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD}, + {"end.x", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD}, + {"end.t", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD}, + {"un", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD}, + {"ua", SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD} +}; + +const map sidlist_type_map = +{ + {"insert", SAI_SRV6_SIDLIST_TYPE_INSERT}, + {"insert.red", SAI_SRV6_SIDLIST_TYPE_INSERT_RED}, + {"encaps", SAI_SRV6_SIDLIST_TYPE_ENCAPS}, + {"encaps.red", SAI_SRV6_SIDLIST_TYPE_ENCAPS_RED} +}; + void Srv6Orch::srv6TunnelUpdateNexthops(const string srv6_source, const NextHopKey nhkey, bool insert) { if (insert) @@ -235,7 +275,7 @@ bool Srv6Orch::srv6Nexthops(const NextHopGroupKey &nhgKey, 
sai_object_id_t &next return true; } -bool Srv6Orch::createUpdateSidList(const string sid_name, const string sid_list) +bool Srv6Orch::createUpdateSidList(const string sid_name, const string sid_list, const string sidlist_type) { SWSS_LOG_ENTER(); bool exists = (sid_table_.find(sid_name) != sid_table_.end()); @@ -271,7 +311,16 @@ bool Srv6Orch::createUpdateSidList(const string sid_name, const string sid_list) attributes.push_back(attr); attr.id = SAI_SRV6_SIDLIST_ATTR_TYPE; - attr.value.s32 = SAI_SRV6_SIDLIST_TYPE_ENCAPS_RED; + if (sidlist_type_map.find(sidlist_type) == sidlist_type_map.end()) + { + SWSS_LOG_INFO("Use default sidlist type: ENCAPS_RED"); + attr.value.s32 = SAI_SRV6_SIDLIST_TYPE_ENCAPS_RED; + } + else + { + SWSS_LOG_INFO("sidlist type: %s", sidlist_type.c_str()); + attr.value.s32 = sidlist_type_map.at(sidlist_type); + } attributes.push_back(attr); status = sai_srv6_api->create_srv6_sidlist(&segment_oid, gSwitchId, (uint32_t) attributes.size(), attributes.data()); if (status != SAI_STATUS_SUCCESS) @@ -333,7 +382,7 @@ void Srv6Orch::doTaskSidTable(const KeyOpFieldsValuesTuple & tuple) SWSS_LOG_ENTER(); string sid_name = kfvKey(tuple); string op = kfvOp(tuple); - string sid_list; + string sid_list, sidlist_type; for (auto i : kfvFieldsValues(tuple)) { @@ -341,10 +390,14 @@ void Srv6Orch::doTaskSidTable(const KeyOpFieldsValuesTuple & tuple) { sid_list = fvValue(i); } + if (fvField(i) == "type") + { + sidlist_type = fvValue(i); + } } if (op == SET_COMMAND) { - if (!createUpdateSidList(sid_name, sid_list)) + if (!createUpdateSidList(sid_name, sid_list, sidlist_type)) { SWSS_LOG_ERROR("Failed to process sid %s", sid_name.c_str()); } @@ -372,62 +425,18 @@ bool Srv6Orch::mySidExists(string my_sid_string) bool Srv6Orch::sidEntryEndpointBehavior(string action, sai_my_sid_entry_endpoint_behavior_t &end_behavior, sai_my_sid_entry_endpoint_behavior_flavor_t &end_flavor) { - if (action == "end") - { - end_behavior = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_E; - 
end_flavor = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD; - } - else if (action == "end.x") - { - end_behavior = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_X; - end_flavor = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD; - } - else if (action == "end.t") - { - end_behavior = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_T; - end_flavor = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD; - } - else if (action == "end.dx6") - { - end_behavior = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX6; - } - else if (action == "end.dx4") - { - end_behavior = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX4; - } - else if (action == "end.dt4") - { - end_behavior = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT4; - } - else if (action == "end.dt6") - { - end_behavior = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT6; - } - else if (action == "end.dt46") - { - end_behavior = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT46; - } - else if (action == "end.b6.encaps") - { - end_behavior = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_ENCAPS; - } - else if (action == "end.b6.encaps.red") - { - end_behavior = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_ENCAPS_RED; - } - else if (action == "end.b6.insert") - { - end_behavior = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_INSERT; - } - else if (action == "end.b6.insert.red") + if (end_behavior_map.find(action) == end_behavior_map.end()) { - end_behavior = SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_INSERT_RED; + SWSS_LOG_ERROR("Invalid endpoint behavior function"); + return false; } - else + end_behavior = end_behavior_map.at(action); + + if (end_flavor_map.find(action) != end_flavor_map.end()) { - SWSS_LOG_ERROR("Invalid endpoing behavior function"); - return false; + end_flavor = end_flavor_map.at(action); } + return true; } @@ -496,7 +505,11 @@ bool Srv6Orch::createUpdateMysidEntry(string my_sid_string, const string dt_vrf, { sai_object_id_t dt_vrf_id; SWSS_LOG_INFO("DT VRF name %s", dt_vrf.c_str()); - if (m_vrfOrch->isVRFexists(dt_vrf)) + if (dt_vrf == "default") + { + dt_vrf_id = gVirtualRouterId; + 
} + else if (m_vrfOrch->isVRFexists(dt_vrf)) { SWSS_LOG_INFO("VRF %s exists in DB", dt_vrf.c_str()); dt_vrf_id = m_vrfOrch->getVRFid(dt_vrf); diff --git a/orchagent/srv6orch.h b/orchagent/srv6orch.h index 989737a998..e24f5e00f5 100644 --- a/orchagent/srv6orch.h +++ b/orchagent/srv6orch.h @@ -76,7 +76,7 @@ class Srv6Orch : public Orch void doTask(Consumer &consumer); void doTaskSidTable(const KeyOpFieldsValuesTuple &tuple); void doTaskMySidTable(const KeyOpFieldsValuesTuple &tuple); - bool createUpdateSidList(const string seg_name, const string ips); + bool createUpdateSidList(const string seg_name, const string ips, const string sidlist_type); bool deleteSidList(const string seg_name); bool createSrv6Tunnel(const string srv6_source); bool createSrv6Nexthop(const NextHopKey &nh); diff --git a/orchagent/switch/switch_capabilities.cpp b/orchagent/switch/switch_capabilities.cpp new file mode 100644 index 0000000000..d826a9e49f --- /dev/null +++ b/orchagent/switch/switch_capabilities.cpp @@ -0,0 +1,354 @@ +// includes ----------------------------------------------------------------------------------------------------------- + +extern "C" { +#include +#include +#include +#include +} + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "switch_schema.h" +#include "switch_capabilities.h" + +using namespace swss; + +// defines ------------------------------------------------------------------------------------------------------------ + +#define SWITCH_CAPABILITY_HASH_NATIVE_HASH_FIELD_LIST_FIELD "HASH|NATIVE_HASH_FIELD_LIST" +#define SWITCH_CAPABILITY_ECMP_HASH_CAPABLE_FIELD "ECMP_HASH_CAPABLE" +#define SWITCH_CAPABILITY_LAG_HASH_CAPABLE_FIELD "LAG_HASH_CAPABLE" + +#define SWITCH_CAPABILITY_KEY "switch" + +#define SWITCH_STATE_DB_NAME "STATE_DB" +#define SWITCH_STATE_DB_TIMEOUT 0 + +// constants ---------------------------------------------------------------------------------------------------------- + +static const 
std::unordered_map swHashHashFieldMap = +{ + { SAI_NATIVE_HASH_FIELD_IN_PORT, SWITCH_HASH_FIELD_IN_PORT }, + { SAI_NATIVE_HASH_FIELD_DST_MAC, SWITCH_HASH_FIELD_DST_MAC }, + { SAI_NATIVE_HASH_FIELD_SRC_MAC, SWITCH_HASH_FIELD_SRC_MAC }, + { SAI_NATIVE_HASH_FIELD_ETHERTYPE, SWITCH_HASH_FIELD_ETHERTYPE }, + { SAI_NATIVE_HASH_FIELD_VLAN_ID, SWITCH_HASH_FIELD_VLAN_ID }, + { SAI_NATIVE_HASH_FIELD_IP_PROTOCOL, SWITCH_HASH_FIELD_IP_PROTOCOL }, + { SAI_NATIVE_HASH_FIELD_DST_IP, SWITCH_HASH_FIELD_DST_IP }, + { SAI_NATIVE_HASH_FIELD_SRC_IP, SWITCH_HASH_FIELD_SRC_IP }, + { SAI_NATIVE_HASH_FIELD_L4_DST_PORT, SWITCH_HASH_FIELD_L4_DST_PORT }, + { SAI_NATIVE_HASH_FIELD_L4_SRC_PORT, SWITCH_HASH_FIELD_L4_SRC_PORT }, + { SAI_NATIVE_HASH_FIELD_INNER_DST_MAC, SWITCH_HASH_FIELD_INNER_DST_MAC }, + { SAI_NATIVE_HASH_FIELD_INNER_SRC_MAC, SWITCH_HASH_FIELD_INNER_SRC_MAC }, + { SAI_NATIVE_HASH_FIELD_INNER_ETHERTYPE, SWITCH_HASH_FIELD_INNER_ETHERTYPE }, + { SAI_NATIVE_HASH_FIELD_INNER_IP_PROTOCOL, SWITCH_HASH_FIELD_INNER_IP_PROTOCOL }, + { SAI_NATIVE_HASH_FIELD_INNER_DST_IP, SWITCH_HASH_FIELD_INNER_DST_IP }, + { SAI_NATIVE_HASH_FIELD_INNER_SRC_IP, SWITCH_HASH_FIELD_INNER_SRC_IP }, + { SAI_NATIVE_HASH_FIELD_INNER_L4_DST_PORT, SWITCH_HASH_FIELD_INNER_L4_DST_PORT }, + { SAI_NATIVE_HASH_FIELD_INNER_L4_SRC_PORT, SWITCH_HASH_FIELD_INNER_L4_SRC_PORT } +}; + +// variables ---------------------------------------------------------------------------------------------------------- + +extern sai_object_id_t gSwitchId; + +// functions ---------------------------------------------------------------------------------------------------------- + +static std::string toStr(sai_object_type_t objType, sai_attr_id_t attrId) noexcept +{ + const auto *meta = sai_metadata_get_attr_metadata(objType, attrId); + + return meta != nullptr ? 
meta->attridname : "UNKNOWN"; +} + +static std::string toStr(const std::set &value) noexcept +{ + std::vector strList; + + for (const auto &cit1 : value) + { + const auto &cit2 = swHashHashFieldMap.find(cit1); + if (cit2 != swHashHashFieldMap.cend()) + { + strList.push_back(cit2->second); + } + } + + return join(",", strList.cbegin(), strList.cend()); +} + +static std::string toStr(bool value) noexcept +{ + return value ? "true" : "false"; +} + +// Switch capabilities ------------------------------------------------------------------------------------------------ + +DBConnector SwitchCapabilities::stateDb(SWITCH_STATE_DB_NAME, SWITCH_STATE_DB_TIMEOUT); +Table SwitchCapabilities::capTable(&stateDb, STATE_SWITCH_CAPABILITY_TABLE_NAME); + +SwitchCapabilities::SwitchCapabilities() +{ + queryHashCapabilities(); + querySwitchCapabilities(); + + writeHashCapabilitiesToDb(); + writeSwitchCapabilitiesToDb(); +} + +bool SwitchCapabilities::isSwitchEcmpHashSupported() const +{ + const auto &nativeHashFieldList = hashCapabilities.nativeHashFieldList; + const auto &ecmpHash = switchCapabilities.ecmpHash; + + return nativeHashFieldList.isAttrSupported && ecmpHash.isAttrSupported; +} + +bool SwitchCapabilities::isSwitchLagHashSupported() const +{ + const auto &nativeHashFieldList = hashCapabilities.nativeHashFieldList; + const auto &lagHash = switchCapabilities.lagHash; + + return nativeHashFieldList.isAttrSupported && lagHash.isAttrSupported; +} + +bool SwitchCapabilities::validateSwitchHashFieldCap(const std::set &hfSet) const +{ + if (!hashCapabilities.nativeHashFieldList.isEnumSupported) + { + return true; + } + + if (hashCapabilities.nativeHashFieldList.hfSet.empty()) + { + SWSS_LOG_ERROR("Failed to validate hash field: no hash field capabilities"); + return false; + } + + for (const auto &cit : hfSet) + { + if (hashCapabilities.nativeHashFieldList.hfSet.count(cit) == 0) + { + SWSS_LOG_ERROR("Failed to validate hash field: value(%s) is not supported"); + return false; + } + 
} + + return true; +} + +FieldValueTuple SwitchCapabilities::makeHashFieldCapDbEntry() const +{ + const auto &nativeHashFieldList = hashCapabilities.nativeHashFieldList; + + auto field = SWITCH_CAPABILITY_HASH_NATIVE_HASH_FIELD_LIST_FIELD; + auto value = nativeHashFieldList.isEnumSupported ? toStr(nativeHashFieldList.hfSet) : "N/A"; + + return FieldValueTuple(field, value); +} + +FieldValueTuple SwitchCapabilities::makeEcmpHashCapDbEntry() const +{ + auto field = SWITCH_CAPABILITY_ECMP_HASH_CAPABLE_FIELD; + auto value = toStr(isSwitchEcmpHashSupported()); + + return FieldValueTuple(field, value); +} + +FieldValueTuple SwitchCapabilities::makeLagHashCapDbEntry() const +{ + auto field = SWITCH_CAPABILITY_LAG_HASH_CAPABLE_FIELD; + auto value = toStr(isSwitchLagHashSupported()); + + return FieldValueTuple(field, value); +} + +sai_status_t SwitchCapabilities::queryEnumCapabilitiesSai(std::vector &capList, sai_object_type_t objType, sai_attr_id_t attrId) const +{ + sai_s32_list_t enumList = { .count = 0, .list = nullptr }; + + auto status = sai_query_attribute_enum_values_capability(gSwitchId, objType, attrId, &enumList); + if ((status != SAI_STATUS_SUCCESS) && (status != SAI_STATUS_BUFFER_OVERFLOW)) + { + return status; + } + + capList.resize(enumList.count); + enumList.list = capList.data(); + + return sai_query_attribute_enum_values_capability(gSwitchId, objType, attrId, &enumList); +} + +sai_status_t SwitchCapabilities::queryAttrCapabilitiesSai(sai_attr_capability_t &attrCap, sai_object_type_t objType, sai_attr_id_t attrId) const +{ + return sai_query_attribute_capability(gSwitchId, objType, attrId, &attrCap); +} + +void SwitchCapabilities::queryHashNativeHashFieldListEnumCapabilities() +{ + SWSS_LOG_ENTER(); + + std::vector hfList; + auto status = queryEnumCapabilitiesSai( + hfList, SAI_OBJECT_TYPE_HASH, SAI_HASH_ATTR_NATIVE_HASH_FIELD_LIST + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR( + "Failed to get attribute(%s) enum value capabilities", + 
toStr(SAI_OBJECT_TYPE_HASH, SAI_HASH_ATTR_NATIVE_HASH_FIELD_LIST).c_str() + ); + return; + } + + auto &hfSet = hashCapabilities.nativeHashFieldList.hfSet; + std::transform( + hfList.cbegin(), hfList.cend(), std::inserter(hfSet, hfSet.begin()), + [](sai_int32_t value) { return static_cast(value); } + ); + + hashCapabilities.nativeHashFieldList.isEnumSupported = true; +} + +void SwitchCapabilities::queryHashNativeHashFieldListAttrCapabilities() +{ + SWSS_LOG_ENTER(); + + sai_attr_capability_t attrCap; + + auto status = queryAttrCapabilitiesSai( + attrCap, SAI_OBJECT_TYPE_HASH, SAI_HASH_ATTR_NATIVE_HASH_FIELD_LIST + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR( + "Failed to get attribute(%s) capabilities", + toStr(SAI_OBJECT_TYPE_HASH, SAI_HASH_ATTR_NATIVE_HASH_FIELD_LIST).c_str() + ); + return; + } + + if (!attrCap.set_implemented) + { + SWSS_LOG_WARN( + "Attribute(%s) SET is not implemented in SAI", + toStr(SAI_OBJECT_TYPE_HASH, SAI_HASH_ATTR_NATIVE_HASH_FIELD_LIST).c_str() + ); + return; + } + + hashCapabilities.nativeHashFieldList.isAttrSupported = true; +} + +void SwitchCapabilities::queryHashCapabilities() +{ + queryHashNativeHashFieldListEnumCapabilities(); + queryHashNativeHashFieldListAttrCapabilities(); +} + +void SwitchCapabilities::querySwitchEcmpHashCapabilities() +{ + SWSS_LOG_ENTER(); + + sai_attr_capability_t attrCap; + + auto status = queryAttrCapabilitiesSai( + attrCap, SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_ECMP_HASH + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR( + "Failed to get attribute(%s) capabilities", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_ECMP_HASH).c_str() + ); + return; + } + + if (!attrCap.get_implemented) + { + SWSS_LOG_WARN( + "Attribute(%s) GET is not implemented in SAI", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_ECMP_HASH).c_str() + ); + return; + } + + switchCapabilities.ecmpHash.isAttrSupported = true; +} + +void SwitchCapabilities::querySwitchLagHashCapabilities() +{ + 
SWSS_LOG_ENTER(); + + sai_attr_capability_t attrCap; + + auto status = queryAttrCapabilitiesSai( + attrCap, SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_LAG_HASH + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR( + "Failed to get attribute(%s) capabilities", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_LAG_HASH).c_str() + ); + return; + } + + if (!attrCap.get_implemented) + { + SWSS_LOG_WARN( + "Attribute(%s) GET is not implemented in SAI", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_LAG_HASH).c_str() + ); + return; + } + + switchCapabilities.lagHash.isAttrSupported = true; +} + +void SwitchCapabilities::querySwitchCapabilities() +{ + querySwitchEcmpHashCapabilities(); + querySwitchLagHashCapabilities(); +} + +void SwitchCapabilities::writeHashCapabilitiesToDb() +{ + SWSS_LOG_ENTER(); + + auto key = SwitchCapabilities::capTable.getKeyName(SWITCH_CAPABILITY_KEY); + + std::vector fvList = { + makeHashFieldCapDbEntry() + }; + + SwitchCapabilities::capTable.set(SWITCH_CAPABILITY_KEY, fvList); + + SWSS_LOG_NOTICE("Wrote hash enum capabilities to State DB: %s key", key.c_str()); +} + +void SwitchCapabilities::writeSwitchCapabilitiesToDb() +{ + SWSS_LOG_ENTER(); + + auto key = SwitchCapabilities::capTable.getKeyName(SWITCH_CAPABILITY_KEY); + + std::vector fvList = { + makeEcmpHashCapDbEntry(), + makeLagHashCapDbEntry() + }; + + SwitchCapabilities::capTable.set(SWITCH_CAPABILITY_KEY, fvList); + + SWSS_LOG_NOTICE("Wrote switch hash capabilities to State DB: %s key", key.c_str()); +} diff --git a/orchagent/switch/switch_capabilities.h b/orchagent/switch/switch_capabilities.h new file mode 100644 index 0000000000..47dcb0c7ec --- /dev/null +++ b/orchagent/switch/switch_capabilities.h @@ -0,0 +1,68 @@ +#pragma once + +extern "C" { +#include +#include +#include +} + +#include +#include + +#include +#include + +class SwitchCapabilities final +{ +public: + SwitchCapabilities(); + ~SwitchCapabilities() = default; + + bool isSwitchEcmpHashSupported() const; + bool 
isSwitchLagHashSupported() const; + + bool validateSwitchHashFieldCap(const std::set &hfSet) const; + +private: + swss::FieldValueTuple makeHashFieldCapDbEntry() const; + swss::FieldValueTuple makeEcmpHashCapDbEntry() const; + swss::FieldValueTuple makeLagHashCapDbEntry() const; + + sai_status_t queryEnumCapabilitiesSai(std::vector &capList, sai_object_type_t objType, sai_attr_id_t attrId) const; + sai_status_t queryAttrCapabilitiesSai(sai_attr_capability_t &attrCap, sai_object_type_t objType, sai_attr_id_t attrId) const; + + void queryHashNativeHashFieldListEnumCapabilities(); + void queryHashNativeHashFieldListAttrCapabilities(); + + void querySwitchEcmpHashCapabilities(); + void querySwitchLagHashCapabilities(); + + void queryHashCapabilities(); + void querySwitchCapabilities(); + + void writeHashCapabilitiesToDb(); + void writeSwitchCapabilitiesToDb(); + + // Hash SAI capabilities + struct { + struct { + std::set hfSet; + bool isEnumSupported = false; + bool isAttrSupported = false; + } nativeHashFieldList; + } hashCapabilities; + + // Switch SAI capabilities + struct { + struct { + bool isAttrSupported = false; + } ecmpHash; + + struct { + bool isAttrSupported = false; + } lagHash; + } switchCapabilities; + + static swss::DBConnector stateDb; + static swss::Table capTable; +}; diff --git a/orchagent/switch/switch_container.h b/orchagent/switch/switch_container.h new file mode 100644 index 0000000000..c56ae166f0 --- /dev/null +++ b/orchagent/switch/switch_container.h @@ -0,0 +1,28 @@ +#pragma once + +extern "C" { +#include +} + +#include +#include +#include + +class SwitchHash final +{ +public: + SwitchHash() = default; + ~SwitchHash() = default; + + struct { + std::set value; + bool is_set = false; + } ecmp_hash; + + struct { + std::set value; + bool is_set = false; + } lag_hash; + + std::unordered_map fieldValueMap; +}; diff --git a/orchagent/switch/switch_helper.cpp b/orchagent/switch/switch_helper.cpp new file mode 100644 index 0000000000..d91f382b25 --- 
/dev/null +++ b/orchagent/switch/switch_helper.cpp @@ -0,0 +1,142 @@ +// includes ----------------------------------------------------------------------------------------------------------- + +#include +#include +#include + +#include +#include + +#include "switch_schema.h" +#include "switch_helper.h" + +using namespace swss; + +// constants ---------------------------------------------------------------------------------------------------------- + +static const std::unordered_map swHashHashFieldMap = +{ + { SWITCH_HASH_FIELD_IN_PORT, SAI_NATIVE_HASH_FIELD_IN_PORT }, + { SWITCH_HASH_FIELD_DST_MAC, SAI_NATIVE_HASH_FIELD_DST_MAC }, + { SWITCH_HASH_FIELD_SRC_MAC, SAI_NATIVE_HASH_FIELD_SRC_MAC }, + { SWITCH_HASH_FIELD_ETHERTYPE, SAI_NATIVE_HASH_FIELD_ETHERTYPE }, + { SWITCH_HASH_FIELD_VLAN_ID, SAI_NATIVE_HASH_FIELD_VLAN_ID }, + { SWITCH_HASH_FIELD_IP_PROTOCOL, SAI_NATIVE_HASH_FIELD_IP_PROTOCOL }, + { SWITCH_HASH_FIELD_DST_IP, SAI_NATIVE_HASH_FIELD_DST_IP }, + { SWITCH_HASH_FIELD_SRC_IP, SAI_NATIVE_HASH_FIELD_SRC_IP }, + { SWITCH_HASH_FIELD_L4_DST_PORT, SAI_NATIVE_HASH_FIELD_L4_DST_PORT }, + { SWITCH_HASH_FIELD_L4_SRC_PORT, SAI_NATIVE_HASH_FIELD_L4_SRC_PORT }, + { SWITCH_HASH_FIELD_INNER_DST_MAC, SAI_NATIVE_HASH_FIELD_INNER_DST_MAC }, + { SWITCH_HASH_FIELD_INNER_SRC_MAC, SAI_NATIVE_HASH_FIELD_INNER_SRC_MAC }, + { SWITCH_HASH_FIELD_INNER_ETHERTYPE, SAI_NATIVE_HASH_FIELD_INNER_ETHERTYPE }, + { SWITCH_HASH_FIELD_INNER_IP_PROTOCOL, SAI_NATIVE_HASH_FIELD_INNER_IP_PROTOCOL }, + { SWITCH_HASH_FIELD_INNER_DST_IP, SAI_NATIVE_HASH_FIELD_INNER_DST_IP }, + { SWITCH_HASH_FIELD_INNER_SRC_IP, SAI_NATIVE_HASH_FIELD_INNER_SRC_IP }, + { SWITCH_HASH_FIELD_INNER_L4_DST_PORT, SAI_NATIVE_HASH_FIELD_INNER_L4_DST_PORT }, + { SWITCH_HASH_FIELD_INNER_L4_SRC_PORT, SAI_NATIVE_HASH_FIELD_INNER_L4_SRC_PORT } +}; + +// switch helper ------------------------------------------------------------------------------------------------------ + +const SwitchHash& SwitchHelper::getSwHash() const +{ + return 
swHash; +} + +void SwitchHelper::setSwHash(const SwitchHash &hash) +{ + swHash = hash; +} + +template +bool SwitchHelper::parseSwHashFieldList(T &obj, const std::string &field, const std::string &value) const +{ + SWSS_LOG_ENTER(); + + const auto &hfList = tokenize(value, ','); + + if (hfList.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty list is prohibited", field.c_str()); + return false; + } + + const auto &hfSet = std::unordered_set(hfList.cbegin(), hfList.cend()); + + if (hfSet.size() != hfList.size()) + { + SWSS_LOG_ERROR("Duplicate hash fields in field(%s): unexpected value(%s)", field.c_str(), value.c_str()); + return false; + } + + for (const auto &cit1 : hfSet) + { + const auto &cit2 = swHashHashFieldMap.find(cit1); + if (cit2 == swHashHashFieldMap.cend()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s)", field.c_str(), value.c_str()); + return false; + } + + obj.value.insert(cit2->second); + } + + obj.is_set = true; + + return true; +} + +bool SwitchHelper::parseSwHashEcmpHash(SwitchHash &hash, const std::string &field, const std::string &value) const +{ + return parseSwHashFieldList(hash.ecmp_hash, field, value); +} + +bool SwitchHelper::parseSwHashLagHash(SwitchHash &hash, const std::string &field, const std::string &value) const +{ + return parseSwHashFieldList(hash.lag_hash, field, value); +} + +bool SwitchHelper::parseSwHash(SwitchHash &hash) const +{ + SWSS_LOG_ENTER(); + + for (const auto &cit : hash.fieldValueMap) + { + const auto &field = cit.first; + const auto &value = cit.second; + + if (field == SWITCH_HASH_ECMP_HASH) + { + if (!parseSwHashEcmpHash(hash, field, value)) + { + return false; + } + } + else if (field == SWITCH_HASH_LAG_HASH) + { + if (!parseSwHashLagHash(hash, field, value)) + { + return false; + } + } + else + { + SWSS_LOG_WARN("Unknown field(%s): skipping ...", field.c_str()); + } + } + + return validateSwHash(hash); +} + +bool SwitchHelper::validateSwHash(SwitchHash &hash) const +{ + 
SWSS_LOG_ENTER(); + + if (!hash.ecmp_hash.is_set && !hash.lag_hash.is_set) + { + SWSS_LOG_ERROR("Validation error: missing valid fields"); + return false; + } + + return true; +} diff --git a/orchagent/switch/switch_helper.h b/orchagent/switch/switch_helper.h new file mode 100644 index 0000000000..611ce2b6fb --- /dev/null +++ b/orchagent/switch/switch_helper.h @@ -0,0 +1,27 @@ +#pragma once + +#include "switch_container.h" + +class SwitchHelper final +{ +public: + SwitchHelper() = default; + ~SwitchHelper() = default; + + const SwitchHash& getSwHash() const; + void setSwHash(const SwitchHash &hash); + + bool parseSwHash(SwitchHash &hash) const; + +private: + template + bool parseSwHashFieldList(T &obj, const std::string &field, const std::string &value) const; + + bool parseSwHashEcmpHash(SwitchHash &hash, const std::string &field, const std::string &value) const; + bool parseSwHashLagHash(SwitchHash &hash, const std::string &field, const std::string &value) const; + + bool validateSwHash(SwitchHash &hash) const; + +private: + SwitchHash swHash; +}; diff --git a/orchagent/switch/switch_schema.h b/orchagent/switch/switch_schema.h new file mode 100644 index 0000000000..c836eff120 --- /dev/null +++ b/orchagent/switch/switch_schema.h @@ -0,0 +1,25 @@ +#pragma once + +// defines ------------------------------------------------------------------------------------------------------------ + +#define SWITCH_HASH_FIELD_IN_PORT "IN_PORT" +#define SWITCH_HASH_FIELD_DST_MAC "DST_MAC" +#define SWITCH_HASH_FIELD_SRC_MAC "SRC_MAC" +#define SWITCH_HASH_FIELD_ETHERTYPE "ETHERTYPE" +#define SWITCH_HASH_FIELD_VLAN_ID "VLAN_ID" +#define SWITCH_HASH_FIELD_IP_PROTOCOL "IP_PROTOCOL" +#define SWITCH_HASH_FIELD_DST_IP "DST_IP" +#define SWITCH_HASH_FIELD_SRC_IP "SRC_IP" +#define SWITCH_HASH_FIELD_L4_DST_PORT "L4_DST_PORT" +#define SWITCH_HASH_FIELD_L4_SRC_PORT "L4_SRC_PORT" +#define SWITCH_HASH_FIELD_INNER_DST_MAC "INNER_DST_MAC" +#define SWITCH_HASH_FIELD_INNER_SRC_MAC "INNER_SRC_MAC" 
+#define SWITCH_HASH_FIELD_INNER_ETHERTYPE "INNER_ETHERTYPE" +#define SWITCH_HASH_FIELD_INNER_IP_PROTOCOL "INNER_IP_PROTOCOL" +#define SWITCH_HASH_FIELD_INNER_DST_IP "INNER_DST_IP" +#define SWITCH_HASH_FIELD_INNER_SRC_IP "INNER_SRC_IP" +#define SWITCH_HASH_FIELD_INNER_L4_DST_PORT "INNER_L4_DST_PORT" +#define SWITCH_HASH_FIELD_INNER_L4_SRC_PORT "INNER_L4_SRC_PORT" + +#define SWITCH_HASH_ECMP_HASH "ecmp_hash" +#define SWITCH_HASH_LAG_HASH "lag_hash" diff --git a/orchagent/switchorch.cpp b/orchagent/switchorch.cpp index 48ecd1fd35..76c17812d4 100644 --- a/orchagent/switchorch.cpp +++ b/orchagent/switchorch.cpp @@ -9,6 +9,7 @@ #include "notificationproducer.h" #include "macaddress.h" #include "return_code.h" +#include "saihelper.h" using namespace std; using namespace swss; @@ -16,6 +17,7 @@ using namespace swss; extern sai_object_id_t gSwitchId; extern sai_switch_api_t *sai_switch_api; extern sai_acl_api_t *sai_acl_api; +extern sai_hash_api_t *sai_hash_api; extern MacAddress gVxlanMacAddress; extern CrmOrch *gCrmOrch; @@ -48,6 +50,27 @@ const map packet_action_map = const std::set switch_non_sai_attribute_set = {"ordered_ecmp"}; +void SwitchOrch::set_switch_pfc_dlr_init_capability() +{ + vector fvVector; + + /* Query PFC DLR INIT capability */ + bool rv = querySwitchCapability(SAI_OBJECT_TYPE_QUEUE, SAI_QUEUE_ATTR_PFC_DLR_INIT); + if (rv == false) + { + SWSS_LOG_INFO("Queue level PFC DLR INIT configuration is not supported"); + m_PfcDlrInitEnable = false; + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_PFC_DLR_INIT_CAPABLE, "false"); + } + else + { + SWSS_LOG_INFO("Queue level PFC DLR INIT configuration is supported"); + m_PfcDlrInitEnable = true; + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_PFC_DLR_INIT_CAPABLE, "true"); + } + set_switch_capability(fvVector); +} + SwitchOrch::SwitchOrch(DBConnector *db, vector& connectors, TableConnector switchTable): Orch(connectors), m_switchTable(switchTable.first, switchTable.second), @@ -60,8 +83,12 @@ 
SwitchOrch::SwitchOrch(DBConnector *db, vector& connectors, Tabl auto restartCheckNotifier = new Notifier(m_restartCheckNotificationConsumer, this, "RESTARTCHECK"); Orch::addExecutor(restartCheckNotifier); + set_switch_pfc_dlr_init_capability(); initSensorsTable(); querySwitchTpidCapability(); + querySwitchPortEgressSampleCapability(); + querySwitchHashDefaults(); + auto executorT = new ExecutableTimer(m_sensorsPollerTimer, this, "ASIC_SENSORS_POLL_TIMER"); Orch::addExecutor(executorT); } @@ -71,16 +98,14 @@ void SwitchOrch::initAclGroupsBindToSwitch() // Create an ACL group per stage, INGRESS, EGRESS and PRE_INGRESS for (auto stage_it : aclStageLookup) { - sai_object_id_t group_oid; - auto status = createAclGroup(fvValue(stage_it), &group_oid); + auto status = createAclGroup(fvValue(stage_it), &m_aclGroups[fvValue(stage_it)]); if (!status.ok()) { status.prepend("Failed to create ACL group for stage " + fvField(stage_it) + ": "); SWSS_LOG_THROW("%s", status.message().c_str()); } SWSS_LOG_NOTICE("Created ACL group for stage %s", fvField(stage_it).c_str()); - m_aclGroups[fvValue(stage_it)] = group_oid; - status = bindAclGroupToSwitch(fvValue(stage_it), group_oid); + status = bindAclGroupToSwitch(fvValue(stage_it), m_aclGroups[fvValue(stage_it)]); if (!status.ok()) { status.prepend("Failed to bind ACL group to stage " + fvField(stage_it) + ": "); @@ -89,12 +114,12 @@ void SwitchOrch::initAclGroupsBindToSwitch() } } -const std::map &SwitchOrch::getAclGroupOidsBindingToSwitch() +std::map &SwitchOrch::getAclGroupsBindingToSwitch() { return m_aclGroups; } -ReturnCode SwitchOrch::createAclGroup(const sai_acl_stage_t &group_stage, sai_object_id_t *acl_grp_oid) +ReturnCode SwitchOrch::createAclGroup(const sai_acl_stage_t &group_stage, referenced_object *acl_grp) { SWSS_LOG_ENTER(); @@ -115,8 +140,9 @@ ReturnCode SwitchOrch::createAclGroup(const sai_acl_stage_t &group_stage, sai_ob acl_grp_attr.value.s32list.list = bpoint_list.data(); acl_grp_attrs.push_back(acl_grp_attr); - 
CHECK_ERROR_AND_LOG_AND_RETURN(sai_acl_api->create_acl_table_group( - acl_grp_oid, gSwitchId, (uint32_t)acl_grp_attrs.size(), acl_grp_attrs.data()), + CHECK_ERROR_AND_LOG_AND_RETURN(sai_acl_api->create_acl_table_group(&acl_grp->m_saiObjectId, gSwitchId, + (uint32_t)acl_grp_attrs.size(), + acl_grp_attrs.data()), "Failed to create ACL group for stage " << group_stage); if (group_stage == SAI_ACL_STAGE_INGRESS || group_stage == SAI_ACL_STAGE_PRE_INGRESS || group_stage == SAI_ACL_STAGE_EGRESS) @@ -124,12 +150,12 @@ ReturnCode SwitchOrch::createAclGroup(const sai_acl_stage_t &group_stage, sai_ob gCrmOrch->incCrmAclUsedCounter(CrmResourceType::CRM_ACL_GROUP, (sai_acl_stage_t)group_stage, SAI_ACL_BIND_POINT_TYPE_SWITCH); } - SWSS_LOG_INFO("Suceeded to create ACL group %s in stage %d ", sai_serialize_object_id(*acl_grp_oid).c_str(), - group_stage); + SWSS_LOG_INFO("Suceeded to create ACL group %s in stage %d ", + sai_serialize_object_id(acl_grp->m_saiObjectId).c_str(), group_stage); return ReturnCode(); } -ReturnCode SwitchOrch::bindAclGroupToSwitch(const sai_acl_stage_t &group_stage, const sai_object_id_t &acl_grp_oid) +ReturnCode SwitchOrch::bindAclGroupToSwitch(const sai_acl_stage_t &group_stage, const referenced_object &acl_grp) { SWSS_LOG_ENTER(); @@ -137,17 +163,17 @@ ReturnCode SwitchOrch::bindAclGroupToSwitch(const sai_acl_stage_t &group_stage, if (switch_attr_it == aclStageToSwitchAttrLookup.end()) { LOG_ERROR_AND_RETURN(ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) - << "Failed to set ACL group(" << acl_grp_oid << ") to the SWITCH bind point at stage " - << group_stage); + << "Failed to set ACL group(" << acl_grp.m_saiObjectId + << ") to the SWITCH bind point at stage " << group_stage); } sai_attribute_t attr; attr.id = switch_attr_it->second; - attr.value.oid = acl_grp_oid; + attr.value.oid = acl_grp.m_saiObjectId; auto sai_status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); if (sai_status != SAI_STATUS_SUCCESS) { 
LOG_ERROR_AND_RETURN(ReturnCode(sai_status) << "[SAI] Failed to set_switch_attribute with attribute.id=" - << attr.id << " and acl group oid=" << acl_grp_oid); + << attr.id << " and acl group oid=" << acl_grp.m_saiObjectId); } return ReturnCode(); } @@ -450,24 +476,197 @@ void SwitchOrch::doAppSwitchTableTask(Consumer &consumer) } } +bool SwitchOrch::setSwitchHashFieldListSai(const SwitchHash &hash, bool isEcmpHash) const +{ + const auto &oid = isEcmpHash ? m_switchHashDefaults.ecmpHash.oid : m_switchHashDefaults.lagHash.oid; + const auto &hfSet = isEcmpHash ? hash.ecmp_hash.value : hash.lag_hash.value; + + std::vector hfList; + std::transform( + hfSet.cbegin(), hfSet.cend(), std::back_inserter(hfList), + [](sai_native_hash_field_t value) { return static_cast(value); } + ); + + sai_attribute_t attr; + + attr.id = SAI_HASH_ATTR_NATIVE_HASH_FIELD_LIST; + attr.value.s32list.list = hfList.data(); + attr.value.s32list.count = static_cast(hfList.size()); + + auto status = sai_hash_api->set_hash_attribute(oid, &attr); + return status == SAI_STATUS_SUCCESS; +} + +bool SwitchOrch::setSwitchHash(const SwitchHash &hash) +{ + SWSS_LOG_ENTER(); + + auto hObj = swHlpr.getSwHash(); + auto cfgUpd = false; + + if (hash.ecmp_hash.is_set) + { + if (hObj.ecmp_hash.value != hash.ecmp_hash.value) + { + if (swCap.isSwitchEcmpHashSupported()) + { + if (!swCap.validateSwitchHashFieldCap(hash.ecmp_hash.value)) + { + SWSS_LOG_ERROR("Failed to validate switch ECMP hash: capability is not supported"); + return false; + } + + if (!setSwitchHashFieldListSai(hash, true)) + { + SWSS_LOG_ERROR("Failed to set switch ECMP hash in SAI"); + return false; + } + + cfgUpd = true; + } + else + { + SWSS_LOG_WARN("Switch ECMP hash configuration is not supported: skipping ..."); + } + } + } + else + { + if (hObj.ecmp_hash.is_set) + { + SWSS_LOG_ERROR("Failed to remove switch ECMP hash configuration: operation is not supported"); + return false; + } + } + + if (hash.lag_hash.is_set) + { + if 
(hObj.lag_hash.value != hash.lag_hash.value) + { + if (swCap.isSwitchLagHashSupported()) + { + if (!swCap.validateSwitchHashFieldCap(hash.lag_hash.value)) + { + SWSS_LOG_ERROR("Failed to validate switch LAG hash: capability is not supported"); + return false; + } + + if (!setSwitchHashFieldListSai(hash, false)) + { + SWSS_LOG_ERROR("Failed to set switch LAG hash in SAI"); + return false; + } + + cfgUpd = true; + } + else + { + SWSS_LOG_WARN("Switch LAG hash configuration is not supported: skipping ..."); + } + } + } + else + { + if (hObj.lag_hash.is_set) + { + SWSS_LOG_ERROR("Failed to remove switch LAG hash configuration: operation is not supported"); + return false; + } + } + + // Don't update internal cache when config remains unchanged + if (!cfgUpd) + { + SWSS_LOG_NOTICE("Switch hash in SAI is up-to-date"); + return true; + } + + swHlpr.setSwHash(hash); + + SWSS_LOG_NOTICE("Set switch hash in SAI"); + + return true; +} + +void SwitchOrch::doCfgSwitchHashTableTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + auto &map = consumer.m_toSync; + auto it = map.begin(); + + while (it != map.end()) + { + auto keyOpFieldsValues = it->second; + auto key = kfvKey(keyOpFieldsValues); + auto op = kfvOp(keyOpFieldsValues); + + SWSS_LOG_INFO("KEY: %s, OP: %s", key.c_str(), op.c_str()); + + if (key.empty()) + { + SWSS_LOG_ERROR("Failed to parse switch hash key: empty string"); + it = map.erase(it); + continue; + } + + SwitchHash hash; + + if (op == SET_COMMAND) + { + for (const auto &cit : kfvFieldsValues(keyOpFieldsValues)) + { + auto fieldName = fvField(cit); + auto fieldValue = fvValue(cit); + + SWSS_LOG_INFO("FIELD: %s, VALUE: %s", fieldName.c_str(), fieldValue.c_str()); + + hash.fieldValueMap[fieldName] = fieldValue; + } + + if (swHlpr.parseSwHash(hash)) + { + if (!setSwitchHash(hash)) + { + SWSS_LOG_ERROR("Failed to set switch hash: ASIC and CONFIG DB are diverged"); + } + } + } + else if (op == DEL_COMMAND) + { + SWSS_LOG_ERROR("Failed to remove switch hash: operation 
is not supported: ASIC and CONFIG DB are diverged"); + } + else + { + SWSS_LOG_ERROR("Unknown operation(%s)", op.c_str()); + } + + it = map.erase(it); + } +} + void SwitchOrch::doTask(Consumer &consumer) { SWSS_LOG_ENTER(); - const string & table_name = consumer.getTableName(); - if (table_name == APP_SWITCH_TABLE_NAME) + const auto &tableName = consumer.getTableName(); + + if (tableName == APP_SWITCH_TABLE_NAME) { doAppSwitchTableTask(consumer); } - else if (table_name == CFG_ASIC_SENSORS_TABLE_NAME) + else if (tableName == CFG_ASIC_SENSORS_TABLE_NAME) { doCfgSensorsTableTask(consumer); } + else if (tableName == CFG_SWITCH_HASH_TABLE_NAME) + { + doCfgSwitchHashTableTask(consumer); + } else { - SWSS_LOG_ERROR("Unknown table : %s", table_name.c_str()); + SWSS_LOG_ERROR("Unknown table : %s", tableName.c_str()); } - } void SwitchOrch::doTask(NotificationConsumer& consumer) @@ -709,6 +908,35 @@ void SwitchOrch::set_switch_capability(const std::vector& value m_switchTable.set("switch", values); } +void SwitchOrch::querySwitchPortEgressSampleCapability() +{ + vector fvVector; + sai_status_t status = SAI_STATUS_SUCCESS; + sai_attr_capability_t capability; + + // Check if SAI is capable of handling Port egress sample. 
+ status = sai_query_attribute_capability(gSwitchId, SAI_OBJECT_TYPE_PORT, + SAI_PORT_ATTR_EGRESS_SAMPLEPACKET_ENABLE, &capability); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_WARN("Could not query port egress Sample capability %d", status); + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_PORT_EGRESS_SAMPLE_CAPABLE, "false"); + } + else + { + if (capability.set_implemented) + { + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_PORT_EGRESS_SAMPLE_CAPABLE, "true"); + } + else + { + fvVector.emplace_back(SWITCH_CAPABILITY_TABLE_PORT_EGRESS_SAMPLE_CAPABLE, "false"); + } + SWSS_LOG_NOTICE("port egress Sample capability %d", capability.set_implemented); + } + set_switch_capability(fvVector); +} + void SwitchOrch::querySwitchTpidCapability() { SWSS_LOG_ENTER(); @@ -762,7 +990,39 @@ void SwitchOrch::querySwitchTpidCapability() } } -bool SwitchOrch::querySwitchDscpToTcCapability(sai_object_type_t sai_object, sai_attr_id_t attr_id) +bool SwitchOrch::getSwitchHashOidSai(sai_object_id_t &oid, bool isEcmpHash) const +{ + sai_attribute_t attr; + attr.id = isEcmpHash ? 
SAI_SWITCH_ATTR_ECMP_HASH : SAI_SWITCH_ATTR_LAG_HASH; + attr.value.oid = SAI_NULL_OBJECT_ID; + + auto status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + return false; + } + + oid = attr.value.oid; + + return true; +} + +void SwitchOrch::querySwitchHashDefaults() +{ + SWSS_LOG_ENTER(); + + if (!getSwitchHashOidSai(m_switchHashDefaults.ecmpHash.oid, true)) + { + SWSS_LOG_WARN("Failed to get switch ECMP hash OID"); + } + + if (!getSwitchHashOidSai(m_switchHashDefaults.lagHash.oid, false)) + { + SWSS_LOG_WARN("Failed to get switch LAG hash OID"); + } +} + +bool SwitchOrch::querySwitchCapability(sai_object_type_t sai_object, sai_attr_id_t attr_id) { SWSS_LOG_ENTER(); diff --git a/orchagent/switchorch.h b/orchagent/switchorch.h index 5b09a67640..c6ee9997f9 100644 --- a/orchagent/switchorch.h +++ b/orchagent/switchorch.h @@ -3,6 +3,8 @@ #include "acltable.h" #include "orch.h" #include "timer.h" +#include "switch/switch_capabilities.h" +#include "switch/switch_helper.h" #define DEFAULT_ASIC_SENSORS_POLLER_INTERVAL 60 #define ASIC_SENSORS_POLLER_STATUS "ASIC_SENSORS_POLLER_STATUS" @@ -11,6 +13,8 @@ #define SWITCH_CAPABILITY_TABLE_PORT_TPID_CAPABLE "PORT_TPID_CAPABLE" #define SWITCH_CAPABILITY_TABLE_LAG_TPID_CAPABLE "LAG_TPID_CAPABLE" #define SWITCH_CAPABILITY_TABLE_ORDERED_ECMP_CAPABLE "ORDERED_ECMP_CAPABLE" +#define SWITCH_CAPABILITY_TABLE_PFC_DLR_INIT_CAPABLE "PFC_DLR_INIT_CAPABLE" +#define SWITCH_CAPABILITY_TABLE_PORT_EGRESS_SAMPLE_CAPABLE "PORT_EGRESS_SAMPLE_CAPABLE" struct WarmRestartCheck { @@ -30,11 +34,13 @@ class SwitchOrch : public Orch void restartCheckReply(const std::string &op, const std::string &data, std::vector &values); bool setAgingFDB(uint32_t sec); void set_switch_capability(const std::vector& values); - bool querySwitchDscpToTcCapability(sai_object_type_t sai_object, sai_attr_id_t attr_id); + bool querySwitchCapability(sai_object_type_t sai_object, sai_attr_id_t attr_id); + bool 
checkPfcDlrInitEnable() { return m_PfcDlrInitEnable; } + void set_switch_pfc_dlr_init_capability(); // Return reference to ACL group created for each stage and the bind point is // the switch - const std::map &getAclGroupOidsBindingToSwitch(); + std::map &getAclGroupsBindingToSwitch(); // Initialize the ACL groups bind to Switch void initAclGroupsBindToSwitch(); @@ -43,10 +49,20 @@ class SwitchOrch : public Orch private: void doTask(Consumer &consumer); void doTask(swss::SelectableTimer &timer); + void doCfgSwitchHashTableTask(Consumer &consumer); void doCfgSensorsTableTask(Consumer &consumer); void doAppSwitchTableTask(Consumer &consumer); void initSensorsTable(); void querySwitchTpidCapability(); + void querySwitchPortEgressSampleCapability(); + + // Switch hash + bool setSwitchHashFieldListSai(const SwitchHash &hash, bool isEcmpHash) const; + bool setSwitchHash(const SwitchHash &hash); + + bool getSwitchHashOidSai(sai_object_id_t &oid, bool isEcmpHash) const; + void querySwitchHashDefaults(); + sai_status_t setSwitchTunnelVxlanParams(swss::FieldValueTuple &val); void setSwitchNonSaiAttributes(swss::FieldValueTuple &val); @@ -54,17 +70,17 @@ class SwitchOrch : public Orch // Create the default ACL group for the given stage, bind point is // SAI_ACL_BIND_POINT_TYPE_SWITCH and group type is // SAI_ACL_TABLE_GROUP_TYPE_PARALLEL. - ReturnCode createAclGroup(const sai_acl_stage_t &group_stage, sai_object_id_t *acl_grp_oid); + ReturnCode createAclGroup(const sai_acl_stage_t &group_stage, referenced_object *acl_grp); // Bind the ACL group to switch for the given stage. // Set the SAI_SWITCH_ATTR_{STAGE}_ACL with the group oid. 
- ReturnCode bindAclGroupToSwitch(const sai_acl_stage_t &group_stage, const sai_object_id_t &acl_grp_oid); + ReturnCode bindAclGroupToSwitch(const sai_acl_stage_t &group_stage, const referenced_object &acl_grp); swss::NotificationConsumer* m_restartCheckNotificationConsumer; void doTask(swss::NotificationConsumer& consumer); swss::DBConnector *m_db; swss::Table m_switchTable; - std::map m_aclGroups; + std::map m_aclGroups; sai_object_id_t m_switchTunnelId; // ASIC temperature sensors @@ -80,8 +96,25 @@ class SwitchOrch : public Orch bool m_sensorsAvgTempSupported = true; bool m_vxlanSportUserModeEnabled = false; bool m_orderedEcmpEnable = false; + bool m_PfcDlrInitEnable = false; + + // Switch hash SAI defaults + struct { + struct { + sai_object_id_t oid = SAI_NULL_OBJECT_ID; + } ecmpHash; + struct { + sai_object_id_t oid = SAI_NULL_OBJECT_ID; + } lagHash; + } m_switchHashDefaults; // Information contained in the request from // external program for orchagent pre-shutdown state check WarmRestartCheck m_warmRestartCheck = {false, false, false}; + + // Switch OA capabilities + SwitchCapabilities swCap; + + // Switch OA helper + SwitchHelper swHlpr; }; diff --git a/orchagent/swssnet.h b/orchagent/swssnet.h index be49708d4f..82b5b6f94f 100644 --- a/orchagent/swssnet.h +++ b/orchagent/swssnet.h @@ -3,6 +3,7 @@ // #pragma once +#include #include #include #include @@ -76,6 +77,53 @@ inline static sai_ip_prefix_t& copy(sai_ip_prefix_t& dst, const IpAddress& src) return dst; } +static int getPrefixLenFromAddrMask(const uint8_t *addr, int len) +{ + int i = 0; + uint8_t non_zero = 0xFF; + for (i = len - 1; i >=0; i--) + { + if (addr[i] != 0) + { + non_zero = addr[i]; + break; + } + } + + if (non_zero == 0xFF) + { + return (i + 1) * 8; + } + else + { + int j = 2; + while(((non_zero >> j) & 0x1) == 0) + { + ++j; + } + return (i + 1) * 8 - (j + 1); + } + +} + +inline static IpPrefix getIpPrefixFromSaiPrefix(const sai_ip_prefix_t& src) +{ + ip_addr_t ip; + switch(src.addr_family) 
+ { + case SAI_IP_ADDR_FAMILY_IPV4: + ip.family = AF_INET; + ip.ip_addr.ipv4_addr = src.addr.ip4; + return IpPrefix(ip, getPrefixLenFromAddrMask(reinterpret_cast(&src.mask.ip4), 4)); + case SAI_IP_ADDR_FAMILY_IPV6: + ip.family = AF_INET6; + memcpy(ip.ip_addr.ipv6_addr, src.addr.ip6, 16); + return IpPrefix(ip, getPrefixLenFromAddrMask(src.mask.ip6, 16)); + default: + throw std::logic_error("Invalid family"); + } +} + inline static sai_ip_prefix_t& subnet(sai_ip_prefix_t& dst, const sai_ip_prefix_t& src) { dst.addr_family = src.addr_family; diff --git a/orchagent/tunneldecaporch.cpp b/orchagent/tunneldecaporch.cpp index 6ef2c96f74..065e78a0c0 100644 --- a/orchagent/tunneldecaporch.cpp +++ b/orchagent/tunneldecaporch.cpp @@ -5,6 +5,7 @@ #include "crmorch.h" #include "logger.h" #include "swssnet.h" +#include "qosorch.h" #define OVERLAY_RIF_DEFAULT_MTU 9100 @@ -17,6 +18,7 @@ extern sai_object_id_t gUnderlayIfId; extern sai_object_id_t gSwitchId; extern PortsOrch* gPortsOrch; extern CrmOrch* gCrmOrch; +extern QosOrch* gQosOrch; TunnelDecapOrch::TunnelDecapOrch(DBConnector *db, string tableName) : Orch(db, tableName) { @@ -31,7 +33,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) { return; } - + string table_name = consumer.getTableName(); auto it = consumer.m_toSync.begin(); while (it != consumer.m_toSync.end()) { @@ -48,10 +50,23 @@ void TunnelDecapOrch::doTask(Consumer& consumer) string ecn_mode; string encap_ecn_mode; string ttl_mode; + sai_object_id_t dscp_to_tc_map_id = SAI_NULL_OBJECT_ID; + sai_object_id_t tc_to_pg_map_id = SAI_NULL_OBJECT_ID; + // The tc_to_dscp_map_id and tc_to_queue_map_id are parsed here for muxorch to retrieve + sai_object_id_t tc_to_dscp_map_id = SAI_NULL_OBJECT_ID; + sai_object_id_t tc_to_queue_map_id = SAI_NULL_OBJECT_ID; + bool valid = true; + task_process_status task_status = task_process_status::task_success; + + sai_object_id_t tunnel_id = SAI_NULL_OBJECT_ID; // checking to see if the tunnel already exists bool exists = 
(tunnelTable.find(key) != tunnelTable.end()); + if (exists) + { + tunnel_id = tunnelTable[key].tunnel_id; + } if (op == SET_COMMAND) { @@ -114,7 +129,8 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } if (exists) { - setTunnelAttribute(fvField(i), dscp_mode, tunnelTable.find(key)->second.tunnel_id); + setTunnelAttribute(fvField(i), dscp_mode, tunnel_id); + tunnelTable[key].dscp_mode = dscp_mode; } } else if (fvField(i) == "ecn_mode") @@ -128,7 +144,9 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } if (exists) { - setTunnelAttribute(fvField(i), ecn_mode, tunnelTable.find(key)->second.tunnel_id); + SWSS_LOG_NOTICE("Skip setting ecn_mode since the SAI attribute SAI_TUNNEL_ATTR_DECAP_ECN_MODE is create only"); + valid = false; + break; } } else if (fvField(i) == "encap_ecn_mode") @@ -142,7 +160,9 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } if (exists) { - setTunnelAttribute(fvField(i), encap_ecn_mode, tunnelTable.find(key)->second.tunnel_id); + SWSS_LOG_NOTICE("Skip setting encap_ecn_mode since the SAI attribute SAI_TUNNEL_ATTR_ENCAP_ECN_MODE is create only"); + valid = false; + break; } } else if (fvField(i) == "ttl_mode") @@ -156,16 +176,85 @@ void TunnelDecapOrch::doTask(Consumer& consumer) } if (exists) { - setTunnelAttribute(fvField(i), ttl_mode, tunnelTable.find(key)->second.tunnel_id); + setTunnelAttribute(fvField(i), ttl_mode, tunnel_id); + } + } + else if (fvField(i) == decap_dscp_to_tc_field_name) + { + dscp_to_tc_map_id = gQosOrch->resolveTunnelQosMap(table_name, key, decap_dscp_to_tc_field_name, t); + if (dscp_to_tc_map_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_NOTICE("QoS map %s is not ready yet", decap_dscp_to_tc_field_name.c_str()); + task_status = task_process_status::task_need_retry; + break; + } + if (exists) + { + setTunnelAttribute(fvField(i), dscp_to_tc_map_id, tunnel_id); + } + } + else if (fvField(i) == decap_tc_to_pg_field_name) + { + tc_to_pg_map_id = gQosOrch->resolveTunnelQosMap(table_name, key, decap_tc_to_pg_field_name, 
t); + if (tc_to_pg_map_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_NOTICE("QoS map %s is not ready yet", decap_tc_to_pg_field_name.c_str()); + task_status = task_process_status::task_need_retry; + break; + } + if (exists) + { + setTunnelAttribute(fvField(i), tc_to_pg_map_id, tunnel_id); + } + } + else if (fvField(i) == encap_tc_to_dscp_field_name) + { + tc_to_dscp_map_id = gQosOrch->resolveTunnelQosMap(table_name, key, encap_tc_to_dscp_field_name, t); + if (tc_to_dscp_map_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_NOTICE("QoS map %s is not ready yet", encap_tc_to_dscp_field_name.c_str()); + task_status = task_process_status::task_need_retry; + break; + } + if (exists) + { + // Record only + tunnelTable[key].encap_tc_to_dscp_map_id = tc_to_dscp_map_id; + } + } + else if (fvField(i) == encap_tc_to_queue_field_name) + { + tc_to_queue_map_id = gQosOrch->resolveTunnelQosMap(table_name, key, encap_tc_to_queue_field_name, t); + if (tc_to_queue_map_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_NOTICE("QoS map %s is not ready yet", encap_tc_to_queue_field_name.c_str()); + task_status = task_process_status::task_need_retry; + break; + } + if (exists) + { + // Record only + tunnelTable[key].encap_tc_to_queue_map_id = tc_to_queue_map_id; } } } - // create new tunnel if it doesn't exists already + if (task_status == task_process_status::task_need_retry) + { + ++it; + continue; + } + + //create new tunnel if it doesn't exists already if (valid && !exists) { - if (addDecapTunnel(key, tunnel_type, ip_addresses, p_src_ip, dscp_mode, ecn_mode, encap_ecn_mode, ttl_mode)) + + if (addDecapTunnel(key, tunnel_type, ip_addresses, p_src_ip, dscp_mode, ecn_mode, encap_ecn_mode, ttl_mode, + dscp_to_tc_map_id, tc_to_pg_map_id)) { + // Record only + tunnelTable[key].encap_tc_to_dscp_map_id = tc_to_dscp_map_id; + tunnelTable[key].encap_tc_to_queue_map_id = tc_to_queue_map_id; SWSS_LOG_NOTICE("Tunnel(s) added to ASIC_DB."); } else @@ -179,7 +268,7 @@ void TunnelDecapOrch::doTask(Consumer& consumer) { if 
(exists) { - removeDecapTunnel(key); + removeDecapTunnel(table_name, key); } else { @@ -202,21 +291,34 @@ void TunnelDecapOrch::doTask(Consumer& consumer) * @param[in] dscp - dscp mode (uniform/pipe) * @param[in] ecn - ecn mode (copy_from_outer/standard) * @param[in] ttl - ttl mode (uniform/pipe) + * @param[in] dscp_to_tc_map_id - Map ID for remapping DSCP to TC (decap) + * @param[in] tc_to_pg_map_id - Map ID for remapping TC to PG (decap) * * Return Values: * @return true on success and false if there's an error */ -bool TunnelDecapOrch::addDecapTunnel(string key, string type, IpAddresses dst_ip, IpAddress* p_src_ip, string dscp, string ecn, string encap_ecn, string ttl) +bool TunnelDecapOrch::addDecapTunnel( + string key, + string type, + IpAddresses dst_ip, + IpAddress* p_src_ip, + string dscp, + string ecn, + string encap_ecn, + string ttl, + sai_object_id_t dscp_to_tc_map_id, + sai_object_id_t tc_to_pg_map_id) { SWSS_LOG_ENTER(); sai_status_t status; - + IpAddress src_ip("0.0.0.0"); // adding tunnel attributes to array and writing to ASIC_DB sai_attribute_t attr; vector tunnel_attrs; sai_object_id_t overlayIfId; + TunnelTermType term_type = TUNNEL_TERM_TYPE_P2MP; // create the overlay router interface to create a LOOPBACK type router interface (decap) vector overlay_intf_attrs; @@ -264,6 +366,8 @@ bool TunnelDecapOrch::addDecapTunnel(string key, string type, IpAddresses dst_ip attr.id = SAI_TUNNEL_ATTR_ENCAP_SRC_IP; copy(attr.value.ipaddr, p_src_ip->to_string()); tunnel_attrs.push_back(attr); + src_ip = *p_src_ip; + term_type = TUNNEL_TERM_TYPE_P2P; } // decap ecn mode (copy from outer/standard) @@ -312,6 +416,22 @@ bool TunnelDecapOrch::addDecapTunnel(string key, string type, IpAddresses dst_ip } tunnel_attrs.push_back(attr); + // DSCP_TO_TC_MAP + if (dscp_to_tc_map_id != SAI_NULL_OBJECT_ID) + { + attr.id = SAI_TUNNEL_ATTR_DECAP_QOS_DSCP_TO_TC_MAP; + attr.value.oid = dscp_to_tc_map_id; + tunnel_attrs.push_back(attr); + } + + //TC_TO_PG_MAP + if 
(tc_to_pg_map_id != SAI_NULL_OBJECT_ID) + { + attr.id = SAI_TUNNEL_ATTR_DECAP_QOS_TC_TO_PRIORITY_GROUP_MAP; + attr.value.oid = tc_to_pg_map_id; + tunnel_attrs.push_back(attr); + } + // write attributes to ASIC_DB sai_object_id_t tunnel_id; status = sai_tunnel_api->create_tunnel(&tunnel_id, gSwitchId, (uint32_t)tunnel_attrs.size(), tunnel_attrs.data()); @@ -325,10 +445,10 @@ bool TunnelDecapOrch::addDecapTunnel(string key, string type, IpAddresses dst_ip } } - tunnelTable[key] = { tunnel_id, overlayIfId, dst_ip, {} }; + tunnelTable[key] = { tunnel_id, overlayIfId, dst_ip, {}, dscp, SAI_NULL_OBJECT_ID, SAI_NULL_OBJECT_ID }; - // create a decap tunnel entry for every ip - if (!addDecapTunnelTermEntries(key, dst_ip, tunnel_id)) + // create a decap tunnel entry for every source_ip - dest_ip pair + if (!addDecapTunnelTermEntries(key, src_ip, dst_ip, tunnel_id, term_type)) { return false; } @@ -342,13 +462,15 @@ bool TunnelDecapOrch::addDecapTunnel(string key, string type, IpAddresses dst_ip * * Arguments: * @param[in] tunnelKey - key of the tunnel from APP_DB - * @param[in] dst_ip - destination ip addresses to decap + * @param[in] src_ip - source ip address of decap tunnel + * @param[in] dst_ips - destination ip addresses to decap * @param[in] tunnel_id - the id of the tunnel + * @param[in] term_type - P2P or P2MP. 
Other types (MP2P and MP2MP) not supported yet * * Return Values: * @return true on success and false if there's an error */ -bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, IpAddresses dst_ip, sai_object_id_t tunnel_id) +bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, swss::IpAddress src_ip, swss::IpAddresses dst_ips, sai_object_id_t tunnel_id, TunnelTermType tunnel_type) { SWSS_LOG_ENTER(); @@ -361,7 +483,14 @@ bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, IpAddresses ds tunnel_table_entry_attrs.push_back(attr); attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TYPE; - attr.value.u32 = SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP; + if (tunnel_type == TUNNEL_TERM_TYPE_P2P) + { + attr.value.u32 = SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2P; + } + else + { + attr.value.u32 = SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP; + } tunnel_table_entry_attrs.push_back(attr); attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TUNNEL_TYPE; @@ -372,19 +501,38 @@ bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, IpAddresses ds attr.value.oid = tunnel_id; tunnel_table_entry_attrs.push_back(attr); + if (tunnel_type == TUNNEL_TERM_TYPE_P2P) + { + // Set src ip for P2P only + attr.id = SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_SRC_IP; + copy(attr.value.ipaddr, src_ip); + tunnel_table_entry_attrs.push_back(attr); + } + TunnelEntry *tunnel_info = &tunnelTable.find(tunnelKey)->second; // loop through the IP list and create a new tunnel table entry for every IP (in network byte order) - set tunnel_ips = dst_ip.getIpAddresses(); + set tunnel_ips = dst_ips.getIpAddresses(); for (auto it = tunnel_ips.begin(); it != tunnel_ips.end(); ++it) { const IpAddress& ia = *it; - string ip = ia.to_string(); + string dst_ip = ia.to_string(); + // The key will be src_ip-dst_ip (like 10.1.1.1-20.2.2.2) if src_ip is not 0, + // or the key will contain dst_ip only + string key; + if (!src_ip.isZero()) + { + key = src_ip.to_string() + '-' + dst_ip; + } + else + { + key = dst_ip; 
+ } - // check if the there's an entry already for the ip - if (existingIps.find(ip) != existingIps.end()) + // check if the there's an entry already for the key pair + if (existingIps.find(key) != existingIps.end()) { - SWSS_LOG_ERROR("%s already exists. Did not create entry.", ip.c_str()); + SWSS_LOG_NOTICE("%s already exists. Did not create entry.", key.c_str()); } else { @@ -397,7 +545,7 @@ bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, IpAddresses ds sai_status_t status = sai_tunnel_api->create_tunnel_term_table_entry(&tunnel_term_table_entry_id, gSwitchId, (uint32_t)tunnel_table_entry_attrs.size(), tunnel_table_entry_attrs.data()); if (status != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to create tunnel entry table for ip: %s", ip.c_str()); + SWSS_LOG_ERROR("Failed to create tunnel entry table for ip: %s", key.c_str()); task_process_status handle_status = handleSaiCreateStatus(SAI_API_TUNNEL, status); if (handle_status != task_success) { @@ -406,15 +554,15 @@ bool TunnelDecapOrch::addDecapTunnelTermEntries(string tunnelKey, IpAddresses ds } // insert into ip to entry mapping - existingIps.insert(ip); + existingIps.insert(key); // insert entry id and ip into tunnel mapping - tunnel_info->tunnel_term_info.push_back({ tunnel_term_table_entry_id, ip }); + tunnel_info->tunnel_term_info.push_back({ tunnel_term_table_entry_id, src_ip.to_string(), dst_ip, tunnel_type }); // pop the last element for the next loop tunnel_table_entry_attrs.pop_back(); - SWSS_LOG_NOTICE("Created tunnel entry for ip: %s", ip.c_str()); + SWSS_LOG_NOTICE("Created tunnel entry for ip: %s", dst_ip.c_str()); } } @@ -438,30 +586,6 @@ bool TunnelDecapOrch::setTunnelAttribute(string field, string value, sai_object_ sai_attribute_t attr; - if (field == "ecn_mode") - { - // decap ecn mode (copy from outer/standard) - attr.id = SAI_TUNNEL_ATTR_DECAP_ECN_MODE; - if (value == "copy_from_outer") - { - attr.value.s32 = SAI_TUNNEL_DECAP_ECN_MODE_COPY_FROM_OUTER; - } - else if 
(value == "standard") - { - attr.value.s32 = SAI_TUNNEL_DECAP_ECN_MODE_STANDARD; - } - } - - if (field == "encap_ecn_mode") - { - // encap ecn mode (only standard is supported) - attr.id = SAI_TUNNEL_ATTR_ENCAP_ECN_MODE; - if (value == "standard") - { - attr.value.s32 = SAI_TUNNEL_ENCAP_ECN_MODE_STANDARD; - } - } - if (field == "ttl_mode") { // ttl mode (uniform/pipe) @@ -504,6 +628,51 @@ bool TunnelDecapOrch::setTunnelAttribute(string field, string value, sai_object_ return true; } +/** + * Function Description: + * @brief sets attributes for a tunnel (decap_dscp_to_tc_map and decap_tc_to_pg_map are supported) + * + * Arguments: + * @param[in] field - field to set the attribute for + * @param[in] value - value to set the attribute to (sai_object_id) + * @param[in] existing_tunnel_id - the id of the tunnel you want to set the attribute for + * + * Return Values: + * @return true on success and false if there's an error + */ +bool TunnelDecapOrch::setTunnelAttribute(string field, sai_object_id_t value, sai_object_id_t existing_tunnel_id) +{ + + sai_attribute_t attr; + + if (field == decap_dscp_to_tc_field_name) + { + // TC remapping. + attr.id = SAI_TUNNEL_ATTR_DECAP_QOS_DSCP_TO_TC_MAP; + attr.value.oid = value; + + } + else if (field == decap_tc_to_pg_field_name) + { + // TC to PG remapping + attr.id = SAI_TUNNEL_ATTR_DECAP_QOS_TC_TO_PRIORITY_GROUP_MAP; + attr.value.oid = value; + } + + sai_status_t status = sai_tunnel_api->set_tunnel_attribute(existing_tunnel_id, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to set attribute %s with value %" PRIu64, field.c_str(), value); + task_process_status handle_status = handleSaiSetStatus(SAI_API_TUNNEL, status); + if (handle_status != task_success) + { + return parseHandleSaiStatusFailure(handle_status); + } + } + SWSS_LOG_NOTICE("Set attribute %s with value %" PRIu64, field.c_str(), value); + return true; +} + /** * Function Description: * @brief sets ips for a particular tunnel. 
deletes ips that are old and adds new ones @@ -530,7 +699,7 @@ bool TunnelDecapOrch::setIpAttribute(string key, IpAddresses new_ip_addresses, s for (auto it = tunnel_term_info_copy.begin(); it != tunnel_term_info_copy.end(); ++it) { TunnelTermEntry tunnel_entry_info = *it; - string ip = tunnel_entry_info.ip_address; + string ip = tunnel_entry_info.dst_ip; if (!new_ip_addresses.contains(ip)) { if (!removeDecapTunnelTermEntry(tunnel_entry_info.tunnel_term_id, ip)) @@ -541,12 +710,12 @@ bool TunnelDecapOrch::setIpAttribute(string key, IpAddresses new_ip_addresses, s else { // add the data into the tunnel_term_info - tunnel_info->tunnel_term_info.push_back({ tunnel_entry_info.tunnel_term_id, ip }); + tunnel_info->tunnel_term_info.push_back({ tunnel_entry_info.tunnel_term_id, "0.0.0.0", ip, TUNNEL_TERM_TYPE_P2MP }); } } // add all the new ip addresses - if(!addDecapTunnelTermEntries(key, new_ip_addresses, tunnel_id)) + if(!addDecapTunnelTermEntries(key, IpAddress(0), new_ip_addresses, tunnel_id, TUNNEL_TERM_TYPE_P2MP)) { return false; } @@ -559,12 +728,13 @@ bool TunnelDecapOrch::setIpAttribute(string key, IpAddresses new_ip_addresses, s * @brief remove decap tunnel * * Arguments: + * @param[in] table_name - name of the table in APP_DB * @param[in] key - key of the tunnel from APP_DB * * Return Values: * @return true on success and false if there's an error */ -bool TunnelDecapOrch::removeDecapTunnel(string key) +bool TunnelDecapOrch::removeDecapTunnel(string table_name, string key) { sai_status_t status; TunnelEntry *tunnel_info = &tunnelTable.find(key)->second; @@ -573,7 +743,17 @@ bool TunnelDecapOrch::removeDecapTunnel(string key) for (auto it = tunnel_info->tunnel_term_info.begin(); it != tunnel_info->tunnel_term_info.end(); ++it) { TunnelTermEntry tunnel_entry_info = *it; - if (!removeDecapTunnelTermEntry(tunnel_entry_info.tunnel_term_id, tunnel_entry_info.ip_address)) + string term_key; + swss::IpAddress src_ip(tunnel_entry_info.src_ip); + if (!src_ip.isZero()) + 
{ + term_key = src_ip.to_string() + '-' + tunnel_entry_info.dst_ip; + } + else + { + term_key = tunnel_entry_info.dst_ip; + } + if (!removeDecapTunnelTermEntry(tunnel_entry_info.tunnel_term_id, term_key)) { return false; } @@ -605,6 +785,7 @@ bool TunnelDecapOrch::removeDecapTunnel(string key) } tunnelTable.erase(key); + gQosOrch->removeTunnelReference(table_name, key); return true; } @@ -618,7 +799,7 @@ bool TunnelDecapOrch::removeDecapTunnel(string key) * Return Values: * @return true on success and false if there's an error */ -bool TunnelDecapOrch::removeDecapTunnelTermEntry(sai_object_id_t tunnel_term_id, string ip) +bool TunnelDecapOrch::removeDecapTunnelTermEntry(sai_object_id_t tunnel_term_id, string key) { sai_status_t status; @@ -634,8 +815,8 @@ bool TunnelDecapOrch::removeDecapTunnelTermEntry(sai_object_id_t tunnel_term_id, } // making sure to remove all instances of the ip address - existingIps.erase(ip); - SWSS_LOG_NOTICE("Removed decap tunnel term entry with ip address: %s", ip.c_str()); + existingIps.erase(key); + SWSS_LOG_NOTICE("Removed decap tunnel term entry with ip address: %s", key.c_str()); return true; } @@ -803,3 +984,38 @@ IpAddresses TunnelDecapOrch::getDstIpAddresses(std::string tunnelKey) return tunnelTable[tunnelKey].dst_ip_addrs; } + +std::string TunnelDecapOrch::getDscpMode(const std::string &tunnelKey) const +{ + auto iter = tunnelTable.find(tunnelKey); + if (iter == tunnelTable.end()) + { + SWSS_LOG_INFO("Tunnel not found %s", tunnelKey.c_str()); + return ""; + } + return iter->second.dscp_mode; +} + +bool TunnelDecapOrch::getQosMapId(const std::string &tunnelKey, const std::string &qos_table_type, sai_object_id_t &oid) const +{ + auto iter = tunnelTable.find(tunnelKey); + if (iter == tunnelTable.end()) + { + SWSS_LOG_INFO("Tunnel not found %s", tunnelKey.c_str()); + return false; + } + if (qos_table_type == encap_tc_to_dscp_field_name) + { + oid = iter->second.encap_tc_to_dscp_map_id; + } + else if (qos_table_type == 
encap_tc_to_queue_field_name) + { + oid = iter->second.encap_tc_to_queue_map_id; + } + else + { + SWSS_LOG_ERROR("Unsupported qos type %s", qos_table_type.c_str()); + return false; + } + return true; +} diff --git a/orchagent/tunneldecaporch.h b/orchagent/tunneldecaporch.h index f7b5f923d9..18cf4f8856 100644 --- a/orchagent/tunneldecaporch.h +++ b/orchagent/tunneldecaporch.h @@ -9,18 +9,34 @@ #include "ipaddress.h" #include "ipaddresses.h" + +enum TunnelTermType +{ + TUNNEL_TERM_TYPE_P2P, + TUNNEL_TERM_TYPE_P2MP +}; + +/* Constants */ +#define MUX_TUNNEL "MuxTunnel0" + + struct TunnelTermEntry { sai_object_id_t tunnel_term_id; - std::string ip_address; + std::string src_ip; + std::string dst_ip; + TunnelTermType term_type; }; struct TunnelEntry { - sai_object_id_t tunnel_id; // tunnel id - sai_object_id_t overlay_intf_id; // overlay interface id - swss::IpAddresses dst_ip_addrs; // destination ip addresses - std::vector tunnel_term_info; // tunnel_entry ids related to the tunnel abd ips related to the tunnel (all ips for tunnel entries that refer to this tunnel) + sai_object_id_t tunnel_id; // tunnel id + sai_object_id_t overlay_intf_id; // overlay interface id + swss::IpAddresses dst_ip_addrs; // destination ip addresses + std::vector tunnel_term_info; // tunnel_entry ids related to the tunnel abd ips related to the tunnel (all ips for tunnel entries that refer to this tunnel) + std::string dscp_mode; // dscp_mode, will be used in muxorch + sai_object_id_t encap_tc_to_dscp_map_id; // TC_TO_DSCP map id, will be used in muxorch + sai_object_id_t encap_tc_to_queue_map_id; // TC_TO_QUEUE map id, will be used in muxorch }; struct NexthopTunnel @@ -32,7 +48,10 @@ struct NexthopTunnel /* TunnelTable: key string, tunnel object id */ typedef std::map TunnelTable; -/* ExistingIps: ips that currently have term entries */ +/* + ExistingIps: ips that currently have term entries, + Key in ExistingIps is src_ip-dst_ip +*/ typedef std::unordered_set ExistingIps; /* Nexthop IP to 
refcount map */ @@ -49,20 +68,23 @@ class TunnelDecapOrch : public Orch sai_object_id_t createNextHopTunnel(std::string tunnelKey, swss::IpAddress& ipAddr); bool removeNextHopTunnel(std::string tunnelKey, swss::IpAddress& ipAddr); swss::IpAddresses getDstIpAddresses(std::string tunnelKey); - + std::string getDscpMode(const std::string &tunnelKey) const; + bool getQosMapId(const std::string &tunnelKey, const std::string &qos_table_type, sai_object_id_t &oid) const; private: TunnelTable tunnelTable; ExistingIps existingIps; TunnelNhs tunnelNhs; bool addDecapTunnel(std::string key, std::string type, swss::IpAddresses dst_ip, swss::IpAddress* p_src_ip, - std::string dscp, std::string ecn, std::string encap_ecn, std::string ttl); - bool removeDecapTunnel(std::string key); + std::string dscp, std::string ecn, std::string encap_ecn, std::string ttl, + sai_object_id_t dscp_to_tc_map_id, sai_object_id_t tc_to_pg_map_id); + bool removeDecapTunnel(std::string table_name, std::string key); - bool addDecapTunnelTermEntries(std::string tunnelKey, swss::IpAddresses dst_ip, sai_object_id_t tunnel_id); + bool addDecapTunnelTermEntries(std::string tunnelKey, swss::IpAddress src_ip, swss::IpAddresses dst_ip, sai_object_id_t tunnel_id, TunnelTermType type); bool removeDecapTunnelTermEntry(sai_object_id_t tunnel_term_id, std::string ip); bool setTunnelAttribute(std::string field, std::string value, sai_object_id_t existing_tunnel_id); + bool setTunnelAttribute(std::string field, sai_object_id_t value, sai_object_id_t existing_tunnel_id); bool setIpAttribute(std::string key, swss::IpAddresses new_ip_addresses, sai_object_id_t tunnel_id); sai_object_id_t getNextHopTunnel(std::string tunnelKey, swss::IpAddress& ipAddr); diff --git a/orchagent/vnetorch.cpp b/orchagent/vnetorch.cpp index 9640e0ee3a..4b4e91b978 100644 --- a/orchagent/vnetorch.cpp +++ b/orchagent/vnetorch.cpp @@ -21,6 +21,7 @@ #include "neighorch.h" #include "crmorch.h" #include "routeorch.h" +#include 
"flowcounterrouteorch.h" extern sai_virtual_router_api_t* sai_virtual_router_api; extern sai_route_api_t* sai_route_api; @@ -37,6 +38,7 @@ extern PortsOrch *gPortsOrch; extern IntfsOrch *gIntfsOrch; extern NeighOrch *gNeighOrch; extern CrmOrch *gCrmOrch; +extern FlowCounterRouteOrch *gFlowCounterRouteOrch; extern RouteOrch *gRouteOrch; extern MacAddress gVxlanMacAddress; extern BfdOrch *gBfdOrch; @@ -97,6 +99,7 @@ bool VNetVrfObject::createObj(vector& attrs) vnet_name_.c_str(), status); throw std::runtime_error("Failed to create VR object"); } + gFlowCounterRouteOrch->onAddVR(router_id); return true; }; @@ -169,6 +172,28 @@ bool VNetVrfObject::addRoute(IpPrefix& ipPrefix, NextHopGroupKey& nexthops) return true; } +void VNetVrfObject::addProfile(IpPrefix& ipPrefix, string& profile) +{ + profile_[ipPrefix] = profile; +} + +void VNetVrfObject::removeProfile(IpPrefix& ipPrefix) +{ + if (profile_.find(ipPrefix) != profile_.end()) + { + profile_.erase(ipPrefix); + } +} + +string VNetVrfObject::getProfile(IpPrefix& ipPrefix) +{ + if (profile_.find(ipPrefix) != profile_.end()) + { + return profile_[ipPrefix]; + } + return string(); +} + void VNetVrfObject::increaseNextHopRefCount(const nextHop& nh) { /* Return when there is no next hop (dropped) */ @@ -309,12 +334,16 @@ VNetVrfObject::~VNetVrfObject() set vr_ent = getVRids(); for (auto it : vr_ent) { - sai_status_t status = sai_virtual_router_api->remove_virtual_router(it); - if (status != SAI_STATUS_SUCCESS) + if (it != gVirtualRouterId) { - SWSS_LOG_ERROR("Failed to remove virtual router name: %s, rv:%d", - vnet_name_.c_str(), status); + sai_status_t status = sai_virtual_router_api->remove_virtual_router(it); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove virtual router name: %s, rv:%d", + vnet_name_.c_str(), status); + } } + gFlowCounterRouteOrch->onRemoveVR(it); } SWSS_LOG_INFO("VNET '%s' deleted ", vnet_name_.c_str()); @@ -400,6 +429,7 @@ bool VNetOrch::addOperation(const Request& request) 
uint32_t vni=0; string tunnel; string scope; + swss::MacAddress overlay_dmac; for (const auto& name: request.getAttrFieldNames()) { @@ -431,6 +461,10 @@ bool VNetOrch::addOperation(const Request& request) { advertise_prefix = request.getAttrBool("advertise_prefix"); } + else if (name == "overlay_dmac") + { + overlay_dmac = request.getAttrMacAddress("overlay_dmac"); + } else { SWSS_LOG_INFO("Unknown attribute: %s", name.c_str()); @@ -457,7 +491,7 @@ bool VNetOrch::addOperation(const Request& request) if (it == std::end(vnet_table_)) { - VNetInfo vnet_info = { tunnel, vni, peer_list, scope, advertise_prefix }; + VNetInfo vnet_info = { tunnel, vni, peer_list, scope, advertise_prefix, overlay_dmac }; obj = createObject(vnet_name, vnet_info, attrs); create = true; @@ -475,9 +509,14 @@ bool VNetOrch::addOperation(const Request& request) else { SWSS_LOG_NOTICE("VNET '%s' already exists ", vnet_name.c_str()); + if (!!overlay_dmac && overlay_dmac != it->second->getOverlayDMac()) + { + it->second->setOverlayDMac(overlay_dmac); + VNetRouteOrch* vnet_route_orch = gDirectory.get(); + vnet_route_orch->updateAllMonitoringSession(vnet_name); + } } } - if (create) { vnet_table_[vnet_name] = std::move(obj); @@ -551,6 +590,40 @@ bool VNetOrch::delOperation(const Request& request) return true; } +bool VNetOrch::getVrfIdByVnetName(const std::string& vnet_name, sai_object_id_t &vrf_id) +{ + if (!isVnetExists(vnet_name)) + { + return false; + } + + auto *vrf_obj = getTypePtr(vnet_name); + // Now we only support ingress VR for VNET, so just get ingress VR ID + // Once we support egress VR, need revisit here. 
+ vrf_id = vrf_obj->getVRidIngress(); + return vrf_id != SAI_NULL_OBJECT_ID; +} + +bool VNetOrch::getVnetNameByVrfId(sai_object_id_t vrf_id, std::string& vnet_name) +{ + for (auto &entry : vnet_table_) + { + auto *vrf_obj = dynamic_cast(entry.second.get()); + if (!vrf_obj) + { + continue; + } + + if (vrf_obj->getVRidIngress() == vrf_id) + { + vnet_name = entry.first; + return true; + } + } + + return false; +} + /* * Vnet Route Handling */ @@ -583,6 +656,8 @@ static bool del_route(sai_object_id_t vr_id, sai_ip_prefix_t& ip_pfx) gCrmOrch->decCrmResUsedCounter(CrmResourceType::CRM_IPV6_ROUTE); } + gFlowCounterRouteOrch->onRemoveMiscRouteEntry(vr_id, ip_pfx, false); + return true; } @@ -614,6 +689,8 @@ static bool add_route(sai_object_id_t vr_id, sai_ip_prefix_t& ip_pfx, sai_object gCrmOrch->incCrmResUsedCounter(CrmResourceType::CRM_IPV6_ROUTE); } + gFlowCounterRouteOrch->onAddMiscRouteEntry(vr_id, ip_pfx, false); + return true; } @@ -648,8 +725,11 @@ VNetRouteOrch::VNetRouteOrch(DBConnector *db, vector &tableNames, VNetOr handler_map_.insert(handler_pair(APP_VNET_RT_TUNNEL_TABLE_NAME, &VNetRouteOrch::handleTunnel)); state_db_ = shared_ptr(new DBConnector("STATE_DB", 0)); + app_db_ = shared_ptr(new DBConnector("APPL_DB", 0)); + state_vnet_rt_tunnel_table_ = unique_ptr
(new Table(state_db_.get(), STATE_VNET_RT_TUNNEL_TABLE_NAME)); state_vnet_rt_adv_table_ = unique_ptr
(new Table(state_db_.get(), STATE_ADVERTISE_NETWORK_TABLE_NAME)); + monitor_session_producer_ = unique_ptr
(new Table(app_db_.get(), APP_VNET_MONITOR_TABLE_NAME)); gBfdOrch->attach(this); } @@ -665,7 +745,7 @@ sai_object_id_t VNetRouteOrch::getNextHopGroupId(const string& vnet, const NextH return syncd_nexthop_groups_[vnet][nexthops].next_hop_group_id; } -bool VNetRouteOrch::addNextHopGroup(const string& vnet, const NextHopGroupKey &nexthops, VNetVrfObject *vrf_obj) +bool VNetRouteOrch::addNextHopGroup(const string& vnet, const NextHopGroupKey &nexthops, VNetVrfObject *vrf_obj, const string& monitoring) { SWSS_LOG_ENTER(); @@ -686,7 +766,7 @@ bool VNetRouteOrch::addNextHopGroup(const string& vnet, const NextHopGroupKey &n for (auto it : next_hop_set) { nh_seq_id_in_nhgrp[it] = ++seq_id; - if (nexthop_info_[vnet].find(it.ip_address) != nexthop_info_[vnet].end() && nexthop_info_[vnet][it.ip_address].bfd_state != SAI_BFD_SESSION_STATE_UP) + if (monitoring != "custom" && nexthop_info_[vnet].find(it.ip_address) != nexthop_info_[vnet].end() && nexthop_info_[vnet][it.ip_address].bfd_state != SAI_BFD_SESSION_STATE_UP) { continue; } @@ -825,9 +905,178 @@ bool VNetRouteOrch::removeNextHopGroup(const string& vnet, const NextHopGroupKey return true; } +bool VNetRouteOrch::createNextHopGroup(const string& vnet, + NextHopGroupKey& nexthops, + VNetVrfObject *vrf_obj, + const string& monitoring) +{ + + if (nexthops.getSize() == 0) + { + return true; + } + else if (nexthops.getSize() == 1) + { + NextHopKey nexthop(nexthops.to_string(), true); + NextHopGroupInfo next_hop_group_entry; + next_hop_group_entry.next_hop_group_id = vrf_obj->getTunnelNextHop(nexthop); + next_hop_group_entry.ref_count = 0; + if (monitoring == "custom" || nexthop_info_[vnet].find(nexthop.ip_address) == nexthop_info_[vnet].end() || nexthop_info_[vnet][nexthop.ip_address].bfd_state == SAI_BFD_SESSION_STATE_UP) + { + next_hop_group_entry.active_members[nexthop] = SAI_NULL_OBJECT_ID; + } + syncd_nexthop_groups_[vnet][nexthops] = next_hop_group_entry; + } + else + { + if (!addNextHopGroup(vnet, nexthops, vrf_obj, 
monitoring)) + { + SWSS_LOG_ERROR("Failed to create next hop group %s", nexthops.to_string().c_str()); + return false; + } + } + return true; +} + +NextHopGroupKey VNetRouteOrch::getActiveNHSet(const string& vnet, + NextHopGroupKey& nexthops, + const IpPrefix& ipPrefix) +{ + // This function takes a nexthop group key and iterates over the nexthops in that group + // to identify the ones which are active based on their monitor session state. + // These next hops are collected into another next hop group key called nhg_custom and returned. + NextHopGroupKey nhg_custom("", true); + set next_hop_set = nexthops.getNextHops(); + for (auto it : next_hop_set) + { + if(monitor_info_.find(vnet) != monitor_info_.end() && + monitor_info_[vnet].find(ipPrefix) != monitor_info_[vnet].end()) + { + for (auto monitor : monitor_info_[vnet][ipPrefix]) + { + if (monitor.second.endpoint == it) + { + if (monitor.second.state == MONITOR_SESSION_STATE_UP) + { + // monitor session exists and is up + nhg_custom.add(it); + + } + continue; + } + } + } + } + return nhg_custom; +} + +bool VNetRouteOrch::selectNextHopGroup(const string& vnet, + NextHopGroupKey& nexthops_primary, + NextHopGroupKey& nexthops_secondary, + const string& monitoring, + IpPrefix& ipPrefix, + VNetVrfObject *vrf_obj, + NextHopGroupKey& nexthops_selected, + const map& monitors) +{ + // This function returns the next hop group which is to be used to in the hardware. + // for non priority tunnel routes, this would return nexthops_primary or its subset if + // BFD sessions for the endpoits in the NHG are up. + // For priority tunnel scenario, it sets up endpoint monitors for both primary and secondary. + // This is followed by an attempt to create a NHG which can be subset of nexthops_primary + // depending on the endpoint monitor state. If no NHG from primary is created, we attempt + // the same for secondary. 
+ if(nexthops_secondary.getSize() != 0 && monitoring == "custom") + { + auto it_route = syncd_tunnel_routes_[vnet].find(ipPrefix); + if (it_route == syncd_tunnel_routes_[vnet].end()) + { + setEndpointMonitor(vnet, monitors, nexthops_primary, monitoring, ipPrefix); + setEndpointMonitor(vnet, monitors, nexthops_secondary, monitoring, ipPrefix); + } + else + { + if (it_route->second.primary != nexthops_primary) + { + setEndpointMonitor(vnet, monitors, nexthops_primary, monitoring, ipPrefix); + } + if (it_route->second.secondary != nexthops_secondary) + { + setEndpointMonitor(vnet, monitors, nexthops_secondary, monitoring, ipPrefix); + } + nexthops_selected = it_route->second.nhg_key; + return true; + } + + NextHopGroupKey nhg_custom = getActiveNHSet( vnet, nexthops_primary, ipPrefix); + if (!hasNextHopGroup(vnet, nhg_custom)) + { + if (!createNextHopGroup(vnet, nhg_custom, vrf_obj, monitoring)) + { + SWSS_LOG_WARN("Failed to create Primary based custom next hop group. Cannot proceed."); + delEndpointMonitor(vnet, nexthops_primary, ipPrefix); + delEndpointMonitor(vnet, nexthops_secondary, ipPrefix); + monitor_info_[vnet].erase(ipPrefix); + + return false; + } + } + if (nhg_custom.getSize() > 0 ) + { + SWSS_LOG_INFO(" Created Primary based custom next hop group.%s", nhg_custom.to_string().c_str() ); + nexthops_selected = nhg_custom; + return true; + } + NextHopGroupKey nhg_custom_sec = getActiveNHSet( vnet, nexthops_secondary, ipPrefix); + + if (!hasNextHopGroup(vnet, nhg_custom_sec)) + { + if (!createNextHopGroup(vnet, nhg_custom_sec, vrf_obj, monitoring)) + { + SWSS_LOG_WARN("Failed to create secondary based custom next hop group. 
Cannot proceed."); + delEndpointMonitor(vnet, nexthops_primary, ipPrefix); + delEndpointMonitor(vnet, nexthops_secondary, ipPrefix); + monitor_info_[vnet].erase(ipPrefix); + + return false; + } + } + if (nhg_custom_sec.getSize() > 0 ) + { + SWSS_LOG_INFO(" Created Secondary based custom next hop group.(%s).", nhg_custom_sec.to_string().c_str() ); + nexthops_selected = nhg_custom_sec; + return true; + } + // nhg_custom is empty. we shall create a dummy enpty NHG for book keeping. + if (!hasNextHopGroup(vnet, nhg_custom) && !hasNextHopGroup(vnet, nhg_custom_sec) ) + { + NextHopGroupInfo next_hop_group_entry; + next_hop_group_entry.next_hop_group_id = SAI_NULL_OBJECT_ID; + next_hop_group_entry.ref_count = 0; + syncd_nexthop_groups_[vnet][nhg_custom] = next_hop_group_entry; + } + nexthops_selected = nhg_custom; + return true; + } + else if (!hasNextHopGroup(vnet, nexthops_primary)) + { + SWSS_LOG_INFO("Creating next hop group %s", nexthops_primary.to_string().c_str()); + setEndpointMonitor(vnet, monitors, nexthops_primary, monitoring, ipPrefix); + if (!createNextHopGroup(vnet, nexthops_primary, vrf_obj, monitoring)) + { + delEndpointMonitor(vnet, nexthops_primary, ipPrefix); + return false; + } + } + nexthops_selected = nexthops_primary; + return true; +} + template<> bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipPrefix, - NextHopGroupKey& nexthops, string& op, + NextHopGroupKey& nexthops, string& op, string& profile, + const string& monitoring, NextHopGroupKey& nexthops_secondary, + const IpPrefix& adv_prefix, const map& monitors) { SWSS_LOG_ENTER(); @@ -865,33 +1114,15 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP if (op == SET_COMMAND) { - sai_object_id_t nh_id; - if (!hasNextHopGroup(vnet, nexthops)) + sai_object_id_t nh_id = SAI_NULL_OBJECT_ID; + NextHopGroupKey active_nhg("", true); + if (!selectNextHopGroup(vnet, nexthops, nexthops_secondary, monitoring, ipPrefix, vrf_obj, active_nhg, monitors)) { - 
setEndpointMonitor(vnet, monitors, nexthops); - if (nexthops.getSize() == 1) - { - NextHopKey nexthop(nexthops.to_string(), true); - NextHopGroupInfo next_hop_group_entry; - next_hop_group_entry.next_hop_group_id = vrf_obj->getTunnelNextHop(nexthop); - next_hop_group_entry.ref_count = 0; - if (nexthop_info_[vnet].find(nexthop.ip_address) == nexthop_info_[vnet].end() || nexthop_info_[vnet][nexthop.ip_address].bfd_state == SAI_BFD_SESSION_STATE_UP) - { - next_hop_group_entry.active_members[nexthop] = SAI_NULL_OBJECT_ID; - } - syncd_nexthop_groups_[vnet][nexthops] = next_hop_group_entry; - } - else - { - if (!addNextHopGroup(vnet, nexthops, vrf_obj)) - { - delEndpointMonitor(vnet, nexthops); - SWSS_LOG_ERROR("Failed to create next hop group %s", nexthops.to_string().c_str()); - return false; - } - } + return true; } - nh_id = syncd_nexthop_groups_[vnet][nexthops].next_hop_group_id; + + // note: nh_id can be SAI_NULL_OBJECT_ID when active_nhg is empty. + nh_id = syncd_nexthop_groups_[vnet][active_nhg].next_hop_group_id; auto it_route = syncd_tunnel_routes_[vnet].find(ipPrefix); for (auto vr_id : vr_set) @@ -899,11 +1130,11 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP bool route_status = true; // Remove route if the nexthop group has no active endpoint - if (syncd_nexthop_groups_[vnet][nexthops].active_members.empty()) + if (syncd_nexthop_groups_[vnet][active_nhg].active_members.empty()) { if (it_route != syncd_tunnel_routes_[vnet].end()) { - NextHopGroupKey nhg = it_route->second; + NextHopGroupKey nhg = it_route->second.nhg_key; // Remove route when updating from a nhg with active member to another nhg without if (!syncd_nexthop_groups_[vnet][nhg].active_members.empty()) { @@ -919,12 +1150,12 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP } else { - NextHopGroupKey nhg = it_route->second; + NextHopGroupKey nhg = it_route->second.nhg_key; if (syncd_nexthop_groups_[vnet][nhg].active_members.empty()) { route_status = 
add_route(vr_id, pfx, nh_id); - } - else + } + else { route_status = update_route(vr_id, pfx, nh_id); } @@ -935,46 +1166,110 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP { SWSS_LOG_ERROR("Route add/update failed for %s, vr_id '0x%" PRIx64, ipPrefix.to_string().c_str(), vr_id); /* Clean up the newly created next hop group entry */ - if (nexthops.getSize() > 1) + if (active_nhg.getSize() > 1) { - removeNextHopGroup(vnet, nexthops, vrf_obj); + removeNextHopGroup(vnet, active_nhg, vrf_obj); } return false; } } - - if (it_route != syncd_tunnel_routes_[vnet].end()) + bool route_updated = false; + bool priority_route_updated = false; + if (it_route != syncd_tunnel_routes_[vnet].end() && + ((monitoring == "" && it_route->second.nhg_key != nexthops) || + (monitoring == "custom" && (it_route->second.primary != nexthops || it_route->second.secondary != nexthops_secondary)))) { - // In case of updating an existing route, decrease the reference count for the previous nexthop group - NextHopGroupKey nhg = it_route->second; - if(--syncd_nexthop_groups_[vnet][nhg].ref_count == 0) + route_updated = true; + NextHopGroupKey nhg = it_route->second.nhg_key; + if (monitoring == "custom") { - if (nexthops.getSize() > 1) + // if the previously active NHG is same as the newly created active NHG.case of primary secondary swap or + //when primary is active and secondary is changed or vice versa. In these cases we dont remove the NHG + // but only remove the monitors for the set which has changed. 
+ if (it_route->second.primary != nexthops) { - removeNextHopGroup(vnet, nhg, vrf_obj); + delEndpointMonitor(vnet, it_route->second.primary, ipPrefix); } - else + if (it_route->second.secondary != nexthops_secondary) { - syncd_nexthop_groups_[vnet].erase(nhg); - NextHopKey nexthop(nhg.to_string(), true); - vrf_obj->removeTunnelNextHop(nexthop); + delEndpointMonitor(vnet, it_route->second.secondary, ipPrefix); } - delEndpointMonitor(vnet, nhg); + if (monitor_info_[vnet][ipPrefix].empty()) + { + monitor_info_[vnet].erase(ipPrefix); + } + priority_route_updated = true; } else { - syncd_nexthop_groups_[vnet][nhg].tunnel_routes.erase(ipPrefix); + // In case of updating an existing route, decrease the reference count for the previous nexthop group + if (--syncd_nexthop_groups_[vnet][nhg].ref_count == 0) + { + if (nhg.getSize() > 1) + { + removeNextHopGroup(vnet, nhg, vrf_obj); + } + else + { + syncd_nexthop_groups_[vnet].erase(nhg); + if(nhg.getSize() == 1) + { + NextHopKey nexthop(nhg.to_string(), true); + vrf_obj->removeTunnelNextHop(nexthop); + } + } + if (monitoring != "custom") + { + delEndpointMonitor(vnet, nhg, ipPrefix); + } + } + else + { + syncd_nexthop_groups_[vnet][nhg].tunnel_routes.erase(ipPrefix); + } + vrf_obj->removeRoute(ipPrefix); + vrf_obj->removeProfile(ipPrefix); } - vrf_obj->removeRoute(ipPrefix); } + if (!profile.empty()) + { + vrf_obj->addProfile(ipPrefix, profile); + } + if (it_route == syncd_tunnel_routes_[vnet].end() || route_updated) + { + syncd_nexthop_groups_[vnet][active_nhg].tunnel_routes.insert(ipPrefix); + VNetTunnelRouteEntry tunnel_route_entry; + tunnel_route_entry.nhg_key = active_nhg; + tunnel_route_entry.primary = nexthops; + tunnel_route_entry.secondary = nexthops_secondary; + syncd_tunnel_routes_[vnet][ipPrefix] = tunnel_route_entry; + syncd_nexthop_groups_[vnet][active_nhg].ref_count++; - syncd_nexthop_groups_[vnet][nexthops].tunnel_routes.insert(ipPrefix); - - syncd_tunnel_routes_[vnet][ipPrefix] = nexthops; - 
syncd_nexthop_groups_[vnet][nexthops].ref_count++; - vrf_obj->addRoute(ipPrefix, nexthops); + if (priority_route_updated) + { + MonitorUpdate update; + update.prefix = ipPrefix; + update.state = MONITOR_SESSION_STATE_UNKNOWN; + update.vnet = vnet; + updateVnetTunnelCustomMonitor(update); + return true; + } - postRouteState(vnet, ipPrefix, nexthops); + if (adv_prefix.to_string() != ipPrefix.to_string() && prefix_to_adv_prefix_.find(ipPrefix) == prefix_to_adv_prefix_.end()) + { + prefix_to_adv_prefix_[ipPrefix] = adv_prefix; + if (adv_prefix_refcount_.find(adv_prefix) == adv_prefix_refcount_.end()) + { + adv_prefix_refcount_[adv_prefix] = 0; + } + if(active_nhg.getSize() > 0) + { + adv_prefix_refcount_[adv_prefix] += 1; + } + } + vrf_obj->addRoute(ipPrefix, active_nhg); + } + postRouteState(vnet, ipPrefix, active_nhg, profile); } else if (op == DEL_COMMAND) { @@ -985,8 +1280,8 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP ipPrefix.to_string().c_str()); return true; } - NextHopGroupKey nhg = it_route->second; - + NextHopGroupKey nhg = it_route->second.nhg_key; + auto last_nhg_size = nhg.getSize(); for (auto vr_id : vr_set) { // If an nhg has no active member, the route should already be removed @@ -1009,15 +1304,29 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP else { syncd_nexthop_groups_[vnet].erase(nhg); - NextHopKey nexthop(nhg.to_string(), true); - vrf_obj->removeTunnelNextHop(nexthop); + // We need to check specifically if there is only one next hop active. + // In case of Priority routes we can end up in a situation where the active NHG has 0 nexthops. 
+ if(nhg.getSize() == 1) + { + NextHopKey nexthop(nhg.to_string(), true); + vrf_obj->removeTunnelNextHop(nexthop); + } + } + if (monitor_info_[vnet].find(ipPrefix) == monitor_info_[vnet].end()) + { + delEndpointMonitor(vnet, nhg, ipPrefix); } - delEndpointMonitor(vnet, nhg); } else { syncd_nexthop_groups_[vnet][nhg].tunnel_routes.erase(ipPrefix); } + if (monitor_info_[vnet].find(ipPrefix) != monitor_info_[vnet].end()) + { + delEndpointMonitor(vnet, it_route->second.primary, ipPrefix); + delEndpointMonitor(vnet, it_route->second.secondary, ipPrefix); + monitor_info_[vnet].erase(ipPrefix); + } syncd_tunnel_routes_[vnet].erase(ipPrefix); if (syncd_tunnel_routes_[vnet].empty()) @@ -1026,10 +1335,24 @@ bool VNetRouteOrch::doRouteTask(const string& vnet, IpPrefix& ipP } vrf_obj->removeRoute(ipPrefix); + vrf_obj->removeProfile(ipPrefix); removeRouteState(vnet, ipPrefix); - } + if (prefix_to_adv_prefix_.find(ipPrefix) != prefix_to_adv_prefix_.end()) + { + auto adv_pfx = prefix_to_adv_prefix_[ipPrefix]; + prefix_to_adv_prefix_.erase(ipPrefix); + if (last_nhg_size > 0) + { + adv_prefix_refcount_[adv_pfx] -= 1; + if (adv_prefix_refcount_[adv_pfx] == 0) + { + adv_prefix_refcount_.erase(adv_pfx); + } + } + } + } return true; } @@ -1094,7 +1417,7 @@ bool VNetRouteOrch::updateTunnelRoute(const string& vnet, IpPrefix& ipPrefix, ipPrefix.to_string().c_str()); return true; } - NextHopGroupKey nhg = it_route->second; + NextHopGroupKey nhg = it_route->second.nhg_key; for (auto vr_id : vr_set) { @@ -1267,7 +1590,7 @@ bool VNetRouteOrch::handleRoutes(const Request& request) SWSS_LOG_INFO("VNET-RT '%s' op '%s' for ip %s", vnet_name.c_str(), op.c_str(), ip_pfx.to_string().c_str()); - + if (op == SET_COMMAND) { addRoute(vnet_name, ip_pfx, nh); @@ -1321,7 +1644,7 @@ void VNetRouteOrch::attach(Observer* observer, const IpAddress& dstAddr) dstAddr.to_string().c_str()); for (auto vnetEntry : bestRoute->second) { - VNetNextHopUpdate update = + VNetNextHopUpdate update = { SET_COMMAND, 
vnetEntry.first, // vnet name @@ -1362,7 +1685,7 @@ void VNetRouteOrch::detach(Observer* observer, const IpAddress& dstAddr) { for (auto vnetEntry : bestRoute->second) { - VNetNextHopUpdate update = + VNetNextHopUpdate update = { DEL_COMMAND, vnetEntry.first, // vnet name @@ -1383,12 +1706,12 @@ void VNetRouteOrch::addRoute(const std::string& vnet, const IpPrefix& ipPrefix, { if (ipPrefix.isAddressInSubnet(next_hop_observer.first)) { - auto route_insert_result = next_hop_observer.second.routeTable.emplace(ipPrefix, VNetEntry()); + auto route_insert_result = next_hop_observer.second.routeTable.emplace(ipPrefix, VNetEntry()); auto vnet_result_result = route_insert_result.first->second.emplace(vnet, nh); if (!vnet_result_result.second) { - if (vnet_result_result.first->second.ips == nh.ips + if (vnet_result_result.first->second.ips == nh.ips && vnet_result_result.first->second.ifname == nh.ifname) { continue; @@ -1399,7 +1722,7 @@ void VNetRouteOrch::addRoute(const std::string& vnet, const IpPrefix& ipPrefix, // If the inserted route is the best route. (Table should not be empty. 
Because we inserted a new entry above) if (route_insert_result.first == --next_hop_observer.second.routeTable.end()) { - VNetNextHopUpdate update = + VNetNextHopUpdate update = { SET_COMMAND, vnet, // vnet name @@ -1437,7 +1760,7 @@ void VNetRouteOrch::delRoute(const IpPrefix& ipPrefix) if ( itr == next_hop_observer->second.routeTable.end()) { SWSS_LOG_ERROR( - "Failed to find any ip(%s) belong to this route(%s).", + "Failed to find any ip(%s) belong to this route(%s).", next_hop_observer->first.to_string().c_str(), ipPrefix.to_string().c_str()); assert(false); @@ -1493,9 +1816,8 @@ void VNetRouteOrch::createBfdSession(const string& vnet, const NextHopKey& endpo FieldValueTuple fvTuple("local_addr", src_ip.to_string()); data.push_back(fvTuple); - + data.emplace_back("multihop", "true"); bfd_session_producer_.set(key, data); - bfd_sessions_[monitor_addr].bfd_state = SAI_BFD_SESSION_STATE_DOWN; } @@ -1527,7 +1849,78 @@ void VNetRouteOrch::removeBfdSession(const string& vnet, const NextHopKey& endpo bfd_sessions_.erase(monitor_addr); } -void VNetRouteOrch::setEndpointMonitor(const string& vnet, const map& monitors, NextHopGroupKey& nexthops) +void VNetRouteOrch::updateAllMonitoringSession(const string& vnet) +{ + SWSS_LOG_ENTER(); + vector data; + auto *vnet_obj = vnet_orch_->getTypePtr(vnet); + auto overlay_dmac = vnet_obj->getOverlayDMac(); + SWSS_LOG_INFO ("updating overlay dmac value to %s", overlay_dmac.to_string().c_str()); + + if (monitor_info_.find(vnet) != monitor_info_.end()) + { + for (auto prefix : monitor_info_[vnet]) + { + for (auto monitor_addr : monitor_info_[vnet][prefix.first]) + { + + string key = monitor_addr.first.to_string() + ":" + prefix.first.to_string(); + SWSS_LOG_INFO ("updating the overlay dmac of %s", key.c_str()); + + FieldValueTuple fvTuple1("packet_type", "vxlan"); + data.push_back(fvTuple1); + + FieldValueTuple fvTuple3("overlay_dmac", overlay_dmac.to_string()); + data.push_back(fvTuple3); + + monitor_session_producer_->set(key, 
data); + } + } + } +} + +void VNetRouteOrch::createMonitoringSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& monitor_addr, IpPrefix& ipPrefix) +{ + SWSS_LOG_ENTER(); + + vector data; + auto *vnet_obj = vnet_orch_->getTypePtr(vnet); + + auto overlay_dmac = vnet_obj->getOverlayDMac(); + string key = monitor_addr.to_string() + ":" + ipPrefix.to_string(); + FieldValueTuple fvTuple1("packet_type", "vxlan"); + data.push_back(fvTuple1); + + FieldValueTuple fvTuple3("overlay_dmac", overlay_dmac.to_string()); + data.push_back(fvTuple3); + + monitor_session_producer_->set(key, data); + + MonitorSessionInfo info = monitor_info_[vnet][ipPrefix][monitor_addr]; + info.endpoint = endpoint; + info.ref_count = 1; + info.state = MONITOR_SESSION_STATE_DOWN; + monitor_info_[vnet][ipPrefix][monitor_addr] = info; + +} + +void VNetRouteOrch::removeMonitoringSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& monitor_addr, IpPrefix& ipPrefix) +{ + SWSS_LOG_ENTER(); + + if (monitor_info_[vnet].find(ipPrefix) == monitor_info_[vnet].end() || + monitor_info_[vnet][ipPrefix].find(monitor_addr) == monitor_info_[vnet][ipPrefix].end()) + { + SWSS_LOG_NOTICE("Monitor session for prefix %s endpoint %s does not exist", ipPrefix.to_string().c_str(), endpoint.to_string().c_str()); + } + + string key = monitor_addr.to_string() + ":" + ipPrefix.to_string(); + + monitor_session_producer_->del(key); + monitor_info_[vnet][ipPrefix].erase(monitor_addr); +} + +void VNetRouteOrch::setEndpointMonitor(const string& vnet, const map& monitors, NextHopGroupKey& nexthops, const string& monitoring, IpPrefix& ipPrefix) { SWSS_LOG_ENTER(); @@ -1535,41 +1928,128 @@ void VNetRouteOrch::setEndpointMonitor(const string& vnet, const map next_hop_set = nexthops.getNextHops(); + if (next_hop_set.find(nh) != next_hop_set.end()) { - createBfdSession(vnet, nh, monitor_ip); + if (monitoring == "custom") + { + if (monitor_info_[vnet].find(ipPrefix) == monitor_info_[vnet].end() || + 
monitor_info_[vnet][ipPrefix].find(monitor_ip) == monitor_info_[vnet][ipPrefix].end()) + { + createMonitoringSession(vnet, nh, monitor_ip, ipPrefix); + } + else + { + SWSS_LOG_INFO("Monitoring session for prefix %s endpoint %s, monitor %s already exists", ipPrefix.to_string().c_str(), + nh.to_string().c_str(), monitor_ip.to_string().c_str()); + monitor_info_[vnet][ipPrefix][monitor_ip].ref_count += 1; + } + } + else + { + if (nexthop_info_[vnet].find(nh.ip_address) == nexthop_info_[vnet].end()) + { + createBfdSession(vnet, nh, monitor_ip); + } + nexthop_info_[vnet][nh.ip_address].ref_count++; + } } - - nexthop_info_[vnet][nh.ip_address].ref_count++; } } -void VNetRouteOrch::delEndpointMonitor(const string& vnet, NextHopGroupKey& nexthops) +void VNetRouteOrch::delEndpointMonitor(const string& vnet, NextHopGroupKey& nexthops, IpPrefix& ipPrefix) { SWSS_LOG_ENTER(); std::set nhks = nexthops.getNextHops(); + bool is_custom_monitoring = false; + if (monitor_info_[vnet].find(ipPrefix) != monitor_info_[vnet].end()) + { + is_custom_monitoring = true; + } for (auto nhk: nhks) { IpAddress ip = nhk.ip_address; - if (nexthop_info_[vnet].find(ip) != nexthop_info_[vnet].end()) { - if (--nexthop_info_[vnet][ip].ref_count == 0) + if (is_custom_monitoring) + { + for ( auto monitor : monitor_info_[vnet][ipPrefix]) + { + if (monitor.second.endpoint == nhk) + { + if (--monitor_info_[vnet][ipPrefix][monitor.first].ref_count == 0) + { + removeMonitoringSession(vnet, nhk, monitor.first, ipPrefix); + break; + } + } + } + } + else + { + if (nexthop_info_[vnet].find(ip) != nexthop_info_[vnet].end()) { + if (--nexthop_info_[vnet][ip].ref_count == 0) + { + IpAddress monitor_addr = nexthop_info_[vnet][ip].monitor_addr; + removeBfdSession(vnet, nhk, monitor_addr); + } + } + } + } +} + +void VNetRouteOrch::updateMonitorState(string& op, const IpPrefix& prefix, const IpAddress& monitor, string state) +{ + SWSS_LOG_ENTER(); + if( op == SET_COMMAND) + { + for (auto iter : monitor_info_) + { + 
std::string vnet = iter.first; + if (monitor_info_[vnet].find(prefix) != monitor_info_[vnet].end() && + monitor_info_[vnet][prefix].find(monitor) != monitor_info_[vnet][prefix].end()) { - removeBfdSession(vnet, nhk, nexthop_info_[vnet][ip].monitor_addr); + if (state =="up") + { + if (monitor_info_[vnet][prefix][monitor].state != MONITOR_SESSION_STATE_UP) + { + SWSS_LOG_NOTICE("Monitor session state for %s|%s (%s) changed from down to up", prefix.to_string().c_str(), + monitor.to_string().c_str(), monitor_info_[vnet][prefix][monitor].endpoint.ip_address.to_string().c_str()); + struct MonitorUpdate status_update; + status_update.state = MONITOR_SESSION_STATE_UP; + status_update.prefix = prefix; + status_update.monitor = monitor; + status_update.vnet = vnet; + updateVnetTunnelCustomMonitor(status_update); + } + } + else if (state =="down") + { + if (monitor_info_[vnet][prefix][monitor].state != MONITOR_SESSION_STATE_DOWN) + { + SWSS_LOG_NOTICE("Monitor session state for %s|%s (%s) changed from up to down", prefix.to_string().c_str(), + monitor.to_string().c_str(), monitor_info_[vnet][prefix][monitor].endpoint.ip_address.to_string().c_str()); + struct MonitorUpdate status_update; + status_update.state = MONITOR_SESSION_STATE_DOWN; + status_update.prefix = prefix; + status_update.monitor = monitor; + status_update.vnet = vnet; + updateVnetTunnelCustomMonitor(status_update); + } + } } } } } -void VNetRouteOrch::postRouteState(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops) +void VNetRouteOrch::postRouteState(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& profile) { const string state_db_key = vnet + state_db_key_delimiter + ipPrefix.to_string(); vector fvVector; - NextHopGroupInfo& nhg_info = syncd_nexthop_groups_[vnet][nexthops]; string route_state = nhg_info.active_members.empty() ? 
"inactive" : "active"; string ep_str = ""; int idx_ep = 0; + for (auto nh_pair : nhg_info.active_members) { NextHopKey nh = nh_pair.first; @@ -1582,15 +2062,26 @@ void VNetRouteOrch::postRouteState(const string& vnet, IpPrefix& ipPrefix, NextH state_vnet_rt_tunnel_table_->set(state_db_key, fvVector); + auto prefix_to_use = ipPrefix; + if (prefix_to_adv_prefix_.find(ipPrefix) != prefix_to_adv_prefix_.end()) + { + route_state = ""; + auto adv_pfx = prefix_to_adv_prefix_[ipPrefix]; + if (adv_prefix_refcount_[adv_pfx] == 1) + { + route_state = "active"; + prefix_to_use = adv_pfx; + } + } if (vnet_orch_->getAdvertisePrefix(vnet)) { if (route_state == "active") { - addRouteAdvertisement(ipPrefix); + addRouteAdvertisement(prefix_to_use, profile); } - else + else if (route_state == "inactive") { - removeRouteAdvertisement(ipPrefix); + removeRouteAdvertisement(prefix_to_use); } } } @@ -1599,14 +2090,33 @@ void VNetRouteOrch::removeRouteState(const string& vnet, IpPrefix& ipPrefix) { const string state_db_key = vnet + state_db_key_delimiter + ipPrefix.to_string(); state_vnet_rt_tunnel_table_->del(state_db_key); - removeRouteAdvertisement(ipPrefix); + + if(prefix_to_adv_prefix_.find(ipPrefix) !=prefix_to_adv_prefix_.end()) + { + auto adv_pfx = prefix_to_adv_prefix_[ipPrefix]; + if(adv_prefix_refcount_[adv_pfx] == 1) + { + removeRouteAdvertisement(adv_pfx); + } + } + else + { + removeRouteAdvertisement(ipPrefix); + } } -void VNetRouteOrch::addRouteAdvertisement(IpPrefix& ipPrefix) +void VNetRouteOrch::addRouteAdvertisement(IpPrefix& ipPrefix, string& profile) { const string key = ipPrefix.to_string(); vector fvs; - fvs.push_back(FieldValueTuple("", "")); + if (profile.empty()) + { + fvs.push_back(FieldValueTuple("", "")); + } + else + { + fvs.push_back(FieldValueTuple("profile", profile)); + } state_vnet_rt_adv_table_->set(key, fvs); } @@ -1646,7 +2156,7 @@ void VNetRouteOrch::updateVnetTunnel(const BfdUpdate& update) size_t found_vrf = key.find(state_db_key_delimiter); if 
(found_vrf == string::npos) { - SWSS_LOG_ERROR("Failed to parse key %s, no vrf is given", key.c_str()); + SWSS_LOG_WARN("Failed to parse key %s, no vrf is given", key.c_str()); return; } @@ -1817,7 +2327,231 @@ void VNetRouteOrch::updateVnetTunnel(const BfdUpdate& update) // Post configured in State DB for (auto ip_pfx : syncd_nexthop_groups_[vnet][nexthops].tunnel_routes) { - postRouteState(vnet, ip_pfx, nexthops); + string profile = vrf_obj->getProfile(ip_pfx); + postRouteState(vnet, ip_pfx, nexthops, profile); + } + } +} + +void VNetRouteOrch::updateVnetTunnelCustomMonitor(const MonitorUpdate& update) +{ + SWSS_LOG_ENTER(); +// This function recieves updates from the MonitorOrch for the endpoints state. +// Based on the state of the endpoints for a particular route, this function attempts +// to construct the primary next hop group. if it fails to do so,it attempts to create +// the secondary next hop group. After that it applies the next hop group and deletes +// the old next hop group. +// This function is also called in the case when the route configuration is updated to +// apply the new next hop group. In this case, the caller sets the state to +// MONITOR_SESSION_STATE_UNKNOWN and config_update and updateRoute are set to true. +// This function should never recieve MONITOR_SESSION_STATE_UNKNOWN from MonitorOrch. + + auto prefix = update.prefix; + auto state = update.state; + auto monitor = update.monitor; + auto vnet = update.vnet; + bool updateRoute = false; + bool config_update = false; + if (state != MONITOR_SESSION_STATE_UNKNOWN) + { + monitor_info_[vnet][prefix][monitor].state = state; + } + else + { + // we are coming here as a result of route config update. We need to repost the route if applicable. + updateRoute = true; + config_update = true; + } + + auto route = syncd_tunnel_routes_[vnet].find(prefix); + if (route == syncd_tunnel_routes_[vnet].end()) + { + SWSS_LOG_ERROR("Unexpected! 
Monitor Update for absent route."); + return; + + } + auto *vrf_obj = vnet_orch_->getTypePtr(vnet); + set vr_set; + + auto l_fn = [&] (const string& vnet) { + auto *vnet_obj = vnet_orch_->getTypePtr(vnet); + sai_object_id_t vr_id = vnet_obj->getVRidIngress(); + vr_set.insert(vr_id); + }; + + l_fn(vnet); + + auto primary = route->second.primary; + auto secondary = route->second.secondary; + auto active_nhg = route->second.nhg_key; + NextHopGroupKey nhg_custom("", true); + sai_ip_prefix_t pfx; + copy(pfx, prefix); + NextHopGroupKey nhg_custom_primary = getActiveNHSet( vnet, primary, prefix); + NextHopGroupKey nhg_custom_secondary = getActiveNHSet( vnet, secondary, prefix); + if (nhg_custom_primary.getSize() > 0) + { + if (nhg_custom_primary != active_nhg ) + { + if (!hasNextHopGroup(vnet, nhg_custom_primary)) + { + if (!createNextHopGroup(vnet, nhg_custom_primary, vrf_obj, "custom")) + { + SWSS_LOG_WARN("Failed to create primary based custom next hop group. Cannot proceed."); + return; + } + } + updateRoute = true; + } + if (updateRoute) + { + nhg_custom = nhg_custom_primary; + } + } + else if (nhg_custom_secondary.getSize() > 0) + { + if (nhg_custom_secondary != active_nhg ) + { + if (!hasNextHopGroup(vnet, nhg_custom_secondary)) + { + if (!createNextHopGroup(vnet, nhg_custom_secondary, vrf_obj, "custom")) + { + SWSS_LOG_WARN("Failed to create primary based custom next hop group. Cannot proceed."); + return; + } + } + updateRoute = true; + } + if (updateRoute) + { + nhg_custom = nhg_custom_secondary; + } + } + else + { + //both HHG's are inactive, need to remove the route. + updateRoute = true; + } + + if (nhg_custom.getSize() == 0) + { + // nhg_custom is empty. we shall create a dummy empty NHG for book keeping. 
+ SWSS_LOG_INFO(" Neither Primary or Secondary endpoints are up."); + if (!hasNextHopGroup(vnet, nhg_custom)) + { + NextHopGroupInfo next_hop_group_entry; + next_hop_group_entry.next_hop_group_id = SAI_NULL_OBJECT_ID; + next_hop_group_entry.ref_count = 0; + syncd_nexthop_groups_[vnet][nhg_custom] = next_hop_group_entry; + } + } + auto active_nhg_size = active_nhg.getSize(); + if (updateRoute) + { + for (auto vr_id : vr_set) + { + if (nhg_custom.getSize() == 0) + { + if (active_nhg_size > 0) + { + // we need to remove the route + del_route(vr_id, pfx); + } + } + else + { + bool route_status = true; + // note: nh_id can be SAI_NULL_OBJECT_ID when active_nhg is empty. + auto nh_id = syncd_nexthop_groups_[vnet][nhg_custom].next_hop_group_id; + if (active_nhg_size > 0) + { + // we need to replace the nhg in the route + route_status = update_route(vr_id, pfx, nh_id); + } + else + { + // we need to readd the route. + route_status = add_route(vr_id, pfx, nh_id); + } + if (!route_status) + { + SWSS_LOG_ERROR("Route add/update failed for %s, vr_id '0x%" PRIx64, prefix.to_string().c_str(), vr_id); + /* Clean up the newly created next hop group entry */ + if (nhg_custom.getSize() > 1) + { + removeNextHopGroup(vnet, nhg_custom, vrf_obj); + } + return; + } + vrf_obj->addRoute(prefix, nhg_custom); + } + } + if (config_update && nhg_custom != active_nhg) + { + // This convoluted logic has very good reason behind it. + // when a route configuration gets updated, if the new endpoints are same but primaries + // are changed, we must increase the ref count of active group to save it from premature + // deletion at this place. So, we increment the refcount of existing active_nhg in doRotueTask right + // before we call this function. Once here we need to undo this increment of refCount for the active_nhg + // which is no longer relevant. 
+ syncd_nexthop_groups_[vnet][active_nhg].ref_count--; + } + + if(--syncd_nexthop_groups_[vnet][active_nhg].ref_count == 0) + { + if (active_nhg_size > 1) + { + removeNextHopGroup(vnet, active_nhg, vrf_obj); + } + else + { + syncd_nexthop_groups_[vnet].erase(active_nhg); + if(active_nhg_size == 1) + { + NextHopKey nexthop(active_nhg.to_string(), true); + vrf_obj->removeTunnelNextHop(nexthop); + } + } + } + else + { + syncd_nexthop_groups_[vnet][active_nhg].tunnel_routes.erase(prefix); + } + syncd_nexthop_groups_[vnet][nhg_custom].tunnel_routes.insert(prefix); + syncd_tunnel_routes_[vnet][prefix].nhg_key = nhg_custom; + if (nhg_custom != active_nhg) + { + syncd_nexthop_groups_[vnet][nhg_custom].ref_count++; + } + if (nhg_custom.getSize() == 0 && active_nhg_size > 0) + { + vrf_obj->removeRoute(prefix); + removeRouteState(vnet, prefix); + if (prefix_to_adv_prefix_.find(prefix) != prefix_to_adv_prefix_.end()) + { + auto adv_pfx = prefix_to_adv_prefix_[prefix]; + adv_prefix_refcount_[adv_pfx] -=1; + if (adv_prefix_refcount_[adv_pfx] == 0) + { + adv_prefix_refcount_.erase(adv_pfx); + } + } + } + else if (nhg_custom.getSize() > 0 && active_nhg_size == 0) + { + auto adv_prefix = prefix_to_adv_prefix_[prefix]; + if (adv_prefix_refcount_.find(adv_prefix) == adv_prefix_refcount_.end()) + { + adv_prefix_refcount_[adv_prefix] = 0; + } + adv_prefix_refcount_[adv_prefix] += 1; + string profile = vrf_obj->getProfile(prefix); + postRouteState(vnet, prefix, nhg_custom, profile); + } + else + { + string profile = vrf_obj->getProfile(prefix); + postRouteState(vnet, prefix, nhg_custom, profile); } } } @@ -1830,7 +2564,12 @@ bool VNetRouteOrch::handleTunnel(const Request& request) vector mac_list; vector vni_list; vector monitor_list; - + string profile = ""; + vector primary_list; + string monitoring; + swss::IpPrefix adv_prefix; + bool has_priority_ep = false; + bool has_adv_pfx = false; for (const auto& name: request.getAttrFieldNames()) { if (name == "endpoint") @@ -1851,6 +2590,23 
@@ bool VNetRouteOrch::handleTunnel(const Request& request) { monitor_list = request.getAttrIPList(name); } + else if (name == "profile") + { + profile = request.getAttrString(name); + } + else if (name == "primary") + { + primary_list = request.getAttrIPList(name); + } + else if (name == "monitoring") + { + monitoring = request.getAttrString(name); + } + else if (name == "adv_prefix") + { + adv_prefix = request.getAttrIpPrefix(name); + has_adv_pfx = true; + } else { SWSS_LOG_INFO("Unknown attribute: %s", name.c_str()); @@ -1875,7 +2631,12 @@ bool VNetRouteOrch::handleTunnel(const Request& request) SWSS_LOG_ERROR("Peer monitor size of %zu does not match endpoint size of %zu", monitor_list.size(), ip_list.size()); return false; } - + if (!primary_list.empty() && monitor_list.empty()) + { + SWSS_LOG_ERROR("Primary/backup behaviour cannot function without endpoint monitoring."); + return true; + } + const std::string& vnet_name = request.getKeyString(0); auto ip_pfx = request.getKeyIpPrefix(1); auto op = request.getOperation(); @@ -1883,6 +2644,14 @@ bool VNetRouteOrch::handleTunnel(const Request& request) SWSS_LOG_INFO("VNET-RT '%s' op '%s' for pfx %s", vnet_name.c_str(), op.c_str(), ip_pfx.to_string().c_str()); + if (!primary_list.empty()) + { + has_priority_ep = true; + SWSS_LOG_INFO("Handling Priority Tunnel with prefix %s", ip_pfx.to_string().c_str()); + } + + NextHopGroupKey nhg_primary("", true); + NextHopGroupKey nhg_secondary("", true); NextHopGroupKey nhg("", true); map monitors; for (size_t idx_ip = 0; idx_ip < ip_list.size(); idx_ip++) @@ -1905,16 +2674,31 @@ bool VNetRouteOrch::handleTunnel(const Request& request) } NextHopKey nh(ip, mac, vni, true); - nhg.add(nh); if (!monitor_list.empty()) { monitors[nh] = monitor_list[idx_ip]; } + if (has_priority_ep) + { + if (std::find(primary_list.begin(), primary_list.end(), ip) != primary_list.end()) + { + // only add the primary endpoint ips. 
+ nhg_primary.add(nh); + } + else + { + nhg_secondary.add(nh); + } + } + nhg.add(nh); + } + if (!has_adv_pfx) + { + adv_prefix = ip_pfx; } - if (vnet_orch_->isVnetExecVrf()) { - return doRouteTask(vnet_name, ip_pfx, nhg, op, monitors); + return doRouteTask(vnet_name, ip_pfx, (has_priority_ep == true) ? nhg_primary : nhg, op, profile, monitoring, nhg_secondary, adv_prefix, monitors); } return true; @@ -2060,3 +2844,44 @@ bool VNetCfgRouteOrch::doVnetRouteTask(const KeyOpFieldsValuesTuple & t, const s return true; } + +MonitorOrch::MonitorOrch(DBConnector *db, string tableName): + Orch2(db, tableName, request_) +{ + SWSS_LOG_ENTER(); +} + +MonitorOrch::~MonitorOrch(void) +{ + SWSS_LOG_ENTER(); +} + +bool MonitorOrch::addOperation(const Request& request) +{ + SWSS_LOG_ENTER(); + auto monitor = request.getKeyIpAddress(0); + auto ip_Prefix = request.getKeyIpPrefix(1); + + auto session_state = request.getAttrString("state"); + SWSS_LOG_INFO("Added state table entry for monitor %s|%s", ip_Prefix.to_string().c_str(),monitor.to_string().c_str()); + + string op = SET_COMMAND; + VNetRouteOrch* vnet_route_orch = gDirectory.get(); + vnet_route_orch->updateMonitorState(op ,ip_Prefix, monitor, session_state ); + + return true; +} + +bool MonitorOrch::delOperation(const Request& request) +{ + SWSS_LOG_ENTER(); + auto monitor = request.getKeyIpAddress(0); + auto ip_Prefix = request.getKeyIpPrefix(1); + + SWSS_LOG_INFO("Deleting state table entry for monitor %s|%s", ip_Prefix.to_string().c_str(),monitor.to_string().c_str()); + VNetRouteOrch* vnet_route_orch = gDirectory.get(); + string op = DEL_COMMAND; + vnet_route_orch->updateMonitorState(op, ip_Prefix, monitor, "" ); + + return true; +} diff --git a/orchagent/vnetorch.h b/orchagent/vnetorch.h index 77c2785371..0cffa115fd 100644 --- a/orchagent/vnetorch.h +++ b/orchagent/vnetorch.h @@ -24,6 +24,14 @@ extern sai_object_id_t gVirtualRouterId; + +typedef enum +{ + MONITOR_SESSION_STATE_UNKNOWN, + MONITOR_SESSION_STATE_UP, + 
MONITOR_SESSION_STATE_DOWN, +} monitor_session_state_t; + const request_description_t vnet_request_description = { { REQ_T_STRING }, { @@ -34,6 +42,8 @@ const request_description_t vnet_request_description = { { "guid", REQ_T_STRING }, { "scope", REQ_T_STRING }, { "advertise_prefix", REQ_T_BOOL}, + { "overlay_dmac", REQ_T_MAC_ADDRESS}, + }, { "vxlan_tunnel", "vni" } // mandatory attributes }; @@ -59,6 +69,7 @@ struct VNetInfo set peers; string scope; bool advertise_prefix; + swss::MacAddress overlay_dmac; }; typedef map vrid_list_t; @@ -86,7 +97,8 @@ class VNetObject peer_list_(vnetInfo.peers), vni_(vnetInfo.vni), scope_(vnetInfo.scope), - advertise_prefix_(vnetInfo.advertise_prefix) + advertise_prefix_(vnetInfo.advertise_prefix), + overlay_dmac_(vnetInfo.overlay_dmac) { } virtual bool updateObj(vector&) = 0; @@ -121,6 +133,16 @@ class VNetObject return advertise_prefix_; } + swss::MacAddress getOverlayDMac() const + { + return overlay_dmac_; + } + + void setOverlayDMac(swss::MacAddress mac_addr) + { + overlay_dmac_ = mac_addr; + } + virtual ~VNetObject() noexcept(false) {}; private: @@ -129,6 +151,7 @@ class VNetObject uint32_t vni_; string scope_; bool advertise_prefix_; + swss::MacAddress overlay_dmac_; }; struct nextHop @@ -139,6 +162,7 @@ struct nextHop typedef std::map TunnelRoutes; typedef std::map RouteMap; +typedef std::map ProfileMap; class VNetVrfObject : public VNetObject { @@ -181,6 +205,10 @@ class VNetVrfObject : public VNetObject bool addRoute(IpPrefix& ipPrefix, nextHop& nh); bool removeRoute(IpPrefix& ipPrefix); + void addProfile(IpPrefix& ipPrefix, string& profile); + void removeProfile(IpPrefix& ipPrefix); + string getProfile(IpPrefix& ipPrefix); + size_t getRouteCount() const; bool getRouteNextHop(IpPrefix& ipPrefix, nextHop& nh); bool hasRoute(IpPrefix& ipPrefix); @@ -190,6 +218,9 @@ class VNetVrfObject : public VNetObject void increaseNextHopRefCount(const nextHop&); void decreaseNextHopRefCount(const nextHop&); + const RouteMap 
&getRouteMap() const { return routes_; } + const TunnelRoutes &getTunnelRoutes() const { return tunnels_; } + ~VNetVrfObject(); private: @@ -198,6 +229,7 @@ class VNetVrfObject : public VNetObject TunnelRoutes tunnels_; RouteMap routes_; + ProfileMap profile_; }; typedef std::unique_ptr VNetObject_T; @@ -247,6 +279,9 @@ class VNetOrch : public Orch2 return (vnet_exec_ == VNET_EXEC::VNET_EXEC_BRIDGE); } + bool getVrfIdByVnetName(const std::string& vnet_name, sai_object_id_t &vrf_id); + bool getVnetNameByVrfId(sai_object_id_t vrf_id, std::string& vnet_name); + private: virtual bool addOperation(const Request& request); virtual bool delOperation(const Request& request); @@ -269,10 +304,41 @@ const request_description_t vnet_route_description = { { "vni", REQ_T_STRING }, { "mac_address", REQ_T_STRING }, { "endpoint_monitor", REQ_T_IP_LIST }, + { "profile", REQ_T_STRING }, + { "primary", REQ_T_IP_LIST }, + { "monitoring", REQ_T_STRING }, + { "adv_prefix", REQ_T_IP_PREFIX }, }, { } }; +const request_description_t monitor_state_request_description = { + { REQ_T_IP, REQ_T_IP_PREFIX, }, + { + { "state", REQ_T_STRING }, + }, + { "state" } +}; + +class MonitorStateRequest : public Request +{ +public: + MonitorStateRequest() : Request(monitor_state_request_description, '|') { } +}; + +class MonitorOrch : public Orch2 +{ +public: + MonitorOrch(swss::DBConnector *db, std::string tableName); + virtual ~MonitorOrch(void); + +private: + virtual bool addOperation(const Request& request); + virtual bool delOperation(const Request& request); + + MonitorStateRequest request_; +}; + class VNetRouteRequest : public Request { public: @@ -313,9 +379,35 @@ struct BfdSessionInfo NextHopKey endpoint; }; +struct MonitorSessionInfo +{ + monitor_session_state_t state; + NextHopKey endpoint; + int ref_count; +}; + +struct MonitorUpdate +{ + monitor_session_state_t state; + IpAddress monitor; + IpPrefix prefix; + std::string vnet; +}; +struct VNetTunnelRouteEntry +{ + // The nhg_key is the key for 
the next hop group which is currently active in hardware. + // For priority routes, this can be a subset of eith primary or secondary NHG or an empty NHG. + NextHopGroupKey nhg_key; + // For regular Ecmp rotues the priamry and secondary fields wil lbe empty. For priority + // routes they wil lcontain the origna lprimary and secondary NHGs. + NextHopGroupKey primary; + NextHopGroupKey secondary; +}; + typedef std::map VNetNextHopGroupInfoTable; -typedef std::map VNetTunnelRouteTable; +typedef std::map VNetTunnelRouteTable; typedef std::map BfdSessionTable; +typedef std::map> MonitorSessionTable; typedef std::map VNetEndpointInfoTable; class VNetRouteOrch : public Orch2, public Subject, public Observer @@ -330,6 +422,8 @@ class VNetRouteOrch : public Orch2, public Subject, public Observer void detach(Observer* observer, const IpAddress& dstAddr); void update(SubjectType, void *); + void updateMonitorState(string& op, const IpPrefix& prefix , const IpAddress& endpoint, string state); + void updateAllMonitoringSession(const string& vnet); private: virtual bool addOperation(const Request& request); @@ -343,23 +437,36 @@ class VNetRouteOrch : public Orch2, public Subject, public Observer bool hasNextHopGroup(const string&, const NextHopGroupKey&); sai_object_id_t getNextHopGroupId(const string&, const NextHopGroupKey&); - bool addNextHopGroup(const string&, const NextHopGroupKey&, VNetVrfObject *vrf_obj); + bool addNextHopGroup(const string&, const NextHopGroupKey&, VNetVrfObject *vrf_obj, + const string& monitoring); bool removeNextHopGroup(const string&, const NextHopGroupKey&, VNetVrfObject *vrf_obj); + bool createNextHopGroup(const string&, NextHopGroupKey&, VNetVrfObject *vrf_obj, + const string& monitoring); + NextHopGroupKey getActiveNHSet(const string&, NextHopGroupKey&, const IpPrefix& ); + + bool selectNextHopGroup(const string&, NextHopGroupKey&, NextHopGroupKey&, const string&, IpPrefix&, + VNetVrfObject *vrf_obj, NextHopGroupKey&, + const std::map& 
monitors=std::map()); void createBfdSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr); void removeBfdSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr); - void setEndpointMonitor(const string& vnet, const map& monitors, NextHopGroupKey& nexthops); - void delEndpointMonitor(const string& vnet, NextHopGroupKey& nexthops); - void postRouteState(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops); + void createMonitoringSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr, IpPrefix& ipPrefix); + void removeMonitoringSession(const string& vnet, const NextHopKey& endpoint, const IpAddress& ipAddr, IpPrefix& ipPrefix); + void setEndpointMonitor(const string& vnet, const map& monitors, NextHopGroupKey& nexthops, + const string& monitoring, IpPrefix& ipPrefix); + void delEndpointMonitor(const string& vnet, NextHopGroupKey& nexthops, IpPrefix& ipPrefix); + void postRouteState(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& profile); void removeRouteState(const string& vnet, IpPrefix& ipPrefix); - void addRouteAdvertisement(IpPrefix& ipPrefix); + void addRouteAdvertisement(IpPrefix& ipPrefix, string& profile); void removeRouteAdvertisement(IpPrefix& ipPrefix); void updateVnetTunnel(const BfdUpdate&); + void updateVnetTunnelCustomMonitor(const MonitorUpdate& update); bool updateTunnelRoute(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& op); template - bool doRouteTask(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& op, + bool doRouteTask(const string& vnet, IpPrefix& ipPrefix, NextHopGroupKey& nexthops, string& op, string& profile, + const string& monitoring, NextHopGroupKey& nexthops_secondary, const IpPrefix& adv_prefix, const std::map& monitors=std::map()); template @@ -374,9 +481,14 @@ class VNetRouteOrch : public Orch2, public Subject, public Observer std::map syncd_nexthop_groups_; 
std::map syncd_tunnel_routes_; BfdSessionTable bfd_sessions_; + std::map monitor_info_; std::map nexthop_info_; + std::map prefix_to_adv_prefix_; + std::map adv_prefix_refcount_; ProducerStateTable bfd_session_producer_; + unique_ptr
monitor_session_producer_; shared_ptr state_db_; + shared_ptr app_db_; unique_ptr
state_vnet_rt_tunnel_table_; unique_ptr
state_vnet_rt_adv_table_; }; diff --git a/orchagent/vrforch.cpp b/orchagent/vrforch.cpp index 19ca5c0fd8..776cf1eb0f 100644 --- a/orchagent/vrforch.cpp +++ b/orchagent/vrforch.cpp @@ -11,6 +11,7 @@ #include "request_parser.h" #include "vrforch.h" #include "vxlanorch.h" +#include "flowcounterrouteorch.h" #include "directory.h" using namespace std; @@ -18,8 +19,10 @@ using namespace swss; extern sai_virtual_router_api_t* sai_virtual_router_api; extern sai_object_id_t gSwitchId; -extern Directory gDirectory; -extern PortsOrch* gPortsOrch; + +extern Directory gDirectory; +extern PortsOrch* gPortsOrch; +extern FlowCounterRouteOrch* gFlowCounterRouteOrch; bool VRFOrch::addOperation(const Request& request) { @@ -104,6 +107,7 @@ bool VRFOrch::addOperation(const Request& request) vrf_table_[vrf_name].vrf_id = router_id; vrf_table_[vrf_name].ref_count = 0; vrf_id_table_[router_id] = vrf_name; + gFlowCounterRouteOrch->onAddVR(router_id); if (vni != 0) { SWSS_LOG_INFO("VRF '%s' vni %d add", vrf_name.c_str(), vni); @@ -176,6 +180,8 @@ bool VRFOrch::delOperation(const Request& request) } } + gFlowCounterRouteOrch->onRemoveVR(router_id); + vrf_table_.erase(vrf_name); vrf_id_table_.erase(router_id); error = delVrfVNIMap(vrf_name, 0); diff --git a/orchagent/vrforch.h b/orchagent/vrforch.h index 195015fa08..07e0df55ec 100644 --- a/orchagent/vrforch.h +++ b/orchagent/vrforch.h @@ -155,6 +155,19 @@ class VRFOrch : public Orch2 return (-1); } } + + bool isL3VniVlan(const uint32_t vni) const + { + if (l3vni_table_.find(vni) != std::end(l3vni_table_)) + { + return l3vni_table_.at(vni).l3_vni; + } + else + { + return false; + } + } + int updateL3VniVlan(uint32_t vni, uint16_t vlan_id); private: virtual bool addOperation(const Request& request); diff --git a/orchagent/vxlanorch.cpp b/orchagent/vxlanorch.cpp index fc6a505a1f..1983cf7286 100644 --- a/orchagent/vxlanorch.cpp +++ b/orchagent/vxlanorch.cpp @@ -494,67 +494,6 @@ VxlanTunnel::~VxlanTunnel() src_creation_, false); } -bool 
VxlanTunnel::createTunnel(MAP_T encap, MAP_T decap, uint8_t encap_ttl) -{ - try - { - VxlanTunnelOrch* tunnel_orch = gDirectory.get(); - sai_ip_address_t ips, ipd, *ip=nullptr; - uint8_t mapper_list = 0; - swss::copy(ips, src_ip_); - - // Only a single mapper type is created - - if (decap == MAP_T::VNI_TO_BRIDGE) - { - TUNNELMAP_SET_BRIDGE(mapper_list); - } - else if (decap == MAP_T::VNI_TO_VLAN_ID) - { - TUNNELMAP_SET_VLAN(mapper_list); - } - else - { - TUNNELMAP_SET_VRF(mapper_list); - } - - createMapperHw(mapper_list, (encap == MAP_T::MAP_TO_INVALID) ? - TUNNEL_MAP_USE_DECAP_ONLY: TUNNEL_MAP_USE_DEDICATED_ENCAP_DECAP); - - if (encap != MAP_T::MAP_TO_INVALID) - { - ip = &ips; - } - - ids_.tunnel_id = create_tunnel(&ids_, ip, NULL, gUnderlayIfId, false, encap_ttl); - - if (ids_.tunnel_id != SAI_NULL_OBJECT_ID) - { - tunnel_orch->addTunnelToFlexCounter(ids_.tunnel_id, tunnel_name_); - } - - ip = nullptr; - if (!dst_ip_.isZero()) - { - swss::copy(ipd, dst_ip_); - ip = &ipd; - } - - ids_.tunnel_term_id = create_tunnel_termination(ids_.tunnel_id, ips, ip, gVirtualRouterId); - active_ = true; - tunnel_map_ = { encap, decap }; - } - catch (const std::runtime_error& error) - { - SWSS_LOG_ERROR("Error creating tunnel %s: %s", tunnel_name_.c_str(), error.what()); - // FIXME: add code to remove already created objects - return false; - } - - SWSS_LOG_NOTICE("Vxlan tunnel '%s' was created", tunnel_name_.c_str()); - return true; -} - sai_object_id_t VxlanTunnel::addEncapMapperEntry(sai_object_id_t obj, uint32_t vni, tunnel_map_type_t type) { const auto encap_id = getEncapMapId(type); @@ -1110,13 +1049,14 @@ void VxlanTunnel::updateRemoteEndPointIpRef(const std::string remote_vtep, bool it->second.ip_refcnt++; } SWSS_LOG_DEBUG("Incrementing remote end point %s reference to %d", remote_vtep.c_str(), - it->second.ip_refcnt); + tnl_users_[remote_vtep].ip_refcnt); } else { if (it == tnl_users_.end()) { SWSS_LOG_ERROR("Cannot decrement ref. 
End point not referenced %s", remote_vtep.c_str()); + return; } it->second.ip_refcnt--; @@ -1542,28 +1482,12 @@ bool VxlanTunnelOrch::removeVxlanTunnelMap(string tunnelName, uint32_t vni) tunnel_obj->vlan_vrf_vni_count--; if (tunnel_obj->vlan_vrf_vni_count == 0) { - auto tunnel_term_id = vxlan_tunnel_table_[tunnelName].get()->getTunnelTermId(); - try - { - remove_tunnel_termination(tunnel_term_id); - } - catch(const std::runtime_error& error) - { - SWSS_LOG_ERROR("Error removing tunnel term entry. Tunnel: %s. Error: %s", tunnelName.c_str(), error.what()); - return false; - } - - auto tunnel_id = vxlan_tunnel_table_[tunnelName].get()->getTunnelId(); - try - { - removeTunnelFromFlexCounter(tunnel_id, tunnelName); - remove_tunnel(tunnel_id); - } - catch(const std::runtime_error& error) - { - SWSS_LOG_ERROR("Error removing tunnel entry. Tunnel: %s. Error: %s", tunnelName.c_str(), error.what()); - return false; - } + uint8_t mapper_list = 0; + + TUNNELMAP_SET_VLAN(mapper_list); + TUNNELMAP_SET_VRF(mapper_list); + + tunnel_obj->deleteTunnelHw(mapper_list, TUNNEL_MAP_USE_DEDICATED_ENCAP_DECAP); } SWSS_LOG_NOTICE("Vxlan map entry deleted for tunnel '%s' with vni '%d'", tunnelName.c_str(), vni); @@ -1939,20 +1863,18 @@ bool VxlanTunnel::isTunnelReferenced() Port tunnelPort; bool dip_tunnels_used = tunnel_orch->isDipTunnelsSupported(); - ret = gPortsOrch->getPort(port_tunnel_name, tunnelPort); - if (!ret) - { - SWSS_LOG_ERROR("Get port failed for source vtep %s", port_tunnel_name.c_str()); - return false; - } - - if (dip_tunnels_used) { return (getDipTunnelCnt() != 0); } else { + ret = gPortsOrch->getPort(port_tunnel_name, tunnelPort); + if (!ret) + { + SWSS_LOG_ERROR("Get port failed for source vtep %s", port_tunnel_name.c_str()); + return false; + } if (tunnelPort.m_fdb_count != 0) { return true; @@ -1981,6 +1903,7 @@ bool VxlanTunnelMapOrch::addOperation(const Request& request) sai_vlan_id_t vlan_id = (sai_vlan_id_t)request.getAttrVlan("vlan"); Port tempPort; + bool 
isL3Vni = false; const auto full_tunnel_map_entry_name = request.getFullKey(); SWSS_LOG_INFO("Full name = %s",full_tunnel_map_entry_name.c_str()); @@ -2028,19 +1951,21 @@ bool VxlanTunnelMapOrch::addOperation(const Request& request) if (!tunnel_obj->isActive()) { //@Todo, currently only decap mapper is allowed - //tunnel_obj->createTunnel(MAP_T::MAP_TO_INVALID, MAP_T::VNI_TO_VLAN_ID); uint8_t mapper_list = 0; TUNNELMAP_SET_VLAN(mapper_list); TUNNELMAP_SET_VRF(mapper_list); tunnel_obj->createTunnelHw(mapper_list,TUNNEL_MAP_USE_DEDICATED_ENCAP_DECAP); - Port tunPort; - auto src_vtep = tunnel_obj->getSrcIP().to_string(); - if (!tunnel_orch->getTunnelPort(src_vtep, tunPort, true)) + if (!tunnel_orch->isDipTunnelsSupported()) { - auto port_tunnel_name = tunnel_orch->getTunnelPortName(src_vtep, true); - gPortsOrch->addTunnel(port_tunnel_name, tunnel_obj->getTunnelId(), false); - gPortsOrch->getPort(port_tunnel_name,tunPort); - gPortsOrch->addBridgePort(tunPort); + Port tunPort; + auto src_vtep = tunnel_obj->getSrcIP().to_string(); + if (!tunnel_orch->getTunnelPort(src_vtep, tunPort, true)) + { + auto port_tunnel_name = tunnel_orch->getTunnelPortName(src_vtep, true); + gPortsOrch->addTunnel(port_tunnel_name, tunnel_obj->getTunnelId(), false); + gPortsOrch->getPort(port_tunnel_name,tunPort); + gPortsOrch->addBridgePort(tunPort); + } } } @@ -2050,11 +1975,21 @@ bool VxlanTunnelMapOrch::addOperation(const Request& request) tunnel_obj->vlan_vrf_vni_count++; SWSS_LOG_INFO("vni count increased to %d",tunnel_obj->vlan_vrf_vni_count); + VRFOrch* vrf_orch = gDirectory.get(); + isL3Vni = vrf_orch->isL3VniVlan(vni_id); + try { - auto tunnel_map_entry_id = create_tunnel_map_entry(MAP_T::VNI_TO_VLAN_ID, - tunnel_map_id, vni_id, vlan_id); - vxlan_tunnel_map_table_[full_tunnel_map_entry_name].map_entry_id = tunnel_map_entry_id; + if (isL3Vni == false) + { + auto tunnel_map_entry_id = create_tunnel_map_entry(MAP_T::VNI_TO_VLAN_ID, + tunnel_map_id, vni_id, vlan_id); + 
vxlan_tunnel_map_table_[full_tunnel_map_entry_name].map_entry_id = tunnel_map_entry_id; + } + else + { + vxlan_tunnel_map_table_[full_tunnel_map_entry_name].map_entry_id = SAI_NULL_OBJECT_ID; + } vxlan_tunnel_map_table_[full_tunnel_map_entry_name].vlan_id = vlan_id; vxlan_tunnel_map_table_[full_tunnel_map_entry_name].vni_id = vni_id; } @@ -2132,26 +2067,27 @@ bool VxlanTunnelMapOrch::delOperation(const Request& request) auto port_tunnel_name = tunnel_orch->getTunnelPortName(src_vtep, true); bool ret; - ret = gPortsOrch->getPort(port_tunnel_name, tunnelPort); // If there are Dynamic DIP Tunnels referring to this SIP Tunnel // then mark it as pending for delete. if (!tunnel_obj->isTunnelReferenced()) { - if (!ret) + if (!tunnel_orch->isDipTunnelsSupported()) { - SWSS_LOG_ERROR("Get port failed for source vtep %s", port_tunnel_name.c_str()); - return true; + ret = gPortsOrch->getPort(port_tunnel_name, tunnelPort); + if (!ret) + { + SWSS_LOG_ERROR("Get port failed for source vtep %s", port_tunnel_name.c_str()); + return true; + } + ret = gPortsOrch->removeBridgePort(tunnelPort); + if (!ret) + { + SWSS_LOG_ERROR("Remove Bridge port failed for source vtep = %s fdbcount = %d", + port_tunnel_name.c_str(), tunnelPort.m_fdb_count); + return true; + } + gPortsOrch->removeTunnel(tunnelPort); } - ret = gPortsOrch->removeBridgePort(tunnelPort); - if (!ret) - { - SWSS_LOG_ERROR("Remove Bridge port failed for source vtep = %s fdbcount = %d", - port_tunnel_name.c_str(), tunnelPort.m_fdb_count); - return true; - } - - gPortsOrch->removeTunnel(tunnelPort); - uint8_t mapper_list=0; TUNNELMAP_SET_VLAN(mapper_list); TUNNELMAP_SET_VRF(mapper_list); @@ -2167,6 +2103,7 @@ bool VxlanTunnelMapOrch::delOperation(const Request& request) } else { + gPortsOrch->getPort(port_tunnel_name, tunnelPort); SWSS_LOG_WARN("Postponing the SIP Tunnel HW deletion Remote reference count = %d", gPortsOrch->getBridgePortReferenceCount(tunnelPort)); } @@ -2198,9 +2135,13 @@ bool 
VxlanTunnelMapOrch::delOperation(const Request& request) bool VxlanVrfMapOrch::addOperation(const Request& request) { SWSS_LOG_ENTER(); + std::string vniVlanMapName; + uint32_t vlan_id = 0; + sai_object_id_t tnl_map_entry_id = SAI_NULL_OBJECT_ID; auto tunnel_name = request.getKeyString(0); VxlanTunnelOrch* tunnel_orch = gDirectory.get(); + VxlanTunnelMapOrch* vxlan_tun_map_orch = gDirectory.get(); if (!tunnel_orch->isTunnelExists(tunnel_name)) { SWSS_LOG_WARN("Vxlan tunnel '%s' doesn't exist", tunnel_name.c_str()); @@ -2233,7 +2174,22 @@ bool VxlanVrfMapOrch::addOperation(const Request& request) { if (!tunnel_obj->isActive()) { - tunnel_obj->createTunnel(MAP_T::VRID_TO_VNI, MAP_T::VNI_TO_VRID); + uint8_t mapper_list = 0; + TUNNELMAP_SET_VLAN(mapper_list); + TUNNELMAP_SET_VRF(mapper_list); + tunnel_obj->createTunnelHw(mapper_list,TUNNEL_MAP_USE_DEDICATED_ENCAP_DECAP); + if (!tunnel_orch->isDipTunnelsSupported()) + { + Port tunPort; + auto src_vtep = tunnel_obj->getSrcIP().to_string(); + if (!tunnel_orch->getTunnelPort(src_vtep, tunPort, true)) + { + auto port_tunnel_name = tunnel_orch->getTunnelPortName(src_vtep, true); + gPortsOrch->addTunnel(port_tunnel_name, tunnel_obj->getTunnelId(), false); + gPortsOrch->getPort(port_tunnel_name,tunPort); + gPortsOrch->addBridgePort(tunPort); + } + } } vrf_id = vrf_orch->getVRFid(vrf_name); } @@ -2247,6 +2203,15 @@ bool VxlanVrfMapOrch::addOperation(const Request& request) vrf_map_entry_t entry; try { + entry.isL2Vni = vxlan_tun_map_orch->isVniVlanMapExists(vni_id, vniVlanMapName, &tnl_map_entry_id, &vlan_id); + entry.vni_id = vni_id; + if (entry.isL2Vni) + { + entry.vniVlanMapName = vniVlanMapName; + entry.vlan_id = vlan_id; + remove_tunnel_map_entry(tnl_map_entry_id); + SWSS_LOG_DEBUG("remove_tunnel_map_entry name %s, vlan %d, vni %d\n", entry.vniVlanMapName.c_str(), entry.vlan_id, entry.vni_id); + } /* * Create encap and decap mapper */ @@ -2278,7 +2243,12 @@ bool VxlanVrfMapOrch::delOperation(const Request& request) 
SWSS_LOG_ENTER(); VRFOrch* vrf_orch = gDirectory.get(); + VxlanTunnelOrch* tunnel_orch = gDirectory.get(); + VxlanTunnelMapOrch* vxlan_tun_map_orch = gDirectory.get(); const auto full_map_entry_name = request.getFullKey(); + std::string vniVlanMapName; + uint32_t vlan_id = 0; + sai_object_id_t tnl_map_entry_id = SAI_NULL_OBJECT_ID; if (!isVrfMapExists(full_map_entry_name)) { @@ -2300,6 +2270,9 @@ bool VxlanVrfMapOrch::delOperation(const Request& request) return false; } SWSS_LOG_NOTICE("VxlanVrfMapOrch VRF VNI mapping '%s' remove vrf %s", full_map_entry_name.c_str(), vrf_name.c_str()); + auto tunnel_name = request.getKeyString(0); + auto tunnel_obj = tunnel_orch->getVxlanTunnel(tunnel_name); + vrf_map_entry_t entry; try { @@ -2315,6 +2288,32 @@ bool VxlanVrfMapOrch::delOperation(const Request& request) vrf_orch->decreaseVrfRefCount(vrf_name); remove_tunnel_map_entry(entry.decap_id); vrf_orch->decreaseVrfRefCount(vrf_name); + + if (!entry.isL2Vni) + { + entry.isL2Vni = vxlan_tun_map_orch->isVniVlanMapExists(entry.vni_id, vniVlanMapName, &tnl_map_entry_id, &vlan_id); + SWSS_LOG_NOTICE("VxlanVrfMapOrch vni %d, isL2Vni %d\n", entry.vni_id, entry.isL2Vni); + + if (entry.isL2Vni) + { + entry.vniVlanMapName = vniVlanMapName; + entry.vlan_id = vlan_id; + SWSS_LOG_DEBUG("add_tunnel_map_entry name %s, vlan %d, vni %d\n", entry.vniVlanMapName.c_str(), entry.vlan_id, entry.vni_id); + } + } + if(entry.isL2Vni) + { + const auto tunnel_map_id = tunnel_obj->getDecapMapId(TUNNEL_MAP_T_VLAN); + SWSS_LOG_NOTICE("Adding tunnel map entry. 
Tunnel: %s %s",tunnel_name.c_str(),entry.vniVlanMapName.c_str()); + + SWSS_LOG_DEBUG("create_tunnel_map_entry vni %d, vlan %d\n", entry.vni_id, entry.vlan_id); + auto tunnel_map_entry_id = create_tunnel_map_entry(MAP_T::VNI_TO_VLAN_ID, + tunnel_map_id, entry.vni_id, (uint16_t)entry.vlan_id); + SWSS_LOG_DEBUG("updateTnlMapId name %s\n", entry.vniVlanMapName.c_str()); + + vxlan_tun_map_orch->updateTnlMapId(entry.vniVlanMapName, tunnel_map_entry_id); + } + vxlan_vrf_table_.erase(full_map_entry_name); vxlan_vrf_tunnel_.erase(vrf_name); } @@ -2348,8 +2347,21 @@ bool EvpnRemoteVnip2pOrch::addOperation(const Request& request) return true; } + EvpnNvoOrch* evpn_orch = gDirectory.get(); + auto vtep_ptr = evpn_orch->getEVPNVtep(); + if (!vtep_ptr) + { + SWSS_LOG_WARN("Remote VNI add: Source VTEP not found. remote=%s vid=%d", + remote_vtep.c_str(), vlan_id); + return true; + } + VxlanTunnelOrch* tunnel_orch = gDirectory.get(); Port tunnelPort, vlanPort; + VxlanTunnelMapOrch* vxlan_tun_map_orch = gDirectory.get(); + std::string vniVlanMapName; + uint32_t tmp_vlan_id = 0; + sai_object_id_t tnl_map_entry_id = SAI_NULL_OBJECT_ID; if (!gPortsOrch->getVlanByVlanId(vlan_id, vlanPort)) { @@ -2357,22 +2369,28 @@ bool EvpnRemoteVnip2pOrch::addOperation(const Request& request) return false; } + /* Remote end point can be added only after local VLAN to VNI map gets created */ + if (!vxlan_tun_map_orch->isVniVlanMapExists(vni_id, vniVlanMapName, &tnl_map_entry_id, &tmp_vlan_id)) + { + SWSS_LOG_WARN("Vxlan tunnel map is not created for vni:%d", vni_id); + return false; + } + + VRFOrch* vrf_orch = gDirectory.get(); + if (vrf_orch->isL3VniVlan(vni_id)) + { + SWSS_LOG_WARN("Ignoring remote VNI add for L3 VNI:%d, remote:%s", vni_id, remote_vtep.c_str()); + return false; + } + if (tunnel_orch->getTunnelPort(remote_vtep,tunnelPort)) { SWSS_LOG_INFO("Vxlan tunnelPort exists: %s", remote_vtep.c_str()); if (gPortsOrch->isVlanMember(vlanPort, tunnelPort)) { - EvpnNvoOrch* evpn_orch = 
gDirectory.get(); - auto vtep_ptr = evpn_orch->getEVPNVtep(); - if (!vtep_ptr) - { - SWSS_LOG_WARN("Remote VNI add: VTEP not found. remote=%s vid=%d", - remote_vtep.c_str(),vlan_id); - return true; - } SWSS_LOG_WARN("tunnelPort %s already member of vid %d", - remote_vtep.c_str(),vlan_id); + remote_vtep.c_str(),vlan_id); vtep_ptr->increment_spurious_imr_add(remote_vtep); return true; } @@ -2493,6 +2511,11 @@ bool EvpnRemoteVnip2mpOrch::addOperation(const Request& request) } VxlanTunnelOrch* tunnel_orch = gDirectory.get(); + VxlanTunnelMapOrch* vxlan_tun_map_orch = gDirectory.get(); + std::string vniVlanMapName; + uint32_t tmp_vlan_id = 0; + sai_object_id_t tnl_map_entry_id = SAI_NULL_OBJECT_ID; + Port tunnelPort, vlanPort; auto vtep_ptr = evpn_orch->getEVPNVtep(); if (!vtep_ptr) @@ -2508,6 +2531,20 @@ bool EvpnRemoteVnip2mpOrch::addOperation(const Request& request) return false; } + /* Remote end point can be added only after local VLAN to VNI map gets created */ + if (!vxlan_tun_map_orch->isVniVlanMapExists(vni_id, vniVlanMapName, &tnl_map_entry_id, &tmp_vlan_id)) + { + SWSS_LOG_WARN("Vxlan tunnel map is not created for vni: %d", vni_id); + return false; + } + + VRFOrch* vrf_orch = gDirectory.get(); + if (vrf_orch->isL3VniVlan(vni_id)) + { + SWSS_LOG_WARN("Ignoring remote VNI add for L3 VNI:%d, remote:%s", vni_id, end_point_ip.c_str()); + return false; + } + auto src_vtep = vtep_ptr->getSrcIP().to_string(); if (tunnel_orch->getTunnelPort(src_vtep,tunnelPort, true)) { @@ -2658,3 +2695,35 @@ bool EvpnNvoOrch::delOperation(const Request& request) return true; } + +bool VxlanTunnelMapOrch::isVniVlanMapExists(uint32_t vni_id, std::string& vniVlanMapName, sai_object_id_t *tnl_map_entry_id, uint32_t *vlan_id) +{ + SWSS_LOG_ENTER(); + bool map_entry_exists = false; + std::map::iterator it; + for(it = vxlan_tunnel_map_table_.begin(); it != vxlan_tunnel_map_table_.end(); it++) + { + auto full_tunnel_map_entry_name = it->first; + tunnel_map_entry_t tunnel_map_entry = 
it->second; + + if (vni_id == tunnel_map_entry.vni_id) + { + vniVlanMapName = full_tunnel_map_entry_name; + *tnl_map_entry_id = tunnel_map_entry.map_entry_id; + *vlan_id = tunnel_map_entry.vlan_id; + map_entry_exists = true; + SWSS_LOG_NOTICE("vniVlanMapName %s, vlan %d\n", vniVlanMapName.c_str(), *vlan_id); + break; + } + } + + return map_entry_exists; +} + +void VxlanTunnelMapOrch::updateTnlMapId(std::string vniVlanMapName, sai_object_id_t tunnel_map_id) +{ + SWSS_LOG_ENTER(); + SWSS_LOG_NOTICE("name %s\n", vniVlanMapName.c_str()); + vxlan_tunnel_map_table_[vniVlanMapName].map_entry_id = tunnel_map_id; +} + diff --git a/orchagent/vxlanorch.h b/orchagent/vxlanorch.h index 0b56e76faa..695f7441e0 100644 --- a/orchagent/vxlanorch.h +++ b/orchagent/vxlanorch.h @@ -37,7 +37,7 @@ typedef enum #define IS_TUNNELMAP_SET_BRIDGE(x) ((x)& (1< VxlanVrfTable; diff --git a/portsyncd/Makefile.am b/portsyncd/Makefile.am index 5bba269ab2..3db6187059 100644 --- a/portsyncd/Makefile.am +++ b/portsyncd/Makefile.am @@ -10,10 +10,15 @@ endif portsyncd_SOURCES = $(top_srcdir)/lib/gearboxutils.cpp portsyncd.cpp linksync.cpp $(top_srcdir)/cfgmgr/shellcmd.h -portsyncd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) -portsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) -portsyncd_LDADD = -lnl-3 -lnl-route-3 -lswsscommon +portsyncd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) +portsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) +portsyncd_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lswsscommon if GCOV_ENABLED portsyncd_LDADD += -lgcovpreload endif + +if ASAN_ENABLED +portsyncd_SOURCES += $(top_srcdir)/lib/asan.cpp +endif + diff --git a/portsyncd/linksync.cpp b/portsyncd/linksync.cpp index 4a2b351ee0..fc28411613 100644 --- a/portsyncd/linksync.cpp +++ b/portsyncd/linksync.cpp @@ -2,7 +2,7 @@ #include #include #include -#include +#include #include #include "logger.h" #include "netmsg.h" @@ -34,23 +34,16 @@ const string LAG_PREFIX = 
"PortChannel"; extern set g_portSet; extern bool g_init; -struct if_nameindex -{ - unsigned int if_index; - char *if_name; -}; -extern "C" { extern struct if_nameindex *if_nameindex (void) __THROW; } - LinkSync::LinkSync(DBConnector *appl_db, DBConnector *state_db) : m_portTableProducer(appl_db, APP_PORT_TABLE_NAME), m_portTable(appl_db, APP_PORT_TABLE_NAME), m_statePortTable(state_db, STATE_PORT_TABLE_NAME), m_stateMgmtPortTable(state_db, STATE_MGMT_PORT_TABLE_NAME) { - struct if_nameindex *if_ni, *idx_p; - if_ni = if_nameindex(); + std::shared_ptr if_ni(if_nameindex(), if_freenameindex); + struct if_nameindex *idx_p; - for (idx_p = if_ni; + for (idx_p = if_ni.get(); idx_p != NULL && idx_p->if_index != 0 && idx_p->if_name != NULL; idx_p++) { @@ -121,7 +114,7 @@ LinkSync::LinkSync(DBConnector *appl_db, DBConnector *state_db) : } } - for (idx_p = if_ni; + for (idx_p = if_ni.get(); idx_p != NULL && idx_p->if_index != 0 && idx_p->if_name != NULL; idx_p++) { @@ -212,10 +205,9 @@ void LinkSync::onMsg(int nlmsg_type, struct nl_object *obj) return; } - /* If netlink for this port has master, we ignore that for now - * This could be the case where the port was removed from VLAN bridge - */ - if (master) + /* Ignore DELLINK message if port has master, this is applicable to + * the case where port was part of VLAN bridge or LAG */ + if (master && nlmsg_type == RTM_DELLINK) { return; } diff --git a/portsyncd/portsyncd.cpp b/portsyncd/portsyncd.cpp index c55c1685af..4173f62e2a 100644 --- a/portsyncd/portsyncd.cpp +++ b/portsyncd/portsyncd.cpp @@ -42,43 +42,37 @@ void usage() cout << " this program will exit if configDB does not contain that info" << endl; } -void handlePortConfigFile(ProducerStateTable &p, string file, bool warm); void handlePortConfigFromConfigDB(ProducerStateTable &p, DBConnector &cfgDb, bool warm); -void handleVlanIntfFile(string file); -void handlePortConfig(ProducerStateTable &p, map &port_cfg_map); -void checkPortInitDone(DBConnector *appl_db); int 
main(int argc, char **argv) { - Logger::linkToDbNative("portsyncd"); - int opt; - map port_cfg_map; - - while ((opt = getopt(argc, argv, "v:h")) != -1 ) + try { - switch (opt) + Logger::linkToDbNative("portsyncd"); + int opt; + + while ((opt = getopt(argc, argv, "v:h")) != -1 ) { - case 'h': - usage(); - return 1; - default: /* '?' */ - usage(); - return EXIT_FAILURE; + switch (opt) + { + case 'h': + usage(); + return 1; + default: /* '?' */ + usage(); + return EXIT_FAILURE; + } } - } - DBConnector cfgDb("CONFIG_DB", 0); - DBConnector appl_db("APPL_DB", 0); - DBConnector state_db("STATE_DB", 0); - ProducerStateTable p(&appl_db, APP_PORT_TABLE_NAME); - SubscriberStateTable portCfg(&cfgDb, CFG_PORT_TABLE_NAME); + DBConnector cfgDb("CONFIG_DB", 0); + DBConnector appl_db("APPL_DB", 0); + DBConnector state_db("STATE_DB", 0); + ProducerStateTable p(&appl_db, APP_PORT_TABLE_NAME); - WarmStart::initialize("portsyncd", "swss"); - WarmStart::checkWarmStart("portsyncd", "swss"); - const bool warm = WarmStart::isWarmStart(); + WarmStart::initialize("portsyncd", "swss"); + WarmStart::checkWarmStart("portsyncd", "swss"); + const bool warm = WarmStart::isWarmStart(); - try - { NetLink netlink; Select s; @@ -93,7 +87,6 @@ int main(int argc, char **argv) NetDispatcher::getInstance().registerMessageHandler(RTM_DELLINK, &sync); s.addSelectable(&netlink); - s.addSelectable(&portCfg); while (true) { @@ -135,28 +128,6 @@ int main(int argc, char **argv) g_init = true; } - if (!port_cfg_map.empty()) - { - handlePortConfig(p, port_cfg_map); - } - } - else if (temps == (Selectable *)&portCfg) - { - std::deque entries; - portCfg.pops(entries); - - for (auto entry: entries) - { - string key = kfvKey(entry); - - if (port_cfg_map.find(key) != port_cfg_map.end()) - { - /* For now we simply drop previous pending port config */ - port_cfg_map.erase(key); - } - port_cfg_map[key] = entry; - } - handlePortConfig(p, port_cfg_map); } else { @@ -165,6 +136,16 @@ int main(int argc, char **argv) } } } + 
catch (const swss::RedisError& e) + { + cerr << "Exception \"" << e.what() << "\" was thrown in daemon" << endl; + return EXIT_FAILURE; + } + catch (const std::out_of_range& e) + { + cerr << "Exception \"" << e.what() << "\" was thrown in daemon" << endl; + return EXIT_FAILURE; + } catch (const std::exception& e) { cerr << "Exception \"" << e.what() << "\" was thrown in daemon" << endl; @@ -225,83 +206,3 @@ void handlePortConfigFromConfigDB(ProducerStateTable &p, DBConnector &cfgDb, boo } } - -void handlePortConfig(ProducerStateTable &p, map &port_cfg_map) -{ - string autoneg; - vector attrs; - vector autoneg_attrs; - vector force_attrs; - - auto it = port_cfg_map.begin(); - while (it != port_cfg_map.end()) - { - KeyOpFieldsValuesTuple entry = it->second; - string key = kfvKey(entry); - string op = kfvOp(entry); - auto values = kfvFieldsValues(entry); - - /* only push down port config when port is not in hostif create pending state */ - if (g_portSet.find(key) == g_portSet.end()) - { - /* No support for port delete yet */ - if (op == SET_COMMAND) - { - - for (auto i : values) - { - auto field = fvField(i); - if (field == "adv_speeds") - { - autoneg_attrs.push_back(i); - } - else if (field == "adv_interface_types") - { - autoneg_attrs.push_back(i); - } - else if (field == "speed") - { - force_attrs.push_back(i); - } - else if (field == "interface_type") - { - force_attrs.push_back(i); - } - else if (field == "autoneg") - { - autoneg = fvValue(i); - attrs.push_back(i); - } - else - { - attrs.push_back(i); - } - } - if (autoneg == "on") // autoneg is on, only put adv_speeds and adv_interface_types to APPL_DB - { - attrs.insert(attrs.end(), autoneg_attrs.begin(), autoneg_attrs.end()); - } - else if (autoneg == "off") // autoneg is off, only put speed and interface_type to APPL_DB - { - attrs.insert(attrs.end(), force_attrs.begin(), force_attrs.end()); - } - else // autoneg is not configured, put all attributes to APPL_DB - { - attrs.insert(attrs.end(), 
autoneg_attrs.begin(), autoneg_attrs.end()); - attrs.insert(attrs.end(), force_attrs.begin(), force_attrs.end()); - } - p.set(key, attrs); - attrs.clear(); - autoneg_attrs.clear(); - force_attrs.clear(); - autoneg.clear(); - } - - it = port_cfg_map.erase(it); - } - else - { - it++; - } - } -} diff --git a/swssconfig/Makefile.am b/swssconfig/Makefile.am index 590e7d9f56..3cfc0b9629 100644 --- a/swssconfig/Makefile.am +++ b/swssconfig/Makefile.am @@ -10,17 +10,23 @@ endif swssconfig_SOURCES = swssconfig.cpp -swssconfig_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) -swssconfig_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) -swssconfig_LDADD = -lswsscommon +swssconfig_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) +swssconfig_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) +swssconfig_LDADD = $(LDFLAGS_ASAN) -lswsscommon swssplayer_SOURCES = swssplayer.cpp -swssplayer_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) -swssplayer_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) -swssplayer_LDADD = -lswsscommon +swssplayer_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) +swssplayer_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) +swssplayer_LDADD = $(LDFLAGS_ASAN) -lswsscommon if GCOV_ENABLED swssconfig_LDADD += -lgcovpreload swssplayer_LDADD += -lgcovpreload endif + +if ASAN_ENABLED +swssconfig_SOURCES += $(top_srcdir)/lib/asan.cpp +swssplayer_SOURCES += $(top_srcdir)/lib/asan.cpp +endif + diff --git a/swssconfig/swssconfig.cpp b/swssconfig/swssconfig.cpp index a41ef0ecdd..e61d038381 100644 --- a/swssconfig/swssconfig.cpp +++ b/swssconfig/swssconfig.cpp @@ -41,7 +41,10 @@ void dump_db_item(KeyOpFieldsValuesTuple &db_item) bool write_db_data(vector &db_items) { - DBConnector db("APPL_DB", 0, true); + DBConnector db("APPL_DB", 0, false); + RedisPipeline pipeline(&db); // dtor of RedisPipeline will automatically flush data + unordered_map table_map; + for (auto &db_item : db_items) 
{ dump_db_item(db_item); @@ -55,18 +58,19 @@ bool write_db_data(vector &db_items) } string table_name = key.substr(0, pos); string key_name = key.substr(pos + 1); - ProducerStateTable producer(&db, table_name); + auto ret = table_map.emplace(std::piecewise_construct, std::forward_as_tuple(table_name), std::forward_as_tuple(&pipeline, table_name, true)); if (kfvOp(db_item) == SET_COMMAND) - producer.set(key_name, kfvFieldsValues(db_item), SET_COMMAND); + ret.first->second.set(key_name, kfvFieldsValues(db_item), SET_COMMAND); else if (kfvOp(db_item) == DEL_COMMAND) - producer.del(key_name, DEL_COMMAND); + ret.first->second.del(key_name, DEL_COMMAND); else { SWSS_LOG_ERROR("Invalid operation: %s\n", kfvOp(db_item).c_str()); return false; } } + return true; } diff --git a/teamsyncd/Makefile.am b/teamsyncd/Makefile.am index 2939a52f2a..a13573bf25 100644 --- a/teamsyncd/Makefile.am +++ b/teamsyncd/Makefile.am @@ -10,10 +10,15 @@ endif teamsyncd_SOURCES = teamsyncd.cpp teamsync.cpp -teamsyncd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) -teamsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) -teamsyncd_LDADD = -lnl-3 -lnl-route-3 -lhiredis -lswsscommon -lteam +teamsyncd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) +teamsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) +teamsyncd_LDADD = $(LDFLAGS_ASAN) -lnl-3 -lnl-route-3 -lhiredis -lswsscommon -lteam if GCOV_ENABLED teamsyncd_LDADD += -lgcovpreload endif + +if ASAN_ENABLED +teamsyncd_SOURCES += $(top_srcdir)/lib/asan.cpp +endif + diff --git a/teamsyncd/teamsyncd.cpp b/teamsyncd/teamsyncd.cpp index c5190f46b1..95890be406 100644 --- a/teamsyncd/teamsyncd.cpp +++ b/teamsyncd/teamsyncd.cpp @@ -11,9 +11,16 @@ using namespace std; using namespace swss; bool received_sigterm = false; +static struct sigaction old_sigaction; void sig_handler(int signo) { + SWSS_LOG_ENTER(); + + if (old_sigaction.sa_handler != SIG_IGN && old_sigaction.sa_handler != SIG_DFL) { + 
old_sigaction.sa_handler(signo); + } + received_sigterm = true; return; } @@ -30,7 +37,13 @@ int main(int argc, char **argv) NetDispatcher::getInstance().registerMessageHandler(RTM_DELLINK, &sync); /* Register the signal handler for SIGTERM */ - signal(SIGTERM, sig_handler); + struct sigaction sigact = {}; + sigact.sa_handler = sig_handler; + if (sigaction(SIGTERM, &sigact, &old_sigaction)) + { + SWSS_LOG_ERROR("failed to setup SIGTERM action handler"); + exit(EXIT_FAILURE); + } try { diff --git a/tests/Makefile.am b/tests/Makefile.am index 0b6831be97..8f2aa131c4 100644 --- a/tests/Makefile.am +++ b/tests/Makefile.am @@ -1,3 +1,5 @@ +INCLUDES = -I $(top_srcdir)/lib + CFLAGS_SAI = -I /usr/include/sai TESTS = tests @@ -18,7 +20,7 @@ CFLAGS_GTEST = LDADD_GTEST = -L/usr/src/gtest tests_SOURCES = swssnet_ut.cpp request_parser_ut.cpp ../orchagent/request_parser.cpp \ - quoted_ut.cpp + quoted_ut.cpp ../lib/recorder.cpp tests_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) tests_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) -I../orchagent diff --git a/tests/README.md b/tests/README.md index 33c9c3479a..29bce875a2 100644 --- a/tests/README.md +++ b/tests/README.md @@ -35,7 +35,7 @@ SWSS, Redis, and all the other required components run inside a virtual switch D ``` sudo modprobe team - sudo apt install python3-pip net-tools ethtool vlan libnl-nf-3-200 libnl-cli-3-200 + sudo apt install python3-pip net-tools bridge-utils ethtool vlan libnl-nf-3-200 libnl-cli-3-200 sudo pip3 install docker pytest flaky redis distro dataclasses fstring ``` @@ -56,15 +56,16 @@ SWSS, Redis, and all the other required components run inside a virtual switch D ``` You can get these two packages by: - - [Building it from scratch](https://github.com/Azure/sonic-swss-common) - - Downloading the latest build from Jenkins: - - [Ubuntu 
18.04](https://sonic-jenkins.westus2.cloudapp.azure.com/job/common/job/sonic-swss-common-build-ubuntu/lastSuccessfulBuild/artifact/target/) - - [Ubuntu 20.04](https://sonic-jenkins.westus2.cloudapp.azure.com/job/common/job/sonic-swss-common-build-ubuntu-20_04/lastSuccessfulBuild/artifact/target/) + - [Building it from scratch](https://github.com/sonic-net/sonic-swss-common) + - Downloading the latest build from Azure: + - [Ubuntu 20.04](https://sonic-build.azurewebsites.net/api/sonic/artifacts?branchName=master&definitionId=9&artifactName=sonic-swss-common.amd64.ubuntu20_04) 5. Load the `docker-sonic-vs.gz` file into docker. You can get the image by: - - [Building it from scratch](https://github.com/Azure/sonic-buildimage) - - [Downloading the latest build from Jenkins](https://sonic-jenkins.westus2.cloudapp.azure.com/job/vs/job/buildimage-vs-all/lastSuccessfulBuild/artifact/target/) - + - [Building it from scratch](https://github.com/sonic-net/sonic-buildimage) + - Downloading the latest build from Azure: + - [docker-sonic-vs-asan.gz](https://sonic-build.azurewebsites.net/api/sonic/artifacts?branchName=master&platform=vs&target=target/docker-sonic-vs-asan.gz) + - [docker-sonic-vs.gz](https://sonic-build.azurewebsites.net/api/sonic/artifacts?branchName=master&platform=vs&target=target/docker-sonic-vs.gz) + Once you have the file, you can load it into docker by running `docker load < docker-sonic-vs.gz`. ## Running the tests @@ -76,7 +77,7 @@ sudo pytest ## Setting up a persistent testbed For those developing new features for SWSS or the DVS framework, you might find it helpful to setup a persistent DVS container that you can inspect and make modifications to (e.g. using `dpkg -i` to install a new version of SWSS to test a new feature). -1. [Download `create_vnet.sh`](https://github.com/Azure/sonic-buildimage/blob/master/platform/vs/create_vnet.sh). +1. 
[Download `create_vnet.sh`](https://github.com/sonic-net/sonic-buildimage/blob/master/platform/vs/create_vnet.sh). 2. Setup a virtual server and network. **Note**: It is _highly_ recommended you include the `-n 32` option or you may run into problems running the tests later. diff --git a/tests/conftest.py b/tests/conftest.py index 4139cbfa15..b6a81c1dbf 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -22,9 +22,11 @@ from dvslib.dvs_pbh import DVSPbh from dvslib.dvs_route import DVSRoute from dvslib import dvs_vlan +from dvslib import dvs_port from dvslib import dvs_lag from dvslib import dvs_mirror from dvslib import dvs_policer +from dvslib import dvs_hash from buffer_model import enable_dynamic_buffer @@ -88,17 +90,23 @@ def pytest_addoption(parser): default="traditional", help="Buffer model") + parser.addoption("--graceful-stop", + action="store_true", + default=False, + help="Stop swss and syncd before stopping a conatainer") + def random_string(size=4, chars=string.ascii_uppercase + string.digits): return "".join(random.choice(chars) for x in range(size)) class AsicDbValidator(DVSDatabase): - def __init__(self, db_id: int, connector: str): + def __init__(self, db_id: int, connector: str, switch_type: str): DVSDatabase.__init__(self, db_id, connector) - self._wait_for_asic_db_to_initialize() - self._populate_default_asic_db_values() - self._generate_oid_to_interface_mapping() + if switch_type not in ['fabric']: + self._wait_for_asic_db_to_initialize() + self._populate_default_asic_db_values() + self._generate_oid_to_interface_mapping() def _wait_for_asic_db_to_initialize(self) -> None: """Wait up to 30 seconds for the default fields to appear in ASIC DB.""" @@ -144,6 +152,8 @@ def _populate_default_asic_db_values(self) -> None: self.default_acl_tables = self.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE") self.default_acl_entries = self.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_ACL_ENTRY") + self.default_hash_keys = 
self.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_HASH") + self.default_copp_policers = self.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_POLICER") @@ -406,7 +416,7 @@ def create_servers(self): for i in range(NUM_PORTS): server = VirtualServer(self.ctn_sw.name, self.ctn_sw_pid, i) self.servers.append(server) - + def reset_dbs(self): # DB wrappers are declared here, lazy-loaded in the tests self.app_db = None @@ -491,7 +501,9 @@ def _polling_function(): wait_for_result(_polling_function, service_polling_config) def init_asic_db_validator(self) -> None: - self.asicdb = AsicDbValidator(self.ASIC_DB_ID, self.redis_sock) + self.get_config_db() + metadata = self.config_db.get_entry('DEVICE_METADATA|localhost', '') + self.asicdb = AsicDbValidator(self.ASIC_DB_ID, self.redis_sock, metadata.get("switch_type")) def init_appl_db_validator(self) -> None: self.appldb = ApplDbValidator(self.APPL_DB_ID, self.redis_sock) @@ -520,11 +532,13 @@ def _polling_function(): port_table_keys = app_db.get_keys("PORT_TABLE") return ("PortInitDone" in port_table_keys and "PortConfigDone" in port_table_keys, None) - wait_for_result(_polling_function, startup_polling_config) + if metadata.get('switch_type') not in ['fabric']: + wait_for_result(_polling_function, startup_polling_config) # Verify that all ports have been created - asic_db = self.get_asic_db() - asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", num_ports + 1) # +1 CPU Port + if metadata.get('switch_type') not in ['fabric']: + asic_db = self.get_asic_db() + asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", num_ports + 1) # +1 CPU Port # Verify that fabric ports are monitored in STATE_DB if metadata.get('switch_type', 'npu') in ['voq', 'fabric']: @@ -589,8 +603,8 @@ def restart(self) -> None: self.ctn_restart() self.check_ready_status_and_init_db() - def runcmd(self, cmd: str) -> Tuple[int, str]: - res = self.ctn.exec_run(cmd) + def runcmd(self, cmd: str, include_stderr=True) -> Tuple[int, str]: + res = self.ctn.exec_run(cmd, 
stdout=True, stderr=include_stderr) exitcode = res.exit_code out = res.output.decode("utf-8") @@ -657,6 +671,11 @@ def stop_swss(self): for pname in self.swssd: cmd += "supervisorctl stop {}; ".format(pname) self.runcmd(['sh', '-c', cmd]) + time.sleep(5) + + def stop_syncd(self): + self.runcmd(['sh', '-c', 'supervisorctl stop syncd']) + time.sleep(5) # deps: warm_reboot def start_zebra(self): @@ -668,7 +687,7 @@ def start_zebra(self): # deps: warm_reboot def stop_zebra(self): self.runcmd(['sh', '-c', 'pkill -9 zebra']) - time.sleep(1) + time.sleep(5) # deps: warm_reboot def start_fpmsyncd(self): @@ -1125,10 +1144,10 @@ def remove_fdb(self, vlan, mac): # deps: acl, fdb_update, fdb, intf_mac, mirror_port_erspan, mirror_port_span, # policer, port_dpb_vlan, vlan def setup_db(self): - self.pdb = swsscommon.DBConnector(0, self.redis_sock, 0) - self.adb = swsscommon.DBConnector(1, self.redis_sock, 0) - self.cdb = swsscommon.DBConnector(4, self.redis_sock, 0) - self.sdb = swsscommon.DBConnector(6, self.redis_sock, 0) + self.pdb = swsscommon.DBConnector(swsscommon.APPL_DB, self.redis_sock, 0) + self.adb = swsscommon.DBConnector(swsscommon.ASIC_DB, self.redis_sock, 0) + self.cdb = swsscommon.DBConnector(swsscommon.CONFIG_DB, self.redis_sock, 0) + self.sdb = swsscommon.DBConnector(swsscommon.STATE_DB, self.redis_sock, 0) def getSwitchOid(self): tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_SWITCH") @@ -1157,6 +1176,132 @@ def getCrmCounterValue(self, key, counter): if k[0] == counter: return int(k[1]) + def port_field_set(self, port, field, value): + cdb = swsscommon.DBConnector(4, self.redis_sock, 0) + tbl = swsscommon.Table(cdb, "PORT") + fvs = swsscommon.FieldValuePairs([(field, value)]) + tbl.set(port, fvs) + time.sleep(1) + + def port_admin_set(self, port, status): + self.port_field_set(port, "admin_status", status) + + def interface_ip_add(self, port, ip_address): + cdb = swsscommon.DBConnector(4, self.redis_sock, 0) + tbl = swsscommon.Table(cdb, 
"INTERFACE") + fvs = swsscommon.FieldValuePairs([("NULL", "NULL")]) + tbl.set(port, fvs) + tbl.set(port + "|" + ip_address, fvs) + time.sleep(1) + + def crm_poll_set(self, value): + cdb = swsscommon.DBConnector(4, self.redis_sock, 0) + tbl = swsscommon.Table(cdb, "CRM") + fvs = swsscommon.FieldValuePairs([("polling_interval", value)]) + tbl.set("Config", fvs) + time.sleep(1) + + def clear_fdb(self): + adb = swsscommon.DBConnector(0, self.redis_sock, 0) + opdata = ["ALL", "ALL"] + msg = json.dumps(opdata,separators=(',',':')) + adb.publish('FLUSHFDBREQUEST', msg) + + def warm_restart_swss(self, enable): + db = swsscommon.DBConnector(6, self.redis_sock, 0) + + tbl = swsscommon.Table(db, "WARM_RESTART_ENABLE_TABLE") + fvs = swsscommon.FieldValuePairs([("enable",enable)]) + tbl.set("swss", fvs) + + # nat + def nat_mode_set(self, value): + cdb = swsscommon.DBConnector(4, self.redis_sock, 0) + tbl = swsscommon.Table(cdb, "NAT_GLOBAL") + fvs = swsscommon.FieldValuePairs([("admin_mode", value)]) + tbl.set("Values", fvs) + time.sleep(1) + + def nat_timeout_set(self, value): + cdb = swsscommon.DBConnector(4, self.redis_sock, 0) + tbl = swsscommon.Table(cdb, "NAT_GLOBAL") + fvs = swsscommon.FieldValuePairs([("nat_timeout", value)]) + tbl.set("Values", fvs) + time.sleep(1) + + def nat_udp_timeout_set(self, value): + cdb = swsscommon.DBConnector(4, self.redis_sock, 0) + tbl = swsscommon.Table(cdb, "NAT_GLOBAL") + fvs = swsscommon.FieldValuePairs([("nat_udp_timeout", value)]) + tbl.set("Values", fvs) + time.sleep(1) + + def nat_tcp_timeout_set(self, value): + cdb = swsscommon.DBConnector(4, self.redis_sock, 0) + tbl = swsscommon.Table(cdb, "NAT_GLOBAL") + fvs = swsscommon.FieldValuePairs([("nat_tcp_timeout", value)]) + tbl.set("Values", fvs) + time.sleep(1) + + def add_nat_basic_entry(self, external, internal): + cdb = swsscommon.DBConnector(4, self.redis_sock, 0) + tbl = swsscommon.Table(cdb, "STATIC_NAT") + fvs = swsscommon.FieldValuePairs([("local_ip", internal)]) + 
tbl.set(external, fvs) + time.sleep(1) + + def del_nat_basic_entry(self, external): + cdb = swsscommon.DBConnector(4, self.redis_sock, 0) + tbl = swsscommon.Table(cdb, "STATIC_NAT") + tbl._del(external) + time.sleep(1) + + def add_nat_udp_entry(self, external, extport, internal, intport): + cdb = swsscommon.DBConnector(4, self.redis_sock, 0) + tbl = swsscommon.Table(cdb, "STATIC_NAPT") + fvs = swsscommon.FieldValuePairs([("local_ip", internal), ("local_port", intport)]) + tbl.set(external + "|UDP|" + extport, fvs) + time.sleep(1) + + def del_nat_udp_entry(self, external, extport): + cdb = swsscommon.DBConnector(4, self.redis_sock, 0) + tbl = swsscommon.Table(cdb, "STATIC_NAPT") + tbl._del(external + "|UDP|" + extport) + time.sleep(1) + + def add_twice_nat_basic_entry(self, external, internal, nat_type, twice_nat_id): + cdb = swsscommon.DBConnector(4, self.redis_sock, 0) + tbl = swsscommon.Table(cdb, "STATIC_NAT") + fvs = swsscommon.FieldValuePairs([("local_ip", internal), ("nat_type", nat_type), ("twice_nat_id", twice_nat_id)]) + tbl.set(external, fvs) + time.sleep(1) + + def del_twice_nat_basic_entry(self, external): + self.del_nat_basic_entry(external) + + def add_twice_nat_udp_entry(self, external, extport, internal, intport, nat_type, twice_nat_id): + cdb = swsscommon.DBConnector(4, self.redis_sock, 0) + tbl = swsscommon.Table(cdb, "STATIC_NAPT") + fvs = swsscommon.FieldValuePairs([("local_ip", internal), ("local_port", intport), ("nat_type", nat_type), ("twice_nat_id", twice_nat_id)]) + tbl.set(external + "|UDP|" + extport, fvs) + time.sleep(1) + + def del_twice_nat_udp_entry(self, external, extport): + self.del_nat_udp_entry(external, extport) + + def set_nat_zone(self, interface, nat_zone): + cdb = swsscommon.DBConnector(4, self.redis_sock, 0) + if interface.startswith("PortChannel"): + tbl_name = "PORTCHANNEL_INTERFACE" + elif interface.startswith("Vlan"): + tbl_name = "VLAN_INTERFACE" + else: + tbl_name = "INTERFACE" + tbl = swsscommon.Table(cdb, tbl_name) 
+ fvs = swsscommon.FieldValuePairs([("nat_zone", nat_zone)]) + tbl.set(interface, fvs) + time.sleep(1) + # deps: acl, crm, fdb def setReadOnlyAttr(self, obj, attr, val): db = swsscommon.DBConnector(swsscommon.ASIC_DB, self.redis_sock, 0) @@ -1196,6 +1341,7 @@ def get_asic_db(self) -> AsicDbValidator: db = DVSDatabase(self.ASIC_DB_ID, self.redis_sock) db.default_acl_tables = self.asicdb.default_acl_tables db.default_acl_entries = self.asicdb.default_acl_entries + db.default_hash_keys = self.asicdb.default_hash_keys db.default_copp_policers = self.asicdb.default_copp_policers db.port_name_map = self.asicdb.portnamemap db.default_vlan_id = self.asicdb.default_vlan_id @@ -1602,6 +1748,8 @@ def manage_dvs(request) -> str: max_cpu = request.config.getoption("--max_cpu") buffer_model = request.config.getoption("--buffer_model") force_recreate = request.config.getoption("--force-recreate-dvs") + graceful_stop = request.config.getoption("--graceful-stop") + dvs = None curr_dvs_env = [] # lgtm[py/unused-local-variable] @@ -1650,6 +1798,9 @@ def update_dvs(log_path, new_dvs_env=[]): yield update_dvs + if graceful_stop: + dvs.stop_swss() + dvs.stop_syncd() dvs.get_logs() dvs.destroy() @@ -1657,7 +1808,7 @@ def update_dvs(log_path, new_dvs_env=[]): dvs.runcmd("mv /etc/sonic/config_db.json.orig /etc/sonic/config_db.json") dvs.ctn_restart() -@pytest.yield_fixture(scope="module") +@pytest.fixture(scope="module") def dvs(request, manage_dvs) -> DockerVirtualSwitch: dvs_env = getattr(request.module, "DVS_ENV", []) name = request.config.getoption("--dvsname") @@ -1666,6 +1817,25 @@ def dvs(request, manage_dvs) -> DockerVirtualSwitch: return manage_dvs(log_path, dvs_env) @pytest.yield_fixture(scope="module") +def vst(request): + vctns = request.config.getoption("--vctns") + topo = request.config.getoption("--topo") + forcedvs = request.config.getoption("--forcedvs") + keeptb = request.config.getoption("--keeptb") + imgname = request.config.getoption("--imgname") + max_cpu = 
request.config.getoption("--max_cpu") + log_path = vctns if vctns else request.module.__name__ + dvs_env = getattr(request.module, "DVS_ENV", []) + if not topo: + # use ecmp topology as default + topo = "virtual_chassis/chassis_supervisor.json" + vct = DockerVirtualChassisTopology(vctns, imgname, keeptb, dvs_env, log_path, max_cpu, + forcedvs, topo) + yield vct + vct.get_logs(request.module.__name__) + vct.destroy() + +@pytest.fixture(scope="module") def vct(request): vctns = request.config.getoption("--vctns") topo = request.config.getoption("--topo") @@ -1684,10 +1854,11 @@ def vct(request): vct.get_logs(request.module.__name__) vct.destroy() -@pytest.yield_fixture + +@pytest.fixture def testlog(request, dvs): dvs.runcmd(f"logger -t pytest === start test {request.node.nodeid} ===") - yield testlog + yield dvs.runcmd(f"logger -t pytest === finish test {request.node.nodeid} ===") ################# DVSLIB module manager fixtures ############################# @@ -1713,13 +1884,14 @@ def dvs_route(request, dvs) -> DVSRoute: # FIXME: The rest of these also need to be reverted back to normal fixtures to # appease the linter. 
-@pytest.yield_fixture(scope="class") +@pytest.fixture(scope="class") def dvs_lag_manager(request, dvs): request.cls.dvs_lag = dvs_lag.DVSLag(dvs.get_asic_db(), - dvs.get_config_db()) + dvs.get_config_db(), + dvs) -@pytest.yield_fixture(scope="class") +@pytest.fixture(scope="class") def dvs_vlan_manager(request, dvs): request.cls.dvs_vlan = dvs_vlan.DVSVlan(dvs.get_asic_db(), dvs.get_config_db(), @@ -1728,7 +1900,14 @@ def dvs_vlan_manager(request, dvs): dvs.get_app_db()) -@pytest.yield_fixture(scope="class") +@pytest.fixture(scope="class") +def dvs_port_manager(request, dvs): + request.cls.dvs_port = dvs_port.DVSPort(dvs.get_asic_db(), + dvs.get_app_db(), + dvs.get_config_db()) + + +@pytest.fixture(scope="class") def dvs_mirror_manager(request, dvs): request.cls.dvs_mirror = dvs_mirror.DVSMirror(dvs.get_asic_db(), dvs.get_config_db(), @@ -1737,11 +1916,16 @@ def dvs_mirror_manager(request, dvs): dvs.get_app_db()) -@pytest.yield_fixture(scope="class") +@pytest.fixture(scope="class") def dvs_policer_manager(request, dvs): request.cls.dvs_policer = dvs_policer.DVSPolicer(dvs.get_asic_db(), dvs.get_config_db()) +@pytest.fixture(scope="class") +def dvs_hash_manager(request, dvs): + request.cls.dvs_hash = dvs_hash.DVSHash(dvs.get_asic_db(), + dvs.get_config_db()) + ##################### DPB fixtures ########################################### def create_dpb_config_file(dvs): cmd = "sonic-cfggen -j /etc/sonic/init_cfg.json -j /tmp/ports.json --print-data > /tmp/dpb_config_db.json" @@ -1755,7 +1939,8 @@ def remove_dpb_config_file(dvs): cmd = "mv /etc/sonic/config_db.json.bak /etc/sonic/config_db.json" dvs.runcmd(cmd) -@pytest.yield_fixture(scope="module") + +@pytest.fixture(scope="module") def dpb_setup_fixture(dvs): create_dpb_config_file(dvs) if dvs.vct is None: diff --git a/tests/dvslib/dvs_acl.py b/tests/dvslib/dvs_acl.py index 9111de7a8e..4315da3798 100644 --- a/tests/dvslib/dvs_acl.py +++ b/tests/dvslib/dvs_acl.py @@ -1,6 +1,6 @@ """Utilities for interacting with 
ACLs when writing VS tests.""" from typing import Callable, Dict, List - +from swsscommon import swsscommon class DVSAcl: """Manage ACL tables and rules on the virtual switch.""" @@ -16,6 +16,10 @@ class DVSAcl: ADB_ACL_TABLE_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE" ADB_ACL_GROUP_TABLE_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE_GROUP" ADB_ACL_GROUP_MEMBER_TABLE_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE_GROUP_MEMBER" + ADB_ACL_COUNTER_TABLE_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_ACL_COUNTER" + + STATE_DB_ACL_TABLE_TABLE_NAME = "ACL_TABLE_TABLE" + STATE_DB_ACL_RULE_TABLE_NAME = "ACL_RULE_TABLE" ADB_ACL_STAGE_LOOKUP = { "ingress": "SAI_ACL_STAGE_INGRESS", @@ -53,7 +57,8 @@ def create_acl_table_type( self, name: str, matches: List[str], - bpoint_types: List[str] + bpoint_types: List[str], + actions: List[str] ) -> None: """Create a new ACL table type in Config DB. @@ -61,10 +66,12 @@ def create_acl_table_type( name: The name for the new ACL table type. matches: A list of matches to use in ACL table. bpoint_types: A list of bind point types to use in ACL table. + actions: A list of actions to use in ACL table """ table_type_attrs = { "matches@": ",".join(matches), - "bind_points@": ",".join(bpoint_types) + "bind_points@": ",".join(bpoint_types), + "actions@": ",".join(actions) } self.config_db.create_entry(self.CDB_ACL_TABLE_TYPE_NAME, name, table_type_attrs) @@ -140,6 +147,19 @@ def remove_acl_table_type(self, name: str) -> None: """ self.config_db.delete_entry(self.CDB_ACL_TABLE_TYPE_NAME, name) + def get_acl_counter_ids(self, expected: int) -> List[str]: + """Get all of the ACL counter IDs in ASIC DB. + + This method will wait for the expected number of counters to exist, or fail. + + Args: + expected: The number of counters that are expected to be present in ASIC DB. + + Returns: + The list of ACL counter IDs in ASIC DB. 
+ """ + return self.asic_db.wait_for_n_keys(self.ADB_ACL_COUNTER_TABLE_NAME, expected) + def get_acl_table_ids(self, expected: int) -> List[str]: """Get all of the ACL table IDs in ASIC DB. @@ -292,6 +312,26 @@ def verify_acl_table_port_binding( self.verify_acl_table_group_members(acl_table_id, acl_table_group_ids, num_tables) + + def verify_acl_table_action_list( + self, + acl_table_id: str, + expected_action_list: List[str], + ) -> None: + """Verify that the ACL table has specified action list. + Args: + acl_table_id: The ACL table that is being checked. + expected_action_list: The expected action list set to the given ACL table. + """ + fvs = self.asic_db.wait_for_entry(self.ADB_ACL_TABLE_NAME, acl_table_id) + action_list_str = fvs.get('SAI_ACL_TABLE_ATTR_ACL_ACTION_TYPE_LIST') + action_count, actions = action_list_str.split(':') + action_list = actions.split(',') + assert (int(action_count) == len(action_list)) + for action in expected_action_list: + assert action in action_list + + def create_acl_rule( self, table_name: str, @@ -530,6 +570,39 @@ def verify_mirror_acl_rule( self._check_acl_entry_mirror_action(fvs, session_oid, stage) self._check_acl_entry_counters_map(acl_rule_id) + def verify_acl_rule_generic( + self, + sai_qualifiers: Dict[str, str], + acl_table_id: str = None, + acl_rule_id: str = None + ) -> None: + """Verify that an ACL rule has the correct ASIC DB representation. + + Args: + sai_qualifiers: The expected set of SAI qualifiers to be found in ASIC DB. + acl_table_id: A specific OID to check in ASIC DB. If left empty, this method + assumes that only one table exists in ASIC DB. + acl_rule_id: A specific OID to check in ASIC DB. If left empty, this method + assumes that only one rule exists in ASIC DB. 
+ """ + if not acl_table_id: + acl_table_id = self.get_acl_table_ids(1)[0] + + if not acl_rule_id: + acl_rule_id = self._get_acl_rule_id() + + entry = self.asic_db.wait_for_entry("ASIC_STATE:SAI_OBJECT_TYPE_ACL_ENTRY", acl_rule_id) + + for k, v in entry.items(): + if k == "SAI_ACL_ENTRY_ATTR_TABLE_ID": + assert v == acl_table_id + elif k == "SAI_ACL_ENTRY_ATTR_ADMIN_STATE": + assert v == "true" + elif k in sai_qualifiers: + assert sai_qualifiers[k](v) + else: + assert False, "Unknown SAI qualifier: key={}, value={}".format(k, v) + def verify_acl_rule_set( self, priorities: List[str], @@ -670,3 +743,43 @@ def _check_acl_entry_counters_map(self, acl_entry_oid: str): rule_to_counter_map = self.counters_db.get_entry("ACL_COUNTER_RULE_MAP", "") counter_to_rule_map = {v: k for k, v in rule_to_counter_map.items()} assert counter_oid in counter_to_rule_map + + def verify_acl_table_status( + self, + acl_table_name, + expected_status + ) -> None: + """Verify that the STATE_DB status of ACL table is as expected. + + Args: + acl_table_name: The name of ACL table to check + expected_status: The expected status in STATE_DB + """ + if expected_status: + fvs = self.state_db.wait_for_entry(self.STATE_DB_ACL_TABLE_TABLE_NAME, acl_table_name) + assert len(fvs) > 0 + assert (fvs['status'] == expected_status) + else: + self.state_db.wait_for_deleted_entry(self.STATE_DB_ACL_TABLE_TABLE_NAME, acl_table_name) + + def verify_acl_rule_status( + self, + acl_table_name, + acl_rule_name, + expected_status + ) -> None: + """Verify that the STATE_DB status of ACL rule is as expected. 
+ + Args: + acl_table_name: The name of ACL table to check + acl_rule_name: The name of ACL rule to check + expected_status: The expected status in STATE_DB + """ + key = acl_table_name + "|" + acl_rule_name + if expected_status: + fvs = self.state_db.wait_for_entry(self.STATE_DB_ACL_RULE_TABLE_NAME, key) + assert len(fvs) > 0 + assert (fvs['status'] == expected_status) + else: + self.state_db.wait_for_deleted_entry(self.STATE_DB_ACL_TABLE_TABLE_NAME, key) + diff --git a/tests/dvslib/dvs_common.py b/tests/dvslib/dvs_common.py index b2a09d5da7..0d81b4cf2e 100644 --- a/tests/dvslib/dvs_common.py +++ b/tests/dvslib/dvs_common.py @@ -17,7 +17,7 @@ class PollingConfig: """ polling_interval: float = 0.01 - timeout: float = 5.00 + timeout: float = 20.00 strict: bool = True def iterations(self) -> int: diff --git a/tests/dvslib/dvs_database.py b/tests/dvslib/dvs_database.py index f2657f7516..553c0d7710 100644 --- a/tests/dvslib/dvs_database.py +++ b/tests/dvslib/dvs_database.py @@ -6,6 +6,7 @@ """ from typing import Dict, List from swsscommon import swsscommon +from swsscommon.swsscommon import SonicDBConfig from dvslib.dvs_common import wait_for_result, PollingConfig @@ -21,6 +22,12 @@ def __init__(self, db_id: int, connector: str): redis (e.g. UNIX socket, TCP socket, etc.). """ self.db_connection = swsscommon.DBConnector(db_id, connector, 0) + self._separator = SonicDBConfig.getSeparator(self.db_connection) + + @property + def separator(self) -> str: + """Get DB separator.""" + return self._separator def create_entry(self, table_name: str, key: str, entry: Dict[str, str]) -> None: """Add the mapping {`key` -> `entry`} to the specified table. 
@@ -34,6 +41,24 @@ def create_entry(self, table_name: str, key: str, entry: Dict[str, str]) -> None formatted_entry = swsscommon.FieldValuePairs(list(entry.items())) table.set(key, formatted_entry) + def set_entry(self, table_name: str, key: str, entry: Dict[str, str]) -> None: + """Set entry of an existing key in the specified table. + + Args: + table_name: The name of the table. + key: The key that needs to be updated. + entry: A set of key-value pairs to be updated. + """ + table = swsscommon.Table(self.db_connection, table_name) + (status, fv_pairs) = table.get(key) + + formatted_entry = swsscommon.FieldValuePairs(list(entry.items())) + table.set(key, formatted_entry) + + if status: + for f in [ k for k, v in dict(fv_pairs).items() if k not in entry.keys() ]: + table.hdel(key, f) + def update_entry(self, table_name: str, key: str, entry: Dict[str, str]) -> None: """Update entry of an existing key in the specified table. @@ -74,6 +99,17 @@ def delete_entry(self, table_name: str, key: str) -> None: table = swsscommon.Table(self.db_connection, table_name) table._del(key) # pylint: disable=protected-access + def delete_field(self, table_name: str, key: str, field: str) -> None: + """Remove a field from an entry stored at `key` in the specified table. + + Args: + table_name: The name of the table where the entry is being removed. + key: The key that maps to the entry being removed. + field: The field that needs to be removed + """ + table = swsscommon.Table(self.db_connection, table_name) + table.hdel(key, field) + def get_keys(self, table_name: str) -> List[str]: """Get all of the keys stored in the specified table. 
diff --git a/tests/dvslib/dvs_hash.py b/tests/dvslib/dvs_hash.py new file mode 100644 index 0000000000..5ac896962c --- /dev/null +++ b/tests/dvslib/dvs_hash.py @@ -0,0 +1,80 @@ +"""Utilities for interacting with HASH objects when writing VS tests.""" +from typing import Dict, List + + +class DVSHash: + """Manage hash objects on the virtual switch.""" + + CDB_SWITCH_HASH = "SWITCH_HASH" + KEY_SWITCH_HASH_GLOBAL = "GLOBAL" + + ADB_HASH = "ASIC_STATE:SAI_OBJECT_TYPE_HASH" + + def __init__(self, asic_db, config_db): + """Create a new DVS hash manager.""" + self.asic_db = asic_db + self.config_db = config_db + + def update_switch_hash( + self, + qualifiers: Dict[str, str] + ) -> None: + """Update switch hash global in Config DB.""" + self.config_db.update_entry(self.CDB_SWITCH_HASH, self.KEY_SWITCH_HASH_GLOBAL, qualifiers) + + def get_hash_ids( + self, + expected: int = None + ) -> List[str]: + """Get all of the hash ids in ASIC DB. + + Args: + expected: The number of hash ids that are expected to be present in ASIC DB. + + Returns: + The list of hash ids in ASIC DB. + """ + if expected is None: + return self.asic_db.get_keys(self.ADB_HASH) + + num_keys = len(self.asic_db.default_hash_keys) + expected + keys = self.asic_db.wait_for_n_keys(self.ADB_HASH, num_keys) + + for k in self.asic_db.default_hash_keys: + assert k in keys + + return [k for k in keys if k not in self.asic_db.default_hash_keys] + + def verify_hash_count( + self, + expected: int + ) -> None: + """Verify that there are N hash objects in ASIC DB. + + Args: + expected: The number of hash ids that are expected to be present in ASIC DB. + """ + self.get_hash_ids(expected) + + def verify_hash_generic( + self, + sai_hash_id: str, + sai_qualifiers: Dict[str, str] + ) -> None: + """Verify that hash object has correct ASIC DB representation. + + Args: + sai_hash_id: The specific hash id to check in ASIC DB. + sai_qualifiers: The expected set of SAI qualifiers to be found in ASIC DB. 
+ """ + entry = self.asic_db.wait_for_entry(self.ADB_HASH, sai_hash_id) + + for k, v in entry.items(): + if k == "NULL": + continue + elif k in sai_qualifiers: + if k == "SAI_HASH_ATTR_NATIVE_HASH_FIELD_LIST": + hfList = v[v.index(":")+1:].split(",") + assert set(sai_qualifiers[k]) == set(hfList) + else: + assert False, "Unknown SAI qualifier: key={}, value={}".format(k, v) diff --git a/tests/dvslib/dvs_lag.py b/tests/dvslib/dvs_lag.py index 06dd0c4217..3e8c27f11d 100644 --- a/tests/dvslib/dvs_lag.py +++ b/tests/dvslib/dvs_lag.py @@ -1,11 +1,14 @@ +import json + class DVSLag(object): - def __init__(self, adb, cdb): + def __init__(self, adb, cdb, dvs): self.asic_db = adb self.config_db = cdb + self.dvs = dvs - def create_port_channel(self, lag_id, admin_status="up", mtu="1500"): + def create_port_channel(self, lag_id, admin_status="up", mtu="1500", fast_rate=False): lag = "PortChannel{}".format(lag_id) - lag_entry = {"admin_status": admin_status, "mtu": mtu} + lag_entry = {"admin_status": admin_status, "mtu": mtu, "fast_rate": str(fast_rate).lower()} self.config_db.create_entry("PORTCHANNEL", lag, lag_entry) def remove_port_channel(self, lag_id): @@ -27,3 +30,12 @@ def get_and_verify_port_channel_members(self, expected_num): def get_and_verify_port_channel(self, expected_num): return self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_LAG", expected_num) + def dump_portchannel(self, lag_id): + lag = "PortChannel{}".format(lag_id) + output = self.dvs.runcmd("teamdctl {} state dump".format(lag))[1] + port_state_dump = json.loads(output) + return port_state_dump + + def get_and_verify_port_channel_fast_rate(self, lag_id, fast_rate): + assert self.dump_portchannel(lag_id)["runner"]["fast_rate"] == fast_rate + diff --git a/tests/dvslib/dvs_pbh.py b/tests/dvslib/dvs_pbh.py index 79a58681a9..2caf059adc 100644 --- a/tests/dvslib/dvs_pbh.py +++ b/tests/dvslib/dvs_pbh.py @@ -10,6 +10,9 @@ class DVSPbh: CDB_PBH_HASH = "PBH_HASH" CDB_PBH_HASH_FIELD = "PBH_HASH_FIELD" + 
ADB_PBH_HASH = "ASIC_STATE:SAI_OBJECT_TYPE_HASH" + ADB_PBH_HASH_FIELD = "ASIC_STATE:SAI_OBJECT_TYPE_FINE_GRAINED_HASH_FIELD" + def __init__(self, asic_db, config_db): """Create a new DVS PBH Manager.""" self.asic_db = asic_db @@ -60,6 +63,27 @@ def create_pbh_rule( self.config_db.create_entry(self.CDB_PBH_RULE, "{}|{}".format(table_name, rule_name), attr_dict) + def update_pbh_rule( + self, + table_name: str, + rule_name: str, + priority: str, + qualifiers: Dict[str, str], + hash_name: str, + packet_action: str = "SET_ECMP_HASH", + flow_counter: str = "DISABLED" + ) -> None: + """Update PBH rule in Config DB.""" + attr_dict = { + "priority": priority, + "hash": hash_name, + "packet_action": packet_action, + "flow_counter": flow_counter, + **qualifiers + } + + self.config_db.set_entry(self.CDB_PBH_RULE, "{}|{}".format(table_name, rule_name), attr_dict) + def remove_pbh_rule( self, table_name: str, @@ -87,13 +111,6 @@ def remove_pbh_hash( """Remove PBH hash from Config DB.""" self.config_db.delete_entry(self.CDB_PBH_HASH, hash_name) - def verify_pbh_hash_count( - self, - expected: int - ) -> None: - """Verify that there are N hash objects in ASIC DB.""" - self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_HASH", expected) - def create_pbh_hash_field( self, hash_field_name: str, @@ -124,4 +141,4 @@ def verify_pbh_hash_field_count( expected: int ) -> None: """Verify that there are N hash field objects in ASIC DB.""" - self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_FINE_GRAINED_HASH_FIELD", expected) + self.asic_db.wait_for_n_keys(self.ADB_PBH_HASH_FIELD, expected) diff --git a/tests/dvslib/dvs_port.py b/tests/dvslib/dvs_port.py new file mode 100644 index 0000000000..330245099c --- /dev/null +++ b/tests/dvslib/dvs_port.py @@ -0,0 +1,95 @@ +"""Utilities for interacting with PORT objects when writing VS tests.""" +from typing import Dict, List +from swsscommon import swsscommon + + +class DVSPort(object): + """Manage PORT objects on the virtual switch.""" 
+ ASIC_DB = swsscommon.ASIC_DB + APPL_DB = swsscommon.APPL_DB + + CFGDB_PORT = "PORT" + APPDB_PORT = "PORT_TABLE" + ASICDB_PORT = "ASIC_STATE:SAI_OBJECT_TYPE_PORT" + + def __init__(self, asicdb, appdb, cfgdb): + self.asic_db = asicdb + self.app_db = appdb + self.config_db = cfgdb + + def create_port_generic( + self, + port_name: str, + lanes: str, + speed: str, + qualifiers: Dict[str, str] = {} + ) -> None: + """Create PORT in Config DB.""" + attr_dict = { + "lanes": lanes, + "speed": speed, + **qualifiers + } + + self.config_db.create_entry(self.CFGDB_PORT, port_name, attr_dict) + + def remove_port_generic( + self, + port_name: str + )-> None: + """Remove PORT from Config DB.""" + self.config_db.delete_entry(self.CFGDB_PORT, port_name) + + def remove_port(self, port_name): + self.config_db.delete_field("CABLE_LENGTH", "AZURE", port_name) + + port_bufferpg_keys = self.config_db.get_keys("BUFFER_PG|%s" % port_name) + for key in port_bufferpg_keys: + self.config_db.delete_entry("BUFFER_PG|%s|%s" % (port_name, key), "") + + port_bufferqueue_keys = self.config_db.get_keys("BUFFER_QUEUE|%s" % port_name) + for key in port_bufferqueue_keys: + self.config_db.delete_entry("BUFFER_QUEUE|%s|%s" % (port_name, key), "") + + self.config_db.delete_entry("BREAKOUT_CFG|%s" % port_name, "") + self.config_db.delete_entry("INTERFACE|%s" % port_name, "") + self.config_db.delete_entry("PORT", port_name) + + def update_port( + self, + port_name: str, + attr_dict: Dict[str, str] + ) -> None: + """Update PORT in Config DB.""" + self.config_db.update_entry(self.CFGDB_PORT, port_name, attr_dict) + + def get_port_ids( + self, + expected: int = None, + dbid: int = swsscommon.ASIC_DB + ) -> List[str]: + """Get all of the PORT objects in ASIC/APP DB.""" + conn = None + table = None + + if dbid == swsscommon.ASIC_DB: + conn = self.asic_db + table = self.ASICDB_PORT + elif dbid == swsscommon.APPL_DB: + conn = self.app_db + table = self.APPDB_PORT + else: + raise RuntimeError("Interface not 
implemented") + + if expected is None: + return conn.get_keys(table) + + return conn.wait_for_n_keys(table, expected) + + def verify_port_count( + self, + expected: int, + dbid: int = swsscommon.ASIC_DB + ) -> None: + """Verify that there are N PORT objects in ASIC/APP DB.""" + self.get_port_ids(expected, dbid) diff --git a/tests/dvslib/dvs_vlan.py b/tests/dvslib/dvs_vlan.py index 5ebbf51d45..418f3be666 100644 --- a/tests/dvslib/dvs_vlan.py +++ b/tests/dvslib/dvs_vlan.py @@ -13,6 +13,17 @@ def create_vlan(self, vlanID): vlan_entry = {"vlanid": vlanID} self.config_db.create_entry("VLAN", vlan, vlan_entry) + def create_vlan_interface(self, vlanID): + vlan = "Vlan{}".format(vlanID) + vlan_intf_entry = {} + self.config_db.create_entry("VLAN_INTERFACE", vlan, vlan_intf_entry) + + def set_vlan_intf_property(self, vlanID, property, value): + vlan_key = "Vlan{}".format(vlanID) + vlan_entry = self.config_db.get_entry("VLAN_INTERFACE", vlan_key) + vlan_entry[property] = value + self.config_db.update_entry("VLAN_INTERFACE", vlan_key, vlan_entry) + def create_vlan_hostif(self, vlan, hostif_name): vlan = "Vlan{}".format(vlan) vlan_entry = {"vlanid": vlan, "host_ifname": hostif_name} @@ -35,6 +46,10 @@ def remove_vlan_member(self, vlanID, interface): member = "Vlan{}|{}".format(vlanID, interface) self.config_db.delete_entry("VLAN_MEMBER", member) + def remove_vlan_interface(self, vlanID): + vlan = "Vlan{}".format(vlanID) + self.config_db.delete_entry("VLAN_INTERFACE", vlan) + def check_app_db_vlan_fields(self, fvs, admin_status="up", mtu="9100"): assert fvs.get("admin_status") == admin_status assert fvs.get("mtu") == mtu @@ -57,7 +72,7 @@ def get_and_verify_vlan_ids(self, polling_config=PollingConfig()): vlan_entries = self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_VLAN", expected_num + 1, - polling_config) + polling_config=polling_config) return [v for v in vlan_entries if v != self.asic_db.default_vlan_id] @@ -91,10 +106,11 @@ def verify_vlan_hostif(self, 
hostif_name, hostifs_oid, vlan_oid): assert hostif.get("SAI_HOSTIF_ATTR_TYPE") == "SAI_HOSTIF_TYPE_NETDEV" assert hostif.get("SAI_HOSTIF_ATTR_OBJ_ID") == vlan_oid assert hostif.get("SAI_HOSTIF_ATTR_NAME") == hostif_name + assert hostif.get("SAI_HOSTIF_ATTR_QUEUE") == "7" def get_and_verify_vlan_hostif_ids(self, expected_num, polling_config=PollingConfig()): hostif_entries = self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF", expected_num + 1, - polling_config) + polling_config=polling_config) return hostif_entries diff --git a/tests/evpn_tunnel.py b/tests/evpn_tunnel.py index 5002a1281c..346064e004 100644 --- a/tests/evpn_tunnel.py +++ b/tests/evpn_tunnel.py @@ -485,7 +485,23 @@ def check_vxlan_tunnel_map_entry(self, dvs, tunnel_name, vidlist, vnidlist): (exitcode, out) = dvs.runcmd(iplinkcmd) assert exitcode == 0, "Kernel device not created" - def check_vxlan_sip_tunnel_delete(self, dvs, tunnel_name, sip): + def check_vxlan_tunnel_map_entry_removed(self, dvs, tunnel_name, vidlist, vnidlist): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + expected_attributes_1 = { + 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP_TYPE': 'SAI_TUNNEL_MAP_TYPE_VNI_TO_VLAN_ID', + 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP': self.tunnel_map_map[tunnel_name][0], + 'SAI_TUNNEL_MAP_ENTRY_ATTR_VLAN_ID_VALUE': vidlist[0], + 'SAI_TUNNEL_MAP_ENTRY_ATTR_VNI_ID_KEY': vnidlist[0], + } + + for x in range(len(vidlist)): + expected_attributes_1['SAI_TUNNEL_MAP_ENTRY_ATTR_VLAN_ID_VALUE'] = vidlist[x] + expected_attributes_1['SAI_TUNNEL_MAP_ENTRY_ATTR_VNI_ID_KEY'] = vnidlist[x] + ret = self.helper.get_key_with_attr(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, expected_attributes_1) + assert len(ret) == 0, "SIP TunnelMap entry not removed" + + def check_vxlan_sip_tunnel_delete(self, dvs, tunnel_name, sip, ignore_bp = True): asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) @@ -511,36 
+527,36 @@ def check_vxlan_sip_tunnel_delete(self, dvs, tunnel_name, sip): status, fvs = tbl.get(self.tunnel_map_map[tunnel_name][3]) assert status == False, "SIP Tunnel mapper3 not deleted from ASIC_DB" - tbl = swsscommon.Table(asic_db, self.ASIC_BRIDGE_PORT) - status, fvs = tbl.get(self.bridgeport_map[sip]) - assert status == False, "Tunnel bridgeport entry not deleted" + if not ignore_bp: + tbl = swsscommon.Table(asic_db, self.ASIC_BRIDGE_PORT) + status, fvs = tbl.get(self.bridgeport_map[sip]) + assert status == False, "Tunnel bridgeport entry not deleted" - def check_vxlan_sip_tunnel(self, dvs, tunnel_name, src_ip, vidlist, vnidlist, dst_ip = '0.0.0.0', skip_dst_ip = 'True'): + def check_vxlan_sip_tunnel(self, dvs, tunnel_name, src_ip, vidlist, vnidlist, + dst_ip = '0.0.0.0', skip_dst_ip = 'True', ignore_bp = True, + tunnel_map_entry_count = 3): asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) tunnel_map_id = self.helper.get_created_entries(asic_db, self.ASIC_TUNNEL_MAP, self.tunnel_map_ids, 4) tunnel_id = self.helper.get_created_entry(asic_db, self.ASIC_TUNNEL_TABLE, self.tunnel_ids) tunnel_term_id = self.helper.get_created_entry(asic_db, self.ASIC_TUNNEL_TERM_ENTRY, self.tunnel_term_ids) - tunnel_map_entry_id = self.helper.get_created_entries(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, self.tunnel_map_entry_ids, 3) # check that the vxlan tunnel termination are there assert self.helper.how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP) == (len(self.tunnel_map_ids) + 4), "The TUNNEL_MAP wasn't created" - assert self.helper.how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP_ENTRY) == (len(self.tunnel_map_entry_ids) + 3), "The TUNNEL_MAP_ENTRY is created" + assert self.helper.how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP_ENTRY) == (len(self.tunnel_map_entry_ids) + tunnel_map_entry_count), "The TUNNEL_MAP_ENTRY is created" assert 
self.helper.how_many_entries_exist(asic_db, self.ASIC_TUNNEL_TABLE) == (len(self.tunnel_ids) + 1), "The TUNNEL wasn't created" assert self.helper.how_many_entries_exist(asic_db, self.ASIC_TUNNEL_TERM_ENTRY) == (len(self.tunnel_term_ids) + 1), "The TUNNEL_TERM_TABLE_ENTRY wasm't created" - self.helper.check_object(asic_db, self.ASIC_TUNNEL_MAP, tunnel_map_id[2], - { - 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VNI_TO_VIRTUAL_ROUTER_ID', - } - ) + expected_attributes_1 = {} + expected_attributes_1['SAI_TUNNEL_MAP_ATTR_TYPE'] = 'SAI_TUNNEL_MAP_TYPE_VNI_TO_VIRTUAL_ROUTER_ID' + ret = self.helper.get_key_with_attr(asic_db, self.ASIC_TUNNEL_MAP, expected_attributes_1) + assert len(ret) == 1, "Unexpected number of tunnel maps created for type SAI_TUNNEL_MAP_TYPE_VNI_TO_VIRTUAL_ROUTER_ID" + + expected_attributes_1['SAI_TUNNEL_MAP_ATTR_TYPE'] = 'SAI_TUNNEL_MAP_TYPE_VIRTUAL_ROUTER_ID_TO_VNI' + ret = self.helper.get_key_with_attr(asic_db, self.ASIC_TUNNEL_MAP, expected_attributes_1) + assert len(ret) == 1, "Unexpected number of tunnel maps created for type SAI_TUNNEL_MAP_TYPE_VIRTUAL_ROUTER_ID_TO_VNI" - self.helper.check_object(asic_db, self.ASIC_TUNNEL_MAP, tunnel_map_id[3], - { - 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VIRTUAL_ROUTER_ID_TO_VNI', - } - ) decapstr = '2:' + tunnel_map_id[0] + ',' + tunnel_map_id[2] encapstr = '2:' + tunnel_map_id[1] + ',' + tunnel_map_id[3] @@ -571,7 +587,6 @@ def check_vxlan_sip_tunnel(self, dvs, tunnel_name, src_ip, vidlist, vnidlist, ds expected_attributes_1 = { 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP_TYPE': 'SAI_TUNNEL_MAP_TYPE_VNI_TO_VLAN_ID', - 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP': tunnel_map_id[0], 'SAI_TUNNEL_MAP_ENTRY_ATTR_VLAN_ID_VALUE': vidlist[0], 'SAI_TUNNEL_MAP_ENTRY_ATTR_VNI_ID_KEY': vnidlist[0], } @@ -579,7 +594,8 @@ def check_vxlan_sip_tunnel(self, dvs, tunnel_name, src_ip, vidlist, vnidlist, ds for x in range(len(vidlist)): expected_attributes_1['SAI_TUNNEL_MAP_ENTRY_ATTR_VLAN_ID_VALUE'] = vidlist[x] 
expected_attributes_1['SAI_TUNNEL_MAP_ENTRY_ATTR_VNI_ID_KEY'] = vnidlist[x] - self.helper.check_object(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, tunnel_map_entry_id[x], expected_attributes_1) + self.helper.get_key_with_attr(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, expected_attributes_1) + assert len(ret) == 1, "Unexpected number of tunnel map entries created for VLAN to VNI mapping" expected_siptnl_attributes = { 'src_ip': src_ip, @@ -593,16 +609,18 @@ def check_vxlan_sip_tunnel(self, dvs, tunnel_name, src_ip, vidlist, vnidlist, ds assert len(ret) == 1, "More than 1 Tunn statetable entry created" self.tunnel_appdb[tunnel_name] = ret[0] - expected_bridgeport_attributes = { - 'SAI_BRIDGE_PORT_ATTR_TYPE': 'SAI_BRIDGE_PORT_TYPE_TUNNEL', - 'SAI_BRIDGE_PORT_ATTR_TUNNEL_ID': tunnel_id, - 'SAI_BRIDGE_PORT_ATTR_FDB_LEARNING_MODE': 'SAI_BRIDGE_PORT_FDB_LEARNING_MODE_DISABLE', - 'SAI_BRIDGE_PORT_ATTR_ADMIN_STATE': 'true', - } - ret = self.helper.get_key_with_attr(asic_db, self.ASIC_BRIDGE_PORT, expected_bridgeport_attributes) - assert len(ret) > 0, "Bridgeport entry not created" - assert len(ret) == 1, "More than 1 bridgeport entry created" - self.bridgeport_map[src_ip] = ret[0] + if not ignore_bp: + expected_bridgeport_attributes = { + 'SAI_BRIDGE_PORT_ATTR_TYPE': 'SAI_BRIDGE_PORT_TYPE_TUNNEL', + 'SAI_BRIDGE_PORT_ATTR_TUNNEL_ID': tunnel_id, + 'SAI_BRIDGE_PORT_ATTR_FDB_LEARNING_MODE': 'SAI_BRIDGE_PORT_FDB_LEARNING_MODE_DISABLE', + 'SAI_BRIDGE_PORT_ATTR_ADMIN_STATE': 'true', + } + ret = self.helper.get_key_with_attr(asic_db, self.ASIC_BRIDGE_PORT, expected_bridgeport_attributes) + assert len(ret) > 0, "Bridgeport entry not created" + assert len(ret) == 1, "More than 1 bridgeport entry created" + self.bridgeport_map[src_ip] = ret[0] + self.tunnel_map_ids.update(tunnel_map_id) self.tunnel_ids.add(tunnel_id) self.tunnel_term_ids.add(tunnel_term_id) @@ -679,6 +697,18 @@ def check_vxlan_dip_tunnel(self, dvs, vtep_name, src_ip, dip): self.bridgeport_map[dip] = ret[0] + def 
check_vxlan_dip_tunnel_not_created(self, dvs, vtep_name, src_ip, dip): + state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + + expected_state_attributes = { + 'src_ip': src_ip, + 'dst_ip': dip, + 'tnl_src': 'EVPN', + } + + ret = self.helper.get_key_with_attr(state_db, 'VXLAN_TUNNEL_TABLE', expected_state_attributes) + assert len(ret) == 0, "Tunnel Statetable entry created" + def check_vlan_extension_delete(self, dvs, vlan_name, dip): asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) @@ -693,6 +723,17 @@ def check_vlan_extension_delete_p2mp(self, dvs, vlan_name, sip, dip): status, fvs = tbl.get(self.l2mcgroup_member_map[dip+vlan_name]) assert status == False, "L2MC Group Member entry not deleted" + def check_vlan_obj(self, dvs, vlan_name): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + expected_vlan_attributes = { + 'SAI_VLAN_ATTR_VLAN_ID': vlan_name, + } + ret = self.helper.get_key_with_attr(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_VLAN', expected_vlan_attributes) + assert len(ret) > 0, "VLAN entry not created" + assert len(ret) == 1, "More than 1 VLAN entry created" + + self.vlan_id_map[vlan_name] = ret[0] + def check_vlan_extension(self, dvs, vlan_name, dip): asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) expected_vlan_attributes = { @@ -713,6 +754,25 @@ def check_vlan_extension(self, dvs, vlan_name, dip): assert len(ret) == 1, "More than 1 VLAN member created" self.vlan_member_map[dip+vlan_name] = ret[0] + def check_vlan_extension_not_created(self, dvs, vlan_name, dip): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + expected_vlan_attributes = { + 'SAI_VLAN_ATTR_VLAN_ID': vlan_name, + } + ret = self.helper.get_key_with_attr(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_VLAN', expected_vlan_attributes) + assert len(ret) > 0, "VLAN entry not created" + assert len(ret) == 1, "More than 1 VLAN entry created" + + self.vlan_id_map[vlan_name] = ret[0] + + 
if dip in self.bridgeport_map: + expected_vlan_member_attributes = { + 'SAI_VLAN_MEMBER_ATTR_VLAN_ID': self.vlan_id_map[vlan_name], + 'SAI_VLAN_MEMBER_ATTR_BRIDGE_PORT_ID': self.bridgeport_map[dip], + } + ret = self.helper.get_key_with_attr(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER', expected_vlan_member_attributes) + assert len(ret) == 0, "VLAN member created" + def check_vlan_extension_p2mp(self, dvs, vlan_name, sip, dip): asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) tbl = swsscommon.Table(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_VLAN') @@ -759,6 +819,32 @@ def check_vlan_extension_p2mp(self, dvs, vlan_name, sip, dip): assert len(ret) == 1, "More than 1 L2MC group member created" self.l2mcgroup_member_map[dip+vlan_name] = ret[0] + def check_vlan_extension_not_created_p2mp(self, dvs, vlan_name, sip, dip): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_VLAN') + expected_vlan_attributes = { + 'SAI_VLAN_ATTR_VLAN_ID': vlan_name, + } + ret = self.helper.get_key_with_attr(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_VLAN', expected_vlan_attributes) + assert len(ret) > 0, "VLAN entry not created" + assert len(ret) == 1, "More than 1 VLAN entry created" + + self.vlan_id_map[vlan_name] = ret[0] + status, fvs = tbl.get(self.vlan_id_map[vlan_name]) + + print(fvs) + + uuc_flood_type = None + bc_flood_type = None + uuc_flood_group = None + bc_flood_group = None + + for attr,value in fvs: + assert attr != 'SAI_VLAN_ATTR_UNKNOWN_UNICAST_FLOOD_CONTROL_TYPE', "Unknown unicast flood control type is set" + assert attr != 'SAI_VLAN_ATTR_BROADCAST_FLOOD_CONTROL_TYPE', "Broadcast flood control type is set" + assert attr != 'SAI_VLAN_ATTR_UNKNOWN_UNICAST_FLOOD_GROUP', "Unknown unicast flood group is set" + assert attr != 'SAI_VLAN_ATTR_UNKNOWN_UNICAST_FLOOD_CONTROL_TYPE', "Broadcast flood group is set" + def check_vxlan_tunnel_entry(self, dvs, tunnel_name, vnet_name, vni_id): 
asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) @@ -797,37 +883,33 @@ def check_vxlan_tunnel_entry(self, dvs, tunnel_name, vnet_name, vni_id): def check_vxlan_tunnel_vrf_map_entry(self, dvs, tunnel_name, vrf_name, vni_id): asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) - if (self.tunnel_map_map.get(tunnel_name) is None): - tunnel_map_id = self.helper.get_created_entries(asic_db, self.ASIC_TUNNEL_MAP, self.tunnel_map_ids, 4) - else: - tunnel_map_id = self.tunnel_map_map[tunnel_name] - - tunnel_map_entry_id = self.helper.get_created_entries(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, self.tunnel_map_entry_ids, 3) + tunnel_map_entry_id = self.helper.get_created_entries(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, self.tunnel_map_entry_ids, 2) # check that the vxlan tunnel termination are there - assert self.helper.how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP_ENTRY) == (len(self.tunnel_map_entry_ids) + 3), "The TUNNEL_MAP_ENTRY is created too early" + assert self.helper.how_many_entries_exist(asic_db, self.ASIC_TUNNEL_MAP_ENTRY) == (len(self.tunnel_map_entry_ids) + 2), "The TUNNEL_MAP_ENTRY is created too early" - self.helper.check_object(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, tunnel_map_entry_id[1], + ret = self.helper.get_key_with_attr(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, { 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP_TYPE': 'SAI_TUNNEL_MAP_TYPE_VIRTUAL_ROUTER_ID_TO_VNI', - 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP': tunnel_map_id[3], 'SAI_TUNNEL_MAP_ENTRY_ATTR_VIRTUAL_ROUTER_ID_KEY': self.vr_map[vrf_name].get('ing'), 'SAI_TUNNEL_MAP_ENTRY_ATTR_VNI_ID_VALUE': vni_id, } ) - self.tunnel_map_vrf_entry_ids.update(tunnel_map_entry_id[1]) + assert len(ret) == 1, "Invalid number of tunnel map entries for SAI_TUNNEL_MAP_TYPE_VIRTUAL_ROUTER_ID_TO_VNI" + + self.tunnel_map_vrf_entry_ids.update(ret[0]) - self.helper.check_object(asic_db, self.ASIC_TUNNEL_MAP_ENTRY, tunnel_map_entry_id[2], + ret = self.helper.get_key_with_attr(asic_db, 
self.ASIC_TUNNEL_MAP_ENTRY, { 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP_TYPE': 'SAI_TUNNEL_MAP_TYPE_VNI_TO_VIRTUAL_ROUTER_ID', - 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP': tunnel_map_id[2], 'SAI_TUNNEL_MAP_ENTRY_ATTR_VNI_ID_KEY': vni_id, 'SAI_TUNNEL_MAP_ENTRY_ATTR_VIRTUAL_ROUTER_ID_VALUE': self.vr_map[vrf_name].get('egr'), } ) - self.tunnel_map_vrf_entry_ids.update(tunnel_map_entry_id[2]) + assert len(ret) == 1, "Invalid number of tunnel map entries for SAI_TUNNEL_MAP_TYPE_VNI_TO_VIRTUAL_ROUTER_ID" + self.tunnel_map_vrf_entry_ids.update(ret[0]) self.tunnel_map_entry_ids.update(tunnel_map_entry_id) def check_vxlan_tunnel_vrf_map_entry_remove(self, dvs, tunnel_name, vrf_name, vni_id): @@ -934,6 +1016,27 @@ def check_vrf_routes(self, dvs, prefix, vrf_name, endpoint, tunnel, mac="", vni= return True + def check_vrf_routes_absence(self, dvs, prefix, vrf_name, endpoint, tunnel, mac="", vni=0, no_update=0): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + vr_ids = self.vrf_route_ids(dvs, vrf_name) + count = len(vr_ids) + + # Check routes in ingress VRF + expected_attr = { + "SAI_NEXT_HOP_ATTR_TYPE": "SAI_NEXT_HOP_TYPE_TUNNEL_ENCAP", + "SAI_NEXT_HOP_ATTR_IP": endpoint, + "SAI_NEXT_HOP_ATTR_TUNNEL_ID": self.tunnel[tunnel], + } + + if vni: + expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_VNI': vni}) + + if mac: + expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_MAC': mac}) + + self.helper.get_created_entries(asic_db, self.ASIC_NEXT_HOP, self.nhops, 0) + def check_vrf_routes_ecmp(self, dvs, prefix, vrf_name, tunnel, nh_count, no_update=0): asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) @@ -1053,7 +1156,7 @@ def check_del_vrf_routes(self, dvs, prefix, vrf_name): assert found_route self.helper.check_deleted_object(asic_db, self.ASIC_ROUTE_ENTRY, self.route_id[vrf_name + ":" + prefix]) - self.route_id.clear() + del self.route_id[vrf_name + ":" + prefix] return True diff --git a/tests/gcov_support.sh b/tests/gcov_support.sh index 
4200e20813..c7ddddb961 100755 --- a/tests/gcov_support.sh +++ b/tests/gcov_support.sh @@ -65,16 +65,6 @@ list_lcov_path() echo "Start searching .gcda files..." exec 4>$TMP_FILE find_gcda_file=`find ${gcda_dir} -name *.gcda` - echo "Start rm unused gcno files for speed up" - find_gcno_file=`find ${gcda_dir} -name *.gcno` - for line in ${find_gcno_file} - do - temp_gcda=${line/.gcno/$gcdastr} - if [ ! -f ${temp_gcda} ]; then - rm ${line} - fi - done - echo ${find_gcda_file} RESULT=${find_gcda_file} echo "$RESULT" >&4 @@ -93,8 +83,7 @@ lcov_genhtml_report() do local fullpath=$line local infoname=${INFO_FILE_PREFIX}${fullpath##*/}.info - htmldirname=${HTML_FILE_PREFIX}${fullpath##*/} - + echo ${fullpath} pushd ${fullpath} @@ -102,7 +91,7 @@ lcov_genhtml_report() echo "gcda count: $GCDA_COUNT" if [ $GCDA_COUNT -ge 1 ]; then echo "Executing lcov -c -d . -o ${infoname}" - lcov -c -d . -o ${infoname} + lcov -c -d . -o ${infoname} &>/dev/null if [ "$?" != "0" ]; then echo "lcov fail!" rm ${infoname} @@ -112,12 +101,6 @@ lcov_genhtml_report() done < ${gcda_file_range}/gcda_dir_list.txt } -rm_unused_gcno() -{ - cur_dir = $1/ - -} - # generate html reports for all eligible submodules lcov_genhtml_all() { @@ -143,10 +126,16 @@ lcov_merge_all() done < infolist lcov --extract total.info '*sonic-gcov/*' -o total.info + + # Remove unit test files. + lcov -o total.info -r total.info "*sonic-gcov/common_work/gcov/orchagent/p4orch/tests/*" + lcov -o total.info -r total.info "*sonic-gcov/common_work/gcov/tests/*" + cp $1/lcov_cobertura.py $1/common_work/gcov/ python $1/common_work/gcov/lcov_cobertura.py total.info -o coverage.xml - sed -i "s#common_work/#$1/common_work/#" coverage.xml + sed -i "s#common_work/gcov/##" coverage.xml + sed -i "s#common_work.gcov.##" coverage.xml cd gcov_output/ if [ ! 
-d ${ALLMERGE_DIR} ]; then @@ -196,12 +185,6 @@ gcov_set_environment() echo "cat list" cat ${CONTAINER_LIST} - - cd ${build_dir}/gcov_tmp/ - tar -zcvf sonic-gcov.tar.gz sonic-gcov/ - rm -rf sonic-gcov - cd ../../ - rm ${CONTAINER_LIST} } gcov_merge_info() @@ -219,7 +202,8 @@ gcov_support_generate_report() mkdir -p gcov_output/info #for same code path - mkdir -p common_work + mkdir -p common_work/gcov + tar -zxvf swss.tar.gz -C common_work/gcov cat container_dir_list while read line @@ -228,7 +212,6 @@ gcov_support_generate_report() echo ${container_id} cp -rf ${container_id}/* common_work - tar -zxvf swss.tar.gz -C common_work/gcov cd common_work/gcov/ find -name gcda*.tar.gz > tmp_gcda.txt while read LINE ; do @@ -238,13 +221,16 @@ gcov_support_generate_report() done < tmp_gcda.txt rm tmp_gcda.txt - find -name gcno*.tar.gz > tmp_gcno.txt - while read LINE ; do - echo ${LINE} - echo ${LINE%%.*} - tar -zxvf ${LINE} - done < tmp_gcno.txt - rm tmp_gcno.txt + gcno_count=`find -name "*.gcno" | wc -l` + if [ ${gcno_count} -lt 1 ]; then + find -name gcno*.tar.gz > tmp_gcno.txt + while read LINE ; do + echo ${LINE} + echo ${LINE%%.*} + tar -zxvf ${LINE} + done < tmp_gcno.txt + rm tmp_gcno.txt + fi cd - ls -lh common_work/* @@ -253,22 +239,21 @@ gcov_support_generate_report() echo "###lcov operation fail.." return 0 fi - cd common_work - find . -name "*.gcda" -o -name "*.gcno" -o -name "*.gz" -o -name "*.cpp" -o -name "*.h"| xargs rm -rf - cd ../ - cp -rf common_work/* ${container_id}/* - cd ${container_id} - find . -name "*.gcda" -o -name "*.gcno" -o -name "*.gz" -o -name "*.cpp" -o -name "*.h"| xargs rm -rf - cd ../ - - rm -rf common_work/* - - cp -rf ${container_id} gcov_output/ + mkdir -p gcov_output/${container_id} + cp -rf common_work/* gcov_output/${container_id}/* + pushd gcov_output/${container_id} + find . -name "*.gcda" -o -name "*.gcno" -o -name "*.gz" -o -name "*.cpp" -o -name "*.h" | xargs rm -rf + popd + pushd common_work + find . 
-name "*.gcda" -o -name "*.gz" -o -name "*.info" | xargs rm -rf + popd + rm -rf ${container_id} done < container_dir_list # generate report with code - mkdir -p common_work/gcov - tar -zxvf swss.tar.gz -C common_work/gcov + pushd common_work/gcov + find . -name "*.gcno" | xargs rm -rf + popd echo "### Make info generating completed !!" } @@ -344,7 +329,7 @@ gcov_support_collect_gcno() echo " === Start collecting .gcno files... === " submodule_name=$1 exec 3>$GCNO_LIST_FILE - find_command=`find -name *.gcno` + find_command=`find -name "*.gcno" -o -name "*.gcda"` echo "${find_command}" if [ -z "${find_command}" ]; then echo "### Error! no gcno files found!" diff --git a/tests/mock_tests/Makefile.am b/tests/mock_tests/Makefile.am index f82b556e47..522b45110b 100644 --- a/tests/mock_tests/Makefile.am +++ b/tests/mock_tests/Makefile.am @@ -2,13 +2,11 @@ FLEX_CTR_DIR = $(top_srcdir)/orchagent/flex_counter DEBUG_CTR_DIR = $(top_srcdir)/orchagent/debug_counter P4_ORCH_DIR = $(top_srcdir)/orchagent/p4orch -INCLUDES = -I $(FLEX_CTR_DIR) -I $(DEBUG_CTR_DIR) -I $(top_srcdir)/lib - CFLAGS_SAI = -I /usr/include/sai -TESTS = tests +TESTS = tests tests_intfmgrd tests_portsyncd tests_fpmsyncd tests_response_publisher -noinst_PROGRAMS = tests +noinst_PROGRAMS = tests tests_intfmgrd tests_portsyncd tests_fpmsyncd tests_response_publisher LDADD_SAI = -lsaimeta -lsaimetadata -lsaivs -lsairedis @@ -21,22 +19,44 @@ endif CFLAGS_GTEST = LDADD_GTEST = -L/usr/src/gtest +## Orchagent Unit Tests + +tests_INCLUDES = -I $(FLEX_CTR_DIR) -I $(DEBUG_CTR_DIR) -I $(top_srcdir)/lib -I$(top_srcdir)/cfgmgr -I$(top_srcdir)/orchagent -I$(P4_ORCH_DIR)/tests -I$(top_srcdir)/warmrestart + tests_SOURCES = aclorch_ut.cpp \ portsorch_ut.cpp \ routeorch_ut.cpp \ + qosorch_ut.cpp \ + bufferorch_ut.cpp \ + buffermgrdyn_ut.cpp \ + fdborch/flush_syncd_notif_ut.cpp \ + copp_ut.cpp \ + copporch_ut.cpp \ saispy_ut.cpp \ consumer_ut.cpp \ + sfloworh_ut.cpp \ ut_saihelper.cpp \ mock_orchagent_main.cpp \ 
mock_dbconnector.cpp \ mock_consumerstatetable.cpp \ + common/mock_shell_command.cpp \ mock_table.cpp \ mock_hiredis.cpp \ mock_redisreply.cpp \ bulker_ut.cpp \ + portmgr_ut.cpp \ + sflowmgrd_ut.cpp \ fake_response_publisher.cpp \ + swssnet_ut.cpp \ + flowcounterrouteorch_ut.cpp \ + orchdaemon_ut.cpp \ + intfsorch_ut.cpp \ + mux_rollback_ut.cpp \ + warmrestartassist_ut.cpp \ + test_failure_handling.cpp \ $(top_srcdir)/lib/gearboxutils.cpp \ $(top_srcdir)/lib/subintf.cpp \ + $(top_srcdir)/lib/recorder.cpp \ $(top_srcdir)/orchagent/orchdaemon.cpp \ $(top_srcdir)/orchagent/orch.cpp \ $(top_srcdir)/orchagent/notifications.cpp \ @@ -49,6 +69,7 @@ tests_SOURCES = aclorch_ut.cpp \ $(top_srcdir)/orchagent/cbf/nhgmaporch.cpp \ $(top_srcdir)/orchagent/neighorch.cpp \ $(top_srcdir)/orchagent/intfsorch.cpp \ + $(top_srcdir)/orchagent/port/porthlpr.cpp \ $(top_srcdir)/orchagent/portsorch.cpp \ $(top_srcdir)/orchagent/fabricportsorch.cpp \ $(top_srcdir)/orchagent/copporch.cpp \ @@ -58,12 +79,15 @@ tests_SOURCES = aclorch_ut.cpp \ $(top_srcdir)/orchagent/mirrororch.cpp \ $(top_srcdir)/orchagent/fdborch.cpp \ $(top_srcdir)/orchagent/aclorch.cpp \ + $(top_srcdir)/orchagent/pbh/pbhcap.cpp \ $(top_srcdir)/orchagent/pbh/pbhcnt.cpp \ $(top_srcdir)/orchagent/pbh/pbhmgr.cpp \ $(top_srcdir)/orchagent/pbh/pbhrule.cpp \ $(top_srcdir)/orchagent/pbhorch.cpp \ $(top_srcdir)/orchagent/saihelper.cpp \ $(top_srcdir)/orchagent/saiattr.cpp \ + $(top_srcdir)/orchagent/switch/switch_capabilities.cpp \ + $(top_srcdir)/orchagent/switch/switch_helper.cpp \ $(top_srcdir)/orchagent/switchorch.cpp \ $(top_srcdir)/orchagent/pfcwdorch.cpp \ $(top_srcdir)/orchagent/pfcactionhandler.cpp \ @@ -87,13 +111,19 @@ tests_SOURCES = aclorch_ut.cpp \ $(top_srcdir)/orchagent/macsecorch.cpp \ $(top_srcdir)/orchagent/lagid.cpp \ $(top_srcdir)/orchagent/bfdorch.cpp \ - $(top_srcdir)/orchagent/srv6orch.cpp + $(top_srcdir)/orchagent/srv6orch.cpp \ + $(top_srcdir)/orchagent/nvgreorch.cpp \ + $(top_srcdir)/cfgmgr/portmgr.cpp \ 
+ $(top_srcdir)/cfgmgr/sflowmgr.cpp \ + $(top_srcdir)/cfgmgr/buffermgrdyn.cpp \ + $(top_srcdir)/warmrestart/warmRestartAssist.cpp -tests_SOURCES += $(FLEX_CTR_DIR)/flex_counter_manager.cpp $(FLEX_CTR_DIR)/flex_counter_stat_manager.cpp $(FLEX_CTR_DIR)/flow_counter_handler.cpp +tests_SOURCES += $(FLEX_CTR_DIR)/flex_counter_manager.cpp $(FLEX_CTR_DIR)/flex_counter_stat_manager.cpp $(FLEX_CTR_DIR)/flow_counter_handler.cpp $(FLEX_CTR_DIR)/flowcounterrouteorch.cpp tests_SOURCES += $(DEBUG_CTR_DIR)/debug_counter.cpp $(DEBUG_CTR_DIR)/drop_counter.cpp tests_SOURCES += $(P4_ORCH_DIR)/p4orch.cpp \ $(P4_ORCH_DIR)/p4orch_util.cpp \ $(P4_ORCH_DIR)/p4oidmapper.cpp \ + $(P4_ORCH_DIR)/tables_definition_manager.cpp \ $(P4_ORCH_DIR)/router_interface_manager.cpp \ $(P4_ORCH_DIR)/neighbor_manager.cpp \ $(P4_ORCH_DIR)/next_hop_manager.cpp \ @@ -102,9 +132,91 @@ tests_SOURCES += $(P4_ORCH_DIR)/p4orch.cpp \ $(P4_ORCH_DIR)/acl_table_manager.cpp \ $(P4_ORCH_DIR)/acl_rule_manager.cpp \ $(P4_ORCH_DIR)/wcmp_manager.cpp \ - $(P4_ORCH_DIR)/mirror_session_manager.cpp + $(P4_ORCH_DIR)/mirror_session_manager.cpp \ + $(P4_ORCH_DIR)/gre_tunnel_manager.cpp \ + $(P4_ORCH_DIR)/l3_admit_manager.cpp \ + $(P4_ORCH_DIR)/ext_tables_manager.cpp \ + $(P4_ORCH_DIR)/tests/mock_sai_switch.cpp tests_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) -tests_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) -I$(top_srcdir)/orchagent +tests_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) $(tests_INCLUDES) tests_LDADD = $(LDADD_GTEST) $(LDADD_SAI) -lnl-genl-3 -lhiredis -lhiredis -lpthread \ - -lswsscommon -lswsscommon -lgtest -lgtest_main -lzmq -lnl-3 -lnl-route-3 + -lswsscommon -lswsscommon -lgtest -lgtest_main -lzmq -lnl-3 -lnl-route-3 -lgmock -lgmock_main + +## portsyncd unit tests + +tests_portsyncd_SOURCES = portsyncd/portsyncd_ut.cpp \ + $(top_srcdir)/lib/recorder.cpp \ + $(top_srcdir)/portsyncd/linksync.cpp \ + 
mock_dbconnector.cpp \ + common/mock_shell_command.cpp \ + mock_table.cpp \ + mock_hiredis.cpp \ + mock_redisreply.cpp + +tests_portsyncd_INCLUDES = -I $(top_srcdir)/portsyncd -I $(top_srcdir)/cfgmgr -I $(top_srcdir)/lib +tests_portsyncd_CXXFLAGS = -Wl,-wrap,if_nameindex -Wl,-wrap,if_freenameindex +tests_portsyncd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) +tests_portsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(tests_portsyncd_INCLUDES) +tests_portsyncd_LDADD = $(LDADD_GTEST) -lnl-genl-3 -lhiredis -lhiredis \ + -lswsscommon -lswsscommon -lgtest -lgtest_main -lnl-3 -lnl-route-3 -lpthread + +## intfmgrd unit tests + +tests_intfmgrd_SOURCES = intfmgrd/intfmgr_ut.cpp \ + $(top_srcdir)/cfgmgr/intfmgr.cpp \ + $(top_srcdir)/lib/subintf.cpp \ + $(top_srcdir)/lib/recorder.cpp \ + $(top_srcdir)/orchagent/orch.cpp \ + $(top_srcdir)/orchagent/request_parser.cpp \ + mock_orchagent_main.cpp \ + mock_dbconnector.cpp \ + mock_table.cpp \ + mock_hiredis.cpp \ + fake_response_publisher.cpp \ + mock_redisreply.cpp \ + common/mock_shell_command.cpp + +tests_intfmgrd_INCLUDES = $(tests_INCLUDES) -I$(top_srcdir)/cfgmgr -I$(top_srcdir)/lib +tests_intfmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) +tests_intfmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) $(tests_intfmgrd_INCLUDES) +tests_intfmgrd_LDADD = $(LDADD_GTEST) $(LDADD_SAI) -lnl-genl-3 -lhiredis -lhiredis \ + -lswsscommon -lswsscommon -lgtest -lgtest_main -lzmq -lnl-3 -lnl-route-3 -lpthread -lgmock -lgmock_main + +## fpmsyncd unit tests + +tests_fpmsyncd_SOURCES = fpmsyncd/test_fpmlink.cpp \ + fpmsyncd/test_routesync.cpp \ + fake_netlink.cpp \ + fake_warmstarthelper.cpp \ + fake_producerstatetable.cpp \ + mock_dbconnector.cpp \ + mock_table.cpp \ + mock_hiredis.cpp \ + $(top_srcdir)/warmrestart/ \ + $(top_srcdir)/fpmsyncd/fpmlink.cpp \ + $(top_srcdir)/fpmsyncd/routesync.cpp + 
+tests_fpmsyncd_INCLUDES = $(tests_INCLUDES) -I$(top_srcdir)/tests_fpmsyncd -I$(top_srcdir)/lib -I$(top_srcdir)/warmrestart +tests_fpmsyncd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) +tests_fpmsyncd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) $(tests_fpmsyncd_INCLUDES) +tests_fpmsyncd_LDADD = $(LDADD_GTEST) $(LDADD_SAI) -lnl-genl-3 -lhiredis -lhiredis \ + -lswsscommon -lswsscommon -lgtest -lgtest_main -lzmq -lnl-3 -lnl-route-3 -lpthread -lgmock -lgmock_main + +## response publisher unit tests + +tests_response_publisher_SOURCES = response_publisher/response_publisher_ut.cpp \ + $(top_srcdir)/orchagent/response_publisher.cpp \ + $(top_srcdir)/lib/recorder.cpp \ + mock_orchagent_main.cpp \ + mock_dbconnector.cpp \ + mock_table.cpp \ + mock_hiredis.cpp \ + mock_redisreply.cpp + +tests_response_publisher_INCLUDES = $(tests_INCLUDES) +tests_response_publisher_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) +tests_response_publisher_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_GTEST) $(CFLAGS_SAI) $(tests_response_publisher_INCLUDES) +tests_response_publisher_LDADD = $(LDADD_GTEST) $(LDADD_SAI) -lnl-genl-3 -lhiredis -lhiredis \ + -lswsscommon -lswsscommon -lgtest -lgtest_main -lzmq -lnl-3 -lnl-route-3 -lpthread + diff --git a/tests/mock_tests/aclorch_ut.cpp b/tests/mock_tests/aclorch_ut.cpp index 295fed20ba..b03d219138 100644 --- a/tests/mock_tests/aclorch_ut.cpp +++ b/tests/mock_tests/aclorch_ut.cpp @@ -1,4 +1,5 @@ #include "ut_helper.h" +#include "flowcounterrouteorch.h" extern sai_object_id_t gSwitchId; @@ -6,6 +7,7 @@ extern SwitchOrch *gSwitchOrch; extern CrmOrch *gCrmOrch; extern PortsOrch *gPortsOrch; extern RouteOrch *gRouteOrch; +extern FlowCounterRouteOrch *gFlowCounterRouteOrch; extern IntfsOrch *gIntfsOrch; extern NeighOrch *gNeighOrch; extern FgNhgOrch *gFgNhgOrch; @@ -17,6 +19,7 @@ extern VRFOrch *gVrfOrch; extern sai_acl_api_t 
*sai_acl_api; extern sai_switch_api_t *sai_switch_api; +extern sai_hash_api_t *sai_hash_api; extern sai_port_api_t *sai_port_api; extern sai_vlan_api_t *sai_vlan_api; extern sai_bridge_api_t *sai_bridge_api; @@ -310,6 +313,7 @@ namespace aclorch_test ASSERT_EQ(status, SAI_STATUS_SUCCESS); sai_api_query(SAI_API_SWITCH, (void **)&sai_switch_api); + sai_api_query(SAI_API_HASH, (void **)&sai_hash_api); sai_api_query(SAI_API_BRIDGE, (void **)&sai_bridge_api); sai_api_query(SAI_API_PORT, (void **)&sai_port_api); sai_api_query(SAI_API_VLAN, (void **)&sai_vlan_api); @@ -372,6 +376,11 @@ namespace aclorch_test ASSERT_EQ(gPortsOrch, nullptr); gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + static const vector route_pattern_tables = { + CFG_FLOW_COUNTER_ROUTE_PATTERN_TABLE_NAME, + }; + gFlowCounterRouteOrch = new FlowCounterRouteOrch(m_config_db.get(), route_pattern_tables); + ASSERT_EQ(gVrfOrch, nullptr); gVrfOrch = new VRFOrch(m_app_db.get(), APP_VRF_TABLE_NAME, m_state_db.get(), STATE_VRF_OBJECT_TABLE_NAME); @@ -419,7 +428,12 @@ namespace aclorch_test }; gRouteOrch = new RouteOrch(m_app_db.get(), route_tables, gSwitchOrch, gNeighOrch, gIntfsOrch, gVrfOrch, gFgNhgOrch, gSrv6Orch); - PolicerOrch *policer_orch = new PolicerOrch(m_config_db.get(), "POLICER"); + vector policer_tables = { + TableConnector(m_config_db.get(), CFG_POLICER_TABLE_NAME), + TableConnector(m_config_db.get(), CFG_PORT_STORM_CONTROL_TABLE_NAME) + }; + TableConnector stateDbStorm(m_state_db.get(), "BUM_STORM_CAPABILITY"); + PolicerOrch *policer_orch = new PolicerOrch(policer_tables, gPortsOrch); TableConnector stateDbMirrorSession(m_state_db.get(), STATE_MIRROR_SESSION_TABLE_NAME); TableConnector confDbMirrorSession(m_config_db.get(), CFG_MIRROR_SESSION_TABLE_NAME); @@ -575,8 +589,7 @@ namespace aclorch_test return false; } - sai_attribute_t new_attr; - memset(&new_attr, 0, sizeof(new_attr)); + sai_attribute_t new_attr = {}; new_attr.id = attr.id; @@ 
-636,8 +649,7 @@ namespace aclorch_test return false; } - sai_attribute_t new_attr; - memset(&new_attr, 0, sizeof(new_attr)); + sai_attribute_t new_attr = {}; new_attr.id = attr.id; @@ -1399,7 +1411,7 @@ namespace aclorch_test { { ACL_TABLE_TYPE_MATCHES, - string(MATCH_SRC_IP) + comma + MATCH_ETHER_TYPE + comma + MATCH_L4_SRC_PORT_RANGE + string(MATCH_SRC_IP) + comma + MATCH_ETHER_TYPE + comma + MATCH_L4_SRC_PORT_RANGE + comma + MATCH_BTH_OPCODE + comma + MATCH_AETH_SYNDROME }, { ACL_TABLE_TYPE_BPOINT_TYPES, @@ -1421,6 +1433,8 @@ namespace aclorch_test { "SAI_ACL_TABLE_ATTR_FIELD_SRC_IP", "true" }, { "SAI_ACL_TABLE_ATTR_FIELD_ETHER_TYPE", "true" }, { "SAI_ACL_TABLE_ATTR_FIELD_ACL_RANGE_TYPE", "1:SAI_ACL_RANGE_TYPE_L4_SRC_PORT_RANGE" }, + { "SAI_ACL_TABLE_ATTR_FIELD_BTH_OPCODE", "true" }, + { "SAI_ACL_TABLE_ATTR_FIELD_AETH_SYNDROME", "true" }, }; ASSERT_TRUE(validateAclTable( @@ -1467,6 +1481,42 @@ namespace aclorch_test // DST_IP is not in the table type ASSERT_FALSE(orch->getAclRule(aclTableName, aclRuleName)); + orch->doAclRuleTask( + deque( + { + { + aclTableName + "|" + aclRuleName, + SET_COMMAND, + { + { ACTION_PACKET_ACTION, PACKET_ACTION_DROP }, + { MATCH_BTH_OPCODE, "0x60" }, + } + } + } + ) + ); + + // MATCH_BTH_OPCODE invalid format + ASSERT_FALSE(orch->getAclRule(aclTableName, aclRuleName)); + + orch->doAclRuleTask( + deque( + { + { + aclTableName + "|" + aclRuleName, + SET_COMMAND, + { + { ACTION_PACKET_ACTION, PACKET_ACTION_DROP }, + { MATCH_AETH_SYNDROME, "0x60" }, + } + } + } + ) + ); + + // MATCH_AETH_SYNDROME invalid format + ASSERT_FALSE(orch->getAclRule(aclTableName, aclRuleName)); + orch->doAclRuleTask( deque( { @@ -1476,6 +1526,8 @@ namespace aclorch_test { { MATCH_SRC_IP, "1.1.1.1/32" }, { ACTION_PACKET_ACTION, PACKET_ACTION_DROP }, + { MATCH_BTH_OPCODE, "0x60/0xff" }, + { MATCH_AETH_SYNDROME, "0x60/0x60" }, } } } @@ -1719,4 +1771,120 @@ namespace aclorch_test ASSERT_TRUE(orch->m_aclOrch->removeAclRule(rule->getTableId(), rule->getId())); } + 
TEST_F(AclOrchTest, deleteNonExistingRule) + { + string tableId = "acl_table"; + string ruleId = "acl_rule"; + + auto orch = createAclOrch(); + + // add acl table + auto kvfAclTable = deque({{ + tableId, + SET_COMMAND, + { + { ACL_TABLE_DESCRIPTION, "L3 table" }, + { ACL_TABLE_TYPE, TABLE_TYPE_L3 }, + { ACL_TABLE_STAGE, STAGE_INGRESS }, + { ACL_TABLE_PORTS, "1,2" } + } + }}); + + orch->doAclTableTask(kvfAclTable); + + // try to delete non existing acl rule + ASSERT_TRUE(orch->m_aclOrch->removeAclRule(tableId, ruleId)); + } + + sai_switch_api_t *old_sai_switch_api; + + // The following function is used to override SAI API get_switch_attribute to request passing + // mandatory ACL actions to SAI when creating mirror ACL table. + sai_status_t getSwitchAttribute(_In_ sai_object_id_t switch_id,_In_ uint32_t attr_count, + _Inout_ sai_attribute_t *attr_list) + { + if (attr_count == 1) + { + switch(attr_list[0].id) + { + case SAI_SWITCH_ATTR_MAX_ACL_ACTION_COUNT: + attr_list[0].value.u32 = 2; + return SAI_STATUS_SUCCESS; + case SAI_SWITCH_ATTR_ACL_STAGE_INGRESS: + case SAI_SWITCH_ATTR_ACL_STAGE_EGRESS: + attr_list[0].value.aclcapability.action_list.count = 2; + attr_list[0].value.aclcapability.action_list.list[0]= SAI_ACL_ACTION_TYPE_COUNTER; + attr_list[0].value.aclcapability.action_list.list[1]= + attr_list[0].id == SAI_SWITCH_ATTR_ACL_STAGE_INGRESS ? + SAI_ACL_ACTION_TYPE_MIRROR_INGRESS : SAI_ACL_ACTION_TYPE_MIRROR_EGRESS; + attr_list[0].value.aclcapability.is_action_list_mandatory = true; + return SAI_STATUS_SUCCESS; + } + } + return old_sai_switch_api->get_switch_attribute(switch_id, attr_count, attr_list); + } + + TEST_F(AclOrchTest, AclTableCreationWithMandatoryActions) + { + // Override SAI API get_switch_attribute to request passing mandatory ACL actions to SAI + // when creating mirror ACL table. 
+ old_sai_switch_api = sai_switch_api; + sai_switch_api_t new_sai_switch_api = *sai_switch_api; + sai_switch_api = &new_sai_switch_api; + sai_switch_api->get_switch_attribute = getSwitchAttribute; + + // Set platform env to enable support of MIRRORV6 ACL table. + bool unset_platform_env = false; + if (!getenv("platform")) + { + setenv("platform", VS_PLATFORM_SUBSTRING, 0); + unset_platform_env = true; + } + + auto orch = createAclOrch(); + + for (const auto &acl_table_type : { TABLE_TYPE_MIRROR, TABLE_TYPE_MIRRORV6, TABLE_TYPE_MIRROR_DSCP }) + { + for (const auto &acl_table_stage : { STAGE_INGRESS, STAGE_EGRESS }) + { + // Create ACL table. + string acl_table_id = "mirror_acl_table"; + auto kvfAclTable = deque( + { { acl_table_id, + SET_COMMAND, + { { ACL_TABLE_DESCRIPTION, acl_table_type }, + { ACL_TABLE_TYPE, acl_table_type }, + { ACL_TABLE_STAGE, acl_table_stage }, + { ACL_TABLE_PORTS, "1,2" } } } }); + orch->doAclTableTask(kvfAclTable); + auto acl_table = orch->getAclTable(acl_table_id); + ASSERT_NE(acl_table, nullptr); + + // Verify mandaotry ACL actions has been added. + auto acl_actions = acl_table->type.getActions(); + ASSERT_NE(acl_actions.find(SAI_ACL_ACTION_TYPE_COUNTER), acl_actions.end()); + sai_acl_action_type_t action = strcmp(acl_table_stage, STAGE_INGRESS) == 0 ? + SAI_ACL_ACTION_TYPE_MIRROR_INGRESS : SAI_ACL_ACTION_TYPE_MIRROR_EGRESS; + ASSERT_NE(acl_actions.find(action), acl_actions.end()); + + // Delete ACL table. + kvfAclTable = deque( + { { acl_table_id, + DEL_COMMAND, + {} } }); + orch->doAclTableTask(kvfAclTable); + acl_table = orch->getAclTable(acl_table_id); + ASSERT_EQ(acl_table, nullptr); + } + } + + // Unset platform env. + if (unset_platform_env) + { + unsetenv("platform"); + } + + // Restore sai_switch_api. 
+ sai_switch_api = old_sai_switch_api; + } } // namespace nsAclOrchTest diff --git a/tests/mock_tests/buffermgrdyn_ut.cpp b/tests/mock_tests/buffermgrdyn_ut.cpp new file mode 100644 index 0000000000..1c23a17410 --- /dev/null +++ b/tests/mock_tests/buffermgrdyn_ut.cpp @@ -0,0 +1,1426 @@ +#define private public // make Directory::m_values available to clean it. +#include "directory.h" +#undef private +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_table.h" +#define private public +#include "buffermgrdyn.h" +#undef private +#include "warm_restart.h" + +extern string gMySwitchType; + + +namespace buffermgrdyn_test +{ + using namespace std; + + shared_ptr m_app_db = make_shared("APPL_DB", 0); + shared_ptr m_config_db = make_shared("CONFIG_DB", 0); + shared_ptr m_state_db = make_shared("STATE_DB", 0); + shared_ptr m_app_state_db = make_shared("APPL_STATE_DB", 0); + + BufferMgrDynamic *m_dynamicBuffer; + SelectableTimer m_selectableTable(timespec({ .tv_sec = BUFFERMGR_TIMER_PERIOD, .tv_nsec = 0 }), 0); + Table portTable(m_config_db.get(), CFG_PORT_TABLE_NAME); + Table cableLengthTable(m_config_db.get(), CFG_PORT_CABLE_LEN_TABLE_NAME); + Table bufferPoolTable(m_config_db.get(), CFG_BUFFER_POOL_TABLE_NAME); + Table bufferProfileTable(m_config_db.get(), CFG_BUFFER_PROFILE_TABLE_NAME); + Table bufferPgTable(m_config_db.get(), CFG_BUFFER_PG_TABLE_NAME); + Table bufferQueueTable(m_config_db.get(), CFG_BUFFER_QUEUE_TABLE_NAME); + Table bufferIngProfileListTable(m_config_db.get(), CFG_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME); + Table bufferEgrProfileListTable(m_config_db.get(), CFG_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME); + Table defaultLosslessParameterTable(m_config_db.get(), CFG_DEFAULT_LOSSLESS_BUFFER_PARAMETER); + Table appPortTable(m_app_db.get(), APP_PORT_TABLE_NAME); + Table appBufferPoolTable(m_app_db.get(), APP_BUFFER_POOL_TABLE_NAME); + Table appBufferProfileTable(m_app_db.get(), 
APP_BUFFER_PROFILE_TABLE_NAME); + Table appBufferPgTable(m_app_db.get(), APP_BUFFER_PG_TABLE_NAME); + Table appBufferQueueTable(m_app_db.get(), APP_BUFFER_QUEUE_TABLE_NAME); + Table appBufferIngProfileListTable(m_app_db.get(), APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME); + Table appBufferEgrProfileListTable(m_app_db.get(), APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME); + Table bufferMaxParamTable(m_state_db.get(), STATE_BUFFER_MAXIMUM_VALUE_TABLE); + Table statePortTable(m_state_db.get(), STATE_PORT_TABLE_NAME); + Table stateBufferTable(m_state_db.get(), STATE_BUFFER_MAXIMUM_VALUE_TABLE); + + map> zeroProfileMap; + vector zeroProfile; + + struct BufferMgrDynTest : public ::testing::Test + { + map> testBufferProfile; + map> testBufferPool; + + void SetUpReclaimingBuffer() + { + zeroProfileMap["ingress_zero_pool"] = { + {"mode", "static"}, + {"type", "ingress"}, + {"size", "0"} + }; + zeroProfileMap["ingress_lossy_pg_zero_profile"] = { + {"pool", "ingress_zero_pool"}, + {"size", "0"}, + {"static_th", "0"} + }; + zeroProfileMap["ingress_lossless_zero_profile"] = { + {"pool", "ingress_lossless_pool"}, + {"size", "0"}, + {"dynamic_th", "-8"} + }; + zeroProfileMap["egress_lossy_zero_profile"] = { + {"pool", "egress_lossy_pool"}, + {"size", "0"}, + {"dynamic_th", "-8"} + }; + zeroProfileMap["egress_lossless_zero_profile"] = { + {"pool", "egress_lossless_pool"}, + {"size", "0"}, + {"dynamic_th", "-8"} + }; + + zeroProfile = { + { + "BUFFER_POOL_TABLE:ingress_zero_pool", + "SET", + zeroProfileMap["ingress_zero_pool"] + }, + { + "BUFFER_PROFILE_TABLE:ingress_lossy_pg_zero_profile", + "SET", + zeroProfileMap["ingress_lossy_pg_zero_profile"] + }, + { + "BUFFER_PROFILE_TABLE:ingress_lossless_zero_profile", + "SET", + zeroProfileMap["ingress_lossless_zero_profile"] + }, + { + "BUFFER_PROFILE_TABLE:egress_lossy_zero_profile", + "SET", + zeroProfileMap["egress_lossy_zero_profile"] + }, + { + "BUFFER_PROFILE_TABLE:egress_lossless_zero_profile", + "SET", + 
zeroProfileMap["egress_lossless_zero_profile"] + }, + { + "control_fields", + "SET", + { + {"pgs_to_apply_zero_profile", "0"}, + {"ingress_zero_profile", "ingress_lossy_pg_zero_profile"} + } + } + }; + } + + BufferMgrDynTest() + { + testBufferPool["ingress_lossless_pool"] = { + {"mode", "dynamic"}, + {"type", "ingress"}, + {"size", "1024000"} + }; + testBufferPool["egress_lossless_pool"] = { + {"mode", "dynamic"}, + {"type", "egress"}, + {"size", "1024000"} + }; + testBufferPool["egress_lossy_pool"] = { + {"mode", "dynamic"}, + {"type", "egress"}, + {"size", "1024000"} + }; + + testBufferProfile["ingress_lossless_profile"] = { + {"dynamic_th", "7"}, + {"pool", "ingress_lossless_pool"}, + {"size", "0"} + }; + testBufferProfile["egress_lossless_profile"] = { + {"dynamic_th", "7"}, + {"pool", "egress_lossless_pool"}, + {"size", "0"} + }; + testBufferProfile["egress_lossy_profile"] = { + {"dynamic_th", "3"}, + {"pool", "egress_lossy_pool"}, + {"size", "0"} + }; + } + + void SetUp() override + { + setenv("ASIC_VENDOR", "mock_test", 1); + + testing_db::reset(); + + WarmStart::initialize("buffermgrd", "swss"); + WarmStart::checkWarmStart("buffermgrd", "swss"); + } + + void StartBufferManager(shared_ptr> zero_profile=nullptr) + { + // Init switch and create dependencies + vector buffer_table_connectors = { + TableConnector(m_config_db.get(), CFG_PORT_TABLE_NAME), + TableConnector(m_config_db.get(), CFG_PORT_CABLE_LEN_TABLE_NAME), + TableConnector(m_config_db.get(), CFG_BUFFER_POOL_TABLE_NAME), + TableConnector(m_config_db.get(), CFG_BUFFER_PROFILE_TABLE_NAME), + TableConnector(m_config_db.get(), CFG_BUFFER_PG_TABLE_NAME), + TableConnector(m_config_db.get(), CFG_BUFFER_QUEUE_TABLE_NAME), + TableConnector(m_config_db.get(), CFG_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME), + TableConnector(m_config_db.get(), CFG_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME), + TableConnector(m_config_db.get(), CFG_DEFAULT_LOSSLESS_BUFFER_PARAMETER), + TableConnector(m_state_db.get(), 
STATE_BUFFER_MAXIMUM_VALUE_TABLE), + TableConnector(m_state_db.get(), STATE_PORT_TABLE_NAME) + }; + + m_dynamicBuffer = new BufferMgrDynamic(m_config_db.get(), m_state_db.get(), m_app_db.get(), m_app_state_db.get(), buffer_table_connectors, nullptr, zero_profile); + } + + void InitPort(const string &port="Ethernet0", const string &admin_status="up") + { + portTable.set(port, + { + {"speed", "100000"}, + {"mtu", "9100"}, + {"admin_status", admin_status} + }); + m_dynamicBuffer->addExistingData(&portTable); + static_cast(m_dynamicBuffer)->doTask(); + } + + void SetPortInitDone() + { + appPortTable.set("PortInitDone", + { + {"lanes", "0"} + }); + m_dynamicBuffer->addExistingData(&appPortTable); + static_cast(m_dynamicBuffer)->doTask(); + } + + void InitMmuSize() + { + bufferMaxParamTable.set("global", + { + {"mmu_size", "1024000"} + }); + if (m_dynamicBuffer) + m_dynamicBuffer->addExistingData(&bufferMaxParamTable); + } + + void InitDefaultLosslessParameter(const string &over_subscribe_ratio="") + { + if (over_subscribe_ratio.empty()) + { + defaultLosslessParameterTable.set("AZURE", + { + {"default_dynamic_th", "0"} + }); + } + else + { + defaultLosslessParameterTable.set("AZURE", + { + {"default_dynamic_th", "0"}, + {"over_subscribe_ratio", over_subscribe_ratio} + }); + } + if (m_dynamicBuffer) + { + m_dynamicBuffer->addExistingData(&defaultLosslessParameterTable); + static_cast(m_dynamicBuffer)->doTask(); + } + } + + void InitBufferPool() + { + for(auto &i: testBufferPool) + { + bufferPoolTable.set(i.first, i.second); + } + + m_dynamicBuffer->addExistingData(&bufferPoolTable); + static_cast(m_dynamicBuffer)->doTask(); + } + + void ClearBufferPool(const string &skippedPool="", const string &clearPool="") + { + std::deque entries; + for (auto &i: testBufferPool) + { + if (skippedPool == i.first) + continue; + if (!clearPool.empty() && clearPool != i.first) + continue; + entries.push_back({i.first, "DEL", {}}); + } + + auto consumer = 
dynamic_cast(m_dynamicBuffer->getExecutor(CFG_BUFFER_POOL_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(m_dynamicBuffer)->doTask(); + } + + void InitDefaultBufferProfile() + { + for (auto &i: testBufferProfile) + { + bufferProfileTable.set(i.first, i.second); + } + + m_dynamicBuffer->addExistingData(&bufferProfileTable); + static_cast(m_dynamicBuffer)->doTask(); + } + + void ClearBufferProfile() + { + std::deque entries; + for (auto &i: testBufferProfile) + entries.push_back({i.first, "DEL", {}}); + + auto consumer = dynamic_cast(m_dynamicBuffer->getExecutor(CFG_BUFFER_PROFILE_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(m_dynamicBuffer)->doTask(); + } + + void InitBufferPg(const string &key, const string &profile="NULL") + { + bufferPgTable.set(key, + { + {"profile", profile} + }); + m_dynamicBuffer->addExistingData(&bufferPgTable); + static_cast(m_dynamicBuffer)->doTask(); + } + + void ClearBufferObject(const string &key, const string &tableName) + { + std::deque entries; + entries.push_back({key, "DEL", {}}); + + auto consumer = dynamic_cast(m_dynamicBuffer->getExecutor(tableName)); + consumer->addToSync(entries); + static_cast(m_dynamicBuffer)->doTask(); + + Table tableObject(m_config_db.get(), tableName); + tableObject.del(key); + } + + void InitBufferQueue(const string &key, const string &profile) + { + bufferQueueTable.set(key, + { + {"profile", profile} + }); + m_dynamicBuffer->addExistingData(&bufferQueueTable); + static_cast(m_dynamicBuffer)->doTask(); + } + + void InitBufferProfileList(const string &ports, const string &profileList, Table &appDb) + { + appDb.set(ports, + { + {"profile_list", profileList} + }); + m_dynamicBuffer->addExistingData(&appDb); + static_cast(m_dynamicBuffer)->doTask(); + } + + void InitCableLength(const string &port, const string &length) + { + cableLengthTable.set("AZURE", + { + {port, length} + }); + m_dynamicBuffer->addExistingData(&cableLengthTable); + static_cast(m_dynamicBuffer)->doTask(); + } 
+ + void HandleTable(Table &table) + { + m_dynamicBuffer->addExistingData(&table); + static_cast(m_dynamicBuffer)->doTask(); + } + + void CheckPool(buffer_pool_t &pool, const vector &tuples) + { + for (auto i : tuples) + { + if (fvField(i) == buffer_pool_type_field_name) + { + if (fvValue(i) == buffer_value_ingress) + ASSERT_EQ(pool.direction, BUFFER_INGRESS); + else + ASSERT_EQ(pool.direction, BUFFER_EGRESS); + } + else if (fvField(i) == buffer_pool_mode_field_name) + { + ASSERT_EQ(pool.mode, fvValue(i)); + } + else if (fvField(i) == buffer_size_field_name) + { + ASSERT_TRUE(!pool.dynamic_size); + ASSERT_EQ("1024000", fvValue(i)); + } + } + } + + void CheckProfile(buffer_profile_t &profile, const vector &tuples) + { + for (auto i : tuples) + { + if (fvField(i) == buffer_pool_field_name) + { + ASSERT_EQ(profile.pool_name, fvValue(i)); + if (strstr(profile.pool_name.c_str(), "ingress") != nullptr) + ASSERT_EQ(profile.direction, BUFFER_INGRESS); + else + ASSERT_EQ(profile.direction, BUFFER_EGRESS); + } + else if (fvField(i) == buffer_dynamic_th_field_name) + { + ASSERT_EQ(profile.threshold_mode, buffer_dynamic_th_field_name); + ASSERT_EQ(profile.threshold, fvValue(i)); + } + else if (fvField(i) == buffer_size_field_name) + { + ASSERT_EQ(profile.size, fvValue(i)); + } + } + } + + void CheckPg(const string &port, const string &key, const string &expectedProfile="") + { + vector fieldValues; + + ASSERT_TRUE(m_dynamicBuffer->m_portPgLookup[port][key].dynamic_calculated); + ASSERT_TRUE(m_dynamicBuffer->m_portPgLookup[port][key].lossless); + + auto existInDb = (!expectedProfile.empty()); + ASSERT_EQ(appBufferPgTable.get(key, fieldValues), existInDb); + if (existInDb) + { + ASSERT_EQ(m_dynamicBuffer->m_portPgLookup[port][key].running_profile_name, expectedProfile); + ASSERT_EQ(fvField(fieldValues[0]), "profile"); + ASSERT_EQ(fvValue(fieldValues[0]), expectedProfile); + } + } + + void CheckQueue(const string &port, const string &key, const string &expectedProfile, bool 
existInDb) + { + vector fieldValues; + + ASSERT_EQ(m_dynamicBuffer->m_portQueueLookup[port][key].running_profile_name, expectedProfile); + ASSERT_EQ(appBufferQueueTable.get(key, fieldValues), existInDb); + if (existInDb) + { + ASSERT_EQ(fvField(fieldValues[0]), "profile"); + ASSERT_EQ(fvValue(fieldValues[0]), expectedProfile); + } + } + + void CheckProfileList(const string &port, bool ingress, const string &profileList, bool existInDb=true) + { + vector fieldValues; + + auto direction = ingress ? BUFFER_INGRESS : BUFFER_EGRESS; + ASSERT_EQ(m_dynamicBuffer->m_portProfileListLookups[direction][port], profileList); + + auto &appDb = ingress ? appBufferIngProfileListTable : appBufferEgrProfileListTable; + + ASSERT_EQ(appDb.get(port, fieldValues), existInDb); + if (existInDb) + { + ASSERT_EQ(fieldValues.size(), 1); + ASSERT_EQ(fvField(fieldValues[0]), "profile_list"); + ASSERT_EQ(fvValue(fieldValues[0]), profileList); + } + } + + void CheckIfVectorsMatch(const vector &vec1, const vector &vec2) + { + ASSERT_EQ(vec1.size(), vec2.size()); + for (auto &i : vec1) + { + bool found = false; + for (auto &j : vec2) + { + if (i == j) + { + found = true; + break; + } + } + ASSERT_TRUE(found); + } + } + + void TearDown() override + { + delete m_dynamicBuffer; + m_dynamicBuffer = nullptr; + + unsetenv("ASIC_VENDOR"); + } + }; + + /* + * Dependencies + * 1. Buffer manager reads default lossless parameter and maximum mmu size at the beginning + * 2. Maximum mmu size will be pushed ahead of PortInitDone + * 3. Buffer pools can be ready at any time after PortInitDone + * 4. Buffer tables can be applied in any order + * 5. Port and buffer PG can be applied in any order + * 6. Sequence after config qos clear + */ + + /* + * Normal starting flow + * 1. Start buffer manager with default lossless parameter and maximum mmu size + * 2. PortInitDone + * 3. Cable length and port configuration + * 4. Buffer tables: BUFFER_POOL/BUFFER_PROFILE/BUFFER_PG + * 5. 
Queue and buffer profile lists with/without port created + */ + TEST_F(BufferMgrDynTest, BufferMgrTestNormalFlows) + { + vector fieldValues; + vector keys; + + // Prepare information that will be read at the beginning + InitDefaultLosslessParameter(); + InitMmuSize(); + + StartBufferManager(); + + InitPort(); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet0"].state, PORT_INITIALIZING); + + SetPortInitDone(); + // Timer will be called + m_dynamicBuffer->doTask(m_selectableTable); + + ASSERT_EQ(m_dynamicBuffer->m_bufferPoolLookup.size(), 0); + InitBufferPool(); + ASSERT_EQ(m_dynamicBuffer->m_bufferPoolLookup.size(), 3); + appBufferPoolTable.getKeys(keys); + ASSERT_EQ(keys.size(), 3); + for (auto i : testBufferPool) + { + CheckPool(m_dynamicBuffer->m_bufferPoolLookup[i.first], testBufferPool[i.first]); + fieldValues.clear(); + appBufferPoolTable.get(i.first, fieldValues); + CheckPool(m_dynamicBuffer->m_bufferPoolLookup[i.first], fieldValues); + } + + InitDefaultBufferProfile(); + appBufferProfileTable.getKeys(keys); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 3); + for (auto i : testBufferProfile) + { + CheckProfile(m_dynamicBuffer->m_bufferProfileLookup[i.first], testBufferProfile[i.first]); + fieldValues.clear(); + appBufferProfileTable.get(i.first, fieldValues); + CheckProfile(m_dynamicBuffer->m_bufferProfileLookup[i.first], fieldValues); + } + + InitCableLength("Ethernet0", "5m"); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet0"].state, PORT_READY); + + InitBufferPg("Ethernet0|3-4"); + + auto expectedProfile = "pg_lossless_100000_5m_profile"; + CheckPg("Ethernet0", "Ethernet0:3-4", expectedProfile); + auto &portPgMap = m_dynamicBuffer->m_bufferProfileLookup[expectedProfile].port_pgs; + ASSERT_EQ(portPgMap.size(), 1); + ASSERT_TRUE(portPgMap.find("Ethernet0:3-4") != portPgMap.end()); + + // Multiple port key + InitBufferPg("Ethernet2,Ethernet4|3-4"); + + CheckPg("Ethernet2", "Ethernet2:3-4"); + 
CheckPg("Ethernet4", "Ethernet4:3-4"); + + // Buffer queue, ingress and egress profile list table + InitPort("Ethernet2"); + InitPort("Ethernet4"); + + InitBufferQueue("Ethernet2,Ethernet4,Ethernet6|3-4", "egress_lossless_profile"); + CheckQueue("Ethernet2", "Ethernet2:3-4", "egress_lossless_profile", true); + CheckQueue("Ethernet4", "Ethernet4:3-4", "egress_lossless_profile", true); + + InitBufferProfileList("Ethernet2,Ethernet4,Ethernet6", "ingress_lossless_profile", bufferIngProfileListTable); + CheckProfileList("Ethernet2", true, "ingress_lossless_profile"); + CheckProfileList("Ethernet4", true, "ingress_lossless_profile"); + + InitBufferProfileList("Ethernet2,Ethernet4,Ethernet6", "egress_lossless_profile,egress_lossy_profile", bufferEgrProfileListTable); + CheckProfileList("Ethernet2", false, "egress_lossless_profile,egress_lossy_profile"); + CheckProfileList("Ethernet4", false, "egress_lossless_profile,egress_lossy_profile"); + + // Check whether queue, profile lists have been applied after port created + InitPort("Ethernet6"); + CheckQueue("Ethernet6", "Ethernet6:3-4", "egress_lossless_profile", true); + CheckProfileList("Ethernet6", true, "ingress_lossless_profile"); + CheckProfileList("Ethernet6", false, "egress_lossless_profile,egress_lossy_profile"); + } + + /* + * Verify a buffer pool will not be created without corresponding item in BUFFER_POOL + * otherwise it interferes starting flow + * 1. Configure oversubscribe ratio + * 2. 
Check whether ingress_lossless_pool is created + */ + TEST_F(BufferMgrDynTest, BufferMgrTestNoPoolCreatedWithoutDb) + { + StartBufferManager(); + + InitMmuSize(); + InitDefaultLosslessParameter("0"); + InitPort("Ethernet0"); + + static_cast(m_dynamicBuffer)->doTask(); + m_dynamicBuffer->doTask(m_selectableTable); + + ASSERT_TRUE(m_dynamicBuffer->m_bufferPoolLookup.empty()); + + InitBufferPool(); + static_cast(m_dynamicBuffer)->doTask(); + + ASSERT_FALSE(m_dynamicBuffer->m_bufferPoolLookup.empty()); + } + + /* + * Sad flows test. Order is reversed in the following cases: + * - The buffer table creating. The tables referencing other tables are created first + * - Buffer manager starts with neither default lossless parameter nor maximum mmu size available + * + * 1. Start buffer manager without default lossless parameter and maximum mmu size + * 2. Buffer tables are applied in order: + * - Port configuration + * - BUFFER_QUEUE/buffer profile list + * - BUFFER_PG/BUFFER_PROFILE/BUFFER_POOL + * - PortInitDone + * 3. Cable length + * 4. 
Create a buffer profile with wrong threshold mode or direction + * and verify it will not be propagated to SAI + */ + TEST_F(BufferMgrDynTest, BufferMgrTestSadFlows) + { + vector ts; + vector fieldValues; + vector keys; + + StartBufferManager(); + + static_cast(m_dynamicBuffer)->doTask(); + + InitPort(); + + InitBufferPg("Ethernet0|3-4"); + // No item generated in BUFFER_PG_TABLE + CheckPg("Ethernet0", "Ethernet0:3-4"); + + InitBufferQueue("Ethernet0|3-4", "egress_lossless_profile"); + ASSERT_TRUE(m_dynamicBuffer->m_portQueueLookup["Ethernet0"]["Ethernet0:3-4"].running_profile_name.empty()); + + InitBufferProfileList("Ethernet0", "ingress_lossless_profile", bufferIngProfileListTable); + ASSERT_TRUE(m_dynamicBuffer->m_portProfileListLookups[BUFFER_INGRESS]["Ethernet0"].empty()); + + InitBufferProfileList("Ethernet0", "egress_lossless_profile,egress_lossy_profile", bufferEgrProfileListTable); + ASSERT_TRUE(m_dynamicBuffer->m_portProfileListLookups[BUFFER_EGRESS]["Ethernet0"].empty()); + + InitDefaultBufferProfile(); + appBufferProfileTable.getKeys(keys); + ASSERT_EQ(keys.size(), 0); + ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 0); + + ASSERT_EQ(m_dynamicBuffer->m_bufferPoolLookup.size(), 0); + InitBufferPool(); + appBufferPoolTable.getKeys(keys); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ(m_dynamicBuffer->m_bufferPoolLookup.size(), 3); + ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 3); + for (auto i : testBufferProfile) + { + CheckProfile(m_dynamicBuffer->m_bufferProfileLookup[i.first], testBufferProfile[i.first]); + fieldValues.clear(); + appBufferProfileTable.get(i.first, fieldValues); + CheckProfile(m_dynamicBuffer->m_bufferProfileLookup[i.first], fieldValues); + } + for (auto i : testBufferPool) + { + CheckPool(m_dynamicBuffer->m_bufferPoolLookup[i.first], testBufferPool[i.first]); + fieldValues.clear(); + appBufferPoolTable.get(i.first, fieldValues); + CheckPool(m_dynamicBuffer->m_bufferPoolLookup[i.first], fieldValues); + } + + 
ASSERT_EQ(m_dynamicBuffer->m_portPgLookup.size(), 1); + static_cast(m_dynamicBuffer)->doTask(); + CheckProfileList("Ethernet0", true, "ingress_lossless_profile", false); + CheckProfileList("Ethernet0", false, "egress_lossless_profile,egress_lossy_profile", false); + + // Initialize a port with all profiles undefined + InitPort("Ethernet8"); + InitBufferPg("Ethernet8|0", "ingress_not_defined_profile"); + InitBufferQueue("Ethernet8|0", "egress_not_defined_profile"); + InitBufferProfileList("Ethernet8", "egress_not_defined_profile", bufferEgrProfileListTable); + InitBufferProfileList("Ethernet8", "ingress_not_defined_profile", bufferIngProfileListTable); + + // All default buffer profiles should be generated and pushed into BUFFER_PROFILE_TABLE + static_cast(m_dynamicBuffer)->doTask(); + + InitMmuSize(); + SetPortInitDone(); + m_dynamicBuffer->doTask(m_selectableTable); + + InitDefaultLosslessParameter(); + m_dynamicBuffer->doTask(m_selectableTable); + + CheckPg("Ethernet0", "Ethernet0:3-4"); + InitCableLength("Ethernet0", "5m"); + auto expectedProfile = "pg_lossless_100000_5m_profile"; + CheckPg("Ethernet0", "Ethernet0:3-4", expectedProfile); + CheckQueue("Ethernet0", "Ethernet0:3-4", "egress_lossless_profile", true); + + CheckProfileList("Ethernet0", true, "ingress_lossless_profile", true); + CheckProfileList("Ethernet0", false, "egress_lossless_profile,egress_lossy_profile", true); + + // Check no items applied on port Ethernet8 + ASSERT_EQ(appBufferPgTable.get("Ethernet8:0", fieldValues), false); + CheckQueue("Ethernet8", "Ethernet8:0", "", false); + CheckProfileList("Ethernet8", true, "", false); + CheckProfileList("Ethernet8", false, "", false); + + // Configure the missing buffer profiles + bufferProfileTable.set("ingress_not_defined_profile", + { + {"pool", "ingress_lossless_pool"}, + {"dynamic_th", "0"}, + {"size", "0"} + }); + bufferProfileTable.set("egress_not_defined_profile", + { + {"pool", "egress_lossless_pool"}, + {"dynamic_th", "0"}, + {"size", "0"} + 
}); + m_dynamicBuffer->addExistingData(&bufferProfileTable); + // For buffer profile + static_cast(m_dynamicBuffer)->doTask(); + // For all other items + static_cast(m_dynamicBuffer)->doTask(); + ASSERT_EQ(appBufferPgTable.get("Ethernet8:0", fieldValues), true); + ASSERT_EQ(fvValue(fieldValues[0]), "ingress_not_defined_profile"); + CheckQueue("Ethernet8", "Ethernet8:0", "egress_not_defined_profile", true); + CheckProfileList("Ethernet8", true, "ingress_not_defined_profile", true); + CheckProfileList("Ethernet8", false, "egress_not_defined_profile", true); + + InitPort("Ethernet4"); + InitPort("Ethernet6"); + InitBufferQueue("Ethernet6|0-2", "egress_lossy_profile"); + InitBufferProfileList("Ethernet6", "ingress_lossless_profile", bufferIngProfileListTable); + + // Buffer queue/PG/profile lists with wrong direction should not overwrite the existing ones + vector ingressProfiles = {"egress_lossy_profile", "ingress_profile", ""}; + vector portsToTest = {"Ethernet0", "Ethernet4"}; + for (auto port : portsToTest) + { + for (auto ingressProfile : ingressProfiles) + { + InitBufferPg(port + "|3-4", ingressProfile); + if (port == "Ethernet0") + { + ASSERT_EQ(m_dynamicBuffer->m_portPgLookup["Ethernet0"]["Ethernet0:3-4"].running_profile_name, expectedProfile); + ASSERT_TRUE(appBufferPgTable.get("Ethernet0:3-4", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", expectedProfile}}); + } + else + { + ASSERT_TRUE(m_dynamicBuffer->m_portPgLookup[port].find(port + ":3-4") == m_dynamicBuffer->m_portPgLookup[port].end()); + ASSERT_FALSE(appBufferPgTable.get(port + ":3-4", fieldValues)); + } + } + } + + InitBufferQueue("Ethernet4|0-2", "ingress_lossless_profile"); + ASSERT_TRUE(m_dynamicBuffer->m_portQueueLookup["Ethernet4"]["Ethernet0:0-2"].running_profile_name.empty()); + ASSERT_FALSE(appBufferQueueTable.get("Ethernet4:0-2", fieldValues)); + // No pending notifications + ts.clear(); + m_dynamicBuffer->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 0); + + 
InitBufferQueue("Ethernet6|0-2", "ingress_lossless_profile"); + ASSERT_EQ(m_dynamicBuffer->m_portQueueLookup["Ethernet6"]["Ethernet6:0-2"].running_profile_name, "egress_lossy_profile"); + ASSERT_TRUE(appBufferQueueTable.get("Ethernet6:0-2", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", "egress_lossy_profile"}}); + // No pending notifications + m_dynamicBuffer->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 0); + + // Wrong direction + InitBufferProfileList("Ethernet4", "egress_lossless_profile", bufferIngProfileListTable); + ASSERT_TRUE(m_dynamicBuffer->m_portProfileListLookups[BUFFER_INGRESS]["Ethernet4"].empty()); + ASSERT_FALSE(appBufferIngProfileListTable.get("Ethernet4", fieldValues)); + // No pending notifications + m_dynamicBuffer->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 0); + + InitBufferProfileList("Ethernet6", "egress_lossless_profile", bufferIngProfileListTable); + ASSERT_EQ(m_dynamicBuffer->m_portProfileListLookups[BUFFER_INGRESS]["Ethernet6"], "ingress_lossless_profile"); + ASSERT_TRUE(appBufferIngProfileListTable.get("Ethernet6", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile_list", "ingress_lossless_profile"}}); + // No pending notifications + m_dynamicBuffer->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 0); + + // Profile with wrong mode should not override the existing entries + vector wrong_profile_names = {"ingress_lossless_profile", "wrong_param_profile"}; + vector> wrong_profile_patterns = { + // wrong threshold mode + { + {"pool", "ingress_lossless_pool"}, + {"static_th", "100"}, + {"size", "0"} + }, + // unconfigured pool + { + {"pool", "ingress_pool"}, + {"dynamic_th", "0"}, + {"size", "0"} + } + }; + auto expected_pending_tasks = 0; + for (auto wrong_profile_name : wrong_profile_names) + { + bool exist = (testBufferProfile.find(wrong_profile_name) != testBufferProfile.end()); + for (auto wrong_profile_pattern : wrong_profile_patterns) + { + bufferProfileTable.set(wrong_profile_name, wrong_profile_pattern); 
+ m_dynamicBuffer->addExistingData(&bufferProfileTable); + static_cast(m_dynamicBuffer)->doTask(); + if (exist) + CheckProfile(m_dynamicBuffer->m_bufferProfileLookup[wrong_profile_name], testBufferProfile[wrong_profile_name]); + else + ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.find(wrong_profile_name), m_dynamicBuffer->m_bufferProfileLookup.end()); + ASSERT_EQ(appBufferProfileTable.get(wrong_profile_name, fieldValues), exist); + // No pending notifications + ts.clear(); + m_dynamicBuffer->dumpPendingTasks(ts); + if (get<1>(wrong_profile_pattern[0]) == "ingress_pool") + expected_pending_tasks++; + ASSERT_EQ(ts.size(), expected_pending_tasks); + } + } + } + + /* + * Clear qos with reclaiming buffer + * + * To test clear qos flow with reclaiming buffer. + * 1. Init buffer manager as normal + * 2. Configure buffer for 2 ports with admin status being up and down respectively + * 3. Clear qos + * 4. Check whether all the buffer items have been removed + * 5. Repeat the flow from step 2 for two extra times: + * - Check whether buffer manager works correctly after clear qos + * - STATE_DB.BUFFER_MAX_PARAM is received before and after buffer items received + */ + TEST_F(BufferMgrDynTest, BufferMgrTestClearQosReclaimingBuffer) + { + vector fieldValues; + vector keys; + vector skippedPools = {"", "ingress_lossless_pool", ""}; + int round = 0; + + SetUpReclaimingBuffer(); + shared_ptr> zero_profile = make_shared>(zeroProfile); + + InitDefaultLosslessParameter(); + InitMmuSize(); + + StartBufferManager(zero_profile); + + statePortTable.set("Ethernet0", + { + {"supported_speeds", "100000,50000,40000,25000,10000,1000"} + }); + InitPort("Ethernet0", "down"); + InitPort("Ethernet4", "down"); + InitPort("Ethernet6", "down"); + InitPort("Ethernet8", "down"); + vector adminDownPorts = {"Ethernet0", "Ethernet4", "Ethernet6"}; + vector ports = {"Ethernet0", "Ethernet2", "Ethernet4", "Ethernet6"}; + InitPort("Ethernet2"); + InitCableLength("Ethernet2", "5m"); + auto 
expectedProfile = "pg_lossless_100000_5m_profile"; + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet0"].state, PORT_ADMIN_DOWN); + + SetPortInitDone(); + for(auto &skippedPool : skippedPools) + { + // Call timer + m_dynamicBuffer->doTask(m_selectableTable); + ASSERT_EQ(m_dynamicBuffer->m_bufferPoolLookup.size(), 0); + InitBufferPool(); + ASSERT_EQ(m_dynamicBuffer->m_bufferPoolLookup.size(), 3); + appBufferPoolTable.getKeys(keys); + ASSERT_EQ(keys.size(), 3); + for (auto i : testBufferPool) + { + CheckPool(m_dynamicBuffer->m_bufferPoolLookup[i.first], testBufferPool[i.first]); + fieldValues.clear(); + appBufferPoolTable.get(i.first, fieldValues); + CheckPool(m_dynamicBuffer->m_bufferPoolLookup[i.first], fieldValues); + } + + InitDefaultBufferProfile(); + appBufferProfileTable.getKeys(keys); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 3); + for (auto i : testBufferProfile) + { + CheckProfile(m_dynamicBuffer->m_bufferProfileLookup[i.first], testBufferProfile[i.first]); + fieldValues.clear(); + appBufferProfileTable.get(i.first, fieldValues); + CheckProfile(m_dynamicBuffer->m_bufferProfileLookup[i.first], fieldValues); + } + + for (auto &adminDownPort : adminDownPorts) + { + InitBufferPg(adminDownPort + "|3-4", "NULL"); + InitBufferQueue(adminDownPort + "|3-4", "egress_lossless_profile"); + InitBufferQueue(adminDownPort + "|0-2", "egress_lossy_profile"); + InitBufferQueue(adminDownPort + "|5-6", "egress_lossy_profile"); + } + InitBufferPg("Ethernet0|0", "ingress_lossy_profile"); + InitBufferPg("Ethernet0|3-4"); + InitBufferProfileList("Ethernet0", "ingress_lossless_profile", bufferIngProfileListTable); + InitBufferProfileList("Ethernet0", "egress_lossless_profile,egress_lossy_profile", bufferEgrProfileListTable); + + // Init buffer items for a normal port and check APPL_DB + InitBufferQueue("Ethernet2|3-4", "egress_lossless_profile"); + InitBufferQueue("Ethernet2|0-2", "egress_lossy_profile"); + 
InitBufferPg("Ethernet2|3-4"); + InitBufferProfileList("Ethernet2", "ingress_lossless_profile", bufferIngProfileListTable); + InitBufferProfileList("Ethernet2", "egress_lossless_profile,egress_lossy_profile", bufferEgrProfileListTable); + + fieldValues.clear(); + ASSERT_TRUE(appBufferPgTable.get("Ethernet2:3-4", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", expectedProfile}}); + fieldValues.clear(); + ASSERT_TRUE(appBufferQueueTable.get("Ethernet2:0-2", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", "egress_lossy_profile"}}); + fieldValues.clear(); + ASSERT_TRUE(appBufferQueueTable.get("Ethernet2:3-4", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", "egress_lossless_profile"}}); + fieldValues.clear(); + ASSERT_TRUE(appBufferIngProfileListTable.get("Ethernet2", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile_list", "ingress_lossless_profile"}}); + fieldValues.clear(); + ASSERT_TRUE(appBufferEgrProfileListTable.get("Ethernet2", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile_list", "egress_lossless_profile,egress_lossy_profile"}}); + + // Buffer pools ready but the port is not ready to be reclaimed + m_dynamicBuffer->doTask(m_selectableTable); + + // Push maximum buffer parameters for the port in order to make it ready to reclaim + if (round == 0) + { + // To simulate different sequences + // The 1st round: STATE_DB.PORT_TABLE is updated after buffer items ready + // The 2nd, 3rd rounds: before + + for (auto &adminDownPort : adminDownPorts) + { + stateBufferTable.set(adminDownPort, + { + {"max_priority_groups", "8"}, + {"max_queues", "16"} + }); + } + stateBufferTable.set("Ethernet8", + { + {"max_priority_groups", "8"}, + {"max_queues", "16"} + }); + m_dynamicBuffer->addExistingData(&stateBufferTable); + static_cast(m_dynamicBuffer)->doTask(); + } + + m_dynamicBuffer->doTask(m_selectableTable); + + // Check whether zero profiles and pool have been applied + appBufferPoolTable.getKeys(keys); + 
ASSERT_EQ(keys.size(), 4); + for (auto key : keys) + { + if (testBufferPool.find(key) == testBufferPool.end()) + { + fieldValues.clear(); + appBufferPoolTable.get(key, fieldValues); + CheckIfVectorsMatch(fieldValues, zeroProfileMap[key]); + } + } + + appBufferProfileTable.getKeys(keys); + for (auto key : keys) + { + if (testBufferProfile.find(key) == testBufferProfile.end()) + { + fieldValues.clear(); + appBufferProfileTable.get(key, fieldValues); + if (zeroProfileMap.find(key) == zeroProfileMap.end()) + CheckIfVectorsMatch(fieldValues, + { + {"xon", ""}, // Due to the limitation of mock lua scricpt call, + {"xoff", ""}, // we can not calculate the number + {"size", ""}, // so expected value is the empty string + {"pool", "ingress_lossless_pool"}, + {"dynamic_th", "0"} + }); + else + CheckIfVectorsMatch(fieldValues, zeroProfileMap[key]); + } + } + + for (auto &adminDownPort : adminDownPorts) + { + fieldValues.clear(); + ASSERT_TRUE(appBufferPgTable.get("Ethernet0:0", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", "ingress_lossy_pg_zero_profile"}}); + ASSERT_FALSE(appBufferPgTable.get("Ethernet0:3-4", fieldValues)); + fieldValues.clear(); + ASSERT_TRUE(appBufferQueueTable.get(adminDownPort + ":0-2", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", "egress_lossy_zero_profile"}}); + fieldValues.clear(); + ASSERT_TRUE(appBufferQueueTable.get(adminDownPort + ":3-4", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", "egress_lossless_zero_profile"}}); + fieldValues.clear(); + ASSERT_TRUE(appBufferQueueTable.get(adminDownPort + ":5-6", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", "egress_lossy_zero_profile"}}); + fieldValues.clear(); + } + ASSERT_TRUE(appBufferIngProfileListTable.get("Ethernet0", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile_list", "ingress_lossless_zero_profile"}}); + fieldValues.clear(); + ASSERT_TRUE(appBufferEgrProfileListTable.get("Ethernet0", fieldValues)); + 
CheckIfVectorsMatch(fieldValues, {{"profile_list", "egress_lossless_zero_profile,egress_lossy_zero_profile"}}); + + // Configured but not applied items. There is an extra delay + m_dynamicBuffer->m_waitApplyAdditionalZeroProfiles = 0; + m_dynamicBuffer->doTask(m_selectableTable); + for (auto &adminDownPort : adminDownPorts) + { + ASSERT_TRUE(appBufferQueueTable.get(adminDownPort + ":7-15", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", "egress_lossy_zero_profile"}}); + fieldValues.clear(); + } + + if (round == 0) + { + ASSERT_TRUE(appBufferQueueTable.get("Ethernet8:0-15", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", "egress_lossy_zero_profile"}}); + fieldValues.clear(); + ASSERT_TRUE(appBufferPgTable.get("Ethernet8:0", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", "ingress_lossy_pg_zero_profile"}}); + fieldValues.clear(); + ClearBufferObject("Ethernet8", CFG_PORT_TABLE_NAME); + ASSERT_FALSE(appBufferPgTable.get("Ethernet8:0", fieldValues)); + ASSERT_FALSE(appBufferQueueTable.get("Ethernet8:0-15", fieldValues)); + } + + ClearBufferObject("Ethernet0|3-4", CFG_BUFFER_QUEUE_TABLE_NAME); + ClearBufferObject("Ethernet4|5-6", CFG_BUFFER_QUEUE_TABLE_NAME); + ClearBufferObject("Ethernet4|0-2", CFG_BUFFER_QUEUE_TABLE_NAME); + // Clear all qos tables + ClearBufferPool(skippedPool); + ClearBufferProfile(); + ClearBufferObject("Ethernet0|0", CFG_BUFFER_PG_TABLE_NAME); + for (auto &adminDownPort : adminDownPorts) + { + ClearBufferObject(adminDownPort + "|3-4", CFG_BUFFER_PG_TABLE_NAME); + } + ClearBufferObject("Ethernet2|3-4", CFG_BUFFER_PG_TABLE_NAME); + ClearBufferObject("Ethernet0|0-2", CFG_BUFFER_QUEUE_TABLE_NAME); + ClearBufferObject("Ethernet2|0-2", CFG_BUFFER_QUEUE_TABLE_NAME); + ClearBufferObject("Ethernet2|3-4", CFG_BUFFER_QUEUE_TABLE_NAME); + ClearBufferObject("Ethernet0|5-6", CFG_BUFFER_QUEUE_TABLE_NAME); + ClearBufferObject("Ethernet4|3-4", CFG_BUFFER_QUEUE_TABLE_NAME); + ClearBufferObject("Ethernet6|0-2", 
CFG_BUFFER_QUEUE_TABLE_NAME); + ClearBufferObject("Ethernet6|3-4", CFG_BUFFER_QUEUE_TABLE_NAME); + ClearBufferObject("Ethernet6|5-6", CFG_BUFFER_QUEUE_TABLE_NAME); + for (auto &port : ports) + { + ClearBufferObject(port, CFG_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME); + ClearBufferObject(port, CFG_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME); + } + + // Run timer + m_dynamicBuffer->doTask(m_selectableTable); + + if (!skippedPool.empty()) + { + // Clear the pool that was skipped in the previous step + // This is to simulate the case where all the pools are not removed in one-shot + ClearBufferPool("", skippedPool); + m_dynamicBuffer->doTask(m_selectableTable); + } + + // All internal data and APPL_DB has been cleared + ASSERT_TRUE((appBufferPgTable.getKeys(keys), keys.empty())); + ASSERT_TRUE((appBufferQueueTable.getKeys(keys), keys.empty())); + ASSERT_TRUE((appBufferProfileTable.getKeys(keys), keys.empty())); + ASSERT_TRUE((appBufferPoolTable.getKeys(keys), keys.empty())); + ASSERT_TRUE((appBufferIngProfileListTable.getKeys(keys), keys.empty())); + ASSERT_TRUE((appBufferEgrProfileListTable.getKeys(keys), keys.empty())); + ASSERT_TRUE(m_dynamicBuffer->m_bufferPoolLookup.empty()); + ASSERT_TRUE(m_dynamicBuffer->m_bufferProfileLookup.empty()); + ASSERT_TRUE(m_dynamicBuffer->m_portPgLookup.empty()); + ASSERT_TRUE(m_dynamicBuffer->m_portQueueLookup.empty()); + ASSERT_TRUE(m_dynamicBuffer->m_portProfileListLookups[BUFFER_EGRESS].empty()); + ASSERT_TRUE(m_dynamicBuffer->m_portProfileListLookups[BUFFER_INGRESS].empty()); + + round++; + } + } + + + /* + * Clear qos with reclaiming buffer sad flows + * Reclaiming buffer should be triggered via any single buffer item + */ + TEST_F(BufferMgrDynTest, BufferMgrTestReclaimingBufferSadFlows) + { + vector fieldValues; + vector keys; + vector> bufferItems; + + bufferItems.emplace_back(bufferPgTable, "Ethernet0:0", "ingress_lossy_profile", appBufferPgTable, "profile", "ingress_lossy_pg_zero_profile"); + bufferItems.emplace_back(bufferPgTable, 
"Ethernet0:3-4", "NULL", appBufferPgTable, "", ""); + bufferItems.emplace_back(bufferQueueTable, "Ethernet0:0-2", "egress_lossy_profile", appBufferQueueTable, "profile", "egress_lossy_zero_profile"); + bufferItems.emplace_back(bufferQueueTable, "Ethernet0:3-4", "egress_lossless_profile", appBufferQueueTable, "profile", "egress_lossless_zero_profile"); + bufferItems.emplace_back(bufferIngProfileListTable, "Ethernet0", "ingress_lossless_profile", appBufferIngProfileListTable, "profile_list", "ingress_lossless_zero_profile"); + bufferItems.emplace_back(bufferEgrProfileListTable, "Ethernet0", "egress_lossless_profile,egress_lossy_profile", appBufferEgrProfileListTable, "profile_list", "egress_lossless_zero_profile,egress_lossy_zero_profile"); + + SetUpReclaimingBuffer(); + shared_ptr> zero_profile = make_shared>(zeroProfile); + + InitDefaultLosslessParameter(); + InitMmuSize(); + + StartBufferManager(zero_profile); + + stateBufferTable.set("Ethernet0", + { + {"max_priority_groups", "8"}, + {"max_queues", "16"} + }); + m_dynamicBuffer->addExistingData(&stateBufferTable); + static_cast(m_dynamicBuffer)->doTask(); + + InitPort("Ethernet0", "down"); + + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet0"].state, PORT_ADMIN_DOWN); + + SetPortInitDone(); + m_dynamicBuffer->doTask(m_selectableTable); + + // After "config qos clear" the zero buffer profiles are unloaded + m_dynamicBuffer->unloadZeroPoolAndProfiles(); + + // Starts with empty buffer tables + for(auto &bufferItem : bufferItems) + { + auto &cfgTable = get<0>(bufferItem); + auto &key = get<1>(bufferItem); + auto &profile = get<2>(bufferItem); + auto &appTable = get<3>(bufferItem); + auto &fieldName = get<4>(bufferItem); + auto &expectedProfile = get<5>(bufferItem); + + cfgTable.set(key, + { + {fieldName, profile} + }); + m_dynamicBuffer->addExistingData(&cfgTable); + static_cast(m_dynamicBuffer)->doTask(); + + ASSERT_FALSE(m_dynamicBuffer->m_bufferCompletelyInitialized); + 
ASSERT_FALSE(m_dynamicBuffer->m_zeroProfilesLoaded); + ASSERT_TRUE(m_dynamicBuffer->m_portInitDone); + ASSERT_TRUE(m_dynamicBuffer->m_pendingApplyZeroProfilePorts.find("Ethernet0") != m_dynamicBuffer->m_pendingApplyZeroProfilePorts.end()); + + InitBufferPool(); + InitDefaultBufferProfile(); + + m_dynamicBuffer->doTask(m_selectableTable); + + // Another doTask to ensure all the dependent tables have been drained + // after buffer pools and profiles have been drained + static_cast(m_dynamicBuffer)->doTask(); + + if (expectedProfile.empty()) + { + ASSERT_FALSE(appTable.get(key, fieldValues)); + } + else + { + ASSERT_TRUE(appTable.get(key, fieldValues)); + CheckIfVectorsMatch(fieldValues, {{fieldName, expectedProfile}}); + } + + m_dynamicBuffer->m_waitApplyAdditionalZeroProfiles = 0; + m_dynamicBuffer->doTask(m_selectableTable); + + ASSERT_TRUE(m_dynamicBuffer->m_pendingApplyZeroProfilePorts.empty()); + ASSERT_TRUE(m_dynamicBuffer->m_bufferCompletelyInitialized); + + // Simulate clear qos + ClearBufferPool(); + ClearBufferProfile(); + + // Call timer + m_dynamicBuffer->doTask(m_selectableTable); + } + } + + /* + * Port removing flow + */ + TEST_F(BufferMgrDynTest, BufferMgrTestRemovePort) + { + vector fieldValues; + vector keys; + vector statuses = {"up", "down"}; + + // Prepare information that will be read at the beginning + InitDefaultLosslessParameter(); + InitMmuSize(); + + shared_ptr> zero_profile = make_shared>(zeroProfile); + StartBufferManager(zero_profile); + + SetPortInitDone(); + // Timer will be called + m_dynamicBuffer->doTask(m_selectableTable); + + InitBufferPool(); + appBufferPoolTable.getKeys(keys); + ASSERT_EQ(keys.size(), 3); + InitDefaultBufferProfile(); + appBufferProfileTable.getKeys(keys); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 3); + + m_dynamicBuffer->m_bufferCompletelyInitialized = true; + m_dynamicBuffer->m_waitApplyAdditionalZeroProfiles = 0; + InitCableLength("Ethernet0", "5m"); + + for(auto 
status : statuses) + { + bool admin_up = (status == "up"); + + InitPort("Ethernet0", status); + ASSERT_TRUE(m_dynamicBuffer->m_portInfoLookup.find("Ethernet0") != m_dynamicBuffer->m_portInfoLookup.end()); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet0"].state, admin_up ? PORT_READY : PORT_ADMIN_DOWN); + + // Init port buffer items + InitBufferQueue("Ethernet0|3-4", "egress_lossless_profile"); + InitBufferProfileList("Ethernet0", "ingress_lossless_profile", bufferIngProfileListTable); + InitBufferPg("Ethernet0|3-4"); + if (admin_up) + { + InitBufferProfileList("Ethernet0", "egress_lossless_profile,egress_lossy_profile", bufferEgrProfileListTable); + + auto expectedProfile = "pg_lossless_100000_5m_profile"; + CheckPg("Ethernet0", "Ethernet0:3-4", expectedProfile); + CheckQueue("Ethernet0", "Ethernet0:3-4", "egress_lossless_profile", true); + CheckProfileList("Ethernet0", true, "ingress_lossless_profile"); + CheckProfileList("Ethernet0", false, "egress_lossless_profile,egress_lossy_profile"); + } + else + { + InitBufferPg("Ethernet0|0", "ingress_lossy_profile"); + + stateBufferTable.set("Ethernet0", + { + {"max_priority_groups", "8"}, + {"max_queues", "16"} + }); + m_dynamicBuffer->addExistingData(&stateBufferTable); + static_cast(m_dynamicBuffer)->doTask(); + + // Make sure profile list is applied after maximum buffer parameter table + InitBufferProfileList("Ethernet0", "egress_lossless_profile,egress_lossy_profile", bufferEgrProfileListTable); + + fieldValues.clear(); + ASSERT_TRUE(appBufferPgTable.get("Ethernet0:0", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", "ingress_lossy_pg_zero_profile"}}); + + fieldValues.clear(); + ASSERT_TRUE(appBufferQueueTable.get("Ethernet0:3-4", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", "egress_lossless_zero_profile"}}); + + fieldValues.clear(); + ASSERT_TRUE(appBufferQueueTable.get("Ethernet0:0-2", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", 
"egress_lossy_zero_profile"}}); + + fieldValues.clear(); + ASSERT_TRUE(appBufferQueueTable.get("Ethernet0:5-15", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", "egress_lossy_zero_profile"}}); + + fieldValues.clear(); + ASSERT_TRUE(appBufferIngProfileListTable.get("Ethernet0", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile_list", "ingress_lossless_zero_profile"}}); + + fieldValues.clear(); + ASSERT_TRUE(appBufferEgrProfileListTable.get("Ethernet0", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile_list", "egress_lossless_zero_profile,egress_lossy_zero_profile"}}); + + ClearBufferObject("Ethernet0|0", CFG_BUFFER_PG_TABLE_NAME); + } + + // Remove port + ClearBufferObject("Ethernet0", CFG_PORT_TABLE_NAME); + ASSERT_FALSE(m_dynamicBuffer->m_portPgLookup.empty()); + ClearBufferObject("Ethernet0", CFG_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME); + ClearBufferObject("Ethernet0", CFG_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME); + ClearBufferObject("Ethernet0|3-4", CFG_BUFFER_PG_TABLE_NAME); + ClearBufferObject("Ethernet0|3-4", CFG_BUFFER_QUEUE_TABLE_NAME); + static_cast(m_dynamicBuffer)->doTask(); + ASSERT_TRUE(m_dynamicBuffer->m_portPgLookup.empty()); + ASSERT_TRUE(m_dynamicBuffer->m_portQueueLookup.empty()); + ASSERT_TRUE(m_dynamicBuffer->m_portProfileListLookups[BUFFER_INGRESS].empty()); + ASSERT_TRUE(m_dynamicBuffer->m_portProfileListLookups[BUFFER_EGRESS].empty()); + ASSERT_TRUE((appBufferPgTable.getKeys(keys), keys.empty())); + ASSERT_TRUE((appBufferQueueTable.getKeys(keys), keys.empty())); + ASSERT_TRUE((appBufferIngProfileListTable.getKeys(keys), keys.empty())); + ASSERT_TRUE((appBufferEgrProfileListTable.getKeys(keys), keys.empty())); + } + } + + /* + * Port configuration flow + * Port table items are received in different order + */ + TEST_F(BufferMgrDynTest, BufferMgrTestPortConfigFlow) + { + // Prepare information that will be read at the beginning + StartBufferManager(); + + /* + * Speed, admin up, cable length + */ + 
portTable.set("Ethernet0", + { + {"speed", "100000"} + }); + HandleTable(portTable); + ASSERT_TRUE(m_dynamicBuffer->m_portInfoLookup.find("Ethernet0") != m_dynamicBuffer->m_portInfoLookup.end()); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet0"].state, PORT_ADMIN_DOWN); + + portTable.set("Ethernet0", + { + {"speed", "100000"}, + {"admin_status", "up"} + }); + HandleTable(portTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet0"].state, PORT_INITIALIZING); + + cableLengthTable.set("AZURE", + { + {"Ethernet0", "5m"} + }); + HandleTable(cableLengthTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet0"].state, PORT_READY); + + /* + * Speed, admin down, cable length, admin up + */ + portTable.set("Ethernet4", + { + {"speed", "100000"}, + {"admin_status", "down"} + }); + HandleTable(portTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet4"].state, PORT_ADMIN_DOWN); + cableLengthTable.set("AZURE", + { + {"Ethernet4", "5m"} + }); + HandleTable(cableLengthTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet4"].state, PORT_ADMIN_DOWN); + portTable.set("Ethernet4", + { + {"speed", "100000"}, + {"admin_status", "up"} + }); + HandleTable(portTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet4"].state, PORT_READY); + + /* + * Auto-negotiation: supported speeds received after port table + */ + portTable.set("Ethernet8", + { + {"speed", "100000"}, + {"admin_status", "up"}, + {"autoneg", "on"} + }); + HandleTable(portTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet8"].state, PORT_INITIALIZING); + ASSERT_TRUE(m_dynamicBuffer->m_portInfoLookup["Ethernet8"].effective_speed.empty()); + + cableLengthTable.set("AZURE", + { + {"Ethernet8", "5m"} + }); + HandleTable(cableLengthTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet8"].state, PORT_INITIALIZING); + + statePortTable.set("Ethernet8", + { + {"supported_speeds", "100000,50000,40000,25000,10000,1000"} + }); + HandleTable(statePortTable); + 
ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet8"].effective_speed, "100000"); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet8"].state, PORT_READY); + + /* + * Auto-negotiation: supported speeds received before port table + */ + statePortTable.set("Ethernet12", + { + {"supported_speeds", "100000,50000,40000,25000,10000,1000"} + }); + HandleTable(statePortTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet12"].supported_speeds, "100000,50000,40000,25000,10000,1000"); + + portTable.set("Ethernet12", + { + {"speed", "100000"}, + {"admin_status", "up"}, + {"autoneg", "on"} + }); + HandleTable(portTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet12"].state, PORT_INITIALIZING); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet12"].effective_speed, "100000"); + + cableLengthTable.set("AZURE", + { + {"Ethernet12", "5m"} + }); + HandleTable(cableLengthTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet12"].state, PORT_READY); + } +} diff --git a/tests/mock_tests/bufferorch_ut.cpp b/tests/mock_tests/bufferorch_ut.cpp new file mode 100644 index 0000000000..0ffb1997b5 --- /dev/null +++ b/tests/mock_tests/bufferorch_ut.cpp @@ -0,0 +1,677 @@ +#define private public // make Directory::m_values available to clean it. 
+#include "directory.h" +#undef private +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_table.h" +#include "mock_response_publisher.h" + +extern string gMySwitchType; + +extern std::unique_ptr gMockResponsePublisher; + +namespace bufferorch_test +{ + using namespace std; + + sai_port_api_t ut_sai_port_api; + sai_port_api_t *pold_sai_port_api; + sai_buffer_api_t ut_sai_buffer_api; + sai_buffer_api_t *pold_sai_buffer_api; + sai_queue_api_t ut_sai_queue_api; + sai_queue_api_t *pold_sai_queue_api; + + shared_ptr m_app_db; + shared_ptr m_app_state_db; + shared_ptr m_config_db; + shared_ptr m_state_db; + shared_ptr m_chassis_app_db; + + uint32_t _ut_stub_expected_profile_count; + uint32_t _ut_stub_port_profile_list_add_count; + uint32_t _ut_stub_port_profile_list_del_count; + sai_port_attr_t _ut_stub_expected_profile_list_type; + sai_status_t _ut_stub_sai_set_port_attribute( + _In_ sai_object_id_t port_id, + _In_ const sai_attribute_t *attr) + { + if (attr[0].id == _ut_stub_expected_profile_list_type) + { + if (_ut_stub_expected_profile_count == attr[0].value.objlist.count) + { + if (_ut_stub_expected_profile_count != 0) + { + _ut_stub_port_profile_list_add_count++; + } + else + { + _ut_stub_port_profile_list_del_count++; + } + } + } + return pold_sai_port_api->set_port_attribute(port_id, attr); + } + + uint32_t _ut_stub_set_pg_count; + sai_status_t _ut_stub_sai_set_ingress_priority_group_attribute( + _In_ sai_object_id_t ingress_priority_group_id, + _In_ const sai_attribute_t *attr) + { + _ut_stub_set_pg_count++; + return pold_sai_buffer_api->set_ingress_priority_group_attribute(ingress_priority_group_id, attr); + } + + uint32_t _ut_stub_set_queue_count; + sai_status_t _ut_stub_sai_set_queue_attribute( + _In_ sai_object_id_t queue_id, + _In_ const sai_attribute_t *attr) + { + _ut_stub_set_queue_count++; + return pold_sai_queue_api->set_queue_attribute(queue_id, attr); + } + + void 
_hook_sai_apis() + { + ut_sai_port_api = *sai_port_api; + pold_sai_port_api = sai_port_api; + ut_sai_port_api.set_port_attribute = _ut_stub_sai_set_port_attribute; + sai_port_api = &ut_sai_port_api; + + ut_sai_buffer_api = *sai_buffer_api; + pold_sai_buffer_api = sai_buffer_api; + ut_sai_buffer_api.set_ingress_priority_group_attribute = _ut_stub_sai_set_ingress_priority_group_attribute; + sai_buffer_api = &ut_sai_buffer_api; + + ut_sai_queue_api = *sai_queue_api; + pold_sai_queue_api = sai_queue_api; + ut_sai_queue_api.set_queue_attribute = _ut_stub_sai_set_queue_attribute; + sai_queue_api = &ut_sai_queue_api; + } + + void _unhook_sai_apis() + { + sai_port_api = pold_sai_port_api; + sai_buffer_api = pold_sai_buffer_api; + sai_queue_api = pold_sai_queue_api; + } + + struct BufferOrchTest : public ::testing::Test + { + BufferOrchTest() + { + } + + void CheckDependency(const string &referencingTableName, const string &referencingObjectName, const string &field, const string &dependentTableName, const string &dependentObjectNames="") + { + auto &bufferTypeMaps = BufferOrch::m_buffer_type_maps; + auto &referencingTable = (*bufferTypeMaps[referencingTableName]); + auto &dependentTable = (*bufferTypeMaps[dependentTableName]); + + if (dependentObjectNames.empty()) + { + ASSERT_TRUE(referencingTable[referencingObjectName].m_objsReferencingByMe[field].empty()); + } + else + { + auto objects = tokenize(dependentObjectNames, ','); + string reference; + for (auto &object : objects) + { + reference += dependentTableName + ":" + object + ","; + ASSERT_EQ(dependentTable[object].m_objsDependingOnMe.count(referencingObjectName), 1); + } + //reference.pop(); + ASSERT_EQ(referencingTable[referencingObjectName].m_objsReferencingByMe[field] + ",", reference); + } + } + + void RemoveItem(const string &table, const string &key) + { + std::deque entries; + entries.push_back({key, "DEL", {}}); + auto consumer = dynamic_cast(gBufferOrch->getExecutor(table)); + consumer->addToSync(entries); + 
} + + void SetUp() override + { + ASSERT_EQ(sai_route_api, nullptr); + map profile = { + { "SAI_VS_SWITCH_TYPE", "SAI_VS_SWITCH_TYPE_BCM56850" }, + { "KV_DEVICE_MAC_ADDRESS", "20:03:04:05:06:00" } + }; + + ut_helper::initSaiApi(profile); + + // Init switch and create dependencies + m_app_db = make_shared("APPL_DB", 0); + m_config_db = make_shared("CONFIG_DB", 0); + m_state_db = make_shared("STATE_DB", 0); + m_app_state_db = make_shared("APPL_STATE_DB", 0); + if(gMySwitchType == "voq") + m_chassis_app_db = make_shared("CHASSIS_APP_DB", 0); + + sai_attribute_t attr; + + attr.id = SAI_SWITCH_ATTR_INIT_SWITCH; + attr.value.booldata = true; + + auto status = sai_switch_api->create_switch(&gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + // Get switch source MAC address + attr.id = SAI_SWITCH_ATTR_SRC_MAC_ADDRESS; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gMacAddress = attr.value.mac; + + // Get the default virtual router ID + attr.id = SAI_SWITCH_ATTR_DEFAULT_VIRTUAL_ROUTER_ID; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gVirtualRouterId = attr.value.oid; + + ASSERT_EQ(gCrmOrch, nullptr); + gCrmOrch = new CrmOrch(m_config_db.get(), CFG_CRM_TABLE_NAME); + + TableConnector stateDbSwitchTable(m_state_db.get(), "SWITCH_CAPABILITY"); + TableConnector conf_asic_sensors(m_config_db.get(), CFG_ASIC_SENSORS_TABLE_NAME); + TableConnector app_switch_table(m_app_db.get(), APP_SWITCH_TABLE_NAME); + + vector switch_tables = { + conf_asic_sensors, + app_switch_table + }; + + ASSERT_EQ(gSwitchOrch, nullptr); + gSwitchOrch = new SwitchOrch(m_app_db.get(), switch_tables, stateDbSwitchTable); + + // Create dependencies ... 
+ + const int portsorch_base_pri = 40; + + vector ports_tables = { + { APP_PORT_TABLE_NAME, portsorch_base_pri + 5 }, + { APP_VLAN_TABLE_NAME, portsorch_base_pri + 2 }, + { APP_VLAN_MEMBER_TABLE_NAME, portsorch_base_pri }, + { APP_LAG_TABLE_NAME, portsorch_base_pri + 4 }, + { APP_LAG_MEMBER_TABLE_NAME, portsorch_base_pri } + }; + + vector flex_counter_tables = { + CFG_FLEX_COUNTER_TABLE_NAME + }; + auto* flexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); + gDirectory.set(flexCounterOrch); + + ASSERT_EQ(gPortsOrch, nullptr); + gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + + ASSERT_EQ(gVrfOrch, nullptr); + gVrfOrch = new VRFOrch(m_app_db.get(), APP_VRF_TABLE_NAME, m_state_db.get(), STATE_VRF_OBJECT_TABLE_NAME); + + ASSERT_EQ(gIntfsOrch, nullptr); + gIntfsOrch = new IntfsOrch(m_app_db.get(), APP_INTF_TABLE_NAME, gVrfOrch, m_chassis_app_db.get()); + + const int fdborch_pri = 20; + + vector app_fdb_tables = { + { APP_FDB_TABLE_NAME, FdbOrch::fdborch_pri}, + { APP_VXLAN_FDB_TABLE_NAME, FdbOrch::fdborch_pri}, + { APP_MCLAG_FDB_TABLE_NAME, fdborch_pri} + }; + + TableConnector stateDbFdb(m_state_db.get(), STATE_FDB_TABLE_NAME); + TableConnector stateMclagDbFdb(m_state_db.get(), STATE_MCLAG_REMOTE_FDB_TABLE_NAME); + ASSERT_EQ(gFdbOrch, nullptr); + gFdbOrch = new FdbOrch(m_app_db.get(), app_fdb_tables, stateDbFdb, stateMclagDbFdb, gPortsOrch); + + ASSERT_EQ(gNeighOrch, nullptr); + gNeighOrch = new NeighOrch(m_app_db.get(), APP_NEIGH_TABLE_NAME, gIntfsOrch, gFdbOrch, gPortsOrch, m_chassis_app_db.get()); + + vector qos_tables = { + CFG_TC_TO_QUEUE_MAP_TABLE_NAME, + CFG_SCHEDULER_TABLE_NAME, + CFG_DSCP_TO_TC_MAP_TABLE_NAME, + CFG_MPLS_TC_TO_TC_MAP_TABLE_NAME, + CFG_DOT1P_TO_TC_MAP_TABLE_NAME, + CFG_QUEUE_TABLE_NAME, + CFG_PORT_QOS_MAP_TABLE_NAME, + CFG_WRED_PROFILE_TABLE_NAME, + CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, + CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, + 
CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, + CFG_DSCP_TO_FC_MAP_TABLE_NAME, + CFG_EXP_TO_FC_MAP_TABLE_NAME + }; + gQosOrch = new QosOrch(m_config_db.get(), qos_tables); + + // Recreate buffer orch to read populated data + vector buffer_tables = { APP_BUFFER_POOL_TABLE_NAME, + APP_BUFFER_PROFILE_TABLE_NAME, + APP_BUFFER_QUEUE_TABLE_NAME, + APP_BUFFER_PG_TABLE_NAME, + APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, + APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME }; + + gBufferOrch = new BufferOrch(m_app_db.get(), m_config_db.get(), m_state_db.get(), buffer_tables); + + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate pot table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + + portTable.set("PortInitDone", { { "lanes", "0" } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + + Table bufferPoolTable = Table(m_app_db.get(), APP_BUFFER_POOL_TABLE_NAME); + Table bufferProfileTable = Table(m_app_db.get(), APP_BUFFER_PROFILE_TABLE_NAME); + + bufferPoolTable.set("ingress_lossless_pool", + { + {"size", "1024000"}, + {"mode", "dynamic"}, + {"type", "ingress"} + }); + bufferPoolTable.set("ingress_lossy_pool", + { + {"size", "1024000"}, + {"mode", "dynamic"}, + {"type", "ingress"} + }); + bufferProfileTable.set("ingress_lossless_profile", + { + {"pool", "ingress_lossless_pool"}, + {"size", "0"}, + {"dynamic_th", "0"} + }); + bufferProfileTable.set("ingress_lossy_profile", + { + {"pool", "ingress_lossy_pool"}, + {"size", "0"}, + {"dynamic_th", "0"} + }); + + gBufferOrch->addExistingData(&bufferPoolTable); + gBufferOrch->addExistingData(&bufferProfileTable); + + static_cast(gBufferOrch)->doTask(); + } + + void 
TearDown() override + { + auto buffer_maps = BufferOrch::m_buffer_type_maps; + for (auto &i : buffer_maps) + { + i.second->clear(); + } + + gDirectory.m_values.clear(); + + delete gCrmOrch; + gCrmOrch = nullptr; + + delete gSwitchOrch; + gSwitchOrch = nullptr; + + delete gVrfOrch; + gVrfOrch = nullptr; + + delete gIntfsOrch; + gIntfsOrch = nullptr; + + delete gNeighOrch; + gNeighOrch = nullptr; + + delete gFdbOrch; + gFdbOrch = nullptr; + + delete gPortsOrch; + gPortsOrch = nullptr; + + delete gQosOrch; + gQosOrch = nullptr; + + ut_helper::uninitSaiApi(); + } + }; + + TEST_F(BufferOrchTest, BufferOrchTestSharedHeadroomPool) + { + gMockResponsePublisher = std::make_unique(); + + Table bufferPoolTable = Table(m_app_db.get(), APP_BUFFER_POOL_TABLE_NAME); + Table bufferPoolStateTable = Table(m_app_state_db.get(), APP_BUFFER_POOL_TABLE_NAME); + + bufferPoolTable.set("ingress_lossless_pool", + { + {"xoff", "10240"} + }); + gBufferOrch->addExistingData(&bufferPoolTable); + EXPECT_CALL(*gMockResponsePublisher, publish(APP_BUFFER_POOL_TABLE_NAME, "ingress_lossless_pool", std::vector{{"xoff", "10240"}}, ReturnCode(SAI_STATUS_SUCCESS), true)).Times(1); + static_cast(gBufferOrch)->doTask(); + + gMockResponsePublisher.reset(); + } + + TEST_F(BufferOrchTest, BufferOrchTestBufferPgReferencingObjRemoveThenAdd) + { + _hook_sai_apis(); + vector ts; + std::deque entries; + Table bufferPgTable = Table(m_app_db.get(), APP_BUFFER_PG_TABLE_NAME); + + bufferPgTable.set("Ethernet0:0", + { + {"profile", "ingress_lossy_profile"} + }); + gBufferOrch->addExistingData(&bufferPgTable); + static_cast(gBufferOrch)->doTask(); + CheckDependency(APP_BUFFER_PG_TABLE_NAME, "Ethernet0:0", "profile", APP_BUFFER_PROFILE_TABLE_NAME, "ingress_lossy_profile"); + + // Remove referenced obj + entries.push_back({"ingress_lossy_profile", "DEL", {}}); + auto bufferProfileConsumer = dynamic_cast(gBufferOrch->getExecutor(APP_BUFFER_PROFILE_TABLE_NAME)); + bufferProfileConsumer->addToSync(entries); + 
entries.clear(); + // Drain BUFFER_PROFILE_TABLE + static_cast(gBufferOrch)->doTask(); + // Make sure the dependency remains + CheckDependency(APP_BUFFER_PG_TABLE_NAME, "Ethernet0:0", "profile", APP_BUFFER_PROFILE_TABLE_NAME, "ingress_lossy_profile"); + // Make sure the notification isn't drained + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 1); + ASSERT_EQ(ts[0], "BUFFER_PROFILE_TABLE:ingress_lossy_profile|DEL"); + ts.clear(); + + // Remove and readd referencing obj + entries.push_back({"Ethernet0:0", "DEL", {}}); + entries.push_back({"Ethernet0:0", "SET", + { + {"profile", "ingress_lossy_profile"} + }}); + auto bufferPgConsumer = dynamic_cast(gBufferOrch->getExecutor(APP_BUFFER_PG_TABLE_NAME)); + bufferPgConsumer->addToSync(entries); + entries.clear(); + // Drain the BUFFER_PG_TABLE + static_cast(gBufferOrch)->doTask(); + // Drain the BUFFER_PROFILE_TABLE which contains items need to retry + static_cast(gBufferOrch)->doTask(); + // The dependency should be removed + CheckDependency(APP_BUFFER_PG_TABLE_NAME, "Ethernet0:0", "profile", APP_BUFFER_PROFILE_TABLE_NAME); + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 1); + ASSERT_EQ(ts[0], "BUFFER_PG_TABLE:Ethernet0:0|SET|profile:ingress_lossy_profile"); + ts.clear(); + + // Re-create referenced obj + entries.push_back({"ingress_lossy_profile", "SET", + { + {"pool", "ingress_lossy_pool"}, + {"size", "0"}, + {"dynamic_th", "0"} + }}); + bufferProfileConsumer->addToSync(entries); + entries.clear(); + // Drain BUFFER_PROFILE_TABLE table + auto sai_pg_attr_set_count = _ut_stub_set_pg_count; + static_cast(gBufferOrch)->doTask(); + // Make sure the dependency recovers + CheckDependency(APP_BUFFER_PG_TABLE_NAME, "Ethernet0:0", "profile", APP_BUFFER_PROFILE_TABLE_NAME, "ingress_lossy_profile"); + ASSERT_EQ(++sai_pg_attr_set_count, _ut_stub_set_pg_count); + + // All items have been drained + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + // Try 
applying the same profile, which should not call SAI API + entries.push_back({"Ethernet0:0", "SET", + { + {"profile", "ingress_lossy_profile"} + }}); + bufferPgConsumer->addToSync(entries); + entries.clear(); + sai_pg_attr_set_count = _ut_stub_set_pg_count; + static_cast(gBufferOrch)->doTask(); + ASSERT_EQ(sai_pg_attr_set_count, _ut_stub_set_pg_count); + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + _unhook_sai_apis(); + } + + TEST_F(BufferOrchTest, BufferOrchTestReferencingObjRemoveThenAdd) + { + _hook_sai_apis(); + vector ts; + std::deque entries; + Table bufferProfileListTable = Table(m_app_db.get(), APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME); + bufferProfileListTable.set("Ethernet0", + { + {"profile_list", "ingress_lossy_profile,ingress_lossless_profile"} + }); + gBufferOrch->addExistingData(&bufferProfileListTable); + auto sai_port_profile_list_create_count = _ut_stub_port_profile_list_add_count; + _ut_stub_expected_profile_count = 2; + _ut_stub_expected_profile_list_type = SAI_PORT_ATTR_QOS_INGRESS_BUFFER_PROFILE_LIST; + static_cast(gBufferOrch)->doTask(); + ASSERT_EQ(++sai_port_profile_list_create_count, _ut_stub_port_profile_list_add_count); + CheckDependency(APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, "Ethernet0", "profile_list", + APP_BUFFER_PROFILE_TABLE_NAME, "ingress_lossy_profile,ingress_lossless_profile"); + + // Remove and recreate the referenced profile + entries.push_back({"ingress_lossy_profile", "DEL", {}}); + entries.push_back({"ingress_lossy_profile", "SET", + { + {"pool", "ingress_lossy_pool"}, + {"size", "0"}, + {"dynamic_th", "0"} + }}); + auto consumer = dynamic_cast(gBufferOrch->getExecutor(APP_BUFFER_PROFILE_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Drain BUFFER_PROFILE_TABLE table + static_cast(gBufferOrch)->doTask(); + // Make sure the dependency recovers + CheckDependency(APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, "Ethernet0", "profile_list", + APP_BUFFER_PROFILE_TABLE_NAME, 
"ingress_lossy_profile,ingress_lossless_profile"); + // Make sure the notification isn't drained + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 2); + ASSERT_EQ(ts[0], "BUFFER_PROFILE_TABLE:ingress_lossy_profile|DEL"); + ASSERT_EQ(ts[1], "BUFFER_PROFILE_TABLE:ingress_lossy_profile|SET|pool:ingress_lossy_pool|size:0|dynamic_th:0"); + ts.clear(); + + // Remove and recreate the referenced pool + entries.push_back({"ingress_lossy_pool", "DEL", {}}); + entries.push_back({"ingress_lossy_pool", "SET", + { + {"type", "ingress"}, + {"size", "1024000"}, + {"mode", "dynamic"} + }}); + consumer = dynamic_cast(gBufferOrch->getExecutor(APP_BUFFER_POOL_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Drain BUFFER_POOL_TABLE table + static_cast(gBufferOrch)->doTask(); + // Make sure the dependency recovers + CheckDependency(APP_BUFFER_PROFILE_TABLE_NAME, "ingress_lossy_profile", "pool", + APP_BUFFER_POOL_TABLE_NAME, "ingress_lossy_pool"); + // Make sure the notification isn't drained + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 4); + ASSERT_EQ(ts[0], "BUFFER_POOL_TABLE:ingress_lossy_pool|DEL"); + ASSERT_EQ(ts[1], "BUFFER_POOL_TABLE:ingress_lossy_pool|SET|type:ingress|size:1024000|mode:dynamic"); + ASSERT_EQ(ts[2], "BUFFER_PROFILE_TABLE:ingress_lossy_profile|DEL"); + ASSERT_EQ(ts[3], "BUFFER_PROFILE_TABLE:ingress_lossy_profile|SET|pool:ingress_lossy_pool|size:0|dynamic_th:0"); + ts.clear(); + + // Remove ingress port profile list + entries.push_back({"Ethernet0", "DEL", {}}); + consumer = dynamic_cast(gBufferOrch->getExecutor(APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Drain BUFFER_PORT_INGRESS_PROFILE_LIST_TABLE table + _ut_stub_expected_profile_count = 0; + auto sai_port_profile_list_remove_count = _ut_stub_port_profile_list_del_count; + static_cast(gBufferOrch)->doTask(); + ASSERT_EQ(++sai_port_profile_list_remove_count, 
_ut_stub_port_profile_list_del_count); + // Drain BUFFER_PROFILE_TABLE del operation + static_cast(gBufferOrch)->doTask(); + // Drain BUFFER_POOL_TABLE del operation + static_cast(gBufferOrch)->doTask(); + // Drain the rest create operations + static_cast(gBufferOrch)->doTask(); + static_cast(gBufferOrch)->dumpPendingTasks(ts); + // As an side-effect, all pending notifications should be drained + ASSERT_TRUE(ts.empty()); + + // Apply a buffer item only if it is changed + _ut_stub_expected_profile_list_type = SAI_PORT_ATTR_QOS_INGRESS_BUFFER_PROFILE_LIST; + _ut_stub_expected_profile_count = 1; + entries.push_back({"Ethernet0", "SET", + { + {"profile_list", "ingress_lossy_profile"} + }}); + consumer = dynamic_cast(gBufferOrch->getExecutor(APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME)); + consumer->addToSync(entries); + sai_port_profile_list_create_count = _ut_stub_port_profile_list_add_count; + // Drain BUFFER_PORT_INGRESS_PROFILE_LIST_TABLE table + static_cast(gBufferOrch)->doTask(); + ASSERT_EQ(++sai_port_profile_list_create_count, _ut_stub_port_profile_list_add_count); + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + // Try applying it for the second time, which should not call SAI API + consumer->addToSync(entries); + static_cast(gBufferOrch)->doTask(); + ASSERT_EQ(sai_port_profile_list_create_count, _ut_stub_port_profile_list_add_count); + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + // To satisfy the coverage requirement + bufferProfileListTable.set("Ethernet0", + { + {"profile_list", "ingress_no_exist_profile"} + }); + gBufferOrch->addExistingData(&bufferProfileListTable); + static_cast(gBufferOrch)->doTask(); + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_EQ(ts[0], "BUFFER_PORT_INGRESS_PROFILE_LIST_TABLE:Ethernet0|SET|profile_list:ingress_no_exist_profile"); + ts.clear(); + + _unhook_sai_apis(); + } + + TEST_F(BufferOrchTest, BufferOrchTestCreateAndRemoveEgressProfileList) + { + 
_hook_sai_apis(); + vector ts; + std::deque entries; + Table bufferPoolTable = Table(m_app_db.get(), APP_BUFFER_POOL_TABLE_NAME); + Table bufferProfileTable = Table(m_app_db.get(), APP_BUFFER_PROFILE_TABLE_NAME); + Table bufferProfileListTable = Table(m_app_db.get(), APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME); + + // To satisfy the coverage requirement + bufferProfileListTable.set("Ethernet0", + { + {"profile_list", "egress_lossless_profile"} + }); + + gBufferOrch->addExistingData(&bufferProfileListTable); + static_cast(gBufferOrch)->doTask(); + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_EQ(ts[0], "BUFFER_PORT_EGRESS_PROFILE_LIST_TABLE:Ethernet0|SET|profile_list:egress_lossless_profile"); + ts.clear(); + + bufferPoolTable.set("egress_lossless_pool", + { + {"size", "1024000"}, + {"mode", "dynamic"}, + {"type", "egress"} + }); + bufferProfileTable.set("egress_lossless_profile", + { + {"pool", "egress_lossless_pool"}, + {"size", "0"}, + {"dynamic_th", "0"} + }); + + gBufferOrch->addExistingData(&bufferPoolTable); + gBufferOrch->addExistingData(&bufferProfileTable); + + auto sai_port_profile_list_create_count = _ut_stub_port_profile_list_add_count; + _ut_stub_expected_profile_count = 1; + _ut_stub_expected_profile_list_type = SAI_PORT_ATTR_QOS_EGRESS_BUFFER_PROFILE_LIST; + static_cast(gBufferOrch)->doTask(); + ASSERT_EQ(++sai_port_profile_list_create_count, _ut_stub_port_profile_list_add_count); + CheckDependency(APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME, "Ethernet0", "profile_list", + APP_BUFFER_PROFILE_TABLE_NAME, "egress_lossless_profile"); + + // Try applying it for the second time, which should not call SAI API + entries.push_back({"Ethernet0", "SET", + { + {"profile_list", "egress_lossless_profile"} + }}); + auto consumer = dynamic_cast(gBufferOrch->getExecutor(APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME)); + consumer->addToSync(entries); + entries.clear(); + static_cast(gBufferOrch)->doTask(); + ASSERT_EQ(sai_port_profile_list_create_count, 
_ut_stub_port_profile_list_add_count); + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + // Remove egress port profile list + entries.push_back({"Ethernet0", "DEL", {}}); + consumer->addToSync(entries); + entries.clear(); + // Drain BUFFER_PORT_EGRESS_PROFILE_LIST_TABLE table + _ut_stub_expected_profile_count = 0; + auto sai_port_profile_list_remove_count = _ut_stub_port_profile_list_del_count; + static_cast(gBufferOrch)->doTask(); + ASSERT_EQ(++sai_port_profile_list_remove_count, _ut_stub_port_profile_list_del_count); + + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + // Queue table + entries.push_back({"Ethernet0:0", "SET", + { + {"profile", "egress_lossless_profile"} + }}); + consumer = dynamic_cast(gBufferOrch->getExecutor(APP_BUFFER_QUEUE_TABLE_NAME)); + consumer->addToSync(entries); + auto sai_queue_set_count = _ut_stub_set_queue_count; + static_cast(gBufferOrch)->doTask(); + ASSERT_EQ(++sai_queue_set_count, _ut_stub_set_queue_count); + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + consumer->addToSync(entries); + static_cast(gBufferOrch)->doTask(); + ASSERT_EQ(sai_queue_set_count, _ut_stub_set_queue_count); + static_cast(gBufferOrch)->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + _unhook_sai_apis(); + } +} diff --git a/tests/mock_tests/common/mock_shell_command.cpp b/tests/mock_tests/common/mock_shell_command.cpp new file mode 100644 index 0000000000..f56bf4ddd9 --- /dev/null +++ b/tests/mock_tests/common/mock_shell_command.cpp @@ -0,0 +1,25 @@ +#include +#include + +/* Override this pointer for custom behavior */ +int (*callback)(const std::string &cmd, std::string &stdout) = nullptr; + +int mockCmdReturn = 0; +std::string mockCmdStdcout = ""; +std::vector mockCallArgs; + +namespace swss { + int exec(const std::string &cmd, std::string &stdout) + { + if (callback != nullptr) + { + return callback(cmd, stdout); + } + else + { + mockCallArgs.push_back(cmd); + 
stdout = mockCmdStdcout; + return mockCmdReturn; + } + } +} diff --git a/tests/mock_tests/copp_cfg.json b/tests/mock_tests/copp_cfg.json new file mode 100644 index 0000000000..46d921b827 --- /dev/null +++ b/tests/mock_tests/copp_cfg.json @@ -0,0 +1,111 @@ +{ + "COPP_GROUP": { + "default": { + "queue": "0", + "meter_type":"packets", + "mode":"sr_tcm", + "cir":"600", + "cbs":"600", + "red_action":"drop" + }, + "queue4_group1": { + "trap_action":"trap", + "trap_priority":"4", + "queue": "4" + }, + "queue4_group2": { + "trap_action":"copy", + "trap_priority":"4", + "queue": "4", + "meter_type":"packets", + "mode":"sr_tcm", + "cir":"600", + "cbs":"600", + "red_action":"drop" + }, + "queue4_group3": { + "trap_action":"trap", + "trap_priority":"4", + "queue": "4" + }, + "queue1_group1": { + "trap_action":"trap", + "trap_priority":"1", + "queue": "1", + "meter_type":"packets", + "mode":"sr_tcm", + "cir":"6000", + "cbs":"6000", + "red_action":"drop" + }, + "queue1_group2": { + "trap_action":"trap", + "trap_priority":"1", + "queue": "1", + "meter_type":"packets", + "mode":"sr_tcm", + "cir":"600", + "cbs":"600", + "red_action":"drop" + }, + "queue2_group1": { + "cbs": "1000", + "cir": "1000", + "genetlink_mcgrp_name": "packets", + "genetlink_name": "psample", + "meter_type": "packets", + "mode": "sr_tcm", + "queue": "2", + "red_action": "drop", + "trap_action": "trap", + "trap_priority": "1" + + } + }, + "COPP_TRAP": { + "bgp": { + "trap_ids": "bgp,bgpv6", + "trap_group": "queue4_group1" + }, + "lacp": { + "trap_ids": "lacp", + "trap_group": "queue4_group1", + "always_enabled": "true" + }, + "arp": { + "trap_ids": "arp_req,arp_resp,neigh_discovery", + "trap_group": "queue4_group2", + "always_enabled": "true" + }, + "lldp": { + "trap_ids": "lldp", + "trap_group": "queue4_group3" + }, + "dhcp_relay": { + "trap_ids": "dhcp,dhcpv6", + "trap_group": "queue4_group3" + }, + "udld": { + "trap_ids": "udld", + "trap_group": "queue4_group3", + "always_enabled": "true" + }, + "ip2me": { 
+ "trap_ids": "ip2me", + "trap_group": "queue1_group1", + "always_enabled": "true" + }, + "macsec": { + "trap_ids": "eapol", + "trap_group": "queue4_group3" + }, + "nat": { + "trap_ids": "src_nat_miss,dest_nat_miss", + "trap_group": "queue1_group2" + }, + "sflow": { + "trap_group": "queue2_group1", + "trap_ids": "sample_packet" + } + } +} diff --git a/tests/mock_tests/copp_ut.cpp b/tests/mock_tests/copp_ut.cpp new file mode 100644 index 0000000000..1c3b766e1c --- /dev/null +++ b/tests/mock_tests/copp_ut.cpp @@ -0,0 +1,76 @@ +#include "gtest/gtest.h" +#include +#include "schema.h" +#include "warm_restart.h" +#include "ut_helper.h" +#include "coppmgr.h" +#include "coppmgr.cpp" +#include +#include +using namespace std; +using namespace swss; + +void create_init_file() +{ + int status = system("sudo mkdir /etc/sonic/"); + ASSERT_EQ(status, 0); + + status = system("sudo chmod 777 /etc/sonic/"); + ASSERT_EQ(status, 0); + + status = system("sudo cp copp_cfg.json /etc/sonic/"); + ASSERT_EQ(status, 0); +} + +void cleanup() +{ + int status = system("sudo rm -rf /etc/sonic/"); + ASSERT_EQ(status, 0); +} + +TEST(CoppMgrTest, CoppTest) +{ + create_init_file(); + + const vector cfg_copp_tables = { + CFG_COPP_TRAP_TABLE_NAME, + CFG_COPP_GROUP_TABLE_NAME, + CFG_FEATURE_TABLE_NAME, + }; + + WarmStart::initialize("coppmgrd", "swss"); + WarmStart::checkWarmStart("coppmgrd", "swss"); + + DBConnector cfgDb("CONFIG_DB", 0); + DBConnector appDb("APPL_DB", 0); + DBConnector stateDb("STATE_DB", 0); + + /* The test will set an entry with queue1_group1|cbs value which differs from the init value + * found in the copp_cfg.json file. Then coppmgr constructor will be called and it will detect + * that there is already an entry for queue1_group1|cbs with different value and it should be + * overwritten with the init value. + * hget will verify that this indeed happened. 
+ */ + Table coppTable = Table(&appDb, APP_COPP_TABLE_NAME); + coppTable.set("queue1_group1", + { + {"cbs", "6100"}, + {"cir", "6000"}, + {"meter_type", "packets"}, + {"mode", "sr_tcm"}, + {"queue", "1"}, + {"red_action", "drop"}, + {"trap_action", "trap"}, + {"trap_priority", "1"}, + {"trap_ids", "ip2me"} + }); + + CoppMgr coppmgr(&cfgDb, &appDb, &stateDb, cfg_copp_tables); + + string overide_val; + coppTable.hget("queue1_group1", "cbs",overide_val); + EXPECT_EQ( overide_val, "6000"); + + cleanup(); +} + diff --git a/tests/mock_tests/copporch_ut.cpp b/tests/mock_tests/copporch_ut.cpp new file mode 100644 index 0000000000..fa7c360f01 --- /dev/null +++ b/tests/mock_tests/copporch_ut.cpp @@ -0,0 +1,505 @@ +#include +#include +#include +#include +#include + +#include "ut_helper.h" +#include "mock_orchagent_main.h" + +using namespace swss; + +namespace copporch_test +{ + class MockCoppOrch final + { + public: + MockCoppOrch() + { + this->appDb = std::make_shared("APPL_DB", 0); + this->coppOrch = std::make_shared(this->appDb.get(), APP_COPP_TABLE_NAME); + } + ~MockCoppOrch() = default; + + void doCoppTableTask(const std::deque &entries) + { + // ConsumerStateTable is used for APP DB + auto consumer = std::unique_ptr(new Consumer( + new ConsumerStateTable(this->appDb.get(), APP_COPP_TABLE_NAME, 1, 1), + this->coppOrch.get(), APP_COPP_TABLE_NAME + )); + + consumer->addToSync(entries); + static_cast(this->coppOrch.get())->doTask(*consumer); + } + + CoppOrch& get() + { + return *coppOrch; + } + + private: + std::shared_ptr coppOrch; + std::shared_ptr appDb; + }; + + class CoppOrchTest : public ::testing::Test + { + public: + CoppOrchTest() + { + this->initDb(); + } + virtual ~CoppOrchTest() = default; + + void SetUp() override + { + this->initSaiApi(); + this->initSwitch(); + this->initOrch(); + this->initPorts(); + } + + void TearDown() override + { + this->deinitOrch(); + this->deinitSwitch(); + this->deinitSaiApi(); + } + + private: + void initSaiApi() + { + std::map 
profileMap = { + { "SAI_VS_SWITCH_TYPE", "SAI_VS_SWITCH_TYPE_BCM56850" }, + { "KV_DEVICE_MAC_ADDRESS", "20:03:04:05:06:00" } + }; + auto status = ut_helper::initSaiApi(profileMap); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + } + + void deinitSaiApi() + { + auto status = ut_helper::uninitSaiApi(); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + } + + void initSwitch() + { + sai_status_t status; + sai_attribute_t attr; + + // Create switch + attr.id = SAI_SWITCH_ATTR_INIT_SWITCH; + attr.value.booldata = true; + + status = sai_switch_api->create_switch(&gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + // Get switch source MAC address + attr.id = SAI_SWITCH_ATTR_SRC_MAC_ADDRESS; + + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gMacAddress = attr.value.mac; + + // Get switch default virtual router ID + attr.id = SAI_SWITCH_ATTR_DEFAULT_VIRTUAL_ROUTER_ID; + + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gVirtualRouterId = attr.value.oid; + } + + void deinitSwitch() + { + // Remove switch + auto status = sai_switch_api->remove_switch(gSwitchId); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gSwitchId = SAI_NULL_OBJECT_ID; + gVirtualRouterId = SAI_NULL_OBJECT_ID; + } + + void initOrch() + { + // + // SwitchOrch + // + + TableConnector switchCapTableStateDb(this->stateDb.get(), "SWITCH_CAPABILITY"); + TableConnector asicSensorsTableCfgDb(this->configDb.get(), CFG_ASIC_SENSORS_TABLE_NAME); + TableConnector switchTableAppDb(this->appDb.get(), APP_SWITCH_TABLE_NAME); + + std::vector switchTableList = { + asicSensorsTableCfgDb, + switchTableAppDb + }; + + gSwitchOrch = new SwitchOrch(this->appDb.get(), switchTableList, switchCapTableStateDb); + gDirectory.set(gSwitchOrch); + resourcesList.push_back(gSwitchOrch); + + // + // PortsOrch + // + + const int portsorchBasePri = 40; + + std::vector portTableList = { + { APP_PORT_TABLE_NAME, 
portsorchBasePri + 5 }, + { APP_VLAN_TABLE_NAME, portsorchBasePri + 2 }, + { APP_VLAN_MEMBER_TABLE_NAME, portsorchBasePri }, + { APP_LAG_TABLE_NAME, portsorchBasePri + 4 }, + { APP_LAG_MEMBER_TABLE_NAME, portsorchBasePri } + }; + + gPortsOrch = new PortsOrch(this->appDb.get(), this->stateDb.get(), portTableList, this->chassisAppDb.get()); + gDirectory.set(gPortsOrch); + resourcesList.push_back(gPortsOrch); + + // + // QosOrch + // + + std::vector qosTableList = { + CFG_TC_TO_QUEUE_MAP_TABLE_NAME, + CFG_SCHEDULER_TABLE_NAME, + CFG_DSCP_TO_TC_MAP_TABLE_NAME, + CFG_MPLS_TC_TO_TC_MAP_TABLE_NAME, + CFG_DOT1P_TO_TC_MAP_TABLE_NAME, + CFG_QUEUE_TABLE_NAME, + CFG_PORT_QOS_MAP_TABLE_NAME, + CFG_WRED_PROFILE_TABLE_NAME, + CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, + CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, + CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, + CFG_DSCP_TO_FC_MAP_TABLE_NAME, + CFG_EXP_TO_FC_MAP_TABLE_NAME + }; + gQosOrch = new QosOrch(this->configDb.get(), qosTableList); + gDirectory.set(gQosOrch); + resourcesList.push_back(gQosOrch); + + // + // BufferOrch + // + + std::vector bufferTableList = { + APP_BUFFER_POOL_TABLE_NAME, + APP_BUFFER_PROFILE_TABLE_NAME, + APP_BUFFER_QUEUE_TABLE_NAME, + APP_BUFFER_PG_TABLE_NAME, + APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, + APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME + }; + gBufferOrch = new BufferOrch(this->appDb.get(), this->configDb.get(), this->stateDb.get(), bufferTableList); + gDirectory.set(gBufferOrch); + resourcesList.push_back(gBufferOrch); + + // + // PolicerOrch + // + + vector policer_tables = { + TableConnector(this->configDb.get(), CFG_POLICER_TABLE_NAME), + TableConnector(this->configDb.get(), CFG_PORT_STORM_CONTROL_TABLE_NAME) + }; + auto policerOrch = new PolicerOrch(policer_tables, gPortsOrch); + gDirectory.set(policerOrch); + resourcesList.push_back(policerOrch); + + // + // FlexCounterOrch + // + + std::vector flexCounterTableList = { + CFG_FLEX_COUNTER_TABLE_NAME + }; + + auto flexCounterOrch = new 
FlexCounterOrch(this->configDb.get(), flexCounterTableList); + gDirectory.set(flexCounterOrch); + resourcesList.push_back(flexCounterOrch); + } + + void deinitOrch() + { + std::reverse(this->resourcesList.begin(), this->resourcesList.end()); + for (auto &it : this->resourcesList) + { + delete it; + } + + gSwitchOrch = nullptr; + gPortsOrch = nullptr; + gQosOrch = nullptr; + gBufferOrch = nullptr; + + Portal::DirectoryInternal::clear(gDirectory); + EXPECT_TRUE(Portal::DirectoryInternal::empty(gDirectory)); + } + + void initPorts() + { + auto portTable = Table(this->appDb.get(), APP_PORT_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate port table with SAI ports + for (const auto &cit : ports) + { + portTable.set(cit.first, cit.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + + // Set PortInitDone + portTable.set("PortInitDone", { { "lanes", "0" } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + } + + void initDb() + { + this->appDb = std::make_shared("APPL_DB", 0); + this->configDb = std::make_shared("CONFIG_DB", 0); + this->stateDb = std::make_shared("STATE_DB", 0); + this->chassisAppDb = std::make_shared("CHASSIS_APP_DB", 0); + } + + std::shared_ptr appDb; + std::shared_ptr configDb; + std::shared_ptr stateDb; + std::shared_ptr chassisAppDb; + + std::vector resourcesList; + }; + + TEST_F(CoppOrchTest, TrapGroup_AddRemove) + { + const std::string trapGroupName = "queue4_group1"; + + MockCoppOrch coppOrch; + + // Create CoPP Trap Group + { + auto tableKofvt = std::deque( + { + { + trapGroupName, + SET_COMMAND, + { + { copp_trap_action_field, "trap" }, + { copp_trap_priority_field, "4" }, + { copp_queue_field, "4" } + } + } + } + ); + coppOrch.doCoppTableTask(tableKofvt); + + const auto &trapGroupMap = 
coppOrch.get().getTrapGroupMap(); + const auto &cit = trapGroupMap.find(trapGroupName); + EXPECT_TRUE(cit != trapGroupMap.end()); + } + + // Delete CoPP Trap Group + { + auto tableKofvt = std::deque( + { { trapGroupName, DEL_COMMAND, { } } } + ); + coppOrch.doCoppTableTask(tableKofvt); + + const auto &trapGroupMap = coppOrch.get().getTrapGroupMap(); + const auto &cit = trapGroupMap.find(trapGroupName); + EXPECT_TRUE(cit == trapGroupMap.end()); + } + } + + TEST_F(CoppOrchTest, TrapGroupWithPolicer_AddRemove) + { + const std::string trapGroupName = "queue4_group2"; + + MockCoppOrch coppOrch; + + // Create CoPP Trap Group + { + auto tableKofvt = std::deque( + { + { + trapGroupName, + SET_COMMAND, + { + { copp_trap_action_field, "copy" }, + { copp_trap_priority_field, "4" }, + { copp_queue_field, "4" }, + { copp_policer_meter_type_field, "packets" }, + { copp_policer_mode_field, "sr_tcm" }, + { copp_policer_cir_field, "600" }, + { copp_policer_cbs_field, "600" }, + { copp_policer_action_red_field, "drop" } + } + } + } + ); + coppOrch.doCoppTableTask(tableKofvt); + + const auto &trapGroupMap = coppOrch.get().getTrapGroupMap(); + const auto &cit1 = trapGroupMap.find(trapGroupName); + EXPECT_TRUE(cit1 != trapGroupMap.end()); + + const auto &trapGroupPolicerMap = Portal::CoppOrchInternal::getTrapGroupPolicerMap(coppOrch.get()); + const auto &trapGroupOid = cit1->second; + const auto &cit2 = trapGroupPolicerMap.find(trapGroupOid); + EXPECT_TRUE(cit2 != trapGroupPolicerMap.end()); + } + + // Delete CoPP Trap Group + { + auto tableKofvt = std::deque( + { { trapGroupName, DEL_COMMAND, { } } } + ); + coppOrch.doCoppTableTask(tableKofvt); + + const auto &trapGroupMap = coppOrch.get().getTrapGroupMap(); + const auto &cit = trapGroupMap.find(trapGroupName); + EXPECT_TRUE(cit == trapGroupMap.end()); + + const auto &trapGroupPolicerMap = Portal::CoppOrchInternal::getTrapGroupPolicerMap(coppOrch.get()); + EXPECT_TRUE(trapGroupPolicerMap.empty()); + } + } + + TEST_F(CoppOrchTest, 
Trap_AddRemove) + { + const std::string trapGroupName = "queue4_group1"; + const std::string trapNameList = "bgp,bgpv6"; + const std::set trapIDSet = { + SAI_HOSTIF_TRAP_TYPE_BGP, + SAI_HOSTIF_TRAP_TYPE_BGPV6 + }; + + MockCoppOrch coppOrch; + + // Create CoPP Trap + { + auto tableKofvt = std::deque( + { + { + trapGroupName, + SET_COMMAND, + { + { copp_trap_action_field, "trap" }, + { copp_trap_priority_field, "4" }, + { copp_queue_field, "4" }, + { copp_trap_id_list, trapNameList } + } + } + } + ); + coppOrch.doCoppTableTask(tableKofvt); + + const auto &trapGroupMap = coppOrch.get().getTrapGroupMap(); + const auto &cit = trapGroupMap.find(trapGroupName); + EXPECT_TRUE(cit != trapGroupMap.end()); + + const auto &tgOid = cit->second; + const auto &tidList = Portal::CoppOrchInternal::getTrapIdsFromTrapGroup(coppOrch.get(), tgOid); + const auto &tidSet = std::set(tidList.begin(), tidList.end()); + EXPECT_TRUE(trapIDSet == tidSet); + } + + // Delete CoPP Trap + { + auto tableKofvt = std::deque( + { { trapGroupName, DEL_COMMAND, { } } } + ); + coppOrch.doCoppTableTask(tableKofvt); + + const auto &trapGroupMap = coppOrch.get().getTrapGroupMap(); + const auto &cit1 = trapGroupMap.find(trapGroupName); + EXPECT_TRUE(cit1 == trapGroupMap.end()); + + const auto &trapGroupIdMap = Portal::CoppOrchInternal::getTrapGroupIdMap(coppOrch.get()); + const auto &cit2 = trapGroupIdMap.find(SAI_HOSTIF_TRAP_TYPE_TTL_ERROR); + EXPECT_TRUE(cit2 != trapGroupIdMap.end()); + ASSERT_EQ(trapGroupIdMap.size(), 1); + } + } + + TEST_F(CoppOrchTest, TrapWithPolicer_AddRemove) + { + const std::string trapGroupName = "queue4_group2"; + const std::string trapNameList = "arp_req,arp_resp,neigh_discovery"; + const std::set trapIDSet = { + SAI_HOSTIF_TRAP_TYPE_ARP_REQUEST, + SAI_HOSTIF_TRAP_TYPE_ARP_RESPONSE, + SAI_HOSTIF_TRAP_TYPE_IPV6_NEIGHBOR_DISCOVERY + }; + + MockCoppOrch coppOrch; + + // Create CoPP Trap + { + auto tableKofvt = std::deque( + { + { + trapGroupName, + SET_COMMAND, + { + { 
copp_trap_action_field, "copy" }, + { copp_trap_priority_field, "4" }, + { copp_queue_field, "4" }, + { copp_policer_meter_type_field, "packets" }, + { copp_policer_mode_field, "sr_tcm" }, + { copp_policer_cir_field, "600" }, + { copp_policer_cbs_field, "600" }, + { copp_policer_action_red_field, "drop" }, + { copp_trap_id_list, trapNameList } + } + } + } + ); + coppOrch.doCoppTableTask(tableKofvt); + + const auto &trapGroupMap = coppOrch.get().getTrapGroupMap(); + const auto &cit1 = trapGroupMap.find(trapGroupName); + EXPECT_TRUE(cit1 != trapGroupMap.end()); + + const auto &trapGroupPolicerMap = Portal::CoppOrchInternal::getTrapGroupPolicerMap(coppOrch.get()); + const auto &trapGroupOid = cit1->second; + const auto &cit2 = trapGroupPolicerMap.find(trapGroupOid); + EXPECT_TRUE(cit2 != trapGroupPolicerMap.end()); + + const auto &tidList = Portal::CoppOrchInternal::getTrapIdsFromTrapGroup(coppOrch.get(), trapGroupOid); + const auto &tidSet = std::set(tidList.begin(), tidList.end()); + EXPECT_TRUE(trapIDSet == tidSet); + } + + // Delete CoPP Trap + { + auto tableKofvt = std::deque( + { { trapGroupName, DEL_COMMAND, { } } } + ); + coppOrch.doCoppTableTask(tableKofvt); + + const auto &trapGroupMap = coppOrch.get().getTrapGroupMap(); + const auto &cit1 = trapGroupMap.find(trapGroupName); + EXPECT_TRUE(cit1 == trapGroupMap.end()); + + const auto &trapGroupPolicerMap = Portal::CoppOrchInternal::getTrapGroupPolicerMap(coppOrch.get()); + EXPECT_TRUE(trapGroupPolicerMap.empty()); + + const auto &trapGroupIdMap = Portal::CoppOrchInternal::getTrapGroupIdMap(coppOrch.get()); + const auto &cit2 = trapGroupIdMap.find(SAI_HOSTIF_TRAP_TYPE_TTL_ERROR); + EXPECT_TRUE(cit2 != trapGroupIdMap.end()); + ASSERT_EQ(trapGroupIdMap.size(), 1); + } + } +} diff --git a/tests/mock_tests/database_config.json b/tests/mock_tests/database_config.json index 8301848683..baf705ea23 100644 --- a/tests/mock_tests/database_config.json +++ b/tests/mock_tests/database_config.json @@ -27,11 +27,6 @@ 
"separator": ":", "instance" : "redis" }, - "LOGLEVEL_DB" : { - "id" : 3, - "separator": ":", - "instance" : "redis" - }, "CONFIG_DB" : { "id" : 4, "separator": "|", @@ -57,6 +52,21 @@ "separator": "|", "instance" : "redis" }, + "GB_ASIC_DB" : { + "id" : 9, + "separator": ":", + "instance" : "redis" + }, + "GB_COUNTERS_DB" : { + "id" : 10, + "separator": ":", + "instance" : "redis" + }, + "GB_FLEX_COUNTER_DB" : { + "id" : 11, + "separator": ":", + "instance" : "redis" + }, "CHASSIS_APP_DB" : { "id" : 12, "separator": "|", diff --git a/tests/mock_tests/fake_netlink.cpp b/tests/mock_tests/fake_netlink.cpp new file mode 100644 index 0000000000..2370e13129 --- /dev/null +++ b/tests/mock_tests/fake_netlink.cpp @@ -0,0 +1,18 @@ +#include +#include + +static rtnl_link* g_fakeLink = [](){ + auto fakeLink = rtnl_link_alloc(); + rtnl_link_set_ifindex(fakeLink, 42); + return fakeLink; +}(); + +extern "C" +{ + +struct rtnl_link* rtnl_link_get_by_name(struct nl_cache *cache, const char *name) +{ + return g_fakeLink; +} + +} diff --git a/tests/mock_tests/fake_producerstatetable.cpp b/tests/mock_tests/fake_producerstatetable.cpp new file mode 100644 index 0000000000..6221556f63 --- /dev/null +++ b/tests/mock_tests/fake_producerstatetable.cpp @@ -0,0 +1,11 @@ +#include "producerstatetable.h" + +using namespace std; + +namespace swss +{ +ProducerStateTable::ProducerStateTable(RedisPipeline *pipeline, const string &tableName, bool buffered) + : TableBase(tableName, SonicDBConfig::getSeparator(pipeline->getDBConnector())), TableName_KeySet(tableName) {} + +ProducerStateTable::~ProducerStateTable() {} +} diff --git a/tests/mock_tests/fake_response_publisher.cpp b/tests/mock_tests/fake_response_publisher.cpp index 94480913d5..29a28d2360 100644 --- a/tests/mock_tests/fake_response_publisher.cpp +++ b/tests/mock_tests/fake_response_publisher.cpp @@ -2,21 +2,42 @@ #include #include "response_publisher.h" +#include "mock_response_publisher.h" -ResponsePublisher::ResponsePublisher() : 
m_db("APPL_STATE_DB", 0) {} +/* This mock plugs into this fake response publisher implementation + * when needed to test code that uses response publisher. */ +std::unique_ptr gMockResponsePublisher; + +ResponsePublisher::ResponsePublisher(bool buffered) : m_db(std::make_unique("APPL_STATE_DB", 0)), m_buffered(buffered) {} void ResponsePublisher::publish( const std::string& table, const std::string& key, const std::vector& intent_attrs, const ReturnCode& status, - const std::vector& state_attrs, bool replace) {} + const std::vector& state_attrs, bool replace) +{ + if (gMockResponsePublisher) + { + gMockResponsePublisher->publish(table, key, intent_attrs, status, state_attrs, replace); + } +} void ResponsePublisher::publish( const std::string& table, const std::string& key, const std::vector& intent_attrs, - const ReturnCode& status, bool replace) {} + const ReturnCode& status, bool replace) +{ + if (gMockResponsePublisher) + { + gMockResponsePublisher->publish(table, key, intent_attrs, status, replace); + } +} void ResponsePublisher::writeToDB( const std::string& table, const std::string& key, const std::vector& values, const std::string& op, bool replace) {} + +void ResponsePublisher::flush() {} + +void ResponsePublisher::setBuffered(bool buffered) {} diff --git a/tests/mock_tests/fake_warmstarthelper.cpp b/tests/mock_tests/fake_warmstarthelper.cpp new file mode 100644 index 0000000000..147227df15 --- /dev/null +++ b/tests/mock_tests/fake_warmstarthelper.cpp @@ -0,0 +1,79 @@ +#include "warmRestartHelper.h" + +static swss::DBConnector gDb("APPL_DB", 0); + +namespace swss { + +WarmStartHelper::WarmStartHelper(RedisPipeline *pipeline, + ProducerStateTable *syncTable, + const std::string &syncTableName, + const std::string &dockerName, + const std::string &appName) : + m_restorationTable(&gDb, "") +{ +} + +WarmStartHelper::~WarmStartHelper() +{ +} + +void WarmStartHelper::setState(WarmStart::WarmStartState state) +{ +} + +WarmStart::WarmStartState 
WarmStartHelper::getState() const +{ + return WarmStart::WarmStartState::INITIALIZED; +} + +bool WarmStartHelper::checkAndStart() +{ + return false; +} + +bool WarmStartHelper::isReconciled() const +{ + return false; +} + +bool WarmStartHelper::inProgress() const +{ + return false; +} + +uint32_t WarmStartHelper::getRestartTimer() const +{ + return 0; +} + +bool WarmStartHelper::runRestoration() +{ + return false; +} + +void WarmStartHelper::insertRefreshMap(const KeyOpFieldsValuesTuple &kfv) +{ +} + +void WarmStartHelper::reconcile() +{ +} + +const std::string WarmStartHelper::printKFV(const std::string &key, + const std::vector &fv) +{ + return ""; +} + +bool WarmStartHelper::compareAllFV(const std::vector &left, + const std::vector &right) +{ + return false; +} + +bool WarmStartHelper::compareOneFV(const std::string &v1, const std::string &v2) +{ + return false; +} + +} diff --git a/tests/mock_tests/fdborch/flush_syncd_notif_ut.cpp b/tests/mock_tests/fdborch/flush_syncd_notif_ut.cpp new file mode 100644 index 0000000000..e6bd8bea1c --- /dev/null +++ b/tests/mock_tests/fdborch/flush_syncd_notif_ut.cpp @@ -0,0 +1,536 @@ +#include "../ut_helper.h" +#include "../mock_orchagent_main.h" +#include "../mock_table.h" +#include "port.h" +#define private public // Need to modify internal cache +#include "portsorch.h" +#include "fdborch.h" +#include "crmorch.h" +#undef private + +#define ETH0 "Ethernet0" +#define VLAN40 "Vlan40" +#define VXLAN_REMOTE "Vxlan_1.1.1.1" + +extern redisReply *mockReply; +extern CrmOrch* gCrmOrch; + +/* +Test Fixture +*/ +namespace fdb_syncd_flush_test +{ + + sai_fdb_api_t ut_sai_fdb_api; + sai_fdb_api_t *pold_sai_fdb_api; + + sai_status_t _ut_stub_sai_create_fdb_entry ( + _In_ const sai_fdb_entry_t *fdb_entry, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t *attr_list) + { + return SAI_STATUS_SUCCESS; + } + void _hook_sai_fdb_api() + { + ut_sai_fdb_api = *sai_fdb_api; + pold_sai_fdb_api = sai_fdb_api; + ut_sai_fdb_api.create_fdb_entry = 
_ut_stub_sai_create_fdb_entry; + sai_fdb_api = &ut_sai_fdb_api; + } + void _unhook_sai_fdb_api() + { + sai_fdb_api = pold_sai_fdb_api; + } + struct FdbOrchTest : public ::testing::Test + { + std::shared_ptr m_config_db; + std::shared_ptr m_app_db; + std::shared_ptr m_state_db; + std::shared_ptr m_asic_db; + std::shared_ptr m_chassis_app_db; + std::shared_ptr m_portsOrch; + std::shared_ptr m_fdborch; + + virtual void SetUp() override + { + + testing_db::reset(); + + map profile = { + { "SAI_VS_SWITCH_TYPE", "SAI_VS_SWITCH_TYPE_BCM56850" }, + { "KV_DEVICE_MAC_ADDRESS", "20:03:04:05:06:00" } + }; + + ut_helper::initSaiApi(profile); + + /* Create Switch */ + sai_attribute_t attr; + attr.id = SAI_SWITCH_ATTR_INIT_SWITCH; + attr.value.booldata = true; + auto status = sai_switch_api->create_switch(&gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + m_config_db = std::make_shared("CONFIG_DB", 0); + m_app_db = std::make_shared("APPL_DB", 0); + m_state_db = make_shared("STATE_DB", 0); + m_asic_db = std::make_shared("ASIC_DB", 0); + + // Construct dependencies + // 1) Portsorch + const int portsorch_base_pri = 40; + + vector ports_tables = { + { APP_PORT_TABLE_NAME, portsorch_base_pri + 5 }, + { APP_VLAN_TABLE_NAME, portsorch_base_pri + 2 }, + { APP_VLAN_MEMBER_TABLE_NAME, portsorch_base_pri }, + { APP_LAG_TABLE_NAME, portsorch_base_pri + 4 }, + { APP_LAG_MEMBER_TABLE_NAME, portsorch_base_pri } + }; + + m_portsOrch = std::make_shared(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + + // 2) Crmorch + ASSERT_EQ(gCrmOrch, nullptr); + gCrmOrch = new CrmOrch(m_config_db.get(), CFG_CRM_TABLE_NAME); + VxlanTunnelOrch *vxlan_tunnel_orch_1 = new VxlanTunnelOrch(m_state_db.get(), m_app_db.get(), APP_VXLAN_TUNNEL_TABLE_NAME); + gDirectory.set(vxlan_tunnel_orch_1); + + // Construct fdborch + vector app_fdb_tables = { + { APP_FDB_TABLE_NAME, FdbOrch::fdborch_pri}, + { APP_VXLAN_FDB_TABLE_NAME, FdbOrch::fdborch_pri}, + { APP_MCLAG_FDB_TABLE_NAME, 
FdbOrch::fdborch_pri} + }; + + TableConnector stateDbFdb(m_state_db.get(), STATE_FDB_TABLE_NAME); + TableConnector stateMclagDbFdb(m_state_db.get(), STATE_MCLAG_REMOTE_FDB_TABLE_NAME); + + m_fdborch = std::make_shared(m_app_db.get(), + app_fdb_tables, + stateDbFdb, + stateMclagDbFdb, + m_portsOrch.get()); + } + + virtual void TearDown() override { + delete gCrmOrch; + gCrmOrch = nullptr; + gDirectory.m_values.clear(); + ut_helper::uninitSaiApi(); + } + }; + + /* Helper Methods */ + void setUpVlan(PortsOrch* m_portsOrch){ + /* Updates portsOrch internal cache for Vlan40 */ + std::string alias = VLAN40; + sai_object_id_t oid = 0x26000000000796; + + Port vlan(alias, Port::VLAN); + vlan.m_vlan_info.vlan_oid = oid; + vlan.m_vlan_info.vlan_id = 40; + vlan.m_members = set(); + + m_portsOrch->m_portList[alias] = vlan; + m_portsOrch->m_port_ref_count[alias] = 0; + m_portsOrch->saiOidToAlias[oid] = alias; + } + + void setUpPort(PortsOrch* m_portsOrch){ + /* Updates portsOrch internal cache for Ethernet0 */ + std::string alias = ETH0; + sai_object_id_t oid = 0x10000000004a4; + + Port port(alias, Port::PHY); + port.m_index = 1; + port.m_port_id = oid; + port.m_hif_id = 0xd00000000056e; + + m_portsOrch->m_portList[alias] = port; + m_portsOrch->saiOidToAlias[oid] = alias; + } + + void setUpVxlanPort(PortsOrch* m_portsOrch){ + /* Updates portsOrch internal cache for Ethernet0 */ + std::string alias = VXLAN_REMOTE; + sai_object_id_t oid = 0x10000000004a5; + + Port port(alias, Port::PHY); + m_portsOrch->m_portList[alias] = port; + m_portsOrch->saiOidToAlias[oid] = alias; + } + + + void setUpVlanMember(PortsOrch* m_portsOrch){ + /* Updates portsOrch internal cache for adding Ethernet0 into Vlan40 */ + sai_object_id_t bridge_port_id = 0x3a000000002c33; + + /* Add Bridge Port */ + m_portsOrch->m_portList[ETH0].m_bridge_port_id = bridge_port_id; + m_portsOrch->saiOidToAlias[bridge_port_id] = ETH0; + m_portsOrch->m_portList[VLAN40].m_members.insert(ETH0); + } + + void 
setUpVxlanMember(PortsOrch* m_portsOrch){ + /* Updates portsOrch internal cache for adding Ethernet0 into Vlan40 */ + sai_object_id_t bridge_port_id = 0x3a000000002c34; + + /* Add Bridge Port */ + m_portsOrch->m_portList[VXLAN_REMOTE].m_bridge_port_id = bridge_port_id; + m_portsOrch->saiOidToAlias[bridge_port_id] = VXLAN_REMOTE; + m_portsOrch->m_portList[VLAN40].m_members.insert(VXLAN_REMOTE); + } + + void triggerUpdate(FdbOrch* m_fdborch, + sai_fdb_event_t type, + vector mac_addr, + sai_object_id_t bridge_port_id, + sai_object_id_t bv_id){ + sai_fdb_entry_t entry; + for (int i = 0; i < (int)mac_addr.size(); i++){ + *(entry.mac_address+i) = mac_addr[i]; + } + entry.bv_id = bv_id; + m_fdborch->update(type, &entry, bridge_port_id, SAI_FDB_ENTRY_TYPE_DYNAMIC); + } +} + +namespace fdb_syncd_flush_test +{ + /* Test Consolidated Flush Per Vlan and Per Port */ + TEST_F(FdbOrchTest, ConsolidatedFlushVlanandPort) + { + ASSERT_NE(m_portsOrch, nullptr); + setUpVlan(m_portsOrch.get()); + setUpPort(m_portsOrch.get()); + ASSERT_NE(m_portsOrch->m_portList.find(VLAN40), m_portsOrch->m_portList.end()); + ASSERT_NE(m_portsOrch->m_portList.find(ETH0), m_portsOrch->m_portList.end()); + setUpVlanMember(m_portsOrch.get()); + + /* Event 1: Learn a dynamic FDB Entry */ + // 7c:fe:90:12:22:ec + vector mac_addr = {124, 254, 144, 18, 34, 236}; + triggerUpdate(m_fdborch.get(), SAI_FDB_EVENT_LEARNED, mac_addr, m_portsOrch->m_portList[ETH0].m_bridge_port_id, + m_portsOrch->m_portList[VLAN40].m_vlan_info.vlan_oid); + + string port; + string entry_type; + + /* Make sure fdb_count is incremented as expected */ + ASSERT_EQ(m_portsOrch->m_portList[VLAN40].m_fdb_count, 1); + ASSERT_EQ(m_portsOrch->m_portList[ETH0].m_fdb_count, 1); + + /* Make sure state db is updated as expected */ + ASSERT_EQ(m_fdborch->m_fdbStateTable.hget("Vlan40:7c:fe:90:12:22:ec", "port", port), true); + ASSERT_EQ(m_fdborch->m_fdbStateTable.hget("Vlan40:7c:fe:90:12:22:ec", "type", entry_type), true); + + ASSERT_EQ(port, 
"Ethernet0"); + ASSERT_EQ(entry_type, "dynamic"); + + /* Event 2: Generate a FDB Flush per port and per vlan */ + vector flush_mac_addr = {0, 0, 0, 0, 0, 0}; + for (map::iterator it = m_fdborch->m_entries.begin(); it != m_fdborch->m_entries.end(); it++) + { + it->second.is_flush_pending = true; + } + triggerUpdate(m_fdborch.get(), SAI_FDB_EVENT_FLUSHED, flush_mac_addr, m_portsOrch->m_portList[ETH0].m_bridge_port_id, + m_portsOrch->m_portList[VLAN40].m_vlan_info.vlan_oid); + + /* make sure fdb_counters are decremented */ + ASSERT_EQ(m_portsOrch->m_portList[VLAN40].m_fdb_count, 0); + ASSERT_EQ(m_portsOrch->m_portList[ETH0].m_fdb_count, 0); + + /* Make sure state db is cleared */ + ASSERT_EQ(m_fdborch->m_fdbStateTable.hget("Vlan40:7c:fe:90:12:22:ec", "port", port), false); + ASSERT_EQ(m_fdborch->m_fdbStateTable.hget("Vlan40:7c:fe:90:12:22:ec", "type", entry_type), false); + } + + /* Test Consolidated Flush All */ + TEST_F(FdbOrchTest, ConsolidatedFlushAll) + { + ASSERT_NE(m_portsOrch, nullptr); + setUpVlan(m_portsOrch.get()); + setUpPort(m_portsOrch.get()); + ASSERT_NE(m_portsOrch->m_portList.find(VLAN40), m_portsOrch->m_portList.end()); + ASSERT_NE(m_portsOrch->m_portList.find(ETH0), m_portsOrch->m_portList.end()); + setUpVlanMember(m_portsOrch.get()); + + /* Event 1: Learn a dynamic FDB Entry */ + // 7c:fe:90:12:22:ec + vector mac_addr = {124, 254, 144, 18, 34, 236}; + triggerUpdate(m_fdborch.get(), SAI_FDB_EVENT_LEARNED, mac_addr, m_portsOrch->m_portList[ETH0].m_bridge_port_id, + m_portsOrch->m_portList[VLAN40].m_vlan_info.vlan_oid); + + string port; + string entry_type; + + /* Make sure fdb_count is incremented as expected */ + ASSERT_EQ(m_portsOrch->m_portList[VLAN40].m_fdb_count, 1); + ASSERT_EQ(m_portsOrch->m_portList[ETH0].m_fdb_count, 1); + + /* Make sure state db is updated as expected */ + ASSERT_EQ(m_fdborch->m_fdbStateTable.hget("Vlan40:7c:fe:90:12:22:ec", "port", port), true); + ASSERT_EQ(m_fdborch->m_fdbStateTable.hget("Vlan40:7c:fe:90:12:22:ec", 
"type", entry_type), true); + + ASSERT_EQ(port, "Ethernet0"); + ASSERT_EQ(entry_type, "dynamic"); + + /* Event2: Send a Consolidated Flush response from syncd */ + vector flush_mac_addr = {0, 0, 0, 0, 0, 0}; + for (map::iterator it = m_fdborch->m_entries.begin(); it != m_fdborch->m_entries.end(); it++) + { + it->second.is_flush_pending = true; + } + triggerUpdate(m_fdborch.get(), SAI_FDB_EVENT_FLUSHED, flush_mac_addr, SAI_NULL_OBJECT_ID, + SAI_NULL_OBJECT_ID); + + /* make sure fdb_counters are decremented */ + ASSERT_EQ(m_portsOrch->m_portList[VLAN40].m_fdb_count, 0); + ASSERT_EQ(m_portsOrch->m_portList[ETH0].m_fdb_count, 0); + + /* Make sure state db is cleared */ + ASSERT_EQ(m_fdborch->m_fdbStateTable.hget("Vlan40:7c:fe:90:12:22:ec", "port", port), false); + ASSERT_EQ(m_fdborch->m_fdbStateTable.hget("Vlan40:7c:fe:90:12:22:ec", "type", entry_type), false); + } + + /* Test Consolidated Flush per VLAN BV_ID */ + TEST_F(FdbOrchTest, ConsolidatedFlushVlan) + { + ASSERT_NE(m_portsOrch, nullptr); + setUpVlan(m_portsOrch.get()); + setUpPort(m_portsOrch.get()); + ASSERT_NE(m_portsOrch->m_portList.find(VLAN40), m_portsOrch->m_portList.end()); + ASSERT_NE(m_portsOrch->m_portList.find(ETH0), m_portsOrch->m_portList.end()); + setUpVlanMember(m_portsOrch.get()); + + /* Event 1: Learn a dynamic FDB Entry */ + // 7c:fe:90:12:22:ec + vector mac_addr = {124, 254, 144, 18, 34, 236}; + triggerUpdate(m_fdborch.get(), SAI_FDB_EVENT_LEARNED, mac_addr, m_portsOrch->m_portList[ETH0].m_bridge_port_id, + m_portsOrch->m_portList[VLAN40].m_vlan_info.vlan_oid); + + string port; + string entry_type; + + /* Make sure fdb_count is incremented as expected */ + ASSERT_EQ(m_portsOrch->m_portList[VLAN40].m_fdb_count, 1); + ASSERT_EQ(m_portsOrch->m_portList[ETH0].m_fdb_count, 1); + + /* Make sure state db is updated as expected */ + ASSERT_EQ(m_fdborch->m_fdbStateTable.hget("Vlan40:7c:fe:90:12:22:ec", "port", port), true); + ASSERT_EQ(m_fdborch->m_fdbStateTable.hget("Vlan40:7c:fe:90:12:22:ec", 
"type", entry_type), true); + + ASSERT_EQ(port, "Ethernet0"); + ASSERT_EQ(entry_type, "dynamic"); + + /* Event2: Send a Consolidated Flush response from syncd for vlan */ + vector flush_mac_addr = {0, 0, 0, 0, 0, 0}; + for (map::iterator it = m_fdborch->m_entries.begin(); it != m_fdborch->m_entries.end(); it++) + { + it->second.is_flush_pending = true; + } + triggerUpdate(m_fdborch.get(), SAI_FDB_EVENT_FLUSHED, flush_mac_addr, SAI_NULL_OBJECT_ID, + m_portsOrch->m_portList[VLAN40].m_vlan_info.vlan_oid); + + /* make sure fdb_counters are decremented */ + ASSERT_EQ(m_portsOrch->m_portList[VLAN40].m_fdb_count, 0); + ASSERT_EQ(m_portsOrch->m_portList[ETH0].m_fdb_count, 0); + + /* Make sure state db is cleared */ + ASSERT_EQ(m_fdborch->m_fdbStateTable.hget("Vlan40:7c:fe:90:12:22:ec", "port", port), false); + ASSERT_EQ(m_fdborch->m_fdbStateTable.hget("Vlan40:7c:fe:90:12:22:ec", "type", entry_type), false); + } + + /* Test Consolidated Flush per bridge port id */ + TEST_F(FdbOrchTest, ConsolidatedFlushPort) + { + ASSERT_NE(m_portsOrch, nullptr); + setUpVlan(m_portsOrch.get()); + setUpPort(m_portsOrch.get()); + ASSERT_NE(m_portsOrch->m_portList.find(VLAN40), m_portsOrch->m_portList.end()); + ASSERT_NE(m_portsOrch->m_portList.find(ETH0), m_portsOrch->m_portList.end()); + setUpVlanMember(m_portsOrch.get()); + + /* Event 1: Learn a dynamic FDB Entry */ + // 7c:fe:90:12:22:ec + vector mac_addr = {124, 254, 144, 18, 34, 236}; + triggerUpdate(m_fdborch.get(), SAI_FDB_EVENT_LEARNED, mac_addr, m_portsOrch->m_portList[ETH0].m_bridge_port_id, + m_portsOrch->m_portList[VLAN40].m_vlan_info.vlan_oid); + + string port; + string entry_type; + + /* Make sure fdb_count is incremented as expected */ + ASSERT_EQ(m_portsOrch->m_portList[VLAN40].m_fdb_count, 1); + ASSERT_EQ(m_portsOrch->m_portList[ETH0].m_fdb_count, 1); + + /* Make sure state db is updated as expected */ + ASSERT_EQ(m_fdborch->m_fdbStateTable.hget("Vlan40:7c:fe:90:12:22:ec", "port", port), true); + 
ASSERT_EQ(m_fdborch->m_fdbStateTable.hget("Vlan40:7c:fe:90:12:22:ec", "type", entry_type), true); + + ASSERT_EQ(port, "Ethernet0"); + ASSERT_EQ(entry_type, "dynamic"); + + /* Event2: Send a Consolidated Flush response from syncd for a port */ + vector flush_mac_addr = {0, 0, 0, 0, 0, 0}; + for (map::iterator it = m_fdborch->m_entries.begin(); it != m_fdborch->m_entries.end(); it++) + { + it->second.is_flush_pending = true; + } + triggerUpdate(m_fdborch.get(), SAI_FDB_EVENT_FLUSHED, flush_mac_addr, m_portsOrch->m_portList[ETH0].m_bridge_port_id, + SAI_NULL_OBJECT_ID); + + /* make sure fdb_counters are decremented */ + ASSERT_EQ(m_portsOrch->m_portList[VLAN40].m_fdb_count, 0); + ASSERT_EQ(m_portsOrch->m_portList[ETH0].m_fdb_count, 0); + + /* Make sure state db is cleared */ + ASSERT_EQ(m_fdborch->m_fdbStateTable.hget("Vlan40:7c:fe:90:12:22:ec", "port", port), false); + ASSERT_EQ(m_fdborch->m_fdbStateTable.hget("Vlan40:7c:fe:90:12:22:ec", "type", entry_type), false); + } + + //* Test Consolidated Flush Per Vlan and Per Port, but the bridge_port_id from the internal cache is already deleted */ + TEST_F(FdbOrchTest, ConsolidatedFlushVlanandPortBridgeportDeleted) + { + ASSERT_NE(m_portsOrch, nullptr); + setUpVlan(m_portsOrch.get()); + setUpPort(m_portsOrch.get()); + ASSERT_NE(m_portsOrch->m_portList.find(VLAN40), m_portsOrch->m_portList.end()); + ASSERT_NE(m_portsOrch->m_portList.find(ETH0), m_portsOrch->m_portList.end()); + setUpVlanMember(m_portsOrch.get()); + + /* Event 1: Learn a dynamic FDB Entry */ + // 7c:fe:90:12:22:ec + vector mac_addr = {124, 254, 144, 18, 34, 236}; + triggerUpdate(m_fdborch.get(), SAI_FDB_EVENT_LEARNED, mac_addr, m_portsOrch->m_portList[ETH0].m_bridge_port_id, + m_portsOrch->m_portList[VLAN40].m_vlan_info.vlan_oid); + + string port; + string entry_type; + + /* Make sure fdb_count is incremented as expected */ + ASSERT_EQ(m_portsOrch->m_portList[VLAN40].m_fdb_count, 1); + ASSERT_EQ(m_portsOrch->m_portList[ETH0].m_fdb_count, 1); + + /* Make sure 
state db is updated as expected */ + ASSERT_EQ(m_fdborch->m_fdbStateTable.hget("Vlan40:7c:fe:90:12:22:ec", "port", port), true); + ASSERT_EQ(m_fdborch->m_fdbStateTable.hget("Vlan40:7c:fe:90:12:22:ec", "type", entry_type), true); + + ASSERT_EQ(port, "Ethernet0"); + ASSERT_EQ(entry_type, "dynamic"); + + auto bridge_port_oid = m_portsOrch->m_portList[ETH0].m_bridge_port_id; + + /* Delete the bridge_port_oid in the internal OA cache */ + m_portsOrch->m_portList[ETH0].m_bridge_port_id = SAI_NULL_OBJECT_ID; + m_portsOrch->saiOidToAlias.erase(bridge_port_oid); + + /* Event 2: Generate a FDB Flush per port and per vlan */ + vector flush_mac_addr = {0, 0, 0, 0, 0, 0}; + for (map::iterator it = m_fdborch->m_entries.begin(); it != m_fdborch->m_entries.end(); it++) + { + it->second.is_flush_pending = true; + } + triggerUpdate(m_fdborch.get(), SAI_FDB_EVENT_FLUSHED, flush_mac_addr, bridge_port_oid, + m_portsOrch->m_portList[VLAN40].m_vlan_info.vlan_oid); + + /* make sure fdb_counter for Vlan is decremented */ + ASSERT_EQ(m_portsOrch->m_portList[VLAN40].m_fdb_count, 0); + ASSERT_EQ(m_portsOrch->m_portList[ETH0].m_fdb_count, 0); + + /* Make sure state db is cleared */ + ASSERT_EQ(m_fdborch->m_fdbStateTable.hget("Vlan40:7c:fe:90:12:22:ec", "port", port), false); + ASSERT_EQ(m_fdborch->m_fdbStateTable.hget("Vlan40:7c:fe:90:12:22:ec", "type", entry_type), false); + } + + /* Test Flush Per Vlan and Per Port */ + TEST_F(FdbOrchTest, NonConsolidatedFlushVlanandPort) + { + ASSERT_NE(m_portsOrch, nullptr); + setUpVlan(m_portsOrch.get()); + setUpPort(m_portsOrch.get()); + ASSERT_NE(m_portsOrch->m_portList.find(VLAN40), m_portsOrch->m_portList.end()); + ASSERT_NE(m_portsOrch->m_portList.find(ETH0), m_portsOrch->m_portList.end()); + setUpVlanMember(m_portsOrch.get()); + + /* Event 1: Learn a dynamic FDB Entry */ + // 7c:fe:90:12:22:ec + vector mac_addr = {124, 254, 144, 18, 34, 236}; + triggerUpdate(m_fdborch.get(), SAI_FDB_EVENT_LEARNED, mac_addr, 
m_portsOrch->m_portList[ETH0].m_bridge_port_id, + m_portsOrch->m_portList[VLAN40].m_vlan_info.vlan_oid); + + string port; + string entry_type; + + /* Make sure fdb_count is incremented as expected */ + ASSERT_EQ(m_portsOrch->m_portList[VLAN40].m_fdb_count, 1); + ASSERT_EQ(m_portsOrch->m_portList[ETH0].m_fdb_count, 1); + + /* Make sure state db is updated as expected */ + ASSERT_EQ(m_fdborch->m_fdbStateTable.hget("Vlan40:7c:fe:90:12:22:ec", "port", port), true); + ASSERT_EQ(m_fdborch->m_fdbStateTable.hget("Vlan40:7c:fe:90:12:22:ec", "type", entry_type), true); + + ASSERT_EQ(port, "Ethernet0"); + ASSERT_EQ(entry_type, "dynamic"); + + /* Event 2: Generate a non-consilidated FDB Flush per port and per vlan */ + vector flush_mac_addr = {124, 254, 144, 18, 34, 236}; + for (map::iterator it = m_fdborch->m_entries.begin(); it != m_fdborch->m_entries.end(); it++) + { + it->second.is_flush_pending = true; + } + triggerUpdate(m_fdborch.get(), SAI_FDB_EVENT_FLUSHED, flush_mac_addr, m_portsOrch->m_portList[ETH0].m_bridge_port_id, + m_portsOrch->m_portList[VLAN40].m_vlan_info.vlan_oid); + + /* make sure fdb_counters are decremented */ + ASSERT_EQ(m_portsOrch->m_portList[VLAN40].m_fdb_count, 0); + ASSERT_EQ(m_portsOrch->m_portList[ETH0].m_fdb_count, 0); + + /* Make sure state db is cleared */ + ASSERT_EQ(m_fdborch->m_fdbStateTable.hget("Vlan40:7c:fe:90:12:22:ec", "port", port), false); + ASSERT_EQ(m_fdborch->m_fdbStateTable.hget("Vlan40:7c:fe:90:12:22:ec", "type", entry_type), false); + } + + /* Test Consolidated Flush with origin VXLAN */ + TEST_F(FdbOrchTest, ConsolidatedFlushAllVxLAN) + { + _hook_sai_fdb_api(); + ASSERT_NE(m_portsOrch, nullptr); + setUpVlan(m_portsOrch.get()); + setUpVxlanPort(m_portsOrch.get()); + ASSERT_NE(m_portsOrch->m_portList.find(VLAN40), m_portsOrch->m_portList.end()); + ASSERT_NE(m_portsOrch->m_portList.find(VXLAN_REMOTE), m_portsOrch->m_portList.end()); + setUpVxlanMember(m_portsOrch.get()); + + FdbData fdbData; + fdbData.bridge_port_id = 
SAI_NULL_OBJECT_ID; + fdbData.type = "dynamic"; + fdbData.origin = FDB_ORIGIN_VXLAN_ADVERTIZED; + fdbData.remote_ip = "1.1.1.1"; + fdbData.esi = ""; + fdbData.vni = 100; + FdbEntry entry; + + MacAddress mac1 = MacAddress("52:54:00:ac:3a:99"); + entry.mac = mac1; + entry.port_name = VXLAN_REMOTE; + + entry.bv_id = m_portsOrch->m_portList[VLAN40].m_vlan_info.vlan_oid; + m_fdborch->addFdbEntry(entry, VXLAN_REMOTE, fdbData); + + /* Make sure fdb_count is incremented as expected */ + ASSERT_EQ(m_portsOrch->m_portList[VLAN40].m_fdb_count, 1); + ASSERT_EQ(m_portsOrch->m_portList[VXLAN_REMOTE].m_fdb_count, 1); + + /* Event2: Send a Consolidated Flush response from syncd */ + vector flush_mac_addr = {0, 0, 0, 0, 0, 0}; + triggerUpdate(m_fdborch.get(), SAI_FDB_EVENT_FLUSHED, flush_mac_addr, SAI_NULL_OBJECT_ID, + SAI_NULL_OBJECT_ID); + + /* make sure fdb_counters are decremented */ + ASSERT_EQ(m_portsOrch->m_portList[VLAN40].m_fdb_count, 1); + ASSERT_EQ(m_portsOrch->m_portList[VXLAN_REMOTE].m_fdb_count, 1); + _unhook_sai_fdb_api(); + } +} diff --git a/tests/mock_tests/flowcounterrouteorch_ut.cpp b/tests/mock_tests/flowcounterrouteorch_ut.cpp new file mode 100644 index 0000000000..10a52c6b05 --- /dev/null +++ b/tests/mock_tests/flowcounterrouteorch_ut.cpp @@ -0,0 +1,400 @@ +#define private public +#include "directory.h" +#undef private +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_table.h" + +extern string gMySwitchType; + +namespace flowcounterrouteorch_test +{ + using namespace std; + shared_ptr m_app_db; + shared_ptr m_config_db; + shared_ptr m_state_db; + shared_ptr m_chassis_app_db; + + int num_created_counter; + sai_counter_api_t ut_sai_counter_api; + sai_counter_api_t *pold_sai_counter_api; + sai_create_counter_fn old_create_counter; + sai_remove_counter_fn old_remove_counter; + + sai_status_t _ut_stub_create_counter( + _Out_ sai_object_id_t *counter_id, + _In_ sai_object_id_t 
switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t *attr_list) + { + num_created_counter ++; + return old_create_counter(counter_id, switch_id, attr_count, attr_list); + } + + sai_status_t _ut_stub_remove_counter(_In_ sai_object_id_t counter_id) + { + num_created_counter --; + return old_remove_counter(counter_id); + } + + struct FlowcounterRouteOrchTest : public ::testing::Test + { + FlowcounterRouteOrchTest() + { + return; + } + + void SetUp() override + { + ASSERT_EQ(sai_route_api, nullptr); + map profile = { + { "SAI_VS_SWITCH_TYPE", "SAI_VS_SWITCH_TYPE_BCM56850" }, + { "KV_DEVICE_MAC_ADDRESS", "20:03:04:05:06:00" } + }; + + ut_helper::initSaiApi(profile); + + old_create_counter = sai_counter_api->create_counter; + old_remove_counter = sai_counter_api->remove_counter; + + pold_sai_counter_api = sai_counter_api; + ut_sai_counter_api = *sai_counter_api; + sai_counter_api = &ut_sai_counter_api; + + // Mock sai API + sai_counter_api->create_counter = _ut_stub_create_counter; + sai_counter_api->remove_counter = _ut_stub_remove_counter; + + // Init switch and create dependencies + m_app_db = make_shared("APPL_DB", 0); + m_config_db = make_shared("CONFIG_DB", 0); + m_state_db = make_shared("STATE_DB", 0); + if(gMySwitchType == "voq") + m_chassis_app_db = make_shared("CHASSIS_APP_DB", 0); + + sai_attribute_t attr; + attr.id = SAI_SWITCH_ATTR_INIT_SWITCH; + attr.value.booldata = true; + + auto status = sai_switch_api->create_switch(&gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + // Get switch source MAC address + attr.id = SAI_SWITCH_ATTR_SRC_MAC_ADDRESS; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gMacAddress = attr.value.mac; + + // Get the default virtual router ID + attr.id = SAI_SWITCH_ATTR_DEFAULT_VIRTUAL_ROUTER_ID; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gVirtualRouterId = 
attr.value.oid; + + + ASSERT_EQ(gCrmOrch, nullptr); + gCrmOrch = new CrmOrch(m_config_db.get(), CFG_CRM_TABLE_NAME); + + TableConnector stateDbSwitchTable(m_state_db.get(), "SWITCH_CAPABILITY"); + TableConnector conf_asic_sensors(m_config_db.get(), CFG_ASIC_SENSORS_TABLE_NAME); + TableConnector app_switch_table(m_app_db.get(), APP_SWITCH_TABLE_NAME); + + vector switch_tables = { + conf_asic_sensors, + app_switch_table + }; + + ASSERT_EQ(gSwitchOrch, nullptr); + gSwitchOrch = new SwitchOrch(m_app_db.get(), switch_tables, stateDbSwitchTable); + + // Create dependencies ... + TableConnector stateDbBfdSessionTable(m_state_db.get(), STATE_BFD_SESSION_TABLE_NAME); + gBfdOrch = new BfdOrch(m_app_db.get(), APP_BFD_SESSION_TABLE_NAME, stateDbBfdSessionTable); + + const int portsorch_base_pri = 40; + vector ports_tables = { + { APP_PORT_TABLE_NAME, portsorch_base_pri + 5 }, + { APP_VLAN_TABLE_NAME, portsorch_base_pri + 2 }, + { APP_VLAN_MEMBER_TABLE_NAME, portsorch_base_pri }, + { APP_LAG_TABLE_NAME, portsorch_base_pri + 4 }, + { APP_LAG_MEMBER_TABLE_NAME, portsorch_base_pri } + }; + + vector flex_counter_tables = { + CFG_FLEX_COUNTER_TABLE_NAME + }; + auto* flexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); + gDirectory.set(flexCounterOrch); + + ASSERT_EQ(gPortsOrch, nullptr); + gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + + vector vnet_tables = { + APP_VNET_RT_TABLE_NAME, + APP_VNET_RT_TUNNEL_TABLE_NAME + }; + + vector cfg_vnet_tables = { + CFG_VNET_RT_TABLE_NAME, + CFG_VNET_RT_TUNNEL_TABLE_NAME + }; + + auto* vnet_orch = new VNetOrch(m_app_db.get(), APP_VNET_TABLE_NAME); + gDirectory.set(vnet_orch); + auto* cfg_vnet_rt_orch = new VNetCfgRouteOrch(m_config_db.get(), m_app_db.get(), cfg_vnet_tables); + gDirectory.set(cfg_vnet_rt_orch); + auto* vnet_rt_orch = new VNetRouteOrch(m_app_db.get(), vnet_tables, vnet_orch); + gDirectory.set(vnet_rt_orch); + ASSERT_EQ(gVrfOrch, nullptr); + gVrfOrch 
= new VRFOrch(m_app_db.get(), APP_VRF_TABLE_NAME, m_state_db.get(), STATE_VRF_OBJECT_TABLE_NAME); + gDirectory.set(gVrfOrch); + + ASSERT_EQ(gIntfsOrch, nullptr); + gIntfsOrch = new IntfsOrch(m_app_db.get(), APP_INTF_TABLE_NAME, gVrfOrch, m_chassis_app_db.get()); + + const int fdborch_pri = 20; + + vector app_fdb_tables = { + { APP_FDB_TABLE_NAME, FdbOrch::fdborch_pri}, + { APP_VXLAN_FDB_TABLE_NAME, FdbOrch::fdborch_pri}, + { APP_MCLAG_FDB_TABLE_NAME, fdborch_pri} + }; + + TableConnector stateDbFdb(m_state_db.get(), STATE_FDB_TABLE_NAME); + TableConnector stateMclagDbFdb(m_state_db.get(), STATE_MCLAG_REMOTE_FDB_TABLE_NAME); + ASSERT_EQ(gFdbOrch, nullptr); + gFdbOrch = new FdbOrch(m_app_db.get(), app_fdb_tables, stateDbFdb, stateMclagDbFdb, gPortsOrch); + + ASSERT_EQ(gNeighOrch, nullptr); + gNeighOrch = new NeighOrch(m_app_db.get(), APP_NEIGH_TABLE_NAME, gIntfsOrch, gFdbOrch, gPortsOrch, m_chassis_app_db.get()); + + auto* tunnel_decap_orch = new TunnelDecapOrch(m_app_db.get(), APP_TUNNEL_DECAP_TABLE_NAME); + vector mux_tables = { + CFG_MUX_CABLE_TABLE_NAME, + CFG_PEER_SWITCH_TABLE_NAME + }; + auto* mux_orch = new MuxOrch(m_config_db.get(), mux_tables, tunnel_decap_orch, gNeighOrch, gFdbOrch); + gDirectory.set(mux_orch); + + ASSERT_EQ(gFgNhgOrch, nullptr); + const int fgnhgorch_pri = 15; + + vector fgnhg_tables = { + { CFG_FG_NHG, fgnhgorch_pri }, + { CFG_FG_NHG_PREFIX, fgnhgorch_pri }, + { CFG_FG_NHG_MEMBER, fgnhgorch_pri } + }; + gFgNhgOrch = new FgNhgOrch(m_config_db.get(), m_app_db.get(), m_state_db.get(), fgnhg_tables, gNeighOrch, gIntfsOrch, gVrfOrch); + + ASSERT_EQ(gSrv6Orch, nullptr); + vector srv6_tables = { + APP_SRV6_SID_LIST_TABLE_NAME, + APP_SRV6_MY_SID_TABLE_NAME + }; + gSrv6Orch = new Srv6Orch(m_app_db.get(), srv6_tables, gSwitchOrch, gVrfOrch, gNeighOrch); + + // Start FlowCounterRouteOrch + static const vector route_pattern_tables = { + CFG_FLOW_COUNTER_ROUTE_PATTERN_TABLE_NAME, + }; + gFlowCounterRouteOrch = new 
FlowCounterRouteOrch(m_config_db.get(), route_pattern_tables); + + ASSERT_EQ(gRouteOrch, nullptr); + const int routeorch_pri = 5; + vector route_tables = { + { APP_ROUTE_TABLE_NAME, routeorch_pri }, + { APP_LABEL_ROUTE_TABLE_NAME, routeorch_pri } + }; + gRouteOrch = new RouteOrch(m_app_db.get(), route_tables, gSwitchOrch, gNeighOrch, gIntfsOrch, gVrfOrch, gFgNhgOrch, gSrv6Orch); + gNhgOrch = new NhgOrch(m_app_db.get(), APP_NEXTHOP_GROUP_TABLE_NAME); + + // Recreate buffer orch to read populated data + vector buffer_tables = { APP_BUFFER_POOL_TABLE_NAME, + APP_BUFFER_PROFILE_TABLE_NAME, + APP_BUFFER_QUEUE_TABLE_NAME, + APP_BUFFER_PG_TABLE_NAME, + APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, + APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME }; + + gBufferOrch = new BufferOrch(m_app_db.get(), m_config_db.get(), m_state_db.get(), buffer_tables); + + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate pot table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + + portTable.set("PortInitDone", { { "lanes", "0" } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + + // Prepare interface table + Table intfTable = Table(m_app_db.get(), APP_INTF_TABLE_NAME); + intfTable.set("Ethernet0", { {"NULL", "NULL" }, + {"mac_addr", "00:00:00:00:00:00" }}); + intfTable.set("Ethernet0:10.0.0.1/24", { { "scope", "global" }, + { "family", "IPv4" }}); + gIntfsOrch->addExistingData(&intfTable); + static_cast(gIntfsOrch)->doTask(); + + // Prepare neighbor table + Table neighborTable = Table(m_app_db.get(), APP_NEIGH_TABLE_NAME); + + map neighborIp2Mac = {{"10.0.0.2", "00:00:0a:00:00:02" }, + {"10.0.0.3", "00:00:0a:00:00:03" } }; 
+ neighborTable.set("Ethernet0:10.0.0.2", { {"neigh", neighborIp2Mac["10.0.0.2"]}, + {"family", "IPv4" }}); + neighborTable.set("Ethernet0:10.0.0.3", { {"neigh", neighborIp2Mac["10.0.0.3"]}, + {"family", "IPv4" }}); + gNeighOrch->addExistingData(&neighborTable); + static_cast(gNeighOrch)->doTask(); + + //Prepare route table + Table routeTable = Table(m_app_db.get(), APP_ROUTE_TABLE_NAME); + routeTable.set("1.1.1.1/32", { {"ifname", "Ethernet0" }, + {"nexthop", "10.0.0.2" }}); + routeTable.set("0.0.0.0/0", { {"ifname", "Ethernet0" }, + {"nexthop", "10.0.0.2" }}); + gRouteOrch->addExistingData(&routeTable); + static_cast(gRouteOrch)->doTask(); + + // Enable flow counter + std::deque entries; + entries.push_back({"FLOW_CNT_ROUTE", "SET", { {"FLEX_COUNTER_STATUS", "enable"}, {"POLL_INTERVAL", "10000"}}}); + auto consumer = dynamic_cast(flexCounterOrch->getExecutor(CFG_FLEX_COUNTER_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(flexCounterOrch)->doTask(); + + static_cast(gFlowCounterRouteOrch)->doTask(); + return; + } + + void TearDown() override + { + gDirectory.m_values.clear(); + + delete gCrmOrch; + gCrmOrch = nullptr; + + delete gSwitchOrch; + gSwitchOrch = nullptr; + + delete gBfdOrch; + gBfdOrch = nullptr; + + delete gNeighOrch; + gNeighOrch = nullptr; + + delete gFdbOrch; + gFdbOrch = nullptr; + + delete gPortsOrch; + gPortsOrch = nullptr; + + delete gIntfsOrch; + gIntfsOrch = nullptr; + + delete gFgNhgOrch; + gFgNhgOrch = nullptr; + + delete gSrv6Orch; + gSrv6Orch = nullptr; + + delete gRouteOrch; + gRouteOrch = nullptr; + + delete gNhgOrch; + gNhgOrch = nullptr; + + delete gBufferOrch; + gBufferOrch = nullptr; + + delete gVrfOrch; + gVrfOrch = nullptr; + + delete gFlowCounterRouteOrch; + gFlowCounterRouteOrch = nullptr; + + sai_counter_api = pold_sai_counter_api; + ut_helper::uninitSaiApi(); + return; + } + }; + + TEST_F(FlowcounterRouteOrchTest, FlowcounterRouteOrchTestPatternAddDel) + { + std::deque entries; + // Setting route pattern + auto 
current_counter_num = num_created_counter; + entries.push_back({"1.1.1.0/24", "SET", { {"max_match_count", "10"}}}); + auto consumer = dynamic_cast(gFlowCounterRouteOrch->getExecutor(CFG_FLOW_COUNTER_ROUTE_PATTERN_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gFlowCounterRouteOrch)->doTask(); + ASSERT_TRUE(num_created_counter - current_counter_num == 1); + + // Deleting route pattern + current_counter_num = num_created_counter; + entries.push_back({"1.1.1.0/24", "DEL", { {"max_match_count", "10"}}}); + consumer->addToSync(entries); + static_cast(gFlowCounterRouteOrch)->doTask(); + ASSERT_TRUE(current_counter_num - num_created_counter == 1); + + } + + TEST_F(FlowcounterRouteOrchTest, DelayAddVRF) + { + std::deque entries; + // Setting route pattern with VRF does not exist + auto current_counter_num = num_created_counter; + entries.push_back({"Vrf1|1.1.1.0/24", "SET", { {"max_match_count", "10"}}}); + auto consumer = dynamic_cast(gFlowCounterRouteOrch->getExecutor(CFG_FLOW_COUNTER_ROUTE_PATTERN_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gFlowCounterRouteOrch)->doTask(); + ASSERT_TRUE(num_created_counter - current_counter_num == 0); + + // Create VRF + entries.push_back({"Vrf1", "SET", { {"v4", "true"} }}); + auto vrf_consumer = dynamic_cast(gVrfOrch->getExecutor(APP_VRF_TABLE_NAME)); + vrf_consumer->addToSync(entries); + static_cast(gVrfOrch)->doTask(); + ASSERT_TRUE(num_created_counter - current_counter_num == 0); + + // Add route to VRF + Table routeTable = Table(m_app_db.get(), APP_ROUTE_TABLE_NAME); + routeTable.set("Vrf1:1.1.1.1/32", { {"ifname", "Ethernet0" }, + {"nexthop", "10.0.0.2" }}); + gRouteOrch->addExistingData(&routeTable); + static_cast(gRouteOrch)->doTask(); + ASSERT_TRUE(num_created_counter - current_counter_num == 1); + + // Deleting route pattern + current_counter_num = num_created_counter; + entries.clear(); + entries.push_back({"Vrf1|1.1.1.0/24", "DEL", { {"max_match_count", "10"}}}); + 
consumer->addToSync(entries); + static_cast(gFlowCounterRouteOrch)->doTask(); + ASSERT_TRUE(current_counter_num - num_created_counter == 1); + + // Deleting VRF + entries.push_back({"Vrf1", "DEL", { {"v4", "true"} }}); + vrf_consumer->addToSync(entries); + static_cast(gVrfOrch)->doTask(); + } +} \ No newline at end of file diff --git a/tests/mock_tests/fpmsyncd/test_fpmlink.cpp b/tests/mock_tests/fpmsyncd/test_fpmlink.cpp new file mode 100644 index 0000000000..258ba669a8 --- /dev/null +++ b/tests/mock_tests/fpmsyncd/test_fpmlink.cpp @@ -0,0 +1,71 @@ +#include "fpmsyncd/fpmlink.h" + +#include + +#include +#include + +using namespace swss; + +using ::testing::_; + +class MockMsgHandler : public NetMsg +{ +public: + MOCK_METHOD2(onMsg, void(int, nl_object*)); +}; + +class FpmLinkTest : public ::testing::Test +{ +public: + void SetUp() override + { + NetDispatcher::getInstance().registerMessageHandler(RTM_NEWROUTE, &m_mock); + NetDispatcher::getInstance().registerMessageHandler(RTM_DELROUTE, &m_mock); + } + + void TearDown() override + { + NetDispatcher::getInstance().unregisterMessageHandler(RTM_NEWROUTE); + NetDispatcher::getInstance().unregisterMessageHandler(RTM_DELROUTE); + } + + DBConnector m_db{"APPL_DB", 0}; + RedisPipeline m_pipeline{&m_db, 1}; + RouteSync m_routeSync{&m_pipeline}; + FpmLink m_fpm{&m_routeSync}; + MockMsgHandler m_mock; +}; + +TEST_F(FpmLinkTest, SingleNlMessageInFpmMessage) +{ + // Single FPM message containing single RTM_NEWROUTE + alignas(fpm_msg_hdr_t) unsigned char fpmMsgBuffer[] = { + 0x01, 0x01, 0x00, 0x40, 0x3C, 0x00, 0x00, 0x00, 0x18, 0x00, 0x01, 0x05, 0x00, 0x00, 0x00, 0x00, 0xE0, + 0x12, 0x6F, 0xC4, 0x02, 0x18, 0x00, 0x00, 0xFE, 0x02, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, + 0x01, 0x00, 0x01, 0x01, 0x01, 0x00, 0x08, 0x00, 0x06, 0x00, 0x14, 0x00, 0x00, 0x00, 0x08, 0x00, 0x05, + 0x00, 0xAC, 0x1E, 0x38, 0xA6, 0x08, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00 + }; + + EXPECT_CALL(m_mock, onMsg(_, _)).Times(1); + + 
m_fpm.processFpmMessage(reinterpret_cast(static_cast(fpmMsgBuffer))); +} + +TEST_F(FpmLinkTest, TwoNlMessagesInFpmMessage) +{ + // Single FPM message containing RTM_DELROUTE and RTM_NEWROUTE + alignas(fpm_msg_hdr_t) unsigned char fpmMsgBuffer[] = { + 0x01, 0x01, 0x00, 0x6C, 0x2C, 0x00, 0x00, 0x00, 0x19, 0x00, 0x01, 0x04, 0x00, 0x00, 0x00, 0x00, 0xE0, 0x12, + 0x6F, 0xC4, 0x02, 0x18, 0x00, 0x00, 0xFE, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x01, 0x00, + 0x01, 0x01, 0x01, 0x00, 0x08, 0x00, 0x06, 0x00, 0x14, 0x00, 0x00, 0x00, 0x3C, 0x00, 0x00, 0x00, 0x18, 0x00, + 0x01, 0x05, 0x00, 0x00, 0x00, 0x00, 0xE0, 0x12, 0x6F, 0xC4, 0x02, 0x18, 0x00, 0x00, 0xFE, 0x02, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x01, 0x00, 0x01, 0x01, 0x01, 0x00, 0x08, 0x00, 0x06, 0x00, 0x14, 0x00, + 0x00, 0x00, 0x08, 0x00, 0x05, 0x00, 0xAC, 0x1E, 0x38, 0xA7, 0x08, 0x00, 0x04, 0x00, 0x06, 0x00, 0x00, 0x00 + }; + + EXPECT_CALL(m_mock, onMsg(_, _)).Times(2); + + m_fpm.processFpmMessage(reinterpret_cast(static_cast(fpmMsgBuffer))); +} + diff --git a/tests/mock_tests/fpmsyncd/test_routesync.cpp b/tests/mock_tests/fpmsyncd/test_routesync.cpp new file mode 100644 index 0000000000..debfa16d21 --- /dev/null +++ b/tests/mock_tests/fpmsyncd/test_routesync.cpp @@ -0,0 +1,172 @@ +#include "fpmsyncd/routesync.h" + +#include +#include + +using namespace swss; + +using ::testing::_; + +class MockFpm : public FpmInterface +{ +public: + MockFpm(RouteSync* routeSync) : + m_routeSync(routeSync) + { + m_routeSync->onFpmConnected(*this); + } + + ~MockFpm() override + { + m_routeSync->onFpmDisconnected(); + } + + MOCK_METHOD1(send, bool(nlmsghdr*)); + MOCK_METHOD0(getFd, int()); + MOCK_METHOD0(readData, uint64_t()); + +private: + RouteSync* m_routeSync{}; +}; + +class FpmSyncdResponseTest : public ::testing::Test +{ +public: + void SetUp() override + { + EXPECT_EQ(rtnl_route_read_protocol_names(DefaultRtProtoPath), 0); + m_routeSync.setSuppressionEnabled(true); + } + + void TearDown() override + { 
+ } + + DBConnector m_db{"APPL_DB", 0}; + RedisPipeline m_pipeline{&m_db, 1}; + RouteSync m_routeSync{&m_pipeline}; + MockFpm m_mockFpm{&m_routeSync}; +}; + +TEST_F(FpmSyncdResponseTest, RouteResponseFeedbackV4) +{ + // Expect the message to zebra is sent + EXPECT_CALL(m_mockFpm, send(_)).WillOnce([&](nlmsghdr* hdr) -> bool { + rtnl_route* routeObject{}; + + rtnl_route_parse(hdr, &routeObject); + + // table is 0 when no in default VRF + EXPECT_EQ(rtnl_route_get_table(routeObject), 0); + EXPECT_EQ(rtnl_route_get_protocol(routeObject), RTPROT_KERNEL); + + // Offload flag is set + EXPECT_EQ(rtnl_route_get_flags(routeObject) & RTM_F_OFFLOAD, RTM_F_OFFLOAD); + + return true; + }); + + m_routeSync.onRouteResponse("1.0.0.0/24", { + {"err_str", "SWSS_RC_SUCCESS"}, + {"protocol", "kernel"}, + }); +} + +TEST_F(FpmSyncdResponseTest, RouteResponseFeedbackV4Vrf) +{ + // Expect the message to zebra is sent + EXPECT_CALL(m_mockFpm, send(_)).WillOnce([&](nlmsghdr* hdr) -> bool { + rtnl_route* routeObject{}; + + rtnl_route_parse(hdr, &routeObject); + + // table is 42 (returned by fake link cache) when in non default VRF + EXPECT_EQ(rtnl_route_get_table(routeObject), 42); + EXPECT_EQ(rtnl_route_get_protocol(routeObject), 200); + + // Offload flag is set + EXPECT_EQ(rtnl_route_get_flags(routeObject) & RTM_F_OFFLOAD, RTM_F_OFFLOAD); + + return true; + }); + + m_routeSync.onRouteResponse("Vrf0:1.0.0.0/24", { + {"err_str", "SWSS_RC_SUCCESS"}, + {"protocol", "200"}, + }); +} + +TEST_F(FpmSyncdResponseTest, RouteResponseFeedbackV6) +{ + // Expect the message to zebra is sent + EXPECT_CALL(m_mockFpm, send(_)).WillOnce([&](nlmsghdr* hdr) -> bool { + rtnl_route* routeObject{}; + + rtnl_route_parse(hdr, &routeObject); + + // table is 0 when no in default VRF + EXPECT_EQ(rtnl_route_get_table(routeObject), 0); + EXPECT_EQ(rtnl_route_get_protocol(routeObject), RTPROT_KERNEL); + + // Offload flag is set + EXPECT_EQ(rtnl_route_get_flags(routeObject) & RTM_F_OFFLOAD, RTM_F_OFFLOAD); + + return 
true; + }); + + m_routeSync.onRouteResponse("1::/64", { + {"err_str", "SWSS_RC_SUCCESS"}, + {"protocol", "kernel"}, + }); +} + +TEST_F(FpmSyncdResponseTest, RouteResponseFeedbackV6Vrf) +{ + // Expect the message to zebra is sent + EXPECT_CALL(m_mockFpm, send(_)).WillOnce([&](nlmsghdr* hdr) -> bool { + rtnl_route* routeObject{}; + + rtnl_route_parse(hdr, &routeObject); + + // table is 42 (returned by fake link cache) when in non default VRF + EXPECT_EQ(rtnl_route_get_table(routeObject), 42); + EXPECT_EQ(rtnl_route_get_protocol(routeObject), 200); + + // Offload flag is set + EXPECT_EQ(rtnl_route_get_flags(routeObject) & RTM_F_OFFLOAD, RTM_F_OFFLOAD); + + return true; + }); + + m_routeSync.onRouteResponse("Vrf0:1::/64", { + {"err_str", "SWSS_RC_SUCCESS"}, + {"protocol", "200"}, + }); +} + +TEST_F(FpmSyncdResponseTest, WarmRestart) +{ + std::vector fieldValues = { + {"protocol", "kernel"}, + }; + + DBConnector applStateDb{"APPL_STATE_DB", 0}; + Table routeStateTable{&applStateDb, APP_ROUTE_TABLE_NAME}; + + routeStateTable.set("1.0.0.0/24", fieldValues); + routeStateTable.set("2.0.0.0/24", fieldValues); + routeStateTable.set("Vrf0:3.0.0.0/24", fieldValues); + + EXPECT_CALL(m_mockFpm, send(_)).Times(3).WillRepeatedly([&](nlmsghdr* hdr) -> bool { + rtnl_route* routeObject{}; + + rtnl_route_parse(hdr, &routeObject); + + // Offload flag is set + EXPECT_EQ(rtnl_route_get_flags(routeObject) & RTM_F_OFFLOAD, RTM_F_OFFLOAD); + + return true; + }); + + m_routeSync.onWarmStartEnd(applStateDb); +} diff --git a/tests/mock_tests/intfmgrd/intfmgr_ut.cpp b/tests/mock_tests/intfmgrd/intfmgr_ut.cpp new file mode 100644 index 0000000000..ef43cdeb6b --- /dev/null +++ b/tests/mock_tests/intfmgrd/intfmgr_ut.cpp @@ -0,0 +1,130 @@ +#include "gtest/gtest.h" +#include +#include +#include +#include +#include "../mock_table.h" +#include "warm_restart.h" +#define private public +#include "intfmgr.h" +#undef private + +extern int (*callback)(const std::string &cmd, std::string &stdout); +extern 
std::vector mockCallArgs; + +bool Ethernet0IPv6Set = false; + +int cb(const std::string &cmd, std::string &stdout){ + mockCallArgs.push_back(cmd); + if (cmd == "sysctl -w net.ipv6.conf.\"Ethernet0\".disable_ipv6=0") Ethernet0IPv6Set = true; + else if (cmd.find("/sbin/ip -6 address \"add\"") == 0) { + return Ethernet0IPv6Set ? 0 : 2; + } + else if (cmd == "/sbin/ip link set \"Ethernet64.10\" \"up\""){ + return 1; + } + else { + return 0; + } + return 0; +} + +// Test Fixture +namespace intfmgr_ut +{ + struct IntfMgrTest : public ::testing::Test + { + std::shared_ptr m_config_db; + std::shared_ptr m_app_db; + std::shared_ptr m_state_db; + std::vector cfg_intf_tables; + + virtual void SetUp() override + { + testing_db::reset(); + m_config_db = std::make_shared("CONFIG_DB", 0); + m_app_db = std::make_shared("APPL_DB", 0); + m_state_db = std::make_shared("STATE_DB", 0); + + swss::WarmStart::initialize("intfmgrd", "swss"); + + std::vector tables = { + CFG_INTF_TABLE_NAME, + CFG_LAG_INTF_TABLE_NAME, + CFG_VLAN_INTF_TABLE_NAME, + CFG_LOOPBACK_INTERFACE_TABLE_NAME, + CFG_VLAN_SUB_INTF_TABLE_NAME, + CFG_VOQ_INBAND_INTERFACE_TABLE_NAME, + }; + cfg_intf_tables = tables; + mockCallArgs.clear(); + callback = cb; + } + }; + + TEST_F(IntfMgrTest, testSettingIpv6Flag){ + Ethernet0IPv6Set = false; + swss::IntfMgr intfmgr(m_config_db.get(), m_app_db.get(), m_state_db.get(), cfg_intf_tables); + /* Set portStateTable */ + std::vector values; + values.emplace_back("state", "ok"); + intfmgr.m_statePortTable.set("Ethernet0", values, "SET", ""); + /* Set m_stateIntfTable */ + values.clear(); + values.emplace_back("vrf", ""); + intfmgr.m_stateIntfTable.set("Ethernet0", values, "SET", ""); + /* Set Ipv6 prefix */ + const std::vector& keys = {"Ethernet0", "2001::8/64"}; + const std::vector data; + intfmgr.doIntfAddrTask(keys, data, "SET"); + int ip_cmd_called = 0; + for (auto cmd : mockCallArgs){ + if (cmd.find("/sbin/ip -6 address \"add\"") == 0){ + ip_cmd_called++; + } + } + 
ASSERT_EQ(ip_cmd_called, 2); + } + + TEST_F(IntfMgrTest, testNoSettingIpv6Flag){ + Ethernet0IPv6Set = true; // Assuming it is already set by SDK + swss::IntfMgr intfmgr(m_config_db.get(), m_app_db.get(), m_state_db.get(), cfg_intf_tables); + /* Set portStateTable */ + std::vector values; + values.emplace_back("state", "ok"); + intfmgr.m_statePortTable.set("Ethernet0", values, "SET", ""); + /* Set m_stateIntfTable */ + values.clear(); + values.emplace_back("vrf", ""); + intfmgr.m_stateIntfTable.set("Ethernet0", values, "SET", ""); + /* Set Ipv6 prefix */ + const std::vector& keys = {"Ethernet0", "2001::8/64"}; + const std::vector data; + intfmgr.doIntfAddrTask(keys, data, "SET"); + int ip_cmd_called = 0; + for (auto cmd : mockCallArgs){ + if (cmd.find("/sbin/ip -6 address \"add\"") == 0){ + ip_cmd_called++; + } + } + ASSERT_EQ(ip_cmd_called, 1); + } + + //This test except no runtime error when the set admin status command failed + //and the subinterface has not ok status (for example not existing subinterface) + TEST_F(IntfMgrTest, testSetAdminStatusFailToNotOkSubInt){ + swss::IntfMgr intfmgr(m_config_db.get(), m_app_db.get(), m_state_db.get(), cfg_intf_tables); + intfmgr.setHostSubIntfAdminStatus("Ethernet64.10", "up", "up"); + } + + //This test except runtime error when the set admin status command failed + //and the subinterface has ok status + TEST_F(IntfMgrTest, testSetAdminStatusFailToOkSubInt){ + swss::IntfMgr intfmgr(m_config_db.get(), m_app_db.get(), m_state_db.get(), cfg_intf_tables); + /* Set portStateTable */ + std::vector values; + values.emplace_back("state", "ok"); + intfmgr.m_statePortTable.set("Ethernet64.10", values, "SET", ""); + EXPECT_THROW(intfmgr.setHostSubIntfAdminStatus("Ethernet64.10", "up", "up"), std::runtime_error); + } +} diff --git a/tests/mock_tests/intfsorch_ut.cpp b/tests/mock_tests/intfsorch_ut.cpp new file mode 100644 index 0000000000..60041520bd --- /dev/null +++ b/tests/mock_tests/intfsorch_ut.cpp @@ -0,0 +1,330 @@ +#define 
private public // make Directory::m_values available to clean it. +#include "directory.h" +#undef private +#include "gtest/gtest.h" +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_table.h" +#include +#include + + + +namespace intfsorch_test +{ + using namespace std; + + int create_rif_count = 0; + int remove_rif_count = 0; + sai_router_interface_api_t *pold_sai_rif_api; + sai_router_interface_api_t ut_sai_rif_api; + + sai_status_t _ut_create_router_interface( + _Out_ sai_object_id_t *router_interface_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t *attr_list) + { + ++create_rif_count; + return SAI_STATUS_SUCCESS; + } + + sai_status_t _ut_remove_router_interface( + _In_ sai_object_id_t router_interface_id) + { + ++remove_rif_count; + return SAI_STATUS_SUCCESS; + } + + struct IntfsOrchTest : public ::testing::Test + { + shared_ptr m_app_db; + shared_ptr m_config_db; + shared_ptr m_state_db; + shared_ptr m_chassis_app_db; + + //sai_router_interface_api_t *old_sai_rif_api_ptr; + + //sai_create_router_interface_fn old_create_rif; + //sai_remove_router_interface_fn old_remove_rif; + void SetUp() override + { + map profile = { + { "SAI_VS_SWITCH_TYPE", "SAI_VS_SWITCH_TYPE_BCM56850" }, + { "KV_DEVICE_MAC_ADDRESS", "20:03:04:05:06:00" } + }; + + ut_helper::initSaiApi(profile); + pold_sai_rif_api = sai_router_intfs_api; + ut_sai_rif_api = *sai_router_intfs_api; + sai_router_intfs_api = &ut_sai_rif_api; + + sai_router_intfs_api->create_router_interface = _ut_create_router_interface; + sai_router_intfs_api->remove_router_interface = _ut_remove_router_interface; + + m_app_db = make_shared("APPL_DB", 0); + m_config_db = make_shared("CONFIG_DB", 0); + m_state_db = make_shared("STATE_DB", 0); + m_chassis_app_db = make_shared("CHASSIS_APP_DB", 0); + + sai_attribute_t attr; + + attr.id = SAI_SWITCH_ATTR_INIT_SWITCH; + attr.value.booldata = true; + + auto status = sai_switch_api->create_switch(&gSwitchId, 1, 
&attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + // Get switch source MAC address + attr.id = SAI_SWITCH_ATTR_SRC_MAC_ADDRESS; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gMacAddress = attr.value.mac; + + attr.id = SAI_SWITCH_ATTR_DEFAULT_VIRTUAL_ROUTER_ID; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gVirtualRouterId = attr.value.oid; + + + ASSERT_EQ(gCrmOrch, nullptr); + gCrmOrch = new CrmOrch(m_config_db.get(), CFG_CRM_TABLE_NAME); + + TableConnector stateDbSwitchTable(m_state_db.get(), "SWITCH_CAPABILITY"); + TableConnector conf_asic_sensors(m_config_db.get(), CFG_ASIC_SENSORS_TABLE_NAME); + TableConnector app_switch_table(m_app_db.get(), APP_SWITCH_TABLE_NAME); + + vector switch_tables = { + conf_asic_sensors, + app_switch_table + }; + + ASSERT_EQ(gSwitchOrch, nullptr); + gSwitchOrch = new SwitchOrch(m_app_db.get(), switch_tables, stateDbSwitchTable); + + // Create dependencies ... 
+ TableConnector stateDbBfdSessionTable(m_state_db.get(), STATE_BFD_SESSION_TABLE_NAME); + gBfdOrch = new BfdOrch(m_app_db.get(), APP_BFD_SESSION_TABLE_NAME, stateDbBfdSessionTable); + + const int portsorch_base_pri = 40; + vector ports_tables = { + { APP_PORT_TABLE_NAME, portsorch_base_pri + 5 }, + { APP_VLAN_TABLE_NAME, portsorch_base_pri + 2 }, + { APP_VLAN_MEMBER_TABLE_NAME, portsorch_base_pri }, + { APP_LAG_TABLE_NAME, portsorch_base_pri + 4 }, + { APP_LAG_MEMBER_TABLE_NAME, portsorch_base_pri } + }; + + vector flex_counter_tables = { + CFG_FLEX_COUNTER_TABLE_NAME + }; + auto* flexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); + gDirectory.set(flexCounterOrch); + + ASSERT_EQ(gPortsOrch, nullptr); + gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + + vector vnet_tables = { + APP_VNET_RT_TABLE_NAME, + APP_VNET_RT_TUNNEL_TABLE_NAME + }; + + vector cfg_vnet_tables = { + CFG_VNET_RT_TABLE_NAME, + CFG_VNET_RT_TUNNEL_TABLE_NAME + }; + + auto* vnet_orch = new VNetOrch(m_app_db.get(), APP_VNET_TABLE_NAME); + gDirectory.set(vnet_orch); + auto* cfg_vnet_rt_orch = new VNetCfgRouteOrch(m_config_db.get(), m_app_db.get(), cfg_vnet_tables); + gDirectory.set(cfg_vnet_rt_orch); + auto* vnet_rt_orch = new VNetRouteOrch(m_app_db.get(), vnet_tables, vnet_orch); + gDirectory.set(vnet_rt_orch); + ASSERT_EQ(gVrfOrch, nullptr); + gVrfOrch = new VRFOrch(m_app_db.get(), APP_VRF_TABLE_NAME, m_state_db.get(), STATE_VRF_OBJECT_TABLE_NAME); + gDirectory.set(gVrfOrch); + + ASSERT_EQ(gIntfsOrch, nullptr); + gIntfsOrch = new IntfsOrch(m_app_db.get(), APP_INTF_TABLE_NAME, gVrfOrch, m_chassis_app_db.get()); + + const int fdborch_pri = 20; + + vector app_fdb_tables = { + { APP_FDB_TABLE_NAME, FdbOrch::fdborch_pri}, + { APP_VXLAN_FDB_TABLE_NAME, FdbOrch::fdborch_pri}, + { APP_MCLAG_FDB_TABLE_NAME, fdborch_pri} + }; + + TableConnector stateDbFdb(m_state_db.get(), STATE_FDB_TABLE_NAME); + TableConnector 
stateMclagDbFdb(m_state_db.get(), STATE_MCLAG_REMOTE_FDB_TABLE_NAME); + ASSERT_EQ(gFdbOrch, nullptr); + gFdbOrch = new FdbOrch(m_app_db.get(), app_fdb_tables, stateDbFdb, stateMclagDbFdb, gPortsOrch); + + ASSERT_EQ(gNeighOrch, nullptr); + gNeighOrch = new NeighOrch(m_app_db.get(), APP_NEIGH_TABLE_NAME, gIntfsOrch, gFdbOrch, gPortsOrch, m_chassis_app_db.get()); + + auto* tunnel_decap_orch = new TunnelDecapOrch(m_app_db.get(), APP_TUNNEL_DECAP_TABLE_NAME); + vector mux_tables = { + CFG_MUX_CABLE_TABLE_NAME, + CFG_PEER_SWITCH_TABLE_NAME + }; + auto* mux_orch = new MuxOrch(m_config_db.get(), mux_tables, tunnel_decap_orch, gNeighOrch, gFdbOrch); + gDirectory.set(mux_orch); + + ASSERT_EQ(gFgNhgOrch, nullptr); + const int fgnhgorch_pri = 15; + + vector fgnhg_tables = { + { CFG_FG_NHG, fgnhgorch_pri }, + { CFG_FG_NHG_PREFIX, fgnhgorch_pri }, + { CFG_FG_NHG_MEMBER, fgnhgorch_pri } + }; + gFgNhgOrch = new FgNhgOrch(m_config_db.get(), m_app_db.get(), m_state_db.get(), fgnhg_tables, gNeighOrch, gIntfsOrch, gVrfOrch); + + ASSERT_EQ(gSrv6Orch, nullptr); + vector srv6_tables = { + APP_SRV6_SID_LIST_TABLE_NAME, + APP_SRV6_MY_SID_TABLE_NAME + }; + gSrv6Orch = new Srv6Orch(m_app_db.get(), srv6_tables, gSwitchOrch, gVrfOrch, gNeighOrch); + + // Start FlowCounterRouteOrch + static const vector route_pattern_tables = { + CFG_FLOW_COUNTER_ROUTE_PATTERN_TABLE_NAME, + }; + gFlowCounterRouteOrch = new FlowCounterRouteOrch(m_config_db.get(), route_pattern_tables); + + ASSERT_EQ(gRouteOrch, nullptr); + const int routeorch_pri = 5; + vector route_tables = { + { APP_ROUTE_TABLE_NAME, routeorch_pri }, + { APP_LABEL_ROUTE_TABLE_NAME, routeorch_pri } + }; + gRouteOrch = new RouteOrch(m_app_db.get(), route_tables, gSwitchOrch, gNeighOrch, gIntfsOrch, gVrfOrch, gFgNhgOrch, gSrv6Orch); + gNhgOrch = new NhgOrch(m_app_db.get(), APP_NEXTHOP_GROUP_TABLE_NAME); + + // Recreate buffer orch to read populated data + vector buffer_tables = { APP_BUFFER_POOL_TABLE_NAME, + APP_BUFFER_PROFILE_TABLE_NAME, + 
APP_BUFFER_QUEUE_TABLE_NAME, + APP_BUFFER_PG_TABLE_NAME, + APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, + APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME }; + + gBufferOrch = new BufferOrch(m_app_db.get(), m_config_db.get(), m_state_db.get(), buffer_tables); + + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate pot table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + portTable.set("PortInitDone", { { "lanes", "0" } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + } + + void TearDown() override + { + gDirectory.m_values.clear(); + + delete gCrmOrch; + gCrmOrch = nullptr; + + delete gSwitchOrch; + gSwitchOrch = nullptr; + + delete gBfdOrch; + gBfdOrch = nullptr; + + delete gNeighOrch; + gNeighOrch = nullptr; + + delete gFdbOrch; + gFdbOrch = nullptr; + + delete gPortsOrch; + gPortsOrch = nullptr; + + delete gIntfsOrch; + gIntfsOrch = nullptr; + + delete gFgNhgOrch; + gFgNhgOrch = nullptr; + + delete gSrv6Orch; + gSrv6Orch = nullptr; + + delete gRouteOrch; + gRouteOrch = nullptr; + + delete gNhgOrch; + gNhgOrch = nullptr; + + delete gBufferOrch; + gBufferOrch = nullptr; + + delete gVrfOrch; + gVrfOrch = nullptr; + + delete gFlowCounterRouteOrch; + gFlowCounterRouteOrch = nullptr; + + sai_router_intfs_api = pold_sai_rif_api; + ut_helper::uninitSaiApi(); + } + }; + + TEST_F(IntfsOrchTest, IntfsOrchDeleteCreateRetry) + { + // create a interface + std::deque entries; + entries.push_back({"Ethernet0", "SET", { {"mtu", "9100"}}}); + auto consumer = dynamic_cast(gIntfsOrch->getExecutor(APP_INTF_TABLE_NAME)); + consumer->addToSync(entries); + auto current_create_count = create_rif_count; + 
static_cast(gIntfsOrch)->doTask(); + ASSERT_EQ(current_create_count + 1, create_rif_count); + + // create dependency to the interface + gIntfsOrch->increaseRouterIntfsRefCount("Ethernet0"); + + // delete the interface, expect retry because dependency exists + entries.clear(); + entries.push_back({"Ethernet0", "DEL", { {} }}); + consumer = dynamic_cast(gIntfsOrch->getExecutor(APP_INTF_TABLE_NAME)); + consumer->addToSync(entries); + auto current_remove_count = remove_rif_count; + static_cast(gIntfsOrch)->doTask(); + ASSERT_EQ(current_remove_count, remove_rif_count); + + // create the interface again, expect retry because interface is in removing + entries.clear(); + entries.push_back({"Ethernet0", "SET", { {"mtu", "9100"}}}); + consumer = dynamic_cast(gIntfsOrch->getExecutor(APP_INTF_TABLE_NAME)); + consumer->addToSync(entries); + current_create_count = create_rif_count; + static_cast(gIntfsOrch)->doTask(); + ASSERT_EQ(current_create_count, create_rif_count); + + // remove the dependency, expect delete and create a new one + gIntfsOrch->decreaseRouterIntfsRefCount("Ethernet0"); + current_create_count = create_rif_count; + current_remove_count = remove_rif_count; + static_cast(gIntfsOrch)->doTask(); + ASSERT_EQ(current_create_count + 1, create_rif_count); + ASSERT_EQ(current_remove_count + 1, remove_rif_count); + } +} \ No newline at end of file diff --git a/tests/mock_tests/mock_dbconnector.cpp b/tests/mock_tests/mock_dbconnector.cpp index 362e1446d3..7cabdc2224 100644 --- a/tests/mock_tests/mock_dbconnector.cpp +++ b/tests/mock_tests/mock_dbconnector.cpp @@ -5,6 +5,8 @@ #include #include +#include + #include "dbconnector.h" namespace swss diff --git a/tests/mock_tests/mock_orchagent_main.cpp b/tests/mock_tests/mock_orchagent_main.cpp index 62a03dc770..e709824707 100644 --- a/tests/mock_tests/mock_orchagent_main.cpp +++ b/tests/mock_tests/mock_orchagent_main.cpp @@ -1,6 +1,6 @@ extern "C" { -#include "sai.h" -#include "saistatus.h" +#include +#include } #include 
"orchdaemon.h" @@ -12,15 +12,6 @@ sai_object_id_t gSwitchId = SAI_NULL_OBJECT_ID; MacAddress gMacAddress; MacAddress gVxlanMacAddress; -#define DEFAULT_BATCH_SIZE 128 -int gBatchSize = DEFAULT_BATCH_SIZE; - -bool gSairedisRecord = true; -bool gSwssRecord = true; -bool gLogRotate = false; -bool gSaiRedisLogRotate = false; -ofstream gRecordOfs; -string gRecordFile; string gMySwitchType = "switch"; int32_t gVoqMySwitchId = 0; string gMyHostName = "Linecard1"; diff --git a/tests/mock_tests/mock_orchagent_main.h b/tests/mock_tests/mock_orchagent_main.h index 04e82e74eb..93c1588b9b 100644 --- a/tests/mock_tests/mock_orchagent_main.h +++ b/tests/mock_tests/mock_orchagent_main.h @@ -5,11 +5,18 @@ #include "crmorch.h" #include "portsorch.h" #include "routeorch.h" +#include "flowcounterrouteorch.h" #include "intfsorch.h" #include "neighorch.h" #include "fdborch.h" #include "mirrororch.h" +#define private public #include "bufferorch.h" +#include "qosorch.h" +#define protected public +#include "pfcwdorch.h" +#undef protected +#undef private #include "vrforch.h" #include "vnetorch.h" #include "vxlanorch.h" @@ -19,15 +26,10 @@ #include "tunneldecaporch.h" #include "muxorch.h" #include "nhgorch.h" +#include "copporch.h" #include "directory.h" extern int gBatchSize; -extern bool gSwssRecord; -extern bool gSairedisRecord; -extern bool gLogRotate; -extern bool gSaiRedisLogRotate; -extern ofstream gRecordOfs; -extern string gRecordFile; extern MacAddress gMacAddress; extern MacAddress gVxlanMacAddress; @@ -41,18 +43,25 @@ extern CrmOrch *gCrmOrch; extern PortsOrch *gPortsOrch; extern FgNhgOrch *gFgNhgOrch; extern RouteOrch *gRouteOrch; +extern FlowCounterRouteOrch *gFlowCounterRouteOrch; extern IntfsOrch *gIntfsOrch; extern NeighOrch *gNeighOrch; extern FdbOrch *gFdbOrch; extern MirrorOrch *gMirrorOrch; extern BufferOrch *gBufferOrch; +extern QosOrch *gQosOrch; +template PfcWdSwOrch *gPfcwdOrch; extern VRFOrch *gVrfOrch; extern NhgOrch *gNhgOrch; extern Srv6Orch *gSrv6Orch; +extern 
BfdOrch *gBfdOrch; +extern AclOrch *gAclOrch; +extern PolicerOrch *gPolicerOrch; extern Directory gDirectory; extern sai_acl_api_t *sai_acl_api; extern sai_switch_api_t *sai_switch_api; +extern sai_hash_api_t *sai_hash_api; extern sai_virtual_router_api_t *sai_virtual_router_api; extern sai_port_api_t *sai_port_api; extern sai_lag_api_t *sai_lag_api; @@ -63,8 +72,17 @@ extern sai_route_api_t *sai_route_api; extern sai_neighbor_api_t *sai_neighbor_api; extern sai_tunnel_api_t *sai_tunnel_api; extern sai_next_hop_api_t *sai_next_hop_api; +extern sai_next_hop_group_api_t *sai_next_hop_group_api; extern sai_hostif_api_t *sai_hostif_api; +extern sai_policer_api_t *sai_policer_api; extern sai_buffer_api_t *sai_buffer_api; +extern sai_qos_map_api_t *sai_qos_map_api; +extern sai_scheduler_api_t *sai_scheduler_api; +extern sai_scheduler_group_api_t *sai_scheduler_group_api; +extern sai_wred_api_t *sai_wred_api; extern sai_queue_api_t *sai_queue_api; extern sai_udf_api_t* sai_udf_api; extern sai_mpls_api_t* sai_mpls_api; +extern sai_counter_api_t* sai_counter_api; +extern sai_samplepacket_api_t *sai_samplepacket_api; +extern sai_fdb_api_t* sai_fdb_api; diff --git a/tests/mock_tests/mock_sai_api.h b/tests/mock_tests/mock_sai_api.h new file mode 100644 index 0000000000..63d8921bf1 --- /dev/null +++ b/tests/mock_tests/mock_sai_api.h @@ -0,0 +1,149 @@ +#include "mock_orchagent_main.h" +#include + +using ::testing::Return; +using ::testing::NiceMock; + +std::set apply_mock_fns; +std::set remove_mock_fns; + +#define CREATE_PARAMS(sai_object_type) _In_ const sai_##sai_object_type##_entry_t *sai_object_type##_entry, _In_ uint32_t attr_count, _In_ const sai_attribute_t *attr_list +#define REMOVE_PARAMS(sai_object_type) _In_ const sai_##sai_object_type##_entry_t *sai_object_type##_entry +#define CREATE_ARGS(sai_object_type) sai_object_type##_entry, attr_count, attr_list +#define REMOVE_ARGS(sai_object_type) sai_object_type##_entry +#define GENERIC_CREATE_PARAMS(sai_object_type) _Out_ 
sai_object_id_t *sai_object_type##_id, _In_ sai_object_id_t switch_id, _In_ uint32_t attr_count, _In_ const sai_attribute_t *attr_list +#define GENERIC_REMOVE_PARAMS(sai_object_type) _In_ sai_object_id_t sai_object_type##_id +#define GENERIC_CREATE_ARGS(sai_object_type) sai_object_type##_id, switch_id, attr_count, attr_list +#define GENERIC_REMOVE_ARGS(sai_object_type) sai_object_type##_id + +/* +The macro DEFINE_SAI_API_MOCK will perform the steps to mock the SAI API for the sai_object_type it is called on: +1. Create a pointer to store the original API +2. Create a new SAI_API where we can safely mock without affecting the original API +3. Define a class with mocked methods to create and remove the object type (to be used with gMock) +4. Create a pointer of the above class +5. Define two wrapper functions to create and remove the object type that has the same signature as the original SAI API function +6. Define a method to apply the mock +7. Define a method to remove the mock +*/ +#define DEFINE_SAI_API_MOCK(sai_object_type) \ + sai_##sai_object_type##_api_t *old_sai_##sai_object_type##_api; \ + sai_##sai_object_type##_api_t ut_sai_##sai_object_type##_api; \ + class mock_sai_##sai_object_type##_api_t \ + { \ + public: \ + mock_sai_##sai_object_type##_api_t() \ + { \ + ON_CALL(*this, create_##sai_object_type##_entry) \ + .WillByDefault( \ + [this](CREATE_PARAMS(sai_object_type)) { \ + return old_sai_##sai_object_type##_api->create_##sai_object_type##_entry(CREATE_ARGS(sai_object_type)); \ + }); \ + ON_CALL(*this, remove_##sai_object_type##_entry) \ + .WillByDefault( \ + [this](REMOVE_PARAMS(sai_object_type)) { \ + return old_sai_##sai_object_type##_api->remove_##sai_object_type##_entry(REMOVE_ARGS(sai_object_type)); \ + }); \ + } \ + MOCK_METHOD3(create_##sai_object_type##_entry, sai_status_t(CREATE_PARAMS(sai_object_type))); \ + MOCK_METHOD1(remove_##sai_object_type##_entry, sai_status_t(REMOVE_PARAMS(sai_object_type))); \ + }; \ + 
mock_sai_##sai_object_type##_api_t *mock_sai_##sai_object_type##_api; \ + sai_status_t mock_create_##sai_object_type##_entry(CREATE_PARAMS(sai_object_type)) \ + { \ + return mock_sai_##sai_object_type##_api->create_##sai_object_type##_entry(CREATE_ARGS(sai_object_type)); \ + } \ + sai_status_t mock_remove_##sai_object_type##_entry(REMOVE_PARAMS(sai_object_type)) \ + { \ + return mock_sai_##sai_object_type##_api->remove_##sai_object_type##_entry(REMOVE_ARGS(sai_object_type)); \ + } \ + void apply_sai_##sai_object_type##_api_mock() \ + { \ + mock_sai_##sai_object_type##_api = new NiceMock(); \ + \ + old_sai_##sai_object_type##_api = sai_##sai_object_type##_api; \ + ut_sai_##sai_object_type##_api = *sai_##sai_object_type##_api; \ + sai_##sai_object_type##_api = &ut_sai_##sai_object_type##_api; \ + \ + sai_##sai_object_type##_api->create_##sai_object_type##_entry = mock_create_##sai_object_type##_entry; \ + sai_##sai_object_type##_api->remove_##sai_object_type##_entry = mock_remove_##sai_object_type##_entry; \ + } \ + void remove_sai_##sai_object_type##_api_mock() \ + { \ + sai_##sai_object_type##_api = old_sai_##sai_object_type##_api; \ + delete mock_sai_##sai_object_type##_api; \ + } + +#define DEFINE_SAI_GENERIC_API_MOCK(sai_api_name, sai_object_type) \ + sai_##sai_api_name##_api_t *old_sai_##sai_api_name##_api; \ + sai_##sai_api_name##_api_t ut_sai_##sai_api_name##_api; \ + class mock_sai_##sai_api_name##_api_t \ + { \ + public: \ + mock_sai_##sai_api_name##_api_t() \ + { \ + ON_CALL(*this, create_##sai_object_type) \ + .WillByDefault( \ + [this](GENERIC_CREATE_PARAMS(sai_object_type)) { \ + return old_sai_##sai_api_name##_api->create_##sai_object_type(GENERIC_CREATE_ARGS(sai_object_type)); \ + }); \ + ON_CALL(*this, remove_##sai_object_type) \ + .WillByDefault( \ + [this](GENERIC_REMOVE_PARAMS(sai_object_type)) { \ + return old_sai_##sai_api_name##_api->remove_##sai_object_type(GENERIC_REMOVE_ARGS(sai_object_type)); \ + }); \ + } \ + 
MOCK_METHOD4(create_##sai_object_type, sai_status_t(GENERIC_CREATE_PARAMS(sai_object_type))); \ + MOCK_METHOD1(remove_##sai_object_type, sai_status_t(GENERIC_REMOVE_PARAMS(sai_object_type))); \ + }; \ + mock_sai_##sai_api_name##_api_t *mock_sai_##sai_api_name##_api; \ + sai_status_t mock_create_##sai_object_type(GENERIC_CREATE_PARAMS(sai_object_type)) \ + { \ + return mock_sai_##sai_api_name##_api->create_##sai_object_type(GENERIC_CREATE_ARGS(sai_object_type)); \ + } \ + sai_status_t mock_remove_##sai_object_type(GENERIC_REMOVE_PARAMS(sai_object_type)) \ + { \ + return mock_sai_##sai_api_name##_api->remove_##sai_object_type(GENERIC_REMOVE_ARGS(sai_object_type)); \ + } \ + void apply_sai_##sai_api_name##_api_mock() \ + { \ + mock_sai_##sai_api_name##_api = new NiceMock(); \ + \ + old_sai_##sai_api_name##_api = sai_##sai_api_name##_api; \ + ut_sai_##sai_api_name##_api = *sai_##sai_api_name##_api; \ + sai_##sai_api_name##_api = &ut_sai_##sai_api_name##_api; \ + \ + sai_##sai_api_name##_api->create_##sai_object_type = mock_create_##sai_object_type; \ + sai_##sai_api_name##_api->remove_##sai_object_type = mock_remove_##sai_object_type; \ + } \ + void remove_sai_##sai_api_name##_api_mock() \ + { \ + sai_##sai_api_name##_api = old_sai_##sai_api_name##_api; \ + delete mock_sai_##sai_api_name##_api; \ + } + +// Stores pointers to mock apply/remove functions to avoid needing to manually call each function +#define INIT_SAI_API_MOCK(sai_object_type) \ + apply_mock_fns.insert(&apply_sai_##sai_object_type##_api_mock); \ + remove_mock_fns.insert(&remove_sai_##sai_object_type##_api_mock); + +void MockSaiApis() +{ + if (apply_mock_fns.empty()) + { + EXPECT_TRUE(false) << "No mock application functions found. 
Did you call DEFINE_SAI_API_MOCK and INIT_SAI_API_MOCK for the necessary SAI object type?"; + } + + for (auto apply_fn : apply_mock_fns) + { + (*apply_fn)(); + } +} + +void RestoreSaiApis() +{ + for (auto remove_fn : remove_mock_fns) + { + (*remove_fn)(); + } +} diff --git a/tests/mock_tests/mock_sai_bridge.h b/tests/mock_tests/mock_sai_bridge.h new file mode 100644 index 0000000000..8141ca66bb --- /dev/null +++ b/tests/mock_tests/mock_sai_bridge.h @@ -0,0 +1,34 @@ +// Define classes and functions to mock SAI bridge functions. +#pragma once + +#include + +extern "C" +{ +#include "sai.h" +} + +// Mock class including mock functions mapping to SAI bridge functions. +class MockSaiBridge +{ + public: + MOCK_METHOD4(create_bridge_port, sai_status_t(sai_object_id_t *bridge_port_id, + sai_object_id_t switch_id, + uint32_t attr_count, + const sai_attribute_t *attr_list)); +}; + +// Note that before mock functions below are used, mock_sai_bridge must be +// initialized to point to an instance of MockSaiBridge. 
+MockSaiBridge *mock_sai_bridge; + +sai_status_t mock_create_bridge_port(sai_object_id_t *bridge_port_id, + sai_object_id_t switch_id, + uint32_t attr_count, + const sai_attribute_t *attr_list) +{ + return mock_sai_bridge->create_bridge_port(bridge_port_id, switch_id, attr_count, attr_list); +} + + + diff --git a/tests/mock_tests/mock_shell_command.cpp b/tests/mock_tests/mock_shell_command.cpp new file mode 100644 index 0000000000..f3ccfbfe5e --- /dev/null +++ b/tests/mock_tests/mock_shell_command.cpp @@ -0,0 +1,15 @@ +#include +#include + +int mockCmdReturn = 0; +std::string mockCmdStdcout = ""; +std::vector mockCallArgs; + +namespace swss { + int exec(const std::string &cmd, std::string &stdout) + { + mockCallArgs.push_back(cmd); + stdout = mockCmdStdcout; + return mockCmdReturn; + } +} diff --git a/tests/mock_tests/mock_table.cpp b/tests/mock_tests/mock_table.cpp index 29011c30a0..4d512a9835 100644 --- a/tests/mock_tests/mock_table.cpp +++ b/tests/mock_tests/mock_table.cpp @@ -1,4 +1,6 @@ #include "table.h" +#include "producerstatetable.h" +#include using TableDataT = std::map>; using TablesT = std::map; @@ -71,4 +73,52 @@ namespace swss keys.push_back(it.first); } } + + void Table::del(const std::string &key, const std::string& /* op */, const std::string& /*prefix*/) + { + auto table = gDB[m_pipe->getDbId()].find(getTableName()); + if (table != gDB[m_pipe->getDbId()].end()){ + table->second.erase(key); + } + } + + void ProducerStateTable::set(const std::string &key, + const std::vector &values, + const std::string &op, + const std::string &prefix) + { + auto &table = gDB[m_pipe->getDbId()][getTableName()]; + auto iter = table.find(key); + if (iter == table.end()) + { + table[key] = values; + } + else + { + std::vector new_values(values); + std::set field_set; + for (auto &value : values) + { + field_set.insert(fvField(value)); + } + for (auto &value : iter->second) + { + auto &field = fvField(value); + if (field_set.find(field) != field_set.end()) + { + 
continue; + } + new_values.push_back(value); + } + iter->second.swap(new_values); + } + } + + void ProducerStateTable::del(const std::string &key, + const std::string &op, + const std::string &prefix) + { + auto &table = gDB[m_pipe->getDbId()][getTableName()]; + table.erase(key); + } } diff --git a/tests/mock_tests/mux_rollback_ut.cpp b/tests/mock_tests/mux_rollback_ut.cpp new file mode 100644 index 0000000000..578b6c817b --- /dev/null +++ b/tests/mock_tests/mux_rollback_ut.cpp @@ -0,0 +1,499 @@ +#define private public +#include "directory.h" +#undef private +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_sai_api.h" +#include "gtest/gtest.h" +#include + +DEFINE_SAI_API_MOCK(neighbor); +DEFINE_SAI_API_MOCK(route); +DEFINE_SAI_GENERIC_API_MOCK(acl, acl_entry); +DEFINE_SAI_GENERIC_API_MOCK(next_hop, next_hop); + +namespace mux_rollback_test +{ + using namespace std; + using ::testing::Return; + using ::testing::Throw; + + static const string PEER_SWITCH_HOSTNAME = "peer_hostname"; + static const string PEER_IPV4_ADDRESS = "1.1.1.1"; + static const string TEST_INTERFACE = "Ethernet4"; + static const string ACTIVE = "active"; + static const string STANDBY = "standby"; + static const string STATE = "state"; + static const string VLAN_NAME = "Vlan1000"; + static const string SERVER_IP = "192.168.0.2"; + + class MuxRollbackTest : public ::testing::Test + { + protected: + std::vector ut_orch_list; + shared_ptr m_app_db; + shared_ptr m_config_db; + shared_ptr m_state_db; + shared_ptr m_chassis_app_db; + MuxOrch *m_MuxOrch; + MuxCableOrch *m_MuxCableOrch; + MuxCable *m_MuxCable; + TunnelDecapOrch *m_TunnelDecapOrch; + MuxStateOrch *m_MuxStateOrch; + FlexCounterOrch *m_FlexCounterOrch; + mock_sai_neighbor_api_t mock_sai_neighbor_api_; + + void SetMuxStateFromAppDb(std::string state) + { + Table mux_cable_table = Table(m_app_db.get(), APP_MUX_CABLE_TABLE_NAME); + 
mux_cable_table.set(TEST_INTERFACE, { { STATE, state } }); + m_MuxCableOrch->addExistingData(&mux_cable_table); + static_cast(m_MuxCableOrch)->doTask(); + } + + void SetAndAssertMuxState(std::string state) + { + m_MuxCable->setState(state); + EXPECT_EQ(state, m_MuxCable->getState()); + } + + void ApplyDualTorConfigs() + { + Table peer_switch_table = Table(m_config_db.get(), CFG_PEER_SWITCH_TABLE_NAME); + Table tunnel_table = Table(m_app_db.get(), APP_TUNNEL_DECAP_TABLE_NAME); + Table mux_cable_table = Table(m_config_db.get(), CFG_MUX_CABLE_TABLE_NAME); + Table port_table = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + Table vlan_table = Table(m_app_db.get(), APP_VLAN_TABLE_NAME); + Table vlan_member_table = Table(m_app_db.get(), APP_VLAN_MEMBER_TABLE_NAME); + Table neigh_table = Table(m_app_db.get(), APP_NEIGH_TABLE_NAME); + Table intf_table = Table(m_app_db.get(), APP_INTF_TABLE_NAME); + + auto ports = ut_helper::getInitialSaiPorts(); + port_table.set(TEST_INTERFACE, ports[TEST_INTERFACE]); + port_table.set("PortConfigDone", { { "count", to_string(1) } }); + port_table.set("PortInitDone", { {} }); + + neigh_table.set( + VLAN_NAME + neigh_table.getTableNameSeparator() + SERVER_IP, { { "neigh", "62:f9:65:10:2f:04" }, + { "family", "IPv4" } }); + + vlan_table.set(VLAN_NAME, { { "admin_status", "up" }, + { "mtu", "9100" }, + { "mac", "00:aa:bb:cc:dd:ee" } }); + vlan_member_table.set( + VLAN_NAME + vlan_member_table.getTableNameSeparator() + TEST_INTERFACE, + { { "tagging_mode", "untagged" } }); + + intf_table.set(VLAN_NAME, { { "grat_arp", "enabled" }, + { "proxy_arp", "enabled" }, + { "mac_addr", "00:00:00:00:00:00" } }); + intf_table.set( + VLAN_NAME + neigh_table.getTableNameSeparator() + "192.168.0.1/21", { + { "scope", "global" }, + { "family", "IPv4" }, + }); + + tunnel_table.set(MUX_TUNNEL, { { "dscp_mode", "uniform" }, + { "dst_ip", "2.2.2.2" }, + { "ecn_mode", "copy_from_outer" }, + { "encap_ecn_mode", "standard" }, + { "ttl_mode", "pipe" }, + { 
"tunnel_type", "IPINIP" } }); + + peer_switch_table.set(PEER_SWITCH_HOSTNAME, { { "address_ipv4", PEER_IPV4_ADDRESS } }); + + mux_cable_table.set(TEST_INTERFACE, { { "server_ipv4", SERVER_IP + "/32" }, + { "server_ipv6", "a::a/128" }, + { "state", "auto" } }); + + gPortsOrch->addExistingData(&port_table); + gPortsOrch->addExistingData(&vlan_table); + gPortsOrch->addExistingData(&vlan_member_table); + static_cast(gPortsOrch)->doTask(); + + gIntfsOrch->addExistingData(&intf_table); + static_cast(gIntfsOrch)->doTask(); + + m_TunnelDecapOrch->addExistingData(&tunnel_table); + static_cast(m_TunnelDecapOrch)->doTask(); + + m_MuxOrch->addExistingData(&peer_switch_table); + static_cast(m_MuxOrch)->doTask(); + + m_MuxOrch->addExistingData(&mux_cable_table); + static_cast(m_MuxOrch)->doTask(); + + gNeighOrch->addExistingData(&neigh_table); + static_cast(gNeighOrch)->doTask(); + + m_MuxCable = m_MuxOrch->getMuxCable(TEST_INTERFACE); + + // We always expect the mux to be initialized to standby + EXPECT_EQ(STANDBY, m_MuxCable->getState()); + } + + void PrepareSai() + { + sai_attribute_t attr; + + attr.id = SAI_SWITCH_ATTR_INIT_SWITCH; + attr.value.booldata = true; + + sai_status_t status = sai_switch_api->create_switch(&gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + // Get switch source MAC address + attr.id = SAI_SWITCH_ATTR_SRC_MAC_ADDRESS; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gMacAddress = attr.value.mac; + + attr.id = SAI_SWITCH_ATTR_DEFAULT_VIRTUAL_ROUTER_ID; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gVirtualRouterId = attr.value.oid; + + /* Create a loopback underlay router interface */ + vector underlay_intf_attrs; + + sai_attribute_t underlay_intf_attr; + underlay_intf_attr.id = SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID; + underlay_intf_attr.value.oid = gVirtualRouterId; + 
underlay_intf_attrs.push_back(underlay_intf_attr); + + underlay_intf_attr.id = SAI_ROUTER_INTERFACE_ATTR_TYPE; + underlay_intf_attr.value.s32 = SAI_ROUTER_INTERFACE_TYPE_LOOPBACK; + underlay_intf_attrs.push_back(underlay_intf_attr); + + underlay_intf_attr.id = SAI_ROUTER_INTERFACE_ATTR_MTU; + underlay_intf_attr.value.u32 = 9100; + underlay_intf_attrs.push_back(underlay_intf_attr); + + status = sai_router_intfs_api->create_router_interface(&gUnderlayIfId, gSwitchId, (uint32_t)underlay_intf_attrs.size(), underlay_intf_attrs.data()); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + } + + void SetUp() override + { + map profile = { + { "SAI_VS_SWITCH_TYPE", "SAI_VS_SWITCH_TYPE_BCM56850" }, + { "KV_DEVICE_MAC_ADDRESS", "20:03:04:05:06:00" } + }; + + ut_helper::initSaiApi(profile); + m_app_db = make_shared("APPL_DB", 0); + m_config_db = make_shared("CONFIG_DB", 0); + m_state_db = make_shared("STATE_DB", 0); + m_chassis_app_db = make_shared("CHASSIS_APP_DB", 0); + + PrepareSai(); + + const int portsorch_base_pri = 40; + vector ports_tables = { + { APP_PORT_TABLE_NAME, portsorch_base_pri + 5 }, + { APP_VLAN_TABLE_NAME, portsorch_base_pri + 2 }, + { APP_VLAN_MEMBER_TABLE_NAME, portsorch_base_pri }, + { APP_LAG_TABLE_NAME, portsorch_base_pri + 4 }, + { APP_LAG_MEMBER_TABLE_NAME, portsorch_base_pri } + }; + + vector flex_counter_tables = { + CFG_FLEX_COUNTER_TABLE_NAME + }; + + m_FlexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); + gDirectory.set(m_FlexCounterOrch); + ut_orch_list.push_back((Orch **)&m_FlexCounterOrch); + + static const vector route_pattern_tables = { + CFG_FLOW_COUNTER_ROUTE_PATTERN_TABLE_NAME, + }; + gFlowCounterRouteOrch = new FlowCounterRouteOrch(m_config_db.get(), route_pattern_tables); + gDirectory.set(gFlowCounterRouteOrch); + ut_orch_list.push_back((Orch **)&gFlowCounterRouteOrch); + + gVrfOrch = new VRFOrch(m_app_db.get(), APP_VRF_TABLE_NAME, m_state_db.get(), STATE_VRF_OBJECT_TABLE_NAME); + gDirectory.set(gVrfOrch); + 
ut_orch_list.push_back((Orch **)&gVrfOrch); + + gIntfsOrch = new IntfsOrch(m_app_db.get(), APP_INTF_TABLE_NAME, gVrfOrch, m_chassis_app_db.get()); + gDirectory.set(gIntfsOrch); + ut_orch_list.push_back((Orch **)&gIntfsOrch); + + gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + gDirectory.set(gPortsOrch); + ut_orch_list.push_back((Orch **)&gPortsOrch); + + const int fgnhgorch_pri = 15; + + vector fgnhg_tables = { + { CFG_FG_NHG, fgnhgorch_pri }, + { CFG_FG_NHG_PREFIX, fgnhgorch_pri }, + { CFG_FG_NHG_MEMBER, fgnhgorch_pri } + }; + + gFgNhgOrch = new FgNhgOrch(m_config_db.get(), m_app_db.get(), m_state_db.get(), fgnhg_tables, gNeighOrch, gIntfsOrch, gVrfOrch); + gDirectory.set(gFgNhgOrch); + ut_orch_list.push_back((Orch **)&gFgNhgOrch); + + const int fdborch_pri = 20; + + vector app_fdb_tables = { + { APP_FDB_TABLE_NAME, FdbOrch::fdborch_pri }, + { APP_VXLAN_FDB_TABLE_NAME, FdbOrch::fdborch_pri }, + { APP_MCLAG_FDB_TABLE_NAME, fdborch_pri } + }; + + TableConnector stateDbFdb(m_state_db.get(), STATE_FDB_TABLE_NAME); + TableConnector stateMclagDbFdb(m_state_db.get(), STATE_MCLAG_REMOTE_FDB_TABLE_NAME); + gFdbOrch = new FdbOrch(m_app_db.get(), app_fdb_tables, stateDbFdb, stateMclagDbFdb, gPortsOrch); + gDirectory.set(gFdbOrch); + ut_orch_list.push_back((Orch **)&gFdbOrch); + + gNeighOrch = new NeighOrch(m_app_db.get(), APP_NEIGH_TABLE_NAME, gIntfsOrch, gFdbOrch, gPortsOrch, m_chassis_app_db.get()); + gDirectory.set(gNeighOrch); + ut_orch_list.push_back((Orch **)&gNeighOrch); + + m_TunnelDecapOrch = new TunnelDecapOrch(m_app_db.get(), APP_TUNNEL_DECAP_TABLE_NAME); + gDirectory.set(m_TunnelDecapOrch); + ut_orch_list.push_back((Orch **)&m_TunnelDecapOrch); + vector mux_tables = { + CFG_MUX_CABLE_TABLE_NAME, + CFG_PEER_SWITCH_TABLE_NAME + }; + + vector buffer_tables = { + APP_BUFFER_POOL_TABLE_NAME, + APP_BUFFER_PROFILE_TABLE_NAME, + APP_BUFFER_QUEUE_TABLE_NAME, + APP_BUFFER_PG_TABLE_NAME, + 
APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, + APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME + }; + gBufferOrch = new BufferOrch(m_app_db.get(), m_config_db.get(), m_state_db.get(), buffer_tables); + + TableConnector stateDbSwitchTable(m_state_db.get(), STATE_SWITCH_CAPABILITY_TABLE_NAME); + TableConnector app_switch_table(m_app_db.get(), APP_SWITCH_TABLE_NAME); + TableConnector conf_asic_sensors(m_config_db.get(), CFG_ASIC_SENSORS_TABLE_NAME); + + vector switch_tables = { + conf_asic_sensors, + app_switch_table + }; + vector policer_tables = { + TableConnector(m_config_db.get(), CFG_POLICER_TABLE_NAME), + TableConnector(m_config_db.get(), CFG_PORT_STORM_CONTROL_TABLE_NAME) + }; + + TableConnector stateDbStorm(m_state_db.get(), STATE_BUM_STORM_CAPABILITY_TABLE_NAME); + gPolicerOrch = new PolicerOrch(policer_tables, gPortsOrch); + gDirectory.set(gPolicerOrch); + ut_orch_list.push_back((Orch **)&gPolicerOrch); + + gSwitchOrch = new SwitchOrch(m_app_db.get(), switch_tables, stateDbSwitchTable); + gDirectory.set(gSwitchOrch); + ut_orch_list.push_back((Orch **)&gSwitchOrch); + + gNhgOrch = new NhgOrch(m_app_db.get(), APP_NEXTHOP_GROUP_TABLE_NAME); + gDirectory.set(gNhgOrch); + ut_orch_list.push_back((Orch **)&gNhgOrch); + + vector srv6_tables = { + APP_SRV6_SID_LIST_TABLE_NAME, + APP_SRV6_MY_SID_TABLE_NAME + }; + gSrv6Orch = new Srv6Orch(m_app_db.get(), srv6_tables, gSwitchOrch, gVrfOrch, gNeighOrch); + gDirectory.set(gSrv6Orch); + ut_orch_list.push_back((Orch **)&gSrv6Orch); + gCrmOrch = new CrmOrch(m_config_db.get(), CFG_CRM_TABLE_NAME); + gDirectory.set(gCrmOrch); + ut_orch_list.push_back((Orch **)&gCrmOrch); + + const int routeorch_pri = 5; + vector route_tables = { + { APP_ROUTE_TABLE_NAME, routeorch_pri }, + { APP_LABEL_ROUTE_TABLE_NAME, routeorch_pri } + }; + gRouteOrch = new RouteOrch(m_app_db.get(), route_tables, gSwitchOrch, gNeighOrch, gIntfsOrch, gVrfOrch, gFgNhgOrch, gSrv6Orch); + gDirectory.set(gRouteOrch); + ut_orch_list.push_back((Orch **)&gRouteOrch); + 
TableConnector stateDbMirrorSession(m_state_db.get(), STATE_MIRROR_SESSION_TABLE_NAME); + TableConnector confDbMirrorSession(m_config_db.get(), CFG_MIRROR_SESSION_TABLE_NAME); + gMirrorOrch = new MirrorOrch(stateDbMirrorSession, confDbMirrorSession, gPortsOrch, gRouteOrch, gNeighOrch, gFdbOrch, gPolicerOrch); + gDirectory.set(gMirrorOrch); + ut_orch_list.push_back((Orch **)&gMirrorOrch); + + TableConnector confDbAclTable(m_config_db.get(), CFG_ACL_TABLE_TABLE_NAME); + TableConnector confDbAclTableType(m_config_db.get(), CFG_ACL_TABLE_TYPE_TABLE_NAME); + TableConnector confDbAclRuleTable(m_config_db.get(), CFG_ACL_RULE_TABLE_NAME); + TableConnector appDbAclTable(m_app_db.get(), APP_ACL_TABLE_TABLE_NAME); + TableConnector appDbAclTableType(m_app_db.get(), APP_ACL_TABLE_TYPE_TABLE_NAME); + TableConnector appDbAclRuleTable(m_app_db.get(), APP_ACL_RULE_TABLE_NAME); + + vector acl_table_connectors = { + confDbAclTableType, + confDbAclTable, + confDbAclRuleTable, + appDbAclTable, + appDbAclRuleTable, + appDbAclTableType, + }; + gAclOrch = new AclOrch(acl_table_connectors, m_state_db.get(), + gSwitchOrch, gPortsOrch, gMirrorOrch, gNeighOrch, gRouteOrch, NULL); + gDirectory.set(gAclOrch); + ut_orch_list.push_back((Orch **)&gAclOrch); + + m_MuxOrch = new MuxOrch(m_config_db.get(), mux_tables, m_TunnelDecapOrch, gNeighOrch, gFdbOrch); + gDirectory.set(m_MuxOrch); + ut_orch_list.push_back((Orch **)&m_MuxOrch); + + m_MuxCableOrch = new MuxCableOrch(m_app_db.get(), m_state_db.get(), APP_MUX_CABLE_TABLE_NAME); + gDirectory.set(m_MuxCableOrch); + ut_orch_list.push_back((Orch **)&m_MuxCableOrch); + + m_MuxStateOrch = new MuxStateOrch(m_state_db.get(), STATE_HW_MUX_CABLE_TABLE_NAME); + gDirectory.set(m_MuxStateOrch); + ut_orch_list.push_back((Orch **)&m_MuxStateOrch); + + ApplyDualTorConfigs(); + INIT_SAI_API_MOCK(neighbor); + INIT_SAI_API_MOCK(route); + INIT_SAI_API_MOCK(acl); + INIT_SAI_API_MOCK(next_hop); + MockSaiApis(); + } + + void TearDown() override + { + for 
(std::vector::reverse_iterator rit = ut_orch_list.rbegin(); rit != ut_orch_list.rend(); ++rit) + { + Orch **orch = *rit; + delete *orch; + *orch = nullptr; + } + + gDirectory.m_values.clear(); + + RestoreSaiApis(); + ut_helper::uninitSaiApi(); + } + }; + + TEST_F(MuxRollbackTest, StandbyToActiveNeighborAlreadyExists) + { + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry) + .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); + SetAndAssertMuxState(ACTIVE); + } + + TEST_F(MuxRollbackTest, ActiveToStandbyNeighborNotFound) + { + SetAndAssertMuxState(ACTIVE); + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry) + .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); + SetAndAssertMuxState(STANDBY); + } + + TEST_F(MuxRollbackTest, StandbyToActiveRouteNotFound) + { + EXPECT_CALL(*mock_sai_route_api, remove_route_entry) + .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); + SetAndAssertMuxState(ACTIVE); + } + + TEST_F(MuxRollbackTest, ActiveToStandbyRouteAlreadyExists) + { + SetAndAssertMuxState(ACTIVE); + EXPECT_CALL(*mock_sai_route_api, create_route_entry) + .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); + SetAndAssertMuxState(STANDBY); + } + + TEST_F(MuxRollbackTest, StandbyToActiveAclNotFound) + { + EXPECT_CALL(*mock_sai_acl_api, remove_acl_entry) + .WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); + SetAndAssertMuxState(ACTIVE); + } + + TEST_F(MuxRollbackTest, ActiveToStandbyAclAlreadyExists) + { + SetAndAssertMuxState(ACTIVE); + EXPECT_CALL(*mock_sai_acl_api, create_acl_entry) + .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); + SetAndAssertMuxState(STANDBY); + } + + TEST_F(MuxRollbackTest, StandbyToActiveNextHopAlreadyExists) + { + EXPECT_CALL(*mock_sai_next_hop_api, create_next_hop) + .WillOnce(Return(SAI_STATUS_ITEM_ALREADY_EXISTS)); + SetAndAssertMuxState(ACTIVE); + } + + TEST_F(MuxRollbackTest, ActiveToStandbyNextHopNotFound) + { + SetAndAssertMuxState(ACTIVE); + EXPECT_CALL(*mock_sai_next_hop_api, remove_next_hop) + 
.WillOnce(Return(SAI_STATUS_ITEM_NOT_FOUND)); + SetAndAssertMuxState(STANDBY); + } + + TEST_F(MuxRollbackTest, StandbyToActiveRuntimeErrorRollbackToStandby) + { + EXPECT_CALL(*mock_sai_route_api, remove_route_entry) + .WillOnce(Throw(runtime_error("Mock runtime error"))); + SetMuxStateFromAppDb(ACTIVE); + EXPECT_EQ(STANDBY, m_MuxCable->getState()); + } + + TEST_F(MuxRollbackTest, ActiveToStandbyRuntimeErrorRollbackToActive) + { + SetAndAssertMuxState(ACTIVE); + EXPECT_CALL(*mock_sai_route_api, create_route_entry) + .WillOnce(Throw(runtime_error("Mock runtime error"))); + SetMuxStateFromAppDb(STANDBY); + EXPECT_EQ(ACTIVE, m_MuxCable->getState()); + } + + TEST_F(MuxRollbackTest, StandbyToActiveLogicErrorRollbackToStandby) + { + EXPECT_CALL(*mock_sai_neighbor_api, create_neighbor_entry) + .WillOnce(Throw(logic_error("Mock logic error"))); + SetMuxStateFromAppDb(ACTIVE); + EXPECT_EQ(STANDBY, m_MuxCable->getState()); + } + + TEST_F(MuxRollbackTest, ActiveToStandbyLogicErrorRollbackToActive) + { + SetAndAssertMuxState(ACTIVE); + EXPECT_CALL(*mock_sai_neighbor_api, remove_neighbor_entry) + .WillOnce(Throw(logic_error("Mock logic error"))); + SetMuxStateFromAppDb(STANDBY); + EXPECT_EQ(ACTIVE, m_MuxCable->getState()); + } + + TEST_F(MuxRollbackTest, StandbyToActiveExceptionRollbackToStandby) + { + EXPECT_CALL(*mock_sai_next_hop_api, create_next_hop) + .WillOnce(Throw(exception())); + SetMuxStateFromAppDb(ACTIVE); + EXPECT_EQ(STANDBY, m_MuxCable->getState()); + } + + TEST_F(MuxRollbackTest, ActiveToStandbyExceptionRollbackToActive) + { + SetAndAssertMuxState(ACTIVE); + EXPECT_CALL(*mock_sai_next_hop_api, remove_next_hop) + .WillOnce(Throw(exception())); + SetMuxStateFromAppDb(STANDBY); + EXPECT_EQ(ACTIVE, m_MuxCable->getState()); + } +} diff --git a/tests/mock_tests/orchdaemon_ut.cpp b/tests/mock_tests/orchdaemon_ut.cpp new file mode 100644 index 0000000000..3a2d2169f8 --- /dev/null +++ b/tests/mock_tests/orchdaemon_ut.cpp @@ -0,0 +1,52 @@ +#include "orchdaemon.h" +#include 
"dbconnector.h" +#include +#include +#include "mock_sai_switch.h" + +extern sai_switch_api_t* sai_switch_api; +sai_switch_api_t test_sai_switch; + +namespace orchdaemon_test +{ + + using ::testing::_; + using ::testing::Return; + using ::testing::StrictMock; + + DBConnector appl_db("APPL_DB", 0); + DBConnector state_db("STATE_DB", 0); + DBConnector config_db("CONFIG_DB", 0); + DBConnector counters_db("COUNTERS_DB", 0); + + class OrchDaemonTest : public ::testing::Test + { + public: + StrictMock mock_sai_switch_; + + OrchDaemon* orchd; + + OrchDaemonTest() + { + mock_sai_switch = &mock_sai_switch_; + sai_switch_api = &test_sai_switch; + sai_switch_api->get_switch_attribute = &mock_get_switch_attribute; + sai_switch_api->set_switch_attribute = &mock_set_switch_attribute; + + orchd = new OrchDaemon(&appl_db, &config_db, &state_db, &counters_db); + + }; + + ~OrchDaemonTest() + { + sai_switch_api = nullptr; + }; + }; + + TEST_F(OrchDaemonTest, logRotate) + { + EXPECT_CALL(mock_sai_switch_, set_switch_attribute( _, _)).WillOnce(Return(SAI_STATUS_SUCCESS)); + + orchd->logRotate(); + } +} diff --git a/tests/mock_tests/portal.h b/tests/mock_tests/portal.h index c2438e8e1c..8f0c4ab2db 100644 --- a/tests/mock_tests/portal.h +++ b/tests/mock_tests/portal.h @@ -5,6 +5,9 @@ #include "aclorch.h" #include "crmorch.h" +#include "copporch.h" +#include "sfloworch.h" +#include "directory.h" #undef protected #undef private @@ -59,4 +62,57 @@ struct Portal crmOrch->getResAvailableCounters(); } }; + + struct CoppOrchInternal + { + static TrapGroupPolicerTable getTrapGroupPolicerMap(CoppOrch &obj) + { + return obj.m_trap_group_policer_map; + } + + static TrapIdTrapObjectsTable getTrapGroupIdMap(CoppOrch &obj) + { + return obj.m_syncdTrapIds; + } + + static std::vector getTrapIdsFromTrapGroup(CoppOrch &obj, sai_object_id_t trapGroupOid) + { + std::vector trapIdList; + obj.getTrapIdsFromTrapGroup(trapGroupOid, trapIdList); + return trapIdList; + } + }; + + struct SflowOrchInternal + { + 
static bool getSflowStatusEnable(SflowOrch &obj) + { + return obj.m_sflowStatus; + } + + static SflowRateSampleMap getSflowSampleMap(SflowOrch &obj) + { + return obj.m_sflowRateSampleMap; + } + + static SflowPortInfoMap getSflowPortInfoMap(SflowOrch &obj) + { + return obj.m_sflowPortInfoMap; + } + }; + + struct DirectoryInternal + { + template + static void clear(Directory &obj) + { + obj.m_values.clear(); + } + + template + static bool empty(Directory &obj) + { + return obj.m_values.empty(); + } + }; }; diff --git a/tests/mock_tests/portmgr_ut.cpp b/tests/mock_tests/portmgr_ut.cpp new file mode 100644 index 0000000000..27dc61e03e --- /dev/null +++ b/tests/mock_tests/portmgr_ut.cpp @@ -0,0 +1,126 @@ +#include "portmgr.h" +#include "gtest/gtest.h" +#include "mock_table.h" +#include "redisutility.h" + +extern std::vector mockCallArgs; + +namespace portmgr_ut +{ + using namespace swss; + using namespace std; + + struct PortMgrTest : public ::testing::Test + { + shared_ptr m_app_db; + shared_ptr m_config_db; + shared_ptr m_state_db; + shared_ptr m_portMgr; + PortMgrTest() + { + m_app_db = make_shared( + "APPL_DB", 0); + m_config_db = make_shared( + "CONFIG_DB", 0); + m_state_db = make_shared( + "STATE_DB", 0); + } + + virtual void SetUp() override + { + ::testing_db::reset(); + vector cfg_port_tables = { + CFG_PORT_TABLE_NAME, + }; + m_portMgr.reset(new PortMgr(m_config_db.get(), m_app_db.get(), m_state_db.get(), cfg_port_tables)); + } + }; + + TEST_F(PortMgrTest, DoTask) + { + Table state_port_table(m_state_db.get(), STATE_PORT_TABLE_NAME); + Table app_port_table(m_app_db.get(), APP_PORT_TABLE_NAME); + Table cfg_port_table(m_config_db.get(), CFG_PORT_TABLE_NAME); + + // Port is not ready, verify that doTask does not handle port configuration + + cfg_port_table.set("Ethernet0", { + {"speed", "100000"}, + {"index", "1"} + }); + mockCallArgs.clear(); + m_portMgr->addExistingData(&cfg_port_table); + m_portMgr->doTask(); + ASSERT_TRUE(mockCallArgs.empty()); + std::vector 
values; + app_port_table.get("Ethernet0", values); + auto value_opt = swss::fvsGetValue(values, "mtu", true); + ASSERT_TRUE(value_opt); + ASSERT_EQ(DEFAULT_MTU_STR, value_opt.get()); + value_opt = swss::fvsGetValue(values, "admin_status", true); + ASSERT_TRUE(value_opt); + ASSERT_EQ(DEFAULT_ADMIN_STATUS_STR, value_opt.get()); + value_opt = swss::fvsGetValue(values, "speed", true); + ASSERT_TRUE(value_opt); + ASSERT_EQ("100000", value_opt.get()); + value_opt = swss::fvsGetValue(values, "index", true); + ASSERT_TRUE(value_opt); + ASSERT_EQ("1", value_opt.get()); + + // Set port state to ok, verify that doTask handle port configuration + state_port_table.set("Ethernet0", { + {"state", "ok"} + }); + m_portMgr->doTask(); + ASSERT_EQ(size_t(2), mockCallArgs.size()); + ASSERT_EQ("/sbin/ip link set dev \"Ethernet0\" mtu \"9100\"", mockCallArgs[0]); + ASSERT_EQ("/sbin/ip link set dev \"Ethernet0\" down", mockCallArgs[1]); + + // Set port admin_status, verify that it could override the default value + cfg_port_table.set("Ethernet0", { + {"admin_status", "up"} + }); + m_portMgr->addExistingData(&cfg_port_table); + m_portMgr->doTask(); + app_port_table.get("Ethernet0", values); + value_opt = swss::fvsGetValue(values, "admin_status", true); + ASSERT_TRUE(value_opt); + ASSERT_EQ("up", value_opt.get()); + } + + TEST_F(PortMgrTest, ConfigureDuringRetry) + { + Table state_port_table(m_state_db.get(), STATE_PORT_TABLE_NAME); + Table app_port_table(m_app_db.get(), APP_PORT_TABLE_NAME); + Table cfg_port_table(m_config_db.get(), CFG_PORT_TABLE_NAME); + + cfg_port_table.set("Ethernet0", { + {"speed", "100000"}, + {"index", "1"} + }); + + mockCallArgs.clear(); + m_portMgr->addExistingData(&cfg_port_table); + m_portMgr->doTask(); + ASSERT_TRUE(mockCallArgs.empty()); + + cfg_port_table.set("Ethernet0", { + {"speed", "50000"}, + {"index", "1"}, + {"mtu", "1518"}, + {"admin_status", "up"} + }); + + m_portMgr->addExistingData(&cfg_port_table); + m_portMgr->doTask(); + 
ASSERT_TRUE(mockCallArgs.empty()); + + state_port_table.set("Ethernet0", { + {"state", "ok"} + }); + m_portMgr->doTask(); + ASSERT_EQ(size_t(2), mockCallArgs.size()); + ASSERT_EQ("/sbin/ip link set dev \"Ethernet0\" mtu \"1518\"", mockCallArgs[0]); + ASSERT_EQ("/sbin/ip link set dev \"Ethernet0\" up", mockCallArgs[1]); + } +} diff --git a/tests/mock_tests/portsorch_ut.cpp b/tests/mock_tests/portsorch_ut.cpp index 853fdbfb69..32e4ed2917 100644 --- a/tests/mock_tests/portsorch_ut.cpp +++ b/tests/mock_tests/portsorch_ut.cpp @@ -7,17 +7,251 @@ #include "mock_orchagent_main.h" #include "mock_table.h" #include "notifier.h" +#include "mock_sai_bridge.h" +#define private public #include "pfcactionhandler.h" +#include "switchorch.h" +#include +#undef private +#define private public +#include "warm_restart.h" +#undef private #include extern redisReply *mockReply; +using ::testing::_; +using ::testing::StrictMock; namespace portsorch_test { - using namespace std; + // SAI default ports + std::map> defaultPortList; + + sai_port_api_t ut_sai_port_api; + sai_port_api_t *pold_sai_port_api; + sai_switch_api_t ut_sai_switch_api; + sai_switch_api_t *pold_sai_switch_api; + + bool not_support_fetching_fec; + vector mock_port_fec_modes = {SAI_PORT_FEC_MODE_RS, SAI_PORT_FEC_MODE_FC}; + + sai_status_t _ut_stub_sai_get_port_attribute( + _In_ sai_object_id_t port_id, + _In_ uint32_t attr_count, + _Inout_ sai_attribute_t *attr_list) + { + sai_status_t status; + if (attr_count == 1 && attr_list[0].id == SAI_PORT_ATTR_SUPPORTED_FEC_MODE) + { + if (not_support_fetching_fec) + { + status = SAI_STATUS_NOT_IMPLEMENTED; + } + else + { + uint32_t i; + for (i = 0; i < attr_list[0].value.s32list.count && i < mock_port_fec_modes.size(); i++) + { + attr_list[0].value.s32list.list[i] = mock_port_fec_modes[i]; + } + attr_list[0].value.s32list.count = i; + status = SAI_STATUS_SUCCESS; + } + } + else + { + status = pold_sai_port_api->get_port_attribute(port_id, attr_count, attr_list); + } + return status; 
+ } + + uint32_t _sai_set_port_fec_count; + int32_t _sai_port_fec_mode; + uint32_t _sai_set_pfc_mode_count; + sai_status_t _ut_stub_sai_set_port_attribute( + _In_ sai_object_id_t port_id, + _In_ const sai_attribute_t *attr) + { + if (attr[0].id == SAI_PORT_ATTR_FEC_MODE) + { + _sai_set_port_fec_count++; + _sai_port_fec_mode = attr[0].value.s32; + } + else if (attr[0].id == SAI_PORT_ATTR_AUTO_NEG_MODE) + { + /* Simulating failure case */ + return SAI_STATUS_FAILURE; + } + else if (attr[0].id == SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_COMBINED) + { + _sai_set_pfc_mode_count++; + } + return pold_sai_port_api->set_port_attribute(port_id, attr); + } + + uint32_t *_sai_syncd_notifications_count; + int32_t *_sai_syncd_notification_event; + uint32_t _sai_switch_dlr_packet_action_count; + uint32_t _sai_switch_dlr_packet_action; + sai_status_t _ut_stub_sai_set_switch_attribute( + _In_ sai_object_id_t switch_id, + _In_ const sai_attribute_t *attr) + { + if (attr[0].id == SAI_REDIS_SWITCH_ATTR_NOTIFY_SYNCD) + { + *_sai_syncd_notifications_count =+ 1; + *_sai_syncd_notification_event = attr[0].value.s32; + } + else if (attr[0].id == SAI_SWITCH_ATTR_PFC_DLR_PACKET_ACTION) + { + _sai_switch_dlr_packet_action_count++; + _sai_switch_dlr_packet_action = attr[0].value.s32; + } + return pold_sai_switch_api->set_switch_attribute(switch_id, attr); + } + + void _hook_sai_port_api() + { + ut_sai_port_api = *sai_port_api; + pold_sai_port_api = sai_port_api; + ut_sai_port_api.get_port_attribute = _ut_stub_sai_get_port_attribute; + ut_sai_port_api.set_port_attribute = _ut_stub_sai_set_port_attribute; + sai_port_api = &ut_sai_port_api; + } + + void _unhook_sai_port_api() + { + sai_port_api = pold_sai_port_api; + } + + void _hook_sai_switch_api() + { + ut_sai_switch_api = *sai_switch_api; + pold_sai_switch_api = sai_switch_api; + ut_sai_switch_api.set_switch_attribute = _ut_stub_sai_set_switch_attribute; + sai_switch_api = &ut_sai_switch_api; + } + + void _unhook_sai_switch_api() + { + 
sai_switch_api = pold_sai_switch_api; + } + + sai_queue_api_t ut_sai_queue_api; + sai_queue_api_t *pold_sai_queue_api; + int _sai_set_queue_attr_count = 0; + + sai_status_t _ut_stub_sai_set_queue_attribute(sai_object_id_t queue_id, const sai_attribute_t *attr) + { + if(attr->id == SAI_QUEUE_ATTR_PFC_DLR_INIT) + { + if(attr->value.booldata == true) + { + _sai_set_queue_attr_count++; + } + else + { + _sai_set_queue_attr_count--; + } + } + return SAI_STATUS_SUCCESS; + } + + uint32_t _sai_get_queue_attr_count; + bool _sai_mock_queue_attr = false; + sai_status_t _ut_stub_sai_get_queue_attribute( + _In_ sai_object_id_t queue_id, + _In_ uint32_t attr_count, + _Inout_ sai_attribute_t *attr_list) + { + if (_sai_mock_queue_attr) + { + _sai_get_queue_attr_count++; + for (auto i = 0u; i < attr_count; i++) + { + if (attr_list[i].id == SAI_QUEUE_ATTR_TYPE) + { + attr_list[i].value.s32 = static_cast(SAI_QUEUE_TYPE_UNICAST); + } + else if (attr_list[i].id == SAI_QUEUE_ATTR_INDEX) + { + attr_list[i].value.u8 = 0; + } + else + { + pold_sai_queue_api->get_queue_attribute(queue_id, 1, &attr_list[i]); + } + } + } + + return SAI_STATUS_SUCCESS; + } + + void _hook_sai_queue_api() + { + _sai_mock_queue_attr = true; + ut_sai_queue_api = *sai_queue_api; + pold_sai_queue_api = sai_queue_api; + ut_sai_queue_api.set_queue_attribute = _ut_stub_sai_set_queue_attribute; + ut_sai_queue_api.get_queue_attribute = _ut_stub_sai_get_queue_attribute; + sai_queue_api = &ut_sai_queue_api; + } + + void _unhook_sai_queue_api() + { + sai_queue_api = pold_sai_queue_api; + _sai_mock_queue_attr = false; + } + + sai_bridge_api_t ut_sai_bridge_api; + sai_bridge_api_t *org_sai_bridge_api; + + void _hook_sai_bridge_api() + { + ut_sai_bridge_api = *sai_bridge_api; + org_sai_bridge_api = sai_bridge_api; + sai_bridge_api = &ut_sai_bridge_api; + } + + void _unhook_sai_bridge_api() + { + sai_bridge_api = org_sai_bridge_api; + } + + void cleanupPorts(PortsOrch *obj) + { + // Get CPU port + Port p; + obj->getCpuPort(p); + 
+ // Get port list + auto portList = obj->getAllPorts(); + portList.erase(p.m_alias); + + // Generate port config + std::deque kfvList; + + for (const auto &cit : portList) + { + kfvList.push_back({ cit.first, DEL_COMMAND, { } }); + } + + // Refill consumer + auto consumer = dynamic_cast(obj->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(kfvList); + + // Apply configuration + static_cast(obj)->doTask(); + + // Dump pending tasks + std::vector taskList; + obj->dumpPendingTasks(taskList); + ASSERT_TRUE(taskList.empty()); + } + struct PortsOrchTest : public ::testing::Test { shared_ptr m_app_db; @@ -49,6 +283,17 @@ namespace portsorch_test ::testing_db::reset(); // Create dependencies ... + TableConnector stateDbSwitchTable(m_state_db.get(), "SWITCH_CAPABILITY"); + TableConnector app_switch_table(m_app_db.get(), APP_SWITCH_TABLE_NAME); + TableConnector conf_asic_sensors(m_config_db.get(), CFG_ASIC_SENSORS_TABLE_NAME); + + vector switch_tables = { + conf_asic_sensors, + app_switch_table + }; + + ASSERT_EQ(gSwitchOrch, nullptr); + gSwitchOrch = new SwitchOrch(m_app_db.get(), switch_tables, stateDbSwitchTable); const int portsorch_base_pri = 40; @@ -62,13 +307,14 @@ namespace portsorch_test ASSERT_EQ(gPortsOrch, nullptr); + gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + vector flex_counter_tables = { CFG_FLEX_COUNTER_TABLE_NAME }; auto* flexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); gDirectory.set(flexCounterOrch); - gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); vector buffer_tables = { APP_BUFFER_POOL_TABLE_NAME, APP_BUFFER_PROFILE_TABLE_NAME, APP_BUFFER_QUEUE_TABLE_NAME, @@ -97,12 +343,74 @@ namespace portsorch_test ASSERT_EQ(gNeighOrch, nullptr); gNeighOrch = new NeighOrch(m_app_db.get(), APP_NEIGH_TABLE_NAME, gIntfsOrch, gFdbOrch, gPortsOrch, m_chassis_app_db.get()); + + vector qos_tables = { + 
CFG_TC_TO_QUEUE_MAP_TABLE_NAME, + CFG_SCHEDULER_TABLE_NAME, + CFG_DSCP_TO_TC_MAP_TABLE_NAME, + CFG_MPLS_TC_TO_TC_MAP_TABLE_NAME, + CFG_DOT1P_TO_TC_MAP_TABLE_NAME, + CFG_QUEUE_TABLE_NAME, + CFG_PORT_QOS_MAP_TABLE_NAME, + CFG_WRED_PROFILE_TABLE_NAME, + CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, + CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, + CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, + CFG_DSCP_TO_FC_MAP_TABLE_NAME, + CFG_EXP_TO_FC_MAP_TABLE_NAME, + CFG_TC_TO_DSCP_MAP_TABLE_NAME + }; + gQosOrch = new QosOrch(m_config_db.get(), qos_tables); + + vector pfc_wd_tables = { + CFG_PFC_WD_TABLE_NAME + }; + + static const vector portStatIds = + { + SAI_PORT_STAT_PFC_0_RX_PKTS, + SAI_PORT_STAT_PFC_1_RX_PKTS, + SAI_PORT_STAT_PFC_2_RX_PKTS, + SAI_PORT_STAT_PFC_3_RX_PKTS, + SAI_PORT_STAT_PFC_4_RX_PKTS, + SAI_PORT_STAT_PFC_5_RX_PKTS, + SAI_PORT_STAT_PFC_6_RX_PKTS, + SAI_PORT_STAT_PFC_7_RX_PKTS, + SAI_PORT_STAT_PFC_0_ON2OFF_RX_PKTS, + SAI_PORT_STAT_PFC_1_ON2OFF_RX_PKTS, + SAI_PORT_STAT_PFC_2_ON2OFF_RX_PKTS, + SAI_PORT_STAT_PFC_3_ON2OFF_RX_PKTS, + SAI_PORT_STAT_PFC_4_ON2OFF_RX_PKTS, + SAI_PORT_STAT_PFC_5_ON2OFF_RX_PKTS, + SAI_PORT_STAT_PFC_6_ON2OFF_RX_PKTS, + SAI_PORT_STAT_PFC_7_ON2OFF_RX_PKTS, + }; + + static const vector queueStatIds = + { + SAI_QUEUE_STAT_PACKETS, + SAI_QUEUE_STAT_CURR_OCCUPANCY_BYTES, + }; + + static const vector queueAttrIds = + { + SAI_QUEUE_ATTR_PAUSE_STATUS, + }; + ASSERT_EQ((gPfcwdOrch), nullptr); + gPfcwdOrch = new PfcWdSwOrch(m_config_db.get(), pfc_wd_tables, portStatIds, queueStatIds, queueAttrIds, 100); + } virtual void TearDown() override { ::testing_db::reset(); + auto buffer_maps = BufferOrch::m_buffer_type_maps; + for (auto &i : buffer_maps) + { + i.second->clear(); + } + delete gNeighOrch; gNeighOrch = nullptr; delete gFdbOrch; @@ -113,10 +421,17 @@ namespace portsorch_test gPortsOrch = nullptr; delete gBufferOrch; gBufferOrch = nullptr; + delete gPfcwdOrch; + gPfcwdOrch = nullptr; + delete gQosOrch; + gQosOrch = nullptr; + delete gSwitchOrch; 
+ gSwitchOrch = nullptr; // clear orchs saved in directory gDirectory.m_values.clear(); } + static void SetUpTestCase() { // Init switch and create dependencies @@ -152,6 +467,10 @@ namespace portsorch_test ASSERT_EQ(status, SAI_STATUS_SUCCESS); gVirtualRouterId = attr.value.oid; + + // Get SAI default ports + defaultPortList = ut_helper::getInitialSaiPorts(); + ASSERT_TRUE(!defaultPortList.empty()); } static void TearDownTestCase() @@ -165,6 +484,592 @@ namespace portsorch_test }; + // TEST_F(PortsOrchTest, PortBulkCreateRemove) + // { + // auto portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + // // Get SAI default ports + // auto &ports = defaultPortList; + // ASSERT_TRUE(!ports.empty()); + + // // Generate port config + // for (std::uint32_t idx1 = 0, idx2 = 1; idx1 < ports.size() * 4; idx1 += 4, idx2++) + // { + // std::stringstream key; + // key << FRONT_PANEL_PORT_PREFIX << idx1; + + // std::stringstream alias; + // alias << "etp" << idx2; + + // std::stringstream index; + // index << idx2; + + // std::stringstream lanes; + // lanes << idx1 << "," << idx1 + 1 << "," << idx1 + 2 << "," << idx1 + 3; + + // std::vector fvList = { + // { "alias", alias.str() }, + // { "index", index.str() }, + // { "lanes", lanes.str() }, + // { "speed", "100000" }, + // { "autoneg", "off" }, + // { "adv_speeds", "all" }, + // { "interface_type", "none" }, + // { "adv_interface_types", "all" }, + // { "fec", "rs" }, + // { "mtu", "9100" }, + // { "tpid", "0x8100" }, + // { "pfc_asym", "off" }, + // { "admin_status", "up" }, + // { "description", "FP port" } + // }; + + // portTable.set(key.str(), fvList); + // } + + // // Set PortConfigDone + // portTable.set("PortConfigDone", { { "count", std::to_string(ports.size()) } }); + + // // Refill consumer + // gPortsOrch->addExistingData(&portTable); + + // // Apply configuration + // static_cast(gPortsOrch)->doTask(); + + // // Port count: 32 Data + 1 CPU + // ASSERT_EQ(gPortsOrch->getAllPorts().size(), ports.size() + 1); + 
+ // // Dump pending tasks + // std::vector taskList; + // gPortsOrch->dumpPendingTasks(taskList); + // ASSERT_TRUE(taskList.empty()); + + // // Cleanup ports + // cleanupPorts(gPortsOrch); + // } + + // TEST_F(PortsOrchTest, PortBasicConfig) + // { + // auto portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + // // Get SAI default ports + // auto &ports = defaultPortList; + // ASSERT_TRUE(!ports.empty()); + + // // Generate port config + // for (const auto &cit : ports) + // { + // portTable.set(cit.first, cit.second); + // } + + // // Set PortConfigDone + // portTable.set("PortConfigDone", { { "count", std::to_string(ports.size()) } }); + + // // Refill consumer + // gPortsOrch->addExistingData(&portTable); + + // // Apply configuration + // static_cast(gPortsOrch)->doTask(); + + // // Port count: 32 Data + 1 CPU + // ASSERT_EQ(gPortsOrch->getAllPorts().size(), ports.size() + 1); + + // // Generate port config + // std::deque kfvList = {{ + // "Ethernet0", + // SET_COMMAND, { + // { "speed", "100000" }, + // { "autoneg", "on" }, + // { "adv_speeds", "1000,10000,100000" }, + // { "interface_type", "CR" }, + // { "adv_interface_types", "CR,CR2,CR4,CR8" }, + // { "fec", "fc" }, + // { "mtu", "9100" }, + // { "tpid", "0x9100" }, + // { "pfc_asym", "on" }, + // { "link_training", "on" }, + // { "admin_status", "up" } + // } + // }}; + + // // Refill consumer + // auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + // consumer->addToSync(kfvList); + + // // Apply configuration + // static_cast(gPortsOrch)->doTask(); + + // // Get port + // Port p; + // ASSERT_TRUE(gPortsOrch->getPort("Ethernet0", p)); + + // // Verify speed + // ASSERT_EQ(p.m_speed, 100000); + + // // Verify auto-negotiation + // ASSERT_TRUE(p.m_autoneg); + + // // Verify advertised speed + // std::set adv_speeds = { 1000, 10000, 100000 }; + // ASSERT_EQ(p.m_adv_speeds, adv_speeds); + + // // Verify interface type + // ASSERT_EQ(p.m_interface_type, 
SAI_PORT_INTERFACE_TYPE_CR); + + // // Verify advertised interface type + // std::set adv_interface_types = { + // SAI_PORT_INTERFACE_TYPE_CR, + // SAI_PORT_INTERFACE_TYPE_CR2, + // SAI_PORT_INTERFACE_TYPE_CR4, + // SAI_PORT_INTERFACE_TYPE_CR8 + // }; + // ASSERT_EQ(p.m_adv_interface_types, adv_interface_types); + + // // Verify FEC + // ASSERT_EQ(p.m_fec_mode, SAI_PORT_FEC_MODE_FC); + + // // Verify MTU + // ASSERT_EQ(p.m_mtu, 9100); + + // // Verify TPID + // ASSERT_EQ(p.m_tpid, 0x9100); + + // // Verify asymmetric PFC + // ASSERT_EQ(p.m_pfc_asym, SAI_PORT_PRIORITY_FLOW_CONTROL_MODE_SEPARATE); + + // // Verify link training + // ASSERT_TRUE(p.m_link_training); + + // // Verify admin status + // ASSERT_TRUE(p.m_admin_state_up); + + // // Dump pending tasks + // std::vector taskList; + // gPortsOrch->dumpPendingTasks(taskList); + // ASSERT_TRUE(taskList.empty()); + + // // Cleanup ports + // cleanupPorts(gPortsOrch); + // } + + // TEST_F(PortsOrchTest, PortAdvancedConfig) + // { + // auto portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + // // Get SAI default ports + // auto &ports = defaultPortList; + // ASSERT_TRUE(!ports.empty()); + + // // Generate port config + // for (const auto &cit : ports) + // { + // portTable.set(cit.first, cit.second); + // } + + // // Set PortConfigDone + // portTable.set("PortConfigDone", { { "count", std::to_string(ports.size()) } }); + + // // Refill consumer + // gPortsOrch->addExistingData(&portTable); + + // // Apply configuration + // static_cast(gPortsOrch)->doTask(); + + // // Port count: 32 Data + 1 CPU + // ASSERT_EQ(gPortsOrch->getAllPorts().size(), ports.size() + 1); + + // // Generate port serdes config + // std::deque kfvList = {{ + // "Ethernet0", + // SET_COMMAND, { + // { "preemphasis", "0xcad0,0xc6e0,0xc6e0,0xd2b0" }, + // { "idriver", "0x5,0x3,0x4,0x1" }, + // { "ipredriver", "0x1,0x4,0x3,0x5" }, + // { "pre1", "0xfff0,0xfff2,0xfff1,0xfff3" }, + // { "pre2", "0xfff0,0xfff2,0xfff1,0xfff3" }, + // { "pre3", 
"0xfff0,0xfff2,0xfff1,0xfff3" }, + // { "main", "0x90,0x92,0x91,0x93" }, + // { "post1", "0x10,0x12,0x11,0x13" }, + // { "post2", "0x10,0x12,0x11,0x13" }, + // { "post3", "0x10,0x12,0x11,0x13" }, + // { "attn", "0x80,0x82,0x81,0x83" } + // } + // }}; + + // // Refill consumer + // auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + // consumer->addToSync(kfvList); + + // // Apply configuration + // static_cast(gPortsOrch)->doTask(); + + // // Get port + // Port p; + // ASSERT_TRUE(gPortsOrch->getPort("Ethernet0", p)); + + // // Verify preemphasis + // std::vector preemphasis = { 0xcad0, 0xc6e0, 0xc6e0, 0xd2b0 }; + // ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_PREEMPHASIS), preemphasis); + + // // Verify idriver + // std::vector idriver = { 0x5, 0x3, 0x4, 0x1 }; + // ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_IDRIVER), idriver); + + // // Verify ipredriver + // std::vector ipredriver = { 0x1, 0x4, 0x3, 0x5 }; + // ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_IPREDRIVER), ipredriver); + + // // Verify pre1 + // std::vector pre1 = { 0xfff0, 0xfff2, 0xfff1, 0xfff3 }; + // ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_FIR_PRE1), pre1); + + // // Verify pre2 + // std::vector pre2 = { 0xfff0, 0xfff2, 0xfff1, 0xfff3 }; + // ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_FIR_PRE2), pre2); + + // // Verify pre3 + // std::vector pre3 = { 0xfff0, 0xfff2, 0xfff1, 0xfff3 }; + // ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_FIR_PRE3), pre3); + + // // Verify main + // std::vector main = { 0x90, 0x92, 0x91, 0x93 }; + // ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_FIR_MAIN), main); + + // // Verify post1 + // std::vector post1 = { 0x10, 0x12, 0x11, 0x13 }; + // ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_FIR_POST1), post1); + + // // Verify post2 + // std::vector post2 = { 0x10, 0x12, 0x11, 0x13 }; + // ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_FIR_POST2), post2); + + // // Verify post3 + 
// std::vector post3 = { 0x10, 0x12, 0x11, 0x13 }; + // ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_FIR_POST3), post3); + + // // Verify attn + // std::vector attn = { 0x80, 0x82, 0x81, 0x83 }; + // ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_FIR_ATTN), attn); + + // // Dump pending tasks + // std::vector taskList; + // gPortsOrch->dumpPendingTasks(taskList); + // ASSERT_TRUE(taskList.empty()); + + // // Cleanup ports + // cleanupPorts(gPortsOrch); + // } + + // /** + // * Test that verifies PortsOrch::getPort() on a port that has been deleted + // */ + // TEST_F(PortsOrchTest, GetPortTest) + // { + // _hook_sai_queue_api(); + // Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + // std::deque entries; + + // // Get SAI default ports to populate DB + // auto &ports = defaultPortList; + // ASSERT_TRUE(!ports.empty()); + + // for (const auto &it : ports) + // { + // portTable.set(it.first, it.second); + // } + + // // Set PortConfigDone + // portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // // refill consumer + // gPortsOrch->addExistingData(&portTable); + + // // Apply configuration : + // // create ports + // static_cast(gPortsOrch)->doTask(); + + // Port port; + // ASSERT_TRUE(gPortsOrch->getPort("Ethernet0", port)); + // ASSERT_NE(port.m_port_id, SAI_NULL_OBJECT_ID); + + // // Get queue info + // string type; + // uint8_t index; + // auto queue_id = port.m_queue_ids[0]; + // auto ut_sai_get_queue_attr_count = _sai_get_queue_attr_count; + // gPortsOrch->getQueueTypeAndIndex(queue_id, type, index); + // ASSERT_EQ(type, "SAI_QUEUE_TYPE_UNICAST"); + // ASSERT_EQ(index, 0); + // type = ""; + // index = 255; + // gPortsOrch->getQueueTypeAndIndex(queue_id, type, index); + // ASSERT_EQ(type, "SAI_QUEUE_TYPE_UNICAST"); + // ASSERT_EQ(index, 0); + // ASSERT_EQ(++ut_sai_get_queue_attr_count, _sai_get_queue_attr_count); + + // // Delete port + // entries.push_back({"Ethernet0", "DEL", {}}); + // auto consumer = 
dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + // consumer->addToSync(entries); + // static_cast(gPortsOrch)->doTask(); + // entries.clear(); + + // ASSERT_FALSE(gPortsOrch->getPort(port.m_port_id, port)); + // ASSERT_EQ(gPortsOrch->m_queueInfo.find(queue_id), gPortsOrch->m_queueInfo.end()); + // _unhook_sai_queue_api(); + // } + + /** + * Test case: PortsOrch::addBridgePort() does not add router port to .1Q bridge + */ + TEST_F(PortsOrchTest, addBridgePortOnRouterPort) + { + _hook_sai_bridge_api(); + + StrictMock mock_sai_bridge_; + mock_sai_bridge = &mock_sai_bridge_; + sai_bridge_api->create_bridge_port = mock_create_bridge_port; + + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate port table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone, PortInitDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + portTable.set("PortInitDone", { { "lanes", "0" } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + // Apply configuration : create ports + static_cast(gPortsOrch)->doTask(); + + // Get first port and set its rif id to simulate it is router port + Port port; + gPortsOrch->getPort("Ethernet0", port); + port.m_rif_id = 1; + + ASSERT_FALSE(gPortsOrch->addBridgePort(port)); + EXPECT_CALL(mock_sai_bridge_, create_bridge_port(_, _, _, _)).Times(0); + + _unhook_sai_bridge_api(); + } + + TEST_F(PortsOrchTest, PortSupportedFecModes) + { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + not_support_fetching_fec = false; + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", 
to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_port_fec_count; + + entries.push_back({"Ethernet0", "SET", + { + {"fec", "rs"} + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + ASSERT_EQ(_sai_set_port_fec_count, ++current_sai_api_call_count); + ASSERT_EQ(_sai_port_fec_mode, SAI_PORT_FEC_MODE_RS); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + entries.push_back({"Ethernet0", "SET", + { + {"fec", "none"} + }}); + consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + + ASSERT_EQ(_sai_set_port_fec_count, current_sai_api_call_count); + ASSERT_EQ(_sai_port_fec_mode, SAI_PORT_FEC_MODE_RS); + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 0); + + _unhook_sai_port_api(); + } + + /* + * Test case: SAI_PORT_ATTR_SUPPORTED_FEC_MODE is not supported by vendor + **/ + TEST_F(PortsOrchTest, PortNotSupportedFecModes) + { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + not_support_fetching_fec = true; + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_port_fec_count; + + entries.push_back({"Ethernet0", "SET", + { + {"fec", "rs"} + }}); + auto consumer = 
dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + ASSERT_EQ(_sai_set_port_fec_count, ++current_sai_api_call_count); + ASSERT_EQ(_sai_port_fec_mode, SAI_PORT_FEC_MODE_RS); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + _unhook_sai_port_api(); + } + + /* + * Test case: Fetching SAI_PORT_ATTR_SUPPORTED_FEC_MODE is supported but no FEC mode is supported on the port + **/ + TEST_F(PortsOrchTest, PortSupportNoFecModes) + { + _hook_sai_port_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + not_support_fetching_fec = false; + auto old_mock_port_fec_modes = mock_port_fec_modes; + mock_port_fec_modes.clear(); + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + uint32_t current_sai_api_call_count = _sai_set_port_fec_count; + + entries.push_back({"Ethernet0", "SET", + { + {"fec", "rs"} + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + static_cast(gPortsOrch)->doTask(); + entries.clear(); + + ASSERT_EQ(_sai_set_port_fec_count, current_sai_api_call_count); + + vector ts; + + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + mock_port_fec_modes = old_mock_port_fec_modes; + _unhook_sai_port_api(); + } + + TEST_F(PortsOrchTest, PortTestSAIFailureHandling) + { + _hook_sai_port_api(); + _hook_sai_switch_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + std::deque entries; + + not_support_fetching_fec = false; + // Get SAI default 
ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + static_cast(gPortsOrch)->doTask(); + + _sai_syncd_notifications_count = (uint32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + _sai_syncd_notification_event = (int32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + *_sai_syncd_notifications_count = 0; + + entries.push_back({"Ethernet0", "SET", + { + {"autoneg", "on"} + }}); + auto consumer = dynamic_cast(gPortsOrch->getExecutor(APP_PORT_TABLE_NAME)); + consumer->addToSync(entries); + ASSERT_DEATH({static_cast(gPortsOrch)->doTask();}, ""); + + ASSERT_EQ(*_sai_syncd_notifications_count, 1); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + _unhook_sai_port_api(); + _unhook_sai_switch_api(); + } + TEST_F(PortsOrchTest, PortReadinessColdBoot) { Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); @@ -232,7 +1137,6 @@ namespace portsorch_test // create ports static_cast(gBufferOrch)->doTask(); - static_cast(gPortsOrch)->doTask(); // Ports are not ready yet @@ -317,6 +1221,12 @@ namespace portsorch_test portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); portTable.set("PortInitDone", { { "lanes", "0" } }); + // warm start, initialize ports ready list + + WarmStart::getInstance().m_enabled = true; + gBufferOrch->initBufferReadyLists(m_app_db.get(), m_config_db.get()); + WarmStart::getInstance().m_enabled = false; + // warm start, bake fill refill consumer gBufferOrch->bake(); @@ -353,12 +1263,216 @@ namespace portsorch_test ASSERT_TRUE(ts.empty()); } - TEST_F(PortsOrchTest, PfcZeroBufferHandlerLocksPortPgAndQueue) 
+ TEST_F(PortsOrchTest, PfcDlrHandlerCallingDlrInitAttribute) + { + _hook_sai_port_api(); + _hook_sai_queue_api(); + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + Table pgTable = Table(m_app_db.get(), APP_BUFFER_PG_TABLE_NAME); + Table profileTable = Table(m_app_db.get(), APP_BUFFER_PROFILE_TABLE_NAME); + Table poolTable = Table(m_app_db.get(), APP_BUFFER_POOL_TABLE_NAME); + Table queueTable = Table(m_app_db.get(), APP_BUFFER_QUEUE_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate port table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone, PortInitDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + portTable.set("PortInitDone", { { "lanes", "0" } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + + static_cast(gPortsOrch)->doTask(); + + // Apply configuration + // ports + static_cast(gPortsOrch)->doTask(); + + ASSERT_TRUE(gPortsOrch->allPortsReady()); + + // No more tasks + vector ts; + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + ts.clear(); + + // Simulate storm drop handler started on Ethernet0 TC 3 + Port port; + gPortsOrch->getPort("Ethernet0", port); + auto current_pfc_mode_count = _sai_set_pfc_mode_count; + auto countersTable = make_shared
(m_counters_db.get(), COUNTERS_TABLE); + auto dropHandler = make_unique(port.m_port_id, port.m_queue_ids[3], 3, countersTable); + ASSERT_EQ(current_pfc_mode_count, _sai_set_pfc_mode_count); + ASSERT_TRUE(_sai_set_queue_attr_count == 1); + + dropHandler.reset(); + ASSERT_EQ(current_pfc_mode_count, _sai_set_pfc_mode_count); + ASSERT_FALSE(_sai_set_queue_attr_count == 1); + + _unhook_sai_queue_api(); + _unhook_sai_port_api(); + } + + TEST_F(PortsOrchTest, PfcDlrPacketAction) + { + _hook_sai_switch_api(); + std::deque entries; + sai_packet_action_t dlr_packet_action; + gSwitchOrch->m_PfcDlrInitEnable = true; + gPfcwdOrch->m_platform = BRCM_PLATFORM_SUBSTRING; + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + Table cfgPfcwdTable = Table(m_config_db.get(), CFG_PFC_WD_TABLE_NAME); + Table cfgPortQosMapTable = Table(m_config_db.get(), CFG_PORT_QOS_MAP_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate port table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone, PortInitDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + portTable.set("PortInitDone", { { "lanes", "0" } }); + + // refill consumer + gPortsOrch->addExistingData(&portTable); + + // Apply configuration : + // create ports + + static_cast(gPortsOrch)->doTask(); + + // Apply configuration + // ports + static_cast(gPortsOrch)->doTask(); + + ASSERT_TRUE(gPortsOrch->allPortsReady()); + + // No more tasks + vector ts; + gPortsOrch->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + ts.clear(); + + entries.clear(); + entries.push_back({"Ethernet0", "SET", + { + {"pfc_enable", "3,4"}, + {"pfcwd_sw_enable", "3,4"} + }}); + entries.push_back({"Ethernet8", "SET", + { + {"pfc_enable", "3,4"}, + {"pfcwd_sw_enable", "3,4"} + }}); + auto portQosMapConsumer = dynamic_cast(gQosOrch->getExecutor(CFG_PORT_QOS_MAP_TABLE_NAME)); + 
portQosMapConsumer->addToSync(entries); + entries.clear(); + static_cast(gQosOrch)->doTask(); + + // create pfcwd entry for first port with drop action + dlr_packet_action = SAI_PACKET_ACTION_DROP; + entries.push_back({"GLOBAL", "SET", + { + {"POLL_INTERVAL", "200"}, + }}); + entries.push_back({"Ethernet0", "SET", + { + {"action", "drop"}, + {"detection_time", "200"}, + {"restoration_time", "200"} + }}); + + auto PfcwdConsumer = dynamic_cast(gPfcwdOrch->getExecutor(CFG_PFC_WD_TABLE_NAME)); + PfcwdConsumer->addToSync(entries); + entries.clear(); + + auto current_switch_dlr_packet_action_count = _sai_switch_dlr_packet_action_count; + static_cast(gPfcwdOrch)->doTask(); + ASSERT_EQ(++current_switch_dlr_packet_action_count, _sai_switch_dlr_packet_action_count); + ASSERT_EQ(_sai_switch_dlr_packet_action, dlr_packet_action); + ASSERT_EQ((gPfcwdOrch->m_pfcwd_ports.size()), 1); + + // create pfcwd entry for second port with drop action + entries.push_back({"Ethernet8", "SET", + { + {"action", "drop"}, + {"detection_time", "200"}, + {"restoration_time", "200"} + }}); + PfcwdConsumer->addToSync(entries); + entries.clear(); + current_switch_dlr_packet_action_count = _sai_switch_dlr_packet_action_count; + static_cast(gPfcwdOrch)->doTask(); + // verify no change in count + ASSERT_EQ(current_switch_dlr_packet_action_count, _sai_switch_dlr_packet_action_count); + + // remove both the entries + entries.push_back({"Ethernet0", "DEL", + {{}} + }); + PfcwdConsumer->addToSync(entries); + entries.clear(); + static_cast(gPfcwdOrch)->doTask(); + ASSERT_EQ((gPfcwdOrch->m_pfcwd_ports.size()), 1); + + entries.push_back({"Ethernet8", "DEL", + {{}} + }); + PfcwdConsumer->addToSync(entries); + entries.clear(); + static_cast(gPfcwdOrch)->doTask(); + + // create pfcwd entry for first port with forward action + dlr_packet_action = SAI_PACKET_ACTION_FORWARD; + entries.push_back({"Ethernet0", "SET", + { + {"action", "forward"}, + {"detection_time", "200"}, + {"restoration_time", "200"} + }}); + + 
PfcwdConsumer->addToSync(entries); + entries.clear(); + + current_switch_dlr_packet_action_count = _sai_switch_dlr_packet_action_count; + static_cast(gPfcwdOrch)->doTask(); + ASSERT_EQ(++current_switch_dlr_packet_action_count, _sai_switch_dlr_packet_action_count); + ASSERT_EQ(_sai_switch_dlr_packet_action, dlr_packet_action); + ASSERT_EQ((gPfcwdOrch->m_pfcwd_ports.size()), 1); + + // remove the entry + entries.push_back({"Ethernet0", "DEL", + {{}} + }); + PfcwdConsumer->addToSync(entries); + entries.clear(); + static_cast(gPfcwdOrch)->doTask(); + ASSERT_EQ((gPfcwdOrch->m_pfcwd_ports.size()), 0); + + _unhook_sai_switch_api(); + } + + TEST_F(PortsOrchTest, PfcZeroBufferHandler) { Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); Table pgTable = Table(m_app_db.get(), APP_BUFFER_PG_TABLE_NAME); Table profileTable = Table(m_app_db.get(), APP_BUFFER_PROFILE_TABLE_NAME); Table poolTable = Table(m_app_db.get(), APP_BUFFER_POOL_TABLE_NAME); + Table queueTable = Table(m_app_db.get(), APP_BUFFER_QUEUE_TABLE_NAME); // Get SAI default ports to populate DB auto ports = ut_helper::getInitialSaiPorts(); @@ -402,7 +1516,14 @@ namespace portsorch_test // Create test buffer pool poolTable.set( - "test_pool", + "egress_pool", + { + { "type", "egress" }, + { "mode", "dynamic" }, + { "size", "4200000" }, + }); + poolTable.set( + "ingress_pool", { { "type", "ingress" }, { "mode", "dynamic" }, @@ -410,54 +1531,49 @@ namespace portsorch_test }); // Create test buffer profile - profileTable.set("test_profile", { { "pool", "test_pool" }, - { "xon", "14832" }, - { "xoff", "14832" }, - { "size", "35000" }, - { "dynamic_th", "0" } }); - - // Apply profile on PGs 3-4 all ports + profileTable.set("ingress_profile", { { "pool", "ingress_pool" }, + { "xon", "14832" }, + { "xoff", "14832" }, + { "size", "35000" }, + { "dynamic_th", "0" } }); + profileTable.set("egress_profile", { { "pool", "egress_pool" }, + { "size", "0" }, + { "dynamic_th", "0" } }); + + // Apply profile on Queue and 
PGs 3-4 all ports for (const auto &it : ports) { std::ostringstream oss; oss << it.first << ":3-4"; - pgTable.set(oss.str(), { { "profile", "test_profile" } }); + pgTable.set(oss.str(), { { "profile", "ingress_profile" } }); + queueTable.set(oss.str(), { {"profile", "egress_profile" } }); } gBufferOrch->addExistingData(&pgTable); gBufferOrch->addExistingData(&poolTable); gBufferOrch->addExistingData(&profileTable); + gBufferOrch->addExistingData(&queueTable); - // process pool, profile and PGs + // process pool, profile and Q's static_cast(gBufferOrch)->doTask(); - // Port should have been updated by BufferOrch->doTask - gPortsOrch->getPort("Ethernet0", port); - auto profile_id = (*BufferOrch::m_buffer_type_maps["BUFFER_PROFILE_TABLE"])[string("test_profile")].m_saiObjectId; - ASSERT_TRUE(profile_id != SAI_NULL_OBJECT_ID); - ASSERT_TRUE(port.m_priority_group_pending_profile[3] == profile_id); - ASSERT_TRUE(port.m_priority_group_pending_profile[4] == SAI_NULL_OBJECT_ID); + auto queueConsumer = static_cast(gBufferOrch->getExecutor(APP_BUFFER_QUEUE_TABLE_NAME)); + queueConsumer->dumpPendingTasks(ts); + ASSERT_FALSE(ts.empty()); // Queue is skipped + ts.clear(); auto pgConsumer = static_cast(gBufferOrch->getExecutor(APP_BUFFER_PG_TABLE_NAME)); pgConsumer->dumpPendingTasks(ts); - ASSERT_TRUE(ts.empty()); // PG is stored in m_priority_group_pending_profile + ASSERT_TRUE(ts.empty()); // PG Notification is not skipped ts.clear(); // release zero buffer drop handler dropHandler.reset(); - // re-fetch the port - gPortsOrch->getPort("Ethernet0", port); - - // pending profile should be cleared - ASSERT_TRUE(port.m_priority_group_pending_profile[3] == SAI_NULL_OBJECT_ID); - ASSERT_TRUE(port.m_priority_group_pending_profile[4] == SAI_NULL_OBJECT_ID); - - // process PGs + // process queue static_cast(gBufferOrch)->doTask(); - pgConsumer = static_cast(gBufferOrch->getExecutor(APP_BUFFER_PG_TABLE_NAME)); - pgConsumer->dumpPendingTasks(ts); - ASSERT_TRUE(ts.empty()); // PG should be 
processed now + queueConsumer->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); // queue should be processed now ts.clear(); } @@ -566,113 +1682,113 @@ namespace portsorch_test * if port operational status is up but operational speed is 0, the port speed should not be * updated to DB. */ - TEST_F(PortsOrchTest, PortOperStatusIsUpAndOperSpeedIsZero) - { - Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); - - // Get SAI default ports to populate DB - auto ports = ut_helper::getInitialSaiPorts(); - - // Populate port table with SAI ports - for (const auto &it : ports) - { - portTable.set(it.first, it.second); - } - - // Set PortConfigDone, PortInitDone - portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); - portTable.set("PortInitDone", { { "lanes", "0" } }); - - // refill consumer - gPortsOrch->addExistingData(&portTable); - // Apply configuration : create ports - static_cast(gPortsOrch)->doTask(); - - // Get first port, expect the oper status is not UP - Port port; - gPortsOrch->getPort("Ethernet0", port); - ASSERT_TRUE(port.m_oper_status != SAI_PORT_OPER_STATUS_UP); - - // save original api since we will spy - auto orig_port_api = sai_port_api; - sai_port_api = new sai_port_api_t(); - memcpy(sai_port_api, orig_port_api, sizeof(*sai_port_api)); - - // mock SAI API sai_port_api->get_port_attribute - auto portSpy = SpyOn(&sai_port_api->get_port_attribute); - portSpy->callFake([&](sai_object_id_t oid, uint32_t count, sai_attribute_t * attrs) -> sai_status_t { - if (attrs[0].id == SAI_PORT_ATTR_OPER_STATUS) - { - attrs[0].value.u32 = (uint32_t)SAI_PORT_OPER_STATUS_UP; - } - else if (attrs[0].id == SAI_PORT_ATTR_OPER_SPEED) - { - // Return 0 for port operational speed - attrs[0].value.u32 = 0; - } - - return (sai_status_t)SAI_STATUS_SUCCESS; - } - ); - - auto exec = static_cast(gPortsOrch->getExecutor("PORT_STATUS_NOTIFICATIONS")); - auto consumer = exec->getNotificationConsumer(); - - // mock a redis reply for notification, it notifies 
that Ehernet0 is going to up - mockReply = (redisReply *)calloc(sizeof(redisReply), 1); - mockReply->type = REDIS_REPLY_ARRAY; - mockReply->elements = 3; // REDIS_PUBLISH_MESSAGE_ELEMNTS - mockReply->element = (redisReply **)calloc(sizeof(redisReply *), mockReply->elements); - mockReply->element[2] = (redisReply *)calloc(sizeof(redisReply), 1); - mockReply->element[2]->type = REDIS_REPLY_STRING; - sai_port_oper_status_notification_t port_oper_status; - port_oper_status.port_id = port.m_port_id; - port_oper_status.port_state = SAI_PORT_OPER_STATUS_UP; - std::string data = sai_serialize_port_oper_status_ntf(1, &port_oper_status); - std::vector notifyValues; - FieldValueTuple opdata("port_state_change", data); - notifyValues.push_back(opdata); - std::string msg = swss::JSon::buildJson(notifyValues); - mockReply->element[2]->str = (char*)calloc(1, msg.length() + 1); - memcpy(mockReply->element[2]->str, msg.c_str(), msg.length()); - - // trigger the notification - consumer->readData(); - gPortsOrch->doTask(*consumer); - mockReply = nullptr; - - gPortsOrch->getPort("Ethernet0", port); - ASSERT_TRUE(port.m_oper_status == SAI_PORT_OPER_STATUS_UP); - - std::vector values; - portTable.get("Ethernet0", values); - for (auto &valueTuple : values) - { - if (fvField(valueTuple) == "speed") - { - ASSERT_TRUE(fvValue(valueTuple) != "0"); - } - } - - gPortsOrch->refreshPortStatus(); - for (const auto &it : ports) - { - gPortsOrch->getPort(it.first, port); - ASSERT_TRUE(port.m_oper_status == SAI_PORT_OPER_STATUS_UP); - - std::vector values; - portTable.get(it.first, values); - for (auto &valueTuple : values) - { - if (fvField(valueTuple) == "speed") - { - ASSERT_TRUE(fvValue(valueTuple) != "0"); - } - } - } - - sai_port_api = orig_port_api; - } + // TEST_F(PortsOrchTest, PortOperStatusIsUpAndOperSpeedIsZero) + // { + // Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + // // Get SAI default ports to populate DB + // auto ports = ut_helper::getInitialSaiPorts(); + + // 
// Populate port table with SAI ports + // for (const auto &it : ports) + // { + // portTable.set(it.first, it.second); + // } + + // // Set PortConfigDone, PortInitDone + // portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + // portTable.set("PortInitDone", { { "lanes", "0" } }); + + // // refill consumer + // gPortsOrch->addExistingData(&portTable); + // // Apply configuration : create ports + // static_cast(gPortsOrch)->doTask(); + + // // Get first port, expect the oper status is not UP + // Port port; + // gPortsOrch->getPort("Ethernet0", port); + // ASSERT_TRUE(port.m_oper_status != SAI_PORT_OPER_STATUS_UP); + + // // save original api since we will spy + // auto orig_port_api = sai_port_api; + // sai_port_api = new sai_port_api_t(); + // memcpy(sai_port_api, orig_port_api, sizeof(*sai_port_api)); + + // // mock SAI API sai_port_api->get_port_attribute + // auto portSpy = SpyOn(&sai_port_api->get_port_attribute); + // portSpy->callFake([&](sai_object_id_t oid, uint32_t count, sai_attribute_t * attrs) -> sai_status_t { + // if (attrs[0].id == SAI_PORT_ATTR_OPER_STATUS) + // { + // attrs[0].value.u32 = (uint32_t)SAI_PORT_OPER_STATUS_UP; + // } + // else if (attrs[0].id == SAI_PORT_ATTR_OPER_SPEED) + // { + // // Return 0 for port operational speed + // attrs[0].value.u32 = 0; + // } + + // return (sai_status_t)SAI_STATUS_SUCCESS; + // } + // ); + + // auto exec = static_cast(gPortsOrch->getExecutor("PORT_STATUS_NOTIFICATIONS")); + // auto consumer = exec->getNotificationConsumer(); + + // // mock a redis reply for notification, it notifies that Ehernet0 is going to up + // mockReply = (redisReply *)calloc(sizeof(redisReply), 1); + // mockReply->type = REDIS_REPLY_ARRAY; + // mockReply->elements = 3; // REDIS_PUBLISH_MESSAGE_ELEMNTS + // mockReply->element = (redisReply **)calloc(sizeof(redisReply *), mockReply->elements); + // mockReply->element[2] = (redisReply *)calloc(sizeof(redisReply), 1); + // mockReply->element[2]->type = 
REDIS_REPLY_STRING; + // sai_port_oper_status_notification_t port_oper_status; + // port_oper_status.port_id = port.m_port_id; + // port_oper_status.port_state = SAI_PORT_OPER_STATUS_UP; + // std::string data = sai_serialize_port_oper_status_ntf(1, &port_oper_status); + // std::vector notifyValues; + // FieldValueTuple opdata("port_state_change", data); + // notifyValues.push_back(opdata); + // std::string msg = swss::JSon::buildJson(notifyValues); + // mockReply->element[2]->str = (char*)calloc(1, msg.length() + 1); + // memcpy(mockReply->element[2]->str, msg.c_str(), msg.length()); + + // // trigger the notification + // consumer->readData(); + // gPortsOrch->doTask(*consumer); + // mockReply = nullptr; + + // gPortsOrch->getPort("Ethernet0", port); + // ASSERT_TRUE(port.m_oper_status == SAI_PORT_OPER_STATUS_UP); + + // std::vector values; + // portTable.get("Ethernet0", values); + // for (auto &valueTuple : values) + // { + // if (fvField(valueTuple) == "speed") + // { + // ASSERT_TRUE(fvValue(valueTuple) != "0"); + // } + // } + + // gPortsOrch->refreshPortStatus(); + // for (const auto &it : ports) + // { + // gPortsOrch->getPort(it.first, port); + // ASSERT_TRUE(port.m_oper_status == SAI_PORT_OPER_STATUS_UP); + + // std::vector values; + // portTable.get(it.first, values); + // for (auto &valueTuple : values) + // { + // if (fvField(valueTuple) == "speed") + // { + // ASSERT_TRUE(fvValue(valueTuple) != "0"); + // } + // } + // } + + // sai_port_api = orig_port_api; + // } /* * The scope of this test is to verify that LAG member is @@ -788,4 +1904,5 @@ namespace portsorch_test ASSERT_FALSE(bridgePortCalledBeforeLagMember); // bridge port created on lag before lag member was created } + } diff --git a/tests/mock_tests/portsyncd/portsyncd_ut.cpp b/tests/mock_tests/portsyncd/portsyncd_ut.cpp new file mode 100644 index 0000000000..a7aaf0f9f8 --- /dev/null +++ b/tests/mock_tests/portsyncd/portsyncd_ut.cpp @@ -0,0 +1,342 @@ +#include "gtest/gtest.h" +#include 
+#include +#include "mock_table.h" +#define private public +#include "linksync.h" +#undef private + +struct if_nameindex *if_ni_mock = NULL; + +/* Mock if_nameindex() call */ +extern "C" { + struct if_nameindex *__wrap_if_nameindex() + { + return if_ni_mock; + } +} + +/* Mock if_freenameindex() call */ +extern "C" { + void __wrap_if_freenameindex(struct if_nameindex *ptr) + { + return ; + } +} + +extern std::string mockCmdStdcout; +extern std::vector mockCallArgs; +std::set g_portSet; +bool g_init = false; + +void writeToApplDB(swss::ProducerStateTable &p, swss::DBConnector &cfgDb) +{ + swss::Table table(&cfgDb, CFG_PORT_TABLE_NAME); + std::vector ovalues; + std::vector keys; + table.getKeys(keys); + + for ( auto &k : keys ) + { + table.get(k, ovalues); + std::vector attrs; + for ( auto &v : ovalues ) + { + swss::FieldValueTuple attr(v.first, v.second); + attrs.push_back(attr); + } + p.set(k, attrs); + g_portSet.insert(k); + } +} + +/* +Test Fixture +*/ +namespace portsyncd_ut +{ + struct PortSyncdTest : public ::testing::Test + { + std::shared_ptr m_config_db; + std::shared_ptr m_app_db; + std::shared_ptr m_state_db; + std::shared_ptr m_portCfgTable; + std::shared_ptr m_portAppTable; + + virtual void SetUp() override + { + testing_db::reset(); + m_config_db = std::make_shared("CONFIG_DB", 0); + m_app_db = std::make_shared("APPL_DB", 0); + m_state_db = std::make_shared("STATE_DB", 0); + m_portCfgTable = std::make_shared(m_config_db.get(), CFG_PORT_TABLE_NAME); + m_portAppTable = std::make_shared(m_app_db.get(), APP_PORT_TABLE_NAME); + } + + virtual void TearDown() override { + if (if_ni_mock != NULL) free(if_ni_mock); + if_ni_mock = NULL; + } + }; + + /* Helper Methods */ + void populateCfgDb(swss::Table* tbl){ + /* populate config db with Eth0 and Eth4 objects */ + std::vector vec; + vec.emplace_back("admin_status", "down"); + vec.emplace_back("index", "2"); + vec.emplace_back("lanes", "4,5,6,7"); + vec.emplace_back("mtu", "9100"); + vec.emplace_back("speed", 
"10000"); + vec.emplace_back("alias", "etp1"); + tbl->set("Ethernet0", vec); + vec.pop_back(); + vec.emplace_back("alias", "etp1"); + tbl->set("Ethernet4", vec); + } + + /* Create internal ds holding netdev ifaces for eth0 & lo */ + inline struct if_nameindex * populateNetDev(){ + struct if_nameindex *if_ni_temp; + /* Construct a mock if_nameindex array */ + if_ni_temp = (struct if_nameindex*) calloc(3, sizeof(struct if_nameindex)); + + if_ni_temp[2].if_index = 0; + if_ni_temp[2].if_name = NULL; + + if_ni_temp[1].if_index = 16222; + if_ni_temp[1].if_name = "eth0"; + + if_ni_temp[0].if_index = 1; + if_ni_temp[0].if_name = "lo"; + + return if_ni_temp; + } + + /* Create internal ds holding netdev ifaces for lo & Ethernet0 */ + inline struct if_nameindex * populateNetDevAdvanced(){ + struct if_nameindex *if_ni_temp; + /* Construct a mock if_nameindex array */ + if_ni_temp = (struct if_nameindex*) calloc(3, sizeof(struct if_nameindex)); + + if_ni_temp[2].if_index = 0; + if_ni_temp[2].if_name = NULL; + + if_ni_temp[1].if_index = 142; + if_ni_temp[1].if_name = "Ethernet0"; + + if_ni_temp[0].if_index = 1; + if_ni_temp[0].if_name = "lo"; + + return if_ni_temp; + } + + /* Draft a rtnl_link msg */ + struct nl_object* draft_nlmsg(const std::string& name, + std::vector flags, + const std::string& type, + const std::string& ll_add, + int ifindex, + unsigned int mtu, + int master_ifindex = 0){ + + struct rtnl_link* nl_obj = rtnl_link_alloc(); + if (!nl_obj){ + throw std::runtime_error("netlink: rtnl_link object allocation failed"); + } + /* Set name for rtnl link object */ + rtnl_link_set_name(nl_obj, name.c_str()); + + /* Set flags */ + for (auto nlflag : flags){ + rtnl_link_set_flags(nl_obj, nlflag); + } + + /* Set type */ + if (!type.empty()){ + rtnl_link_set_type(nl_obj, type.c_str()); + } + + /* Set Link layer Address */ + struct nl_addr * ll_addr; + int result = nl_addr_parse(ll_add.c_str(), AF_LLC, &ll_addr); + if (result < 0){ + throw std::runtime_error("netlink: Link 
layer address allocation failed"); + } + rtnl_link_set_addr(nl_obj, ll_addr); + + /* Set ifindex */ + rtnl_link_set_ifindex(nl_obj, ifindex); + + /* Set mtu */ + rtnl_link_set_mtu(nl_obj, mtu); + + /* Set master_ifindex if any */ + if (master_ifindex){ + rtnl_link_set_master(nl_obj, master_ifindex); + } + + return (struct nl_object*)nl_obj; + } + + inline void free_nlobj(struct nl_object* msg){ + nl_object_free(msg); + } +} + +namespace portsyncd_ut +{ + TEST_F(PortSyncdTest, test_linkSyncInit) + { + if_ni_mock = populateNetDev(); + mockCmdStdcout = "up\n"; + swss::LinkSync sync(m_app_db.get(), m_state_db.get()); + std::vector keys; + sync.m_stateMgmtPortTable.getKeys(keys); + ASSERT_EQ(keys.size(), 1); + ASSERT_EQ(keys.back(), "eth0"); + ASSERT_EQ(mockCallArgs.back(), "cat /sys/class/net/\"eth0\"/operstate"); + } + + TEST_F(PortSyncdTest, test_cacheOldIfaces) + { + if_ni_mock = populateNetDevAdvanced(); + swss::LinkSync sync(m_app_db.get(), m_state_db.get()); + ASSERT_EQ(mockCallArgs.back(), "ip link set \"Ethernet0\" down"); + ASSERT_NE(sync.m_ifindexOldNameMap.find(142), sync.m_ifindexOldNameMap.end()); + ASSERT_EQ(sync.m_ifindexOldNameMap[142], "Ethernet0"); + } + + TEST_F(PortSyncdTest, test_onMsgNewLink) + { + swss::LinkSync sync(m_app_db.get(), m_state_db.get()); + /* Write config to Config DB */ + populateCfgDb(m_portCfgTable.get()); + swss::DBConnector cfg_db_conn("CONFIG_DB", 0); + + /* Handle CFG DB notifs and Write them to APPL_DB */ + swss::ProducerStateTable p(m_app_db.get(), APP_PORT_TABLE_NAME); + writeToApplDB(p, cfg_db_conn); + + /* Generate a netlink notification about the netdev iface */ + std::vector flags = {IFF_UP, IFF_RUNNING}; + struct nl_object* msg = draft_nlmsg("Ethernet0", + flags, + "sx_netdev", + "1c:34:da:1c:9f:00", + 142, + 9100, + 0); + sync.onMsg(RTM_NEWLINK, msg); + + /* Verify if the update has been written to State DB */ + std::vector ovalues; + ASSERT_EQ(sync.m_statePortTable.get("Ethernet0", ovalues), true); + for (auto value 
: ovalues){ + if (fvField(value) == "state") {ASSERT_EQ(fvValue(value), "ok");} + if (fvField(value) == "mtu") {ASSERT_EQ(fvValue(value), "9100");} + if (fvField(value) == "netdev_oper_status") {ASSERT_EQ(fvValue(value), "up");} + if (fvField(value) == "admin_status") {ASSERT_EQ(fvValue(value), "up");} + if (fvField(value) == "speed") {ASSERT_EQ(fvValue(value), "10000");} + } + + /* Verify if the internal strctures are updated as expected */ + ASSERT_NE(sync.m_ifindexNameMap.find(142), sync.m_ifindexNameMap.end()); + ASSERT_EQ(sync.m_ifindexNameMap[142], "Ethernet0"); + + /* Free Nl_object */ + free_nlobj(msg); + } + + TEST_F(PortSyncdTest, test_onMsgDelLink){ + + swss::LinkSync sync(m_app_db.get(), m_state_db.get()); + + /* Write config to Config DB */ + populateCfgDb(m_portCfgTable.get()); + swss::DBConnector cfg_db_conn("CONFIG_DB", 0); + + /* Handle CFG DB notifs and Write them to APPL_DB */ + swss::ProducerStateTable p(m_app_db.get(), APP_PORT_TABLE_NAME); + writeToApplDB(p, cfg_db_conn);; + + /* Generate a netlink notification about the netdev iface */ + std::vector flags = {IFF_UP, IFF_RUNNING}; + struct nl_object* msg = draft_nlmsg("Ethernet0", + flags, + "sx_netdev", + "1c:34:da:1c:9f:00", + 142, + 9100, + 0); + sync.onMsg(RTM_NEWLINK, msg); + + /* Verify if the update has been written to State DB */ + std::vector ovalues; + ASSERT_EQ(sync.m_statePortTable.get("Ethernet0", ovalues), true); + + /* Free Nl_object */ + free_nlobj(msg); + + /* Generate a DELLINK Notif */ + msg = draft_nlmsg("Ethernet0", + flags, + "sx_netdev", + "1c:34:da:1c:9f:00", + 142, + 9100, + 0); + + sync.onMsg(RTM_DELLINK, msg); + ovalues.clear(); + + /* Verify if the state_db entry is cleared */ + ASSERT_EQ(sync.m_statePortTable.get("Ethernet0", ovalues), false); + } + + TEST_F(PortSyncdTest, test_onMsgMgmtIface){ + swss::LinkSync sync(m_app_db.get(), m_state_db.get()); + + /* Generate a netlink notification about the eth0 netdev iface */ + std::vector flags = {IFF_UP}; + struct 
nl_object* msg = draft_nlmsg("eth0", + flags, + "", + "00:50:56:28:0e:4a", + 16222, + 9100, + 0); + sync.onMsg(RTM_NEWLINK, msg); + + /* Verify if the update has been written to State DB */ + std::string oper_status; + ASSERT_EQ(sync.m_stateMgmtPortTable.hget("eth0", "oper_status", oper_status), true); + ASSERT_EQ(oper_status, "down"); + + /* Free Nl_object */ + free_nlobj(msg); + } + + TEST_F(PortSyncdTest, test_onMsgIgnoreOldNetDev){ + if_ni_mock = populateNetDevAdvanced(); + swss::LinkSync sync(m_app_db.get(), m_state_db.get()); + ASSERT_EQ(mockCallArgs.back(), "ip link set \"Ethernet0\" down"); + ASSERT_NE(sync.m_ifindexOldNameMap.find(142), sync.m_ifindexOldNameMap.end()); + ASSERT_EQ(sync.m_ifindexOldNameMap[142], "Ethernet0"); + + /* Generate a netlink notification about the netdev iface */ + std::vector flags; + struct nl_object* msg = draft_nlmsg("Ethernet0", + flags, + "sx_netdev", + "1c:34:da:1c:9f:00", + 142, + 9100, + 0); + sync.onMsg(RTM_NEWLINK, msg); + + /* Verify if nothing is written to state_db */ + std::vector ovalues; + ASSERT_EQ(sync.m_statePortTable.get("Ethernet0", ovalues), false); + } +} diff --git a/tests/mock_tests/qosorch_ut.cpp b/tests/mock_tests/qosorch_ut.cpp new file mode 100644 index 0000000000..713238e9cd --- /dev/null +++ b/tests/mock_tests/qosorch_ut.cpp @@ -0,0 +1,1591 @@ +#define private public // make Directory::m_values available to clean it. 
+#include "directory.h" +#undef private +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_table.h" + +extern string gMySwitchType; + + +namespace qosorch_test +{ + using namespace std; + + shared_ptr m_app_db; + shared_ptr m_config_db; + shared_ptr m_state_db; + shared_ptr m_chassis_app_db; + + int sai_remove_qos_map_count; + int sai_remove_wred_profile_count; + int sai_remove_scheduler_count; + int sai_set_wred_attribute_count; + sai_object_id_t switch_dscp_to_tc_map_id; + TunnelDecapOrch *tunnel_decap_orch; + + sai_remove_scheduler_fn old_remove_scheduler; + sai_scheduler_api_t ut_sai_scheduler_api, *pold_sai_scheduler_api; + sai_create_wred_fn old_create_wred; + sai_remove_wred_fn old_remove_wred; + sai_set_wred_attribute_fn old_set_wred_attribute; + sai_wred_api_t ut_sai_wred_api, *pold_sai_wred_api; + sai_remove_qos_map_fn old_remove_qos_map; + sai_qos_map_api_t ut_sai_qos_map_api, *pold_sai_qos_map_api; + sai_set_switch_attribute_fn old_set_switch_attribute_fn; + sai_switch_api_t ut_sai_switch_api, *pold_sai_switch_api; + sai_tunnel_api_t ut_sai_tunnel_api, *pold_sai_tunnel_api; + + typedef struct + { + sai_uint32_t green_max_drop_probability; + sai_uint32_t yellow_max_drop_probability; + sai_uint32_t red_max_drop_probability; + } qos_wred_max_drop_probability_t; + + sai_status_t _ut_stub_sai_set_switch_attribute(sai_object_id_t switch_id, const sai_attribute_t *attr) + { + auto rc = old_set_switch_attribute_fn(switch_id, attr); + if (rc == SAI_STATUS_SUCCESS && attr->id == SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP) + switch_dscp_to_tc_map_id = attr->value.oid; + return rc; + } + + sai_status_t _ut_stub_sai_remove_qos_map(sai_object_id_t qos_map_id) + { + auto rc = old_remove_qos_map(qos_map_id); + if (rc == SAI_STATUS_SUCCESS) + sai_remove_qos_map_count++; + return rc; + } + + bool testing_wred_thresholds; + WredMapHandler::qos_wred_thresholds_t saiThresholds; + 
qos_wred_max_drop_probability_t saiMaxDropProbabilities; + void _ut_stub_sai_check_wred_attributes(const sai_attribute_t &attr) + { + if (!testing_wred_thresholds) + { + return; + } + + switch (attr.id) + { + case SAI_WRED_ATTR_GREEN_MAX_THRESHOLD: + ASSERT_TRUE(!saiThresholds.green_min_threshold || saiThresholds.green_min_threshold < attr.value.u32); + saiThresholds.green_max_threshold = attr.value.u32; + break; + case SAI_WRED_ATTR_GREEN_MIN_THRESHOLD: + ASSERT_TRUE(!saiThresholds.green_max_threshold || saiThresholds.green_max_threshold > attr.value.u32); + saiThresholds.green_min_threshold = attr.value.u32; + break; + case SAI_WRED_ATTR_YELLOW_MAX_THRESHOLD: + ASSERT_TRUE(!saiThresholds.yellow_min_threshold || saiThresholds.yellow_min_threshold < attr.value.u32); + saiThresholds.yellow_max_threshold = attr.value.u32; + break; + case SAI_WRED_ATTR_YELLOW_MIN_THRESHOLD: + ASSERT_TRUE(!saiThresholds.yellow_max_threshold || saiThresholds.yellow_max_threshold > attr.value.u32); + saiThresholds.yellow_min_threshold = attr.value.u32; + break; + case SAI_WRED_ATTR_RED_MAX_THRESHOLD: + ASSERT_TRUE(!saiThresholds.red_min_threshold || saiThresholds.red_min_threshold < attr.value.u32); + saiThresholds.red_max_threshold = attr.value.u32; + break; + case SAI_WRED_ATTR_RED_MIN_THRESHOLD: + ASSERT_TRUE(!saiThresholds.red_max_threshold || saiThresholds.red_max_threshold > attr.value.u32); + saiThresholds.red_min_threshold = attr.value.u32; + break; + case SAI_WRED_ATTR_GREEN_DROP_PROBABILITY: + saiMaxDropProbabilities.green_max_drop_probability = attr.value.u32; + break; + case SAI_WRED_ATTR_YELLOW_DROP_PROBABILITY: + saiMaxDropProbabilities.yellow_max_drop_probability = attr.value.u32; + break; + case SAI_WRED_ATTR_RED_DROP_PROBABILITY: + saiMaxDropProbabilities.red_max_drop_probability = attr.value.u32; + break; + default: + break; + } + } + + void checkWredProfileEqual(const string &name, WredMapHandler::qos_wred_thresholds_t &thresholds) + { + auto &oaThresholds = 
WredMapHandler::m_wredProfiles[name]; + + ASSERT_EQ(oaThresholds.green_min_threshold, thresholds.green_min_threshold); + ASSERT_EQ(oaThresholds.green_max_threshold, thresholds.green_max_threshold); + ASSERT_EQ(oaThresholds.yellow_min_threshold, thresholds.yellow_min_threshold); + ASSERT_EQ(oaThresholds.yellow_max_threshold, thresholds.yellow_max_threshold); + ASSERT_EQ(oaThresholds.red_min_threshold, thresholds.red_min_threshold); + ASSERT_EQ(oaThresholds.red_max_threshold, thresholds.red_max_threshold); + } + + void updateWredProfileAndCheck(vector &thresholdsVector, WredMapHandler::qos_wred_thresholds_t &thresholdsValue) + { + std::deque entries; + entries.push_back({"AZURE", "SET", thresholdsVector}); + auto consumer = dynamic_cast(gQosOrch->getExecutor(CFG_WRED_PROFILE_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + static_cast(gQosOrch)->doTask(); + checkWredProfileEqual("AZURE", saiThresholds); + checkWredProfileEqual("AZURE", thresholdsValue); + } + + void updateWrongWredProfileAndCheck(vector &thresholdsVector) + { + std::deque entries; + vector ts; + entries.push_back({"AZURE", "SET", thresholdsVector}); + auto consumer = dynamic_cast(gQosOrch->getExecutor(CFG_WRED_PROFILE_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + auto current_sai_wred_set_count = sai_set_wred_attribute_count; + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(current_sai_wred_set_count, sai_set_wred_attribute_count); + static_cast(gQosOrch)->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + } + + void updateMaxDropProbabilityAndCheck(string name, vector &maxDropProbabilityVector, qos_wred_max_drop_probability_t &maxDropProbabilities) + { + std::deque entries; + vector ts; + entries.push_back({name, "SET", maxDropProbabilityVector}); + auto consumer = dynamic_cast(gQosOrch->getExecutor(CFG_WRED_PROFILE_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + saiMaxDropProbabilities.green_max_drop_probability = 0; + 
saiMaxDropProbabilities.yellow_max_drop_probability = 0; + saiMaxDropProbabilities.red_max_drop_probability = 0; + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(saiMaxDropProbabilities.green_max_drop_probability, maxDropProbabilities.green_max_drop_probability); + ASSERT_EQ(saiMaxDropProbabilities.yellow_max_drop_probability, maxDropProbabilities.yellow_max_drop_probability); + ASSERT_EQ(saiMaxDropProbabilities.red_max_drop_probability, maxDropProbabilities.red_max_drop_probability); + } + + sai_status_t _ut_stub_sai_create_wred( + _Out_ sai_object_id_t *wred_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t *attr_list) + { + auto rc = old_create_wred(wred_id, switch_id, attr_count, attr_list); + if (rc == SAI_STATUS_SUCCESS) + { + for (uint32_t i = 0; i < attr_count; i++) + { + _ut_stub_sai_check_wred_attributes(attr_list[i]); + } + } + return rc; + } + + sai_status_t _ut_stub_sai_remove_wred(sai_object_id_t wred_id) + { + auto rc = old_remove_wred(wred_id); + if (rc == SAI_STATUS_SUCCESS) + sai_remove_wred_profile_count++; + return rc; + } + + sai_status_t _ut_stub_sai_set_wred_attribute( + _In_ sai_object_id_t wred_id, + _In_ const sai_attribute_t *attr) + { + auto rc = old_set_wred_attribute(wred_id, attr); + if (rc == SAI_STATUS_SUCCESS) + { + _ut_stub_sai_check_wred_attributes(*attr); + } + sai_set_wred_attribute_count++; + return rc; + } + + sai_status_t _ut_stub_sai_remove_scheduler(sai_object_id_t scheduler_id) + { + auto rc = old_remove_scheduler(scheduler_id); + if (rc == SAI_STATUS_SUCCESS) + sai_remove_scheduler_count++; + return rc; + } + + sai_status_t _ut_stub_sai_create_tunnel( + _Out_ sai_object_id_t *tunnel_id, + _In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t *attr_list) + { + *tunnel_id = (sai_object_id_t)(0x1); + return SAI_STATUS_SUCCESS; + } + + sai_status_t _ut_stub_sai_create_tunnel_term_table_entry( + _Out_ sai_object_id_t *tunnel_term_table_entry_id, + 
_In_ sai_object_id_t switch_id, + _In_ uint32_t attr_count, + _In_ const sai_attribute_t *attr_list) + { + *tunnel_term_table_entry_id = (sai_object_id_t)(0x1); + return SAI_STATUS_SUCCESS; + } + + void checkTunnelAttribute(sai_attr_id_t attr) + { + ASSERT_TRUE(attr != SAI_TUNNEL_ATTR_ENCAP_ECN_MODE); + ASSERT_TRUE(attr != SAI_TUNNEL_ATTR_DECAP_ECN_MODE); + } + + sai_status_t _ut_stub_sai_set_tunnel_attribute( + _In_ sai_object_id_t tunnel_id, + _In_ const sai_attribute_t *attr) + { + checkTunnelAttribute(attr->id); + return SAI_STATUS_ATTR_NOT_SUPPORTED_0; + } + + struct QosOrchTest : public ::testing::Test + { + QosOrchTest() + { + } + + void CheckDependency(const string &referencingTableName, const string &referencingObjectName, const string &field, const string &dependentTableName, const string &dependentObjectName="") + { + auto &qosTypeMaps = QosOrch::getTypeMap(); + auto &referencingTable = (*qosTypeMaps[referencingTableName]); + auto &dependentTable = (*qosTypeMaps[dependentTableName]); + + if (dependentObjectName.empty()) + { + ASSERT_TRUE(referencingTable[referencingObjectName].m_objsReferencingByMe[field].empty()); + ASSERT_EQ(dependentTable[dependentObjectName].m_objsDependingOnMe.count(referencingObjectName), 0); + } + else + { + ASSERT_EQ(referencingTable[referencingObjectName].m_objsReferencingByMe[field], dependentTableName + ":" + dependentObjectName); + ASSERT_EQ(dependentTable[dependentObjectName].m_objsDependingOnMe.count(referencingObjectName), 1); + } + } + + void RemoveItem(const string &table, const string &key) + { + std::deque entries; + entries.push_back({key, "DEL", {}}); + auto consumer = dynamic_cast(gQosOrch->getExecutor(table)); + consumer->addToSync(entries); + } + + template void ReplaceSaiRemoveApi(sai_api_t* &sai_api, + sai_api_t &ut_sai_api, + sai_api_t* &pold_sai_api, + sai_remove_func ut_remove, + sai_remove_func &sai_remove, + sai_remove_func &old_remove, + sai_remove_func &put_remove) + { + old_remove = sai_remove; + 
pold_sai_api = sai_api; + ut_sai_api = *pold_sai_api; + sai_api = &ut_sai_api; + put_remove = ut_remove; + } + + void SetUp() override + { + ASSERT_EQ(sai_route_api, nullptr); + map profile = { + { "SAI_VS_SWITCH_TYPE", "SAI_VS_SWITCH_TYPE_BCM56850" }, + { "KV_DEVICE_MAC_ADDRESS", "20:03:04:05:06:00" } + }; + + ut_helper::initSaiApi(profile); + + // Hack SAI APIs + ReplaceSaiRemoveApi(sai_qos_map_api, ut_sai_qos_map_api, pold_sai_qos_map_api, + _ut_stub_sai_remove_qos_map, sai_qos_map_api->remove_qos_map, + old_remove_qos_map, ut_sai_qos_map_api.remove_qos_map); + ReplaceSaiRemoveApi(sai_scheduler_api, ut_sai_scheduler_api, pold_sai_scheduler_api, + _ut_stub_sai_remove_scheduler, sai_scheduler_api->remove_scheduler, + old_remove_scheduler, ut_sai_scheduler_api.remove_scheduler); + ReplaceSaiRemoveApi(sai_wred_api, ut_sai_wred_api, pold_sai_wred_api, + _ut_stub_sai_remove_wred, sai_wred_api->remove_wred, + old_remove_wred, ut_sai_wred_api.remove_wred); + // Mock other wred APIs + old_create_wred = pold_sai_wred_api->create_wred; + ut_sai_wred_api.create_wred = _ut_stub_sai_create_wred; + old_set_wred_attribute = pold_sai_wred_api->set_wred_attribute; + ut_sai_wred_api.set_wred_attribute = _ut_stub_sai_set_wred_attribute; + + // Mock switch API + pold_sai_switch_api = sai_switch_api; + ut_sai_switch_api = *pold_sai_switch_api; + old_set_switch_attribute_fn = pold_sai_switch_api->set_switch_attribute; + sai_switch_api = &ut_sai_switch_api; + ut_sai_switch_api.set_switch_attribute = _ut_stub_sai_set_switch_attribute; + + // Mock tunnel API + pold_sai_tunnel_api = sai_tunnel_api; + ut_sai_tunnel_api = *pold_sai_tunnel_api; + sai_tunnel_api = &ut_sai_tunnel_api; + ut_sai_tunnel_api.set_tunnel_attribute = _ut_stub_sai_set_tunnel_attribute; + ut_sai_tunnel_api.create_tunnel = _ut_stub_sai_create_tunnel; + ut_sai_tunnel_api.create_tunnel_term_table_entry = _ut_stub_sai_create_tunnel_term_table_entry; + + // Init switch and create dependencies + m_app_db = 
make_shared("APPL_DB", 0); + m_config_db = make_shared("CONFIG_DB", 0); + m_state_db = make_shared("STATE_DB", 0); + if(gMySwitchType == "voq") + m_chassis_app_db = make_shared("CHASSIS_APP_DB", 0); + + sai_attribute_t attr; + + attr.id = SAI_SWITCH_ATTR_INIT_SWITCH; + attr.value.booldata = true; + + auto status = sai_switch_api->create_switch(&gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + // Get switch source MAC address + attr.id = SAI_SWITCH_ATTR_SRC_MAC_ADDRESS; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gMacAddress = attr.value.mac; + + // Get the default virtual router ID + attr.id = SAI_SWITCH_ATTR_DEFAULT_VIRTUAL_ROUTER_ID; + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gVirtualRouterId = attr.value.oid; + + ASSERT_EQ(gCrmOrch, nullptr); + gCrmOrch = new CrmOrch(m_config_db.get(), CFG_CRM_TABLE_NAME); + + TableConnector stateDbSwitchTable(m_state_db.get(), "SWITCH_CAPABILITY"); + TableConnector conf_asic_sensors(m_config_db.get(), CFG_ASIC_SENSORS_TABLE_NAME); + TableConnector app_switch_table(m_app_db.get(), APP_SWITCH_TABLE_NAME); + + vector switch_tables = { + conf_asic_sensors, + app_switch_table + }; + + ASSERT_EQ(gSwitchOrch, nullptr); + gSwitchOrch = new SwitchOrch(m_app_db.get(), switch_tables, stateDbSwitchTable); + + // Create dependencies ... 
+ + const int portsorch_base_pri = 40; + + vector ports_tables = { + { APP_PORT_TABLE_NAME, portsorch_base_pri + 5 }, + { APP_VLAN_TABLE_NAME, portsorch_base_pri + 2 }, + { APP_VLAN_MEMBER_TABLE_NAME, portsorch_base_pri }, + { APP_LAG_TABLE_NAME, portsorch_base_pri + 4 }, + { APP_LAG_MEMBER_TABLE_NAME, portsorch_base_pri } + }; + + vector flex_counter_tables = { + CFG_FLEX_COUNTER_TABLE_NAME + }; + auto* flexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); + gDirectory.set(flexCounterOrch); + + ASSERT_EQ(gPortsOrch, nullptr); + gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + + ASSERT_EQ(gVrfOrch, nullptr); + gVrfOrch = new VRFOrch(m_app_db.get(), APP_VRF_TABLE_NAME, m_state_db.get(), STATE_VRF_OBJECT_TABLE_NAME); + + ASSERT_EQ(gIntfsOrch, nullptr); + gIntfsOrch = new IntfsOrch(m_app_db.get(), APP_INTF_TABLE_NAME, gVrfOrch, m_chassis_app_db.get()); + + const int fdborch_pri = 20; + + vector app_fdb_tables = { + { APP_FDB_TABLE_NAME, FdbOrch::fdborch_pri}, + { APP_VXLAN_FDB_TABLE_NAME, FdbOrch::fdborch_pri}, + { APP_MCLAG_FDB_TABLE_NAME, fdborch_pri} + }; + + TableConnector stateDbFdb(m_state_db.get(), STATE_FDB_TABLE_NAME); + TableConnector stateMclagDbFdb(m_state_db.get(), STATE_MCLAG_REMOTE_FDB_TABLE_NAME); + ASSERT_EQ(gFdbOrch, nullptr); + gFdbOrch = new FdbOrch(m_app_db.get(), app_fdb_tables, stateDbFdb, stateMclagDbFdb, gPortsOrch); + + ASSERT_EQ(gNeighOrch, nullptr); + gNeighOrch = new NeighOrch(m_app_db.get(), APP_NEIGH_TABLE_NAME, gIntfsOrch, gFdbOrch, gPortsOrch, m_chassis_app_db.get()); + + ASSERT_EQ(tunnel_decap_orch, nullptr); + tunnel_decap_orch = new TunnelDecapOrch(m_app_db.get(), APP_TUNNEL_DECAP_TABLE_NAME); + + vector qos_tables = { + CFG_TC_TO_QUEUE_MAP_TABLE_NAME, + CFG_SCHEDULER_TABLE_NAME, + CFG_DSCP_TO_TC_MAP_TABLE_NAME, + CFG_MPLS_TC_TO_TC_MAP_TABLE_NAME, + CFG_DOT1P_TO_TC_MAP_TABLE_NAME, + CFG_QUEUE_TABLE_NAME, + CFG_PORT_QOS_MAP_TABLE_NAME, + 
CFG_WRED_PROFILE_TABLE_NAME, + CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, + CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, + CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, + CFG_DSCP_TO_FC_MAP_TABLE_NAME, + CFG_EXP_TO_FC_MAP_TABLE_NAME, + CFG_TC_TO_DSCP_MAP_TABLE_NAME + }; + gQosOrch = new QosOrch(m_config_db.get(), qos_tables); + + // Recreate buffer orch to read populated data + vector buffer_tables = { APP_BUFFER_POOL_TABLE_NAME, + APP_BUFFER_PROFILE_TABLE_NAME, + APP_BUFFER_QUEUE_TABLE_NAME, + APP_BUFFER_PG_TABLE_NAME, + APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, + APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME }; + + gBufferOrch = new BufferOrch(m_app_db.get(), m_config_db.get(), m_state_db.get(), buffer_tables); + + Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate pot table with SAI ports + for (const auto &it : ports) + { + portTable.set(it.first, it.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + + portTable.set("PortInitDone", { { "lanes", "0" } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + + Table tcToQueueMapTable = Table(m_config_db.get(), CFG_TC_TO_QUEUE_MAP_TABLE_NAME); + Table scheduleTable = Table(m_config_db.get(), CFG_SCHEDULER_TABLE_NAME); + Table dscpToTcMapTable = Table(m_config_db.get(), CFG_DSCP_TO_TC_MAP_TABLE_NAME); + Table dot1pToTcMapTable = Table(m_config_db.get(), CFG_DOT1P_TO_TC_MAP_TABLE_NAME); + Table queueTable = Table(m_config_db.get(), CFG_QUEUE_TABLE_NAME); + Table portQosMapTable = Table(m_config_db.get(), CFG_PORT_QOS_MAP_TABLE_NAME); + Table wredProfileTable = Table(m_config_db.get(), CFG_WRED_PROFILE_TABLE_NAME); + Table tcToPgMapTable = Table(m_config_db.get(), CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME); + Table 
pfcPriorityToPgMapTable = Table(m_config_db.get(), CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME); + Table pfcPriorityToQueueMapTable = Table(m_config_db.get(), CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME); + Table dscpToFcMapTable = Table(m_config_db.get(), CFG_DSCP_TO_FC_MAP_TABLE_NAME); + Table expToFcMapTable = Table(m_config_db.get(), CFG_EXP_TO_FC_MAP_TABLE_NAME); + + scheduleTable.set("scheduler.1", + { + {"type", "DWRR"}, + {"weight", "15"} + }); + + scheduleTable.set("scheduler.0", + { + {"type", "DWRR"}, + {"weight", "14"} + }); + + wredProfileTable.set("AZURE_LOSSLESS", + { + {"ecn", "ecn_all"}, + {"green_drop_probability", "5"}, + {"green_max_threshold", "2097152"}, + {"green_min_threshold", "1048576"}, + {"wred_green_enable", "true"}, + {"yellow_drop_probability", "5"}, + {"yellow_max_threshold", "2097152"}, + {"yellow_min_threshold", "1048576"}, + {"wred_yellow_enable", "true"}, + {"red_drop_probability", "5"}, + {"red_max_threshold", "2097152"}, + {"red_min_threshold", "1048576"}, + {"wred_red_enable", "true"} + }); + + tcToQueueMapTable.set("AZURE", + { + {"0", "0"}, + {"1", "1"} + }); + + dscpToTcMapTable.set("AZURE", + { + {"0", "0"}, + {"1", "1"} + }); + + tcToPgMapTable.set("AZURE", + { + {"0", "0"}, + {"1", "1"} + }); + + dot1pToTcMapTable.set("AZURE", + { + {"0", "0"}, + {"1", "1"} + }); + + pfcPriorityToPgMapTable.set("AZURE", + { + {"0", "0"}, + {"1", "1"} + }); + + pfcPriorityToQueueMapTable.set("AZURE", + { + {"0", "0"}, + {"1", "1"} + }); + + dot1pToTcMapTable.set("AZURE", + { + {"0", "0"}, + {"1", "1"} + }); + + gQosOrch->addExistingData(&tcToQueueMapTable); + gQosOrch->addExistingData(&dscpToTcMapTable); + gQosOrch->addExistingData(&tcToPgMapTable); + gQosOrch->addExistingData(&pfcPriorityToPgMapTable); + gQosOrch->addExistingData(&pfcPriorityToQueueMapTable); + gQosOrch->addExistingData(&scheduleTable); + gQosOrch->addExistingData(&wredProfileTable); + + static_cast(gQosOrch)->doTask(); + } + + void TearDown() override + { + auto 
qos_maps = QosOrch::getTypeMap(); + for (auto &i : qos_maps) + { + i.second->clear(); + } + + gDirectory.m_values.clear(); + + delete gCrmOrch; + gCrmOrch = nullptr; + + delete gSwitchOrch; + gSwitchOrch = nullptr; + + delete gVrfOrch; + gVrfOrch = nullptr; + + delete gIntfsOrch; + gIntfsOrch = nullptr; + + delete gNeighOrch; + gNeighOrch = nullptr; + + delete gFdbOrch; + gFdbOrch = nullptr; + + delete gPortsOrch; + gPortsOrch = nullptr; + + delete gQosOrch; + gQosOrch = nullptr; + + delete tunnel_decap_orch; + tunnel_decap_orch = nullptr; + + sai_qos_map_api = pold_sai_qos_map_api; + sai_scheduler_api = pold_sai_scheduler_api; + sai_wred_api = pold_sai_wred_api; + sai_switch_api = pold_sai_switch_api; + sai_tunnel_api = pold_sai_tunnel_api; + ut_helper::uninitSaiApi(); + } + }; + + TEST_F(QosOrchTest, QosOrchTestPortQosMapRemoveOneField) + { + Table portQosMapTable = Table(m_config_db.get(), CFG_PORT_QOS_MAP_TABLE_NAME); + + portQosMapTable.set("Ethernet0", + { + {"dscp_to_tc_map", "AZURE"}, + {"pfc_to_pg_map", "AZURE"}, + {"pfc_to_queue_map", "AZURE"}, + {"tc_to_pg_map", "AZURE"}, + {"tc_to_queue_map", "AZURE"}, + {"pfc_enable", "3,4"} + }); + gQosOrch->addExistingData(&portQosMapTable); + static_cast(gQosOrch)->doTask(); + + // Check whether the dependencies have been recorded + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "pfc_to_pg_map", CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "pfc_to_queue_map", CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "tc_to_pg_map", CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "tc_to_queue_map", CFG_TC_TO_QUEUE_MAP_TABLE_NAME, "AZURE"); + + // Try removing AZURE from DSCP_TO_TC_MAP while it is 
still referenced + RemoveItem(CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE"); + auto current_sai_remove_qos_map_count = sai_remove_qos_map_count; + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(current_sai_remove_qos_map_count, sai_remove_qos_map_count); + // Dependency is not cleared + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE"); + + // Remove dscp_to_tc_map from Ethernet0 via resetting the entry with field dscp_to_tc_map removed + std::deque entries; + entries.push_back({"Ethernet0", "SET", + { + {"pfc_to_pg_map", "AZURE"}, + {"pfc_to_queue_map", "AZURE"}, + {"tc_to_pg_map", "AZURE"}, + {"tc_to_queue_map", "AZURE"}, + {"pfc_enable", "3,4"} + }}); + auto consumer = dynamic_cast(gQosOrch->getExecutor(CFG_PORT_QOS_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Drain PORT_QOS_MAP table + static_cast(gQosOrch)->doTask(); + // Drain DSCP_TO_TC_MAP table + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(current_sai_remove_qos_map_count + 1, sai_remove_qos_map_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME]).count("AZURE"), 0); + // Dependency of dscp_to_tc_map should be cleared + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME); + // Dependencies of other items are not touched + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "pfc_to_pg_map", CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "pfc_to_queue_map", CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "tc_to_pg_map", CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "tc_to_queue_map", CFG_TC_TO_QUEUE_MAP_TABLE_NAME, "AZURE"); + } + + TEST_F(QosOrchTest, QosOrchTestQueueRemoveWredProfile) + { + std::deque entries; + Table queueTable = 
Table(m_config_db.get(), CFG_QUEUE_TABLE_NAME); + + queueTable.set("Ethernet0|3", + { + {"scheduler", "scheduler.1"}, + {"wred_profile", "AZURE_LOSSLESS"} + }); + gQosOrch->addExistingData(&queueTable); + static_cast(gQosOrch)->doTask(); + + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.1"); + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "wred_profile", CFG_WRED_PROFILE_TABLE_NAME, "AZURE_LOSSLESS"); + + // Try removing scheduler from WRED_PROFILE table while it is still referenced + RemoveItem(CFG_WRED_PROFILE_TABLE_NAME, "AZURE_LOSSLESS"); + auto current_sai_remove_wred_profile_count = sai_remove_wred_profile_count; + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(current_sai_remove_wred_profile_count, sai_remove_wred_profile_count); + // Make sure the dependency is untouched + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "wred_profile", CFG_WRED_PROFILE_TABLE_NAME, "AZURE_LOSSLESS"); + + // Remove wred_profile from Ethernet0 queue 3 + entries.push_back({"Ethernet0|3", "SET", + { + {"scheduler", "scheduler.1"} + }}); + auto consumer = dynamic_cast(gQosOrch->getExecutor(CFG_QUEUE_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Drain QUEUE table + static_cast(gQosOrch)->doTask(); + // Drain WRED_PROFILE table + static_cast(gQosOrch)->doTask(); + // Make sure the dependency is cleared + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "wred_profile", CFG_WRED_PROFILE_TABLE_NAME); + // And the sai remove API has been called + ASSERT_EQ(current_sai_remove_wred_profile_count + 1, sai_remove_wred_profile_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_WRED_PROFILE_TABLE_NAME]).count("AZURE_LOSSLESS"), 0); + // Other field should be untouched + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.1"); + } + + TEST_F(QosOrchTest, QosOrchTestQueueRemoveScheduler) + { + std::deque entries; + Table queueTable = 
Table(m_config_db.get(), CFG_QUEUE_TABLE_NAME); + + queueTable.set("Ethernet0|3", + { + {"scheduler", "scheduler.1"}, + {"wred_profile", "AZURE_LOSSLESS"} + }); + gQosOrch->addExistingData(&queueTable); + static_cast(gQosOrch)->doTask(); + + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.1"); + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "wred_profile", CFG_WRED_PROFILE_TABLE_NAME, "AZURE_LOSSLESS"); + + // Try removing scheduler from QUEUE table while it is still referenced + RemoveItem(CFG_SCHEDULER_TABLE_NAME, "scheduler.1"); + auto current_sai_remove_scheduler_count = sai_remove_scheduler_count; + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(current_sai_remove_scheduler_count, sai_remove_scheduler_count); + // Make sure the dependency is untouched + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.1"); + + // Remove scheduler from Ethernet0 queue 3 + entries.push_back({"Ethernet0|3", "SET", + { + {"wred_profile", "AZURE_LOSSLESS"} + }}); + auto consumer = dynamic_cast(gQosOrch->getExecutor(CFG_QUEUE_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Drain QUEUE table + static_cast(gQosOrch)->doTask(); + // Drain SCHEDULER table + static_cast(gQosOrch)->doTask(); + // Make sure the dependency is cleared + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "scheduler", CFG_SCHEDULER_TABLE_NAME); + // And the sai remove API has been called + ASSERT_EQ(current_sai_remove_scheduler_count + 1, sai_remove_scheduler_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_SCHEDULER_TABLE_NAME]).count("scheduler.1"), 0); + // Other field should be untouched + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "wred_profile", CFG_WRED_PROFILE_TABLE_NAME, "AZURE_LOSSLESS"); + } + + TEST_F(QosOrchTest, QosOrchTestQueueReplaceFieldAndRemoveObject) + { + std::deque entries; + Table queueTable = Table(m_config_db.get(), CFG_QUEUE_TABLE_NAME); + 
auto queueConsumer = dynamic_cast(gQosOrch->getExecutor(CFG_QUEUE_TABLE_NAME)); + auto wredProfileConsumer = dynamic_cast(gQosOrch->getExecutor(CFG_WRED_PROFILE_TABLE_NAME)); + auto schedulerConsumer = dynamic_cast(gQosOrch->getExecutor(CFG_SCHEDULER_TABLE_NAME)); + + queueTable.set("Ethernet0|3", + { + {"scheduler", "scheduler.1"}, + {"wred_profile", "AZURE_LOSSLESS"} + }); + gQosOrch->addExistingData(&queueTable); + static_cast(gQosOrch)->doTask(); + + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.1"); + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "wred_profile", CFG_WRED_PROFILE_TABLE_NAME, "AZURE_LOSSLESS"); + + // Try replacing scheduler in QUEUE table: scheduler.1 => scheduler.0 + entries.push_back({"Ethernet0|3", "SET", + { + {"scheduler", "scheduler.0"}, + {"wred_profile", "AZURE_LOSSLESS"} + }}); + queueConsumer->addToSync(entries); + entries.clear(); + // Drain QUEUE table + static_cast(gQosOrch)->doTask(); + // Make sure the dependency is updated + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.0"); + // And the other field is not touched + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "wred_profile", CFG_WRED_PROFILE_TABLE_NAME, "AZURE_LOSSLESS"); + + RemoveItem(CFG_SCHEDULER_TABLE_NAME, "scheduler.1"); + auto current_sai_remove_scheduler_count = sai_remove_scheduler_count; + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(++current_sai_remove_scheduler_count, sai_remove_scheduler_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_SCHEDULER_TABLE_NAME]).count("scheduler.1"), 0); + + entries.push_back({"AZURE_LOSSLESS_1", "SET", + { + {"ecn", "ecn_all"}, + {"green_drop_probability", "5"}, + {"green_max_threshold", "2097152"}, + {"green_min_threshold", "1048576"}, + {"wred_green_enable", "true"}, + {"yellow_drop_probability", "5"}, + {"yellow_max_threshold", "2097152"}, + {"yellow_min_threshold", "1048576"}, + {"wred_yellow_enable", 
"true"}, + {"red_drop_probability", "5"}, + {"red_max_threshold", "2097152"}, + {"red_min_threshold", "1048576"}, + {"wred_red_enable", "true"} + }}); + wredProfileConsumer->addToSync(entries); + entries.clear(); + // Drain WRED_PROFILE table + static_cast(gQosOrch)->doTask(); + + // Replace wred_profile from Ethernet0 queue 3 + entries.push_back({"Ethernet0|3", "SET", + { + {"scheduler", "scheduler.0"}, + {"wred_profile", "AZURE_LOSSLESS_1"} + }}); + queueConsumer->addToSync(entries); + entries.clear(); + // Drain QUEUE table + static_cast(gQosOrch)->doTask(); + // Make sure the dependency is updated + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "wred_profile", CFG_WRED_PROFILE_TABLE_NAME, "AZURE_LOSSLESS_1"); + // And the other field is not touched + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.0"); + + RemoveItem(CFG_WRED_PROFILE_TABLE_NAME, "AZURE_LOSSLESS"); + // Drain WRED_PROFILE table + auto current_sai_remove_wred_profile_count = sai_remove_wred_profile_count; + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(++current_sai_remove_wred_profile_count, sai_remove_wred_profile_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_WRED_PROFILE_TABLE_NAME]).count("AZURE_LOSSLESS"), 0); + + // Remove object + entries.push_back({"Ethernet0|3", "DEL", {}}); + queueConsumer->addToSync(entries); + entries.clear(); + static_cast(gQosOrch)->doTask(); + + // Make sure the dependency is updated + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "wred_profile", CFG_WRED_PROFILE_TABLE_NAME); + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|3", "scheduler", CFG_SCHEDULER_TABLE_NAME); + + // Remove scheduler object + entries.push_back({"scheduler.0", "DEL", {}}); + schedulerConsumer->addToSync(entries); + entries.clear(); + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(++current_sai_remove_scheduler_count, sai_remove_scheduler_count); + 
ASSERT_EQ((*QosOrch::getTypeMap()[CFG_SCHEDULER_TABLE_NAME]).count("scheduler.0"), 0); + + // Remove wred profile object + entries.push_back({"AZURE_LOSSLESS_1", "DEL", {}}); + wredProfileConsumer->addToSync(entries); + entries.clear(); + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(++current_sai_remove_wred_profile_count, sai_remove_wred_profile_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_WRED_PROFILE_TABLE_NAME]).count("AZURE_LOSSLESS_1"), 0); + } + + TEST_F(QosOrchTest, QosOrchTestPortQosMapReplaceOneFieldAndRemoveObject) + { + std::deque entries; + Table portQosMapTable = Table(m_config_db.get(), CFG_PORT_QOS_MAP_TABLE_NAME); + + portQosMapTable.set("Ethernet0", + { + {"dscp_to_tc_map", "AZURE"}, + {"pfc_to_pg_map", "AZURE"}, + {"pfc_to_queue_map", "AZURE"}, + {"tc_to_pg_map", "AZURE"}, + {"tc_to_queue_map", "AZURE"}, + {"pfc_enable", "3,4"} + }); + + static_cast(gQosOrch)->doTask(); + + entries.push_back({"AZURE_1", "SET", + { + {"1", "0"}, + {"0", "1"} + }}); + + auto consumer = dynamic_cast(gQosOrch->getExecutor(CFG_DSCP_TO_TC_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Drain DSCP_TO_TC_MAP table + static_cast(gQosOrch)->doTask(); + + entries.push_back({"Ethernet0", "SET", + { + {"dscp_to_tc_map", "AZURE_1"}, + {"pfc_to_pg_map", "AZURE"}, + {"pfc_to_queue_map", "AZURE"}, + {"tc_to_pg_map", "AZURE"}, + {"tc_to_queue_map", "AZURE"}, + {"pfc_enable", "3,4"} + }}); + consumer = dynamic_cast(gQosOrch->getExecutor(CFG_PORT_QOS_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + // Drain PORT_QOS_MAP table + static_cast(gQosOrch)->doTask(); + // Dependency is updated + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE_1"); + + // Try removing AZURE from DSCP_TO_TC_MAP + RemoveItem(CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE"); + auto current_sai_remove_qos_map_count = sai_remove_qos_map_count; + static_cast(gQosOrch)->doTask(); + 
ASSERT_EQ(++current_sai_remove_qos_map_count, sai_remove_qos_map_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME]).count("AZURE"), 0); + + // Make sure other dependencies are not touched + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "pfc_to_pg_map", CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "pfc_to_queue_map", CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "tc_to_pg_map", CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, "AZURE"); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "tc_to_queue_map", CFG_TC_TO_QUEUE_MAP_TABLE_NAME, "AZURE"); + + // Remove port from PORT_QOS_MAP table + entries.push_back({"Ethernet0", "DEL", {}}); + consumer->addToSync(entries); + entries.clear(); + static_cast(gQosOrch)->doTask(); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_PORT_QOS_MAP_TABLE_NAME]).count("Ethernet0"), 0); + + // Make sure the maps can be removed now. Checking anyone should suffice since all the maps are handled in the same way. 
+ entries.push_back({"AZURE", "DEL", {}}); + consumer = dynamic_cast(gQosOrch->getExecutor(CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(++current_sai_remove_qos_map_count, sai_remove_qos_map_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME]).count("AZURE"), 0); + + entries.push_back({"AZURE_1", "DEL", {}}); + consumer = dynamic_cast(gQosOrch->getExecutor(CFG_DSCP_TO_TC_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + static_cast(gQosOrch)->doTask(); + ASSERT_EQ(++current_sai_remove_qos_map_count, sai_remove_qos_map_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME]).count("AZURE_1"), 0); + // Global dscp to tc map should be cleared + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE_1"].m_saiObjectId, SAI_NULL_OBJECT_ID); + } + + TEST_F(QosOrchTest, QosOrchTestPortQosMapReferencingObjRemoveThenAdd) + { + vector ts; + std::deque entries; + Table portQosMapTable = Table(m_config_db.get(), CFG_PORT_QOS_MAP_TABLE_NAME); + + portQosMapTable.set("Ethernet0", + { + {"dscp_to_tc_map", "AZURE"} + }); + gQosOrch->addExistingData(&portQosMapTable); + static_cast(gQosOrch)->doTask(); + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE"); + + // Remove referenced obj + entries.push_back({"AZURE", "DEL", {}}); + auto dscpToTcMapConsumer = dynamic_cast(gQosOrch->getExecutor(CFG_DSCP_TO_TC_MAP_TABLE_NAME)); + dscpToTcMapConsumer->addToSync(entries); + entries.clear(); + // Drain DSCP_TO_TC_MAP table + static_cast(gQosOrch)->doTask(); + // Make sure the dependency remains + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE"); + // Make sure the notification isn't drained + static_cast(gQosOrch)->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 
1); + ASSERT_EQ(ts[0], "DSCP_TO_TC_MAP|AZURE|DEL"); + ts.clear(); + + // Remove and readd referencing obj + entries.push_back({"Ethernet0", "DEL", {}}); + entries.push_back({"Ethernet0", "SET", + { + {"dscp_to_tc_map", "AZURE"} + }}); + auto portQosMapConsumer = dynamic_cast(gQosOrch->getExecutor(CFG_PORT_QOS_MAP_TABLE_NAME)); + portQosMapConsumer->addToSync(entries); + entries.clear(); + // Drain the PORT_QOS_MAP table + static_cast(gQosOrch)->doTask(); + // Drain the DSCP_TO_TC_MAP table which contains items need to retry + static_cast(gQosOrch)->doTask(); + // The dependency should be removed + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME); + static_cast(gQosOrch)->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 1); + ASSERT_EQ(ts[0], "PORT_QOS_MAP|Ethernet0|SET|dscp_to_tc_map:AZURE"); + ts.clear(); + + // Re-create referenced obj + entries.push_back({"AZURE", "SET", + { + {"1", "0"} + }}); + dscpToTcMapConsumer->addToSync(entries); + entries.clear(); + // Drain DSCP_TO_TC_MAP table + static_cast(gQosOrch)->doTask(); + // Make sure the dependency recovers + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE"); + + // All items have been drained + static_cast(gQosOrch)->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + // Remove and recreate the referenced obj + entries.push_back({"AZURE", "DEL", {}}); + entries.push_back({"AZURE", "SET", + { + {"1", "0"} + }}); + dscpToTcMapConsumer->addToSync(entries); + entries.clear(); + // Drain DSCP_TO_TC_MAP table + static_cast(gQosOrch)->doTask(); + // Make sure the dependency remains + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE"); + // Make sure the notification isn't drained + static_cast(gQosOrch)->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 2); + ASSERT_EQ(ts[0], "DSCP_TO_TC_MAP|AZURE|DEL"); + ASSERT_EQ(ts[1], 
"DSCP_TO_TC_MAP|AZURE|SET|1:0"); + ts.clear(); + } + + TEST_F(QosOrchTest, QosOrchTestQueueReferencingObjRemoveThenAdd) + { + vector ts; + std::deque entries; + Table queueTable = Table(m_config_db.get(), CFG_QUEUE_TABLE_NAME); + + queueTable.set("Ethernet0|0", + { + {"scheduler", "scheduler.0"} + }); + gQosOrch->addExistingData(&queueTable); + static_cast(gQosOrch)->doTask(); + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|0", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.0"); + + // Remove referenced obj + entries.push_back({"scheduler.0", "DEL", {}}); + auto schedulerConsumer = dynamic_cast(gQosOrch->getExecutor(CFG_SCHEDULER_TABLE_NAME)); + schedulerConsumer->addToSync(entries); + entries.clear(); + // Drain SCHEDULER table + static_cast(gQosOrch)->doTask(); + // Make sure the dependency remains + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|0", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.0"); + static_cast(gQosOrch)->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 1); + ASSERT_EQ(ts[0], "SCHEDULER|scheduler.0|DEL"); + ts.clear(); + + // Remove and readd referencing obj + entries.push_back({"Ethernet0|0", "DEL", {}}); + entries.push_back({"Ethernet0|0", "SET", + { + {"scheduler", "scheduler.0"} + }}); + auto queueConsumer = dynamic_cast(gQosOrch->getExecutor(CFG_QUEUE_TABLE_NAME)); + queueConsumer->addToSync(entries); + entries.clear(); + // Drain QUEUE table + static_cast(gQosOrch)->doTask(); + // Drain SCHEDULER table + static_cast(gQosOrch)->doTask(); + // The dependency should be removed + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|0", "scheduler", CFG_SCHEDULER_TABLE_NAME); + static_cast(gQosOrch)->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 1); + ASSERT_EQ(ts[0], "QUEUE|Ethernet0|0|SET|scheduler:scheduler.0"); + ts.clear(); + + // Re-create referenced obj + entries.push_back({"scheduler.0", "SET", + { + {"type", "DWRR"}, + {"weight", "14"} + }}); + schedulerConsumer->addToSync(entries); + entries.clear(); + // Drain SCHEDULER table + 
static_cast(gQosOrch)->doTask(); + // Drain QUEUE table + static_cast(gQosOrch)->doTask(); + // Make sure the dependency recovers + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|0", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.0"); + + // All items have been drained + static_cast(gQosOrch)->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + + // Remove and then re-add the referenced obj + entries.push_back({"scheduler.0", "DEL", {}}); + entries.push_back({"scheduler.0", "SET", + { + {"type", "DWRR"}, + {"weight", "14"} + }}); + schedulerConsumer->addToSync(entries); + entries.clear(); + // Drain SCHEDULER table + static_cast(gQosOrch)->doTask(); + // Make sure the dependency remains + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|0", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.0"); + static_cast(gQosOrch)->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 2); + ASSERT_EQ(ts[0], "SCHEDULER|scheduler.0|DEL"); + ASSERT_EQ(ts[1], "SCHEDULER|scheduler.0|SET|type:DWRR|weight:14"); + ts.clear(); + } + + TEST_F(QosOrchTest, QosOrchTestGlobalDscpToTcMap) + { + // Create a new dscp to tc map + std::deque entries; + entries.push_back({"AZURE", "SET", + { + {"1", "0"}, + {"0", "1"} + }}); + + auto consumer = dynamic_cast(gQosOrch->getExecutor(CFG_DSCP_TO_TC_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + + entries.push_back({"global", "SET", + { + {"dscp_to_tc_map", "AZURE"} + }}); + consumer = dynamic_cast(gQosOrch->getExecutor(CFG_PORT_QOS_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + + // Drain DSCP_TO_TC_MAP and PORT_QOS_MAP table + static_cast(gQosOrch)->doTask(); + // Check DSCP_TO_TC_MAP|AZURE is applied to switch + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME])["AZURE"].m_saiObjectId, switch_dscp_to_tc_map_id); + + // Remove global DSCP_TO_TC_MAP + entries.push_back({"global", "DEL", {}}); + consumer = dynamic_cast(gQosOrch->getExecutor(CFG_PORT_QOS_MAP_TABLE_NAME)); + 
consumer->addToSync(entries); + entries.clear(); + // Drain PORT_QOS_TABLE table + static_cast(gQosOrch)->doTask(); + // Check switch_level dscp_to_tc_map is set to NULL + ASSERT_EQ(SAI_NULL_OBJECT_ID, switch_dscp_to_tc_map_id); + + entries.push_back({"AZURE", "DEL", {}}); + consumer = dynamic_cast(gQosOrch->getExecutor(CFG_DSCP_TO_TC_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + + auto current_sai_remove_qos_map_count = sai_remove_qos_map_count; + // Drain DSCP_TO_TC_MAP table + static_cast(gQosOrch)->doTask(); + // Check DSCP_TO_TC_MAP|AZURE is removed, and the switch_level dscp_to_tc_map is set to NULL + ASSERT_EQ(current_sai_remove_qos_map_count + 1, sai_remove_qos_map_count); + ASSERT_EQ((*QosOrch::getTypeMap()[CFG_DSCP_TO_TC_MAP_TABLE_NAME]).count("AZURE"), 0); + + } + + TEST_F(QosOrchTest, QosOrchTestRetryFirstItem) + { + // There was a bug in QosOrch that the 2nd notifications and after can not be handled, eg the 1st one needs to be retried + // This is to verify the bug has been fixed + vector ts; + std::deque entries; + + // Try adding dscp_to_tc_map AZURE.1 and AZURE to PORT_QOS_MAP table + // The object AZURE.1 does not exist so the first item can not be handled and remain in m_toSync. + entries.push_back({"Ethernet0", "SET", + { + {"dscp_to_tc_map", "AZURE.1"} + }}); + entries.push_back({"Ethernet4", "SET", + { + {"dscp_to_tc_map", "AZURE"} + }}); + auto portQosMapConsumer = dynamic_cast(gQosOrch->getExecutor(CFG_PORT_QOS_MAP_TABLE_NAME)); + portQosMapConsumer->addToSync(entries); + entries.clear(); + static_cast(gQosOrch)->doTask(); + // The 2nd notification should be handled. 
Make sure by checking reference + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet4", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE"); + // Make sure there is one item left + portQosMapConsumer->dumpPendingTasks(ts); + ASSERT_EQ(ts[0], "PORT_QOS_MAP|Ethernet0|SET|dscp_to_tc_map:AZURE.1"); + ASSERT_EQ(ts.size(), 1); + ts.clear(); + + // Try adding scheduler.0 and scheduler.2 to QUEUE table + entries.push_back({"Ethernet0|0", "SET", + { + {"scheduler", "scheduler.2"} + }}); + entries.push_back({"Ethernet0|1", "SET", + { + {"scheduler", "scheduler.0"} + }}); + auto queueConsumer = dynamic_cast(gQosOrch->getExecutor(CFG_QUEUE_TABLE_NAME)); + queueConsumer->addToSync(entries); + entries.clear(); + static_cast(gQosOrch)->doTask(); + // The 2nd notification should be handled. Make sure by checking reference + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|1", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.0"); + // Make sure there is one item left + queueConsumer->dumpPendingTasks(ts); + ASSERT_EQ(ts[0], "QUEUE|Ethernet0|0|SET|scheduler:scheduler.2"); + ASSERT_EQ(ts.size(), 1); + ts.clear(); + + // Try removing AZURE and adding AZURE.1 to DSCP_TO_TC_MAP table + entries.push_back({"AZURE", "DEL", {{}}}); + entries.push_back({"AZURE.1", "SET", + { + {"1", "1"} + }}); + auto consumer = dynamic_cast(gQosOrch->getExecutor(CFG_DSCP_TO_TC_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + static_cast(gQosOrch)->doTask(); + // The 2nd notification should be handled. 
Make sure by checking reference + CheckDependency(CFG_PORT_QOS_MAP_TABLE_NAME, "Ethernet0", "dscp_to_tc_map", CFG_DSCP_TO_TC_MAP_TABLE_NAME, "AZURE.1"); + // The pending item in PORT_QOS_MAP table should also be handled since the dependency is met + portQosMapConsumer->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + consumer->dumpPendingTasks(ts); + ASSERT_EQ(ts[0], "DSCP_TO_TC_MAP|AZURE|DEL|:"); + ASSERT_EQ(ts.size(), 1); + ts.clear(); + + entries.push_back({"scheduler.0", "DEL", {{}}}); + entries.push_back({"scheduler.2", "SET", + { + {"type", "DWRR"}, + {"weight", "15"} + }}); + consumer = dynamic_cast(gQosOrch->getExecutor(CFG_SCHEDULER_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + static_cast(gQosOrch)->doTask(); + // We need a second call to "doTask" because scheduler table is handled after queue table + static_cast(gQosOrch)->doTask(); + // The 2nd notification should be handled. Make sure by checking reference + CheckDependency(CFG_QUEUE_TABLE_NAME, "Ethernet0|0", "scheduler", CFG_SCHEDULER_TABLE_NAME, "scheduler.2"); + // The pending item in QUEUE table should also be handled since the dependency is met + queueConsumer->dumpPendingTasks(ts); + ASSERT_TRUE(ts.empty()); + consumer->dumpPendingTasks(ts); + ASSERT_EQ(ts[0], "SCHEDULER|scheduler.0|DEL|:"); + ASSERT_EQ(ts.size(), 1); + ts.clear(); + } + + /* + * There are 4 ECN ranges + * ------------------------------------------------------------------------------- + * profile lower min=1M max=2M + * profile upper min=3M max=4M + * proile middle min=1.5M max=1.5M + * ------------------------------------------------------------------------------- + * Test step Test case + * 1. Initialize a wred profile with value lower set Wred profile intialization + * 2. Update the value to upper set The new min threshold is greater than the current max threshold + * 3. Update the value back to lower set The new max threshold is less than the current min threshold + * 4. 
Update the value to middle set Normal case to ensure nothing broken + * 5. Update the value back to lower set Normal case to ensure nothing broken + */ + TEST_F(QosOrchTest, QosOrchTestWredThresholdsTest) + { + testing_wred_thresholds = true; + + // The order of fields matters when the wred profile is updated from the upper set to the lower set + // It should be max, min for each color. In this order, the new max is less then the current min + // QoS orchagent should guarantee that the new min is configured first and then new max + vector lowerSetVector = { + {"ecn", "ecn_all"}, + {"green_drop_probability", "5"}, + {"green_max_threshold", "2097152"}, + {"green_min_threshold", "1048576"}, + {"wred_green_enable", "true"}, + {"yellow_drop_probability", "5"}, + {"yellow_max_threshold", "2097153"}, + {"yellow_min_threshold", "1048577"}, + {"wred_yellow_enable", "true"}, + {"red_drop_probability", "5"}, + {"red_max_threshold", "2097154"}, + {"red_min_threshold", "1048578"}, + {"wred_red_enable", "true"} + }; + WredMapHandler::qos_wred_thresholds_t lowerThresholds = { + 2097152, //green_max_threshold + 1048576, //green_min_threshold + 2097153, //yellow_max_threshold + 1048577, //yellow_min_threshold + 2097154, //red_max_threshold + 1048578 //red_min_threshold + }; + // The order of fields matters when the wred profile is updated from the lower set to the upper set + // It should be min, max for each color, in which the new min is larger then the current max + // QoS orchagent should guarantee that the new max is configured first and then new min + vector upperSetVector = { + {"ecn", "ecn_all"}, + {"green_drop_probability", "5"}, + {"green_min_threshold", "3145728"}, + {"green_max_threshold", "4194304"}, + {"wred_green_enable", "true"}, + {"yellow_drop_probability", "5"}, + {"yellow_min_threshold", "3145729"}, + {"yellow_max_threshold", "4194305"}, + {"wred_yellow_enable", "true"}, + {"red_drop_probability", "5"}, + {"red_min_threshold", "3145730"}, + {"red_max_threshold", 
"4194306"}, + {"wred_red_enable", "true"} + }; + WredMapHandler::qos_wred_thresholds_t upperThresholds = { + 4194304, //green_max_threshold + 3145728, //green_min_threshold + 4194305, //yellow_max_threshold + 3145729, //yellow_min_threshold + 4194306, //red_max_threshold + 3145730 //red_min_threshold + }; + // Order doesn't matter. + vector middleSetVector = { + {"ecn", "ecn_all"}, + {"green_drop_probability", "5"}, + {"green_min_threshold", "1572864"}, + {"green_max_threshold", "2621440"}, + {"wred_green_enable", "true"}, + {"yellow_drop_probability", "5"}, + {"yellow_min_threshold", "1572865"}, + {"yellow_max_threshold", "2621441"}, + {"wred_yellow_enable", "true"}, + {"red_drop_probability", "5"}, + {"red_min_threshold", "1572866"}, + {"red_max_threshold", "2621442"}, + {"wred_red_enable", "true"} + }; + WredMapHandler::qos_wred_thresholds_t middleThresholds = { + 2621440, //green_max_threshold + 1572864, //green_min_threshold + 2621441, //yellow_max_threshold + 1572865, //yellow_min_threshold + 2621442, //red_max_threshold + 1572866 //red_min_threshold + }; + + // Wrong profile + vector greenWrongVector = { + {"ecn", "ecn_green"}, + {"green_drop_probability", "5"}, + {"green_min_threshold", "2621440"}, + {"green_max_threshold", "1572864"}, + {"wred_green_enable", "true"} + }; + + vector yellowWrongVector = { + {"ecn", "ecn_yellow"}, + {"yellow_drop_probability", "5"}, + {"yellow_min_threshold", "2621441"}, + {"yellow_max_threshold", "1572865"}, + {"wred_yellow_enable", "true"} + }; + + vector redWrongVector = { + {"ecn", "ecn_red"}, + {"red_drop_probability", "5"}, + {"red_min_threshold", "2621442"}, + {"red_max_threshold", "1572866"}, + {"wred_red_enable", "true"} + }; + + std::deque entries; + // 1. Initialize + updateWredProfileAndCheck(lowerSetVector, lowerThresholds); + + // 2. Update the thresholds from the lower set to the upper set + updateWredProfileAndCheck(upperSetVector, upperThresholds); + + // 3. 
Update the thresholds from the upper set back to the lower set + updateWredProfileAndCheck(lowerSetVector, lowerThresholds); + + // 4. Update the thresholds from the lower set to the middle set + updateWredProfileAndCheck(middleSetVector, middleThresholds); + + // 5. Update the thresholds from the middle set back to the lower set + updateWredProfileAndCheck(lowerSetVector, lowerThresholds); + + // Wrong parameters + updateWrongWredProfileAndCheck(greenWrongVector); + updateWrongWredProfileAndCheck(yellowWrongVector); + updateWrongWredProfileAndCheck(redWrongVector); + + // Make sure the profiles in orchagent and SAI are not updated by the wrong profile + checkWredProfileEqual("AZURE", saiThresholds); + checkWredProfileEqual("AZURE", lowerThresholds); + + testing_wred_thresholds = false; + } + + TEST_F(QosOrchTest, QosOrchTestWredDropProbability) + { + testing_wred_thresholds = true; + + // The order of fields matters when the wred profile is updated from the upper set to the lower set + // It should be max, min for each color. 
In this order, the new max is less then the current min + // QoS orchagent should guarantee that the new min is configured first and then new max + vector greenProfile = { + {"wred_green_enable", "true"}, + {"wred_yellow_enable", "false"}, + }; + qos_wred_max_drop_probability_t greenProbabilities = { + 100, // green_max_drop_probability + 0, // yellow_max_drop_probability + 0 // red_max_drop_probability + }; + updateMaxDropProbabilityAndCheck("green_default", greenProfile, greenProbabilities); + + greenProfile.push_back({"green_drop_probability", "5"}); + greenProbabilities.green_max_drop_probability = 5; + updateMaxDropProbabilityAndCheck("green", greenProfile, greenProbabilities); + + vector yellowProfile = { + {"wred_yellow_enable", "true"}, + {"wred_red_enable", "false"}, + }; + qos_wred_max_drop_probability_t yellowProbabilities = { + 0, // green_max_drop_probability + 100, // yellow_max_drop_probability + 0 // red_max_drop_probability + }; + updateMaxDropProbabilityAndCheck("yellow_default", yellowProfile, yellowProbabilities); + + yellowProfile.push_back({"yellow_drop_probability", "5"}); + yellowProbabilities.yellow_max_drop_probability = 5; + updateMaxDropProbabilityAndCheck("yellow", yellowProfile, yellowProbabilities); + + vector redProfile = { + {"wred_green_enable", "false"}, + {"wred_red_enable", "true"}, + }; + qos_wred_max_drop_probability_t redProbabilities = { + 0, // green_max_drop_probability + 0, // yellow_max_drop_probability + 100 // red_max_drop_probability + }; + updateMaxDropProbabilityAndCheck("red_default", redProfile, redProbabilities); + + redProfile.push_back({"red_drop_probability", "5"}); + redProbabilities.red_max_drop_probability = 5; + updateMaxDropProbabilityAndCheck("red", redProfile, redProbabilities); + + testing_wred_thresholds = false; + } + + + /* + * Make sure empty fields won't cause orchagent crash + */ + TEST_F(QosOrchTest, QosOrchTestEmptyField) + { + // Create a new dscp to tc map + std::deque entries; + 
entries.push_back({"Ethernet0", "SET", + { + {"dscp_to_tc_map", ""} + }}); + auto consumer = dynamic_cast(gQosOrch->getExecutor(CFG_PORT_QOS_MAP_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + + entries.push_back({"Ethernet0|3", "SET", + { + {"scheduler", ""} + }}); + entries.push_back({"Ethernet0|4", "SET", + { + {"wred_profile", ""} + }}); + consumer = dynamic_cast(gQosOrch->getExecutor(CFG_QUEUE_TABLE_NAME)); + consumer->addToSync(entries); + entries.clear(); + + // Drain DSCP_TO_TC_MAP and PORT_QOS_MAP table + static_cast(gQosOrch)->doTask(); + } + + /* + * Set tunnel QoS attribute test - OA should skip settings + */ + TEST_F(QosOrchTest, QosOrchTestSetTunnelQoSAttribute) + { + // Create a new dscp to tc map + Table tcToDscpMapTable = Table(m_config_db.get(), CFG_TC_TO_DSCP_MAP_TABLE_NAME); + tcToDscpMapTable.set("AZURE", + { + {"0", "0"}, + {"1", "1"} + }); + gQosOrch->addExistingData(&tcToDscpMapTable); + static_cast(gQosOrch)->doTask(); + + std::deque entries; + entries.push_back({"MuxTunnel0", "SET", + { + {"decap_dscp_to_tc_map", "AZURE"}, + {"decap_tc_to_pg_map", "AZURE"}, + {"dscp_mode", "pipe"}, + {"dst_ip", "10.1.0.32"}, + {"encap_tc_to_dscp_map", "AZURE"}, + {"encap_tc_to_queue_map", "AZURE"}, + {"src_ip", "10.1.0.33"}, + {"ttl_mode", "pipe"}, + {"tunnel_type", "IPINIP"} + }}); + entries.push_back({"MuxTunnel1", "SET", + { + {"decap_dscp_to_tc_map", "AZURE"}, + {"dscp_mode", "pipe"}, + {"dst_ip", "10.1.0.32"}, + {"encap_tc_to_dscp_map", "AZURE"}, + {"encap_tc_to_queue_map", "AZURE"}, + {"src_ip", "10.1.0.33"}, + {"ttl_mode", "pipe"}, + {"tunnel_type", "IPINIP"} + }}); + auto consumer = dynamic_cast(tunnel_decap_orch->getExecutor(APP_TUNNEL_DECAP_TABLE_NAME)); + consumer->addToSync(entries); + // Drain TUNNEL_DECAP_TABLE table + static_cast(tunnel_decap_orch)->doTask(); + entries.clear(); + + // Set an attribute that is not supported by vendor + entries.push_back({"MuxTunnel1", "SET", + { + {"decap_tc_to_pg_map", "AZURE"} + }}); + 
consumer->addToSync(entries); + // Drain TUNNEL_DECAP_TABLE table + static_cast(tunnel_decap_orch)->doTask(); + entries.clear(); + + // Set attributes for the 2nd time + entries.push_back({"MuxTunnel0", "SET", + { + {"encap_ecn_mode", "standard"} + }}); + consumer->addToSync(entries); + // Drain TUNNEL_DECAP_TABLE table + static_cast(tunnel_decap_orch)->doTask(); + entries.clear(); + + // Set attributes for the 2nd time + entries.push_back({"MuxTunnel1", "SET", + { + {"ecn_mode", "copy_from_outer"} + }}); + consumer->addToSync(entries); + // Drain TUNNEL_DECAP_TABLE table + static_cast(tunnel_decap_orch)->doTask(); + entries.clear(); + } +} diff --git a/tests/mock_tests/response_publisher/response_publisher_ut.cpp b/tests/mock_tests/response_publisher/response_publisher_ut.cpp new file mode 100644 index 0000000000..9e836bad04 --- /dev/null +++ b/tests/mock_tests/response_publisher/response_publisher_ut.cpp @@ -0,0 +1,32 @@ +#include "response_publisher.h" + +#include + +using namespace swss; + +TEST(ResponsePublisher, TestPublish) +{ + DBConnector conn{"APPL_STATE_DB", 0}; + Table stateTable{&conn, "SOME_TABLE"}; + std::string value; + ResponsePublisher publisher{}; + + publisher.publish("SOME_TABLE", "SOME_KEY", {{"field", "value"}}, ReturnCode(SAI_STATUS_SUCCESS)); + ASSERT_TRUE(stateTable.hget("SOME_KEY", "field", value)); + ASSERT_EQ(value, "value"); +} + +TEST(ResponsePublisher, TestPublishBuffered) +{ + DBConnector conn{"APPL_STATE_DB", 0}; + Table stateTable{&conn, "SOME_TABLE"}; + std::string value; + ResponsePublisher publisher{}; + + publisher.setBuffered(true); + + publisher.publish("SOME_TABLE", "SOME_KEY", {{"field", "value"}}, ReturnCode(SAI_STATUS_SUCCESS)); + publisher.flush(); + ASSERT_TRUE(stateTable.hget("SOME_KEY", "field", value)); + ASSERT_EQ(value, "value"); +} diff --git a/tests/mock_tests/routeorch_ut.cpp b/tests/mock_tests/routeorch_ut.cpp index 84f92a088c..091dabed6a 100644 --- a/tests/mock_tests/routeorch_ut.cpp +++ 
b/tests/mock_tests/routeorch_ut.cpp @@ -7,10 +7,14 @@ #include "ut_helper.h" #include "mock_orchagent_main.h" #include "mock_table.h" +#include "mock_response_publisher.h" #include "bulker.h" extern string gMySwitchType; +extern std::unique_ptr gMockResponsePublisher; + +using ::testing::_; namespace routeorch_test { @@ -176,14 +180,20 @@ namespace routeorch_test { APP_LAG_MEMBER_TABLE_NAME, portsorch_base_pri } }; + ASSERT_EQ(gPortsOrch, nullptr); + gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + vector flex_counter_tables = { CFG_FLEX_COUNTER_TABLE_NAME }; auto* flexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); gDirectory.set(flexCounterOrch); - ASSERT_EQ(gPortsOrch, nullptr); - gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + static const vector route_pattern_tables = { + CFG_FLOW_COUNTER_ROUTE_PATTERN_TABLE_NAME, + }; + gFlowCounterRouteOrch = new FlowCounterRouteOrch(m_config_db.get(), route_pattern_tables); + gDirectory.set(gFlowCounterRouteOrch); ASSERT_EQ(gVrfOrch, nullptr); gVrfOrch = new VRFOrch(m_app_db.get(), APP_VRF_TABLE_NAME, m_state_db.get(), STATE_VRF_OBJECT_TABLE_NAME); @@ -276,6 +286,10 @@ namespace routeorch_test {"mac_addr", "00:00:00:00:00:00" }}); intfTable.set("Ethernet0:10.0.0.1/24", { { "scope", "global" }, { "family", "IPv4" }}); + intfTable.set("Ethernet4", { {"NULL", "NULL" }, + {"mac_addr", "00:00:00:00:00:00" }}); + intfTable.set("Ethernet4:11.0.0.1/32", { { "scope", "global" }, + { "family", "IPv4" }}); gIntfsOrch->addExistingData(&intfTable); static_cast(gIntfsOrch)->doTask(); @@ -416,4 +430,58 @@ namespace routeorch_test ASSERT_EQ(current_set_count + 1, set_route_count); ASSERT_EQ(sai_fail_count, 0); } + + TEST_F(RouteOrchTest, RouteOrchTestSetDelResponse) + { + gMockResponsePublisher = std::make_unique(); + + std::deque entries; + std::string key = "2.2.2.0/24"; + std::vector fvs{{"ifname", 
"Ethernet0,Ethernet0"}, {"nexthop", "10.0.0.2,10.0.0.3"}, {"protocol", "bgp"}}; + entries.push_back({key, "SET", fvs}); + + auto consumer = dynamic_cast(gRouteOrch->getExecutor(APP_ROUTE_TABLE_NAME)); + consumer->addToSync(entries); + + EXPECT_CALL(*gMockResponsePublisher, publish(APP_ROUTE_TABLE_NAME, key, std::vector{{"protocol", "bgp"}}, ReturnCode(SAI_STATUS_SUCCESS), false)).Times(1); + static_cast(gRouteOrch)->doTask(); + + // add entries again to the consumer queue (in case of rapid DEL/SET operations from fpmsyncd, routeorch just gets the last SET update) + consumer->addToSync(entries); + + EXPECT_CALL(*gMockResponsePublisher, publish(APP_ROUTE_TABLE_NAME, key, std::vector{{"protocol", "bgp"}}, ReturnCode(SAI_STATUS_SUCCESS), false)).Times(1); + static_cast(gRouteOrch)->doTask(); + + entries.clear(); + + // Route deletion + + entries.clear(); + entries.push_back({key, "DEL", {}}); + + consumer->addToSync(entries); + + EXPECT_CALL(*gMockResponsePublisher, publish(APP_ROUTE_TABLE_NAME, key, std::vector{}, ReturnCode(SAI_STATUS_SUCCESS), false)).Times(1); + static_cast(gRouteOrch)->doTask(); + + gMockResponsePublisher.reset(); + } + + TEST_F(RouteOrchTest, RouteOrchSetFullMaskSubnetPrefix) + { + gMockResponsePublisher = std::make_unique(); + + std::deque entries; + std::string key = "11.0.0.1/32"; + std::vector fvs{{"ifname", "Ethernet4"}, {"nexthop", "0.0.0.0"}, {"protocol", "bgp"}}; + entries.push_back({key, "SET", fvs}); + + auto consumer = dynamic_cast(gRouteOrch->getExecutor(APP_ROUTE_TABLE_NAME)); + consumer->addToSync(entries); + + EXPECT_CALL(*gMockResponsePublisher, publish(APP_ROUTE_TABLE_NAME, key, std::vector{{"protocol", "bgp"}}, ReturnCode(SAI_STATUS_SUCCESS), false)).Times(1); + static_cast(gRouteOrch)->doTask(); + + gMockResponsePublisher.reset(); + } } diff --git a/tests/mock_tests/sflowmgrd_ut.cpp b/tests/mock_tests/sflowmgrd_ut.cpp new file mode 100644 index 0000000000..7e47b162f2 --- /dev/null +++ b/tests/mock_tests/sflowmgrd_ut.cpp @@ -0,0 
+1,320 @@ +#include "gtest/gtest.h" +#include "mock_table.h" +#include "redisutility.h" +#include "sflowmgr.h" + +namespace sflowmgr_ut +{ + using namespace swss; + using namespace std; + + struct SflowMgrTest : public ::testing::Test + { + shared_ptr m_app_db; + shared_ptr m_config_db; + shared_ptr m_state_db; + shared_ptr m_sflowMgr; + SflowMgrTest() + { + m_app_db = make_shared( + "APPL_DB", 0); + m_config_db = make_shared( + "CONFIG_DB", 0); + m_state_db = make_shared( + "STATE_DB", 0); + } + + virtual void SetUp() override + { + ::testing_db::reset(); + TableConnector conf_port_table(m_config_db.get(), CFG_PORT_TABLE_NAME); + TableConnector state_port_table(m_state_db.get(), STATE_PORT_TABLE_NAME); + TableConnector conf_sflow_table(m_config_db.get(), CFG_SFLOW_TABLE_NAME); + TableConnector conf_sflow_session_table(m_config_db.get(), CFG_SFLOW_SESSION_TABLE_NAME); + + vector sflow_tables = { + conf_port_table, + state_port_table, + conf_sflow_table, + conf_sflow_session_table + }; + m_sflowMgr.reset(new SflowMgr(m_app_db.get(), sflow_tables)); + } + + void enableSflow() + { + Table cfg_sflow(m_config_db.get(), CFG_SFLOW_TABLE_NAME); + cfg_sflow.set("global", { + {"admin_state", "up"} + }); + m_sflowMgr->addExistingData(&cfg_sflow); + m_sflowMgr->doTask(); + } + + void cfgSflowSession(string alias, bool status, string sample_rate, string direction = "") + { + Table cfg_sflow_table(m_config_db.get(), CFG_SFLOW_SESSION_TABLE_NAME); + vector values; + values.emplace_back("admin_state", status ? 
"up" : "down"); + if (!sample_rate.empty()) + { + values.emplace_back("sample_rate", sample_rate); + } + if (!direction.empty()) + { + values.emplace_back("sample_direction", direction); + } + cfg_sflow_table.set(alias, values); + m_sflowMgr->addExistingData(&cfg_sflow_table); + m_sflowMgr->doTask(); + } + + void cfgSflowSessionAll(bool status) + { + Table cfg_sflow_table(m_config_db.get(), CFG_SFLOW_SESSION_TABLE_NAME); + cfg_sflow_table.set("all", { + {"admin_state", status ? "up" : "down"}, + }); + m_sflowMgr->addExistingData(&cfg_sflow_table); + m_sflowMgr->doTask(); + } + + void cfgPortSpeed(string alias, string speed) + { + Table cfg_port_table(m_config_db.get(), CFG_PORT_TABLE_NAME); + cfg_port_table.set(alias, { + {"speed", speed} + }); + m_sflowMgr->addExistingData(&cfg_port_table); + m_sflowMgr->doTask(); + } + + void statePortSpeed(string alias, string speed) + { + Table state_port_table(m_config_db.get(), STATE_PORT_TABLE_NAME); + state_port_table.set(alias, { + {"speed", speed} + }); + m_sflowMgr->addExistingData(&state_port_table); + m_sflowMgr->doTask(); + } + + string getSflowSampleRate(string alias) + { + Table appl_sflow_table(m_app_db.get(), APP_SFLOW_SESSION_TABLE_NAME); + std::vector values; + appl_sflow_table.get(alias, values); + auto value_rate = swss::fvsGetValue(values, "sample_rate", true); + if (value_rate) + { + string ret = value_rate.get(); + return ret; + } + return ""; + } + + string getSflowSampleDir(string alias) + { + Table appl_sflow_table(m_app_db.get(), APP_SFLOW_SESSION_TABLE_NAME); + std::vector values; + appl_sflow_table.get(alias, values); + auto value_rate = swss::fvsGetValue(values, "sample_direction", true); + if (value_rate) + { + string ret = value_rate.get(); + return ret; + } + return ""; + } + + string getSflowAdminStatus(string alias) + { + Table appl_sflow_table(m_app_db.get(), APP_SFLOW_SESSION_TABLE_NAME); + std::vector values; + appl_sflow_table.get(alias, values); + auto value_rate = swss::fvsGetValue(values, 
"admin_state", true); + if (value_rate) + { + string ret = value_rate.get(); + return ret; + } + return "down"; + } + }; + + TEST_F(SflowMgrTest, test_RateConfiguration) + { + enableSflow(); + cfgPortSpeed("Ethernet0", "100000"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "100000"); + + /* Scenario: Operational Speed Changes to 25000 */ + statePortSpeed("Ethernet0", "25000"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "25000"); + ASSERT_TRUE(getSflowSampleDir("Ethernet0") == "rx"); + } + + TEST_F(SflowMgrTest, test_RateConfigurationCfgSpeed) + { + enableSflow(); + /* Configure the Speed to 100G */ + cfgPortSpeed("Ethernet0", "100000"); + + /* Scenario: Operational Speed Changes to 100G with autoneg */ + statePortSpeed("Ethernet0", "100000"); + + /* User changes the config speed to 10G */ + cfgPortSpeed("Ethernet0", "10000"); + + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "100000"); + + /* Scenario: Operational Speed Changes to 10G, with autoneg */ + statePortSpeed("Ethernet0", "10000"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "10000"); + + /* Configured speed is updated by user */ + cfgPortSpeed("Ethernet0", "200000"); + + /* Sampling Rate will not be updated */ + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "10000"); + } + + TEST_F(SflowMgrTest, test_OnlyStateDbNotif) + { + enableSflow(); + statePortSpeed("Ethernet0", "100000"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == ""); + } + + TEST_F(SflowMgrTest, test_LocalRateConfiguration) + { + enableSflow(); + cfgPortSpeed("Ethernet0", "100000"); + cfgSflowSession("Ethernet0", true, "12345"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "12345"); + } + + TEST_F(SflowMgrTest, test_LocalRateConfWithOperSpeed) + { + enableSflow(); + cfgPortSpeed("Ethernet0", "100000"); + + /* Scenario: Operational Speed Changes to 25000 */ + statePortSpeed("Ethernet0", "25000"); + + /* Set per interface sampling rate*/ + cfgSflowSession("Ethernet0", true, "12345"); + 
ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "12345"); + + /* Operational Speed Changes again to 50000 */ + statePortSpeed("Ethernet0", "50000"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "12345"); + } + + TEST_F(SflowMgrTest, test_newSpeed) + { + enableSflow(); + cfgPortSpeed("Ethernet0", "800000"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "800000"); + } + + TEST_F(SflowMgrTest, test_CfgSpeedAdminCfg) + { + enableSflow(); + cfgPortSpeed("Ethernet0", "100000"); + cfgSflowSessionAll(false); /* Disable sflow on all interfaces*/ + ASSERT_TRUE(getSflowAdminStatus("Ethernet0") == "down"); + cfgSflowSession("Ethernet0", true, ""); /* Set local admin up with no rate */ + ASSERT_TRUE(getSflowAdminStatus("Ethernet0") == "up"); + + /* Sampling rate should adhere to config speed*/ + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "100000"); + + cfgPortSpeed("Ethernet0", "25000"); /* Change cfg speed */ + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "25000"); + } + + TEST_F(SflowMgrTest, test_OperSpeedAdminCfg) + { + enableSflow(); + cfgPortSpeed("Ethernet0", "100000"); + cfgSflowSessionAll(false); /* Disable sflow on all interfaces*/ + cfgSflowSession("Ethernet0", true, ""); /* Set local admin up with no rate */ + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "100000"); + ASSERT_TRUE(getSflowAdminStatus("Ethernet0") == "up"); + + statePortSpeed("Ethernet0", "50000"); + /* Sampling rate should adhere to oper speed*/ + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "50000"); + ASSERT_TRUE(getSflowAdminStatus("Ethernet0") == "up"); + + /* Change cfg speed */ + cfgPortSpeed("Ethernet0", "25000"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "50000"); + + statePortSpeed("Ethernet0", "1000"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "1000"); + + cfgSflowSession("Ethernet0", true, "12345"); /* Set local sampling rate */ + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "12345"); + ASSERT_TRUE(getSflowAdminStatus("Ethernet0") == "up"); + + /* Change oper 
speed now */ + statePortSpeed("Ethernet0", "12345"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "12345"); + } + + TEST_F(SflowMgrTest, test_SflowCfgAfterPortCfg) + { + cfgPortSpeed("Ethernet0", "100000"); + /* Nothing is written yet since cfg is not enabled */ + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == ""); + ASSERT_TRUE(getSflowAdminStatus("Ethernet0") == "down"); + + /* State DB is updated with oper speed */ + statePortSpeed("Ethernet0", "100000"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == ""); + ASSERT_TRUE(getSflowAdminStatus("Ethernet0") == "down"); + + /* enable sflow */ + enableSflow(); + cfgSflowSessionAll(true); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "100000"); + ASSERT_TRUE(getSflowAdminStatus("Ethernet0") == "up"); + ASSERT_TRUE(getSflowSampleDir("Ethernet0") == "rx"); + } + + TEST_F(SflowMgrTest, test_SflowCfgAfterOperSpeed) + { + cfgPortSpeed("Ethernet0", "100000"); + /* Nothing is written yet since cfg is not enabled */ + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == ""); + ASSERT_TRUE(getSflowAdminStatus("Ethernet0") == "down"); + + /* State DB is updated with oper speed */ + statePortSpeed("Ethernet0", "50000"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == ""); + ASSERT_TRUE(getSflowAdminStatus("Ethernet0") == "down"); + + /* enable sflow */ + cfgSflowSessionAll(true); + enableSflow(); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "50000"); + ASSERT_TRUE(getSflowAdminStatus("Ethernet0") == "up"); + ASSERT_TRUE(getSflowSampleDir("Ethernet0") == "rx"); + } + + TEST_F(SflowMgrTest, test_RateConfigEgressDir) + { + enableSflow(); + cfgPortSpeed("Ethernet0", "100000"); + /* Set local admin up with no rate and no egress direction */ + cfgSflowSession("Ethernet0", true, "", "tx"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "100000"); + + /* Scenario: Operational Speed Changes to 25000 */ + statePortSpeed("Ethernet0", "25000"); + ASSERT_TRUE(getSflowSampleRate("Ethernet0") == "25000"); + 
ASSERT_TRUE(getSflowSampleDir("Ethernet0") == "tx"); + } +} diff --git a/tests/mock_tests/sfloworh_ut.cpp b/tests/mock_tests/sfloworh_ut.cpp new file mode 100644 index 0000000000..d3d4d0defa --- /dev/null +++ b/tests/mock_tests/sfloworh_ut.cpp @@ -0,0 +1,372 @@ +#include +#include +#include +#include +#include + +#include "ut_helper.h" +#include "mock_orchagent_main.h" + +using namespace swss; + +namespace sflow_test +{ + class MockSflowOrch final + { + public: + MockSflowOrch() + { + this->appDb = std::make_shared("APPL_DB", 0); + std::vector sflow_tables = { + APP_SFLOW_TABLE_NAME, + APP_SFLOW_SESSION_TABLE_NAME, + APP_SFLOW_SAMPLE_RATE_TABLE_NAME + }; + sflowOrch = std::make_shared(this->appDb.get(), sflow_tables); + } + ~MockSflowOrch() = default; + + void doSflowTableTask(const std::deque &entries) + { + // ConsumerStateTable is used for APP DB + auto consumer = std::unique_ptr(new Consumer( + new ConsumerStateTable(this->appDb.get(), APP_SFLOW_TABLE_NAME, 1, 1), + this->sflowOrch.get(), APP_SFLOW_TABLE_NAME + )); + + consumer->addToSync(entries); + static_cast(this->sflowOrch.get())->doTask(*consumer); + } + + void doSflowSessionTableTask(const std::deque &entries) + { + // ConsumerStateTable is used for APP DB + auto consumer = std::unique_ptr(new Consumer( + new ConsumerStateTable(this->appDb.get(), APP_SFLOW_SESSION_TABLE_NAME, 1, 1), + this->sflowOrch.get(), APP_SFLOW_SESSION_TABLE_NAME + )); + + consumer->addToSync(entries); + static_cast(this->sflowOrch.get())->doTask(*consumer); + } + + void doSflowSampleTableTask(const std::deque &entries) + { + // ConsumerStateTable is used for APP DB + auto consumer = std::unique_ptr(new Consumer( + new ConsumerStateTable(this->appDb.get(), APP_SFLOW_SAMPLE_RATE_TABLE_NAME, 1, 1), + this->sflowOrch.get(), APP_SFLOW_SAMPLE_RATE_TABLE_NAME + )); + + consumer->addToSync(entries); + static_cast(this->sflowOrch.get())->doTask(*consumer); + } + + SflowOrch& get() + { + return *sflowOrch; + } + + private: + std::shared_ptr 
sflowOrch; + std::shared_ptr appDb; + }; + + class SflowOrchTest : public ::testing::Test + { + public: + SflowOrchTest() + { + this->initDb(); + } + virtual ~SflowOrchTest() = default; + + void SetUp() override + { + this->initSaiApi(); + this->initSwitch(); + this->initOrch(); + this->initPorts(); + } + + void TearDown() override + { + this->deinitOrch(); + this->deinitSwitch(); + this->deinitSaiApi(); + } + + private: + void initSaiApi() + { + std::map profileMap = { + { "SAI_VS_SWITCH_TYPE", "SAI_VS_SWITCH_TYPE_BCM56850" }, + { "KV_DEVICE_MAC_ADDRESS", "20:03:04:05:06:00" } + }; + auto status = ut_helper::initSaiApi(profileMap); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + } + + void deinitSaiApi() + { + auto status = ut_helper::uninitSaiApi(); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + } + + void initSwitch() + { + sai_status_t status; + sai_attribute_t attr; + + // Create switch + attr.id = SAI_SWITCH_ATTR_INIT_SWITCH; + attr.value.booldata = true; + + status = sai_switch_api->create_switch(&gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + // Get switch source MAC address + attr.id = SAI_SWITCH_ATTR_SRC_MAC_ADDRESS; + + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gMacAddress = attr.value.mac; + + // Get switch default virtual router ID + attr.id = SAI_SWITCH_ATTR_DEFAULT_VIRTUAL_ROUTER_ID; + + status = sai_switch_api->get_switch_attribute(gSwitchId, 1, &attr); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gVirtualRouterId = attr.value.oid; + } + + void deinitSwitch() + { + // Remove switch + auto status = sai_switch_api->remove_switch(gSwitchId); + ASSERT_EQ(status, SAI_STATUS_SUCCESS); + + gSwitchId = SAI_NULL_OBJECT_ID; + gVirtualRouterId = SAI_NULL_OBJECT_ID; + } + + void initOrch() + { + // + // SwitchOrch + // + + TableConnector switchCapTableStateDb(this->stateDb.get(), "SWITCH_CAPABILITY"); + TableConnector asicSensorsTableCfgDb(this->configDb.get(), 
CFG_ASIC_SENSORS_TABLE_NAME); + TableConnector switchTableAppDb(this->appDb.get(), APP_SWITCH_TABLE_NAME); + + std::vector switchTableList = { + asicSensorsTableCfgDb, + switchTableAppDb + }; + + gSwitchOrch = new SwitchOrch(this->appDb.get(), switchTableList, switchCapTableStateDb); + gDirectory.set(gSwitchOrch); + resourcesList.push_back(gSwitchOrch); + + // + // PortsOrch + // + + const int portsorchBasePri = 40; + + std::vector portTableList = { + { APP_PORT_TABLE_NAME, portsorchBasePri + 5 }, + { APP_VLAN_TABLE_NAME, portsorchBasePri + 2 }, + { APP_VLAN_MEMBER_TABLE_NAME, portsorchBasePri }, + { APP_LAG_TABLE_NAME, portsorchBasePri + 4 }, + { APP_LAG_MEMBER_TABLE_NAME, portsorchBasePri } + }; + + gPortsOrch = new PortsOrch(this->appDb.get(), this->stateDb.get(), portTableList, this->chassisAppDb.get()); + gDirectory.set(gPortsOrch); + resourcesList.push_back(gPortsOrch); + + // + // QosOrch + // + + std::vector qosTableList = { + CFG_TC_TO_QUEUE_MAP_TABLE_NAME, + CFG_SCHEDULER_TABLE_NAME, + CFG_DSCP_TO_TC_MAP_TABLE_NAME, + CFG_MPLS_TC_TO_TC_MAP_TABLE_NAME, + CFG_DOT1P_TO_TC_MAP_TABLE_NAME, + CFG_QUEUE_TABLE_NAME, + CFG_PORT_QOS_MAP_TABLE_NAME, + CFG_WRED_PROFILE_TABLE_NAME, + CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, + CFG_PFC_PRIORITY_TO_PRIORITY_GROUP_MAP_TABLE_NAME, + CFG_PFC_PRIORITY_TO_QUEUE_MAP_TABLE_NAME, + CFG_DSCP_TO_FC_MAP_TABLE_NAME, + CFG_EXP_TO_FC_MAP_TABLE_NAME + }; + gQosOrch = new QosOrch(this->configDb.get(), qosTableList); + gDirectory.set(gQosOrch); + resourcesList.push_back(gQosOrch); + + // + // BufferOrch + // + + std::vector bufferTableList = { + APP_BUFFER_POOL_TABLE_NAME, + APP_BUFFER_PROFILE_TABLE_NAME, + APP_BUFFER_QUEUE_TABLE_NAME, + APP_BUFFER_PG_TABLE_NAME, + APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, + APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME + }; + gBufferOrch = new BufferOrch(this->appDb.get(), this->configDb.get(), this->stateDb.get(), bufferTableList); + gDirectory.set(gBufferOrch); + resourcesList.push_back(gBufferOrch); + + 
// + // FlexCounterOrch + // + + std::vector flexCounterTableList = { + CFG_FLEX_COUNTER_TABLE_NAME + }; + + auto flexCounterOrch = new FlexCounterOrch(this->configDb.get(), flexCounterTableList); + gDirectory.set(flexCounterOrch); + resourcesList.push_back(flexCounterOrch); + } + + void deinitOrch() + { + std::reverse(this->resourcesList.begin(), this->resourcesList.end()); + for (auto &it : this->resourcesList) + { + delete it; + } + + gSwitchOrch = nullptr; + gPortsOrch = nullptr; + gQosOrch = nullptr; + gBufferOrch = nullptr; + + Portal::DirectoryInternal::clear(gDirectory); + EXPECT_TRUE(Portal::DirectoryInternal::empty(gDirectory)); + } + + void initPorts() + { + auto portTable = Table(this->appDb.get(), APP_PORT_TABLE_NAME); + + // Get SAI default ports to populate DB + auto ports = ut_helper::getInitialSaiPorts(); + + // Populate port table with SAI ports + for (const auto &cit : ports) + { + portTable.set(cit.first, cit.second); + } + + // Set PortConfigDone + portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + + // Set PortInitDone + portTable.set("PortInitDone", { { "lanes", "0" } }); + gPortsOrch->addExistingData(&portTable); + static_cast(gPortsOrch)->doTask(); + } + + void initDb() + { + this->appDb = std::make_shared("APPL_DB", 0); + this->configDb = std::make_shared("CONFIG_DB", 0); + this->stateDb = std::make_shared("STATE_DB", 0); + this->chassisAppDb = std::make_shared("CHASSIS_APP_DB", 0); + } + + std::shared_ptr appDb; + std::shared_ptr configDb; + std::shared_ptr stateDb; + std::shared_ptr chassisAppDb; + + std::vector resourcesList; + }; + + /* Test enabling/disabling SFLOW */ + TEST_F(SflowOrchTest, SflowEnableDisable) + { + MockSflowOrch mock_orch; + { + auto table1 = deque( + { + { + "global", + SET_COMMAND, + { + {"admin_state", "down"} + } + } + }); + mock_orch.doSflowTableTask(table1); + + 
ASSERT_FALSE(Portal::SflowOrchInternal::getSflowStatusEnable(mock_orch.get())); + } + { + auto table2 = deque( + { + { + "global", + SET_COMMAND, + { + {"admin_state", "up"} + } + } + }); + mock_orch.doSflowTableTask(table2); + + ASSERT_TRUE(Portal::SflowOrchInternal::getSflowStatusEnable(mock_orch.get())); + } + } + + /* Test create/delete SFLOW */ + TEST_F(SflowOrchTest, SflowCreateDelete) + { + MockSflowOrch mock_orch; + { + auto table3 = deque( + { + { + "global", + SET_COMMAND, + { + {"admin_state", "up"}, + } + } + }); + mock_orch.doSflowTableTask(table3); + ASSERT_TRUE(Portal::SflowOrchInternal::getSflowStatusEnable(mock_orch.get())); + } + { + auto table4 = deque( + { + { + "global", + DEL_COMMAND, + { + {"admin_state", "up"}, + } + } + }); + mock_orch.doSflowTableTask(table4); + ASSERT_FALSE(Portal::SflowOrchInternal::getSflowStatusEnable(mock_orch.get())); + } + } +} + + diff --git a/tests/mock_tests/swssnet_ut.cpp b/tests/mock_tests/swssnet_ut.cpp new file mode 100644 index 0000000000..6ec64e0202 --- /dev/null +++ b/tests/mock_tests/swssnet_ut.cpp @@ -0,0 +1,39 @@ +#include "ut_helper.h" +#include "swssnet.h" + +namespace swssnet_test +{ + struct SwssNetTest : public ::testing::Test + { + SwssNetTest() {} + }; + + TEST_F(SwssNetTest, CovertSAIPrefixToSONiCPrefix) + { + IpPrefix ip_prefix("1.2.3.4/24"); + sai_ip_prefix_t sai_prefix; + swss::copy(sai_prefix, ip_prefix); + IpPrefix ip_prefix_copied = swss::getIpPrefixFromSaiPrefix(sai_prefix); + ASSERT_EQ("1.2.3.4/24", ip_prefix_copied.to_string()); + + IpPrefix ip_prefix1("1.2.3.4/32"); + swss::copy(sai_prefix, ip_prefix1); + ip_prefix_copied = swss::getIpPrefixFromSaiPrefix(sai_prefix); + ASSERT_EQ("1.2.3.4/32", ip_prefix_copied.to_string()); + + IpPrefix ip_prefix2("0.0.0.0/0"); + swss::copy(sai_prefix, ip_prefix2); + ip_prefix_copied = swss::getIpPrefixFromSaiPrefix(sai_prefix); + ASSERT_EQ("0.0.0.0/0", ip_prefix_copied.to_string()); + + IpPrefix ip_prefix3("2000::1/128"); + swss::copy(sai_prefix, 
ip_prefix3); + ip_prefix_copied = swss::getIpPrefixFromSaiPrefix(sai_prefix); + ASSERT_EQ("2000::1/128", ip_prefix_copied.to_string()); + + IpPrefix ip_prefix4("::/0"); + swss::copy(sai_prefix, ip_prefix4); + ip_prefix_copied = swss::getIpPrefixFromSaiPrefix(sai_prefix); + ASSERT_EQ("::/0", ip_prefix_copied.to_string()); + } +} diff --git a/tests/mock_tests/test_failure_handling.cpp b/tests/mock_tests/test_failure_handling.cpp new file mode 100644 index 0000000000..7381f4015e --- /dev/null +++ b/tests/mock_tests/test_failure_handling.cpp @@ -0,0 +1,91 @@ +#include "saihelper.h" +#include "ut_helper.h" +#include + +extern sai_switch_api_t *sai_switch_api; + +namespace saifailure_test +{ + struct SaiFailureTest : public ::testing::Test + { + }; + uint32_t *_sai_syncd_notifications_count; + int32_t *_sai_syncd_notification_event; + sai_switch_api_t *pold_sai_switch_api; + sai_switch_api_t ut_sai_switch_api; + + sai_status_t _ut_stub_sai_set_switch_attribute( + _In_ sai_object_id_t switch_id, + _In_ const sai_attribute_t *attr) + { + if (attr[0].id == SAI_REDIS_SWITCH_ATTR_NOTIFY_SYNCD) + { + *_sai_syncd_notifications_count = *_sai_syncd_notifications_count + 1; + *_sai_syncd_notification_event = attr[0].value.s32; + } + return pold_sai_switch_api->set_switch_attribute(switch_id, attr); + } + + void _hook_sai_switch_api() + { + map profile = { + { "SAI_VS_SWITCH_TYPE", "SAI_VS_SWITCH_TYPE_BCM56850" }, + { "KV_DEVICE_MAC_ADDRESS", "20:03:04:05:06:00" } + }; + + ut_helper::initSaiApi(profile); + ut_sai_switch_api = *sai_switch_api; + pold_sai_switch_api = sai_switch_api; + + ut_sai_switch_api.set_switch_attribute = _ut_stub_sai_set_switch_attribute; + sai_switch_api = &ut_sai_switch_api; + } + + void _unhook_sai_switch_api() + { + sai_switch_api = pold_sai_switch_api; + ut_helper::uninitSaiApi(); + } + + TEST_F(SaiFailureTest, handleSaiFailure) + { + _hook_sai_switch_api(); + _sai_syncd_notifications_count = (uint32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + 
MAP_SHARED | MAP_ANONYMOUS, -1, 0); + + _sai_syncd_notification_event = (int32_t*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + *_sai_syncd_notifications_count = 0; + uint32_t notif_count = *_sai_syncd_notifications_count; + + ASSERT_DEATH({handleSaiCreateStatus(SAI_API_FDB, SAI_STATUS_FAILURE);}, ""); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + ASSERT_DEATH({handleSaiCreateStatus(SAI_API_HOSTIF, SAI_STATUS_INVALID_PARAMETER);}, ""); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + ASSERT_DEATH({handleSaiCreateStatus(SAI_API_PORT, SAI_STATUS_FAILURE);}, ""); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + ASSERT_DEATH({handleSaiSetStatus(SAI_API_HOSTIF, SAI_STATUS_FAILURE);}, ""); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + ASSERT_DEATH({handleSaiSetStatus(SAI_API_PORT, SAI_STATUS_FAILURE);}, ""); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + ASSERT_DEATH({handleSaiSetStatus(SAI_API_TUNNEL, SAI_STATUS_FAILURE);}, ""); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + ASSERT_DEATH({handleSaiRemoveStatus(SAI_API_LAG, SAI_STATUS_FAILURE);}, ""); + ASSERT_EQ(*_sai_syncd_notifications_count, ++notif_count); + ASSERT_EQ(*_sai_syncd_notification_event, SAI_REDIS_NOTIFY_SYNCD_INVOKE_DUMP); + + _unhook_sai_switch_api(); + } +} diff --git a/tests/mock_tests/ut_helper.h b/tests/mock_tests/ut_helper.h index baaf8184d8..f8f411d025 100644 --- 
a/tests/mock_tests/ut_helper.h +++ b/tests/mock_tests/ut_helper.h @@ -13,7 +13,7 @@ namespace ut_helper { sai_status_t initSaiApi(const std::map &profile); - void uninitSaiApi(); + sai_status_t uninitSaiApi(); map> getInitialSaiPorts(); } diff --git a/tests/mock_tests/ut_saihelper.cpp b/tests/mock_tests/ut_saihelper.cpp index e16a217e71..8b6b35b6f7 100644 --- a/tests/mock_tests/ut_saihelper.cpp +++ b/tests/mock_tests/ut_saihelper.cpp @@ -64,8 +64,10 @@ namespace ut_helper } sai_api_query(SAI_API_SWITCH, (void **)&sai_switch_api); + sai_api_query(SAI_API_HASH, (void **)&sai_hash_api); sai_api_query(SAI_API_BRIDGE, (void **)&sai_bridge_api); sai_api_query(SAI_API_VIRTUAL_ROUTER, (void **)&sai_virtual_router_api); + sai_api_query(SAI_API_SAMPLEPACKET, (void **)&sai_samplepacket_api); sai_api_query(SAI_API_PORT, (void **)&sai_port_api); sai_api_query(SAI_API_LAG, (void **)&sai_lag_api); sai_api_query(SAI_API_VLAN, (void **)&sai_vlan_api); @@ -74,18 +76,30 @@ namespace ut_helper sai_api_query(SAI_API_NEIGHBOR, (void **)&sai_neighbor_api); sai_api_query(SAI_API_TUNNEL, (void **)&sai_tunnel_api); sai_api_query(SAI_API_NEXT_HOP, (void **)&sai_next_hop_api); + sai_api_query(SAI_API_NEXT_HOP_GROUP, (void **)&sai_next_hop_group_api); sai_api_query(SAI_API_ACL, (void **)&sai_acl_api); sai_api_query(SAI_API_HOSTIF, (void **)&sai_hostif_api); + sai_api_query(SAI_API_POLICER, (void **)&sai_policer_api); sai_api_query(SAI_API_BUFFER, (void **)&sai_buffer_api); + sai_api_query(SAI_API_QOS_MAP, (void **)&sai_qos_map_api); + sai_api_query(SAI_API_SCHEDULER_GROUP, (void **)&sai_scheduler_group_api); + sai_api_query(SAI_API_SCHEDULER, (void **)&sai_scheduler_api); + sai_api_query(SAI_API_WRED, (void **)&sai_wred_api); sai_api_query(SAI_API_QUEUE, (void **)&sai_queue_api); sai_api_query(SAI_API_MPLS, (void**)&sai_mpls_api); + sai_api_query(SAI_API_COUNTER, (void**)&sai_counter_api); + sai_api_query(SAI_API_FDB, (void**)&sai_fdb_api); return SAI_STATUS_SUCCESS; } - void uninitSaiApi() + 
sai_status_t uninitSaiApi() { - sai_api_uninitialize(); + auto status = sai_api_uninitialize(); + if (status != SAI_STATUS_SUCCESS) + { + return status; + } sai_switch_api = nullptr; sai_bridge_api = nullptr; @@ -100,8 +114,12 @@ namespace ut_helper sai_next_hop_api = nullptr; sai_acl_api = nullptr; sai_hostif_api = nullptr; + sai_policer_api = nullptr; sai_buffer_api = nullptr; sai_queue_api = nullptr; + sai_counter_api = nullptr; + + return SAI_STATUS_SUCCESS; } map> getInitialSaiPorts() diff --git a/tests/mock_tests/warmrestartassist_ut.cpp b/tests/mock_tests/warmrestartassist_ut.cpp new file mode 100644 index 0000000000..6adcd08baf --- /dev/null +++ b/tests/mock_tests/warmrestartassist_ut.cpp @@ -0,0 +1,64 @@ +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +//#include "mock_orchagent_main.h" +#include "mock_table.h" +#include "warm_restart.h" +#define private public +#include "warmRestartAssist.h" +#undef private + +#define APP_WRA_TEST_TABLE_NAME "TEST_TABLE" + +namespace warmrestartassist_test +{ + using namespace std; + + shared_ptr m_app_db = make_shared("APPL_DB", 0); + shared_ptr m_app_db_pipeline = make_shared(m_app_db.get()); + shared_ptr m_wra_test_table = make_shared(m_app_db.get(), APP_WRA_TEST_TABLE_NAME); + + AppRestartAssist *appRestartAssist; + + struct WarmrestartassistTest : public ::testing::Test + { + WarmrestartassistTest() + { + appRestartAssist = new AppRestartAssist(m_app_db_pipeline.get(), "testsyncd", "swss", 0); + appRestartAssist->m_warmStartInProgress = true; + appRestartAssist->registerAppTable(APP_WRA_TEST_TABLE_NAME, m_wra_test_table.get()); + } + + void SetUp() override + { + testing_db::reset(); + + Table testTable = Table(m_app_db.get(), APP_WRA_TEST_TABLE_NAME); + testTable.set("key", + { + {"field", "value0"}, + }); + } + + void TearDown() override + { + } + }; + + TEST_F(WarmrestartassistTest, warmRestartAssistTest) + { + appRestartAssist->readTablesToMap(); + vector fvVector; + 
fvVector.emplace_back("field", "value1"); + appRestartAssist->insertToMap(APP_WRA_TEST_TABLE_NAME, "key", fvVector, false); + appRestartAssist->insertToMap(APP_WRA_TEST_TABLE_NAME, "key", fvVector, false); + appRestartAssist->reconcile(); + + fvVector.clear(); + Table testTable = Table(m_app_db.get(), APP_WRA_TEST_TABLE_NAME); + ASSERT_TRUE(testTable.get("key", fvVector)); + ASSERT_EQ(fvField(fvVector[0]), "field"); + ASSERT_EQ(fvValue(fvVector[0]), "value1"); + } +} diff --git a/tests/mux_neigh_miss_tests.py b/tests/mux_neigh_miss_tests.py new file mode 100644 index 0000000000..d8c32a29e0 --- /dev/null +++ b/tests/mux_neigh_miss_tests.py @@ -0,0 +1,243 @@ +""" +Test scenarios and related constants for dualtor neighbor miss. + +Each item in NEIGH_MISS_TESTS is a test case, comprising a list of steps. +Each step is a dictionary containing the action to be performed during that +step, as well as the expected result. +The expected result itself is another dictionary, containing the following +attributes: + - (bool) EXPECT_ROUTE: if we expect a route entry in ASIC_DB + - (bool) EXPECT_NEIGH: if we expect a neighbor entry in ASIC_DB + - (bool) REAL_MAC: If a real MAC address is expected in the + APPL_DB neighbor table entry, as opposed + to a zero/empty MAC + +All expected result attributes will be verified against the DVS +after each test step is executed + +Note: EXPECT_ROUTE and EXPECT_NEIGH cannot both be True + +Note: for the purposes of this test, there is a distinction made + between 'server' IPs and 'neighbor' IPs. Server IPs are + IP addresses explicitly configured on a specific mux cable + interface in the MUX_CABLE table in config DB. Neighbor IPs + are any other IPs within the VLAN subnet. 
+ + +""" + +__all__ = [ + 'TEST_ACTION', 'EXPECTED_RESULT', 'ACTIVE', 'STANDBY', 'PING_SERV', 'PING_NEIGH', + 'RESOLVE_ENTRY', 'DELETE_ENTRY', 'EXPECT_ROUTE', 'EXPECT_NEIGH', 'REAL_MAC', + 'INTF', 'IP', 'MAC', 'NEIGH_MISS_TESTS' +] + +TEST_ACTION = 'action' +EXPECTED_RESULT = 'result' + +# Possible test actions +ACTIVE = 'active' # Switch the test interface to active +STANDBY = 'standby' # Switch the test interface to standby +PING_SERV = 'ping_serv' # Ping the server mux cable IP, used to trigger a netlink fail message +PING_NEIGH = 'ping_neigh' # Ping the neighbor IP (not configured on a specific mux cable port) +RESOLVE_ENTRY = 'resolve_entry' # Resolve the test IP neighbor entry in the kernel +DELETE_ENTRY = 'delete_entry' # Delete the test IP neighbor entry from the kernel + +# Test expectations +EXPECT_ROUTE = 'expect_route' +EXPECT_NEIGH = 'expect_neigh' +REAL_MAC = 'real_mac' + +INTF = 'intf' +IP = 'ip' +MAC = 'mac' + +# Note: For most test cases below, after the neighbor entry is deleted, we must +# still set `REAL_MAC` to `True` in the expected result since a prior step in the +# test should have resolved the neighbor entry and confirmed that the APPL_DB +# neighbor entry contained a real MAC address. Thus, when we verify that APPL_DB +# no longer contains a neighbor table entry, we need to check for the real MAC. +# The exception to this is test cases where the neighbor entry is never resolved +# in the kernel. In that case, APPL_DB will never contain the real MAC address. 
+ +STANDBY_MUX_CABLE_TESTS = [ + [ + { + TEST_ACTION: STANDBY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: PING_SERV, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: ACTIVE, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: RESOLVE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: True, REAL_MAC: True} + }, + { + TEST_ACTION: DELETE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: True} + } + ], + [ + { + TEST_ACTION: STANDBY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: PING_SERV, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: RESOLVE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: True} + }, + { + TEST_ACTION: ACTIVE, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: True, REAL_MAC: True} + }, + { + TEST_ACTION: DELETE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: True} + } + ], + [ + { + TEST_ACTION: STANDBY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: PING_SERV, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: DELETE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + } + ] +] + +ACTIVE_MUX_CABLE_TESTS = [ + [ + { + TEST_ACTION: ACTIVE, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: PING_SERV, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: STANDBY, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: RESOLVE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, 
REAL_MAC: True} + }, + { + TEST_ACTION: DELETE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: True} + } + ], + [ + { + TEST_ACTION: ACTIVE, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: PING_SERV, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: RESOLVE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: True, REAL_MAC: True} + }, + { + TEST_ACTION: STANDBY, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: True} + }, + { + TEST_ACTION: DELETE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: True} + } + ], + [ + { + TEST_ACTION: ACTIVE, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: PING_SERV, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: DELETE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + } + ] +] + +NEIGH_IP_TESTS = [ + [ + { + TEST_ACTION: STANDBY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: PING_NEIGH, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: RESOLVE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: True} + }, + { + TEST_ACTION: ACTIVE, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: True, REAL_MAC: True} + }, + { + TEST_ACTION: DELETE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: True} + } + ], + [ + { + TEST_ACTION: ACTIVE, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: PING_NEIGH, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: RESOLVE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: True, REAL_MAC: 
True} + }, + { + TEST_ACTION: STANDBY, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: True} + }, + { + TEST_ACTION: DELETE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: True} + } + ], + [ + { + TEST_ACTION: PING_NEIGH, + EXPECTED_RESULT: {EXPECT_ROUTE: True, EXPECT_NEIGH: False, REAL_MAC: False} + }, + { + TEST_ACTION: DELETE_ENTRY, + EXPECTED_RESULT: {EXPECT_ROUTE: False, EXPECT_NEIGH: False, REAL_MAC: False} + } + ] + +] + +NEIGH_MISS_TESTS = ACTIVE_MUX_CABLE_TESTS + STANDBY_MUX_CABLE_TESTS + NEIGH_IP_TESTS diff --git a/tests/p4rt/acl.py b/tests/p4rt/acl.py index 283ba95ce6..6623bb3fcf 100644 --- a/tests/p4rt/acl.py +++ b/tests/p4rt/acl.py @@ -15,6 +15,7 @@ class P4RtAclTableDefinitionWrapper(util.DBInterface): SAI_ATTR_MATCH_ETHER_TYPE = "SAI_ACL_TABLE_ATTR_FIELD_ETHER_TYPE" SAI_ATTR_MATCH_IP_TYPE = "SAI_ACL_TABLE_ATTR_FIELD_ACL_IP_TYPE" SAI_ATTR_MATCH_DST_MAC = "SAI_ACL_TABLE_ATTR_FIELD_DST_MAC" + SAI_ATTR_MATCH_ROUTE_DST_USER_META = "SAI_ACL_TABLE_ATTR_FIELD_ROUTE_DST_USER_META" SAI_ATTR_MATCH_SRC_IPV6_WORD3 = "SAI_ACL_TABLE_ATTR_FIELD_SRC_IPV6_WORD3" SAI_ATTR_MATCH_SRC_IPV6_WORD2 = "SAI_ACL_TABLE_ATTR_FIELD_SRC_IPV6_WORD2" SAI_ATTR_MATCH_UDF_GROUP_MIN = "SAI_ACL_TABLE_ATTR_USER_DEFINED_FIELD_GROUP_MIN" @@ -31,6 +32,7 @@ class P4RtAclTableDefinitionWrapper(util.DBInterface): SIZE_FIELD = "size" MATCH_FIELD_ETHER_TYPE = "match/ether_type" MATCH_FIELD_ETHER_DST = "match/ether_dst" + MATCH_FIELD_L3_CLASS_ID = "match/l3_class_id" MATCH_FIELD_IS_IP = "match/is_ip" MATCH_FIELD_IS_IPV4 = "match/is_ipv4" MATCH_FIELD_IS_IPV6 = "match/is_ipv6" @@ -57,6 +59,7 @@ class P4RtAclRuleWrapper(util.DBInterface): SAI_ATTR_MATCH_ETHER_TYPE = "SAI_ACL_ENTRY_ATTR_FIELD_ETHER_TYPE" SAI_ATTR_MATCH_IP_TYPE = "SAI_ACL_ENTRY_ATTR_FIELD_ACL_IP_TYPE" SAI_ATTR_MATCH_DST_MAC = "SAI_ACL_ENTRY_ATTR_FIELD_DST_MAC" + SAI_ATTR_MATCH_ROUTE_DST_USER_META = "SAI_ACL_ENTRY_ATTR_FIELD_ROUTE_DST_USER_META" SAI_ATTR_MATCH_SRC_IPV6_WORD3 = 
"SAI_ACL_ENTRY_ATTR_FIELD_SRC_IPV6_WORD3" SAI_ATTR_MATCH_SRC_IPV6_WORD2 = "SAI_ACL_ENTRY_ATTR_FIELD_SRC_IPV6_WORD2" SAI_ATTR_MATCH_UDF_GROUP_MIN = "SAI_ACL_ENTRY_ATTR_USER_DEFINED_FIELD_GROUP_MIN" diff --git a/tests/p4rt/l3.py b/tests/p4rt/l3.py index c5f656aa2e..915228a9b5 100644 --- a/tests/p4rt/l3.py +++ b/tests/p4rt/l3.py @@ -28,28 +28,141 @@ class P4RtRouterInterfaceWrapper(util.DBInterface): DEFAULT_SRC_MAC = "00:11:22:33:44:55" DEFAULT_ACTION = "set_port_and_src_mac" + # Fetch oid of the first newly created rif from created rif in ASIC + # db. This API should only be used when only one oid is expected to be + # created after the original entries. + # Original rif entries in asic db must be fetched using + # 'get_original_redis_entries' before fetching oid of newly created rif. + def get_newly_created_router_interface_oid(self, known_oids=set()): + rif_oid = None + rif_entries = util.get_keys(self.asic_db, self.ASIC_DB_TBL_NAME) + for key in rif_entries: + if ( + key + not in self._original_entries[ + "{}:{}".format(self.asic_db, self.ASIC_DB_TBL_NAME) + ] + and + key not in known_oids + ): + rif_oid = key + break + return rif_oid + def generate_app_db_key(self, router_interface_id): d = {} - d[util.prepend_match_field("router_interface_id") - ] = router_interface_id + d[util.prepend_match_field("router_interface_id")] = router_interface_id key = json.dumps(d, separators=(",", ":")) return self.TBL_NAME + ":" + key # create default router interface - def create_router_interface(self, - router_interace_id=None, port_id=None, - src_mac=None, action=None): + def create_router_interface( + self, router_interace_id=None, port_id=None, src_mac=None, action=None + ): router_interface_id = router_interace_id or self.DEFAULT_ROUTER_INTERFACE_ID port_id = port_id or self.DEFAULT_PORT_ID src_mac = src_mac or self.DEFAULT_SRC_MAC action = action or self.DEFAULT_ACTION - attr_list = [(util.prepend_param_field(self.PORT_FIELD), port_id), - 
(util.prepend_param_field(self.SRC_MAC_FIELD), src_mac), - (self.ACTION_FIELD, action)] + attr_list = [ + (util.prepend_param_field(self.PORT_FIELD), port_id), + (util.prepend_param_field(self.SRC_MAC_FIELD), src_mac), + (self.ACTION_FIELD, action), + ] router_intf_key = self.generate_app_db_key(router_interface_id) self.set_app_db_entry(router_intf_key, attr_list) return router_interface_id, router_intf_key, attr_list +class P4RtGreTunnelWrapper(util.DBInterface): + """Interface to interact with APP DB and ASIC DB tables for P4RT GRE Tunnel object.""" + + # database and SAI constants + APP_DB_TBL_NAME = swsscommon.APP_P4RT_TABLE_NAME + TBL_NAME = "FIXED_TUNNEL_TABLE" + ASIC_DB_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL" + SAI_ATTR_TYPE = "SAI_TUNNEL_ATTR_TYPE" + SAI_ATTR_PEER_MODE = "SAI_TUNNEL_ATTR_PEER_MODE" + SAI_ATTR_UNDERLAY_INTERFACE = "SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE" + SAI_ATTR_OVERLAY_INTERFACE = "SAI_TUNNEL_ATTR_OVERLAY_INTERFACE" + SAI_ATTR_ENCAP_SRC_IP = "SAI_TUNNEL_ATTR_ENCAP_SRC_IP" + SAI_ATTR_ENCAP_DST_IP = "SAI_TUNNEL_ATTR_ENCAP_DST_IP" + + # attribute fields for tunnel object + ROUTER_ROUTER_INTERFACE_ID_FIELD = "router_interface_id" + ENCAP_SRC_IP_FIELD = "encap_src_ip" + ENCAP_DST_IP_FIELD = "encap_dst_ip" + + # default tunnel attribute values + DEFAULT_TUNNEL_ID = "tunnel-1" + DEFAULT_ROUTER_INTERFACE_ID = "16" + DEFAULT_ENCAP_SRC_IP = "1.2.3.4" + DEFAULT_ENCAP_DST_IP = "12.0.0.1" + DEFAULT_ACTION = "mark_for_p2p_tunnel_encap" + + def generate_app_db_key(self, tunnel_id): + d = {} + d[util.prepend_match_field("tunnel_id")] = tunnel_id + key = json.dumps(d, separators=(",", ":")) + return self.TBL_NAME + ":" + key + + # create default tunnel + def create_gre_tunnel( + self, tunnel_id=None, router_interface_id=None, encap_src_ip=None, encap_dst_ip=None, action=None + ): + tunnel_id = tunnel_id or self.DEFAULT_TUNNEL_ID + router_interface_id = router_interface_id or self.DEFAULT_ROUTER_INTERFACE_ID + encap_src_ip = encap_src_ip or 
self.DEFAULT_ENCAP_SRC_IP + encap_dst_ip = encap_dst_ip or self.DEFAULT_ENCAP_DST_IP + action = action or self.DEFAULT_ACTION + attr_list = [ + (util.prepend_param_field(self.ROUTER_ROUTER_INTERFACE_ID_FIELD), router_interface_id), + (util.prepend_param_field(self.ENCAP_SRC_IP_FIELD), encap_src_ip), + (util.prepend_param_field(self.ENCAP_DST_IP_FIELD), encap_dst_ip), + (self.ACTION_FIELD, action), + ] + tunnel_key = self.generate_app_db_key(tunnel_id) + self.set_app_db_entry(tunnel_key, attr_list) + return tunnel_id, tunnel_key, attr_list + + # Fetch oid of the first newly created tunnel from created tunnels in ASIC + # db. This API should only be used when only one oid is expected to be + # created after the original entries. + # Original tunnel entries in asic db must be fetched using + # 'get_original_redis_entries' before fetching oid of newly created tunnel. + def get_newly_created_tunnel_oid(self): + tunnel_oid = None + tunnel_entries = util.get_keys(self.asic_db, self.ASIC_DB_TBL_NAME) + for key in tunnel_entries: + if ( + key + not in self._original_entries[ + "{}:{}".format(self.asic_db, self.ASIC_DB_TBL_NAME) + ] + ): + tunnel_oid = key + break + return tunnel_oid + + def get_original_appl_db_entries_count(self): + return len( + self._original_entries[ + "%s:%s" % (self.appl_db, (self.APP_DB_TBL_NAME + ":" + self.TBL_NAME)) + ] + ) + + def get_original_appl_state_db_entries_count(self): + return len( + self._original_entries[ + "%s:%s" + % (self.appl_state_db, (self.APP_DB_TBL_NAME + ":" + self.TBL_NAME)) + ] + ) + + def get_original_asic_db_entries_count(self): + return len( + self._original_entries[ + "%s:%s" % (self.asic_db, self.ASIC_DB_TBL_NAME) + ] + ) class P4RtNeighborWrapper(util.DBInterface): """Interface to interact with APP DB and ASIC DB tables for P4RT neighbor object.""" @@ -72,24 +185,31 @@ class P4RtNeighborWrapper(util.DBInterface): def generate_app_db_key(self, router_interface_id, neighbor_id): d = {} - 
d[util.prepend_match_field("router_interface_id") - ] = router_interface_id + d[util.prepend_match_field("router_interface_id")] = router_interface_id d[util.prepend_match_field("neighbor_id")] = neighbor_id key = json.dumps(d, separators=(",", ":")) return self.TBL_NAME + ":" + key # create default neighbor - def create_neighbor(self, router_interface_id=None, neighbor_id=None, - dst_mac=None, action=None, ipv4=True): + def create_neighbor( + self, + router_interface_id=None, + neighbor_id=None, + dst_mac=None, + action=None, + ipv4=True, + ): router_interface_id = router_interface_id or self.DEFAULT_ROUTER_INTERFACE_ID - neighbor_id = neighbor_id or (self.DEFAULT_IPV4_NEIGHBOR_ID if ipv4 - else self.DEFAULT_IPV6_NEIGHBOR_ID) + neighbor_id = neighbor_id or ( + self.DEFAULT_IPV4_NEIGHBOR_ID if ipv4 else self.DEFAULT_IPV6_NEIGHBOR_ID + ) dst_mac = dst_mac or self.DEFAULT_DST_MAC action = action or self.DEFAULT_ACTION - attr_list = [(util.prepend_param_field(self.DST_MAC_FIELD), dst_mac), - (self.ACTION_FIELD, action)] - neighbor_key = self.generate_app_db_key( - router_interface_id, neighbor_id) + attr_list = [ + (util.prepend_param_field(self.DST_MAC_FIELD), dst_mac), + (self.ACTION_FIELD, action), + ] + neighbor_key = self.generate_app_db_key(router_interface_id, neighbor_id) self.set_app_db_entry(neighbor_key, attr_list) return neighbor_id, neighbor_key, attr_list @@ -103,38 +223,55 @@ class P4RtNextHopWrapper(util.DBInterface): ASIC_DB_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP" SAI_ATTR_TYPE = "SAI_NEXT_HOP_ATTR_TYPE" SAI_ATTR_IP = "SAI_NEXT_HOP_ATTR_IP" + SAI_ATTR_TUNNEL_ENCAP = "SAI_NEXT_HOP_TYPE_TUNNEL_ENCAP" SAI_ATTR_ROUTER_INTF_OID = "SAI_NEXT_HOP_ATTR_ROUTER_INTERFACE_ID" + SAI_ATTR_TUNNEL_OID = "SAI_NEXT_HOP_ATTR_TUNNEL_ID" # attribute fields for nexthop object RIF_FIELD = "router_interface_id" NEIGHBOR_ID_FIELD = "neighbor_id" + TUNNEL_ID_FIELD = "tunnel_id" # default next hop attribute values - DEFAULT_ACTION = "set_nexthop" + DEFAULT_ACTION = 
"set_ip_nexthop" DEFAULT_NEXTHOP_ID = "8" DEFAULT_ROUTER_INTERFACE_ID = "16" DEFAULT_IPV4_NEIGHBOR_ID = "12.0.0.1" DEFAULT_IPV6_NEIGHBOR_ID = "fe80::21a:11ff:fe17:5f80" + # tunnel nexthop attribute values + TUNNEL_ACTION = "set_p2p_tunnel_encap_nexthop" + DEFAULT_TUNNEL_ID = "tunnel-1" + def generate_app_db_key(self, nexthop_id): d = {} d[util.prepend_match_field("nexthop_id")] = nexthop_id key = json.dumps(d, separators=(",", ":")) return self.TBL_NAME + ":" + key - # create default next hop - def create_next_hop(self, router_interface_id=None, neighbor_id=None, - action=None, nexthop_id=None, ipv4=True): - action = action or self.DEFAULT_ACTION + # create next hop + def create_next_hop( + self, + router_interface_id=None, + neighbor_id=None, + action=None, + nexthop_id=None, + ipv4=True, + tunnel_id=None, + ): + action = action or (self.DEFAULT_ACTION if tunnel_id == None else self.TUNNEL_ACTION) router_interface_id = router_interface_id or self.DEFAULT_ROUTER_INTERFACE_ID if ipv4 is True: neighbor_id = neighbor_id or self.DEFAULT_IPV4_NEIGHBOR_ID else: neighbor_id = neighbor_id or self.DEFAULT_IPV6_NEIGHBOR_ID nexthop_id = nexthop_id or self.DEFAULT_NEXTHOP_ID - attr_list = [(util.prepend_param_field(self.RIF_FIELD), router_interface_id), - (util.prepend_param_field(self.NEIGHBOR_ID_FIELD), neighbor_id), - (self.ACTION_FIELD, action)] + attr_list = [(self.ACTION_FIELD, action)] + if action == self.DEFAULT_ACTION: + attr_list.append((util.prepend_param_field(self.RIF_FIELD), router_interface_id)) + attr_list.append((util.prepend_param_field(self.NEIGHBOR_ID_FIELD), neighbor_id)) + if tunnel_id != None: + attr_list.append((util.prepend_param_field(self.TUNNEL_ID_FIELD), tunnel_id)) nexthop_key = self.generate_app_db_key(nexthop_id) self.set_app_db_entry(nexthop_key, attr_list) return nexthop_id, nexthop_key, attr_list @@ -148,12 +285,37 @@ def get_newly_created_nexthop_oid(self): nexthop_oid = None nexthop_entries = util.get_keys(self.asic_db, 
self.ASIC_DB_TBL_NAME) for key in nexthop_entries: - if key not in self._original_entries["{}:{}".format(self.asic_db, - self.ASIC_DB_TBL_NAME)]: + if ( + key + not in self._original_entries[ + "{}:{}".format(self.asic_db, self.ASIC_DB_TBL_NAME) + ] + ): nexthop_oid = key break return nexthop_oid + def get_original_appl_db_entries_count(self): + return len( + self._original_entries[ + "%s:%s" % (self.appl_db, (self.APP_DB_TBL_NAME + ":" + self.TBL_NAME)) + ] + ) + + def get_original_appl_state_db_entries_count(self): + return len( + self._original_entries[ + "%s:%s" + % (self.appl_state_db, (self.APP_DB_TBL_NAME + ":" + self.TBL_NAME)) + ] + ) + + def get_original_asic_db_entries_count(self): + return len( + self._original_entries[ + "%s:%s" % (self.asic_db, self.ASIC_DB_TBL_NAME) + ] + ) class P4RtWcmpGroupWrapper(util.DBInterface): """Interface to interact with APP DB and ASIC DB tables for P4RT wcmp group object.""" @@ -163,9 +325,13 @@ class P4RtWcmpGroupWrapper(util.DBInterface): TBL_NAME = swsscommon.APP_P4RT_WCMP_GROUP_TABLE_NAME ASIC_DB_GROUP_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP" SAI_ATTR_GROUP_TYPE = "SAI_NEXT_HOP_GROUP_ATTR_TYPE" - SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP = "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP" + SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP = ( + "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP" + ) ASIC_DB_GROUP_MEMBER_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER" - SAI_ATTR_GROUP_MEMBER_NEXTHOP_GROUP_ID = "SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID" + SAI_ATTR_GROUP_MEMBER_NEXTHOP_GROUP_ID = ( + "SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID" + ) SAI_ATTR_GROUP_MEMBER_NEXTHOP_ID = "SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID" SAI_ATTR_GROUP_MEMBER_WEIGHT = "SAI_NEXT_HOP_GROUP_MEMBER_ATTR_WEIGHT" @@ -190,11 +356,14 @@ class P4RtWcmpGroupWrapper(util.DBInterface): # 'get_original_redis_entries' before fetching oid of newly created wcmp group. 
def get_newly_created_wcmp_group_oid(self): wcmp_group_oid = None - wcmp_group_entries = util.get_keys( - self.asic_db, self.ASIC_DB_GROUP_TBL_NAME) + wcmp_group_entries = util.get_keys(self.asic_db, self.ASIC_DB_GROUP_TBL_NAME) for key in wcmp_group_entries: - if key not in self._original_entries["{}:{}".format( - self.asic_db, self.ASIC_DB_GROUP_TBL_NAME)]: + if ( + key + not in self._original_entries[ + "{}:{}".format(self.asic_db, self.ASIC_DB_GROUP_TBL_NAME) + ] + ): wcmp_group_oid = key break return wcmp_group_oid @@ -207,11 +376,16 @@ def get_newly_created_wcmp_group_oid(self): # wcmp group member. def get_newly_created_wcmp_group_member_asic_db_key(self): asic_db_wcmp_group_member_key = None - wcmp_group_member_entries = util.get_keys(self.asic_db, - self.ASIC_DB_GROUP_MEMBER_TBL_NAME) + wcmp_group_member_entries = util.get_keys( + self.asic_db, self.ASIC_DB_GROUP_MEMBER_TBL_NAME + ) for key in wcmp_group_member_entries: - if key not in self._original_entries["{}:{}".format( - self.asic_db, self.ASIC_DB_GROUP_MEMBER_TBL_NAME)]: + if ( + key + not in self._original_entries[ + "{}:{}".format(self.asic_db, self.ASIC_DB_GROUP_MEMBER_TBL_NAME) + ] + ): asic_db_wcmp_group_member_key = key break return asic_db_wcmp_group_member_key @@ -223,16 +397,25 @@ def generate_app_db_key(self, group_id): return self.TBL_NAME + ":" + key # create default wcmp group - def create_wcmp_group(self, nexthop_id=None, wcmp_group_id=None, - action=None, weight=None, watch_port=None): + def create_wcmp_group( + self, + nexthop_id=None, + wcmp_group_id=None, + action=None, + weight=None, + watch_port=None, + ): wcmp_group_id = wcmp_group_id or self.DEFAULT_WCMP_GROUP_ID weight = weight or self.DEFAULT_WEIGHT action = action or self.DEFAULT_ACTION nexthop_id = nexthop_id or self.DEFAULT_NEXTHOP_ID watch_port = watch_port or self.DEFAULT_WATCH_PORT - action1 = {util.prepend_param_field(self.NEXTHOP_ID_FIELD): nexthop_id, - self.WEIGHT_FIELD: weight, self.ACTION_FIELD: action, - 
self.WATCH_PORT_FIELD: watch_port} + action1 = { + util.prepend_param_field(self.NEXTHOP_ID_FIELD): nexthop_id, + self.WEIGHT_FIELD: weight, + self.ACTION_FIELD: action, + self.WATCH_PORT_FIELD: watch_port, + } actions = [action1] attr_list = [(self.ACTIONS_FIELD, json.dumps(actions))] wcmp_group_key = self.generate_app_db_key(wcmp_group_id) @@ -240,22 +423,33 @@ def create_wcmp_group(self, nexthop_id=None, wcmp_group_id=None, return wcmp_group_id, wcmp_group_key, attr_list def get_original_appl_db_entries_count(self): - return len(self._original_entries["%s:%s" % (self.appl_db, - (self.APP_DB_TBL_NAME + ":" - + self.TBL_NAME))]) + return len( + self._original_entries[ + "%s:%s" % (self.appl_db, (self.APP_DB_TBL_NAME + ":" + self.TBL_NAME)) + ] + ) def get_original_appl_state_db_entries_count(self): - return len(self._original_entries["%s:%s" % (self.appl_state_db, - (self.APP_DB_TBL_NAME + ":" - + self.TBL_NAME))]) + return len( + self._original_entries[ + "%s:%s" + % (self.appl_state_db, (self.APP_DB_TBL_NAME + ":" + self.TBL_NAME)) + ] + ) def get_original_asic_db_group_entries_count(self): - return len(self._original_entries["%s:%s" % (self.asic_db, - self.ASIC_DB_GROUP_TBL_NAME)]) + return len( + self._original_entries[ + "%s:%s" % (self.asic_db, self.ASIC_DB_GROUP_TBL_NAME) + ] + ) def get_original_asic_db_member_entries_count(self): - return len(self._original_entries["%s:%s" % (self.asic_db, - self.ASIC_DB_GROUP_MEMBER_TBL_NAME)]) + return len( + self._original_entries[ + "%s:%s" % (self.asic_db, self.ASIC_DB_GROUP_MEMBER_TBL_NAME) + ] + ) class P4RtRouteWrapper(util.DBInterface): @@ -267,11 +461,14 @@ class P4RtRouteWrapper(util.DBInterface): SAI_ATTR_PACKET_ACTION = "SAI_ROUTE_ENTRY_ATTR_PACKET_ACTION" SAI_ATTR_PACKET_ACTION_FORWARD = "SAI_PACKET_ACTION_FORWARD" SAI_ATTR_PACKET_ACTION_DROP = "SAI_PACKET_ACTION_DROP" + SAI_ATTR_PACKET_ACTION_TRAP = "SAI_PACKET_ACTION_TRAP" SAI_ATTR_NEXTHOP_ID = "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID" + SAI_ATTR_META_DATA = 
"SAI_ROUTE_ENTRY_ATTR_META_DATA" # attribute fields for route object NEXTHOP_ID_FIELD = "nexthop_id" WCMP_GROUP_ID_FIELD = "wcmp_group_id" + ROUTE_METADATA_FIELD = "route_metadata" # default route attribute values DEFAULT_ACTION = "set_nexthop_id" @@ -297,21 +494,44 @@ def set_ip_type(self, ip_type): self.TBL_NAME = "FIXED_" + ip_type + "_TABLE" # Create default route. - def create_route(self, nexthop_id=None, wcmp_group_id=None, action=None, - vrf_id=None, dst=None): + def create_route( + self, + nexthop_id=None, + wcmp_group_id=None, + action=None, + vrf_id=None, + dst=None, + metadata="", + ): action = action or self.DEFAULT_ACTION vrf_id = vrf_id or self.DEFAULT_VRF_ID dst = dst or self.DEFAULT_DST if action == "set_wcmp_group_id": wcmp_group_id = wcmp_group_id or self.DEFAULT_WCMP_GROUP_ID - attr_list = [(self.ACTION_FIELD, action), - (util.prepend_param_field( - self.WCMP_GROUP_ID_FIELD), wcmp_group_id)] + attr_list = [ + (self.ACTION_FIELD, action), + (util.prepend_param_field(self.WCMP_GROUP_ID_FIELD), wcmp_group_id), + ] elif action == "set_nexthop_id": nexthop_id = nexthop_id or self.DEFAULT_NEXTHOP_ID - attr_list = [(self.ACTION_FIELD, action), - (util.prepend_param_field(self.NEXTHOP_ID_FIELD), - nexthop_id)] + attr_list = [ + (self.ACTION_FIELD, action), + (util.prepend_param_field(self.NEXTHOP_ID_FIELD), nexthop_id), + ] + elif action == "set_wcmp_group_id_and_metadata": + wcmp_group_id = wcmp_group_id or self.DEFAULT_WCMP_GROUP_ID + attr_list = [ + (self.ACTION_FIELD, action), + (util.prepend_param_field(self.WCMP_GROUP_ID_FIELD), wcmp_group_id), + (util.prepend_param_field(self.ROUTE_METADATA_FIELD), metadata), + ] + elif action == "set_nexthop_id_and_metadata": + nexthop_id = nexthop_id or self.DEFAULT_NEXTHOP_ID + attr_list = [ + (self.ACTION_FIELD, action), + (util.prepend_param_field(self.NEXTHOP_ID_FIELD), nexthop_id), + (util.prepend_param_field(self.ROUTE_METADATA_FIELD), metadata), + ] else: attr_list = [(self.ACTION_FIELD, action)] 
route_key = self.generate_app_db_key(vrf_id, dst) @@ -327,22 +547,32 @@ def create_route(self, nexthop_id=None, wcmp_group_id=None, action=None, def get_newly_created_asic_db_key(self): route_entries = util.get_keys(self.asic_db, self.ASIC_DB_TBL_NAME) for key in route_entries: - if key not in self._original_entries["%s:%s" % (self.asic_db, - self.ASIC_DB_TBL_NAME)]: + if ( + key + not in self._original_entries[ + "%s:%s" % (self.asic_db, self.ASIC_DB_TBL_NAME) + ] + ): asic_db_key = key break return asic_db_key def get_original_appl_db_entries_count(self): - return len(self._original_entries["%s:%s" % (self.appl_db, - (self.APP_DB_TBL_NAME + ":" - + self.TBL_NAME))]) + return len( + self._original_entries[ + "%s:%s" % (self.appl_db, (self.APP_DB_TBL_NAME + ":" + self.TBL_NAME)) + ] + ) def get_original_appl_state_db_entries_count(self): - return len(self._original_entries["%s:%s" % (self.appl_state_db, - (self.APP_DB_TBL_NAME + ":" - + self.TBL_NAME))]) + return len( + self._original_entries[ + "%s:%s" + % (self.appl_state_db, (self.APP_DB_TBL_NAME + ":" + self.TBL_NAME)) + ] + ) def get_original_asic_db_entries_count(self): - return len(self._original_entries["%s:%s" % (self.asic_db, - self.ASIC_DB_TBL_NAME)]) + return len( + self._original_entries["%s:%s" % (self.asic_db, self.ASIC_DB_TBL_NAME)] + ) diff --git a/tests/p4rt/l3_admit.py b/tests/p4rt/l3_admit.py new file mode 100644 index 0000000000..18fcc88482 --- /dev/null +++ b/tests/p4rt/l3_admit.py @@ -0,0 +1,84 @@ +from swsscommon import swsscommon + +import util +import json + + +class P4RtL3AdmitWrapper(util.DBInterface): + """Interface to interact with APP DB and ASIC DB tables for P4RT L3 Admit object.""" + + # database and SAI constants + APP_DB_TBL_NAME = swsscommon.APP_P4RT_TABLE_NAME + TBL_NAME = "FIXED_L3_ADMIT_TABLE" + ASIC_DB_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_MY_MAC" + SAI_ATTR_DST_MAC = "SAI_MY_MAC_ATTR_MAC_ADDRESS" + SAI_ATTR_DST_MAC_MASK = "SAI_MY_MAC_ATTR_MAC_ADDRESS_MASK" + 
SAI_ATTR_PORT_ID = "SAI_MY_MAC_ATTR_PORT_ID" + SAI_ATTR_PRIORITY = "SAI_MY_MAC_ATTR_PRIORITY" + + # attribute fields for l3 admit object in APP DB + IN_PORT_FIELD = "in_port" + DST_MAC_FIELD = "dst_mac" + PRIORITY = "priority" + L3_ADMIT_ACTION = "admit_to_l3" + + def generate_app_db_key(self, dst_mac, priority, port_id=None): + d = {} + d[util.prepend_match_field(self.DST_MAC_FIELD)] = dst_mac + d[self.PRIORITY] = priority + if port_id != "" and port_id != None: + d[util.prepend_match_field(self.IN_PORT_FIELD)] = port_id + key = json.dumps(d, separators=(",", ":")) + return self.TBL_NAME + ":" + key + + # create default l3 admit + def create_l3_admit( + self, dst_mac, priority, port_id=None + ): + attr_list = [ + (self.ACTION_FIELD, self.L3_ADMIT_ACTION), + ] + l3_admit_key = self.generate_app_db_key(dst_mac, priority, port_id) + self.set_app_db_entry(l3_admit_key, attr_list) + return l3_admit_key, attr_list + + def get_original_appl_db_entries_count(self): + return len( + self._original_entries[ + "%s:%s" % (self.appl_db, (self.APP_DB_TBL_NAME + ":" + self.TBL_NAME)) + ] + ) + + def get_original_appl_state_db_entries_count(self): + return len( + self._original_entries[ + "%s:%s" + % (self.appl_state_db, (self.APP_DB_TBL_NAME + ":" + self.TBL_NAME)) + ] + ) + + def get_original_asic_db_entries_count(self): + return len( + self._original_entries[ + "%s:%s" % (self.asic_db, self.ASIC_DB_TBL_NAME) + ] + ) + + # Fetch the asic_db_key for the first newly created my mac entry from created + # my mac in ASIC db. This API should only be used when only one key is + # expected to be created after original entries. + # Original my mac entries in asic db must be fetched using + # 'get_original_redis_entries' before fetching asic db key of newly created + # my mac entries. 
+ def get_newly_created_asic_db_key(self): + l3_admit_entries = util.get_keys(self.asic_db, self.ASIC_DB_TBL_NAME) + for key in l3_admit_entries: + if ( + key + not in self._original_entries[ + "%s:%s" % (self.asic_db, self.ASIC_DB_TBL_NAME) + ] + ): + asic_db_key = key + break + return asic_db_key \ No newline at end of file diff --git a/tests/p4rt/tables_definition.py b/tests/p4rt/tables_definition.py new file mode 100644 index 0000000000..fe3a077def --- /dev/null +++ b/tests/p4rt/tables_definition.py @@ -0,0 +1,35 @@ +from swsscommon import swsscommon + +import util +import json + + +class P4RtTableDefinitionWrapper(util.DBInterface): + """Interface to interact with APP DB for P4RT tables definition.""" + + # database constants + APP_DB_TBL_NAME = swsscommon.APP_P4RT_TABLE_NAME + TBL_NAME = swsscommon.APP_P4RT_TABLES_DEFINITION_TABLE_NAME + + # attribute fields for tables definition object + INFO_FIELD = "info" + + # tables definition object's attribute values + INFO_VALUE = "{\"tables\":[{\"actions\":[{\"alias\":\"drop\",\"id\":16777222,\"name\":\"ingress.routing.drop\",\"params\":null},{\"alias\":\"set_nexthop_id\",\"id\":16777221,\"name\":\"ingress.routing.set_nexthop_id\",\"params\":[{\"bitwidth\":0,\"format\":\"STRING\",\"id\":1,\"name\":\"nexthop_id\",\"references\":[{\"match\":\"nexthop_id\",\"table\":\"nexthop_table\"}]}]},{\"alias\":\"set_wcmp_group_id\",\"id\":16777220,\"name\":\"ingress.routing.set_wcmp_group_id\",\"params\":[{\"bitwidth\":0,\"format\":\"STRING\",\"id\":1,\"name\":\"wcmp_group_id\",\"references\":[{\"match\":\"wcmp_group_id\",\"table\":\"wcmp_group_table\"}]}]}],\"alias\":\"vipv4_table\",\"counter/unit\":\"BOTH\",\"id\":33554500,\"matchFields\":[{\"bitwidth\":32,\"format\":\"IPV4\",\"id\":1,\"name\":\"ipv4_dst\",\"references\":null}],\"name\":\"ingress.routing.vipv4_table\"}]}" + + + def generate_app_db_key(self, context): + d = {} + d["context"] = context + key = json.dumps(d, separators=(",", ":")) + return self.TBL_NAME + ":" + key 
+ + + # create tables definition set + def create_tables_definition(self, info=None): + info = info or self.INFO_VALUE + attr_list = [(self.INFO_FIELD, info)] + tables_definition_key = self.generate_app_db_key("0") + self.set_app_db_entry(tables_definition_key, attr_list) + return tables_definition_key, attr_list + diff --git a/tests/p4rt/test_l3.py b/tests/p4rt/test_l3.py index 42f32facbd..a16c8d3f03 100644 --- a/tests/p4rt/test_l3.py +++ b/tests/p4rt/test_l3.py @@ -8,9 +8,9 @@ class TestP4RTL3(object): - def _set_up(self, dvs): self._p4rt_router_intf_obj = l3.P4RtRouterInterfaceWrapper() + self._p4rt_gre_tunnel_obj = l3.P4RtGreTunnelWrapper() self._p4rt_neighbor_obj = l3.P4RtNeighborWrapper() self._p4rt_nexthop_obj = l3.P4RtNextHopWrapper() self._p4rt_route_obj = l3.P4RtRouteWrapper() @@ -18,12 +18,15 @@ def _set_up(self, dvs): self._vrf_obj = test_vrf.TestVrf() self._p4rt_router_intf_obj.set_up_databases(dvs) + self._p4rt_gre_tunnel_obj.set_up_databases(dvs) self._p4rt_neighbor_obj.set_up_databases(dvs) self._p4rt_nexthop_obj.set_up_databases(dvs) self._p4rt_route_obj.set_up_databases(dvs) self._p4rt_wcmp_group_obj.set_up_databases(dvs) self.response_consumer = swsscommon.NotificationConsumer( - self._p4rt_route_obj.appl_state_db, "APPL_DB_P4RT_TABLE_RESPONSE_CHANNEL") + self._p4rt_route_obj.appl_db, "APPL_DB_" + + swsscommon.APP_P4RT_TABLE_NAME + "_RESPONSE_CHANNEL" + ) def _set_vrf(self, dvs): # Create VRF. @@ -45,17 +48,24 @@ def test_IPv4RouteWithNexthopAddUpdateDeletePass(self, dvs, testlog): # Maintain list of original Application and ASIC DB entries before # adding new route. 
- db_list = ((self._p4rt_nexthop_obj.asic_db, - self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME),) + db_list = ( + (self._p4rt_nexthop_obj.asic_db, + self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME), + ) self._p4rt_nexthop_obj.get_original_redis_entries(db_list) - db_list = ((self._p4rt_route_obj.appl_db, - "%s:%s" % (self._p4rt_route_obj.APP_DB_TBL_NAME, - self._p4rt_route_obj.TBL_NAME)), - (self._p4rt_route_obj.appl_state_db, - "%s:%s" % (self._p4rt_route_obj.APP_DB_TBL_NAME, - self._p4rt_route_obj.TBL_NAME)), - (self._p4rt_route_obj.asic_db, - self._p4rt_route_obj.ASIC_DB_TBL_NAME)) + db_list = ( + ( + self._p4rt_route_obj.appl_db, + "%s:%s" + % (self._p4rt_route_obj.APP_DB_TBL_NAME, self._p4rt_route_obj.TBL_NAME), + ), + ( + self._p4rt_route_obj.appl_state_db, + "%s:%s" + % (self._p4rt_route_obj.APP_DB_TBL_NAME, self._p4rt_route_obj.TBL_NAME), + ), + (self._p4rt_route_obj.asic_db, self._p4rt_route_obj.ASIC_DB_TBL_NAME), + ) self._p4rt_route_obj.get_original_redis_entries(db_list) # Fetch the original key to oid information from Redis DB. @@ -63,11 +73,14 @@ def test_IPv4RouteWithNexthopAddUpdateDeletePass(self, dvs, testlog): _, original_key_oid_info = key_to_oid_helper.get_db_info() # Create router interface. - router_interface_id, router_intf_key, attr_list = ( - self._p4rt_router_intf_obj.create_router_interface() + ( + router_interface_id, + router_intf_key, + attr_list, + ) = self._p4rt_router_intf_obj.create_router_interface() + util.verify_response( + self.response_consumer, router_intf_key, attr_list, "SWSS_RC_SUCCESS" ) - util.verify_response(self.response_consumer, router_intf_key, - attr_list, "SWSS_RC_SUCCESS") # Verify that P4RT key to OID count incremented by 1 in Redis DB. count = 1 @@ -76,11 +89,10 @@ def test_IPv4RouteWithNexthopAddUpdateDeletePass(self, dvs, testlog): assert len(fvs) == len(original_key_oid_info) + count # Create neighbor. 
- neighbor_id, neighbor_key, attr_list = ( - self._p4rt_neighbor_obj.create_neighbor() + neighbor_id, neighbor_key, attr_list = self._p4rt_neighbor_obj.create_neighbor() + util.verify_response( + self.response_consumer, neighbor_key, attr_list, "SWSS_RC_SUCCESS" ) - util.verify_response(self.response_consumer, neighbor_key, attr_list, - "SWSS_RC_SUCCESS") # Verify that P4RT key to OID count incremented by 1 in Redis DB. count += 1 @@ -89,11 +101,10 @@ def test_IPv4RouteWithNexthopAddUpdateDeletePass(self, dvs, testlog): assert len(fvs) == len(original_key_oid_info) + count # Create nexthop. - nexthop_id, nexthop_key, attr_list = ( - self._p4rt_nexthop_obj.create_next_hop() + nexthop_id, nexthop_key, attr_list = self._p4rt_nexthop_obj.create_next_hop() + util.verify_response( + self.response_consumer, nexthop_key, attr_list, "SWSS_RC_SUCCESS" ) - util.verify_response(self.response_consumer, nexthop_key, attr_list, - "SWSS_RC_SUCCESS") # get nexthop_oid of newly created nexthop nexthop_oid = self._p4rt_nexthop_obj.get_newly_created_nexthop_oid() assert nexthop_oid is not None @@ -106,8 +117,9 @@ def test_IPv4RouteWithNexthopAddUpdateDeletePass(self, dvs, testlog): # Create route entry. route_key, attr_list = self._p4rt_route_obj.create_route(nexthop_id) - util.verify_response(self.response_consumer, route_key, attr_list, - "SWSS_RC_SUCCESS") + util.verify_response( + self.response_consumer, route_key, attr_list, "SWSS_RC_SUCCESS" + ) # Verify that P4RT key to OID count incremented by 1 in Redis DB. count += 1 @@ -118,36 +130,43 @@ def test_IPv4RouteWithNexthopAddUpdateDeletePass(self, dvs, testlog): # Query application database for route entries. 
route_entries = util.get_keys( self._p4rt_route_obj.appl_db, - self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, + ) assert len(route_entries) == ( self._p4rt_route_obj.get_original_appl_db_entries_count() + 1 ) # Query application database for newly created route key. - (status, fvs) = util.get_key(self._p4rt_route_obj.appl_db, - self._p4rt_route_obj.APP_DB_TBL_NAME, - route_key) + (status, fvs) = util.get_key( + self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key, + ) assert status == True util.verify_attr(fvs, attr_list) # Query application state database for route entries. state_route_entries = util.get_keys( self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, + ) assert len(state_route_entries) == ( self._p4rt_route_obj.get_original_appl_state_db_entries_count() + 1 ) # Query application state database for newly created route key. - (status, fvs) = util.get_key(self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME, - route_key) + (status, fvs) = util.get_key( + self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key, + ) assert status == True util.verify_attr(fvs, attr_list) # Query ASIC database for route entries. - route_entries = util.get_keys(self._p4rt_route_obj.asic_db, - self._p4rt_route_obj.ASIC_DB_TBL_NAME) + route_entries = util.get_keys( + self._p4rt_route_obj.asic_db, self._p4rt_route_obj.ASIC_DB_TBL_NAME + ) assert len(route_entries) == ( self._p4rt_route_obj.get_original_asic_db_entries_count() + 1 ) @@ -155,17 +174,22 @@ def test_IPv4RouteWithNexthopAddUpdateDeletePass(self, dvs, testlog): # Query ASIC database for newly created route key. 
asic_db_key = self._p4rt_route_obj.get_newly_created_asic_db_key() assert asic_db_key is not None - (status, fvs) = util.get_key(self._p4rt_route_obj.asic_db, - self._p4rt_route_obj.ASIC_DB_TBL_NAME, - asic_db_key) + (status, fvs) = util.get_key( + self._p4rt_route_obj.asic_db, + self._p4rt_route_obj.ASIC_DB_TBL_NAME, + asic_db_key, + ) assert status == True attr_list = [(self._p4rt_route_obj.SAI_ATTR_NEXTHOP_ID, nexthop_oid)] util.verify_attr(fvs, attr_list) - # Update route entry. - route_key, attr_list = self._p4rt_route_obj.create_route(action="drop") - util.verify_response(self.response_consumer, route_key, attr_list, - "SWSS_RC_SUCCESS") + # Update route entry to set_nexthop_id_and_metadata. + route_key, attr_list = self._p4rt_route_obj.create_route( + action="set_nexthop_id_and_metadata", metadata="2" + ) + util.verify_response( + self.response_consumer, route_key, attr_list, "SWSS_RC_SUCCESS" + ) # Verify that P4RT key to OID count did not change in Redis DB. status, fvs = key_to_oid_helper.get_db_info() @@ -175,38 +199,139 @@ def test_IPv4RouteWithNexthopAddUpdateDeletePass(self, dvs, testlog): # Query application database for route entries. route_entries = util.get_keys( self._p4rt_route_obj.appl_db, - self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, + ) assert len(route_entries) == ( self._p4rt_route_obj.get_original_appl_db_entries_count() + 1 ) # Query application database for the updated route key. 
- (status, fvs) = util.get_key(self._p4rt_route_obj.appl_db, - self._p4rt_route_obj.APP_DB_TBL_NAME, - route_key) + (status, fvs) = util.get_key( + self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key, + ) + assert status == True + attr_list_appl_db = [ + (self._p4rt_route_obj.ACTION_FIELD, "set_nexthop_id_and_metadata"), + ( + util.prepend_param_field( + self._p4rt_route_obj.NEXTHOP_ID_FIELD), + nexthop_id, + ), + ( + util.prepend_param_field( + self._p4rt_route_obj.ROUTE_METADATA_FIELD), + "2", + ), + ] + util.verify_attr(fvs, attr_list_appl_db) + + # Query application state database for route entries. + state_route_entries = util.get_keys( + self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, + ) + assert len(state_route_entries) == ( + self._p4rt_route_obj.get_original_appl_state_db_entries_count() + 1 + ) + + # Query application state database for the updated route key. + (status, fvs) = util.get_key( + self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key, + ) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query ASIC database for route entries. + route_entries = util.get_keys( + self._p4rt_route_obj.asic_db, self._p4rt_route_obj.ASIC_DB_TBL_NAME + ) + assert len(route_entries) == ( + self._p4rt_route_obj.get_original_asic_db_entries_count() + 1 + ) + + # Query ASIC database for the updated route key. 
+ asic_db_key = self._p4rt_route_obj.get_newly_created_asic_db_key() + assert asic_db_key is not None + (status, fvs) = util.get_key( + self._p4rt_route_obj.asic_db, + self._p4rt_route_obj.ASIC_DB_TBL_NAME, + asic_db_key, + ) assert status == True - attr_list_appl_db = [(self._p4rt_route_obj.ACTION_FIELD, "drop"), - (util.prepend_param_field(self._p4rt_route_obj.NEXTHOP_ID_FIELD), nexthop_id)] + attr_list = [ + (self._p4rt_route_obj.SAI_ATTR_NEXTHOP_ID, nexthop_oid), + (self._p4rt_route_obj.SAI_ATTR_META_DATA, "2"), + ] + util.verify_attr(fvs, attr_list) + + # Update route entry to drop. + route_key, attr_list = self._p4rt_route_obj.create_route(action="drop") + util.verify_response( + self.response_consumer, route_key, attr_list, "SWSS_RC_SUCCESS" + ) + + # Verify that P4RT key to OID count did not change in Redis DB. + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Query application database for route entries. + route_entries = util.get_keys( + self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, + ) + assert len(route_entries) == ( + self._p4rt_route_obj.get_original_appl_db_entries_count() + 1 + ) + + # Query application database for the updated route key. + (status, fvs) = util.get_key( + self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key, + ) + assert status == True + attr_list_appl_db = [ + (self._p4rt_route_obj.ACTION_FIELD, "drop"), + ( + util.prepend_param_field( + self._p4rt_route_obj.NEXTHOP_ID_FIELD), + nexthop_id, + ), + ( + util.prepend_param_field( + self._p4rt_route_obj.ROUTE_METADATA_FIELD), + "2", + ), + ] util.verify_attr(fvs, attr_list_appl_db) # Query application state database for route entries. 
state_route_entries = util.get_keys( self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, + ) assert len(state_route_entries) == ( self._p4rt_route_obj.get_original_appl_state_db_entries_count() + 1 ) # Query application state database for the updated route key. - (status, fvs) = util.get_key(self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME, - route_key) + (status, fvs) = util.get_key( + self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key, + ) assert status == True util.verify_attr(fvs, attr_list) # Query ASIC database for route entries. - route_entries = util.get_keys(self._p4rt_route_obj.asic_db, - self._p4rt_route_obj.ASIC_DB_TBL_NAME) + route_entries = util.get_keys( + self._p4rt_route_obj.asic_db, self._p4rt_route_obj.ASIC_DB_TBL_NAME + ) assert len(route_entries) == ( self._p4rt_route_obj.get_original_asic_db_entries_count() + 1 ) @@ -214,18 +339,26 @@ def test_IPv4RouteWithNexthopAddUpdateDeletePass(self, dvs, testlog): # Query ASIC database for the updated route key. 
asic_db_key = self._p4rt_route_obj.get_newly_created_asic_db_key() assert asic_db_key is not None - (status, fvs) = util.get_key(self._p4rt_route_obj.asic_db, - self._p4rt_route_obj.ASIC_DB_TBL_NAME, - asic_db_key) + (status, fvs) = util.get_key( + self._p4rt_route_obj.asic_db, + self._p4rt_route_obj.ASIC_DB_TBL_NAME, + asic_db_key, + ) assert status == True - attr_list = [(self._p4rt_route_obj.SAI_ATTR_NEXTHOP_ID, "oid:0x0"), - (self._p4rt_route_obj.SAI_ATTR_PACKET_ACTION, self._p4rt_route_obj.SAI_ATTR_PACKET_ACTION_DROP)] + attr_list = [ + (self._p4rt_route_obj.SAI_ATTR_NEXTHOP_ID, "oid:0x0"), + ( + self._p4rt_route_obj.SAI_ATTR_PACKET_ACTION, + self._p4rt_route_obj.SAI_ATTR_PACKET_ACTION_DROP, + ), + (self._p4rt_route_obj.SAI_ATTR_META_DATA, "0"), + ] util.verify_attr(fvs, attr_list) # Remove route entry. self._p4rt_route_obj.remove_app_db_entry(route_key) - util.verify_response( - self.response_consumer, route_key, [], "SWSS_RC_SUCCESS") + util.verify_response(self.response_consumer, + route_key, [], "SWSS_RC_SUCCESS") # Verify that P4RT key to OID count decremented by 1 in Redis DB. count -= 1 @@ -235,8 +368,8 @@ def test_IPv4RouteWithNexthopAddUpdateDeletePass(self, dvs, testlog): # Remove nexthop. self._p4rt_nexthop_obj.remove_app_db_entry(nexthop_key) - util.verify_response(self.response_consumer, nexthop_key, [], - "SWSS_RC_SUCCESS") + util.verify_response(self.response_consumer, + nexthop_key, [], "SWSS_RC_SUCCESS") # Verify that P4RT key to OID count decremented by 1 in Redis DB. count -= 1 @@ -246,8 +379,9 @@ def test_IPv4RouteWithNexthopAddUpdateDeletePass(self, dvs, testlog): # Remove neighbor. self._p4rt_neighbor_obj.remove_app_db_entry(neighbor_key) - util.verify_response(self.response_consumer, neighbor_key, [], - "SWSS_RC_SUCCESS") + util.verify_response( + self.response_consumer, neighbor_key, [], "SWSS_RC_SUCCESS" + ) # Verify that P4RT key to OID count decremented by 1 in Redis DB. 
count -= 1 @@ -258,53 +392,63 @@ def test_IPv4RouteWithNexthopAddUpdateDeletePass(self, dvs, testlog): # Remove router interface. self._p4rt_router_intf_obj.remove_app_db_entry(router_intf_key) util.verify_response( - self.response_consumer, router_intf_key, [], "SWSS_RC_SUCCESS") + self.response_consumer, router_intf_key, [], "SWSS_RC_SUCCESS" + ) # Verify that P4RT key to OID count is same as the original count. status, fvs = key_to_oid_helper.get_db_info() - assert status == True + assert status == False assert len(fvs) == len(original_key_oid_info) # Query application database for route entries. route_entries = util.get_keys( self._p4rt_route_obj.appl_db, - self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, + ) assert len(route_entries) == ( self._p4rt_route_obj.get_original_appl_db_entries_count() ) # Verify that the route_key no longer exists in application database. - (status, fsv) = util.get_key(self._p4rt_route_obj.appl_db, - self._p4rt_route_obj.APP_DB_TBL_NAME, - route_key) + (status, fsv) = util.get_key( + self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key, + ) assert status == False # Query application state database for route entries. state_route_entries = util.get_keys( self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, + ) assert len(state_route_entries) == ( self._p4rt_route_obj.get_original_appl_state_db_entries_count() ) # Verify that the route_key no longer exists in application state # database. 
- (status, fsv) = util.get_key(self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME, - route_key) + (status, fsv) = util.get_key( + self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key, + ) assert status == False # Query ASIC database for route entries. - route_entries = util.get_keys(self._p4rt_route_obj.asic_db, - self._p4rt_route_obj.ASIC_DB_TBL_NAME) + route_entries = util.get_keys( + self._p4rt_route_obj.asic_db, self._p4rt_route_obj.ASIC_DB_TBL_NAME + ) assert len(route_entries) == ( self._p4rt_route_obj.get_original_asic_db_entries_count() ) # Verify that removed route no longer exists in ASIC database. - (status, fvs) = util.get_key(self._p4rt_route_obj.asic_db, - self._p4rt_route_obj.ASIC_DB_TBL_NAME, - asic_db_key) + (status, fvs) = util.get_key( + self._p4rt_route_obj.asic_db, + self._p4rt_route_obj.ASIC_DB_TBL_NAME, + asic_db_key, + ) assert status == False self._clean_vrf(dvs) @@ -318,28 +462,51 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): # Maintain list of original Application and ASIC DB entries before # adding new route. 
- db_list = ((self._p4rt_route_obj.appl_db, - "%s:%s" % (self._p4rt_route_obj.APP_DB_TBL_NAME, - self._p4rt_route_obj.TBL_NAME)), - (self._p4rt_route_obj.appl_state_db, - "%s:%s" % (self._p4rt_route_obj.APP_DB_TBL_NAME, - self._p4rt_route_obj.TBL_NAME)), - (self._p4rt_route_obj.asic_db, - self._p4rt_route_obj.ASIC_DB_TBL_NAME)) + db_list = ( + ( + self._p4rt_route_obj.appl_db, + "%s:%s" + % (self._p4rt_route_obj.APP_DB_TBL_NAME, self._p4rt_route_obj.TBL_NAME), + ), + ( + self._p4rt_route_obj.appl_state_db, + "%s:%s" + % (self._p4rt_route_obj.APP_DB_TBL_NAME, self._p4rt_route_obj.TBL_NAME), + ), + (self._p4rt_route_obj.asic_db, self._p4rt_route_obj.ASIC_DB_TBL_NAME), + ) self._p4rt_route_obj.get_original_redis_entries(db_list) - db_list = ((self._p4rt_nexthop_obj.asic_db, - self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME),) + db_list = ( + (self._p4rt_nexthop_obj.asic_db, + self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME), + ) self._p4rt_nexthop_obj.get_original_redis_entries(db_list) - db_list = ((self._p4rt_wcmp_group_obj.appl_db, - "%s:%s" % (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - self._p4rt_wcmp_group_obj.TBL_NAME)), - (self._p4rt_wcmp_group_obj.appl_state_db, - "%s:%s" % (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - self._p4rt_wcmp_group_obj.TBL_NAME)), - (self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME), - (self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME)) + db_list = ( + ( + self._p4rt_wcmp_group_obj.appl_db, + "%s:%s" + % ( + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + self._p4rt_wcmp_group_obj.TBL_NAME, + ), + ), + ( + self._p4rt_wcmp_group_obj.appl_state_db, + "%s:%s" + % ( + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + self._p4rt_wcmp_group_obj.TBL_NAME, + ), + ), + ( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, + ), + ( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME, + ), + ) 
self._p4rt_wcmp_group_obj.get_original_redis_entries(db_list) # Fetch the original key to oid information from Redis DB. @@ -347,11 +514,14 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): _, original_key_oid_info = key_to_oid_helper.get_db_info() # Create router interface. - router_interface_id, router_intf_key, attr_list = ( - self._p4rt_router_intf_obj.create_router_interface() + ( + router_interface_id, + router_intf_key, + attr_list, + ) = self._p4rt_router_intf_obj.create_router_interface() + util.verify_response( + self.response_consumer, router_intf_key, attr_list, "SWSS_RC_SUCCESS" ) - util.verify_response(self.response_consumer, router_intf_key, attr_list, - "SWSS_RC_SUCCESS") # Verify that P4RT key to OID count incremented by 1 in Redis DB. count = 1 @@ -360,11 +530,12 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): assert len(fvs) == len(original_key_oid_info) + count # Create neighbor. - neighbor_id, neighbor_key, attr_list = ( - self._p4rt_neighbor_obj.create_neighbor(ipv4=False) + neighbor_id, neighbor_key, attr_list = self._p4rt_neighbor_obj.create_neighbor( + ipv4=False + ) + util.verify_response( + self.response_consumer, neighbor_key, attr_list, "SWSS_RC_SUCCESS" ) - util.verify_response(self.response_consumer, neighbor_key, attr_list, - "SWSS_RC_SUCCESS") # Verify that P4RT key to OID count incremented by 1 in Redis DB. count += 1 @@ -373,11 +544,12 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): assert len(fvs) == len(original_key_oid_info) + count # Create nexthop. - nexthop_id, nexthop_key, attr_list = ( - self._p4rt_nexthop_obj.create_next_hop(ipv4=False) + nexthop_id, nexthop_key, attr_list = self._p4rt_nexthop_obj.create_next_hop( + ipv4=False + ) + util.verify_response( + self.response_consumer, nexthop_key, attr_list, "SWSS_RC_SUCCESS" ) - util.verify_response(self.response_consumer, nexthop_key, attr_list, - "SWSS_RC_SUCCESS") # Get the oid of the newly created nexthop. 
nexthop_oid = self._p4rt_nexthop_obj.get_newly_created_nexthop_oid() assert nexthop_oid is not None @@ -389,11 +561,14 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): assert len(fvs) == len(original_key_oid_info) + count # Create wcmp group. - wcmp_group_id, wcmp_group_key, attr_list = ( - self._p4rt_wcmp_group_obj.create_wcmp_group() + ( + wcmp_group_id, + wcmp_group_key, + attr_list, + ) = self._p4rt_wcmp_group_obj.create_wcmp_group() + util.verify_response( + self.response_consumer, wcmp_group_key, attr_list, "SWSS_RC_SUCCESS" ) - util.verify_response(self.response_consumer, wcmp_group_key, attr_list, - "SWSS_RC_SUCCESS") # Verify that P4RT key to OID count incremented by 2 in Redis DB # (1 each for WCMP group and member). @@ -405,60 +580,76 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): # Query application database for wcmp group entries. wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.appl_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + self._p4rt_wcmp_group_obj.TBL_NAME) + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + + ":" + + self._p4rt_wcmp_group_obj.TBL_NAME, + ) assert len(wcmp_group_entries) == ( self._p4rt_wcmp_group_obj.get_original_appl_db_entries_count() + 1 ) # Query application database for newly created wcmp group key. - (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.appl_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - wcmp_group_key) + (status, fvs) = util.get_key( + self._p4rt_wcmp_group_obj.appl_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + wcmp_group_key, + ) assert status == True util.verify_attr(fvs, attr_list) # Query application state database for wcmp group entries. 
state_wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.appl_state_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + self._p4rt_wcmp_group_obj.TBL_NAME) + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + + ":" + + self._p4rt_wcmp_group_obj.TBL_NAME, + ) assert len(state_wcmp_group_entries) == ( - self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() - + 1 + self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() + 1 ) # Query application state database for newly created wcmp group key. - (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.appl_state_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - wcmp_group_key) + (status, fvs) = util.get_key( + self._p4rt_wcmp_group_obj.appl_state_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + wcmp_group_key, + ) assert status == True util.verify_attr(fvs, attr_list) # Query ASIC database for wcmp group entries. - wcmp_group_entries = util.get_keys(self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME) + wcmp_group_entries = util.get_keys( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, + ) assert len(wcmp_group_entries) == ( - self._p4rt_wcmp_group_obj.get_original_asic_db_group_entries_count() - + 1 + self._p4rt_wcmp_group_obj.get_original_asic_db_group_entries_count() + 1 ) # Query ASIC database for newly created wcmp group oid. 
wcmp_group_oid = self._p4rt_wcmp_group_obj.get_newly_created_wcmp_group_oid() assert wcmp_group_oid is not None - attr_list = [(self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_TYPE, - self._p4rt_wcmp_group_obj.SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP)] - (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, - wcmp_group_oid) + attr_list = [ + ( + self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_TYPE, + self._p4rt_wcmp_group_obj.SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP, + ) + ] + (status, fvs) = util.get_key( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, + wcmp_group_oid, + ) assert status == True util.verify_attr(fvs, attr_list) # Query ASIC database for wcmp group member entries. wcmp_group_member_entries = util.get_keys( self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME) + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME, + ) assert len(wcmp_group_member_entries) == ( - self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() - + 1 + self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() + 1 ) # Query ASIC database for newly crated wcmp group member key. 
@@ -466,23 +657,32 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): self._p4rt_wcmp_group_obj.get_newly_created_wcmp_group_member_asic_db_key() ) assert asic_db_group_member_key is not None - attr_list = [(self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_NEXTHOP_GROUP_ID, - wcmp_group_oid), - (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_NEXTHOP_ID, - nexthop_oid), - (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_WEIGHT, - str(self._p4rt_wcmp_group_obj.DEFAULT_WEIGHT))] - (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME, - asic_db_group_member_key) + attr_list = [ + ( + self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_NEXTHOP_GROUP_ID, + wcmp_group_oid, + ), + (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_NEXTHOP_ID, nexthop_oid), + ( + self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_WEIGHT, + str(self._p4rt_wcmp_group_obj.DEFAULT_WEIGHT), + ), + ] + (status, fvs) = util.get_key( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME, + asic_db_group_member_key, + ) assert status == True util.verify_attr(fvs, attr_list) # Create route entry. route_key, attr_list = self._p4rt_route_obj.create_route( - wcmp_group_id=wcmp_group_id, action="set_wcmp_group_id", dst="2001:db8::/32") - util.verify_response(self.response_consumer, route_key, attr_list, - "SWSS_RC_SUCCESS") + wcmp_group_id=wcmp_group_id, action="set_wcmp_group_id", dst="2001:db8::/32" + ) + util.verify_response( + self.response_consumer, route_key, attr_list, "SWSS_RC_SUCCESS" + ) # Verify that P4RT key to OID count incremented by 1 in Redis DB. count += 1 @@ -493,36 +693,43 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): # Query application database for route entries. 
route_entries = util.get_keys( self._p4rt_route_obj.appl_db, - self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, + ) assert len(route_entries) == ( self._p4rt_route_obj.get_original_appl_db_entries_count() + 1 ) # Query application database for newly created route key. - (status, fvs) = util.get_key(self._p4rt_route_obj.appl_db, - self._p4rt_route_obj.APP_DB_TBL_NAME, - route_key) + (status, fvs) = util.get_key( + self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key, + ) assert status == True util.verify_attr(fvs, attr_list) # Query application state database for route entries. state_route_entries = util.get_keys( self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, + ) assert len(state_route_entries) == ( self._p4rt_route_obj.get_original_appl_state_db_entries_count() + 1 ) # Query application state database for newly created route key. - (status, fvs) = util.get_key(self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME, - route_key) + (status, fvs) = util.get_key( + self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key, + ) assert status == True util.verify_attr(fvs, attr_list) # Query ASIC database for route entries. - route_entries = util.get_keys(self._p4rt_route_obj.asic_db, - self._p4rt_route_obj.ASIC_DB_TBL_NAME) + route_entries = util.get_keys( + self._p4rt_route_obj.asic_db, self._p4rt_route_obj.ASIC_DB_TBL_NAME + ) assert len(route_entries) == ( self._p4rt_route_obj.get_original_asic_db_entries_count() + 1 ) @@ -530,19 +737,23 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): # Query ASIC database for newly created route key. 
asic_db_key = self._p4rt_route_obj.get_newly_created_asic_db_key() assert asic_db_key is not None - (status, fvs) = util.get_key(self._p4rt_route_obj.asic_db, - self._p4rt_route_obj.ASIC_DB_TBL_NAME, - asic_db_key) + (status, fvs) = util.get_key( + self._p4rt_route_obj.asic_db, + self._p4rt_route_obj.ASIC_DB_TBL_NAME, + asic_db_key, + ) assert status == True attr_list = [ (self._p4rt_route_obj.SAI_ATTR_NEXTHOP_ID, wcmp_group_oid)] util.verify_attr(fvs, attr_list) - # Update route entry. + # Update route entry to drop action route_key, attr_list = self._p4rt_route_obj.create_route( - action="drop", dst="2001:db8::/32") - util.verify_response(self.response_consumer, route_key, attr_list, - "SWSS_RC_SUCCESS") + action="drop", dst="2001:db8::/32" + ) + util.verify_response( + self.response_consumer, route_key, attr_list, "SWSS_RC_SUCCESS" + ) # Verify that P4RT key to OID count did not change in Redis DB. status, fvs = key_to_oid_helper.get_db_info() @@ -552,38 +763,51 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): # Query application database for route entries. route_entries = util.get_keys( self._p4rt_route_obj.appl_db, - self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, + ) assert len(route_entries) == ( self._p4rt_route_obj.get_original_appl_db_entries_count() + 1 ) # Query application database for the updated route key. 
- (status, fvs) = util.get_key(self._p4rt_route_obj.appl_db, - self._p4rt_route_obj.APP_DB_TBL_NAME, - route_key) + (status, fvs) = util.get_key( + self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key, + ) assert status == True - attr_list_appl_db = [(self._p4rt_route_obj.ACTION_FIELD, "drop"), - (util.prepend_param_field(self._p4rt_route_obj.WCMP_GROUP_ID_FIELD), wcmp_group_id)] + attr_list_appl_db = [ + (self._p4rt_route_obj.ACTION_FIELD, "drop"), + ( + util.prepend_param_field( + self._p4rt_route_obj.WCMP_GROUP_ID_FIELD), + wcmp_group_id, + ), + ] util.verify_attr(fvs, attr_list_appl_db) # Query application state database for route entries. state_route_entries = util.get_keys( self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, + ) assert len(state_route_entries) == ( self._p4rt_route_obj.get_original_appl_state_db_entries_count() + 1 ) # Query application state database for the updated route key. - (status, fvs) = util.get_key(self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME, - route_key) + (status, fvs) = util.get_key( + self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key, + ) assert status == True util.verify_attr(fvs, attr_list) # Query ASIC database for route entries. - route_entries = util.get_keys(self._p4rt_route_obj.asic_db, - self._p4rt_route_obj.ASIC_DB_TBL_NAME) + route_entries = util.get_keys( + self._p4rt_route_obj.asic_db, self._p4rt_route_obj.ASIC_DB_TBL_NAME + ) assert len(route_entries) == ( self._p4rt_route_obj.get_original_asic_db_entries_count() + 1 ) @@ -591,18 +815,108 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): # Query ASIC database for the updated route key. 
asic_db_key = self._p4rt_route_obj.get_newly_created_asic_db_key() assert asic_db_key is not None - (status, fvs) = util.get_key(self._p4rt_route_obj.asic_db, - self._p4rt_route_obj.ASIC_DB_TBL_NAME, - asic_db_key) + (status, fvs) = util.get_key( + self._p4rt_route_obj.asic_db, + self._p4rt_route_obj.ASIC_DB_TBL_NAME, + asic_db_key, + ) + assert status == True + attr_list = [ + (self._p4rt_route_obj.SAI_ATTR_NEXTHOP_ID, "oid:0x0"), + ( + self._p4rt_route_obj.SAI_ATTR_PACKET_ACTION, + self._p4rt_route_obj.SAI_ATTR_PACKET_ACTION_DROP, + ), + ] + util.verify_attr(fvs, attr_list) + + # Update route entry to trap action. + route_key, attr_list = self._p4rt_route_obj.create_route( + action="trap", dst="2001:db8::/32" + ) + util.verify_response( + self.response_consumer, route_key, attr_list, "SWSS_RC_SUCCESS" + ) + + # Verify that P4RT key to OID count did not change in Redis DB. + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Query application database for route entries. + route_entries = util.get_keys( + self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, + ) + assert len(route_entries) == ( + self._p4rt_route_obj.get_original_appl_db_entries_count() + 1 + ) + + # Query application database for the updated route key. + (status, fvs) = util.get_key( + self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key, + ) + assert status == True + attr_list_appl_db = [ + (self._p4rt_route_obj.ACTION_FIELD, "trap"), + ( + util.prepend_param_field( + self._p4rt_route_obj.WCMP_GROUP_ID_FIELD), + wcmp_group_id, + ), + ] + util.verify_attr(fvs, attr_list_appl_db) + + # Query application state database for route entries. 
+ state_route_entries = util.get_keys( + self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, + ) + assert len(state_route_entries) == ( + self._p4rt_route_obj.get_original_appl_state_db_entries_count() + 1 + ) + + # Query application state database for the updated route key. + (status, fvs) = util.get_key( + self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key, + ) assert status == True - attr_list = [(self._p4rt_route_obj.SAI_ATTR_NEXTHOP_ID, "oid:0x0"), - (self._p4rt_route_obj.SAI_ATTR_PACKET_ACTION, self._p4rt_route_obj.SAI_ATTR_PACKET_ACTION_DROP)] + util.verify_attr(fvs, attr_list) + + # Query ASIC database for route entries. + route_entries = util.get_keys( + self._p4rt_route_obj.asic_db, self._p4rt_route_obj.ASIC_DB_TBL_NAME + ) + assert len(route_entries) == ( + self._p4rt_route_obj.get_original_asic_db_entries_count() + 1 + ) + + # Query ASIC database for the updated route key. + asic_db_key = self._p4rt_route_obj.get_newly_created_asic_db_key() + assert asic_db_key is not None + (status, fvs) = util.get_key( + self._p4rt_route_obj.asic_db, + self._p4rt_route_obj.ASIC_DB_TBL_NAME, + asic_db_key, + ) + assert status == True + attr_list = [ + (self._p4rt_route_obj.SAI_ATTR_NEXTHOP_ID, "oid:0x0"), + ( + self._p4rt_route_obj.SAI_ATTR_PACKET_ACTION, + self._p4rt_route_obj.SAI_ATTR_PACKET_ACTION_TRAP, + ), + ] util.verify_attr(fvs, attr_list) # Remove route entry. self._p4rt_route_obj.remove_app_db_entry(route_key) - util.verify_response( - self.response_consumer, route_key, [], "SWSS_RC_SUCCESS") + util.verify_response(self.response_consumer, + route_key, [], "SWSS_RC_SUCCESS") # Verify that P4RT key to OID count decremented by 1 in Redis DB. count -= 1 @@ -612,8 +926,9 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): # Remove wcmp group entry. 
self._p4rt_wcmp_group_obj.remove_app_db_entry(wcmp_group_key) - util.verify_response(self.response_consumer, wcmp_group_key, [], - "SWSS_RC_SUCCESS") + util.verify_response( + self.response_consumer, wcmp_group_key, [], "SWSS_RC_SUCCESS" + ) # Verify that P4RT key to OID count decremented by 2 in Redis DB # (1 each for WCMP group and member). @@ -624,8 +939,8 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): # Remove nexthop. self._p4rt_nexthop_obj.remove_app_db_entry(nexthop_key) - util.verify_response(self.response_consumer, nexthop_key, [], - "SWSS_RC_SUCCESS") + util.verify_response(self.response_consumer, + nexthop_key, [], "SWSS_RC_SUCCESS") # Verify that P4RT key to OID count decremented by 1 in Redis DB. count -= 1 @@ -635,8 +950,9 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): # Remove neighbor. self._p4rt_neighbor_obj.remove_app_db_entry(neighbor_key) - util.verify_response(self.response_consumer, neighbor_key, [], - "SWSS_RC_SUCCESS") + util.verify_response( + self.response_consumer, neighbor_key, [], "SWSS_RC_SUCCESS" + ) # Verify that P4RT key to OID count decremented by 1 in Redis DB. count -= 1 @@ -647,111 +963,547 @@ def test_IPv6WithWcmpRouteAddUpdateDeletePass(self, dvs, testlog): # Remove router interface. self._p4rt_router_intf_obj.remove_app_db_entry(router_intf_key) util.verify_response( - self.response_consumer, router_intf_key, [], "SWSS_RC_SUCCESS") + self.response_consumer, router_intf_key, [], "SWSS_RC_SUCCESS" + ) # Verify that P4RT key to OID count is same as original count. status, fvs = key_to_oid_helper.get_db_info() - assert status == True + assert status == False assert len(fvs) == len(original_key_oid_info) # Query application database for route entries. 
route_entries = util.get_keys( self._p4rt_route_obj.appl_db, - self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, + ) assert len(route_entries) == ( self._p4rt_route_obj.get_original_appl_db_entries_count() ) # Verify that the route_key no longer exists in application database. - (status, fsv) = util.get_key(self._p4rt_route_obj.appl_db, - self._p4rt_route_obj.APP_DB_TBL_NAME, - route_key) + (status, fsv) = util.get_key( + self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key, + ) assert status == False # Query application state database for route entries. state_route_entries = util.get_keys( self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, + ) assert len(state_route_entries) == ( self._p4rt_route_obj.get_original_appl_state_db_entries_count() ) # Verify that the route_key no longer exists in application state # database. - (status, fsv) = util.get_key(self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME, - route_key) + (status, fsv) = util.get_key( + self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key, + ) assert status == False # Query ASIC database for route entries. - route_entries = util.get_keys(self._p4rt_route_obj.asic_db, - self._p4rt_route_obj.ASIC_DB_TBL_NAME) + route_entries = util.get_keys( + self._p4rt_route_obj.asic_db, self._p4rt_route_obj.ASIC_DB_TBL_NAME + ) assert len(route_entries) == ( self._p4rt_route_obj.get_original_asic_db_entries_count() ) # Verify that removed route no longer exists in ASIC database. 
- (status, fvs) = util.get_key(self._p4rt_route_obj.asic_db, - self._p4rt_route_obj.ASIC_DB_TBL_NAME, - asic_db_key) + (status, fvs) = util.get_key( + self._p4rt_route_obj.asic_db, + self._p4rt_route_obj.ASIC_DB_TBL_NAME, + asic_db_key, + ) assert status == False # Query application database for wcmp group entries. wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.appl_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + self._p4rt_wcmp_group_obj.TBL_NAME) + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + + ":" + + self._p4rt_wcmp_group_obj.TBL_NAME, + ) assert len(wcmp_group_entries) == ( self._p4rt_wcmp_group_obj.get_original_appl_db_entries_count() ) # Verify that the route_key no longer exists in application database. - (status, fsv) = util.get_key(self._p4rt_wcmp_group_obj.appl_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - wcmp_group_key) + (status, fsv) = util.get_key( + self._p4rt_wcmp_group_obj.appl_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + wcmp_group_key, + ) assert status == False # Query application state database for wcmp group entries. state_wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.appl_state_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + self._p4rt_wcmp_group_obj.TBL_NAME) + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + + ":" + + self._p4rt_wcmp_group_obj.TBL_NAME, + ) assert len(state_wcmp_group_entries) == ( self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() ) - # Verify that the wcmp_group_key no longer exists in application state - # database. - (status, fsv) = util.get_key(self._p4rt_wcmp_group_obj.appl_state_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - wcmp_group_key) + # Verify that the wcmp_group_key no longer exists in application state + # database. + (status, fsv) = util.get_key( + self._p4rt_wcmp_group_obj.appl_state_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + wcmp_group_key, + ) + assert status == False + + # Query ASIC database for wcmp group entries. 
+ wcmp_group_entries = util.get_keys( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, + ) + assert len(wcmp_group_entries) == ( + self._p4rt_wcmp_group_obj.get_original_asic_db_group_entries_count() + ) + + # Verify that removed wcmp group no longer exists in ASIC database. + (status, fvs) = util.get_key( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, + wcmp_group_oid, + ) + assert status == False + + # Query ASIC database for wcmp group member entries. + wcmp_group_member_entries = util.get_keys( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME, + ) + assert len(wcmp_group_member_entries) == ( + self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() + ) + + # Verify that removed wcmp group member no longer exists in ASIC + # database. + (status, fvs) = util.get_key( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME, + asic_db_group_member_key, + ) + assert status == False + + self._clean_vrf(dvs) + + def test_NexthopWithGreTunnelAddDeletePass(self, dvs, testlog): + # Initialize L3 objects and database connectors. + self._set_up(dvs) + self._set_vrf(dvs) + + # Maintain list of original Application and ASIC DB entries before + # adding new entries. 
+ db_list = ( + ( + self._p4rt_nexthop_obj.appl_db, + "%s:%s" + % ( + self._p4rt_nexthop_obj.APP_DB_TBL_NAME, + self._p4rt_nexthop_obj.TBL_NAME, + ), + ), + ( + self._p4rt_nexthop_obj.appl_state_db, + "%s:%s" + % ( + self._p4rt_nexthop_obj.APP_DB_TBL_NAME, + self._p4rt_nexthop_obj.TBL_NAME, + ), + ), + (self._p4rt_nexthop_obj.asic_db, + self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME), + ) + self._p4rt_nexthop_obj.get_original_redis_entries(db_list) + db_list = ( + ( + self._p4rt_gre_tunnel_obj.appl_db, + "%s:%s" + % ( + self._p4rt_gre_tunnel_obj.APP_DB_TBL_NAME, + self._p4rt_gre_tunnel_obj.TBL_NAME, + ), + ), + ( + self._p4rt_gre_tunnel_obj.appl_state_db, + "%s:%s" + % ( + self._p4rt_gre_tunnel_obj.APP_DB_TBL_NAME, + self._p4rt_gre_tunnel_obj.TBL_NAME, + ), + ), + (self._p4rt_gre_tunnel_obj.asic_db, + self._p4rt_gre_tunnel_obj.ASIC_DB_TBL_NAME), + ) + self._p4rt_gre_tunnel_obj.get_original_redis_entries(db_list) + db_list = ( + (self._p4rt_router_intf_obj.asic_db, + self._p4rt_router_intf_obj.ASIC_DB_TBL_NAME), + ) + self._p4rt_router_intf_obj.get_original_redis_entries(db_list) + + # Fetch the original key to oid information from Redis DB. + key_to_oid_helper = util.KeyToOidDBHelper(dvs) + _, original_key_oid_info = key_to_oid_helper.get_db_info() + + # Create router interface. + ( + router_interface_id, + router_intf_key, + attr_list, + ) = self._p4rt_router_intf_obj.create_router_interface() + util.verify_response( + self.response_consumer, router_intf_key, attr_list, "SWSS_RC_SUCCESS" + ) + + # get router_interface_oid of newly created router_intf + router_intf_oid = self._p4rt_router_intf_obj.get_newly_created_router_interface_oid() + assert router_intf_oid is not None + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count = 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create tunnel. 
+ tunnel_id, tunnel_key, attr_list = self._p4rt_gre_tunnel_obj.create_gre_tunnel() + util.verify_response( + self.response_consumer, tunnel_key, attr_list, "SWSS_RC_SUCCESS" + ) + # get tunnel_oid of newly created tunnel + tunnel_oid = self._p4rt_gre_tunnel_obj.get_newly_created_tunnel_oid() + assert tunnel_oid is not None + # get overlay router_interface_oid of newly created router_intf + overlay_router_intf_oid = self._p4rt_router_intf_obj.get_newly_created_router_interface_oid( + set([router_intf_oid])) + assert overlay_router_intf_oid is not None + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Query application database for tunnel entries. + tunnel_entries = util.get_keys( + self._p4rt_gre_tunnel_obj.appl_db, + self._p4rt_gre_tunnel_obj.APP_DB_TBL_NAME + + ":" + self._p4rt_gre_tunnel_obj.TBL_NAME, + ) + assert len(tunnel_entries) == ( + self._p4rt_gre_tunnel_obj.get_original_appl_db_entries_count() + 1 + ) + + # Query application database for newly created tunnel key. + (status, fvs) = util.get_key( + self._p4rt_gre_tunnel_obj.appl_db, + self._p4rt_gre_tunnel_obj.APP_DB_TBL_NAME, + tunnel_key, + ) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query application state database for tunnel entries. + state_tunnel_entries = util.get_keys( + self._p4rt_gre_tunnel_obj.appl_state_db, + self._p4rt_gre_tunnel_obj.APP_DB_TBL_NAME + + ":" + self._p4rt_gre_tunnel_obj.TBL_NAME, + ) + assert len(state_tunnel_entries) == ( + self._p4rt_gre_tunnel_obj.get_original_appl_state_db_entries_count() + 1 + ) + + # Query application state database for newly created tunnel key. 
+ (status, fvs) = util.get_key( + self._p4rt_gre_tunnel_obj.appl_state_db, + self._p4rt_gre_tunnel_obj.APP_DB_TBL_NAME, + tunnel_key, + ) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query ASIC database for tunnel entries. + tunnel_entries = util.get_keys( + self._p4rt_gre_tunnel_obj.asic_db, self._p4rt_gre_tunnel_obj.ASIC_DB_TBL_NAME + ) + assert len(tunnel_entries) == ( + self._p4rt_gre_tunnel_obj.get_original_asic_db_entries_count() + 1 + ) + + # Query ASIC database for newly created nexthop key. + asic_db_key = self._p4rt_gre_tunnel_obj.get_newly_created_tunnel_oid() + assert asic_db_key is not None + (status, fvs) = util.get_key( + self._p4rt_gre_tunnel_obj.asic_db, + self._p4rt_gre_tunnel_obj.ASIC_DB_TBL_NAME, + asic_db_key, + ) + assert status == True + attr_list = [ + (self._p4rt_gre_tunnel_obj.SAI_ATTR_UNDERLAY_INTERFACE, router_intf_oid), + (self._p4rt_gre_tunnel_obj.SAI_ATTR_OVERLAY_INTERFACE, + overlay_router_intf_oid), + (self._p4rt_gre_tunnel_obj.SAI_ATTR_TYPE, "SAI_TUNNEL_TYPE_IPINIP_GRE"), + (self._p4rt_gre_tunnel_obj.SAI_ATTR_PEER_MODE, "SAI_TUNNEL_PEER_MODE_P2P"), + (self._p4rt_gre_tunnel_obj.SAI_ATTR_ENCAP_SRC_IP, + self._p4rt_gre_tunnel_obj.DEFAULT_ENCAP_SRC_IP), + (self._p4rt_gre_tunnel_obj.SAI_ATTR_ENCAP_DST_IP, + self._p4rt_gre_tunnel_obj.DEFAULT_ENCAP_DST_IP), + ] + util.verify_attr(fvs, attr_list) + + # Create neighbor. + neighbor_id, neighbor_key, attr_list = self._p4rt_neighbor_obj.create_neighbor() + util.verify_response( + self.response_consumer, neighbor_key, attr_list, "SWSS_RC_SUCCESS" + ) + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create tunnel nexthop. 
+ nexthop_id, nexthop_key, attr_list = self._p4rt_nexthop_obj.create_next_hop( + tunnel_id=tunnel_id + ) + util.verify_response( + self.response_consumer, nexthop_key, attr_list, "SWSS_RC_SUCCESS" + ) + # get nexthop_oid of newly created nexthop + nexthop_oid = self._p4rt_nexthop_obj.get_newly_created_nexthop_oid() + assert nexthop_oid is not None + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Query application database for nexthop entries. + nexthop_entries = util.get_keys( + self._p4rt_nexthop_obj.appl_db, + self._p4rt_nexthop_obj.APP_DB_TBL_NAME + ":" + self._p4rt_nexthop_obj.TBL_NAME, + ) + assert len(nexthop_entries) == ( + self._p4rt_nexthop_obj.get_original_appl_db_entries_count() + 1 + ) + + # Query application database for newly created nexthop key. + (status, fvs) = util.get_key( + self._p4rt_nexthop_obj.appl_db, + self._p4rt_nexthop_obj.APP_DB_TBL_NAME, + nexthop_key, + ) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query application state database for nexthop entries. + state_nexthop_entries = util.get_keys( + self._p4rt_nexthop_obj.appl_state_db, + self._p4rt_nexthop_obj.APP_DB_TBL_NAME + ":" + self._p4rt_nexthop_obj.TBL_NAME, + ) + assert len(state_nexthop_entries) == ( + self._p4rt_nexthop_obj.get_original_appl_state_db_entries_count() + 1 + ) + + # Query application state database for newly created nexthop key. + (status, fvs) = util.get_key( + self._p4rt_nexthop_obj.appl_state_db, + self._p4rt_nexthop_obj.APP_DB_TBL_NAME, + nexthop_key, + ) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query ASIC database for nexthop entries. 
+ nexthop_entries = util.get_keys( + self._p4rt_nexthop_obj.asic_db, self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME + ) + assert len(nexthop_entries) == ( + self._p4rt_nexthop_obj.get_original_asic_db_entries_count() + 1 + ) + + # Query ASIC database for newly created nexthop key. + asic_db_key = self._p4rt_nexthop_obj.get_newly_created_nexthop_oid() + assert asic_db_key is not None + (status, fvs) = util.get_key( + self._p4rt_nexthop_obj.asic_db, + self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME, + asic_db_key, + ) + assert status == True + attr_list = [ + (self._p4rt_nexthop_obj.SAI_ATTR_TUNNEL_OID, tunnel_oid), + (self._p4rt_nexthop_obj.SAI_ATTR_IP, + self._p4rt_nexthop_obj.DEFAULT_IPV4_NEIGHBOR_ID), + (self._p4rt_nexthop_obj.SAI_ATTR_TYPE, + self._p4rt_nexthop_obj.SAI_ATTR_TUNNEL_ENCAP) + ] + util.verify_attr(fvs, attr_list) + + # Remove nexthop. + self._p4rt_nexthop_obj.remove_app_db_entry(nexthop_key) + util.verify_response(self.response_consumer, + nexthop_key, [], "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count decremented by 1 in Redis DB. + count -= 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Remove neighbor. + self._p4rt_neighbor_obj.remove_app_db_entry(neighbor_key) + util.verify_response( + self.response_consumer, neighbor_key, [], "SWSS_RC_SUCCESS" + ) + + # Verify that P4RT key to OID count decremented by 1 in Redis DB. + count -= 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Remove tunnel. + self._p4rt_gre_tunnel_obj.remove_app_db_entry(tunnel_key) + util.verify_response( + self.response_consumer, tunnel_key, [], "SWSS_RC_SUCCESS" + ) + + # Verify that P4RT key to OID count decremented by 1 in Redis DB. + count -= 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Remove router interface. 
+ self._p4rt_router_intf_obj.remove_app_db_entry(router_intf_key) + util.verify_response( + self.response_consumer, router_intf_key, [], "SWSS_RC_SUCCESS" + ) + + # Verify that P4RT key to OID count is same as the original count. + status, fvs = key_to_oid_helper.get_db_info() + assert status == False + assert len(fvs) == len(original_key_oid_info) + + # Query application database for nexthop entries. + nexthop_entries = util.get_keys( + self._p4rt_nexthop_obj.appl_db, + self._p4rt_nexthop_obj.APP_DB_TBL_NAME + ":" + self._p4rt_nexthop_obj.TBL_NAME, + ) + assert len(nexthop_entries) == ( + self._p4rt_nexthop_obj.get_original_appl_db_entries_count() + ) + + # Verify that the nexthop_key no longer exists in application database. + (status, fsv) = util.get_key( + self._p4rt_nexthop_obj.appl_db, + self._p4rt_nexthop_obj.APP_DB_TBL_NAME, + nexthop_key, + ) + assert status == False + + # Query application state database for nexthop entries. + state_nexthop_entries = util.get_keys( + self._p4rt_nexthop_obj.appl_state_db, + self._p4rt_nexthop_obj.APP_DB_TBL_NAME + ":" + self._p4rt_nexthop_obj.TBL_NAME, + ) + assert len(state_nexthop_entries) == ( + self._p4rt_nexthop_obj.get_original_appl_state_db_entries_count() + ) + + # Verify that the nexthop_key no longer exists in application state + # database. + (status, fsv) = util.get_key( + self._p4rt_nexthop_obj.appl_state_db, + self._p4rt_nexthop_obj.APP_DB_TBL_NAME, + nexthop_key, + ) + assert status == False + + # Query ASIC database for nexthop entries. + nexthop_entries = util.get_keys( + self._p4rt_nexthop_obj.asic_db, self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME + ) + assert len(nexthop_entries) == ( + self._p4rt_nexthop_obj.get_original_asic_db_entries_count() + ) + + # Verify that removed nexthop no longer exists in ASIC database. 
+ (status, fvs) = util.get_key( + self._p4rt_nexthop_obj.asic_db, + self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME, + asic_db_key, + ) assert status == False - # Query ASIC database for wcmp group entries. - wcmp_group_entries = util.get_keys(self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME) - assert len(wcmp_group_entries) == ( - self._p4rt_wcmp_group_obj.get_original_asic_db_group_entries_count() + # Query application database for tunnel entries. + tunnel_entries = util.get_keys( + self._p4rt_gre_tunnel_obj.appl_db, + self._p4rt_gre_tunnel_obj.APP_DB_TBL_NAME + + ":" + self._p4rt_gre_tunnel_obj.TBL_NAME, + ) + assert len(tunnel_entries) == ( + self._p4rt_gre_tunnel_obj.get_original_appl_db_entries_count() ) - # Verify that removed wcmp group no longer exists in ASIC database. - (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, - wcmp_group_oid) + # Verify that the tunnel_key no longer exists in application database. + (status, fsv) = util.get_key( + self._p4rt_gre_tunnel_obj.appl_db, + self._p4rt_gre_tunnel_obj.APP_DB_TBL_NAME, + tunnel_key, + ) assert status == False - # Query ASIC database for wcmp group member entries. - wcmp_group_member_entries = util.get_keys(self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME) - assert len(wcmp_group_member_entries) == ( - self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() + # Query application state database for tunnel entries. + state_tunnel_entries = util.get_keys( + self._p4rt_gre_tunnel_obj.appl_state_db, + self._p4rt_gre_tunnel_obj.APP_DB_TBL_NAME + + ":" + self._p4rt_gre_tunnel_obj.TBL_NAME, + ) + assert len(state_tunnel_entries) == ( + self._p4rt_gre_tunnel_obj.get_original_appl_state_db_entries_count() ) - # Verify that removed wcmp group member no longer exists in ASIC + # Verify that the tunnel_key no longer exists in application state # database. 
-        (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.asic_db,
-                                     self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME,
-                                     asic_db_group_member_key)
+        (status, fsv) = util.get_key(
+            self._p4rt_gre_tunnel_obj.appl_state_db,
+            self._p4rt_gre_tunnel_obj.APP_DB_TBL_NAME,
+            tunnel_key,
+        )
         assert status == False

+        # Query ASIC database for tunnel entries.
+        tunnel_entries = util.get_keys(
+            self._p4rt_gre_tunnel_obj.asic_db, self._p4rt_gre_tunnel_obj.ASIC_DB_TBL_NAME
+        )
+        assert len(tunnel_entries) == (
+            self._p4rt_gre_tunnel_obj.get_original_asic_db_entries_count()
+        )
+
+        # Verify that removed tunnel no longer exists in ASIC database.
+        (status, fvs) = util.get_key(
+            self._p4rt_gre_tunnel_obj.asic_db,
+            self._p4rt_gre_tunnel_obj.ASIC_DB_TBL_NAME,
+            asic_db_key,
+        )
+        assert status == False
         self._clean_vrf(dvs)

     def test_IPv4RouteAddWithInvalidNexthopFail(self, dvs, testlog):
@@ -766,34 +1518,43 @@ def test_IPv4RouteAddWithInvalidNexthopFail(self, dvs, testlog):

         # Maintain list of original Application and ASIC DB entries before
         # adding new route.
-        db_list = ((self._p4rt_route_obj.appl_db,
-                    "%s:%s" % (self._p4rt_route_obj.APP_DB_TBL_NAME,
-                               self._p4rt_route_obj.TBL_NAME)),
-                   (self._p4rt_route_obj.appl_state_db,
-                    "%s:%s" % (self._p4rt_route_obj.APP_DB_TBL_NAME,
-                               self._p4rt_route_obj.TBL_NAME)),
-                   (self._p4rt_route_obj.asic_db,
-                    self._p4rt_route_obj.ASIC_DB_TBL_NAME))
+        db_list = (
+            (
+                self._p4rt_route_obj.appl_db,
+                "%s:%s"
+                % (self._p4rt_route_obj.APP_DB_TBL_NAME, self._p4rt_route_obj.TBL_NAME),
+            ),
+            (
+                self._p4rt_route_obj.appl_state_db,
+                "%s:%s"
+                % (self._p4rt_route_obj.APP_DB_TBL_NAME, self._p4rt_route_obj.TBL_NAME),
+            ),
+            (self._p4rt_route_obj.asic_db, self._p4rt_route_obj.ASIC_DB_TBL_NAME),
+        )
         self._p4rt_route_obj.get_original_redis_entries(db_list)

         # Create route entry using invalid nexthop (expect failure).
route_key, attr_list = self._p4rt_route_obj.create_route() err_log = "[OrchAgent] Nexthop ID '8' does not exist" - util.verify_response(self.response_consumer, route_key, attr_list, - "SWSS_RC_NOT_FOUND", err_log) + util.verify_response( + self.response_consumer, route_key, attr_list, "SWSS_RC_NOT_FOUND", err_log + ) # Query application database for route entries. route_entries = util.get_keys( self._p4rt_route_obj.appl_db, - self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, + ) assert len(route_entries) == ( self._p4rt_route_obj.get_original_appl_db_entries_count() + 1 ) # Query application database for newly created route key. - (status, fvs) = util.get_key(self._p4rt_route_obj.appl_db, - self._p4rt_route_obj.APP_DB_TBL_NAME, - route_key) + (status, fvs) = util.get_key( + self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key, + ) assert status == True util.verify_attr(fvs, attr_list) @@ -801,22 +1562,26 @@ def test_IPv4RouteAddWithInvalidNexthopFail(self, dvs, testlog): # expected). state_route_entries = util.get_keys( self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, + ) assert len(state_route_entries) == ( self._p4rt_route_obj.get_original_appl_state_db_entries_count() ) # Verify that the newly added route key does not exist in application # state db. - (status, fvs) = util.get_key(self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME, - route_key) + (status, fvs) = util.get_key( + self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key, + ) assert status == False # Query ASIC database for route entries (no new ASIC DB entry should be # created for route entry). 
- route_entries = util.get_keys(self._p4rt_route_obj.asic_db, - self._p4rt_route_obj.ASIC_DB_TBL_NAME) + route_entries = util.get_keys( + self._p4rt_route_obj.asic_db, self._p4rt_route_obj.ASIC_DB_TBL_NAME + ) assert len(route_entries) == ( self._p4rt_route_obj.get_original_asic_db_entries_count() ) @@ -825,7 +1590,8 @@ def test_IPv4RouteAddWithInvalidNexthopFail(self, dvs, testlog): self._p4rt_route_obj.remove_app_db_entry(route_key) err_log = "[OrchAgent] Route entry does not exist" util.verify_response( - self.response_consumer, route_key, [], "SWSS_RC_NOT_FOUND", err_log) + self.response_consumer, route_key, [], "SWSS_RC_NOT_FOUND", err_log + ) self._clean_vrf(dvs) def test_IPv6RouteAddWithInvalidWcmpFail(self, dvs, testlog): @@ -840,56 +1606,72 @@ def test_IPv6RouteAddWithInvalidWcmpFail(self, dvs, testlog): # Maintain list of original Application and ASIC DB entries before # adding new route. - db_list = ((self._p4rt_route_obj.appl_db, - "%s:%s" % (self._p4rt_route_obj.APP_DB_TBL_NAME, - self._p4rt_route_obj.TBL_NAME)), - (self._p4rt_route_obj.appl_state_db, - "%s:%s" % (self._p4rt_route_obj.APP_DB_TBL_NAME, - self._p4rt_route_obj.TBL_NAME)), - (self._p4rt_route_obj.asic_db, - self._p4rt_route_obj.ASIC_DB_TBL_NAME)) + db_list = ( + ( + self._p4rt_route_obj.appl_db, + "%s:%s" + % (self._p4rt_route_obj.APP_DB_TBL_NAME, self._p4rt_route_obj.TBL_NAME), + ), + ( + self._p4rt_route_obj.appl_state_db, + "%s:%s" + % (self._p4rt_route_obj.APP_DB_TBL_NAME, self._p4rt_route_obj.TBL_NAME), + ), + (self._p4rt_route_obj.asic_db, self._p4rt_route_obj.ASIC_DB_TBL_NAME), + ) self._p4rt_route_obj.get_original_redis_entries(db_list) # Create route entry using invalid wcmp group (expect failure). 
route_key, attr_list = self._p4rt_route_obj.create_route( - action="set_wcmp_group_id", wcmp_group_id="8") + action="set_wcmp_group_id", wcmp_group_id="8" + ) err_log = "[OrchAgent] WCMP group '8' does not exist" - util.verify_response(self.response_consumer, route_key, attr_list, - "SWSS_RC_NOT_FOUND", err_log) + util.verify_response( + self.response_consumer, route_key, attr_list, "SWSS_RC_NOT_FOUND", err_log + ) # Query application database for route entries - route_entries = util.get_keys(self._p4rt_route_obj.appl_db, - self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + route_entries = util.get_keys( + self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, + ) assert len(route_entries) == ( self._p4rt_route_obj.get_original_appl_db_entries_count() + 1 ) # Query application database for newly created route key. - (status, fvs) = util.get_key(self._p4rt_route_obj.appl_db, - self._p4rt_route_obj.APP_DB_TBL_NAME, - route_key) + (status, fvs) = util.get_key( + self._p4rt_route_obj.appl_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key, + ) assert status == True util.verify_attr(fvs, attr_list) # Query application state database for route entries (no new APPL STATE DB # entry should be created for route entry). - state_route_entries = util.get_keys(self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME) + state_route_entries = util.get_keys( + self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME + ":" + self._p4rt_route_obj.TBL_NAME, + ) assert len(state_route_entries) == ( self._p4rt_route_obj.get_original_appl_state_db_entries_count() ) # Verify that newly created route key does not exist in application # state db. 
- (status, fvs) = util.get_key(self._p4rt_route_obj.appl_state_db, - self._p4rt_route_obj.APP_DB_TBL_NAME, - route_key) + (status, fvs) = util.get_key( + self._p4rt_route_obj.appl_state_db, + self._p4rt_route_obj.APP_DB_TBL_NAME, + route_key, + ) assert status == False # Query ASIC database for route entries (no new ASIC DB entry should be # created for route entry). - route_entries = util.get_keys(self._p4rt_route_obj.asic_db, - self._p4rt_route_obj.ASIC_DB_TBL_NAME) + route_entries = util.get_keys( + self._p4rt_route_obj.asic_db, self._p4rt_route_obj.ASIC_DB_TBL_NAME + ) assert len(route_entries) == ( self._p4rt_route_obj.get_original_asic_db_entries_count() ) @@ -898,7 +1680,8 @@ def test_IPv6RouteAddWithInvalidWcmpFail(self, dvs, testlog): self._p4rt_route_obj.remove_app_db_entry(route_key) err_log = "[OrchAgent] Route entry does not exist" util.verify_response( - self.response_consumer, route_key, [], "SWSS_RC_NOT_FOUND", err_log) + self.response_consumer, route_key, [], "SWSS_RC_NOT_FOUND", err_log + ) self._clean_vrf(dvs) def test_PruneAndRestoreNextHop(self, dvs, testlog): @@ -907,19 +1690,37 @@ def test_PruneAndRestoreNextHop(self, dvs, testlog): cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0) # Maintain original WCMP group entries for ASIC DB. 
- db_list = ((self._p4rt_wcmp_group_obj.appl_db, - "%s:%s" % (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - self._p4rt_wcmp_group_obj.TBL_NAME)), - (self._p4rt_wcmp_group_obj.appl_state_db, - "%s:%s" % (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - self._p4rt_wcmp_group_obj.TBL_NAME)), - (self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME), - (self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME)) + db_list = ( + ( + self._p4rt_wcmp_group_obj.appl_db, + "%s:%s" + % ( + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + self._p4rt_wcmp_group_obj.TBL_NAME, + ), + ), + ( + self._p4rt_wcmp_group_obj.appl_state_db, + "%s:%s" + % ( + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + self._p4rt_wcmp_group_obj.TBL_NAME, + ), + ), + ( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, + ), + ( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME, + ), + ) self._p4rt_wcmp_group_obj.get_original_redis_entries(db_list) - db_list = ((self._p4rt_nexthop_obj.asic_db, - self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME),) + db_list = ( + (self._p4rt_nexthop_obj.asic_db, + self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME), + ) self._p4rt_nexthop_obj.get_original_redis_entries(db_list) # Fetch the original key to oid information from Redis DB. @@ -933,12 +1734,14 @@ def test_PruneAndRestoreNextHop(self, dvs, testlog): util.set_interface_status(dvs, if_name, "up") # Create router interface. - router_interface_id, router_intf_key, attr_list = ( - self._p4rt_router_intf_obj.create_router_interface() - ) + ( + router_interface_id, + router_intf_key, + attr_list, + ) = self._p4rt_router_intf_obj.create_router_interface() util.verify_response( - self.response_consumer, router_intf_key, attr_list, - "SWSS_RC_SUCCESS") + self.response_consumer, router_intf_key, attr_list, "SWSS_RC_SUCCESS" + ) # Verify that P4RT key to OID count incremented by 1 in Redis DB. 
count = 1 @@ -947,11 +1750,10 @@ def test_PruneAndRestoreNextHop(self, dvs, testlog): assert len(fvs) == len(original_key_oid_info) + count # Create neighbor. - neighbor_id, neighbor_key, attr_list = ( - self._p4rt_neighbor_obj.create_neighbor() - ) + neighbor_id, neighbor_key, attr_list = self._p4rt_neighbor_obj.create_neighbor() util.verify_response( - self.response_consumer, neighbor_key, attr_list, "SWSS_RC_SUCCESS") + self.response_consumer, neighbor_key, attr_list, "SWSS_RC_SUCCESS" + ) # Verify that P4RT key to OID count incremented by 1 in Redis DB. count += 1 @@ -960,11 +1762,10 @@ def test_PruneAndRestoreNextHop(self, dvs, testlog): assert len(fvs) == len(original_key_oid_info) + count # Create nexthop. - nexthop_id, nexthop_key, attr_list = ( - self._p4rt_nexthop_obj.create_next_hop() - ) + nexthop_id, nexthop_key, attr_list = self._p4rt_nexthop_obj.create_next_hop() util.verify_response( - self.response_consumer, nexthop_key, attr_list, "SWSS_RC_SUCCESS") + self.response_consumer, nexthop_key, attr_list, "SWSS_RC_SUCCESS" + ) # Get nexthop_oid of newly created nexthop. nexthop_oid = self._p4rt_nexthop_obj.get_newly_created_nexthop_oid() assert nexthop_oid is not None @@ -976,12 +1777,14 @@ def test_PruneAndRestoreNextHop(self, dvs, testlog): assert len(fvs) == len(original_key_oid_info) + count # Create wcmp group with one member. - wcmp_group_id, wcmp_group_key, attr_list = ( - self._p4rt_wcmp_group_obj.create_wcmp_group(watch_port=port_name) - ) + ( + wcmp_group_id, + wcmp_group_key, + attr_list, + ) = self._p4rt_wcmp_group_obj.create_wcmp_group(watch_port=port_name) util.verify_response( - self.response_consumer, wcmp_group_key, attr_list, - "SWSS_RC_SUCCESS") + self.response_consumer, wcmp_group_key, attr_list, "SWSS_RC_SUCCESS" + ) # Verify that P4RT key to OID count incremented by 2 in Redis DB # (1 each for WCMP group and member). 
@@ -993,40 +1796,50 @@ def test_PruneAndRestoreNextHop(self, dvs, testlog): # Query application database for wcmp group entries. wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.appl_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + self._p4rt_wcmp_group_obj.TBL_NAME) + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + + ":" + + self._p4rt_wcmp_group_obj.TBL_NAME, + ) assert len(wcmp_group_entries) == ( self._p4rt_wcmp_group_obj.get_original_appl_db_entries_count() + 1 ) # Query application database for newly created wcmp group key. - (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.appl_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - wcmp_group_key) + (status, fvs) = util.get_key( + self._p4rt_wcmp_group_obj.appl_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + wcmp_group_key, + ) assert status == True util.verify_attr(fvs, attr_list) # Query application state database for wcmp group entries. state_wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.appl_state_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + self._p4rt_wcmp_group_obj.TBL_NAME) + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + + ":" + + self._p4rt_wcmp_group_obj.TBL_NAME, + ) assert len(state_wcmp_group_entries) == ( - self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() - + 1 + self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() + 1 ) # Query application state database for newly created wcmp group key. - (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.appl_state_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - wcmp_group_key) + (status, fvs) = util.get_key( + self._p4rt_wcmp_group_obj.appl_state_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + wcmp_group_key, + ) assert status == True util.verify_attr(fvs, attr_list) # Query ASIC database for wcmp group entries. 
- wcmp_group_entries = util.get_keys(self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME) + wcmp_group_entries = util.get_keys( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, + ) assert len(wcmp_group_entries) == ( - self._p4rt_wcmp_group_obj.get_original_asic_db_group_entries_count() - + 1 + self._p4rt_wcmp_group_obj.get_original_asic_db_group_entries_count() + 1 ) # Query ASIC database for newly created wcmp group oid. @@ -1035,32 +1848,40 @@ def test_PruneAndRestoreNextHop(self, dvs, testlog): (status, fvs) = util.get_key( self._p4rt_wcmp_group_obj.asic_db, self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, - wcmp_group_oid + wcmp_group_oid, ) assert status == True asic_attr_list = [ - (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_TYPE, - (self._p4rt_wcmp_group_obj. - SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP)) + ( + self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_TYPE, + ( + self._p4rt_wcmp_group_obj.SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP + ), + ) ] util.verify_attr(fvs, asic_attr_list) # Query ASIC database for newly created wcmp group member key. 
- asic_db_group_member_key = self._p4rt_wcmp_group_obj.get_newly_created_wcmp_group_member_asic_db_key() + asic_db_group_member_key = ( + self._p4rt_wcmp_group_obj.get_newly_created_wcmp_group_member_asic_db_key() + ) assert asic_db_group_member_key is not None (status, fvs) = util.get_key( self._p4rt_wcmp_group_obj.asic_db, self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME, - asic_db_group_member_key + asic_db_group_member_key, ) assert status == True asic_attr_list = [ - (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_NEXTHOP_GROUP_ID, - wcmp_group_oid), - (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_NEXTHOP_ID, - nexthop_oid), - (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_WEIGHT, - str(self._p4rt_wcmp_group_obj.DEFAULT_WEIGHT)) + ( + self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_NEXTHOP_GROUP_ID, + wcmp_group_oid, + ), + (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_NEXTHOP_ID, nexthop_oid), + ( + self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_WEIGHT, + str(self._p4rt_wcmp_group_obj.DEFAULT_WEIGHT), + ), ] util.verify_attr(fvs, asic_attr_list) @@ -1071,16 +1892,18 @@ def test_PruneAndRestoreNextHop(self, dvs, testlog): # pruned. wcmp_group_member_entries = util.get_keys( self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME, ) assert len(wcmp_group_member_entries) == ( self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() ) # Check APPL STATE DB to verify no change. - (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.appl_state_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - wcmp_group_key) + (status, fvs) = util.get_key( + self._p4rt_wcmp_group_obj.appl_state_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + wcmp_group_key, + ) assert status == True util.verify_attr(fvs, attr_list) @@ -1090,18 +1913,19 @@ def test_PruneAndRestoreNextHop(self, dvs, testlog): # Check pruned next hop member is restored in ASIC DB. 
wcmp_group_member_entries = util.get_keys( self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME, ) assert len(wcmp_group_member_entries) == ( - self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() - + 1 + self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() + 1 + ) + asic_db_group_member_key = ( + self._p4rt_wcmp_group_obj.get_newly_created_wcmp_group_member_asic_db_key() ) - asic_db_group_member_key = self._p4rt_wcmp_group_obj.get_newly_created_wcmp_group_member_asic_db_key() assert asic_db_group_member_key is not None (status, fvs) = util.get_key( self._p4rt_wcmp_group_obj.asic_db, self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME, - asic_db_group_member_key + asic_db_group_member_key, ) assert status == True util.verify_attr(fvs, asic_attr_list) @@ -1119,8 +1943,12 @@ def test_PruneAndRestoreNextHop(self, dvs, testlog): # Verify that APPL STATE DB is now updated. state_wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.appl_state_db, - (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + - self._p4rt_wcmp_group_obj.TBL_NAME)) + ( + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + + ":" + + self._p4rt_wcmp_group_obj.TBL_NAME + ), + ) assert len(state_wcmp_group_entries) == ( self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() ) @@ -1148,7 +1976,7 @@ def test_PruneAndRestoreNextHop(self, dvs, testlog): # Verify that P4RT key to OID count is same as the original count. status, fvs = key_to_oid_helper.get_db_info() - assert status == True + assert status == False assert len(fvs) == len(original_key_oid_info) def test_PruneNextHopOnWarmBoot(self, dvs, testlog): @@ -1157,19 +1985,37 @@ def test_PruneNextHopOnWarmBoot(self, dvs, testlog): cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0) # Maintain original WCMP group entries for ASIC DB. 
- db_list = ((self._p4rt_wcmp_group_obj.appl_db, - "%s:%s" % (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - self._p4rt_wcmp_group_obj.TBL_NAME)), - (self._p4rt_wcmp_group_obj.appl_state_db, - "%s:%s" % (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - self._p4rt_wcmp_group_obj.TBL_NAME)), - (self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME), - (self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME)) + db_list = ( + ( + self._p4rt_wcmp_group_obj.appl_db, + "%s:%s" + % ( + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + self._p4rt_wcmp_group_obj.TBL_NAME, + ), + ), + ( + self._p4rt_wcmp_group_obj.appl_state_db, + "%s:%s" + % ( + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + self._p4rt_wcmp_group_obj.TBL_NAME, + ), + ), + ( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, + ), + ( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME, + ), + ) self._p4rt_wcmp_group_obj.get_original_redis_entries(db_list) - db_list = ((self._p4rt_nexthop_obj.asic_db, - self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME),) + db_list = ( + (self._p4rt_nexthop_obj.asic_db, + self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME), + ) self._p4rt_nexthop_obj.get_original_redis_entries(db_list) # Fetch the original key to oid information from Redis DB. @@ -1183,12 +2029,14 @@ def test_PruneNextHopOnWarmBoot(self, dvs, testlog): util.set_interface_status(dvs, if_name, "up") # Create router interface. - router_interface_id, router_intf_key, attr_list = ( - self._p4rt_router_intf_obj.create_router_interface() - ) + ( + router_interface_id, + router_intf_key, + attr_list, + ) = self._p4rt_router_intf_obj.create_router_interface() util.verify_response( - self.response_consumer, router_intf_key, attr_list, - "SWSS_RC_SUCCESS") + self.response_consumer, router_intf_key, attr_list, "SWSS_RC_SUCCESS" + ) # Verify that P4RT key to OID count incremented by 1 in Redis DB. 
count = 1 @@ -1197,11 +2045,10 @@ def test_PruneNextHopOnWarmBoot(self, dvs, testlog): assert len(fvs) == len(original_key_oid_info) + count # Create neighbor. - neighbor_id, neighbor_key, attr_list = ( - self._p4rt_neighbor_obj.create_neighbor() - ) + neighbor_id, neighbor_key, attr_list = self._p4rt_neighbor_obj.create_neighbor() util.verify_response( - self.response_consumer, neighbor_key, attr_list, "SWSS_RC_SUCCESS") + self.response_consumer, neighbor_key, attr_list, "SWSS_RC_SUCCESS" + ) # Verify that P4RT key to OID count incremented by 1 in Redis DB. count += 1 @@ -1210,11 +2057,10 @@ def test_PruneNextHopOnWarmBoot(self, dvs, testlog): assert len(fvs) == len(original_key_oid_info) + count # Create nexthop. - nexthop_id, nexthop_key, attr_list = ( - self._p4rt_nexthop_obj.create_next_hop() - ) + nexthop_id, nexthop_key, attr_list = self._p4rt_nexthop_obj.create_next_hop() util.verify_response( - self.response_consumer, nexthop_key, attr_list, "SWSS_RC_SUCCESS") + self.response_consumer, nexthop_key, attr_list, "SWSS_RC_SUCCESS" + ) # Get nexthop_oid of newly created nexthop. nexthop_oid = self._p4rt_nexthop_obj.get_newly_created_nexthop_oid() assert nexthop_oid is not None @@ -1226,12 +2072,14 @@ def test_PruneNextHopOnWarmBoot(self, dvs, testlog): assert len(fvs) == len(original_key_oid_info) + count # Create wcmp group with one member. - wcmp_group_id, wcmp_group_key, attr_list = ( - self._p4rt_wcmp_group_obj.create_wcmp_group(watch_port=port_name) - ) + ( + wcmp_group_id, + wcmp_group_key, + attr_list, + ) = self._p4rt_wcmp_group_obj.create_wcmp_group(watch_port=port_name) util.verify_response( - self.response_consumer, wcmp_group_key, attr_list, - "SWSS_RC_SUCCESS") + self.response_consumer, wcmp_group_key, attr_list, "SWSS_RC_SUCCESS" + ) # Verify that P4RT key to OID count incremented by 2 in Redis DB # (1 each for WCMP group and member). 
@@ -1243,40 +2091,50 @@ def test_PruneNextHopOnWarmBoot(self, dvs, testlog): # Query application database for wcmp group entries. wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.appl_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + self._p4rt_wcmp_group_obj.TBL_NAME) + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + + ":" + + self._p4rt_wcmp_group_obj.TBL_NAME, + ) assert len(wcmp_group_entries) == ( self._p4rt_wcmp_group_obj.get_original_appl_db_entries_count() + 1 ) # Query application database for newly created wcmp group key. - (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.appl_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - wcmp_group_key) + (status, fvs) = util.get_key( + self._p4rt_wcmp_group_obj.appl_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + wcmp_group_key, + ) assert status == True util.verify_attr(fvs, attr_list) # Query application state database for wcmp group entries. state_wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.appl_state_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + self._p4rt_wcmp_group_obj.TBL_NAME) + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + + ":" + + self._p4rt_wcmp_group_obj.TBL_NAME, + ) assert len(state_wcmp_group_entries) == ( - self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() - + 1 + self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() + 1 ) # Query application state database for newly created wcmp group key. - (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.appl_state_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - wcmp_group_key) + (status, fvs) = util.get_key( + self._p4rt_wcmp_group_obj.appl_state_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + wcmp_group_key, + ) assert status == True util.verify_attr(fvs, attr_list) # Query ASIC database for wcmp group entries. 
- wcmp_group_entries = util.get_keys(self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME) + wcmp_group_entries = util.get_keys( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, + ) assert len(wcmp_group_entries) == ( - self._p4rt_wcmp_group_obj.get_original_asic_db_group_entries_count() - + 1 + self._p4rt_wcmp_group_obj.get_original_asic_db_group_entries_count() + 1 ) # Query ASIC database for newly created wcmp group oid. @@ -1285,42 +2143,49 @@ def test_PruneNextHopOnWarmBoot(self, dvs, testlog): (status, fvs) = util.get_key( self._p4rt_wcmp_group_obj.asic_db, self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, - wcmp_group_oid + wcmp_group_oid, ) assert status == True asic_attr_list = [ - (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_TYPE, - (self._p4rt_wcmp_group_obj. - SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP)) + ( + self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_TYPE, + ( + self._p4rt_wcmp_group_obj.SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP + ), + ) ] util.verify_attr(fvs, asic_attr_list) # Query ASIC database for wcmp group member entries. wcmp_group_member_entries = util.get_keys( self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME, ) assert len(wcmp_group_member_entries) == ( - self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() - + 1 + self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() + 1 ) # Query ASIC database for newly created wcmp group member key. 
- asic_db_group_member_key = self._p4rt_wcmp_group_obj.get_newly_created_wcmp_group_member_asic_db_key() + asic_db_group_member_key = ( + self._p4rt_wcmp_group_obj.get_newly_created_wcmp_group_member_asic_db_key() + ) assert asic_db_group_member_key is not None (status, fvs) = util.get_key( self._p4rt_wcmp_group_obj.asic_db, self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME, - asic_db_group_member_key + asic_db_group_member_key, ) assert status == True asic_attr_list = [ - (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_NEXTHOP_GROUP_ID, - wcmp_group_oid), - (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_NEXTHOP_ID, - nexthop_oid), - (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_WEIGHT, - str(self._p4rt_wcmp_group_obj.DEFAULT_WEIGHT)) + ( + self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_NEXTHOP_GROUP_ID, + wcmp_group_oid, + ), + (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_NEXTHOP_ID, nexthop_oid), + ( + self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_WEIGHT, + str(self._p4rt_wcmp_group_obj.DEFAULT_WEIGHT), + ), ] util.verify_attr(fvs, asic_attr_list) @@ -1328,7 +2193,7 @@ def test_PruneNextHopOnWarmBoot(self, dvs, testlog): util.set_interface_status(dvs, if_name) # Execute the warm reboot. - dvs.runcmd("config warm_restart enable swss") + dvs.warm_restart_swss("true") dvs.stop_swss() dvs.start_swss() @@ -1338,7 +2203,7 @@ def test_PruneNextHopOnWarmBoot(self, dvs, testlog): # Verify that the associated next hop is pruned in ASIC DB. wcmp_group_member_entries = util.get_keys( self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME, ) assert len(wcmp_group_member_entries) == ( self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() @@ -1357,8 +2222,12 @@ def test_PruneNextHopOnWarmBoot(self, dvs, testlog): # Verify that APPL STATE DB is updated. 
state_wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.appl_state_db, - (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + - self._p4rt_wcmp_group_obj.TBL_NAME)) + ( + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + + ":" + + self._p4rt_wcmp_group_obj.TBL_NAME + ), + ) assert len(state_wcmp_group_entries) == ( self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() ) @@ -1386,7 +2255,7 @@ def test_PruneNextHopOnWarmBoot(self, dvs, testlog): # Verify that P4RT key to OID count is same as the original count. status, fvs = key_to_oid_helper.get_db_info() - assert status == True + assert status == False assert len(fvs) == len(original_key_oid_info) def test_CreateWcmpMemberForOperUpWatchportOnly(self, dvs, testlog): @@ -1395,19 +2264,37 @@ def test_CreateWcmpMemberForOperUpWatchportOnly(self, dvs, testlog): cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0) # Maintain original WCMP group entries for ASIC DB. - db_list = ((self._p4rt_wcmp_group_obj.appl_db, - "%s:%s" % (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - self._p4rt_wcmp_group_obj.TBL_NAME)), - (self._p4rt_wcmp_group_obj.appl_state_db, - "%s:%s" % (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - self._p4rt_wcmp_group_obj.TBL_NAME)), - (self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME), - (self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME)) + db_list = ( + ( + self._p4rt_wcmp_group_obj.appl_db, + "%s:%s" + % ( + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + self._p4rt_wcmp_group_obj.TBL_NAME, + ), + ), + ( + self._p4rt_wcmp_group_obj.appl_state_db, + "%s:%s" + % ( + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + self._p4rt_wcmp_group_obj.TBL_NAME, + ), + ), + ( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, + ), + ( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME, + ), + ) 
self._p4rt_wcmp_group_obj.get_original_redis_entries(db_list) - db_list = ((self._p4rt_nexthop_obj.asic_db, - self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME),) + db_list = ( + (self._p4rt_nexthop_obj.asic_db, + self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME), + ) self._p4rt_nexthop_obj.get_original_redis_entries(db_list) # Fetch the original key to oid information from Redis DB. @@ -1421,12 +2308,14 @@ def test_CreateWcmpMemberForOperUpWatchportOnly(self, dvs, testlog): util.set_interface_status(dvs, if_name) # Create router interface. - router_interface_id, router_intf_key, attr_list = ( - self._p4rt_router_intf_obj.create_router_interface() - ) + ( + router_interface_id, + router_intf_key, + attr_list, + ) = self._p4rt_router_intf_obj.create_router_interface() util.verify_response( - self.response_consumer, router_intf_key, attr_list, - "SWSS_RC_SUCCESS") + self.response_consumer, router_intf_key, attr_list, "SWSS_RC_SUCCESS" + ) # Verify that P4RT key to OID count incremented by 1 in Redis DB. count = 1 @@ -1435,11 +2324,10 @@ def test_CreateWcmpMemberForOperUpWatchportOnly(self, dvs, testlog): assert len(fvs) == len(original_key_oid_info) + count # Create neighbor. - neighbor_id, neighbor_key, attr_list = ( - self._p4rt_neighbor_obj.create_neighbor() - ) + neighbor_id, neighbor_key, attr_list = self._p4rt_neighbor_obj.create_neighbor() util.verify_response( - self.response_consumer, neighbor_key, attr_list, "SWSS_RC_SUCCESS") + self.response_consumer, neighbor_key, attr_list, "SWSS_RC_SUCCESS" + ) # Verify that P4RT key to OID count incremented by 1 in Redis DB. count += 1 @@ -1448,11 +2336,10 @@ def test_CreateWcmpMemberForOperUpWatchportOnly(self, dvs, testlog): assert len(fvs) == len(original_key_oid_info) + count # Create nexthop. 
- nexthop_id, nexthop_key, attr_list = ( - self._p4rt_nexthop_obj.create_next_hop() - ) + nexthop_id, nexthop_key, attr_list = self._p4rt_nexthop_obj.create_next_hop() util.verify_response( - self.response_consumer, nexthop_key, attr_list, "SWSS_RC_SUCCESS") + self.response_consumer, nexthop_key, attr_list, "SWSS_RC_SUCCESS" + ) # Get nexthop_oid of newly created nexthop. nexthop_oid = self._p4rt_nexthop_obj.get_newly_created_nexthop_oid() assert nexthop_oid is not None @@ -1464,12 +2351,14 @@ def test_CreateWcmpMemberForOperUpWatchportOnly(self, dvs, testlog): assert len(fvs) == len(original_key_oid_info) + count # Create wcmp group with one member. - wcmp_group_id, wcmp_group_key, attr_list = ( - self._p4rt_wcmp_group_obj.create_wcmp_group(watch_port=port_name) - ) + ( + wcmp_group_id, + wcmp_group_key, + attr_list, + ) = self._p4rt_wcmp_group_obj.create_wcmp_group(watch_port=port_name) util.verify_response( - self.response_consumer, wcmp_group_key, attr_list, - "SWSS_RC_SUCCESS") + self.response_consumer, wcmp_group_key, attr_list, "SWSS_RC_SUCCESS" + ) # Verify that P4RT key to OID count incremented by 1 in Redis DB # (WCMP group member is not created for operationally down watchport). @@ -1481,39 +2370,50 @@ def test_CreateWcmpMemberForOperUpWatchportOnly(self, dvs, testlog): # Query application database for wcmp group entries. wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.appl_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + self._p4rt_wcmp_group_obj.TBL_NAME) + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + + ":" + + self._p4rt_wcmp_group_obj.TBL_NAME, + ) assert len(wcmp_group_entries) == ( self._p4rt_wcmp_group_obj.get_original_appl_db_entries_count() + 1 ) # Query application database for newly created wcmp group key. 
- (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.appl_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - wcmp_group_key) + (status, fvs) = util.get_key( + self._p4rt_wcmp_group_obj.appl_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + wcmp_group_key, + ) assert status == True util.verify_attr(fvs, attr_list) # Query application state database for wcmp group entries. state_wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.appl_state_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + self._p4rt_wcmp_group_obj.TBL_NAME) + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + + ":" + + self._p4rt_wcmp_group_obj.TBL_NAME, + ) assert len(state_wcmp_group_entries) == ( self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() + 1 ) # Query application state database for newly created wcmp group key. - (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.appl_state_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - wcmp_group_key) + (status, fvs) = util.get_key( + self._p4rt_wcmp_group_obj.appl_state_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + wcmp_group_key, + ) assert status == True util.verify_attr(fvs, attr_list) # Query ASIC database for wcmp group entries. - wcmp_group_entries = util.get_keys(self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME) + wcmp_group_entries = util.get_keys( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, + ) assert len(wcmp_group_entries) == ( - self._p4rt_wcmp_group_obj.get_original_asic_db_group_entries_count() - + 1 + self._p4rt_wcmp_group_obj.get_original_asic_db_group_entries_count() + 1 ) # Query ASIC database for newly created wcmp group oid. 
@@ -1522,20 +2422,23 @@ def test_CreateWcmpMemberForOperUpWatchportOnly(self, dvs, testlog): (status, fvs) = util.get_key( self._p4rt_wcmp_group_obj.asic_db, self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, - wcmp_group_oid + wcmp_group_oid, ) assert status == True asic_attr_list = [ - (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_TYPE, - (self._p4rt_wcmp_group_obj. - SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP)) + ( + self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_TYPE, + ( + self._p4rt_wcmp_group_obj.SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP + ), + ) ] util.verify_attr(fvs, asic_attr_list) # Query ASIC database for wcmp group member entries (expect no entry). wcmp_group_member_entries = util.get_keys( self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME, ) assert len(wcmp_group_member_entries) == ( self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() @@ -1555,26 +2458,31 @@ def test_CreateWcmpMemberForOperUpWatchportOnly(self, dvs, testlog): # Verify that next hop member is now created in SAI. wcmp_group_member_entries = util.get_keys( self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME, ) assert len(wcmp_group_member_entries) == ( - self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() - + 1 + self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() + 1 + ) + asic_db_group_member_key = ( + self._p4rt_wcmp_group_obj.get_newly_created_wcmp_group_member_asic_db_key() ) - asic_db_group_member_key = self._p4rt_wcmp_group_obj.get_newly_created_wcmp_group_member_asic_db_key() assert asic_db_group_member_key is not None - (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.asic_db, - (self._p4rt_wcmp_group_obj. 
- ASIC_DB_GROUP_MEMBER_TBL_NAME), - asic_db_group_member_key) + (status, fvs) = util.get_key( + self._p4rt_wcmp_group_obj.asic_db, + (self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME), + asic_db_group_member_key, + ) assert status == True asic_attr_list = [ - (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_NEXTHOP_GROUP_ID, - wcmp_group_oid), - (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_NEXTHOP_ID, - nexthop_oid), - (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_WEIGHT, - str(self._p4rt_wcmp_group_obj.DEFAULT_WEIGHT)) + ( + self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_NEXTHOP_GROUP_ID, + wcmp_group_oid, + ), + (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_NEXTHOP_ID, nexthop_oid), + ( + self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_MEMBER_WEIGHT, + str(self._p4rt_wcmp_group_obj.DEFAULT_WEIGHT), + ), ] util.verify_attr(fvs, asic_attr_list) @@ -1591,8 +2499,12 @@ def test_CreateWcmpMemberForOperUpWatchportOnly(self, dvs, testlog): # Verify that APPL STATE DB is updated. state_wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.appl_state_db, - (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + - self._p4rt_wcmp_group_obj.TBL_NAME)) + ( + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + + ":" + + self._p4rt_wcmp_group_obj.TBL_NAME + ), + ) assert len(state_wcmp_group_entries) == ( self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() ) @@ -1620,7 +2532,7 @@ def test_CreateWcmpMemberForOperUpWatchportOnly(self, dvs, testlog): # Verify that P4RT key to OID count is same as the original count. status, fvs = key_to_oid_helper.get_db_info() - assert status == True + assert status == False assert len(fvs) == len(original_key_oid_info) def test_RemovePrunedWcmpGroupMember(self, dvs, testlog): @@ -1629,19 +2541,37 @@ def test_RemovePrunedWcmpGroupMember(self, dvs, testlog): cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0) # Maintain original WCMP group entries for ASIC DB. 
- db_list = ((self._p4rt_wcmp_group_obj.appl_db, - "%s:%s" % (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - self._p4rt_wcmp_group_obj.TBL_NAME)), - (self._p4rt_wcmp_group_obj.appl_state_db, - "%s:%s" % (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - self._p4rt_wcmp_group_obj.TBL_NAME)), - (self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME), - (self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME)) + db_list = ( + ( + self._p4rt_wcmp_group_obj.appl_db, + "%s:%s" + % ( + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + self._p4rt_wcmp_group_obj.TBL_NAME, + ), + ), + ( + self._p4rt_wcmp_group_obj.appl_state_db, + "%s:%s" + % ( + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + self._p4rt_wcmp_group_obj.TBL_NAME, + ), + ), + ( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, + ), + ( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME, + ), + ) self._p4rt_wcmp_group_obj.get_original_redis_entries(db_list) - db_list = ((self._p4rt_nexthop_obj.asic_db, - self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME),) + db_list = ( + (self._p4rt_nexthop_obj.asic_db, + self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME), + ) self._p4rt_nexthop_obj.get_original_redis_entries(db_list) # Fetch the original key to oid information from Redis DB. @@ -1655,12 +2585,14 @@ def test_RemovePrunedWcmpGroupMember(self, dvs, testlog): util.set_interface_status(dvs, if_name) # Create router interface. - router_interface_id, router_intf_key, attr_list = ( - self._p4rt_router_intf_obj.create_router_interface() - ) + ( + router_interface_id, + router_intf_key, + attr_list, + ) = self._p4rt_router_intf_obj.create_router_interface() util.verify_response( - self.response_consumer, router_intf_key, attr_list, - "SWSS_RC_SUCCESS") + self.response_consumer, router_intf_key, attr_list, "SWSS_RC_SUCCESS" + ) # Verify that P4RT key to OID count incremented by 1 in Redis DB. 
count = 1 @@ -1669,11 +2601,10 @@ def test_RemovePrunedWcmpGroupMember(self, dvs, testlog): assert len(fvs) == len(original_key_oid_info) + count # Create neighbor. - neighbor_id, neighbor_key, attr_list = ( - self._p4rt_neighbor_obj.create_neighbor() - ) + neighbor_id, neighbor_key, attr_list = self._p4rt_neighbor_obj.create_neighbor() util.verify_response( - self.response_consumer, neighbor_key, attr_list, "SWSS_RC_SUCCESS") + self.response_consumer, neighbor_key, attr_list, "SWSS_RC_SUCCESS" + ) # Verify that P4RT key to OID count incremented by 1 in Redis DB. count += 1 @@ -1682,11 +2613,10 @@ def test_RemovePrunedWcmpGroupMember(self, dvs, testlog): assert len(fvs) == len(original_key_oid_info) + count # Create nexthop. - nexthop_id, nexthop_key, attr_list = ( - self._p4rt_nexthop_obj.create_next_hop() - ) + nexthop_id, nexthop_key, attr_list = self._p4rt_nexthop_obj.create_next_hop() util.verify_response( - self.response_consumer, nexthop_key, attr_list, "SWSS_RC_SUCCESS") + self.response_consumer, nexthop_key, attr_list, "SWSS_RC_SUCCESS" + ) # Get nexthop_oid of newly created nexthop. nexthop_oid = self._p4rt_nexthop_obj.get_newly_created_nexthop_oid() assert nexthop_oid is not None @@ -1698,12 +2628,14 @@ def test_RemovePrunedWcmpGroupMember(self, dvs, testlog): assert len(fvs) == len(original_key_oid_info) + count # Create wcmp group with one member. - wcmp_group_id, wcmp_group_key, attr_list = ( - self._p4rt_wcmp_group_obj.create_wcmp_group(watch_port=port_name) - ) + ( + wcmp_group_id, + wcmp_group_key, + attr_list, + ) = self._p4rt_wcmp_group_obj.create_wcmp_group(watch_port=port_name) util.verify_response( - self.response_consumer, wcmp_group_key, attr_list, - "SWSS_RC_SUCCESS") + self.response_consumer, wcmp_group_key, attr_list, "SWSS_RC_SUCCESS" + ) # Verify that P4RT key to OID count incremented by 1 in Redis DB # (WCMP group member is not created for operationally down watchport). 
@@ -1715,39 +2647,50 @@ def test_RemovePrunedWcmpGroupMember(self, dvs, testlog): # Query application database for wcmp group entries. wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.appl_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + self._p4rt_wcmp_group_obj.TBL_NAME) + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + + ":" + + self._p4rt_wcmp_group_obj.TBL_NAME, + ) assert len(wcmp_group_entries) == ( self._p4rt_wcmp_group_obj.get_original_appl_db_entries_count() + 1 ) # Query application database for newly created wcmp group key. - (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.appl_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - wcmp_group_key) + (status, fvs) = util.get_key( + self._p4rt_wcmp_group_obj.appl_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + wcmp_group_key, + ) assert status == True util.verify_attr(fvs, attr_list) # Query application state database for wcmp group entries. state_wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.appl_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + self._p4rt_wcmp_group_obj.TBL_NAME) + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + + ":" + + self._p4rt_wcmp_group_obj.TBL_NAME, + ) assert len(state_wcmp_group_entries) == ( self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() + 1 ) # Query application state database for newly created wcmp group key. - (status, fvs) = util.get_key(self._p4rt_wcmp_group_obj.appl_state_db, - self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, - wcmp_group_key) + (status, fvs) = util.get_key( + self._p4rt_wcmp_group_obj.appl_state_db, + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME, + wcmp_group_key, + ) assert status == True util.verify_attr(fvs, attr_list) # Query ASIC database for wcmp group entries. 
- wcmp_group_entries = util.get_keys(self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME) + wcmp_group_entries = util.get_keys( + self._p4rt_wcmp_group_obj.asic_db, + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, + ) assert len(wcmp_group_entries) == ( - self._p4rt_wcmp_group_obj.get_original_asic_db_group_entries_count() - + 1 + self._p4rt_wcmp_group_obj.get_original_asic_db_group_entries_count() + 1 ) # Query ASIC database for newly created wcmp group oid. @@ -1756,35 +2699,56 @@ def test_RemovePrunedWcmpGroupMember(self, dvs, testlog): (status, fvs) = util.get_key( self._p4rt_wcmp_group_obj.asic_db, self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, - wcmp_group_oid + wcmp_group_oid, ) assert status == True asic_attr_list = [ - (self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_TYPE, - (self._p4rt_wcmp_group_obj. - SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP)) + ( + self._p4rt_wcmp_group_obj.SAI_ATTR_GROUP_TYPE, + ( + self._p4rt_wcmp_group_obj.SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP + ), + ) ] util.verify_attr(fvs, asic_attr_list) # Query ASIC database for wcmp group member entries. wcmp_group_member_entries = util.get_keys( self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, ) assert len(wcmp_group_member_entries) == ( - self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() - + 1 + self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() + 1 ) # Query ASIC database for wcmp group member entries (expect no entry). wcmp_group_member_entries = util.get_keys( self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME, + ) + assert ( + len(wcmp_group_member_entries) + == self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() + ) + + # Attempt to delete the next hop. 
Expect failure as the pruned WCMP + # group member is still referencing it. + self._p4rt_nexthop_obj.remove_app_db_entry(nexthop_key) + + # Verify that the P4RT key to OID count is same as before in Redis DB. + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Verify that the next hop still exists in app state db. + (status, fvs) = util.get_key( + self._p4rt_nexthop_obj.appl_state_db, + self._p4rt_nexthop_obj.APP_DB_TBL_NAME, + nexthop_key, ) - assert len( - wcmp_group_member_entries) == self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() + assert status == True - # Delete the pruned wcmp group member. + # Delete the pruned wcmp group member and try again. self._p4rt_wcmp_group_obj.remove_app_db_entry(wcmp_group_key) # Verify that P4RT key to OID count decremented by 1 in Redis DB. @@ -1796,8 +2760,12 @@ def test_RemovePrunedWcmpGroupMember(self, dvs, testlog): # Verify that APPL STATE DB is updated. state_wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.appl_state_db, - (self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + ":" + - self._p4rt_wcmp_group_obj.TBL_NAME)) + ( + self._p4rt_wcmp_group_obj.APP_DB_TBL_NAME + + ":" + + self._p4rt_wcmp_group_obj.TBL_NAME + ), + ) assert len(state_wcmp_group_entries) == ( self._p4rt_wcmp_group_obj.get_original_appl_state_db_entries_count() ) @@ -1805,14 +2773,14 @@ def test_RemovePrunedWcmpGroupMember(self, dvs, testlog): # Verify that ASIC DB is updated. 
wcmp_group_entries = util.get_keys( self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_TBL_NAME, ) assert len(wcmp_group_entries) == ( self._p4rt_wcmp_group_obj.get_original_asic_db_group_entries_count() ) wcmp_group_member_entries = util.get_keys( self._p4rt_wcmp_group_obj.asic_db, - self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME + self._p4rt_wcmp_group_obj.ASIC_DB_GROUP_MEMBER_TBL_NAME, ) assert len(wcmp_group_member_entries) == ( self._p4rt_wcmp_group_obj.get_original_asic_db_member_entries_count() @@ -1841,5 +2809,146 @@ def test_RemovePrunedWcmpGroupMember(self, dvs, testlog): # Verify that P4RT key to OID count is same as the original count. status, fvs = key_to_oid_helper.get_db_info() - assert status == True + assert status == False + assert len(fvs) == len(original_key_oid_info) + + def test_NexthopWithGreTunnelCreationFailIfDependenciesAreMissing(self, dvs, testlog): + # Initialize L3 objects and database connectors. + self._set_up(dvs) + self._set_vrf(dvs) + + # Maintain list of original Application and ASIC DB entries before + # adding new entries. 
+ db_list = ( + ( + self._p4rt_nexthop_obj.appl_db, + "%s:%s" + % ( + self._p4rt_nexthop_obj.APP_DB_TBL_NAME, + self._p4rt_nexthop_obj.TBL_NAME, + ), + ), + ( + self._p4rt_nexthop_obj.appl_state_db, + "%s:%s" + % ( + self._p4rt_nexthop_obj.APP_DB_TBL_NAME, + self._p4rt_nexthop_obj.TBL_NAME, + ), + ), + (self._p4rt_nexthop_obj.asic_db, + self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME), + ) + self._p4rt_nexthop_obj.get_original_redis_entries(db_list) + db_list = ( + ( + self._p4rt_gre_tunnel_obj.appl_db, + "%s:%s" + % ( + self._p4rt_gre_tunnel_obj.APP_DB_TBL_NAME, + self._p4rt_gre_tunnel_obj.TBL_NAME, + ), + ), + ( + self._p4rt_gre_tunnel_obj.appl_state_db, + "%s:%s" + % ( + self._p4rt_gre_tunnel_obj.APP_DB_TBL_NAME, + self._p4rt_gre_tunnel_obj.TBL_NAME, + ), + ), + (self._p4rt_gre_tunnel_obj.asic_db, + self._p4rt_gre_tunnel_obj.ASIC_DB_TBL_NAME), + ) + self._p4rt_gre_tunnel_obj.get_original_redis_entries(db_list) + db_list = ( + (self._p4rt_router_intf_obj.asic_db, + self._p4rt_router_intf_obj.ASIC_DB_TBL_NAME), + ) + self._p4rt_router_intf_obj.get_original_redis_entries(db_list) + + # Fetch the original key to oid information from Redis DB. + key_to_oid_helper = util.KeyToOidDBHelper(dvs) + _, original_key_oid_info = key_to_oid_helper.get_db_info() + + # Create tunnel. + tunnel_id, tunnel_key, attr_list = self._p4rt_gre_tunnel_obj.create_gre_tunnel() + util.verify_response( + self.response_consumer, tunnel_key, attr_list, "SWSS_RC_NOT_FOUND", + "[OrchAgent] Router intf '16' does not exist" + ) + + # Verify that P4RT key to OID count does not change in Redis DB. + status, fvs = key_to_oid_helper.get_db_info() + assert status == False + assert len(fvs) == len(original_key_oid_info) + + # Query application database for tunnel entries. 
+ tunnel_entries = util.get_keys( + self._p4rt_gre_tunnel_obj.appl_db, + self._p4rt_gre_tunnel_obj.APP_DB_TBL_NAME + + ":" + self._p4rt_gre_tunnel_obj.TBL_NAME, + ) + assert len(tunnel_entries) == ( + self._p4rt_gre_tunnel_obj.get_original_appl_db_entries_count() + 1 + ) + + # Query application state database for tunnel entries. + state_tunnel_entries = util.get_keys( + self._p4rt_gre_tunnel_obj.appl_state_db, + self._p4rt_gre_tunnel_obj.APP_DB_TBL_NAME + + ":" + self._p4rt_gre_tunnel_obj.TBL_NAME, + ) + assert len(state_tunnel_entries) == ( + self._p4rt_gre_tunnel_obj.get_original_appl_state_db_entries_count() + ) + + # Query ASIC database for tunnel entries. + tunnel_entries = util.get_keys( + self._p4rt_gre_tunnel_obj.asic_db, self._p4rt_gre_tunnel_obj.ASIC_DB_TBL_NAME + ) + assert len(tunnel_entries) == ( + self._p4rt_gre_tunnel_obj.get_original_asic_db_entries_count() + ) + + # Create tunnel nexthop. + nexthop_id, nexthop_key, attr_list = self._p4rt_nexthop_obj.create_next_hop( + tunnel_id=tunnel_id + ) + util.verify_response( + self.response_consumer, nexthop_key, attr_list, "SWSS_RC_NOT_FOUND", + "[OrchAgent] GRE Tunnel 'tunnel-1' does not exist in GRE Tunnel Manager" + ) + + # Verify that P4RT key to OID count does not change in Redis DB. + status, fvs = key_to_oid_helper.get_db_info() + assert status == False assert len(fvs) == len(original_key_oid_info) + + # Query application database for nexthop entries. + nexthop_entries = util.get_keys( + self._p4rt_nexthop_obj.appl_db, + self._p4rt_nexthop_obj.APP_DB_TBL_NAME + ":" + self._p4rt_nexthop_obj.TBL_NAME, + ) + assert len(nexthop_entries) == ( + self._p4rt_nexthop_obj.get_original_appl_db_entries_count() + 1 + ) + + # Query application state database for nexthop entries. 
+ state_nexthop_entries = util.get_keys( + self._p4rt_nexthop_obj.appl_state_db, + self._p4rt_nexthop_obj.APP_DB_TBL_NAME + ":" + self._p4rt_nexthop_obj.TBL_NAME, + ) + assert len(state_nexthop_entries) == ( + self._p4rt_nexthop_obj.get_original_appl_state_db_entries_count() + ) + + # Query ASIC database for nexthop entries. + nexthop_entries = util.get_keys( + self._p4rt_nexthop_obj.asic_db, self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME + ) + assert len(nexthop_entries) == ( + self._p4rt_nexthop_obj.get_original_asic_db_entries_count() + ) + + self._clean_vrf(dvs) diff --git a/tests/p4rt/test_l3_admit.py b/tests/p4rt/test_l3_admit.py new file mode 100644 index 0000000000..81ffdf884a --- /dev/null +++ b/tests/p4rt/test_l3_admit.py @@ -0,0 +1,263 @@ +from swsscommon import swsscommon + +import pytest +import json +import util +import l3_admit + + +class TestP4RTL3Admit(object): + def _set_up(self, dvs): + self._p4rt_l3_admit_obj = l3_admit.P4RtL3AdmitWrapper() + + self._p4rt_l3_admit_obj.set_up_databases(dvs) + self.response_consumer = swsscommon.NotificationConsumer( + self._p4rt_l3_admit_obj.appl_db, "APPL_DB_" + + swsscommon.APP_P4RT_TABLE_NAME + "_RESPONSE_CHANNEL" + ) + + @pytest.mark.skip(reason="sairedis vs MY MAC support is not ready") + def test_DefaultL3AdmitAddDeletePass(self, dvs, testlog): + # Initialize database connectors. + self._set_up(dvs) + + # Maintain list of original Application and ASIC DB entries before + # adding new entries + db_list = ( + ( + self._p4rt_l3_admit_obj.appl_db, + "%s:%s" + % (self._p4rt_l3_admit_obj.APP_DB_TBL_NAME, self._p4rt_l3_admit_obj.TBL_NAME), + ), + ( + self._p4rt_l3_admit_obj.appl_state_db, + "%s:%s" + % (self._p4rt_l3_admit_obj.APP_DB_TBL_NAME, self._p4rt_l3_admit_obj.TBL_NAME), + ), + (self._p4rt_l3_admit_obj.asic_db, + self._p4rt_l3_admit_obj.ASIC_DB_TBL_NAME), + ) + self._p4rt_l3_admit_obj.get_original_redis_entries(db_list) + + # Fetch the original key to oid information from Redis DB. 
+ key_to_oid_helper = util.KeyToOidDBHelper(dvs) + _, original_key_oid_info = key_to_oid_helper.get_db_info() + + # l3 admit entry attributes + # P4RT_TABLE:FIXED_L3_ADMIT_TABLE:{\"match/dst_mac\":\"00:02:03:04:00:00&ff:ff:ff:ff:00:00\",\"match/in_port\":\"Ethernet8\",\"priority\":2030} + # "action": "admit_to_l3" + # "controller_metadata": "..." + dst_mac_data = "00:02:03:04:00:00" + dst_mac_mask = "FF:FF:FF:FF:00:00" + in_port = "Ethernet8" + priority = 2030 + + # Create l3 admit entry. + ( + l3_admit_key, + attr_list, + ) = self._p4rt_l3_admit_obj.create_l3_admit(dst_mac_data + "&" + dst_mac_mask, priority, in_port) + util.verify_response( + self.response_consumer, l3_admit_key, attr_list, "SWSS_RC_SUCCESS" + ) + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count = 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Query application database for l3 admit entries. + l3_admit_entries = util.get_keys( + self._p4rt_l3_admit_obj.appl_db, + self._p4rt_l3_admit_obj.APP_DB_TBL_NAME + + ":" + self._p4rt_l3_admit_obj.TBL_NAME, + ) + assert len(l3_admit_entries) == ( + self._p4rt_l3_admit_obj.get_original_appl_db_entries_count() + 1 + ) + + # Query application database for newly created l3 admit key. + (status, fvs) = util.get_key( + self._p4rt_l3_admit_obj.appl_db, + self._p4rt_l3_admit_obj.APP_DB_TBL_NAME, + l3_admit_key, + ) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query application state database for l3 admit entries. + state_l3_admit_entries = util.get_keys( + self._p4rt_l3_admit_obj.appl_state_db, + self._p4rt_l3_admit_obj.APP_DB_TBL_NAME + + ":" + self._p4rt_l3_admit_obj.TBL_NAME, + ) + assert len(state_l3_admit_entries) == ( + self._p4rt_l3_admit_obj.get_original_appl_state_db_entries_count() + 1 + ) + + # Query application state database for newly created l3 admit key. 
+ (status, fvs) = util.get_key( + self._p4rt_l3_admit_obj.appl_state_db, + self._p4rt_l3_admit_obj.APP_DB_TBL_NAME, + l3_admit_key, + ) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query ASIC database for my mac entries. + asic_l3_admit_entries = util.get_keys( + self._p4rt_l3_admit_obj.asic_db, self._p4rt_l3_admit_obj.ASIC_DB_TBL_NAME + ) + assert len(asic_l3_admit_entries) == ( + self._p4rt_l3_admit_obj.get_original_asic_db_entries_count() + 1 + ) + + # Query ASIC database for newly created my mac key. + asic_db_key = self._p4rt_l3_admit_obj.get_newly_created_asic_db_key() + assert asic_db_key is not None + (status, fvs) = util.get_key( + self._p4rt_l3_admit_obj.asic_db, + self._p4rt_l3_admit_obj.ASIC_DB_TBL_NAME, + asic_db_key, + ) + assert status == True + attr_list = [(self._p4rt_l3_admit_obj.SAI_ATTR_DST_MAC, dst_mac_data), + (self._p4rt_l3_admit_obj.SAI_ATTR_DST_MAC_MASK, dst_mac_mask), + (self._p4rt_l3_admit_obj.SAI_ATTR_PRIORITY, str(priority)), + (self._p4rt_l3_admit_obj.SAI_ATTR_PORT_ID, util.get_port_oid_by_name(dvs, in_port))] + util.verify_attr(fvs, attr_list) + + # deplicate SET will be no-op. + new_l3_admit_key, new_attr_list = self._p4rt_l3_admit_obj.create_l3_admit( + dst_mac_data + "&" + dst_mac_mask, priority, in_port) + util.verify_response( + self.response_consumer, new_l3_admit_key, new_attr_list, + "SWSS_RC_SUCCESS", + "L3 Admit entry with the same key received: 'match/dst_mac=00:02:03:04:00:00&ff:ff:ff:ff:00:00:match/in_port=Ethernet8:priority=2030'" + ) + + # Verify that P4RT key to OID count did not change in Redis DB. + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Remove l3 admit entry. + self._p4rt_l3_admit_obj.remove_app_db_entry(l3_admit_key) + util.verify_response(self.response_consumer, + l3_admit_key, [], "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count decremented to orig in Redis DB. 
+ status, fvs = key_to_oid_helper.get_db_info() + assert status == False + assert len(fvs) == len(original_key_oid_info) + + # Query application database for route entries. + l3_admit_entries = util.get_keys( + self._p4rt_l3_admit_obj.appl_db, + self._p4rt_l3_admit_obj.APP_DB_TBL_NAME + + ":" + self._p4rt_l3_admit_obj.TBL_NAME, + ) + assert len(l3_admit_entries) == ( + self._p4rt_l3_admit_obj.get_original_appl_db_entries_count() + ) + + # Verify that the route_key no longer exists in application database. + (status, fsv) = util.get_key( + self._p4rt_l3_admit_obj.appl_db, + self._p4rt_l3_admit_obj.APP_DB_TBL_NAME, + l3_admit_key, + ) + assert status == False + + # Query application database for route entries. + state_l3_admit_entries = util.get_keys( + self._p4rt_l3_admit_obj.appl_state_db, + self._p4rt_l3_admit_obj.APP_DB_TBL_NAME + + ":" + self._p4rt_l3_admit_obj.TBL_NAME, + ) + assert len(state_l3_admit_entries) == ( + self._p4rt_l3_admit_obj.get_original_appl_state_db_entries_count() + ) + + # Verify that the route_key no longer exists in application database. + (status, fsv) = util.get_key( + self._p4rt_l3_admit_obj.appl_state_db, + self._p4rt_l3_admit_obj.APP_DB_TBL_NAME, + l3_admit_key, + ) + assert status == False + + # Query ASIC database for my mac entries. + my_mac_entries = util.get_keys( + self._p4rt_l3_admit_obj.asic_db, self._p4rt_l3_admit_obj.ASIC_DB_TBL_NAME + ) + assert len(my_mac_entries) == ( + self._p4rt_l3_admit_obj.get_original_asic_db_entries_count() + ) + + # Verify that removed route no longer exists in ASIC database. + (status, fvs) = util.get_key( + self._p4rt_l3_admit_obj.asic_db, + self._p4rt_l3_admit_obj.ASIC_DB_TBL_NAME, + asic_db_key, + ) + assert status == False + + def test_InvalidL3AdmitKeyFailsToCreate(self, dvs, testlog): + # Initialize database connectors. 
+ self._set_up(dvs) + + # Maintain list of original Application and ASIC DB entries before + # adding new entries + db_list = ( + ( + self._p4rt_l3_admit_obj.appl_db, + "%s:%s" + % (self._p4rt_l3_admit_obj.APP_DB_TBL_NAME, self._p4rt_l3_admit_obj.TBL_NAME), + ), + ( + self._p4rt_l3_admit_obj.appl_state_db, + "%s:%s" + % (self._p4rt_l3_admit_obj.APP_DB_TBL_NAME, self._p4rt_l3_admit_obj.TBL_NAME), + ), + (self._p4rt_l3_admit_obj.asic_db, + self._p4rt_l3_admit_obj.ASIC_DB_TBL_NAME), + ) + self._p4rt_l3_admit_obj.get_original_redis_entries(db_list) + + # Fetch the original key to oid information from Redis DB. + key_to_oid_helper = util.KeyToOidDBHelper(dvs) + _, original_key_oid_info = key_to_oid_helper.get_db_info() + + # Invalid l3 admit key + # P4RT_TABLE:FIXED_L3_ADMIT_TABLE:{\"match/dst_mac\":\"1\",\"match/in_port\":\"Ethernet8\",\"priority\":2030} + # "action": "admit_to_l3" + # "controller_metadata": "..." + dst_mac_data = "1" + in_port = "Ethernet8" + priority = 2030 + + # Create l3 admit entry. + ( + l3_admit_key, + attr_list, + ) = self._p4rt_l3_admit_obj.create_l3_admit(dst_mac_data, priority, in_port) + util.verify_response( + self.response_consumer, l3_admit_key, attr_list, + "SWSS_RC_INVALID_PARAM", + "[OrchAgent] Failed to deserialize l3 admit key" + ) + + # Verify that P4RT key to OID count not changed in Redis DB + status, fvs = key_to_oid_helper.get_db_info() + assert status == False + assert len(fvs) == len(original_key_oid_info) + + # Query ASIC database for my mac entries. 
Count remains the same + asic_l3_admit_entries = util.get_keys( + self._p4rt_l3_admit_obj.asic_db, self._p4rt_l3_admit_obj.ASIC_DB_TBL_NAME + ) + assert len(asic_l3_admit_entries) == ( + self._p4rt_l3_admit_obj.get_original_asic_db_entries_count() + ) diff --git a/tests/p4rt/test_p4rt_acl.py b/tests/p4rt/test_p4rt_acl.py index 89015fc9d5..cfa1c0fb45 100644 --- a/tests/p4rt/test_p4rt_acl.py +++ b/tests/p4rt/test_p4rt_acl.py @@ -24,12 +24,13 @@ def verify_selected_attr_vals(db, table, key, expected_attrs): fv_dict = dict(fvs) for attr_name, expected_val in expected_attrs: - assert attr_name in fv_dict, "Attribute %s not found in %s" % (attr_name, key) + assert attr_name in fv_dict, "Attribute %s not found in %s" % ( + attr_name, key) assert fv_dict[attr_name] == expected_val, "Wrong value %s for the attribute %s = %s" % ( - fv_dict[attr_name], - attr_name, - expected_val, - ) + fv_dict[attr_name], + attr_name, + expected_val, + ) class TestP4RTAcl(object): @@ -63,7 +64,8 @@ def _set_up(self, dvs): self._p4rt_udf_obj.set_up_databases(dvs) self.response_consumer = swsscommon.NotificationConsumer( - self._p4rt_acl_table_definition_obj.appl_state_db, "APPL_DB_P4RT_TABLE_RESPONSE_CHANNEL" + self._p4rt_acl_table_definition_obj.appl_db, "APPL_DB_" + + swsscommon.APP_P4RT_TABLE_NAME + "_RESPONSE_CHANNEL" ) @pytest.mark.skip(reason="p4orch is not enabled") @@ -127,8 +129,8 @@ def test_AclRulesAddUpdateDelPass(self, dvs, testlog): "ASIC_STATE:SAI_OBJECT_TYPE_SWITCH", switch_oid, [("SAI_SWITCH_ATTR_PRE_INGRESS_ACL", pre_ingress_group_oids[0]), - ("SAI_SWITCH_ATTR_INGRESS_ACL",ingress_group_oids[0]), - ("SAI_SWITCH_ATTR_EGRESS_ACL", egress_group_oids[0])], + ("SAI_SWITCH_ATTR_INGRESS_ACL", ingress_group_oids[0]), + ("SAI_SWITCH_ATTR_EGRESS_ACL", egress_group_oids[0])], ) # Verify APP DB trap groups for QOS_QUEUE @@ -161,6 +163,7 @@ def test_AclRulesAddUpdateDelPass(self, dvs, testlog): size = "123" ether_type = 
'{"kind":"sai_field","sai_field":"SAI_ACL_TABLE_ATTR_FIELD_ETHER_TYPE","format":"HEX_STRING","bitwidth":8}' ether_dst = '{"kind":"sai_field","sai_field":"SAI_ACL_TABLE_ATTR_FIELD_DST_MAC","format":"MAC","bitwidth":48}' + l3_class_id = '{"kind":"sai_field","sai_field":"SAI_ACL_TABLE_ATTR_FIELD_ROUTE_DST_USER_META","format":"HEX_STRING","bitwidth":6}' is_ip = '{"kind":"sai_field","sai_field":"SAI_ACL_TABLE_ATTR_FIELD_ACL_IP_TYPE/IP","format":"HEX_STRING","bitwidth":1}' is_ipv4 = '{"kind":"sai_field","sai_field":"SAI_ACL_TABLE_ATTR_FIELD_ACL_IP_TYPE/IPV4ANY","format":"HEX_STRING","bitwidth":1}' is_ipv6 = '{"kind":"sai_field","sai_field":"SAI_ACL_TABLE_ATTR_FIELD_ACL_IP_TYPE/IPV6ANY","format":"HEX_STRING","bitwidth":1}' @@ -185,6 +188,7 @@ def test_AclRulesAddUpdateDelPass(self, dvs, testlog): (self._p4rt_acl_table_definition_obj.SIZE_FIELD, size), (self._p4rt_acl_table_definition_obj.MATCH_FIELD_ETHER_DST, ether_dst), (self._p4rt_acl_table_definition_obj.MATCH_FIELD_ETHER_TYPE, ether_type), + (self._p4rt_acl_table_definition_obj.MATCH_FIELD_L3_CLASS_ID, l3_class_id), (self._p4rt_acl_table_definition_obj.MATCH_FIELD_IS_IP, is_ip), (self._p4rt_acl_table_definition_obj.MATCH_FIELD_IS_IPV4, is_ipv4), (self._p4rt_acl_table_definition_obj.MATCH_FIELD_IS_IPV6, is_ipv6), @@ -257,8 +261,17 @@ def test_AclRulesAddUpdateDelPass(self, dvs, testlog): assert status == True util.verify_attr(fvs, attr_list) + asic_udf_matches = util.get_keys( + self._p4rt_udf_match_obj.asic_db, self._p4rt_udf_match_obj.ASIC_DB_TBL_NAME + ) + # query ASIC database for default UDF wildcard match - udf_match_asic_db_key = original_asic_udf_matches[0] + udf_match_asic_db_keys = [ + key for key in asic_udf_matches if key not in original_asic_udf_matches + ] + + assert len(udf_match_asic_db_keys) == 1 + udf_match_asic_db_key = udf_match_asic_db_keys[0] (status, fvs) = util.get_key( self._p4rt_udf_match_obj.asic_db, @@ -321,7 +334,8 @@ def test_AclRulesAddUpdateDelPass(self, dvs, testlog): assert 
len(udfs_asic) == len(original_asic_udfs) + 2 # query ASIC database for newly created UDFs - udfs_asic_db_keys = [key for key in udfs_asic if key not in original_asic_udfs] + udfs_asic_db_keys = [ + key for key in udfs_asic if key not in original_asic_udfs] assert len(udfs_asic_db_keys) == 2 udfs_asic_db_keys.sort() udf_0_asic_db_key = udfs_asic_db_keys[0] @@ -382,6 +396,7 @@ def test_AclRulesAddUpdateDelPass(self, dvs, testlog): ), (self._p4rt_acl_table_definition_obj.SAI_ACL_TABLE_ATTR_SIZE, size), (self._p4rt_acl_table_definition_obj.SAI_ATTR_MATCH_ETHER_TYPE, "true"), + (self._p4rt_acl_table_definition_obj.SAI_ATTR_MATCH_ROUTE_DST_USER_META, "true"), (self._p4rt_acl_table_definition_obj.SAI_ATTR_MATCH_IP_TYPE, "true"), (self._p4rt_acl_table_definition_obj.SAI_ATTR_MATCH_DST_MAC, "true"), (self._p4rt_acl_table_definition_obj.SAI_ATTR_MATCH_SRC_IPV6_WORD3, "true"), @@ -440,7 +455,8 @@ def test_AclRulesAddUpdateDelPass(self, dvs, testlog): (self._p4rt_acl_rule_obj.METER_PBURST, meter_pbs), ] - self._p4rt_acl_rule_obj.set_app_db_entry(table_name_with_rule_key1, attr_list) + self._p4rt_acl_rule_obj.set_app_db_entry( + table_name_with_rule_key1, attr_list) util.verify_response( self.response_consumer, table_name_with_rule_key1, @@ -612,7 +628,8 @@ def test_AclRulesAddUpdateDelPass(self, dvs, testlog): (self._p4rt_acl_rule_obj.METER_PBURST, meter_pbs), ] - self._p4rt_acl_rule_obj.set_app_db_entry(table_name_with_rule_key1, attr_list) + self._p4rt_acl_rule_obj.set_app_db_entry( + table_name_with_rule_key1, attr_list) util.verify_response( self.response_consumer, table_name_with_rule_key1, @@ -785,7 +802,8 @@ def test_AclRulesAddUpdateDelPass(self, dvs, testlog): (self._p4rt_acl_rule_obj.METER_PBURST, meter_pbs), ] - self._p4rt_acl_rule_obj.set_app_db_entry(table_name_with_rule_key2, attr_list) + self._p4rt_acl_rule_obj.set_app_db_entry( + table_name_with_rule_key2, attr_list) util.verify_response( self.response_consumer, table_name_with_rule_key2, @@ -972,6 +990,189 @@ 
def test_AclRulesAddUpdateDelPass(self, dvs, testlog): ] util.verify_attr(fvs, attr_list) + # create ACL rule 3 with match field SAI_ACL_TABLE_ATTR_FIELD_ROUTE_DST_USER_META + rule_json_key3 = '{"match/ether_type":"0x0800","match/l3_class_id":"0x1", "priority":100}' + action = "copy_and_set_tc" + table_name_with_rule_key3 = table_name + ":" + rule_json_key3 + + attr_list = [ + (self._p4rt_acl_rule_obj.ACTION, action), + ("param/traffic_class", "1"), + ] + + self._p4rt_acl_rule_obj.set_app_db_entry( + table_name_with_rule_key3, attr_list) + util.verify_response( + self.response_consumer, + table_name_with_rule_key3, + attr_list, + "SWSS_RC_SUCCESS", + ) + + # query application database for ACL rules + acl_rules = util.get_keys( + self._p4rt_acl_rule_obj.appl_db, + self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, + ) + assert len(acl_rules) == len(original_appl_acl_rules) + 3 + + # query application database for newly created ACL rule + (status, fvs) = util.get_key( + self._p4rt_acl_rule_obj.appl_db, + self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME, + table_name_with_rule_key3, + ) + assert status == True + util.verify_attr(fvs, attr_list) + + # query application state database for ACL rules + state_acl_rules = util.get_keys( + self._p4rt_acl_rule_obj.appl_state_db, + self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, + ) + assert len(state_acl_rules) == len(original_appl_state_acl_rules) + 3 + + # query application state database for newly created ACL rule + (status, fvs) = util.get_key( + self._p4rt_acl_rule_obj.appl_state_db, + self._p4rt_acl_table_definition_obj.APP_DB_TBL_NAME, + table_name_with_rule_key3, + ) + assert status == True + util.verify_attr(fvs, attr_list) + + # query ASIC database for ACL counters + acl_asic_counters = util.get_keys( + self._p4rt_acl_counter_obj.asic_db, + self._p4rt_acl_counter_obj.ASIC_DB_TBL_NAME, + ) + assert len(acl_asic_counters) == len(original_asic_acl_counters) + 3 + + # query ASIC database for 
newly created ACL counter + counter_asic_db_keys = [ + key for key in acl_asic_counters + if key not in original_asic_acl_counters + and key != counter_asic_db_key1 + and key != counter_asic_db_key2 + ] + assert len(counter_asic_db_keys) == 1 + counter_asic_db_key3 = counter_asic_db_keys[0] + + (status, fvs) = util.get_key( + self._p4rt_acl_counter_obj.asic_db, + self._p4rt_acl_counter_obj.ASIC_DB_TBL_NAME, + counter_asic_db_key3, + ) + assert status == True + attr_list = [ + (self._p4rt_acl_counter_obj.SAI_ATTR_ENABLE_PACKET_COUNT, "true"), + (self._p4rt_acl_counter_obj.SAI_ATTR_ENABLE_BYTE_COUNT, "true"), + (self._p4rt_acl_counter_obj.SAI_ATTR_TABLE_ID, table_asic_db_key), + ] + util.verify_attr(fvs, attr_list) + + # query ASIC database for ACL rules + acl_asic_rules = util.get_keys( + self._p4rt_acl_rule_obj.asic_db, self._p4rt_acl_rule_obj.ASIC_DB_TBL_NAME + ) + assert len(acl_asic_rules) == len(original_asic_acl_rules) + 3 + + # query ASIC database for newly created ACL rule + rule_asic_db_keys = [ + key for key in acl_asic_rules + if key not in original_asic_acl_rules + and key != rule_asic_db_key1 + and key != rule_asic_db_key2 + ] + assert len(rule_asic_db_keys) == 1 + rule_asic_db_key3 = rule_asic_db_keys[0] + + (status, fvs) = util.get_key( + self._p4rt_acl_rule_obj.asic_db, + self._p4rt_acl_rule_obj.ASIC_DB_TBL_NAME, + rule_asic_db_key3, + ) + assert status == True + attr_list = [ + (self._p4rt_acl_rule_obj.SAI_ATTR_ACTION_SET_TC, "1"), + ( + self._p4rt_acl_rule_obj.SAI_ATTR_ACTION_PACKET_ACTION, + "SAI_PACKET_ACTION_COPY", + ), + (self._p4rt_acl_rule_obj.SAI_ATTR_MATCH_ETHER_TYPE, "2048&mask:0xffff"), + ( + self._p4rt_acl_rule_obj.SAI_ATTR_MATCH_ROUTE_DST_USER_META, + "1&mask:0xffffffff", + ), + ( + self._p4rt_acl_rule_obj.SAI_ATTR_MATCH_IP_TYPE, + "SAI_ACL_IP_TYPE_ANY&mask:0xffffffffffffffff", + ), + (self._p4rt_acl_rule_obj.SAI_ATTR_TABLE_ID, table_asic_db_key), + (self._p4rt_acl_rule_obj.SAI_ATTR_COUNTER, counter_asic_db_key3), + 
(self._p4rt_acl_rule_obj.SAI_ATTR_ADMIN_STATE, "true"), + (self._p4rt_acl_rule_obj.SAI_ATTR_PRIORITY, "100"), + ] + util.verify_attr(fvs, attr_list) + + # remove ACL rule 3 + self._p4rt_acl_rule_obj.remove_app_db_entry(table_name_with_rule_key3) + util.verify_response( + self.response_consumer, table_name_with_rule_key3, [], "SWSS_RC_SUCCESS" + ) + + # query application database for ACL rules + acl_rules = util.get_keys( + self._p4rt_acl_rule_obj.appl_db, + self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, + ) + assert len(acl_rules) == len(original_appl_acl_rules) + 2 + + # verify that the ACL rule no longer exists in application database + (status, fvs) = util.get_key( + self._p4rt_acl_rule_obj.appl_db, + self._p4rt_acl_rule_obj.APP_DB_TBL_NAME, + table_name_with_rule_key3, + ) + assert status == False + + # query application state database for ACL rules + state_acl_rules = util.get_keys( + self._p4rt_acl_rule_obj.appl_state_db, + self._p4rt_acl_rule_obj.APP_DB_TBL_NAME + ":" + table_name, + ) + assert len(state_acl_rules) == len(original_appl_state_acl_rules) + 2 + + # verify that the ACL rule no longer exists in application state database + (status, fvs) = util.get_key( + self._p4rt_acl_rule_obj.appl_state_db, + self._p4rt_acl_rule_obj.APP_DB_TBL_NAME, + table_name_with_rule_key3, + ) + assert status == False + + # query ASIC database for ACL rules + acl_rules = util.get_keys( + self._p4rt_acl_rule_obj.asic_db, self._p4rt_acl_rule_obj.ASIC_DB_TBL_NAME + ) + assert len(acl_rules) == len(original_asic_acl_rules) + 2 + + # verify that removed ACL rule no longer exists in ASIC database + (status, fvs) = util.get_key( + self._p4rt_acl_rule_obj.asic_db, + self._p4rt_acl_rule_obj.ASIC_DB_TBL_NAME, + rule_asic_db_key3, + ) + assert status == False + + # verify that removed ACL counter no longer exists in ASIC database + (status, fvs) = util.get_key( + self._p4rt_acl_counter_obj.asic_db, + self._p4rt_acl_counter_obj.ASIC_DB_TBL_NAME, + counter_asic_db_key3, + 
) + assert status == False + # remove ACL rule 1 self._p4rt_acl_rule_obj.remove_app_db_entry(table_name_with_rule_key1) util.verify_response( @@ -1201,7 +1402,8 @@ def test_AclRuleAddWithoutTableDefinitionFails(self, dvs, testlog): (self._p4rt_acl_rule_obj.METER_PBURST, meter_pbs), ] - self._p4rt_acl_rule_obj.set_app_db_entry(table_name_with_rule_key, attr_list) + self._p4rt_acl_rule_obj.set_app_db_entry( + table_name_with_rule_key, attr_list) util.verify_response( self.response_consumer, table_name_with_rule_key, diff --git a/tests/p4rt/test_p4rt_mirror.py b/tests/p4rt/test_p4rt_mirror.py index bc218df147..c1327370c3 100644 --- a/tests/p4rt/test_p4rt_mirror.py +++ b/tests/p4rt/test_p4rt_mirror.py @@ -1,48 +1,51 @@ from swsscommon import swsscommon +import pytest import util import json + class P4RtMirrorSessionWrapper(util.DBInterface): - """Interface to interact with APP DB and ASIC DB tables for P4RT mirror session object.""" - - # database and SAI constants - APP_DB_TBL_NAME = swsscommon.APP_P4RT_TABLE_NAME - TBL_NAME = swsscommon.APP_P4RT_MIRROR_SESSION_TABLE_NAME - ACTION = "action" - PORT = "port" - SRC_IP = "src_ip" - DST_IP = "dst_ip" - SRC_MAC = "src_mac" - DST_MAC = "dst_mac" - TTL = "ttl" - TOS = "tos" - - ASIC_DB_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_MIRROR_SESSION" - SAI_MIRROR_SESSION_ATTR_MONITOR_PORT = "SAI_MIRROR_SESSION_ATTR_MONITOR_PORT" - SAI_MIRROR_SESSION_ATTR_TYPE = "SAI_MIRROR_SESSION_ATTR_TYPE" - SAI_MIRROR_SESSION_ATTR_ERSPAN_ENCAPSULATION_TYPE = "SAI_MIRROR_SESSION_ATTR_ERSPAN_ENCAPSULATION_TYPE" - SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION = "SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION" - SAI_MIRROR_SESSION_ATTR_TOS = "SAI_MIRROR_SESSION_ATTR_TOS" - SAI_MIRROR_SESSION_ATTR_TTL = "SAI_MIRROR_SESSION_ATTR_TTL" - SAI_MIRROR_SESSION_ATTR_SRC_IP_ADDRESS = "SAI_MIRROR_SESSION_ATTR_SRC_IP_ADDRESS" - SAI_MIRROR_SESSION_ATTR_DST_IP_ADDRESS = "SAI_MIRROR_SESSION_ATTR_DST_IP_ADDRESS" - SAI_MIRROR_SESSION_ATTR_SRC_MAC_ADDRESS = 
"SAI_MIRROR_SESSION_ATTR_SRC_MAC_ADDRESS" - SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS = "SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS" - SAI_MIRROR_SESSION_ATTR_GRE_PROTOCOL_TYPE = "SAI_MIRROR_SESSION_ATTR_GRE_PROTOCOL_TYPE" - - def generate_app_db_key(self, mirror_session_id): - d = {} - d[util.prepend_match_field("mirror_session_id")] = mirror_session_id - key = json.dumps(d, separators=(",", ":")) - return self.TBL_NAME + ":" + key + """Interface to interact with APP DB and ASIC DB tables for P4RT mirror session object.""" + + # database and SAI constants + APP_DB_TBL_NAME = swsscommon.APP_P4RT_TABLE_NAME + TBL_NAME = swsscommon.APP_P4RT_MIRROR_SESSION_TABLE_NAME + ACTION = "action" + PORT = "port" + SRC_IP = "src_ip" + DST_IP = "dst_ip" + SRC_MAC = "src_mac" + DST_MAC = "dst_mac" + TTL = "ttl" + TOS = "tos" + + ASIC_DB_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_MIRROR_SESSION" + SAI_MIRROR_SESSION_ATTR_MONITOR_PORT = "SAI_MIRROR_SESSION_ATTR_MONITOR_PORT" + SAI_MIRROR_SESSION_ATTR_TYPE = "SAI_MIRROR_SESSION_ATTR_TYPE" + SAI_MIRROR_SESSION_ATTR_ERSPAN_ENCAPSULATION_TYPE = "SAI_MIRROR_SESSION_ATTR_ERSPAN_ENCAPSULATION_TYPE" + SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION = "SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION" + SAI_MIRROR_SESSION_ATTR_TOS = "SAI_MIRROR_SESSION_ATTR_TOS" + SAI_MIRROR_SESSION_ATTR_TTL = "SAI_MIRROR_SESSION_ATTR_TTL" + SAI_MIRROR_SESSION_ATTR_SRC_IP_ADDRESS = "SAI_MIRROR_SESSION_ATTR_SRC_IP_ADDRESS" + SAI_MIRROR_SESSION_ATTR_DST_IP_ADDRESS = "SAI_MIRROR_SESSION_ATTR_DST_IP_ADDRESS" + SAI_MIRROR_SESSION_ATTR_SRC_MAC_ADDRESS = "SAI_MIRROR_SESSION_ATTR_SRC_MAC_ADDRESS" + SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS = "SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS" + SAI_MIRROR_SESSION_ATTR_GRE_PROTOCOL_TYPE = "SAI_MIRROR_SESSION_ATTR_GRE_PROTOCOL_TYPE" + + def generate_app_db_key(self, mirror_session_id): + d = {} + d[util.prepend_match_field("mirror_session_id")] = mirror_session_id + key = json.dumps(d, separators=(",", ":")) + return self.TBL_NAME + ":" + key + class 
TestP4RTMirror(object): def _set_up(self, dvs): self._p4rt_mirror_session_wrapper = P4RtMirrorSessionWrapper() self._p4rt_mirror_session_wrapper.set_up_databases(dvs) self._response_consumer = swsscommon.NotificationConsumer( - self._p4rt_mirror_session_wrapper.appl_state_db, "APPL_DB_P4RT_TABLE_RESPONSE_CHANNEL") + self._p4rt_mirror_session_wrapper.appl_db, "APPL_DB_" + swsscommon.APP_P4RT_TABLE_NAME + "_RESPONSE_CHANNEL") def test_MirrorSessionAddModifyAndDelete(self, dvs, testlog): # Initialize database connectors @@ -71,13 +74,19 @@ def test_MirrorSessionAddModifyAndDelete(self, dvs, testlog): tos = "0x00" attr_list_in_app_db = [(self._p4rt_mirror_session_wrapper.ACTION, action), - (util.prepend_param_field(self._p4rt_mirror_session_wrapper.PORT), port), - (util.prepend_param_field(self._p4rt_mirror_session_wrapper.SRC_IP), src_ip), - (util.prepend_param_field(self._p4rt_mirror_session_wrapper.DST_IP), dst_ip), - (util.prepend_param_field(self._p4rt_mirror_session_wrapper.SRC_MAC), src_mac), - (util.prepend_param_field(self._p4rt_mirror_session_wrapper.DST_MAC), dst_mac), - (util.prepend_param_field(self._p4rt_mirror_session_wrapper.TTL), ttl), - (util.prepend_param_field(self._p4rt_mirror_session_wrapper.TOS), tos)] + (util.prepend_param_field( + self._p4rt_mirror_session_wrapper.PORT), port), + (util.prepend_param_field( + self._p4rt_mirror_session_wrapper.SRC_IP), src_ip), + (util.prepend_param_field( + self._p4rt_mirror_session_wrapper.DST_IP), dst_ip), + (util.prepend_param_field( + self._p4rt_mirror_session_wrapper.SRC_MAC), src_mac), + (util.prepend_param_field( + self._p4rt_mirror_session_wrapper.DST_MAC), dst_mac), + (util.prepend_param_field( + self._p4rt_mirror_session_wrapper.TTL), ttl), + (util.prepend_param_field(self._p4rt_mirror_session_wrapper.TOS), tos)] mirror_session_key = self._p4rt_mirror_session_wrapper.generate_app_db_key( mirror_session_id) self._p4rt_mirror_session_wrapper.set_app_db_entry( @@ -89,7 +98,8 @@ def 
test_MirrorSessionAddModifyAndDelete(self, dvs, testlog): appl_mirror_entries = util.get_keys( self._p4rt_mirror_session_wrapper.appl_db, self._p4rt_mirror_session_wrapper.APP_DB_TBL_NAME + ":" + self._p4rt_mirror_session_wrapper.TBL_NAME) - assert len(appl_mirror_entries) == len(original_appl_mirror_entries) + 1 + assert len(appl_mirror_entries) == len( + original_appl_mirror_entries) + 1 # Query application database for newly created mirror key (status, fvs) = util.get_key(self._p4rt_mirror_session_wrapper.appl_db, @@ -102,7 +112,8 @@ def test_MirrorSessionAddModifyAndDelete(self, dvs, testlog): appl_state_mirror_entries = util.get_keys( self._p4rt_mirror_session_wrapper.appl_state_db, self._p4rt_mirror_session_wrapper.APP_DB_TBL_NAME + ":" + self._p4rt_mirror_session_wrapper.TBL_NAME) - assert len(appl_state_mirror_entries) == len(original_appl_state_mirror_entries) + 1 + assert len(appl_state_mirror_entries) == len( + original_appl_state_mirror_entries) + 1 # Query application state database for newly created mirror key (status, fvs) = util.get_key(self._p4rt_mirror_session_wrapper.appl_state_db, @@ -113,8 +124,9 @@ def test_MirrorSessionAddModifyAndDelete(self, dvs, testlog): # Query ASIC database for mirror entries asic_mirror_entries = util.get_keys(self._p4rt_mirror_session_wrapper.asic_db, - self._p4rt_mirror_session_wrapper.ASIC_DB_TBL_NAME) - assert len(asic_mirror_entries) == len(original_asic_mirror_entries) + 1 + self._p4rt_mirror_session_wrapper.ASIC_DB_TBL_NAME) + assert len(asic_mirror_entries) == len( + original_asic_mirror_entries) + 1 # Query ASIC database for newly created mirror key asic_db_key = None @@ -134,23 +146,28 @@ def test_MirrorSessionAddModifyAndDelete(self, dvs, testlog): assert port_oid != None expected_attr_list_in_asic_db = [ - (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_MONITOR_PORT, port_oid), - (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_TYPE, "SAI_MIRROR_SESSION_TYPE_ENHANCED_REMOTE"), - 
(self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_ERSPAN_ENCAPSULATION_TYPE, "SAI_ERSPAN_ENCAPSULATION_TYPE_MIRROR_L3_GRE_TUNNEL"), - (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION, "4"), # MIRROR_SESSION_DEFAULT_IP_HDR_VER - (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_TOS, "0"), - (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_TTL, "64"), - (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_SRC_IP_ADDRESS, src_ip), - (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_DST_IP_ADDRESS, dst_ip), - (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_SRC_MAC_ADDRESS, src_mac), - (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS, dst_mac), - (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_GRE_PROTOCOL_TYPE, "35006") # GRE_PROTOCOL_ERSPAN 0x88be + (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_MONITOR_PORT, port_oid), + (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_TYPE, + "SAI_MIRROR_SESSION_TYPE_ENHANCED_REMOTE"), + (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_ERSPAN_ENCAPSULATION_TYPE, + "SAI_ERSPAN_ENCAPSULATION_TYPE_MIRROR_L3_GRE_TUNNEL"), + # MIRROR_SESSION_DEFAULT_IP_HDR_VER + (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_IPHDR_VERSION, "4"), + (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_TOS, "0"), + (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_TTL, "64"), + (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_SRC_IP_ADDRESS, src_ip), + (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_DST_IP_ADDRESS, dst_ip), + (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_SRC_MAC_ADDRESS, src_mac), + (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS, dst_mac), + # GRE_PROTOCOL_ERSPAN 0x88be + (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_GRE_PROTOCOL_TYPE, "35006") ] util.verify_attr(fvs, 
expected_attr_list_in_asic_db) # 2. Modify the existing mirror session. new_dst_mac = "00:1A:11:17:5F:FF" - attr_list_in_app_db[5] = (util.prepend_param_field(self._p4rt_mirror_session_wrapper.DST_MAC), new_dst_mac) + attr_list_in_app_db[5] = (util.prepend_param_field( + self._p4rt_mirror_session_wrapper.DST_MAC), new_dst_mac) self._p4rt_mirror_session_wrapper.set_app_db_entry( mirror_session_key, attr_list_in_app_db) util.verify_response( @@ -171,7 +188,8 @@ def test_MirrorSessionAddModifyAndDelete(self, dvs, testlog): util.verify_attr(fvs, attr_list_in_app_db) # Query ASIC DB about the modified mirror session. - expected_attr_list_in_asic_db[9] = (self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS, new_dst_mac) + expected_attr_list_in_asic_db[9] = ( + self._p4rt_mirror_session_wrapper.SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS, new_dst_mac) (status, fvs) = util.get_key(self._p4rt_mirror_session_wrapper.asic_db, self._p4rt_mirror_session_wrapper.ASIC_DB_TBL_NAME, asic_db_key) @@ -200,7 +218,8 @@ def test_MirrorSessionAddModifyAndDelete(self, dvs, testlog): appl_state_mirror_entries = util.get_keys( self._p4rt_mirror_session_wrapper.appl_state_db, self._p4rt_mirror_session_wrapper.APP_DB_TBL_NAME + ":" + self._p4rt_mirror_session_wrapper.TBL_NAME) - assert len(appl_state_mirror_entries) == len(original_appl_state_mirror_entries) + assert len(appl_state_mirror_entries) == len( + original_appl_state_mirror_entries) # Query application state database for the deleted mirror key (status, fvs) = util.get_key(self._p4rt_mirror_session_wrapper.appl_state_db, @@ -210,7 +229,7 @@ def test_MirrorSessionAddModifyAndDelete(self, dvs, testlog): # Query ASIC database for mirror entries asic_mirror_entries = util.get_keys(self._p4rt_mirror_session_wrapper.asic_db, - self._p4rt_mirror_session_wrapper.ASIC_DB_TBL_NAME) + self._p4rt_mirror_session_wrapper.ASIC_DB_TBL_NAME) assert len(asic_mirror_entries) == len(original_asic_mirror_entries) # Query ASIC state 
database for the deleted mirror key diff --git a/tests/p4rt/test_viplb.py b/tests/p4rt/test_viplb.py new file mode 100644 index 0000000000..fbb51ea48d --- /dev/null +++ b/tests/p4rt/test_viplb.py @@ -0,0 +1,282 @@ +from swsscommon import swsscommon + +import pytest +import json +import util +import time +import l3 +import viplb +import tables_definition + +def getCrmCounterValue(dvs, key, counter): + + counters_db = swsscommon.DBConnector(swsscommon.COUNTERS_DB, dvs.redis_sock, 0) + crm_stats_table = swsscommon.Table(counters_db, 'CRM') + + for k in crm_stats_table.get(key)[1]: + if k[0] == counter: + return int(k[1]) + + return 0 + +def crm_update(dvs, field, value): + cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(cfg_db, "CRM") + fvs = swsscommon.FieldValuePairs([(field, value)]) + tbl.set("Config", fvs) + time.sleep(1) + + +class TestP4RTVIPLB(object): + + def _set_up(self, dvs): + self._p4rt_tables_definition_obj = tables_definition.P4RtTableDefinitionWrapper() + self._p4rt_router_intf_obj = l3.P4RtRouterInterfaceWrapper() + self._p4rt_neighbor_obj = l3.P4RtNeighborWrapper() + self._p4rt_nexthop_obj = l3.P4RtNextHopWrapper() + self._p4rt_viplb_obj = viplb.P4RtVIPLBWrapper() + + self._p4rt_tables_definition_obj.set_up_databases(dvs) + self._p4rt_router_intf_obj.set_up_databases(dvs) + self._p4rt_neighbor_obj.set_up_databases(dvs) + self._p4rt_nexthop_obj.set_up_databases(dvs) + self._p4rt_viplb_obj.set_up_databases(dvs) + self.response_consumer = swsscommon.NotificationConsumer( + self._p4rt_viplb_obj.appl_db, "APPL_DB_" + + swsscommon.APP_P4RT_TABLE_NAME + "_RESPONSE_CHANNEL" + ) + + def test_VIPv4LBWithGoodNexthopAddUpdateDeletePass(self, dvs, testlog): + # Initialize L3 objects and database connectors. 
+ self._set_up(dvs) + crm_update(dvs, "polling_interval", "1") + + # Create tables definition AppDb entry + tables_definition_key, attr_list = ( + self._p4rt_tables_definition_obj.create_tables_definition() + ) + util.verify_response(self.response_consumer, tables_definition_key, + attr_list, "SWSS_RC_SUCCESS") + + # Set IP type for viplb object. + self._p4rt_viplb_obj.set_ip_type("IPV4") + + # Maintain list of original Application and ASIC DB entries before + # adding new entry. + db_list = ((self._p4rt_nexthop_obj.asic_db, + self._p4rt_nexthop_obj.ASIC_DB_TBL_NAME),) + self._p4rt_nexthop_obj.get_original_redis_entries(db_list) + db_list = ((self._p4rt_viplb_obj.appl_db, + "%s:%s" % (self._p4rt_viplb_obj.APP_DB_TBL_NAME, + self._p4rt_viplb_obj.TBL_NAME)), + (self._p4rt_viplb_obj.appl_state_db, + "%s:%s" % (self._p4rt_viplb_obj.APP_DB_TBL_NAME, + self._p4rt_viplb_obj.TBL_NAME)), + (self._p4rt_viplb_obj.asic_db, + self._p4rt_viplb_obj.ASIC_DB_TBL_NAME)) + self._p4rt_viplb_obj.get_original_redis_entries(db_list) + + # Fetch the original key to oid information from Redis DB. + key_to_oid_helper = util.KeyToOidDBHelper(dvs) + _, original_key_oid_info = key_to_oid_helper.get_db_info() + + # Create router interface. + router_interface_id, router_intf_key, attr_list = ( + self._p4rt_router_intf_obj.create_router_interface() + ) + util.verify_response(self.response_consumer, router_intf_key, + attr_list, "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count = 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create neighbor. + neighbor_id, neighbor_key, attr_list = ( + self._p4rt_neighbor_obj.create_neighbor() + ) + util.verify_response(self.response_consumer, neighbor_key, attr_list, + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. 
+ count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create nexthop. + first_nexthop_id, first_nexthop_key, attr_list = ( + self._p4rt_nexthop_obj.create_next_hop() + ) + util.verify_response(self.response_consumer, first_nexthop_key, attr_list, + "SWSS_RC_SUCCESS") + # get nexthop_oid of newly created nexthop + first_nexthop_oid = self._p4rt_nexthop_obj.get_newly_created_nexthop_oid() + assert first_nexthop_oid is not None + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create viplb. + viplb_key, attr_list = ( + self._p4rt_viplb_obj.create_viplb(first_nexthop_id) + ) + util.verify_response(self.response_consumer, viplb_key, attr_list, + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Query application database for viplb entries. + viplb_entries = util.get_keys( + self._p4rt_viplb_obj.appl_db, + self._p4rt_viplb_obj.APP_DB_TBL_NAME + ":" + self._p4rt_viplb_obj.TBL_NAME) + assert len(viplb_entries) == ( + self._p4rt_viplb_obj.get_original_appl_db_entries_count() + 1 + ) + + # Query application database for newly created viplb key. + (status, fvs) = util.get_key(self._p4rt_viplb_obj.appl_db, + self._p4rt_viplb_obj.APP_DB_TBL_NAME, + viplb_key) + assert status == True + util.verify_attr(fvs, attr_list) + + # Query application state database for viplb entries. 
+ state_viplb_entries = util.get_keys( + self._p4rt_viplb_obj.appl_state_db, + self._p4rt_viplb_obj.APP_DB_TBL_NAME + ":" + self._p4rt_viplb_obj.TBL_NAME) + assert len(state_viplb_entries) == ( + self._p4rt_viplb_obj.get_original_appl_state_db_entries_count() + 1 + ) + + # Query application state database for newly created viplb key. + (status, fvs) = util.get_key(self._p4rt_viplb_obj.appl_state_db, + self._p4rt_viplb_obj.APP_DB_TBL_NAME, + viplb_key) + assert status == True + util.verify_attr(fvs, attr_list) + + + # get programmable_object_oid of newly created viplb + viplb_oid = self._p4rt_viplb_obj.get_newly_created_programmable_object_oid() + assert viplb_oid is not None + + # get crm counters + time.sleep(1) + used_counter = getCrmCounterValue(dvs, "EXT_TABLE_STATS:"+self._p4rt_viplb_obj.TBL_NAME, 'crm_stats_extension_table_used') + avail_counter = getCrmCounterValue(dvs, "EXT_TABLE_STATS:"+self._p4rt_viplb_obj.TBL_NAME, 'crm_stats_extension_table_available') + assert used_counter is 1 + + # Create another router interface. + router_interface_id, router_intf_key, attr_list = ( + self._p4rt_router_intf_obj.create_router_interface(router_interace_id="20") + ) + util.verify_response(self.response_consumer, router_intf_key, + attr_list, "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create another neighbor. + neighbor_id, neighbor_key, attr_list = ( + self._p4rt_neighbor_obj.create_neighbor(router_interface_id="20", neighbor_id="10.0.0.1") + ) + util.verify_response(self.response_consumer, neighbor_key, attr_list, + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Create another nexthop. 
+ second_nexthop_id, second_nexthop_key, attr_list = ( + self._p4rt_nexthop_obj.create_next_hop(router_interface_id="20", neighbor_id="10.0.0.1", nexthop_id="16") + ) + util.verify_response(self.response_consumer, second_nexthop_key, attr_list, + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count incremented by 1 in Redis DB. + count += 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # Update viplb. + viplb_key, attr_list = ( + self._p4rt_viplb_obj.create_viplb(second_nexthop_id) + ) + util.verify_response(self.response_consumer, viplb_key, attr_list, + "SWSS_RC_SUCCESS") + + + # Remove nexthop. + self._p4rt_nexthop_obj.remove_app_db_entry(first_nexthop_key) + util.verify_response(self.response_consumer, first_nexthop_key, [], + "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count decremented by 1 in Redis DB. + count -= 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # get crm counters + time.sleep(1) + used_counter = getCrmCounterValue(dvs, "EXT_TABLE_STATS:"+self._p4rt_viplb_obj.TBL_NAME, 'crm_stats_extension_table_used') + avail_counter = getCrmCounterValue(dvs, "EXT_TABLE_STATS:"+self._p4rt_viplb_obj.TBL_NAME, 'crm_stats_extension_table_available') + assert used_counter is 1 + + # Remove viplb entry. + self._p4rt_viplb_obj.remove_app_db_entry(viplb_key) + util.verify_response( + self.response_consumer, viplb_key, [], "SWSS_RC_SUCCESS") + + # Verify that P4RT key to OID count decremented by 1 in Redis DB. 
+ count -= 1 + status, fvs = key_to_oid_helper.get_db_info() + assert status == True + assert len(fvs) == len(original_key_oid_info) + count + + # get crm counters + time.sleep(1) + used_counter = getCrmCounterValue(dvs, "EXT_TABLE_STATS:"+self._p4rt_viplb_obj.TBL_NAME, 'crm_stats_extension_table_used') + avail_counter = getCrmCounterValue(dvs, "EXT_TABLE_STATS:"+self._p4rt_viplb_obj.TBL_NAME, 'crm_stats_extension_table_available') + assert used_counter is 0 + + + def test_VIPv4LBWithBadNexthopAddUpdateDeletePass(self, dvs, testlog): + # Initialize L3 objects and database connectors. + self._set_up(dvs) + return + + # Create tables definition AppDb entry + tables_definition_key, attr_list = ( + self._p4rt_tables_definition_obj.create_tables_definition() + ) + util.verify_response(self.response_consumer, tables_definition_key, + attr_list, "SWSS_RC_SUCCESS") + + # Set IP type for viplb object. + self._p4rt_viplb_obj.set_ip_type("IPV4") + + # Create viplb. + viplb_key, attr_list = ( + self._p4rt_viplb_obj.create_viplb() + ) + util.verify_response(self.response_consumer, viplb_key, attr_list, + "SWSS_RC_INVALID_PARAM", "[OrchAgent] Cross-table reference valdiation failed, no OID found") + diff --git a/tests/p4rt/util.py b/tests/p4rt/util.py index 831c7a5cbe..ac46a48587 100644 --- a/tests/p4rt/util.py +++ b/tests/p4rt/util.py @@ -54,8 +54,10 @@ def verify_response(consumer, key, attr_list, status, err_message = "SWSS_RC_SUC assert data == key assert op == status assert len(values) >= 1 - assert values[0][0] == "err_str" - assert values[0][1] == err_message + assert values[0][0] == "err_str", "Unexpected status '%s' received, expected '%s'" % \ + (values[0][0], "err_str") + assert values[0][1] == err_message, "Unexpected message '%s' received, expected '%s'" % \ + (values[0][1], err_message) values = values[1:] verify_attr(values, attr_list) @@ -84,8 +86,8 @@ def get_port_oid_by_name(dvs, port_name): return port_oid def initialize_interface(dvs, port_name, ip): - 
dvs.runcmd("config interface startup {}".format(port_name)) - dvs.runcmd("config interface ip add {} {}".format(port_name, ip)) + dvs.port_admin_set(port_name, "up") + dvs.interface_ip_add(port_name, ip) def set_interface_status(dvs, if_name, status = "down", server = 0): dvs.servers[0].runcmd("ip link set {} dev {}".format(status, if_name)) == 0 diff --git a/tests/p4rt/viplb.py b/tests/p4rt/viplb.py new file mode 100644 index 0000000000..06e61443fa --- /dev/null +++ b/tests/p4rt/viplb.py @@ -0,0 +1,74 @@ +from swsscommon import swsscommon + +import util +import json + + +class P4RtVIPLBWrapper(util.DBInterface): + """Interface to interact with APP DB and ASIC DB tables for P4RT viplb object.""" + + # database and SAI constants + APP_DB_TBL_NAME = swsscommon.APP_P4RT_TABLE_NAME + ASIC_DB_TBL_NAME = "ASIC_STATE:SAI_OBJECT_TYPE_GENERIC_PROGRAMMABLE" + SAI_ATTR_TYPE = "SAI_GENERIC_PROGRAMMABLE_ATTR_TYPE" + SAI_ATTR_OBJECT_NAME = "SAI_GENERIC_PROGRAMMABLE_ATTR_OBJECT_NAME" + SAI_ATTR_ENTRY = "SAI_GENERIC_PROGRAMMABLE_ATTR_ENTRY" + + # default viplb attribute values + DEFAULT_ACTION = "set_nexthop_id" + DEFAULT_NEXTHOP_ID = "18" + DEFAULT_DST = "10.11.12.0/24" + + # attribute fields for viplb object + NEXTHOP_ID_FIELD = "nexthop_id" + + def generate_app_db_key(self, dst): + assert self.ip_type is not None + d = {} + if self.ip_type == "IPV4": + d[util.prepend_match_field("ipv4_dst")] = dst + else: + d[util.prepend_match_field("ipv6_dst")] = dst + key = json.dumps(d, separators=(",", ":")) + return self.TBL_NAME + ":" + key + + def set_ip_type(self, ip_type): + assert ip_type in ("IPV4", "IPV6") + self.ip_type = ip_type + self.TBL_NAME = "EXT_V" + ip_type + "_TABLE" + + # Create entry + def create_viplb(self, nexthop_id=None, action=None, dst=None): + action = action or self.DEFAULT_ACTION + dst = dst or self.DEFAULT_DST + if action == "set_nexthop_id": + nexthop_id = nexthop_id or self.DEFAULT_NEXTHOP_ID + attr_list = [(self.ACTION_FIELD, action), + 
(util.prepend_param_field(self.NEXTHOP_ID_FIELD), + nexthop_id)] + else: + attr_list = [(self.ACTION_FIELD, action)] + viplb_key = self.generate_app_db_key(dst) + self.set_app_db_entry(viplb_key, attr_list) + return viplb_key, attr_list + + def get_newly_created_programmable_object_oid(self): + viplb_oid = None + viplb_entries = util.get_keys(self.asic_db, self.ASIC_DB_TBL_NAME) + for key in viplb_entries: + if key not in self._original_entries["{}:{}".format(self.asic_db, + self.ASIC_DB_TBL_NAME)]: + viplb_oid = key + break + return viplb_oid + + def get_original_appl_db_entries_count(self): + return len(self._original_entries["%s:%s" % (self.appl_db, + (self.APP_DB_TBL_NAME + ":" + + self.TBL_NAME))]) + + def get_original_appl_state_db_entries_count(self): + return len(self._original_entries["%s:%s" % (self.appl_state_db, + (self.APP_DB_TBL_NAME + ":" + + self.TBL_NAME))]) + diff --git a/tests/test_acl.py b/tests/test_acl.py index fb8aecb0ea..d0bad2c509 100644 --- a/tests/test_acl.py +++ b/tests/test_acl.py @@ -1,4 +1,5 @@ import pytest +from requests import request L3_TABLE_TYPE = "L3" L3_TABLE_NAME = "L3_TEST" @@ -20,17 +21,20 @@ MIRROR_BIND_PORTS = ["Ethernet0", "Ethernet4", "Ethernet8", "Ethernet12"] MIRROR_RULE_NAME = "MIRROR_TEST_RULE" +PFCWD_TABLE_TYPE = "PFCWD" +PFCWD_TABLE_NAME = "PFCWD_TEST" +PFCWD_BIND_PORTS = ["Ethernet0", "Ethernet4", "Ethernet8", "Ethernet12"] class TestAcl: - @pytest.yield_fixture - def l3_acl_table(self, dvs_acl): + @pytest.fixture(params=['ingress', 'egress']) + def l3_acl_table(self, dvs_acl, request): try: - dvs_acl.create_acl_table(L3_TABLE_NAME, L3_TABLE_TYPE, L3_BIND_PORTS) - yield dvs_acl.get_acl_table_ids(1)[0] + dvs_acl.create_acl_table(L3_TABLE_NAME, L3_TABLE_TYPE, L3_BIND_PORTS, stage=request.param) + yield dvs_acl.get_acl_table_ids(1)[0], request.param finally: dvs_acl.remove_acl_table(L3_TABLE_NAME) dvs_acl.verify_acl_table_count(0) - @pytest.yield_fixture + @pytest.fixture def l3v6_acl_table(self, dvs_acl): try: 
dvs_acl.create_acl_table(L3V6_TABLE_NAME, @@ -41,7 +45,7 @@ def l3v6_acl_table(self, dvs_acl): dvs_acl.remove_acl_table(L3V6_TABLE_NAME) dvs_acl.verify_acl_table_count(0) - @pytest.yield_fixture + @pytest.fixture def mclag_acl_table(self, dvs_acl): try: dvs_acl.create_acl_table(MCLAG_TABLE_NAME, MCLAG_TABLE_TYPE, MCLAG_BIND_PORTS) @@ -50,7 +54,7 @@ def mclag_acl_table(self, dvs_acl): dvs_acl.remove_acl_table(MCLAG_TABLE_NAME) dvs_acl.verify_acl_table_count(0) - @pytest.yield_fixture + @pytest.fixture def mirror_acl_table(self, dvs_acl): try: dvs_acl.create_acl_table(MIRROR_TABLE_NAME, MIRROR_TABLE_TYPE, MIRROR_BIND_PORTS) @@ -59,7 +63,16 @@ def mirror_acl_table(self, dvs_acl): dvs_acl.remove_acl_table(MIRROR_TABLE_NAME) dvs_acl.verify_acl_table_count(0) - @pytest.yield_fixture + @pytest.fixture(params=['ingress', 'egress']) + def pfcwd_acl_table(self, dvs_acl, request): + try: + dvs_acl.create_acl_table(PFCWD_TABLE_NAME, PFCWD_TABLE_TYPE, PFCWD_BIND_PORTS, request.param) + yield dvs_acl.get_acl_table_ids(1)[0], request.param + finally: + dvs_acl.remove_acl_table(PFCWD_TABLE_NAME) + dvs_acl.verify_acl_table_count(0) + + @pytest.fixture def setup_teardown_neighbor(self, dvs): try: # NOTE: set_interface_status has a dependency on cdb within dvs, @@ -87,9 +100,36 @@ def test_AclTableCreationDeletion(self, dvs_acl): dvs_acl.verify_acl_table_group_members(acl_table_id, acl_table_group_ids, 1) dvs_acl.verify_acl_table_port_binding(acl_table_id, L3_BIND_PORTS, 1) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_table_status(L3_TABLE_NAME, "Active") finally: dvs_acl.remove_acl_table(L3_TABLE_NAME) dvs_acl.verify_acl_table_count(0) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_table_status(L3_TABLE_NAME, None) + + def test_InvalidAclTableCreationDeletion(self, dvs_acl): + try: + dvs_acl.create_acl_table("INVALID_ACL_TABLE", L3_TABLE_TYPE, "dummy_port", "invalid_stage") + # Verify status is written into STATE_DB + 
dvs_acl.verify_acl_table_status("INVALID_ACL_TABLE", "Inactive") + finally: + dvs_acl.remove_acl_table("INVALID_ACL_TABLE") + dvs_acl.verify_acl_table_count(0) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_table_status("INVALID_ACL_TABLE", None) + + def test_InvalidAclRuleCreation(self, dvs_acl, l3_acl_table): + config_qualifiers = {"INVALID_QUALIFIER": "TEST"} + + dvs_acl.create_acl_rule(L3_TABLE_NAME, "INVALID_RULE", config_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, "INVALID_RULE", "Inactive") + + dvs_acl.remove_acl_rule(L3_TABLE_NAME, "INVALID_RULE") + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, "INVALID_RULE", None) + dvs_acl.verify_no_acl_rules() def test_AclRuleL4SrcPort(self, dvs_acl, l3_acl_table): config_qualifiers = {"L4_SRC_PORT": "65000"} @@ -99,8 +139,12 @@ def test_AclRuleL4SrcPort(self, dvs_acl, l3_acl_table): dvs_acl.create_acl_rule(L3_TABLE_NAME, L3_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3_TABLE_NAME, L3_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_AclRuleIpProtocol(self, dvs_acl, l3_acl_table): @@ -111,8 +155,12 @@ def test_AclRuleIpProtocol(self, dvs_acl, l3_acl_table): dvs_acl.create_acl_rule(L3_TABLE_NAME, L3_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3_TABLE_NAME, L3_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_AclRuleTCPProtocolAppendedForTCPFlags(self, 
dvs_acl, l3_acl_table): @@ -128,8 +176,12 @@ def test_AclRuleTCPProtocolAppendedForTCPFlags(self, dvs_acl, l3_acl_table): } dvs_acl.create_acl_rule(L3_TABLE_NAME, L3_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3_TABLE_NAME, L3_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_AclRuleNextHeader(self, dvs_acl, l3_acl_table): @@ -137,9 +189,13 @@ def test_AclRuleNextHeader(self, dvs_acl, l3_acl_table): # Shouldn't allow NEXT_HEADER on vanilla L3 tables. dvs_acl.create_acl_rule(L3_TABLE_NAME, L3_RULE_NAME, config_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, "Inactive") dvs_acl.verify_no_acl_rules() dvs_acl.remove_acl_rule(L3_TABLE_NAME, L3_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_V6AclRuleNextHeaderAppendedForTCPFlags(self, dvs_acl, l3v6_acl_table): @@ -156,8 +212,12 @@ def test_V6AclRuleNextHeaderAppendedForTCPFlags(self, dvs_acl, l3v6_acl_table): dvs_acl.create_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_AclRuleInPorts(self, dvs_acl, mirror_acl_table): @@ -174,9 +234,13 @@ def test_AclRuleInPorts(self, dvs_acl, mirror_acl_table): } dvs_acl.create_acl_rule(MIRROR_TABLE_NAME, MIRROR_RULE_NAME, config_qualifiers) + # 
Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(MIRROR_TABLE_NAME, MIRROR_RULE_NAME, "Active") dvs_acl.verify_acl_rule(expected_sai_qualifiers) dvs_acl.remove_acl_rule(MIRROR_TABLE_NAME, MIRROR_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(MIRROR_TABLE_NAME, MIRROR_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_AclRuleOutPorts(self, dvs_acl, mclag_acl_table): @@ -194,8 +258,12 @@ def test_AclRuleOutPorts(self, dvs_acl, mclag_acl_table): dvs_acl.create_acl_rule(MCLAG_TABLE_NAME, MCLAG_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(MCLAG_TABLE_NAME, MCLAG_RULE_NAME, "Active") dvs_acl.remove_acl_rule(MCLAG_TABLE_NAME, MCLAG_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(MCLAG_TABLE_NAME, MCLAG_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_AclRuleInPortsNonExistingInterface(self, dvs_acl, mirror_acl_table): @@ -207,9 +275,12 @@ def test_AclRuleInPortsNonExistingInterface(self, dvs_acl, mirror_acl_table): } dvs_acl.create_acl_rule(MIRROR_TABLE_NAME, MIRROR_RULE_NAME, config_qualifiers) - + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(MIRROR_TABLE_NAME, MIRROR_RULE_NAME, "Inactive") dvs_acl.verify_no_acl_rules() dvs_acl.remove_acl_rule(MIRROR_TABLE_NAME, MIRROR_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(MIRROR_TABLE_NAME, MIRROR_RULE_NAME, None) def test_AclRuleOutPortsNonExistingInterface(self, dvs_acl, mclag_acl_table): """ @@ -220,9 +291,12 @@ def test_AclRuleOutPortsNonExistingInterface(self, dvs_acl, mclag_acl_table): } dvs_acl.create_acl_rule(MCLAG_TABLE_NAME, MCLAG_RULE_NAME, config_qualifiers) - + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(MCLAG_TABLE_NAME, MCLAG_RULE_NAME, "Inactive") dvs_acl.verify_no_acl_rules() 
dvs_acl.remove_acl_rule(MCLAG_TABLE_NAME, MCLAG_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(MCLAG_TABLE_NAME, MCLAG_RULE_NAME, None) def test_AclRuleVlanId(self, dvs_acl, l3_acl_table): config_qualifiers = {"VLAN_ID": "100"} @@ -231,9 +305,13 @@ def test_AclRuleVlanId(self, dvs_acl, l3_acl_table): } dvs_acl.create_acl_rule(L3_TABLE_NAME, L3_RULE_NAME, config_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, "Active") dvs_acl.verify_acl_rule(expected_sai_qualifiers) dvs_acl.remove_acl_rule(L3_TABLE_NAME, L3_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_V6AclTableCreationDeletion(self, dvs_acl): @@ -246,8 +324,12 @@ def test_V6AclTableCreationDeletion(self, dvs_acl): acl_table_group_ids = dvs_acl.get_acl_table_group_ids(len(L3V6_BIND_PORTS)) dvs_acl.verify_acl_table_group_members(acl_table_id, acl_table_group_ids, 1) dvs_acl.verify_acl_table_port_binding(acl_table_id, L3V6_BIND_PORTS, 1) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_table_status(L3V6_TABLE_NAME, "Active") finally: dvs_acl.remove_acl_table(L3V6_TABLE_NAME) + # Verify the STATE_DB entry is cleared + dvs_acl.verify_acl_table_status(L3V6_TABLE_NAME, None) dvs_acl.verify_acl_table_count(0) def test_V6AclRuleIPv6Any(self, dvs_acl, l3v6_acl_table): @@ -257,9 +339,13 @@ def test_V6AclRuleIPv6Any(self, dvs_acl, l3v6_acl_table): } dvs_acl.create_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME, config_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") dvs_acl.verify_acl_rule(expected_sai_qualifiers) dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def 
test_V6AclRuleIPv6AnyDrop(self, dvs_acl, l3v6_acl_table): @@ -273,8 +359,12 @@ def test_V6AclRuleIPv6AnyDrop(self, dvs_acl, l3v6_acl_table): config_qualifiers, action="DROP") dvs_acl.verify_acl_rule(expected_sai_qualifiers, action="DROP") + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, None) dvs_acl.verify_no_acl_rules() # This test validates that backwards compatibility works as expected, it should @@ -287,8 +377,12 @@ def test_V6AclRuleIpProtocol(self, dvs_acl, l3v6_acl_table): dvs_acl.create_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_V6AclRuleNextHeader(self, dvs_acl, l3v6_acl_table): @@ -299,8 +393,12 @@ def test_V6AclRuleNextHeader(self, dvs_acl, l3v6_acl_table): dvs_acl.create_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_V6AclRuleSrcIPv6(self, dvs_acl, l3v6_acl_table): @@ -312,8 +410,12 @@ def test_V6AclRuleSrcIPv6(self, dvs_acl, l3v6_acl_table): dvs_acl.create_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME, config_qualifiers) 
dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_V6AclRuleDstIPv6(self, dvs_acl, l3v6_acl_table): @@ -324,8 +426,12 @@ def test_V6AclRuleDstIPv6(self, dvs_acl, l3v6_acl_table): dvs_acl.create_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_V6AclRuleL4SrcPort(self, dvs_acl, l3v6_acl_table): @@ -336,8 +442,12 @@ def test_V6AclRuleL4SrcPort(self, dvs_acl, l3v6_acl_table): dvs_acl.create_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_V6AclRuleL4DstPort(self, dvs_acl, l3v6_acl_table): @@ -348,8 +458,12 @@ def test_V6AclRuleL4DstPort(self, dvs_acl, l3v6_acl_table): dvs_acl.create_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB 
entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_V6AclRuleL4SrcPortRange(self, dvs_acl, l3v6_acl_table): @@ -360,8 +474,12 @@ def test_V6AclRuleL4SrcPortRange(self, dvs_acl, l3v6_acl_table): dvs_acl.create_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_V6AclRuleL4DstPortRange(self, dvs_acl, l3v6_acl_table): @@ -372,8 +490,12 @@ def test_V6AclRuleL4DstPortRange(self, dvs_acl, l3v6_acl_table): dvs_acl.create_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_V6AclRuleVlanId(self, dvs_acl, l3v6_acl_table): @@ -384,8 +506,12 @@ def test_V6AclRuleVlanId(self, dvs_acl, l3v6_acl_table): dvs_acl.create_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_InsertAclRuleBetweenPriorities(self, dvs_acl, l3_acl_table): @@ -417,6 +543,8 @@ def 
test_InsertAclRuleBetweenPriorities(self, dvs_acl, l3_acl_table): f"PRIORITY_TEST_RULE_{rule}", config_qualifiers[rule], action=config_actions[rule], priority=rule) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, f"PRIORITY_TEST_RULE_{rule}", "Active") dvs_acl.verify_acl_rule_set(rule_priorities, config_actions, expected_sai_qualifiers) @@ -434,9 +562,12 @@ def test_InsertAclRuleBetweenPriorities(self, dvs_acl, l3_acl_table): action="DROP", priority=odd_priority) dvs_acl.verify_acl_rule_set(rule_priorities, config_actions, expected_sai_qualifiers) - + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, f"PRIORITY_TEST_RULE_{odd_priority}", "Active") for rule in rule_priorities: dvs_acl.remove_acl_rule(L3_TABLE_NAME, f"PRIORITY_TEST_RULE_{rule}") + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, f"PRIORITY_TEST_RULE_{rule}", None) dvs_acl.verify_no_acl_rules() def test_RulesWithDiffMaskLengths(self, dvs_acl, l3_acl_table): @@ -475,10 +606,14 @@ def test_RulesWithDiffMaskLengths(self, dvs_acl, l3_acl_table): config_qualifiers[rule], action=config_actions[rule], priority=rule) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, f"MASK_TEST_RULE_{rule}", "Active") dvs_acl.verify_acl_rule_set(rule_priorities, config_actions, expected_sai_qualifiers) for rule in rule_priorities: dvs_acl.remove_acl_rule(L3_TABLE_NAME, f"MASK_TEST_RULE_{rule}") + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, f"MASK_TEST_RULE_{rule}", None) dvs_acl.verify_no_acl_rules() def test_AclRuleIcmp(self, dvs_acl, l3_acl_table): @@ -494,8 +629,12 @@ def test_AclRuleIcmp(self, dvs_acl, l3_acl_table): dvs_acl.create_acl_rule(L3_TABLE_NAME, L3_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + 
dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3_TABLE_NAME, L3_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, None) dvs_acl.verify_no_acl_rules() dvs_acl.remove_acl_table(L3_TABLE_NAME) @@ -514,8 +653,12 @@ def test_AclRuleIcmpV6(self, dvs_acl, l3v6_acl_table): dvs_acl.create_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME, config_qualifiers) dvs_acl.verify_acl_rule(expected_sai_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, None) dvs_acl.verify_no_acl_rules() def test_AclRuleRedirect(self, dvs, dvs_acl, l3_acl_table, setup_teardown_neighbor): @@ -533,8 +676,11 @@ def test_AclRuleRedirect(self, dvs, dvs_acl, l3_acl_table, setup_teardown_neighb next_hop_id = setup_teardown_neighbor dvs_acl.verify_redirect_acl_rule(expected_sai_qualifiers, next_hop_id, priority="20") - + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3_TABLE_NAME, L3_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, None) dvs_acl.verify_no_acl_rules() dvs_acl.create_redirect_acl_rule(L3_TABLE_NAME, @@ -545,19 +691,50 @@ def test_AclRuleRedirect(self, dvs, dvs_acl, l3_acl_table, setup_teardown_neighb intf_id = dvs.asic_db.port_name_map["Ethernet4"] dvs_acl.verify_redirect_acl_rule(expected_sai_qualifiers, intf_id, priority="20") - + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, "Active") dvs_acl.remove_acl_rule(L3_TABLE_NAME, L3_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, 
None) dvs_acl.verify_no_acl_rules() + + def test_AclTableMandatoryMatchFields(self, dvs, pfcwd_acl_table): + """ + The test case is to verify stage particular matching fields is applied + """ + table_oid, stage = pfcwd_acl_table + match_in_ports = False + entry = dvs.asic_db.wait_for_entry("ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE", table_oid) + for k, v in entry.items(): + if k == "SAI_ACL_TABLE_ATTR_FIELD_IN_PORTS" and v == "true": + match_in_ports = True + + if stage == "ingress": + assert match_in_ports + else: + assert not match_in_ports + + def test_AclTableMandatoryRangeFields(self, dvs, l3_acl_table): + """ + The test case is to verify range qualifier is not applied for egress ACL + """ + table_oid, stage = l3_acl_table + match_range_qualifier = False + entry = dvs.asic_db.wait_for_entry("ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE", table_oid) + for k, v in entry.items(): + if k == "SAI_ACL_TABLE_ATTR_FIELD_ACL_RANGE_TYPE" and v == "true": + match_range_qualifier = True + assert not match_range_qualifier class TestAclCrmUtilization: @pytest.fixture(scope="class", autouse=True) def configure_crm_polling_interval_for_test(self, dvs): - dvs.runcmd("crm config polling interval 1") + dvs.crm_poll_set("1") yield - dvs.runcmd("crm config polling interval 300") + dvs.crm_poll_set("300") def test_ValidateAclTableBindingCrmUtilization(self, dvs, dvs_acl): counter_db = dvs.get_counters_db() diff --git a/tests/test_acl_cli.py b/tests/test_acl_cli.py deleted file mode 100644 index 02785314d2..0000000000 --- a/tests/test_acl_cli.py +++ /dev/null @@ -1,33 +0,0 @@ -class TestAclCli: - def test_AddTableMultipleTimes(self, dvs, dvs_acl): - dvs.runcmd("config acl add table TEST L3 -p Ethernet0") - - cdb = dvs.get_config_db() - cdb.wait_for_field_match( - "ACL_TABLE", - "TEST", - {"ports": "Ethernet0"} - ) - - # Verify that subsequent updates don't delete "ports" from config DB - dvs.runcmd("config acl add table TEST L3 -p Ethernet4") - cdb.wait_for_field_match( - "ACL_TABLE", - "TEST", 
- {"ports": "Ethernet4"} - ) - - # Verify that subsequent updates propagate to ASIC DB - L3_BIND_PORTS = ["Ethernet0", "Ethernet4", "Ethernet8", "Ethernet12"] - dvs.runcmd(f"config acl add table TEST L3 -p {','.join(L3_BIND_PORTS)}") - acl_table_id = dvs_acl.get_acl_table_ids(1)[0] - acl_table_group_ids = dvs_acl.get_acl_table_group_ids(len(L3_BIND_PORTS)) - - dvs_acl.verify_acl_table_group_members(acl_table_id, acl_table_group_ids, 1) - dvs_acl.verify_acl_table_port_binding(acl_table_id, L3_BIND_PORTS, 1) - - -# Add Dummy always-pass test at end as workaroud -# for issue when Flaky fail on final test it invokes module tear-down before retrying -def test_nonflaky_dummy(): - pass diff --git a/tests/test_acl_egress_table.py b/tests/test_acl_egress_table.py index 01800d6b20..c96af74644 100644 --- a/tests/test_acl_egress_table.py +++ b/tests/test_acl_egress_table.py @@ -14,16 +14,18 @@ "VLAN_ID" ] CUSTOM_TABLE_TYPE_BPOINT_TYPES = ["PORT","PORTCHANNEL"] +CUSTOM_TABLE_TYPE_ACTIONS = ["PACKET_ACTION,COUNTER"] +EXPECTED_ACTION_LIST = ['SAI_ACL_ACTION_TYPE_PACKET_ACTION','SAI_ACL_ACTION_TYPE_COUNTER'] TABLE_NAME = "EGRESS_TEST" BIND_PORTS = ["Ethernet0", "Ethernet4"] RULE_NAME = "EGRESS_TEST_RULE" class TestEgressAclTable: - @pytest.yield_fixture + @pytest.fixture def egress_acl_table(self, dvs_acl): try: - dvs_acl.create_acl_table_type(TABLE_TYPE, CUSTOM_TABLE_TYPE_MATCHES, CUSTOM_TABLE_TYPE_BPOINT_TYPES) + dvs_acl.create_acl_table_type(TABLE_TYPE, CUSTOM_TABLE_TYPE_MATCHES, CUSTOM_TABLE_TYPE_BPOINT_TYPES, CUSTOM_TABLE_TYPE_ACTIONS) dvs_acl.create_acl_table(TABLE_NAME, TABLE_TYPE, BIND_PORTS, stage="egress") yield dvs_acl.get_acl_table_ids(1)[0] finally: @@ -33,7 +35,7 @@ def egress_acl_table(self, dvs_acl): def test_EgressAclTableCreationDeletion(self, dvs_acl): try: - dvs_acl.create_acl_table_type(TABLE_TYPE, CUSTOM_TABLE_TYPE_MATCHES, CUSTOM_TABLE_TYPE_BPOINT_TYPES) + dvs_acl.create_acl_table_type(TABLE_TYPE, CUSTOM_TABLE_TYPE_MATCHES, CUSTOM_TABLE_TYPE_BPOINT_TYPES, 
CUSTOM_TABLE_TYPE_ACTIONS) dvs_acl.create_acl_table(TABLE_NAME, TABLE_TYPE, BIND_PORTS, stage="egress") acl_table_id = dvs_acl.get_acl_table_ids(1)[0] @@ -41,6 +43,7 @@ def test_EgressAclTableCreationDeletion(self, dvs_acl): dvs_acl.verify_acl_table_group_members(acl_table_id, acl_table_group_ids, 1) dvs_acl.verify_acl_table_port_binding(acl_table_id, BIND_PORTS, 1, stage="egress") + dvs_acl.verify_acl_table_action_list(acl_table_id, EXPECTED_ACTION_LIST) finally: dvs_acl.remove_acl_table(TABLE_NAME) dvs_acl.remove_acl_table_type(TABLE_TYPE) diff --git a/tests/test_acl_l3v4v6.py b/tests/test_acl_l3v4v6.py new file mode 100644 index 0000000000..2a5e044f52 --- /dev/null +++ b/tests/test_acl_l3v4v6.py @@ -0,0 +1,99 @@ +import pytest +from requests import request + +L3V4V6_TABLE_TYPE = "L3V4V6" +L3V4V6_TABLE_NAME = "L3_V4V6_TEST" +L3V4V6_BIND_PORTS = ["Ethernet0", "Ethernet4", "Ethernet8"] +L3V4V6_RULE_NAME = "L3V4V6_TEST_RULE" + +class TestAcl: + @pytest.fixture + def l3v4v6_acl_table(self, dvs_acl): + try: + dvs_acl.create_acl_table(L3V4V6_TABLE_NAME, + L3V4V6_TABLE_TYPE, + L3V4V6_BIND_PORTS) + yield dvs_acl.get_acl_table_ids(1)[0] + finally: + dvs_acl.remove_acl_table(L3V4V6_TABLE_NAME) + dvs_acl.verify_acl_table_count(0) + + @pytest.fixture + def setup_teardown_neighbor(self, dvs): + try: + # NOTE: set_interface_status has a dependency on cdb within dvs, + # so we still need to setup the db. This should be refactored. 
+ dvs.setup_db() + + # Bring up an IP interface with a neighbor + dvs.set_interface_status("Ethernet4", "up") + dvs.add_ip_address("Ethernet4", "10.0.0.1/24") + dvs.add_neighbor("Ethernet4", "10.0.0.2", "00:01:02:03:04:05") + + yield dvs.get_asic_db().wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", 1)[0] + finally: + # Clean up the IP interface and neighbor + dvs.remove_neighbor("Ethernet4", "10.0.0.2") + dvs.remove_ip_address("Ethernet4", "10.0.0.1/24") + dvs.set_interface_status("Ethernet4", "down") + + def test_L3V4V6AclTableCreationDeletion(self, dvs_acl): + try: + dvs_acl.create_acl_table(L3V4V6_TABLE_NAME, L3V4V6_TABLE_TYPE, L3V4V6_BIND_PORTS) + + acl_table_id = dvs_acl.get_acl_table_ids(1)[0] + acl_table_group_ids = dvs_acl.get_acl_table_group_ids(len(L3V4V6_BIND_PORTS)) + + dvs_acl.verify_acl_table_group_members(acl_table_id, acl_table_group_ids, 1) + dvs_acl.verify_acl_table_port_binding(acl_table_id, L3V4V6_BIND_PORTS, 1) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_table_status(L3V4V6_TABLE_NAME, "Active") + finally: + dvs_acl.remove_acl_table(L3V4V6_TABLE_NAME) + dvs_acl.verify_acl_table_count(0) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_table_status(L3V4V6_TABLE_NAME, None) + + def test_ValidAclRuleCreation_sip_dip(self, dvs_acl, l3v4v6_acl_table): + config_qualifiers = {"DST_IP": "20.0.0.1/32", + "SRC_IP": "10.0.0.0/32"}; + + dvs_acl.create_acl_rule(L3V4V6_TABLE_NAME, "VALID_RULE", config_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V4V6_TABLE_NAME, "VALID_RULE", "Active") + + dvs_acl.remove_acl_rule(L3V4V6_TABLE_NAME, "VALID_RULE") + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V4V6_TABLE_NAME, "VALID_RULE", None) + dvs_acl.verify_no_acl_rules() + + def test_InvalidAclRuleCreation_sip_sipv6(self, dvs_acl, l3v4v6_acl_table): + config_qualifiers = {"SRC_IPV6": "2777::0/64", + "SRC_IP": "10.0.0.0/32"}; + + 
dvs_acl.create_acl_rule(L3V4V6_TABLE_NAME, "INVALID_RULE", config_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V4V6_TABLE_NAME, "INVALID_RULE", "Inactive") + + dvs_acl.remove_acl_rule(L3V4V6_TABLE_NAME, "INVALID_RULE") + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V4V6_TABLE_NAME, "INVALID_RULE", None) + dvs_acl.verify_no_acl_rules() + + def test_InvalidAclRuleCreation_dip_sipv6(self, dvs_acl, l3v4v6_acl_table): + config_qualifiers = {"SRC_IPV6": "2777::0/64", + "DST_IP": "10.0.0.0/32"}; + + dvs_acl.create_acl_rule(L3V4V6_TABLE_NAME, "INVALID_RULE", config_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V4V6_TABLE_NAME, "INVALID_RULE", "Inactive") + + dvs_acl.remove_acl_rule(L3V4V6_TABLE_NAME, "INVALID_RULE") + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V4V6_TABLE_NAME, "INVALID_RULE", None) + dvs_acl.verify_no_acl_rules() + +# Add Dummy always-pass test at end as workaround +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass diff --git a/tests/test_acl_portchannel.py b/tests/test_acl_portchannel.py index 759850d1be..b912cbea2f 100644 --- a/tests/test_acl_portchannel.py +++ b/tests/test_acl_portchannel.py @@ -129,7 +129,7 @@ def check_asic_table_absent(self, dvs): # Second create ACL table def test_PortChannelAfterAcl(self, dvs): self.setup_db(dvs) - dvs.runcmd("crm config polling interval 1") + dvs.crm_poll_set("1") time.sleep(2) used_counter = dvs.getCrmCounterValue('ACL_STATS:INGRESS:LAG', 'crm_stats_acl_group_used') @@ -162,7 +162,7 @@ def test_PortChannelAfterAcl(self, dvs): new_new_used_counter = 0 assert new_used_counter - new_new_used_counter == 1 # slow down crm polling - dvs.runcmd("crm config polling interval 10000") + dvs.crm_poll_set("10000") # Frist create ACL table # Second create port channel diff --git a/tests/test_admin_status.py 
b/tests/test_admin_status.py index 15724a7c02..1b99bf37c7 100644 --- a/tests/test_admin_status.py +++ b/tests/test_admin_status.py @@ -9,6 +9,7 @@ def setup_db(self, dvs): self.pdb = swsscommon.DBConnector(0, dvs.redis_sock, 0) self.adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) self.cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0) + self.sdb = swsscommon.DBConnector(6, dvs.redis_sock, 0) def set_admin_status(self, port, admin_status): assert admin_status == "up" or admin_status == "down" @@ -52,6 +53,16 @@ def check_admin_status(self, dvs, port, admin_status): if fv[0] == "SAI_PORT_ATTR_ADMIN_STATE": assert fv[1] == "true" if admin_status == "up" else "false" + def check_host_tx_ready_status(self, dvs, port, admin_status): + assert admin_status == "up" or admin_status == "down" + ptbl = swsscommon.Table(self.sdb, "PORT_TABLE") + (status, fvs) = ptbl.get(port) + assert status == True + assert "host_tx_ready" in [fv[0] for fv in fvs] + for fv in fvs: + if fv[0] == "host_tx_ready": + assert fv[1] == "true" if admin_status == "up" else "false" + def test_PortChannelMemberAdminStatus(self, dvs, testlog): self.setup_db(dvs) @@ -79,6 +90,24 @@ def test_PortChannelMemberAdminStatus(self, dvs, testlog): # remove port channel self.remove_port_channel(dvs, "PortChannel6") + def test_PortHostTxReadiness(self, dvs, testlog): + self.setup_db(dvs) + + # configure admin status to interface + self.set_admin_status("Ethernet0", "up") + self.set_admin_status("Ethernet4", "down") + self.set_admin_status("Ethernet8", "up") + + # check ASIC port database + self.check_admin_status(dvs, "Ethernet0", "up") + self.check_admin_status(dvs, "Ethernet4", "down") + self.check_admin_status(dvs, "Ethernet8", "up") + + # check host readiness status in PORT TABLE of STATE-DB + self.check_host_tx_ready_status(dvs, "Ethernet0", "up") + self.check_host_tx_ready_status(dvs, "Ethernet4", "down") + self.check_host_tx_ready_status(dvs, "Ethernet8", "up") + # Add Dummy always-pass test at end as 
workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_bfd.py b/tests/test_bfd.py index 0e8b167360..5add329278 100644 --- a/tests/test_bfd.py +++ b/tests/test_bfd.py @@ -49,7 +49,7 @@ def test_addRemoveBfdSession(self, dvs): bfdSessions = self.get_exist_bfd_session() # Create BFD session - fieldValues = {"local_addr": "10.0.0.1"} + fieldValues = {"local_addr": "10.0.0.1","tos":"64"} self.create_bfd_session("default:default:10.0.0.2", fieldValues) self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", len(bfdSessions) + 1) @@ -62,13 +62,14 @@ def test_addRemoveBfdSession(self, dvs): "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_TOS": "64", "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4" } self.check_asic_bfd_session_value(session, expected_adb_values) # Check STATE_DB entry related to the BFD session expected_sdb_values = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", - "rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + "rx_interval" : "1000", "multiplier" : "10", "multihop": "false", "local_discriminator" : "1"} self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) # Send BFD session state notification to update BFD session state @@ -102,13 +103,14 @@ def test_addRemoveBfdSession_ipv6(self, dvs): "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "2000::1", "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "2000::2", "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_TOS": "192", "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "6" } self.check_asic_bfd_session_value(session, expected_adb_values) # Check STATE_DB entry related to the BFD session expected_sdb_values = {"state": "Down", "type": "async_active", "local_addr" : "2000::1", "tx_interval" :"1000", - 
"rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + "rx_interval" : "1000", "multiplier" : "10", "multihop": "false", "local_discriminator" : "2"} self.check_state_bfd_session_value("default|default|2000::2", expected_sdb_values) # Send BFD session state notification to update BFD session state @@ -142,6 +144,7 @@ def test_addRemoveBfdSession_interface(self, dvs): "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_TOS": "192", "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4", "SAI_BFD_SESSION_ATTR_HW_LOOKUP_VALID": "false", "SAI_BFD_SESSION_ATTR_DST_MAC_ADDRESS": "00:02:03:04:05:06" @@ -150,7 +153,7 @@ def test_addRemoveBfdSession_interface(self, dvs): # Check STATE_DB entry related to the BFD session expected_sdb_values = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", - "rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + "rx_interval" : "1000", "multiplier" : "10", "multihop": "false", "local_discriminator" : "3"} self.check_state_bfd_session_value("default|Ethernet0|10.0.0.2", expected_sdb_values) # Send BFD session state notification to update BFD session state @@ -184,6 +187,7 @@ def test_addRemoveBfdSession_txrx_interval(self, dvs): "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_TOS": "192", "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4", "SAI_BFD_SESSION_ATTR_MIN_TX": "300000", "SAI_BFD_SESSION_ATTR_MIN_RX": "500000", @@ -192,7 +196,7 @@ def test_addRemoveBfdSession_txrx_interval(self, dvs): # Check STATE_DB entry related to the BFD session expected_sdb_values = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" :"300", - "rx_interval" : "500", "multiplier" : "3", "multihop": "false"} 
+ "rx_interval" : "500", "multiplier" : "10", "multihop": "false", "local_discriminator" : "4"} self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) # Send BFD session state notification to update BFD session state @@ -226,6 +230,7 @@ def test_addRemoveBfdSession_multiplier(self, dvs): "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_TOS": "192", "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4", "SAI_BFD_SESSION_ATTR_MULTIPLIER": "5" } @@ -233,7 +238,7 @@ def test_addRemoveBfdSession_multiplier(self, dvs): # Check STATE_DB entry related to the BFD session expected_sdb_values = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", - "rx_interval" : "1000", "multiplier" : "5", "multihop": "false"} + "rx_interval" : "1000", "multiplier" : "5", "multihop": "false", "local_discriminator" : "5"} self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) # Send BFD session state notification to update BFD session state @@ -267,6 +272,7 @@ def test_addRemoveBfdSession_multihop(self, dvs): "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_TOS": "192", "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4", "SAI_BFD_SESSION_ATTR_MULTIHOP": "true" } @@ -274,7 +280,7 @@ def test_addRemoveBfdSession_multihop(self, dvs): # Check STATE_DB entry related to the BFD session expected_sdb_values = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", - "rx_interval" : "1000", "multiplier" : "3", "multihop": "true"} + "rx_interval" : "1000", "multiplier" : "10", "multihop": "true", "local_discriminator" : "6"} self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) # 
Send BFD session state notification to update BFD session state @@ -308,13 +314,14 @@ def test_addRemoveBfdSession_type(self, dvs): "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_DEMAND_ACTIVE", + "SAI_BFD_SESSION_ATTR_TOS": "192", "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4" } self.check_asic_bfd_session_value(session, expected_adb_values) # Check STATE_DB entry related to the BFD session expected_sdb_values = {"state": "Down", "type": "demand_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", - "rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + "rx_interval" : "1000", "multiplier" : "10", "multihop": "false", "local_discriminator" : "7"} self.check_state_bfd_session_value("default|default|10.0.0.2", expected_sdb_values) # Send BFD session state notification to update BFD session state @@ -350,6 +357,7 @@ def test_multipleBfdSessions(self, dvs): "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.2", "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_TOS": "192", "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4" } self.check_asic_bfd_session_value(session1, expected_adb_values) @@ -357,7 +365,7 @@ def test_multipleBfdSessions(self, dvs): # Check STATE_DB entry related to the BFD session 1 key_state_db1 = "default|default|10.0.0.2" expected_sdb_values1 = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" :"1000", - "rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + "rx_interval" : "1000", "multiplier" : "10", "multihop": "false", "local_discriminator" : "8"} self.check_state_bfd_session_value(key_state_db1, expected_sdb_values1) # Create BFD session 2 @@ -376,6 +384,7 @@ def test_multipleBfdSessions(self, dvs): "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.1", "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.1.2", 
"SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_TOS": "192", "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4", "SAI_BFD_SESSION_ATTR_MIN_TX": "300000", "SAI_BFD_SESSION_ATTR_MIN_RX": "500000", @@ -385,7 +394,7 @@ def test_multipleBfdSessions(self, dvs): # Check STATE_DB entry related to the BFD session 2 key_state_db2 = "default|default|10.0.1.2" expected_sdb_values2 = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.1", "tx_interval" :"300", - "rx_interval" : "500", "multiplier" : "3", "multihop": "false"} + "rx_interval" : "500", "multiplier" : "10", "multihop": "false", "local_discriminator" : "9"} self.check_state_bfd_session_value(key_state_db2, expected_sdb_values2) # Create BFD session 3 @@ -404,6 +413,7 @@ def test_multipleBfdSessions(self, dvs): "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "2000::1", "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "2000::2", "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_DEMAND_ACTIVE", + "SAI_BFD_SESSION_ATTR_TOS": "192", "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "6" } self.check_asic_bfd_session_value(session3, expected_adb_values) @@ -411,7 +421,7 @@ def test_multipleBfdSessions(self, dvs): # Check STATE_DB entry related to the BFD session 3 key_state_db3 = "default|default|2000::2" expected_sdb_values3 = {"state": "Down", "type": "demand_active", "local_addr" : "2000::1", "tx_interval" :"1000", - "rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + "rx_interval" : "1000", "multiplier" : "10", "multihop": "false", "local_discriminator" : "10"} self.check_state_bfd_session_value(key_state_db3, expected_sdb_values3) # Create BFD session 4 @@ -430,6 +440,7 @@ def test_multipleBfdSessions(self, dvs): "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "3000::1", "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "3000::2", "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_TOS": "192", "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "6" } 
self.check_asic_bfd_session_value(session4, expected_adb_values) @@ -437,7 +448,7 @@ def test_multipleBfdSessions(self, dvs): # Check STATE_DB entry related to the BFD session 4 key_state_db4 = "default|default|3000::2" expected_sdb_values4 = {"state": "Down", "type": "async_active", "local_addr" : "3000::1", "tx_interval" :"1000", - "rx_interval" : "1000", "multiplier" : "3", "multihop": "false"} + "rx_interval" : "1000", "multiplier" : "10", "multihop": "false", "local_discriminator" : "11"} self.check_state_bfd_session_value(key_state_db4, expected_sdb_values4) # Update BFD session states @@ -464,3 +475,23 @@ def test_multipleBfdSessions(self, dvs): self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", session3) self.remove_bfd_session(key4) self.adb.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", session4) + + def test_bfd_state_db_clear(self, dvs): + self.setup_db(dvs) + + bfdSessions = self.get_exist_bfd_session() + + # Create BFD session + fieldValues = {"local_addr": "10.0.0.1", "type": "demand_active"} + self.create_bfd_session("default:default:10.0.0.2", fieldValues) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", len(bfdSessions) + 1) + + # Checked created BFD session in ASIC_DB + createdSessions = self.get_exist_bfd_session() - bfdSessions + assert len(createdSessions) == 1 + dvs.stop_swss() + dvs.start_swss() + + time.sleep(5) + keys = self.sdb.get_keys("BFD_SESSION_TABLE") + assert len(keys) == 0 diff --git a/tests/test_buffer_dynamic.py b/tests/test_buffer_dynamic.py index 76ba36ee11..02a06569bd 100644 --- a/tests/test_buffer_dynamic.py +++ b/tests/test_buffer_dynamic.py @@ -5,13 +5,12 @@ from dvslib.dvs_common import PollingConfig -@pytest.yield_fixture +@pytest.fixture def dynamic_buffer(dvs): buffer_model.enable_dynamic_buffer(dvs.get_config_db(), dvs.runcmd) yield buffer_model.disable_dynamic_buffer(dvs.get_config_db(), dvs.runcmd) - @pytest.mark.usefixtures("dynamic_buffer") class 
TestBufferMgrDyn(object): DEFAULT_POLLING_CONFIG = PollingConfig(polling_interval=0.01, timeout=60, strict=True) @@ -129,16 +128,18 @@ def check_new_profile_in_asic_db(self, dvs, profile): if fvs.get('dynamic_th'): sai_threshold_value = fvs['dynamic_th'] sai_threshold_mode = 'SAI_BUFFER_PROFILE_THRESHOLD_MODE_DYNAMIC' + sai_threshold_name = 'SAI_BUFFER_PROFILE_ATTR_SHARED_DYNAMIC_TH' else: sai_threshold_value = fvs['static_th'] sai_threshold_mode = 'SAI_BUFFER_PROFILE_THRESHOLD_MODE_STATIC' + sai_threshold_name = 'SAI_BUFFER_PROFILE_ATTR_SHARED_STATIC_TH' self.asic_db.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_BUFFER_PROFILE", self.newProfileInAsicDb, {'SAI_BUFFER_PROFILE_ATTR_XON_TH': fvs['xon'], 'SAI_BUFFER_PROFILE_ATTR_XOFF_TH': fvs['xoff'], 'SAI_BUFFER_PROFILE_ATTR_RESERVED_BUFFER_SIZE': fvs['size'], 'SAI_BUFFER_PROFILE_ATTR_POOL_ID': self.ingress_lossless_pool_oid, 'SAI_BUFFER_PROFILE_ATTR_THRESHOLD_MODE': sai_threshold_mode, - 'SAI_BUFFER_PROFILE_ATTR_SHARED_DYNAMIC_TH': sai_threshold_value}, + sai_threshold_name: sai_threshold_value}, self.DEFAULT_POLLING_CONFIG) def make_lossless_profile_name(self, speed, cable_length, mtu = None, dynamic_th = None): @@ -164,14 +165,14 @@ def test_changeSpeed(self, dvs, testlog): self.setup_db(dvs) # Startup interface - dvs.runcmd('config interface startup Ethernet0') + dvs.port_admin_set('Ethernet0', 'up') self.check_queues_after_port_startup(dvs) # Configure lossless PG 3-4 on interface self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'NULL'}) # Change speed to speed1 and verify whether the profile has been updated - dvs.runcmd("config interface speed Ethernet0 " + self.speedToTest1) + dvs.port_field_set("Ethernet0", "speed", self.speedToTest1) expectedProfile = self.make_lossless_profile_name(self.speedToTest1, self.originalCableLen) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile) @@ -185,7 +186,7 @@ def test_changeSpeed(self, dvs, testlog): 
self.app_db.wait_for_deleted_entry("BUFFER_PG_TABLE", "Ethernet0:3-4") # Change speed to speed2 and verify - dvs.runcmd("config interface speed Ethernet0 " + self.speedToTest2) + dvs.port_field_set("Ethernet0", "speed", self.speedToTest2) expectedProfile = self.make_lossless_profile_name(self.speedToTest2, self.originalCableLen) # Re-add another lossless PG @@ -197,7 +198,7 @@ def test_changeSpeed(self, dvs, testlog): self.app_db.wait_for_deleted_entry("BUFFER_PG_TABLE", "Ethernet0:6") # Remove the lossless PG 3-4 and revert speed - dvs.runcmd("config interface speed Ethernet0 " + self.originalSpeed) + dvs.port_field_set("Ethernet0", "speed", self.originalSpeed) self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'NULL'}) expectedProfile = self.make_lossless_profile_name(self.originalSpeed, self.originalCableLen) @@ -210,7 +211,7 @@ def test_changeSpeed(self, dvs, testlog): self.app_db.wait_for_deleted_entry("BUFFER_PG_TABLE", "Ethernet0:3-4") # Shutdown interface - dvs.runcmd('config interface shutdown Ethernet0') + dvs.port_admin_set('Ethernet0', 'down') self.cleanup_db(dvs) @@ -219,7 +220,7 @@ def test_changeCableLen(self, dvs, testlog): self.setup_db(dvs) # Startup interface - dvs.runcmd('config interface startup Ethernet0') + dvs.port_admin_set('Ethernet0', 'up') # Configure lossless PG 3-4 on interface self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'NULL'}) @@ -263,7 +264,7 @@ def test_changeCableLen(self, dvs, testlog): self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') # Shutdown interface - dvs.runcmd('config interface shutdown Ethernet0') + dvs.port_admin_set('Ethernet0', 'down') self.cleanup_db(dvs) @@ -271,7 +272,7 @@ def test_MultipleLosslessPg(self, dvs, testlog): self.setup_db(dvs) # Startup interface - dvs.runcmd('config interface startup Ethernet0') + dvs.port_admin_set('Ethernet0', 'up') # Configure lossless PG 3-4 on interface self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 
'NULL'}) @@ -282,7 +283,7 @@ def test_MultipleLosslessPg(self, dvs, testlog): self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": expectedProfile}) # Change speed and check - dvs.runcmd("config interface speed Ethernet0 " + self.speedToTest1) + dvs.port_field_set("Ethernet0", "speed", self.speedToTest1) expectedProfile = self.make_lossless_profile_name(self.speedToTest1, self.originalCableLen) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfile) self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) @@ -299,7 +300,7 @@ def test_MultipleLosslessPg(self, dvs, testlog): # Revert the speed and cable length and check self.change_cable_length(self.originalCableLen) - dvs.runcmd("config interface speed Ethernet0 " + self.originalSpeed) + dvs.port_field_set("Ethernet0", "speed", self.originalSpeed) self.app_db.wait_for_deleted_entry("BUFFER_PROFILE_TABLE", expectedProfile) self.asic_db.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_BUFFER_PROFILE", self.newProfileInAsicDb) expectedProfile = self.make_lossless_profile_name(self.originalSpeed, self.originalCableLen) @@ -312,7 +313,7 @@ def test_MultipleLosslessPg(self, dvs, testlog): self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|6') # Shutdown interface - dvs.runcmd('config interface shutdown Ethernet0') + dvs.port_admin_set('Ethernet0', 'down') self.cleanup_db(dvs) @@ -320,7 +321,7 @@ def test_headroomOverride(self, dvs, testlog): self.setup_db(dvs) # Startup interface - dvs.runcmd('config interface startup Ethernet0') + dvs.port_admin_set('Ethernet0', 'up') # Configure static profile self.config_db.update_entry('BUFFER_PROFILE', 'test', @@ -397,7 +398,7 @@ def test_headroomOverride(self, dvs, testlog): self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') # Shutdown interface - dvs.runcmd('config interface shutdown Ethernet0') + dvs.port_admin_set('Ethernet0', 'down') self.cleanup_db(dvs) @@ -405,7 +406,7 @@ def 
test_mtuUpdate(self, dvs, testlog): self.setup_db(dvs) # Startup interface - dvs.runcmd('config interface startup Ethernet0') + dvs.port_admin_set('Ethernet0', 'up') test_mtu = '1500' default_mtu = '9100' @@ -413,7 +414,7 @@ def test_mtuUpdate(self, dvs, testlog): expectedProfileNormal = self.make_lossless_profile_name(self.originalSpeed, self.originalCableLen) # update the mtu on the interface - dvs.runcmd("config interface mtu Ethernet0 {}".format(test_mtu)) + dvs.port_field_set("Ethernet0", "mtu", test_mtu) # configure lossless PG 3-4 on interface self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'NULL'}) @@ -423,7 +424,7 @@ def test_mtuUpdate(self, dvs, testlog): self.check_new_profile_in_asic_db(dvs, expectedProfileMtu) self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfileMtu}) - dvs.runcmd("config interface mtu Ethernet0 {}".format(default_mtu)) + dvs.port_field_set("Ethernet0", "mtu", default_mtu) self.app_db.wait_for_deleted_entry("BUFFER_PROFILE_TABLE", expectedProfileMtu) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", expectedProfileNormal) @@ -433,7 +434,7 @@ def test_mtuUpdate(self, dvs, testlog): self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') # Shutdown interface - dvs.runcmd('config interface shutdown Ethernet0') + dvs.port_admin_set('Ethernet0', 'down') self.cleanup_db(dvs) @@ -441,7 +442,7 @@ def test_nonDefaultAlpha(self, dvs, testlog): self.setup_db(dvs) # Startup interface - dvs.runcmd('config interface startup Ethernet0') + dvs.port_admin_set('Ethernet0', 'up') test_dynamic_th_1 = '1' expectedProfile_th1 = self.make_lossless_profile_name(self.originalSpeed, self.originalCableLen, dynamic_th = test_dynamic_th_1) @@ -477,7 +478,7 @@ def test_nonDefaultAlpha(self, dvs, testlog): self.config_db.delete_entry('BUFFER_PROFILE', 'non-default-dynamic') # Shutdown interface - dvs.runcmd('config interface shutdown Ethernet0') + dvs.port_admin_set('Ethernet0', 'down') 
self.cleanup_db(dvs) @@ -485,7 +486,7 @@ def test_sharedHeadroomPool(self, dvs, testlog): self.setup_db(dvs) # Startup interface - dvs.runcmd('config interface startup Ethernet0') + dvs.port_admin_set('Ethernet0', 'up') # configure lossless PG 3-4 on interface and start up the interface self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'NULL'}) @@ -574,10 +575,10 @@ def test_sharedHeadroomPool(self, dvs, testlog): # remove lossless PG 3-4 on interface self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') - dvs.runcmd('config interface shutdown Ethernet0') + dvs.port_admin_set('Ethernet0', 'down') # Shutdown interface - dvs.runcmd('config interface shutdown Ethernet0') + dvs.port_admin_set('Ethernet0', 'down') self.cleanup_db(dvs) @@ -595,7 +596,7 @@ def test_shutdownPort(self, dvs, testlog): lossless_queue_zero_reference = 'egress_lossless_zero_profile' # Startup interface - dvs.runcmd('config interface startup Ethernet0') + dvs.port_admin_set('Ethernet0', 'up') # Configure lossless PG 3-4 on interface self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'NULL'}) @@ -604,7 +605,7 @@ def test_shutdownPort(self, dvs, testlog): # Shutdown port and check whether zero profiles have been applied on queues and the PG 0 maximumQueues = int(self.bufferMaxParameter['max_queues']) - 1 - dvs.runcmd("config interface shutdown Ethernet0") + dvs.port_admin_set('Ethernet0', 'down') self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:0", {"profile": lossy_pg_zero_reference}) self.app_db.wait_for_field_match("BUFFER_QUEUE_TABLE", "Ethernet0:0-2", {"profile": lossy_queue_zero_reference}) self.app_db.wait_for_field_match("BUFFER_QUEUE_TABLE", "Ethernet0:3-4", {"profile": lossless_queue_zero_reference}) @@ -632,7 +633,7 @@ def test_shutdownPort(self, dvs, testlog): self.app_db.wait_for_deleted_entry("BUFFER_PG_TABLE", "Ethernet0:6") # Startup port and check whether all the PGs have been added - dvs.runcmd("config interface startup 
Ethernet0") + dvs.port_admin_set('Ethernet0', 'up') self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:0", {"profile": lossy_pg_reference_appl_db}) self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:1", {"profile": lossy_pg_reference_appl_db}) self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": expectedProfile}) @@ -645,7 +646,7 @@ def test_shutdownPort(self, dvs, testlog): self.app_db.wait_for_deleted_entry("BUFFER_QUEUE_TABLE", "Ethernet0:9-{}".format(maximumQueues)) # Shutdown the port again to verify flow to remove buffer objects from an admin down port - dvs.runcmd("config interface shutdown Ethernet0") + dvs.port_admin_set('Ethernet0', 'down') # First, check whether the objects have been correctly handled self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:0", {"profile": lossy_pg_zero_reference}) self.app_db.wait_for_field_match("BUFFER_QUEUE_TABLE", "Ethernet0:0-2", {"profile": lossy_queue_zero_reference}) @@ -671,7 +672,7 @@ def test_shutdownPort(self, dvs, testlog): self.app_db.wait_for_field_match("BUFFER_QUEUE_TABLE", "Ethernet0:7-{}".format(maximumQueues), {"profile": lossy_queue_zero_reference}) # Startup again - dvs.runcmd("config interface startup Ethernet0") + dvs.port_admin_set('Ethernet0', 'up') self.app_db.wait_for_field_match("BUFFER_QUEUE_TABLE", "Ethernet0:0-2", {"profile": lossy_queue_reference_appl_db}) self.app_db.wait_for_field_match("BUFFER_QUEUE_TABLE", "Ethernet0:3-4", {"profile": lossless_queue_reference_appl_db}) self.app_db.wait_for_field_match("BUFFER_QUEUE_TABLE", "Ethernet0:5-6", {"profile": lossy_queue_reference_appl_db}) @@ -683,7 +684,7 @@ def test_shutdownPort(self, dvs, testlog): self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') # Shutdown interface - dvs.runcmd("config interface shutdown Ethernet0") + dvs.port_admin_set('Ethernet0', 'down') self.cleanup_db(dvs) @@ -698,14 +699,14 @@ def test_autoNegPort(self, dvs, testlog): maximum_advertised_speed = 
'25000' # Startup interfaces - dvs.runcmd('config interface startup Ethernet0') + dvs.port_admin_set('Ethernet0', 'up') # Configure lossless PG 3-4 on the interface self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'NULL'}) # Enable port auto negotiation - dvs.runcmd('config interface autoneg Ethernet0 enabled') - dvs.runcmd('config interface advertised-speeds Ethernet0 {}'.format(advertised_speeds)) + dvs.port_field_set('Ethernet0','autoneg', 'on') + dvs.port_field_set('Ethernet0','adv_speeds', advertised_speeds) # Check the buffer profile. The maximum_advertised_speed should be used expectedProfile = self.make_lossless_profile_name(maximum_advertised_speed, self.originalCableLen) @@ -719,7 +720,7 @@ def test_autoNegPort(self, dvs, testlog): self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:6", {"profile": expectedProfile}) # Disable port auto negotiation - dvs.runcmd('config interface autoneg Ethernet0 disabled') + dvs.port_field_set('Ethernet0','autoneg', 'off') # Check the buffer profile. The configured speed should be used expectedProfile = self.make_lossless_profile_name(self.originalSpeed, self.originalCableLen) @@ -733,10 +734,11 @@ def test_autoNegPort(self, dvs, testlog): self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|6') # Shutdown interface - dvs.runcmd('config interface shutdown Ethernet0') + dvs.port_admin_set('Ethernet0', 'down') self.cleanup_db(dvs) + @pytest.mark.skip(reason="Failing. 
Under investigation") def test_removeBufferPool(self, dvs, testlog): self.setup_db(dvs) # Initialize additional databases that are used by this test only @@ -773,8 +775,80 @@ def test_removeBufferPool(self, dvs, testlog): def test_bufferPortMaxParameter(self, dvs, testlog): self.setup_db(dvs) + # Update log level so that we can analyze the log in case the test failed + logfvs = self.config_db.wait_for_entry("LOGGER", "buffermgrd") + old_log_level = logfvs.get("LOGLEVEL") + logfvs["LOGLEVEL"] = "INFO" + self.config_db.update_entry("LOGGER", "buffermgrd", logfvs) + # Check whether port's maximum parameter has been exposed to STATE_DB fvs = self.state_db.wait_for_entry("BUFFER_MAX_PARAM_TABLE", "Ethernet0") assert int(fvs["max_queues"]) and int(fvs["max_priority_groups"]) + _, oa_pid = dvs.runcmd("pgrep orchagent") + + try: + fvs["max_headroom_size"] = "122880" + self.state_db.update_entry("BUFFER_MAX_PARAM_TABLE", "Ethernet0", fvs) + + # Startup interface + dvs.port_admin_set('Ethernet0', 'up') + # Wait for the lossy profile to be handled + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:0", {"profile": "ingress_lossy_profile"}) + + # Stop orchagent to simulate the scenario that the system is during initialization + dvs.runcmd("kill -s SIGSTOP {}".format(oa_pid)) + + # Create a lossless profile + profile_fvs = {'xon': '19456', + 'xoff': '10240', + 'size': '29696', + 'dynamic_th': '0', + 'pool': 'ingress_lossless_pool'} + self.config_db.update_entry('BUFFER_PROFILE', 'test', profile_fvs) + + self.config_db.update_entry('BUFFER_PG', 'Ethernet0|3-4', {'profile': 'test'}) + + # Make sure the entry has been handled by buffermgrd and is pending on orchagent's queue + self.app_db.wait_for_field_match("_BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "test"}) + + # Should not be added due to the maximum headroom exceeded + self.config_db.update_entry('BUFFER_PG', 'Ethernet0|1', {'profile': 'ingress_lossy_profile'}) + # Should not be added due to the maximum 
headroom exceeded + self.config_db.update_entry('BUFFER_PG', 'Ethernet0|6', {'profile': 'test'}) + + # Resume orchagent + dvs.runcmd("kill -s SIGCONT {}".format(oa_pid)) + + # Check whether BUFFER_PG_TABLE is updated as expected + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", "Ethernet0:3-4", {"profile": "test"}) + + keys = self.app_db.get_keys('BUFFER_PG_TABLE') + + assert 'Ethernet0:1' not in keys + assert 'Ethernet0:6' not in keys + + # Update the profile + profile_fvs['size'] = '28672' + profile_fvs['xoff'] = '9216' + self.config_db.update_entry('BUFFER_PROFILE', 'test', profile_fvs) + self.app_db.wait_for_field_match('BUFFER_PROFILE_TABLE', 'test', profile_fvs) + finally: + dvs.runcmd("kill -s SIGCONT {}".format(oa_pid)) + + self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|3-4') + self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|1') + self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|6') + self.config_db.delete_entry('BUFFER_PROFILE', 'test') + + fvs.pop("max_headroom_size") + self.state_db.delete_entry("BUFFER_MAX_PARAM_TABLE", "Ethernet0") + self.state_db.update_entry("BUFFER_MAX_PARAM_TABLE", "Ethernet0", fvs) + + if old_log_level: + logfvs["LOGLEVEL"] = old_log_level + self.config_db.update_entry("LOGGER", "buffermgrd", logfvs) + + dvs.port_admin_set('Ethernet0', 'down') + self.cleanup_db(dvs) diff --git a/tests/test_buffer_traditional.py b/tests/test_buffer_traditional.py index 3defae0c80..21371cb05a 100644 --- a/tests/test_buffer_traditional.py +++ b/tests/test_buffer_traditional.py @@ -3,7 +3,7 @@ class TestBuffer(object): - LOSSLESS_PGS = [3, 4] + lossless_pgs = [] INTF = "Ethernet0" def setup_db(self, dvs): @@ -15,6 +15,10 @@ def setup_db(self, dvs): # enable PG watermark self.set_pg_wm_status('enable') + def get_pfc_enable_queues(self): + qos_map = self.config_db.get_entry("PORT_QOS_MAP", self.INTF) + return qos_map['pfc_enable'].split(',') + def get_pg_oid(self, pg): fvs = dict() fvs = self.counter_db.get_entry("COUNTERS_PG_NAME_MAP", 
"") @@ -51,25 +55,38 @@ def get_asic_buf_pg_profiles(self): buf_pg_entries = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP", self.pg_name_map[pg]) self.buf_pg_profile[pg] = buf_pg_entries["SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE"] - def change_cable_len(self, cable_len): + def change_cable_len(self, cable_len, extra_port=None): fvs = dict() fvs[self.INTF] = cable_len + if extra_port: + fvs[extra_port] = cable_len self.config_db.update_entry("CABLE_LENGTH", "AZURE", fvs) + def set_port_qos_table(self, port, pfc_enable_flag): + fvs=dict() + fvs['pfc_enable'] = pfc_enable_flag + self.config_db.update_entry("PORT_QOS_MAP", port, fvs) + self.lossless_pgs = pfc_enable_flag.split(',') + + def get_pg_name_map(self): + pg_name_map = dict() + for pg in self.lossless_pgs: + pg_name = "{}:{}".format(self.INTF, pg) + pg_name_map[pg_name] = self.get_pg_oid(pg_name) + return pg_name_map + @pytest.fixture def setup_teardown_test(self, dvs): - try: - self.setup_db(dvs) - pg_name_map = dict() - for pg in self.LOSSLESS_PGS: - pg_name = "{}:{}".format(self.INTF, pg) - pg_name_map[pg_name] = self.get_pg_oid(pg_name) - yield pg_name_map - finally: - self.teardown() + self.setup_db(dvs) + self.set_port_qos_table(self.INTF, '3,4') + self.lossless_pg_combinations = ['3-4'] + time.sleep(2) + + yield + + self.teardown() def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): - self.pg_name_map = setup_teardown_test orig_cable_len = None orig_speed = None try: @@ -91,10 +108,11 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): test_speed = "100000" test_cable_len = "0m" - dvs.runcmd("config interface startup {}".format(self.INTF)) + dvs.port_admin_set(self.INTF, "up") # Make sure the buffer PG has been created orig_lossless_profile = "pg_lossless_{}_{}_profile".format(orig_speed, cable_len_before_test) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", orig_lossless_profile) + self.pg_name_map = self.get_pg_name_map() 
self.orig_profiles = self.get_asic_buf_profile() # check if the lossless profile for the test speed is already present @@ -113,13 +131,14 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): self.change_cable_len(test_cable_len) # change intf speed to 'test_speed' - dvs.runcmd("config interface speed {} {}".format(self.INTF, test_speed)) + dvs.port_field_set(self.INTF, "speed", test_speed) test_lossless_profile = "pg_lossless_{}_{}_profile".format(test_speed, test_cable_len) # buffer profile should not get created self.app_db.wait_for_deleted_entry("BUFFER_PROFILE_TABLE", test_lossless_profile) # buffer pgs should still point to the original buffer profile - self.app_db.wait_for_field_match("BUFFER_PG_TABLE", self.INTF + ":3-4", {"profile": orig_lossless_profile}) + for pg in self.lossless_pg_combinations: + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", self.INTF + ":" + pg, {"profile": orig_lossless_profile}) fvs = dict() for pg in self.pg_name_map: fvs["SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE"] = self.buf_pg_profile[pg] @@ -129,7 +148,7 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): self.change_cable_len(cable_len_before_test) # change intf speed to 'test_speed' - dvs.runcmd("config interface speed {} {}".format(self.INTF, test_speed)) + dvs.port_field_set(self.INTF, "speed", test_speed) if profile_exp_cnt_diff != 0: # new profile will get created self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", new_lossless_profile) @@ -150,5 +169,80 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): if orig_cable_len: self.change_cable_len(orig_cable_len) if orig_speed: - dvs.runcmd("config interface speed {} {}".format(self.INTF, orig_speed)) - dvs.runcmd("config interface shutdown {}".format(self.INTF)) + dvs.port_field_set(self.INTF, "speed", orig_speed) + dvs.port_admin_set(self.INTF, "down") + + # To verify the BUFFER_PG is not hardcoded to 3,4 + # buffermgrd will read 'pfc_enable' 
entry and apply lossless profile to that queue + def test_buffer_pg_update(self, dvs, setup_teardown_test): + orig_cable_len = None + orig_speed = None + test_speed = None + extra_port = "Ethernet4" + try: + # Retrieve cable len + fvs_cable_len = self.config_db.get_entry("CABLE_LENGTH", "AZURE") + orig_cable_len = fvs_cable_len[self.INTF] + if orig_cable_len == "0m": + cable_len_for_test = "300m" + fvs_cable_len[self.INTF] = cable_len_for_test + fvs_cable_len[extra_port] = cable_len_for_test + + self.config_db.update_entry("CABLE_LENGTH", "AZURE", fvs_cable_len) + else: + cable_len_for_test = orig_cable_len + # Ethernet4 is set to up, while no 'pfc_enable' available. `Ethernet0` is not supposed to be impacted + dvs.port_admin_set(extra_port, "up") + + dvs.port_admin_set(self.INTF, "up") + + # Retrieve port speed + fvs_port = self.config_db.get_entry("PORT", self.INTF) + orig_speed = fvs_port["speed"] + + # Make sure the buffer PG has been created + orig_lossless_profile = "pg_lossless_{}_{}_profile".format(orig_speed, cable_len_for_test) + self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", orig_lossless_profile) + self.pg_name_map = self.get_pg_name_map() + self.orig_profiles = self.get_asic_buf_profile() + + # get the orig buf profiles attached to the pgs + self.get_asic_buf_pg_profiles() + + # Update port speed + if orig_speed == "100000": + test_speed = "40000" + elif orig_speed == "40000": + test_speed = "100000" + # change intf speed to 'test_speed' + dvs.port_field_set(self.INTF, "speed", test_speed) + dvs.port_field_set(extra_port, "speed", test_speed) + # Verify new profile is generated + new_lossless_profile = "pg_lossless_{}_{}_profile".format(test_speed, cable_len_for_test) + self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", new_lossless_profile) + + # Verify BUFFER_PG is updated + for pg in self.lossless_pg_combinations: + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", self.INTF + ":" + pg, {"profile": new_lossless_profile}) + + fvs_negative = {} 
+ for pg in self.pg_name_map: + # verify that buffer pgs do not point to the old profile since we cannot deduce the new profile oid + fvs_negative["SAI_INGRESS_PRIORITY_GROUP_ATTR_BUFFER_PROFILE"] = self.buf_pg_profile[pg] + self.asic_db.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP", self.pg_name_map[pg], fvs_negative) + + # Add pfc_enable field for extra port + self.set_port_qos_table(extra_port, '2,3,4,6') + self.lossless_pg_combinations = ['2-4', '6'] + time.sleep(1) + # Verify BUFFER_PG is updated when pfc_enable is available + for pg in self.lossless_pg_combinations: + self.app_db.wait_for_field_match("BUFFER_PG_TABLE", extra_port + ":" + pg, {"profile": new_lossless_profile}) + finally: + if orig_cable_len: + self.change_cable_len(orig_cable_len, extra_port) + if orig_speed: + dvs.port_field_set(self.INTF, "speed", orig_speed) + dvs.port_field_set(extra_port, "speed", orig_speed) + dvs.port_admin_set(self.INTF, "down") + dvs.port_admin_set(extra_port, "down") diff --git a/tests/test_crm.py b/tests/test_crm.py index 200b15cf79..bee145c34f 100644 --- a/tests/test_crm.py +++ b/tests/test_crm.py @@ -17,29 +17,16 @@ def getCrmCounterValue(dvs, key, counter): return 0 -def getCrmConfigValue(dvs, key, counter): - - config_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - crm_stats_table = swsscommon.Table(config_db, 'CRM') - - for k in crm_stats_table.get(key)[1]: - if k[0] == counter: - return int(k[1]) - -def getCrmConfigStr(dvs, key, counter): - - config_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - crm_stats_table = swsscommon.Table(config_db, 'CRM') - - for k in crm_stats_table.get(key)[1]: - if k[0] == counter: - return k[1] - return "" - def check_syslog(dvs, marker, err_log, expected_cnt): (exitcode, num) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep \"%s\" | wc -l" % (marker, err_log)]) assert num.strip() >= str(expected_cnt) +def 
crm_update(dvs, field, value): + cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(cfg_db, "CRM") + fvs = swsscommon.FieldValuePairs([(field, value)]) + tbl.set("Config", fvs) + time.sleep(1) class TestCrm(object): def test_CrmFdbEntry(self, dvs, testlog): @@ -48,7 +35,7 @@ def test_CrmFdbEntry(self, dvs, testlog): # configured, server 2 will send packet which can switch to learn another # mac and fail the test. dvs.servers[2].runcmd("sysctl -w net.ipv6.conf.eth0.disable_ipv6=1") - dvs.runcmd("crm config polling interval 1") + crm_update(dvs, "polling_interval", "1") dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_FDB_ENTRY', '1000') @@ -99,9 +86,9 @@ def test_CrmFdbEntry(self, dvs, testlog): assert new_avail_counter == avail_counter marker = dvs.add_log_marker() - dvs.runcmd("crm config polling interval 2") - dvs.runcmd("crm config thresholds fdb high 90") - dvs.runcmd("crm config thresholds fdb type free") + crm_update(dvs, "polling_interval", "2") + crm_update(dvs, "fdb_entry_high_threshold", "90") + crm_update(dvs, "fdb_entry_threshold_type", "free") time.sleep(2) check_syslog(dvs, marker, "FDB_ENTRY THRESHOLD_EXCEEDED for TH_FREE", 1) @@ -115,9 +102,9 @@ def test_CrmIpv4Route(self, dvs, testlog): fvs = swsscommon.FieldValuePairs([("NULL","NULL")]) intf_tbl.set("Ethernet0", fvs) intf_tbl.set("Ethernet0|10.0.0.0/31", fvs) - dvs.runcmd("config interface startup Ethernet0") + dvs.port_admin_set("Ethernet0", "up") - dvs.runcmd("crm config polling interval 1") + crm_update(dvs, "polling_interval", "1") dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_IPV4_ROUTE_ENTRY', '1000') @@ -162,9 +149,9 @@ def test_CrmIpv4Route(self, dvs, testlog): assert new_avail_counter == avail_counter marker = dvs.add_log_marker() - dvs.runcmd("crm config polling interval 2") - dvs.runcmd("crm config thresholds ipv4 route high 90") - dvs.runcmd("crm config thresholds ipv4 route type free") + 
crm_update(dvs, "polling_interval", "2") + crm_update(dvs, "ipv4_route_high_threshold", "90") + crm_update(dvs, "ipv4_route_threshold_type", "free") time.sleep(2) check_syslog(dvs, marker, "IPV4_ROUTE THRESHOLD_EXCEEDED for TH_FREE",1) @@ -182,12 +169,12 @@ def test_CrmIpv6Route(self, dvs, testlog): fvs = swsscommon.FieldValuePairs([("NULL","NULL")]) intf_tbl.set("Ethernet0", fvs) intf_tbl.set("Ethernet0|fc00::1/126", fvs) - dvs.runcmd("config interface startup Ethernet0") + dvs.port_admin_set("Ethernet0", "up") dvs.servers[0].runcmd("ifconfig eth0 inet6 add fc00::2/126") dvs.servers[0].runcmd("ip -6 route add default via fc00::1") - dvs.runcmd("crm config polling interval 1") + crm_update(dvs, "polling_interval", "1") dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_IPV6_ROUTE_ENTRY', '1000') @@ -232,9 +219,9 @@ def test_CrmIpv6Route(self, dvs, testlog): assert new_avail_counter == avail_counter marker = dvs.add_log_marker() - dvs.runcmd("crm config polling interval 2") - dvs.runcmd("crm config thresholds ipv6 route high 90") - dvs.runcmd("crm config thresholds ipv6 route type free") + crm_update(dvs, "polling_interval", "2") + crm_update(dvs, "ipv6_route_high_threshold", "90") + crm_update(dvs, "ipv6_route_threshold_type", "free") time.sleep(2) check_syslog(dvs, marker, "IPV6_ROUTE THRESHOLD_EXCEEDED for TH_FREE",1) @@ -248,9 +235,8 @@ def test_CrmIpv4Nexthop(self, dvs, testlog): fvs = swsscommon.FieldValuePairs([("NULL","NULL")]) intf_tbl.set("Ethernet0|10.0.0.0/31", fvs) intf_tbl.set("Ethernet0", fvs) - dvs.runcmd("config interface startup Ethernet0") - - dvs.runcmd("crm config polling interval 1") + dvs.port_admin_set("Ethernet0", "up") + crm_update(dvs, "polling_interval", "1") dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_IPV4_NEXTHOP_ENTRY', '1000') @@ -287,9 +273,9 @@ def test_CrmIpv4Nexthop(self, dvs, testlog): assert new_avail_counter == avail_counter marker = dvs.add_log_marker() - dvs.runcmd("crm 
config polling interval 2") - dvs.runcmd("crm config thresholds ipv4 nexthop high 90") - dvs.runcmd("crm config thresholds ipv4 nexthop type free") + crm_update(dvs, "polling_interval", "2") + crm_update(dvs, "ipv4_nexthop_high_threshold", "90") + crm_update(dvs, "ipv4_nexthop_threshold_type", "free") time.sleep(2) check_syslog(dvs, marker, "IPV4_NEXTHOP THRESHOLD_EXCEEDED for TH_FREE",1) @@ -307,9 +293,9 @@ def test_CrmIpv6Nexthop(self, dvs, testlog): fvs = swsscommon.FieldValuePairs([("NULL","NULL")]) intf_tbl.set("Ethernet0", fvs) intf_tbl.set("Ethernet0|fc00::1/126", fvs) - dvs.runcmd("config interface startup Ethernet0") + dvs.port_admin_set("Ethernet0", "up") - dvs.runcmd("crm config polling interval 1") + crm_update(dvs, "polling_interval", "1") dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_IPV6_NEXTHOP_ENTRY', '1000') @@ -346,9 +332,9 @@ def test_CrmIpv6Nexthop(self, dvs, testlog): assert new_avail_counter == avail_counter marker = dvs.add_log_marker() - dvs.runcmd("crm config polling interval 2") - dvs.runcmd("crm config thresholds ipv6 nexthop high 90") - dvs.runcmd("crm config thresholds ipv6 nexthop type free") + crm_update(dvs, "polling_interval", "2") + crm_update(dvs, "ipv6_nexthop_high_threshold", "90") + crm_update(dvs, "ipv6_nexthop_threshold_type", "free") time.sleep(2) check_syslog(dvs, marker, "IPV6_NEXTHOP THRESHOLD_EXCEEDED for TH_FREE",1) @@ -362,9 +348,9 @@ def test_CrmIpv4Neighbor(self, dvs, testlog): fvs = swsscommon.FieldValuePairs([("NULL","NULL")]) intf_tbl.set("Ethernet0", fvs) intf_tbl.set("Ethernet0|10.0.0.0/31", fvs) - dvs.runcmd("config interface startup Ethernet0") + dvs.port_admin_set("Ethernet0", "up") - dvs.runcmd("crm config polling interval 1") + crm_update(dvs, "polling_interval", "1") dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_IPV4_NEIGHBOR_ENTRY', '1000') @@ -401,9 +387,9 @@ def test_CrmIpv4Neighbor(self, dvs, testlog): assert new_avail_counter == avail_counter 
marker = dvs.add_log_marker() - dvs.runcmd("crm config polling interval 2") - dvs.runcmd("crm config thresholds ipv4 neighbor high 90") - dvs.runcmd("crm config thresholds ipv4 neighbor type free") + crm_update(dvs, "polling_interval", "2") + crm_update(dvs, "ipv4_neighbor_high_threshold", "90") + crm_update(dvs, "ipv4_neighbor_threshold_type", "free") time.sleep(2) check_syslog(dvs, marker, "IPV4_NEIGHBOR THRESHOLD_EXCEEDED for TH_FREE",1) @@ -421,9 +407,9 @@ def test_CrmIpv6Neighbor(self, dvs, testlog): fvs = swsscommon.FieldValuePairs([("NULL","NULL")]) intf_tbl.set("Ethernet0", fvs) intf_tbl.set("Ethernet0|fc00::1/126", fvs) - dvs.runcmd("config interface startup Ethernet0") + dvs.port_admin_set("Ethernet0", "up") - dvs.runcmd("crm config polling interval 1") + crm_update(dvs, "polling_interval", "1") dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_IPV6_NEIGHBOR_ENTRY', '1000') @@ -460,9 +446,9 @@ def test_CrmIpv6Neighbor(self, dvs, testlog): assert new_avail_counter == avail_counter marker = dvs.add_log_marker() - dvs.runcmd("crm config polling interval 2") - dvs.runcmd("crm config thresholds ipv6 neighbor high 90") - dvs.runcmd("crm config thresholds ipv6 neighbor type free") + crm_update(dvs, "polling_interval", "2") + crm_update(dvs, "ipv6_neighbor_high_threshold", "90") + crm_update(dvs, "ipv6_neighbor_threshold_type", "free") time.sleep(2) check_syslog(dvs, marker, "IPV6_NEIGHBOR THRESHOLD_EXCEEDED for TH_FREE",1) @@ -478,10 +464,10 @@ def test_CrmNexthopGroup(self, dvs, testlog): intf_tbl.set("Ethernet4", fvs) intf_tbl.set("Ethernet0|10.0.0.0/31", fvs) intf_tbl.set("Ethernet4|10.0.0.2/31", fvs) - dvs.runcmd("config interface startup Ethernet0") - dvs.runcmd("config interface startup Ethernet4") + dvs.port_admin_set("Ethernet0", "up") + dvs.port_admin_set("Ethernet4", "up") - dvs.runcmd("crm config polling interval 1") + crm_update(dvs, "polling_interval", "1") dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 
'SAI_SWITCH_ATTR_AVAILABLE_NEXT_HOP_GROUP_ENTRY', '1000') @@ -528,9 +514,9 @@ def test_CrmNexthopGroup(self, dvs, testlog): assert new_avail_counter == avail_counter marker = dvs.add_log_marker() - dvs.runcmd("crm config polling interval 2") - dvs.runcmd("crm config thresholds nexthop group member high 90") - dvs.runcmd("crm config thresholds nexthop group object type free") + crm_update(dvs, "polling_interval", "2") + crm_update(dvs, "nexthop_group_high_threshold", "90") + crm_update(dvs, "nexthop_group_threshold_type", "free") time.sleep(2) check_syslog(dvs, marker, "NEXTHOP_GROUP THRESHOLD_EXCEEDED for TH_FREE",1) @@ -553,10 +539,10 @@ def test_CrmNexthopGroupMember(self, dvs, testlog): intf_tbl.set("Ethernet4", fvs) intf_tbl.set("Ethernet0|10.0.0.0/31", fvs) intf_tbl.set("Ethernet4|10.0.0.2/31", fvs) - dvs.runcmd("config interface startup Ethernet0") - dvs.runcmd("config interface startup Ethernet4") + dvs.port_admin_set("Ethernet0", "up") + dvs.port_admin_set("Ethernet4", "up") - dvs.runcmd("crm config polling interval 1") + crm_update(dvs, "polling_interval", "1") dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_NEXT_HOP_GROUP_MEMBER_ENTRY', '1000') @@ -603,9 +589,9 @@ def test_CrmNexthopGroupMember(self, dvs, testlog): assert new_avail_counter == avail_counter marker = dvs.add_log_marker() - dvs.runcmd("crm config polling interval 2") - dvs.runcmd("crm config thresholds nexthop group member high 90") - dvs.runcmd("crm config thresholds nexthop group member type free") + crm_update(dvs, "polling_interval", "2") + crm_update(dvs, "nexthop_group_member_high_threshold", "90") + crm_update(dvs, "nexthop_group_member_threshold_type", "free") time.sleep(2) check_syslog(dvs, marker, "NEXTHOP_GROUP_MEMBER THRESHOLD_EXCEEDED for TH_FREE",1) @@ -618,7 +604,7 @@ def test_CrmAcl(self, dvs, testlog): db = swsscommon.DBConnector(4, dvs.redis_sock, 0) adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) - dvs.runcmd("crm config polling interval 1") + 
crm_update(dvs, "polling_interval", "1") time.sleep(1) bind_ports = ["Ethernet0", "Ethernet4"] @@ -698,7 +684,7 @@ def test_CrmAclGroup(self, dvs, testlog): db = swsscommon.DBConnector(4, dvs.redis_sock, 0) adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) - dvs.runcmd("crm config polling interval 1") + crm_update(dvs, "polling_interval", "1") bind_ports = ["Ethernet0", "Ethernet4", "Ethernet8"] # create ACL table @@ -711,12 +697,22 @@ def test_CrmAclGroup(self, dvs, testlog): entry_used_counter = getCrmCounterValue(dvs, 'ACL_STATS:INGRESS:PORT', 'crm_stats_acl_group_used') assert entry_used_counter == 3 - # remove ACL table - #tbl._del("test-aclv6") - #time.sleep(2) - #atbl = swsscommon.Table(adb, "ASIC_STATE:SAI_OBJECT_TYPE_ACL_TABLE_GROUP") - #table_used_counter = getCrmCounterValue(dvs, 'ACL_STATS:INGRESS:PORT', 'crm_stats_acl_group_used') - #assert table_used_counter == 0 + marker = dvs.add_log_marker() + crm_update(dvs, "polling_interval", "1") + crm_update(dvs, "acl_group_threshold_type", "used") + crm_update(dvs, "acl_group_low_threshold", str(0)) + crm_update(dvs, "acl_group_high_threshold", str(2)) + + time.sleep(2) + check_syslog(dvs, marker, "ACL_GROUP THRESHOLD_EXCEEDED for TH_USED", 1) + check_syslog(dvs, marker, "ACL_GROUP THRESHOLD_CLEAR for TH_USED", 0) + + tbl._del("test-aclv6") + time.sleep(2) + check_syslog(dvs, marker, "ACL_GROUP THRESHOLD_CLEAR for TH_USED", 1) + + table_used_counter = getCrmCounterValue(dvs, 'ACL_STATS:INGRESS:PORT', 'crm_stats_acl_group_used') + assert table_used_counter == 0 def test_CrmSnatEntry(self, dvs, testlog): @@ -734,262 +730,59 @@ def test_CrmDnatEntry(self, dvs, testlog): assert used_counter == 0 assert avail_counter != 0 -# commented ipmc test case till vslib is updated -# def test_CrmIpmcEntry(self, dvs, testlog): -# -# # get counters -# used_counter = getCrmCounterValue(dvs, 'STATS', 'crm_stats_ipmc_entry_used') -# avail_counter = getCrmCounterValue(dvs, 'STATS', 'crm_stats_ipmc_entry_available') -# assert 
used_counter == 0 -# assert avail_counter != 0 - - def test_Configure(self, dvs, testlog): - - #polling interval - dvs.runcmd("crm config polling interval 10") - time.sleep(2) - polling_interval = getCrmConfigValue(dvs, 'Config', 'polling_interval') - assert polling_interval == 10 - - def test_Configure_ipv4_route(self, dvs, testlog): - - #ipv4 route low/high threshold/type - dvs.runcmd("crm config thresholds ipv4 route low 50") - dvs.runcmd("crm config thresholds ipv4 route high 90") - dvs.runcmd("crm config thresholds ipv4 route type percentage") - - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'ipv4_route_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'ipv4_route_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'ipv4_route_threshold_type') - assert threshold_type == 'percentage' - - def test_Configure_ipv6_route(self, dvs, testlog): - - #ipv6 route low/high threshold/type - dvs.runcmd("crm config thresholds ipv6 route low 50") - dvs.runcmd("crm config thresholds ipv6 route high 90") - dvs.runcmd("crm config thresholds ipv6 route type used") - - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'ipv6_route_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'ipv6_route_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'ipv6_route_threshold_type') - assert threshold_type == 'used' - - def test_Configure_ipv4_nexthop(self, dvs, testlog): - - #ipv4 nexthop low/high threshold/type - dvs.runcmd("crm config thresholds ipv4 nexthop low 50") - dvs.runcmd("crm config thresholds ipv4 nexthop high 90") - dvs.runcmd("crm config thresholds ipv4 nexthop type 'percentage'") - - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'ipv4_nexthop_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 
'ipv4_nexthop_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'ipv4_nexthop_threshold_type') - assert threshold_type == 'percentage' + def test_CrmResetThresholdExceedCount(self, dvs, testlog): - def test_Configure_ipv6_nexthop(self, dvs, testlog): - - #ipv6 nexthop low/high threshold/type - dvs.runcmd("crm config thresholds ipv6 nexthop low 50") - dvs.runcmd("crm config thresholds ipv6 nexthop high 90") - dvs.runcmd("crm config thresholds ipv6 nexthop type free") - - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'ipv6_nexthop_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'ipv6_nexthop_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'ipv6_nexthop_threshold_type') - assert threshold_type == 'free' - - def test_Configure_ipv4_neighbor(self, dvs, testlog): - - #ipv4 neighbor low/high threshold/type - dvs.runcmd("crm config thresholds ipv4 neighbor low 50") - dvs.runcmd("crm config thresholds ipv4 neighbor high 90") - dvs.runcmd("crm config thresholds ipv4 neighbor type percentage") - - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'ipv4_neighbor_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'ipv4_neighbor_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'ipv4_neighbor_threshold_type') - assert threshold_type == 'percentage' - - def test_Configure_ipv6_neighbor(self, dvs, testlog): - - #ipv6 neighbor low/high threshold/type - dvs.runcmd("crm config thresholds ipv6 neighbor low 50") - dvs.runcmd("crm config thresholds ipv6 neighbor high 90") - dvs.runcmd("crm config thresholds ipv6 neighbor type used") - - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'ipv6_neighbor_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 
'ipv6_neighbor_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'ipv6_neighbor_threshold_type') - assert threshold_type == 'used' - - def test_Configure_group_member(self, dvs, testlog): - - #nexthop group member low/high threshold/type - dvs.runcmd("crm config thresholds nexthop group member low 50") - dvs.runcmd("crm config thresholds nexthop group member high 90") - dvs.runcmd("crm config thresholds nexthop group member type percentage") - - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'nexthop_group_member_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'nexthop_group_member_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'nexthop_group_member_threshold_type') - assert threshold_type == 'percentage' - - def test_Configure_group_object(self, dvs, testlog): - - #nexthop group object low/high threshold/type - dvs.runcmd("crm config thresholds nexthop group object low 50") - dvs.runcmd("crm config thresholds nexthop group object high 90") - dvs.runcmd("crm config thresholds nexthop group object type free") - - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'nexthop_group_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'nexthop_group_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'nexthop_group_threshold_type') - assert threshold_type == 'free' - - def test_Configure_acl_table(self, dvs, testlog): - - #thresholds acl table low/high threshold/type - dvs.runcmd("crm config thresholds acl table low 50") - dvs.runcmd("crm config thresholds acl table high 90") - dvs.runcmd("crm config thresholds acl table type percentage") - - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'acl_table_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 
'Config', 'acl_table_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'acl_table_threshold_type') - assert threshold_type == 'percentage' - - def test_Configure_acl_group(self, dvs, testlog): - - #thresholds acl group low/high threshold/type - dvs.runcmd("crm config thresholds acl group low 50") - dvs.runcmd("crm config thresholds acl group high 90") - dvs.runcmd("crm config thresholds acl group type used") - - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'acl_group_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'acl_group_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'acl_group_threshold_type') - assert threshold_type == 'used' - - def test_Configure_acl_group_entry(self, dvs, testlog): - - #thresholds acl group entry low/high threshold/type - dvs.runcmd("crm config thresholds acl group entry low 50") - dvs.runcmd("crm config thresholds acl group entry high 90") - dvs.runcmd("crm config thresholds acl group entry type percentage") - - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'acl_entry_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'acl_entry_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'acl_entry_threshold_type') - assert threshold_type == 'percentage' - - def test_Configure_acl_group_counter(self, dvs, testlog): + config_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + intf_tbl = swsscommon.Table(config_db, "INTERFACE") + fvs = swsscommon.FieldValuePairs([("NULL","NULL")]) + intf_tbl.set("Ethernet0", fvs) + intf_tbl.set("Ethernet0|10.0.0.0/31", fvs) + dvs.port_admin_set("Ethernet0", "up") - #thresholds acl group counter low/high threshold/type - dvs.runcmd("crm config thresholds acl group counter low 50") - dvs.runcmd("crm config thresholds acl group 
counter high 90") - dvs.runcmd("crm config thresholds acl group counter type free") + crm_update(dvs, "polling_interval", "1") - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'acl_counter_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'acl_counter_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'acl_counter_threshold_type') - assert threshold_type == 'free' + # add static neighbor + dvs.runcmd("ip neigh replace 10.0.0.1 lladdr 11:22:33:44:55:66 dev Ethernet0") - def test_Configure_fdb(self, dvs, testlog): + db = swsscommon.DBConnector(0, dvs.redis_sock, 0) + ps = swsscommon.ProducerStateTable(db, "ROUTE_TABLE") + fvs = swsscommon.FieldValuePairs([("nexthop","10.0.0.1"), ("ifname", "Ethernet0")]) - #thresholds fdb low/high threshold/type - dvs.runcmd("crm config thresholds fdb low 50") - dvs.runcmd("crm config thresholds fdb high 90") - dvs.runcmd("crm config thresholds fdb type percentage") + # add route to make sure used count at least 1 and update available counter + ps.set("2.2.2.0/24", fvs) + dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_IPV4_ROUTE_ENTRY', '1000') time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'fdb_entry_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'fdb_entry_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'fdb_entry_threshold_type') - assert threshold_type == 'percentage' - def test_Configure_snat(self, dvs, testlog): - - #thresholds snat low/high threshold/type - dvs.runcmd("crm config thresholds snat low 50") - dvs.runcmd("crm config thresholds snat high 90") - dvs.runcmd("crm config thresholds snat type percentage") + # get counters + used_counter = getCrmCounterValue(dvs, 'STATS', 'crm_stats_ipv4_route_used') + avail_counter = getCrmCounterValue(dvs, 'STATS', 
'crm_stats_ipv4_route_available') - time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'snat_entry_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'snat_entry_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'snat_entry_threshold_type') - assert threshold_type == 'percentage' + crm_update(dvs, "ipv4_route_low_threshold", "0") - def test_Configure_dnat(self, dvs, testlog): + # for changing to free type will rest crm threshold exceed count, previous type can not be same free type + crm_update(dvs, "ipv4_route_threshold_type", "percentage") - #thresholds dnat low/high threshold/type - dvs.runcmd("crm config thresholds dnat low 50") - dvs.runcmd("crm config thresholds dnat high 90") - dvs.runcmd("crm config thresholds dnat type percentage") + # trigger exceed high threshold of free type + marker = dvs.add_log_marker() + crm_update(dvs, "ipv4_route_high_threshold", str(avail_counter)) + crm_update(dvs, "ipv4_route_threshold_type", "free") + time.sleep(20) + check_syslog(dvs, marker, "IPV4_ROUTE THRESHOLD_EXCEEDED for TH_FREE", 1) + # crm threshold exceed count will be reset when threshold type changed + marker = dvs.add_log_marker() + crm_update(dvs, "ipv4_route_high_threshold", str(used_counter)) + crm_update(dvs, "ipv4_route_threshold_type", "used") time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'dnat_entry_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'dnat_entry_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'dnat_entry_threshold_type') - assert threshold_type == 'percentage' - - def test_Configure_ipmc(self, dvs, testlog): + check_syslog(dvs, marker, "IPV4_ROUTE THRESHOLD_EXCEEDED for TH_USED", 1) - #thresholds ipmc low/high threshold/type - dvs.runcmd("crm config thresholds ipmc low 50") - dvs.runcmd("crm config thresholds ipmc high 90") 
- dvs.runcmd("crm config thresholds ipmc type percentage") + # remove route + ps._del("2.2.2.0/24") + dvs.runcmd("ip neigh del 10.0.0.1 lladdr 11:22:33:44:55:66 dev Ethernet0") + intf_tbl._del("Ethernet0|10.0.0.0/31") time.sleep(2) - threshold_low = getCrmConfigValue(dvs, 'Config', 'ipmc_entry_low_threshold') - assert threshold_low == 50 - threshold_high = getCrmConfigValue(dvs, 'Config', 'ipmc_entry_high_threshold') - assert threshold_high == 90 - threshold_type = getCrmConfigStr(dvs, 'Config', 'ipmc_entry_threshold_type') - assert threshold_type == 'percentage' # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_drop_counters.py b/tests/test_drop_counters.py index b003876f1a..cd089be917 100644 --- a/tests/test_drop_counters.py +++ b/tests/test_drop_counters.py @@ -11,7 +11,7 @@ # Debug Counter Table DEBUG_COUNTER_TABLE = 'DEBUG_COUNTER' -DROP_REASON_TABLE = 'DEBUG_COUNTER_DROP_REASON' +DROP_REASON_TABLE = 'DEBUG_COUNTER_DROP_REASON' # Debug Counter Capability Table CAPABILITIES_TABLE = 'DEBUG_COUNTER_CAPABILITIES' @@ -54,7 +54,11 @@ EXPECTED_ASIC_FIELDS = [ASIC_COUNTER_TYPE_FIELD, ASIC_COUNTER_INGRESS_REASON_LIST_FIELD, ASIC_COUNTER_EGRESS_REASON_LIST_FIELD] EXPECTED_NUM_ASIC_FIELDS = 2 +# port to be add and removed +PORT = "Ethernet0" +PORT_TABLE_NAME = "PORT" +@pytest.mark.usefixtures('dvs_port_manager') # FIXME: It is really annoying to have to re-run tests due to inconsistent timing, should # implement some sort of polling interface for checking ASIC/flex counter tables after # applying changes to config DB @@ -64,6 +68,7 @@ def setup_db(self, dvs): self.config_db = swsscommon.DBConnector(4, dvs.redis_sock, 0) self.flex_db = swsscommon.DBConnector(5, dvs.redis_sock, 0) self.state_db = swsscommon.DBConnector(6, dvs.redis_sock, 0) + self.counters_db = swsscommon.DBConnector(2, dvs.redis_sock, 0) def genericGetAndAssert(self, table, key): status, fields = 
table.get(key) @@ -654,6 +659,79 @@ def test_removeInvalidDropReason(self, dvs, testlog): # Cleanup for the next test. self.delete_drop_counter(name) self.remove_drop_reason(name, reason1) + + def getPortOid(self, dvs, port_name): + port_name_map = swsscommon.Table(self.counters_db, "COUNTERS_PORT_NAME_MAP") + status, returned_value = port_name_map.hget("", port_name); + assert status == True + return returned_value + + def test_add_remove_port(self, dvs, testlog): + """ + This test verifies that debug counters are removed when we remove a port + and debug counters are added each time we add ports (if debug counter is enabled) + """ + self.setup_db(dvs) + + # save port info + cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0) + tbl = swsscommon.Table(cdb, PORT_TABLE_NAME) + (status, fvs) = tbl.get(PORT) + assert status == True + + # get counter oid + oid = self.getPortOid(dvs, PORT) + + # verifies debug coutner dont exist for port + flex_counter_table = swsscommon.Table(self.flex_db, FLEX_COUNTER_TABLE) + status, fields = flex_counter_table.get(oid) + assert len(fields) == 0 + + # add debug counters + name1 = 'DEBUG_0' + reason1 = 'L3_ANY' + name2 = 'DEBUG_1' + reason2 = 'L2_ANY' + + self.create_drop_counter(name1, PORT_INGRESS_DROPS) + self.add_drop_reason(name1, reason1) + + self.create_drop_counter(name2, PORT_EGRESS_DROPS) + self.add_drop_reason(name2, reason2) + time.sleep(3) + + # verifies debug counter exist for port + flex_counter_table = swsscommon.Table(self.flex_db, FLEX_COUNTER_TABLE) + status, fields = flex_counter_table.get(oid) + assert status == True + assert len(fields) == 1 + + # remove port and wait until it was removed from ASIC DB + self.dvs_port.remove_port(PORT) + dvs.get_asic_db().wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_PORT", oid) + + # verify that debug counter were removed + status, fields = flex_counter_table.get(oid) + assert len(fields) == 0 + + # add port and wait until the port is added on asic db + 
num_of_keys_without_port = len(dvs.get_asic_db().get_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT")) + tbl.set(PORT, fvs) + dvs.get_asic_db().wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", num_of_keys_without_port + 1) + dvs.get_counters_db().wait_for_fields("COUNTERS_PORT_NAME_MAP", "", [PORT]) + + # verifies that debug counters were added for port + oid = self.getPortOid(dvs, PORT) + status, fields = flex_counter_table.get(oid) + assert status == True + assert len(fields) == 1 + + # Cleanup for the next test. + self.delete_drop_counter(name1) + self.remove_drop_reason(name1, reason1) + + self.delete_drop_counter(name2) + self.remove_drop_reason(name2, reason2) def test_createAndDeleteMultipleCounters(self, dvs, testlog): """ diff --git a/tests/test_dtel.py b/tests/test_dtel.py index b45ba13972..c8e86d6b7d 100644 --- a/tests/test_dtel.py +++ b/tests/test_dtel.py @@ -211,7 +211,111 @@ def test_DtelQueueReportAttribs(self, dvs, testlog): assert False tbl._del("Ethernet0|0") - + + def test_DtelFlowWatchlist(self, dvs, testlog): + self.db = swsscommon.DBConnector(4, dvs.redis_sock, 0) + self.adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) + self.table = "DTEL_FLOW_WATCHLIST" + + fields_1=[("PRIORITY", "30"), + ("ETHER_TYPE", "0x800"), + ("L4_DST_PORT", "1674"), + ("FLOW_OP", "POSTCARD"), + ("REPORT_ALL_PACKETS", "FALSE"), + ("DROP_REPORT_ENABLE", "TRUE"), + ("TAIL_DROP_REPORT_ENABLE", "TRUE")] + fields_2=[("PRIORITY", "40"), + ("ETHER_TYPE", "0x800"), + ("L4_DST_PORT", "1674"), + ("FLOW_OP", "POSTCARD"), + ("REPORT_ALL_PACKETS", "TRUE"), + ("DROP_REPORT_ENABLE", "FALSE"), + ("TAIL_DROP_REPORT_ENABLE", "FALSE")] + fields_3=[("PRIORITY", "50"), + ("ETHER_TYPE", "0x800"), + ("L4_DST_PORT", "1674"), + ("FLOW_OP", "POSTCARD"), + ("REPORT_ALL_PACKETS", "TRUE")] + fields_4=[("PRIORITY", "60"), + ("ETHER_TYPE", "0x800"), + ("L4_DST_PORT", "1674"), + ("REPORT_ALL_PACKETS", "TRUE"), + ("DROP_REPORT_ENABLE", "TRUE"), + ("TAIL_DROP_REPORT_ENABLE", "TRUE")] + 
fields_5=[("PRIORITY", "70"), + ("ETHER_TYPE", "0x800"), + ("L4_DST_PORT", "1674"), + ("FLOW_OP", "NOP"), + ("REPORT_ALL_PACKETS", "FALSE"), + ("DROP_REPORT_ENABLE", "TRUE"), + ("TAIL_DROP_REPORT_ENABLE", "TRUE")] + listfield = [fields_1, fields_2, fields_3, fields_4, fields_5] + + for field in listfield: + k = listfield.index(field) + rule = "RULE-" + str(k) + self._create_dtel_acl_rule(self.table, rule, field) + self._check_dtel_acl_rule(dvs, rule) + self._remove_dtel_acl_rule(self.table, rule) + + def _create_dtel_acl_rule(self, table, rule, field): + tbl = swsscommon.Table(self.db, "ACL_RULE") + fvs = swsscommon.FieldValuePairs(field) + tbl.set(table + "|" + rule, fvs) + time.sleep(1) + + def _remove_dtel_acl_rule(self, table, rule): + tbl = swsscommon.Table(self.db, "ACL_RULE") + tbl._del(table + "|" + rule) + time.sleep(1) + + def _check_dtel_acl_rule(self, dvs, rule): + time.sleep(1) + atbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_ACL_ENTRY") + keys = atbl.getKeys() + acl_entry = [k for k in keys if k not in dvs.asicdb.default_acl_entries] + assert len(acl_entry) != 0 + (status, fvs) = atbl.get(acl_entry[0]) + value = dict(fvs) + assert status + + if rule == "RULE-0": + assert value["SAI_ACL_ENTRY_ATTR_PRIORITY"] == "30" + assert value["SAI_ACL_ENTRY_ATTR_FIELD_ETHER_TYPE"] == "2048&mask:0xffff" + assert value["SAI_ACL_ENTRY_ATTR_FIELD_L4_DST_PORT"] == "1674&mask:0xffff" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_ACL_DTEL_FLOW_OP"] == "SAI_ACL_DTEL_FLOW_OP_POSTCARD" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_DTEL_REPORT_ALL_PACKETS"] == "disabled" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_DTEL_DROP_REPORT_ENABLE"] == "true" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_DTEL_TAIL_DROP_REPORT_ENABLE"] == "true" + elif rule == "RULE-1": + assert value["SAI_ACL_ENTRY_ATTR_PRIORITY"] == "40" + assert value["SAI_ACL_ENTRY_ATTR_FIELD_ETHER_TYPE"] == "2048&mask:0xffff" + assert value["SAI_ACL_ENTRY_ATTR_FIELD_L4_DST_PORT"] == "1674&mask:0xffff" + assert 
value["SAI_ACL_ENTRY_ATTR_ACTION_ACL_DTEL_FLOW_OP"] == "SAI_ACL_DTEL_FLOW_OP_POSTCARD" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_DTEL_REPORT_ALL_PACKETS"] == "true" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_DTEL_DROP_REPORT_ENABLE"] == "disabled" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_DTEL_TAIL_DROP_REPORT_ENABLE"] == "disabled" + elif rule == "RULE-2": + assert value["SAI_ACL_ENTRY_ATTR_PRIORITY"] == "50" + assert value["SAI_ACL_ENTRY_ATTR_FIELD_ETHER_TYPE"] == "2048&mask:0xffff" + assert value["SAI_ACL_ENTRY_ATTR_FIELD_L4_DST_PORT"] == "1674&mask:0xffff" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_ACL_DTEL_FLOW_OP"] == "SAI_ACL_DTEL_FLOW_OP_POSTCARD" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_DTEL_REPORT_ALL_PACKETS"] == "true" + elif rule == "RULE-3": + assert value["SAI_ACL_ENTRY_ATTR_PRIORITY"] == "60" + assert value["SAI_ACL_ENTRY_ATTR_FIELD_ETHER_TYPE"] == "2048&mask:0xffff" + assert value["SAI_ACL_ENTRY_ATTR_FIELD_L4_DST_PORT"] == "1674&mask:0xffff" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_DTEL_REPORT_ALL_PACKETS"] == "true" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_DTEL_DROP_REPORT_ENABLE"] == "true" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_DTEL_TAIL_DROP_REPORT_ENABLE"] == "true" + elif rule == "RULE-4": + assert value["SAI_ACL_ENTRY_ATTR_PRIORITY"] == "70" + assert value["SAI_ACL_ENTRY_ATTR_FIELD_ETHER_TYPE"] == "2048&mask:0xffff" + assert value["SAI_ACL_ENTRY_ATTR_FIELD_L4_DST_PORT"] == "1674&mask:0xffff" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_ACL_DTEL_FLOW_OP"] == "SAI_ACL_DTEL_FLOW_OP_NOP" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_DTEL_REPORT_ALL_PACKETS"] == "disabled" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_DTEL_DROP_REPORT_ENABLE"] == "true" + assert value["SAI_ACL_ENTRY_ATTR_ACTION_DTEL_TAIL_DROP_REPORT_ENABLE"] == "true" def test_DtelEventAttribs(self, dvs, testlog): diff --git a/tests/test_evpn_fdb.py b/tests/test_evpn_fdb.py index 31d75535c7..3c9a217747 100644 --- a/tests/test_evpn_fdb.py +++ b/tests/test_evpn_fdb.py @@ -51,7 +51,7 @@ 
def test_evpnFdb(dvs, testlog): helper = VxlanEvpnHelper() dvs.setup_db() - dvs.runcmd("sonic-clear fdb all") + dvs.clear_fdb() time.sleep(2) #Find switch_id @@ -62,7 +62,6 @@ def test_evpnFdb(dvs, testlog): # create vlan print("Creating Vlan3") - #dvs.runcmd("config vlan add 3") dvs.create_vlan("3") time.sleep(2) @@ -79,7 +78,6 @@ def test_evpnFdb(dvs, testlog): vm_before = helper.how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN_MEMBER") print("Making Ethernet0 as a member of Vlan3") - #dvs.runcmd("config vlan member add 3 Ethernet0") dvs.create_vlan_member("3", "Ethernet0") time.sleep(2) diff --git a/tests/test_evpn_fdb_p2mp.py b/tests/test_evpn_fdb_p2mp.py index 7929bc862f..5aa407966c 100644 --- a/tests/test_evpn_fdb_p2mp.py +++ b/tests/test_evpn_fdb_p2mp.py @@ -54,7 +54,7 @@ def test_evpnFdbP2MP(dvs, testlog): helper = VxlanEvpnHelper() dvs.setup_db() - dvs.runcmd("sonic-clear fdb all") + dvs.clear_fdb() time.sleep(2) #Find switch_id @@ -371,6 +371,40 @@ def test_evpnFdbP2MP(dvs, testlog): assert mac1_found, str(extra) print("FDB Vlan3:52-54-00-25-06-E9:Ethernet0 is created in STATE-DB") + #UT-10 Evpn Mac add from remote when tunnels are not created + mac = "52:54:00:25:06:E1" + remote_ip_9 = "9.9.9.9" + print("Creating Evpn FDB Vlan3:"+mac.lower()+":9.9.9.9 in APP-DB") + helper.create_entry_pst( + dvs.pdb, + "VXLAN_FDB_TABLE", "Vlan3:"+mac.lower(), + [ + ("remote_vtep", remote_ip_9), + ("type", "dynamic"), + ("vni", "3") + ] + ) + time.sleep(1) + + #Adding remote VNI later + vxlan_obj.create_evpn_remote_vni(dvs, "Vlan3", remote_ip_9, "3") + time.sleep(1) + tnl_bp_oid_9 = get_vxlan_p2mp_tunnel_bp(dvs.adb, source_tnl_ip) + + # check that the FDB entry is inserted into ASIC DB + ok, extra = dvs.is_fdb_entry_exists(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY", + [("mac", mac), ("bvid", vlan_oid_3)], + [("SAI_FDB_ENTRY_ATTR_TYPE", "SAI_FDB_ENTRY_TYPE_STATIC"), + ("SAI_FDB_ENTRY_ATTR_ALLOW_MAC_MOVE", "true"), + ("SAI_FDB_ENTRY_ATTR_ENDPOINT_IP", 
remote_ip_9), + ("SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID", str(tnl_bp_oid_9)), + ] + ) + assert ok == True, str(extra) + print("EVPN FDB Vlan3:"+mac.lower()+":"+remote_ip_9+" is created in ASIC-DB") + + time.sleep(1) + dvs.remove_vlan_member("3", "Ethernet0") dvs.remove_vlan("3") diff --git a/tests/test_evpn_l3_vxlan.py b/tests/test_evpn_l3_vxlan.py index 7bcabacb6d..3f424f3830 100644 --- a/tests/test_evpn_l3_vxlan.py +++ b/tests/test_evpn_l3_vxlan.py @@ -35,6 +35,7 @@ def test_sip_tunnel_vrf_vni_map(self, dvs, testlog): print ("\n\nTesting Create and Delete SIP Tunnel and VRF VNI Map entries") print ("\tCreate SIP Tunnel") vxlan_obj.create_vlan1(dvs,"Vlan100") + vxlan_obj.check_vlan_obj(dvs, "100") vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name) @@ -68,10 +69,10 @@ def test_sip_tunnel_vrf_vni_map(self, dvs, testlog): helper.check_object(self.pdb, "VXLAN_VRF_TABLE", "%s:%s" % (tunnel_name, vrf_map_name), exp_attr1) print ("\tTesting SIP Tunnel Creation") - vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist) + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, tunnel_map_entry_count = 2) print ("\tTesting Tunnel Vlan VNI Map Entry") - vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) + vxlan_obj.check_vxlan_tunnel_map_entry_removed(dvs, tunnel_name, vlanlist, vnilist) print ("\tTesting Tunnel VRF VNI Map Entry") vxlan_obj.check_vxlan_tunnel_vrf_map_entry(dvs, tunnel_name, 'Vrf-RED', '1000') @@ -82,6 +83,7 @@ def test_sip_tunnel_vrf_vni_map(self, dvs, testlog): vxlan_obj.check_vxlan_tunnel_vrf_map_entry_remove(dvs, tunnel_name, 'Vrf-RED', '1000') print ("\tTesting Tunnel Vlan VNI Map entry removal") + vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, 
vlanlist, vnilist) @@ -96,7 +98,7 @@ def test_sip_tunnel_vrf_vni_map(self, dvs, testlog): # Test 2 - Create and Delete DIP Tunnel on adding and removing prefix route # @pytest.mark.skip(reason="Starting Route Orch, VRF Orch to be merged") # @pytest.mark.dev_sanity - def test_prefix_route_create_dip_tunnel(self, dvs, testlog): + def test_prefix_route_create_tunnel(self, dvs, testlog): vxlan_obj = self.get_vxlan_obj() helper = self.get_vxlan_helper() @@ -143,10 +145,10 @@ def test_prefix_route_create_dip_tunnel(self, dvs, testlog): helper.check_object(self.pdb, "VXLAN_VRF_TABLE", "%s:%s" % (tunnel_name, vrf_map_name), exp_attr1) print ("\tTesting SIP Tunnel Creation") - vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist) + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, tunnel_map_entry_count = 2) print ("\tTesting Tunnel Vlan Map Entry") - vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) + vxlan_obj.check_vxlan_tunnel_map_entry_removed(dvs, tunnel_name, vlanlist, vnilist) print ("\tTesting Tunnel Vrf Map Entry") vxlan_obj.check_vxlan_tunnel_vrf_map_entry(dvs, tunnel_name, 'Vrf-RED', '1000') @@ -160,17 +162,11 @@ def test_prefix_route_create_dip_tunnel(self, dvs, testlog): vxlan_obj.create_vrf_route(dvs, "80.80.1.0/24", 'Vrf-RED', '7.7.7.7', "Vlan100", "00:11:11:11:11:11", '1000') vxlan_obj.check_vrf_routes(dvs, "80.80.1.0/24", 'Vrf-RED', '7.7.7.7', tunnel_name, "00:11:11:11:11:11", '1000') - print ("\tTesting DIP tunnel 7.7.7.7 creation") - vxlan_obj.check_vxlan_dip_tunnel(dvs, tunnel_name, '6.6.6.6', '7.7.7.7') - print ("\tTest VRF IPv4 Route with Tunnel Nexthop Delete") vxlan_obj.delete_vrf_route(dvs, "80.80.1.0/24", 'Vrf-RED') vxlan_obj.check_del_tunnel_nexthop(dvs, 'Vrf-RED', '7.7.7.7', tunnel_name, "00:11:11:11:11:11", '1000') vxlan_obj.check_del_vrf_routes(dvs, "80.80.1.0/24", 'Vrf-RED') - print ("\tTesting DIP tunnel 7.7.7.7 deletion") - 
vxlan_obj.check_vxlan_dip_tunnel_delete(dvs, '7.7.7.7') - print ("\tTesting Tunnel Vrf Map Entry removal") vxlan_obj.remove_vxlan_vrf_tunnel_map(dvs, 'Vrf-RED') vxlan_obj.check_vxlan_tunnel_vrf_map_entry_remove(dvs, tunnel_name, 'Vrf-RED', '1000') @@ -180,6 +176,7 @@ def test_prefix_route_create_dip_tunnel(self, dvs, testlog): vxlan_obj.check_del_router_interface(dvs, "Vlan100") print ("\tTesting Tunnel Map entry removal") + vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist) @@ -196,7 +193,7 @@ def test_prefix_route_create_dip_tunnel(self, dvs, testlog): # Test 3 - Create and Delete DIP Tunnel and Test IPv4 route and overlay nexthop add and delete # @pytest.mark.skip(reason="Starting Route Orch, VRF Orch to be merged") # @pytest.mark.dev_sanity - def test_dip_tunnel_ipv4_routes(self, dvs, testlog): + def test_tunnel_ipv4_routes(self, dvs, testlog): vxlan_obj = self.get_vxlan_obj() helper = self.get_vxlan_helper() @@ -209,6 +206,7 @@ def test_dip_tunnel_ipv4_routes(self, dvs, testlog): print ("\n\nTesting IPv4 Route and Overlay Nexthop Add and Delete") print ("\tCreate SIP Tunnel") vxlan_obj.create_vlan1(dvs,"Vlan100") + vxlan_obj.check_vlan_obj(dvs, "100") vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name) @@ -242,29 +240,14 @@ def test_dip_tunnel_ipv4_routes(self, dvs, testlog): helper.check_object(self.pdb, "VXLAN_VRF_TABLE", "%s:%s" % (tunnel_name, vrf_map_name), exp_attr1) print ("\tTesting SIP Tunnel Creation") - vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist) + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, tunnel_map_entry_count = 2) print ("\tTesting Tunnel Vlan Map Entry") - vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, 
vnilist) + vxlan_obj.check_vxlan_tunnel_map_entry_removed(dvs, tunnel_name, vlanlist, vnilist) print ("\tTesting Tunnel Vrf Map Entry") vxlan_obj.check_vxlan_tunnel_vrf_map_entry(dvs, tunnel_name, 'Vrf-RED', '1000') - print ("\tTesting First DIP tunnel creation to 7.7.7.7") - vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan100', '7.7.7.7', '1000') - vxlan_obj.check_vxlan_dip_tunnel(dvs, tunnel_name, '6.6.6.6', '7.7.7.7') - - print ("\tTesting VLAN 100 extension") - vxlan_obj.check_vlan_extension(dvs, '100', '7.7.7.7') - - print ("\tTesting Second DIP tunnel creation to 8.8.8.8") - vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan100', '8.8.8.8', '1000') - vxlan_obj.check_vxlan_dip_tunnel(dvs, tunnel_name, '6.6.6.6', '8.8.8.8') - - print ("\tTesting VLAN 100 extension to 8.8.8.8 and 7.7.7.7") - vxlan_obj.check_vlan_extension(dvs, '100', '8.8.8.8') - vxlan_obj.check_vlan_extension(dvs, '100', '7.7.7.7') - print ("\tTesting VLAN 100 interface creation") vxlan_obj.create_vlan_interface(dvs, "Vlan100", "Ethernet24", "Vrf-RED", "100.100.3.1/24") vxlan_obj.check_router_interface(dvs, 'Vrf-RED', vxlan_obj.vlan_id_map['100'], 2) @@ -326,6 +309,44 @@ def test_dip_tunnel_ipv4_routes(self, dvs, testlog): vxlan_obj.check_vrf_routes_ecmp_nexthop_grp_del(dvs, 2) vxlan_obj.check_del_vrf_routes(dvs, "80.80.1.0/24", 'Vrf-RED') + print ("\n\nTest VRF IPv4 Multiple Route with ECMP Tunnel Nexthop Add and Delete") + vxlan_obj.fetch_exist_entries(dvs) + + ecmp_nexthop_attr = [ + ("nexthop", "7.7.7.7,8.8.8.8"), + ("ifname", "Vlan100,Vlan100"), + ("vni_label", "1000,1000"), + ("router_mac", "00:11:11:11:11:11,00:22:22:22:22:22"), + ] + + print ("\tTest VRF IPv4 Multiple Route with ECMP Tunnel Nexthop [7.7.7.7 , 8.8.8.8] Add") + vxlan_obj.create_vrf_route_ecmp(dvs, "80.80.1.0/24", 'Vrf-RED', ecmp_nexthop_attr) + + nh_count = 2 + ecmp_nhid_list = vxlan_obj.check_vrf_routes_ecmp(dvs, "80.80.1.0/24", 'Vrf-RED', tunnel_name, nh_count) + assert nh_count == len(ecmp_nhid_list) + 
vxlan_obj.check_add_tunnel_nexthop(dvs, ecmp_nhid_list[0], '7.7.7.7', tunnel_name, '00:11:11:11:11:11', '1000') + vxlan_obj.check_add_tunnel_nexthop(dvs, ecmp_nhid_list[1], '8.8.8.8', tunnel_name, '00:22:22:22:22:22', '1000') + + nh_count = 2 + vxlan_obj.create_vrf_route_ecmp(dvs, "90.90.1.0/24", 'Vrf-RED', ecmp_nexthop_attr) + ecmp_nhid_list = vxlan_obj.check_vrf_routes_ecmp(dvs, "90.90.1.0/24", 'Vrf-RED', tunnel_name, nh_count) + assert nh_count == len(ecmp_nhid_list) + vxlan_obj.check_add_tunnel_nexthop(dvs, ecmp_nhid_list[0], '7.7.7.7', tunnel_name, '00:11:11:11:11:11', '1000') + vxlan_obj.check_add_tunnel_nexthop(dvs, ecmp_nhid_list[1], '8.8.8.8', tunnel_name, '00:22:22:22:22:22', '1000') + + print ("\tTest VRF IPv4 Multiple Route with ECMP Tunnel Nexthop [7.7.7.7 , 8.8.8.8] Delete") + vxlan_obj.fetch_exist_entries(dvs) + vxlan_obj.delete_vrf_route(dvs, "80.80.1.0/24", 'Vrf-RED') + vxlan_obj.check_del_vrf_routes(dvs, "80.80.1.0/24", 'Vrf-RED') + vxlan_obj.fetch_exist_entries(dvs) + vxlan_obj.delete_vrf_route(dvs, "90.90.1.0/24", 'Vrf-RED') + vxlan_obj.check_del_vrf_routes(dvs, "90.90.1.0/24", 'Vrf-RED') + helper.check_deleted_object(self.adb, vxlan_obj.ASIC_NEXT_HOP, ecmp_nhid_list[0]) + helper.check_deleted_object(self.adb, vxlan_obj.ASIC_NEXT_HOP, ecmp_nhid_list[1]) + + vxlan_obj.check_vrf_routes_ecmp_nexthop_grp_del(dvs, 2) + print ("\n\nTest VRF IPv4 Route with Tunnel Nexthop update from non-ECMP to ECMP") print ("\tTest VRF IPv4 Route with Tunnel Nexthop 7.7.7.7 Add") vxlan_obj.fetch_exist_entries(dvs) @@ -371,21 +392,12 @@ def test_dip_tunnel_ipv4_routes(self, dvs, testlog): vxlan_obj.remove_vxlan_vrf_tunnel_map(dvs, 'Vrf-RED') vxlan_obj.check_vxlan_tunnel_vrf_map_entry_remove(dvs, tunnel_name, 'Vrf-RED', '1000') - print ("\tTesting LastVlan removal and DIP tunnel delete for 7.7.7.7") - vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan100', '7.7.7.7') - vxlan_obj.check_vlan_extension_delete(dvs, '100', '7.7.7.7') - vxlan_obj.check_vxlan_dip_tunnel_delete(dvs, 
'7.7.7.7') - - print ("\tTesting LastVlan removal and DIP tunnel delete for 8.8.8.8") - vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan100', '8.8.8.8') - vxlan_obj.check_vlan_extension_delete(dvs, '100', '8.8.8.8') - vxlan_obj.check_vxlan_dip_tunnel_delete(dvs, '8.8.8.8') - print ("\tTesting Vlan 100 interface delete") vxlan_obj.delete_vlan_interface(dvs, "Vlan100", "100.100.3.1/24") vxlan_obj.check_del_router_interface(dvs, "Vlan100") print ("\tTesting Tunnel Map entry removal") + vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist) @@ -402,7 +414,7 @@ def test_dip_tunnel_ipv4_routes(self, dvs, testlog): # Test 4 - Create and Delete DIP Tunnel and Test IPv6 route and overlay nexthop add and delete # @pytest.mark.skip(reason="Starting Route Orch, VRF Orch to be merged") # @pytest.mark.dev_sanity - def test_dip_tunnel_ipv6_routes(self, dvs, testlog): + def test_tunnel_ipv6_routes(self, dvs, testlog): vxlan_obj = self.get_vxlan_obj() helper = self.get_vxlan_helper() @@ -415,6 +427,7 @@ def test_dip_tunnel_ipv6_routes(self, dvs, testlog): print ("\n\nTesting IPv6 Route and Overlay Nexthop Add and Delete") print ("\tCreate SIP Tunnel") vxlan_obj.create_vlan1(dvs,"Vlan100") + vxlan_obj.check_vlan_obj(dvs, "100") vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name) @@ -449,28 +462,14 @@ def test_dip_tunnel_ipv6_routes(self, dvs, testlog): helper.check_object(self.pdb, "VXLAN_VRF_TABLE", "%s:%s" % (tunnel_name, vrf_map_name), exp_attr1) print ("\tTesting SIP Tunnel Creation") - vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist) + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, tunnel_map_entry_count = 2) print ("\tTesting Tunnel Vlan Map Entry") - 
vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) + vxlan_obj.check_vxlan_tunnel_map_entry_removed(dvs, tunnel_name, vlanlist, vnilist) print ("\tTesting Tunnel Vrf Map Entry") vxlan_obj.check_vxlan_tunnel_vrf_map_entry(dvs, tunnel_name, 'Vrf-RED', '1000') - print ("\tTesting First DIP tunnel creation to 7.7.7.7") - vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan100', '7.7.7.7', '1000') - vxlan_obj.check_vxlan_dip_tunnel(dvs, tunnel_name, '6.6.6.6', '7.7.7.7') - - print ("\tTesting VLAN 100 extension") - vxlan_obj.check_vlan_extension(dvs, '100', '7.7.7.7') - - print ("\tTesting Second DIP tunnel creation to 8.8.8.8") - vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan100', '8.8.8.8', '1000') - vxlan_obj.check_vxlan_dip_tunnel(dvs, tunnel_name, '6.6.6.6', '8.8.8.8') - - print ("\tTesting VLAN 100 extension to 8.8.8.8 and 7.7.7.7") - vxlan_obj.check_vlan_extension(dvs, '100', '8.8.8.8') - vxlan_obj.check_vlan_extension(dvs, '100', '7.7.7.7') vxlan_obj.fetch_exist_entries(dvs) print ("\tTesting VLAN 100 interface creation") @@ -579,21 +578,12 @@ def test_dip_tunnel_ipv6_routes(self, dvs, testlog): vxlan_obj.remove_vxlan_vrf_tunnel_map(dvs, 'Vrf-RED') vxlan_obj.check_vxlan_tunnel_vrf_map_entry_remove(dvs, tunnel_name, 'Vrf-RED', '1000') - print ("\tTesting LastVlan removal and DIP tunnel delete for 7.7.7.7") - vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan100', '7.7.7.7') - vxlan_obj.check_vlan_extension_delete(dvs, '100', '7.7.7.7') - vxlan_obj.check_vxlan_dip_tunnel_delete(dvs, '7.7.7.7') - - print ("\tTesting LastVlan removal and DIP tunnel delete for 8.8.8.8") - vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan100', '8.8.8.8') - vxlan_obj.check_vlan_extension_delete(dvs, '100', '8.8.8.8') - vxlan_obj.check_vxlan_dip_tunnel_delete(dvs, '8.8.8.8') - print ("\tTesting Vlan 100 interface delete") vxlan_obj.delete_vlan_interface(dvs, "Vlan100", "2001::8/64") vxlan_obj.check_del_router_interface(dvs, "Vlan100") print ("\tTesting Tunnel Map entry removal") + 
vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist) diff --git a/tests/test_evpn_l3_vxlan_p2mp.py b/tests/test_evpn_l3_vxlan_p2mp.py index c704cb3788..f3041979eb 100644 --- a/tests/test_evpn_l3_vxlan_p2mp.py +++ b/tests/test_evpn_l3_vxlan_p2mp.py @@ -37,12 +37,12 @@ def test_sip_tunnel_vrf_vni_map(self, dvs, testlog): vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name) - print ("\tCreate Vlan-VNI map and VRF-VNI map") - vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') - vxlan_obj.create_vrf(dvs, "Vrf-RED") vxlan_obj.create_vxlan_vrf_tunnel_map(dvs, 'Vrf-RED', '1000') + print ("\tCreate Vlan-VNI map and VRF-VNI map") + vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') + print ("\tTesting VRF-VNI map in APP DB") vlanlist = ['100'] vnilist = ['1000'] @@ -67,10 +67,10 @@ def test_sip_tunnel_vrf_vni_map(self, dvs, testlog): helper.check_object(self.pdb, "VXLAN_VRF_TABLE", "%s:%s" % (tunnel_name, vrf_map_name), exp_attr1) print ("\tTesting SIP Tunnel Creation") - vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist) + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, ignore_bp=False, tunnel_map_entry_count=2) print ("\tTesting Tunnel Vlan VNI Map Entry") - vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) + vxlan_obj.check_vxlan_tunnel_map_entry_removed(dvs, tunnel_name, vlanlist, vnilist) print ("\tTesting Tunnel VRF VNI Map Entry") vxlan_obj.check_vxlan_tunnel_vrf_map_entry(dvs, tunnel_name, 'Vrf-RED', '1000') @@ -81,6 +81,7 @@ def test_sip_tunnel_vrf_vni_map(self, dvs, testlog): vxlan_obj.check_vxlan_tunnel_vrf_map_entry_remove(dvs, tunnel_name, 'Vrf-RED', '1000') print ("\tTesting 
Tunnel Vlan VNI Map entry removal") + vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist) @@ -88,7 +89,7 @@ def test_sip_tunnel_vrf_vni_map(self, dvs, testlog): vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) vxlan_obj.remove_evpn_nvo(dvs, 'nvo1') time.sleep(2) - vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6') + vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6', ignore_bp=False) vxlan_obj.remove_vlan(dvs, "100") @@ -141,10 +142,10 @@ def test_prefix_route_create_remote_endpoint(self, dvs, testlog): helper.check_object(self.pdb, "VXLAN_VRF_TABLE", "%s:%s" % (tunnel_name, vrf_map_name), exp_attr1) print ("\tTesting SIP Tunnel Creation") - vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist) + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, ignore_bp=False, tunnel_map_entry_count=2) print ("\tTesting Tunnel Vlan Map Entry") - vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) + vxlan_obj.check_vxlan_tunnel_map_entry_removed(dvs, tunnel_name, vlanlist, vnilist) print ("\tTesting Tunnel Vrf Map Entry") vxlan_obj.check_vxlan_tunnel_vrf_map_entry(dvs, tunnel_name, 'Vrf-RED', '1000') @@ -172,6 +173,7 @@ def test_prefix_route_create_remote_endpoint(self, dvs, testlog): vxlan_obj.check_del_router_interface(dvs, "Vlan100") print ("\tTesting Tunnel Map entry removal") + vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist) @@ -179,7 +181,7 @@ def test_prefix_route_create_remote_endpoint(self, dvs, testlog): vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) vxlan_obj.remove_evpn_nvo(dvs, 
'nvo1') time.sleep(2) - vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6') + vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6', ignore_bp=False) vxlan_obj.remove_vrf(dvs, "Vrf-RED") vxlan_obj.remove_vlan_member(dvs, "100", "Ethernet24") vxlan_obj.remove_vlan(dvs, "100") @@ -200,6 +202,7 @@ def test_remote_ipv4_routes(self, dvs, testlog): print ("\n\nTesting IPv4 Route and Overlay Nexthop Add and Delete") print ("\tCreate SIP Tunnel") vxlan_obj.create_vlan1(dvs,"Vlan100") + vxlan_obj.check_vlan_obj(dvs, "100") vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name) @@ -233,27 +236,14 @@ def test_remote_ipv4_routes(self, dvs, testlog): helper.check_object(self.pdb, "VXLAN_VRF_TABLE", "%s:%s" % (tunnel_name, vrf_map_name), exp_attr1) print ("\tTesting SIP Tunnel Creation") - vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist) + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, ignore_bp=False, tunnel_map_entry_count=2) print ("\tTesting Tunnel Vlan Map Entry") - vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) + vxlan_obj.check_vxlan_tunnel_map_entry_removed(dvs, tunnel_name, vlanlist, vnilist) print ("\tTesting Tunnel Vrf Map Entry") vxlan_obj.check_vxlan_tunnel_vrf_map_entry(dvs, tunnel_name, 'Vrf-RED', '1000') - print ("\tTesting First Remote end point to 7.7.7.7") - vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan100', '7.7.7.7', '1000') - - print ("\tTesting VLAN 100 extension") - vxlan_obj.check_vlan_extension_p2mp(dvs, '100', '6.6.6.6', '7.7.7.7') - - print ("\tTesting Second remote end point to 8.8.8.8") - vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan100', '8.8.8.8', '1000') - - print ("\tTesting VLAN 100 extension to 8.8.8.8 and 7.7.7.7") - vxlan_obj.check_vlan_extension_p2mp(dvs, '100', '6.6.6.6', '8.8.8.8') - vxlan_obj.check_vlan_extension_p2mp(dvs, '100', '6.6.6.6', '7.7.7.7') - print 
("\tTesting VLAN 100 interface creation") vxlan_obj.create_vlan_interface(dvs, "Vlan100", "Ethernet24", "Vrf-RED", "100.100.3.1/24") vxlan_obj.check_router_interface(dvs, 'Vrf-RED', vxlan_obj.vlan_id_map['100'], 2) @@ -360,19 +350,12 @@ def test_remote_ipv4_routes(self, dvs, testlog): vxlan_obj.remove_vxlan_vrf_tunnel_map(dvs, 'Vrf-RED') vxlan_obj.check_vxlan_tunnel_vrf_map_entry_remove(dvs, tunnel_name, 'Vrf-RED', '1000') - print ("\tTesting LastVlan removal and remote end point delete for 7.7.7.7") - vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan100', '7.7.7.7') - vxlan_obj.check_vlan_extension_delete_p2mp(dvs, '100', '6.6.6.6', '7.7.7.7') - - print ("\tTesting LastVlan removal and remote end point delete for 8.8.8.8") - vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan100', '8.8.8.8') - vxlan_obj.check_vlan_extension_delete_p2mp(dvs, '100', '6.6.6.6', '8.8.8.8') - print ("\tTesting Vlan 100 interface delete") vxlan_obj.delete_vlan_interface(dvs, "Vlan100", "100.100.3.1/24") vxlan_obj.check_del_router_interface(dvs, "Vlan100") print ("\tTesting Tunnel Map entry removal") + vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist) @@ -380,7 +363,7 @@ def test_remote_ipv4_routes(self, dvs, testlog): vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) vxlan_obj.remove_evpn_nvo(dvs, 'nvo1') time.sleep(2) - vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6') + vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6', ignore_bp=False) vxlan_obj.remove_vrf(dvs, "Vrf-RED") vxlan_obj.remove_vlan_member(dvs, "100", "Ethernet24") vxlan_obj.remove_vlan(dvs, "100") @@ -402,6 +385,7 @@ def test_remote_ipv6_routes(self, dvs, testlog): print ("\n\nTesting IPv6 Route and Overlay Nexthop Add and Delete") print ("\tCreate SIP Tunnel") vxlan_obj.create_vlan1(dvs,"Vlan100") + 
vxlan_obj.check_vlan_obj(dvs, "100") vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name) @@ -436,27 +420,14 @@ def test_remote_ipv6_routes(self, dvs, testlog): helper.check_object(self.pdb, "VXLAN_VRF_TABLE", "%s:%s" % (tunnel_name, vrf_map_name), exp_attr1) print ("\tTesting SIP Tunnel Creation") - vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist) + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, ignore_bp=False, tunnel_map_entry_count=2) print ("\tTesting Tunnel Vlan Map Entry") - vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) + vxlan_obj.check_vxlan_tunnel_map_entry_removed(dvs, tunnel_name, vlanlist, vnilist) print ("\tTesting Tunnel Vrf Map Entry") vxlan_obj.check_vxlan_tunnel_vrf_map_entry(dvs, tunnel_name, 'Vrf-RED', '1000') - print ("\tTesting First remote endpoint creation to 7.7.7.7") - vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan100', '7.7.7.7', '1000') - - print ("\tTesting VLAN 100 extension") - vxlan_obj.check_vlan_extension_p2mp(dvs, '100', '6.6.6.6', '7.7.7.7') - - print ("\tTesting Second remote endpoint creation to 8.8.8.8") - vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan100', '8.8.8.8', '1000') - - print ("\tTesting VLAN 100 extension to 8.8.8.8 and 7.7.7.7") - vxlan_obj.check_vlan_extension_p2mp(dvs, '100', '6.6.6.6', '8.8.8.8') - vxlan_obj.check_vlan_extension_p2mp(dvs, '100', '6.6.6.6', '7.7.7.7') - vxlan_obj.fetch_exist_entries(dvs) print ("\tTesting VLAN 100 interface creation") vxlan_obj.create_vlan_interface(dvs, "Vlan100", "Ethernet24", "Vrf-RED", "2001::8/64") @@ -564,19 +535,12 @@ def test_remote_ipv6_routes(self, dvs, testlog): vxlan_obj.remove_vxlan_vrf_tunnel_map(dvs, 'Vrf-RED') vxlan_obj.check_vxlan_tunnel_vrf_map_entry_remove(dvs, tunnel_name, 'Vrf-RED', '1000') - print ("\tTesting LastVlan removal and remote endpoint delete for 7.7.7.7") - vxlan_obj.remove_evpn_remote_vni(dvs, 
'Vlan100', '7.7.7.7') - vxlan_obj.check_vlan_extension_delete_p2mp(dvs, '100', '6.6.6.6', '7.7.7.7') - - print ("\tTesting LastVlan removal and remote endpoint delete for 8.8.8.8") - vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan100', '8.8.8.8') - vxlan_obj.check_vlan_extension_delete_p2mp(dvs, '100', '6.6.6.6', '8.8.8.8') - print ("\tTesting Vlan 100 interface delete") vxlan_obj.delete_vlan_interface(dvs, "Vlan100", "2001::8/64") vxlan_obj.check_del_router_interface(dvs, "Vlan100") print ("\tTesting Tunnel Map entry removal") + vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist) @@ -584,11 +548,100 @@ def test_remote_ipv6_routes(self, dvs, testlog): vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) vxlan_obj.remove_evpn_nvo(dvs, 'nvo1') time.sleep(2) - vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6') + vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6', ignore_bp=False) vxlan_obj.remove_vrf(dvs, "Vrf-RED") vxlan_obj.remove_vlan_member(dvs, "100", "Ethernet24") vxlan_obj.remove_vlan(dvs, "100") + def test_prefix_route_create_on_l2_vni(self, dvs, testlog): + vxlan_obj = self.get_vxlan_obj() + helper = self.get_vxlan_helper() + + self.setup_db(dvs) + tunnel_name = 'tunnel_2' + map_name = 'map_1000_100' + vrf_map_name = 'evpn_map_1000_Vrf-RED' + vxlan_obj.fetch_exist_entries(dvs) + + print ("\tCreate SIP Tunnel") + vlan_ids = vxlan_obj.helper.get_exist_entries(dvs, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN") + vlan_oid = vxlan_obj.create_vlan(dvs,"Vlan100", vlan_ids) + vxlan_obj.check_vlan_obj(dvs, "100") + vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') + vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name) + + print ("\tCreate Vlan-VNI map") + vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') + + print ("\tTesting 
VRF-VNI map in APP DB") + vxlan_obj.create_vrf(dvs, "Vrf-RED") + + vlanlist = ['100'] + vnilist = ['1000'] + + print ("\tTesting SIP Tunnel Creation") + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, ignore_bp=False, tunnel_map_entry_count=1) + + print ("\tTesting VLAN 100 interface creation") + vxlan_obj.create_vlan_interface(dvs, "Vlan100", "Ethernet24", "Vrf-RED", "100.100.3.1/24") + vxlan_obj.check_router_interface(dvs, 'Vrf-RED', vlan_oid, 2) + + print ("\tTest if IPv4 Route with Tunnel Nexthop Add is not created") + vxlan_obj.create_vrf_route(dvs, "80.80.1.0/24", 'Vrf-RED', '7.7.7.7', "Vlan100", "00:11:11:11:11:11", '1000') + vxlan_obj.check_vrf_routes_absence(dvs, "80.80.1.0/24", 'Vrf-RED', '7.7.7.7', tunnel_name, "00:11:11:11:11:11", '1000') + + print ("\tCreate Vlan-VNI map and VRF-VNI map") + vxlan_obj.create_vxlan_vrf_tunnel_map(dvs, 'Vrf-RED', '1000') + + exp_attrs = [ + ("vni", "1000"), + ] + exp_attr = {} + for an in range(len(exp_attrs)): + exp_attr[exp_attrs[an][0]] = exp_attrs[an][1] + + helper.check_object(self.pdb, "VRF_TABLE", 'Vrf-RED', exp_attr) + + exp_attrs1 = [ + ("vni", "1000"), + ("vlan", "Vlan100"), + ] + exp_attr1 = {} + for an in range(len(exp_attrs1)): + exp_attr1[exp_attrs1[an][0]] = exp_attrs1[an][1] + + helper.check_object(self.pdb, "VXLAN_VRF_TABLE", "%s:%s" % (tunnel_name, vrf_map_name), exp_attr1) + vxlan_obj.check_vxlan_tunnel_map_entry_removed(dvs, tunnel_name, vlanlist, vnilist) + vxlan_obj.check_vxlan_tunnel_vrf_map_entry(dvs, tunnel_name, 'Vrf-RED', '1000') + print ("\tTest VRF IPv4 Route with Tunnel Nexthop Add") + vxlan_obj.check_vrf_routes(dvs, "80.80.1.0/24", 'Vrf-RED', '7.7.7.7', tunnel_name, "00:11:11:11:11:11", '1000') + + print ("\tTest VRF IPv4 Route with Tunnel Nexthop Delete") + vxlan_obj.delete_vrf_route(dvs, "80.80.1.0/24", 'Vrf-RED') + vxlan_obj.check_del_tunnel_nexthop(dvs, 'Vrf-RED', '7.7.7.7', tunnel_name, "00:11:11:11:11:11", '1000') + vxlan_obj.check_del_vrf_routes(dvs, 
"80.80.1.0/24", 'Vrf-RED') + + print ("\tTesting Tunnel Vrf Map Entry removal") + vxlan_obj.remove_vxlan_vrf_tunnel_map(dvs, 'Vrf-RED') + vxlan_obj.check_vxlan_tunnel_vrf_map_entry_remove(dvs, tunnel_name, 'Vrf-RED', '1000') + + print ("\tTesting Vlan 100 interface delete") + vxlan_obj.delete_vlan_interface(dvs, "Vlan100", "100.100.3.1/24") + vxlan_obj.check_del_router_interface(dvs, "Vlan100") + + print ("\tTesting Tunnel Map entry removal") + vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) + vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') + vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist) + + print ("\tTesting SIP Tunnel Deletion") + vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) + vxlan_obj.remove_evpn_nvo(dvs, 'nvo1') + time.sleep(2) + vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6', ignore_bp=False) + vxlan_obj.remove_vrf(dvs, "Vrf-RED") + vxlan_obj.remove_vlan_member(dvs, "100", "Ethernet24") + vxlan_obj.remove_vlan(dvs, "100") # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_evpn_tunnel.py b/tests/test_evpn_tunnel.py index b58944f7ce..86f5ad53f6 100644 --- a/tests/test_evpn_tunnel.py +++ b/tests/test_evpn_tunnel.py @@ -59,6 +59,9 @@ def test_p2p_tunnel(self, dvs, testlog): vnilist = ['1000', '1001', '1002'] vxlan_obj.fetch_exist_entries(dvs) + vxlan_obj.create_vlan1(dvs,"Vlan100") + vxlan_obj.create_vlan1(dvs,"Vlan101") + vxlan_obj.create_vlan1(dvs,"Vlan102") vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name_1, '1001', 'Vlan101') @@ -161,3 +164,95 @@ def test_p2mp_tunnel_with_dip(self, dvs, testlog): print("Testing SIP Tunnel Deletion") vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) 
vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6') + vxlan_obj.remove_vlan(dvs, "100") + vxlan_obj.remove_vlan(dvs, "101") + vxlan_obj.remove_vlan(dvs, "102") + + def test_delayed_vlan_vni_map(self, dvs, testlog): + vxlan_obj = self.get_vxlan_obj() + + tunnel_name = 'tunnel_2' + map_name = 'map_1000_100' + map_name_1 = 'map_1001_101' + vlanlist = ['100'] + vnilist = ['1000'] + + vxlan_obj.fetch_exist_entries(dvs) + vxlan_obj.create_vlan1(dvs,"Vlan100") + vxlan_obj.create_vlan1(dvs,"Vlan101") + + vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') + vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') + + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, tunnel_map_entry_count = 1) + vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) + + vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name) + + vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan101', '7.7.7.7', '1001') + vxlan_obj.check_vxlan_dip_tunnel_not_created(dvs, tunnel_name, '6.6.6.6', '7.7.7.7') + vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name_1, '1001', 'Vlan101') + + print("Testing VLAN 101 extension") + vxlan_obj.check_vxlan_dip_tunnel(dvs, tunnel_name, '6.6.6.6', '7.7.7.7') + vxlan_obj.check_vlan_extension(dvs, '101', '7.7.7.7') + + print("Testing Vlan Extension removal") + vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan101', '7.7.7.7') + vxlan_obj.check_vlan_extension_delete(dvs, '101', '7.7.7.7') + vxlan_obj.check_vxlan_dip_tunnel_delete(dvs, '7.7.7.7') + + vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') + vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name_1, '1001', 'Vlan101') + vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist) + + print("Testing SIP Tunnel Deletion") + vxlan_obj.remove_evpn_nvo(dvs, 'nvo1') + vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) + vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, 
'6.6.6.6') + vxlan_obj.remove_vlan(dvs, "100") + vxlan_obj.remove_vlan(dvs, "101") + + def test_invalid_vlan_extension(self, dvs, testlog): + vxlan_obj = self.get_vxlan_obj() + + tunnel_name = 'tunnel_2' + map_name = 'map_1000_100' + map_name_1 = 'map_1001_101' + vlanlist = ['100'] + vnilist = ['1000'] + + vxlan_obj.fetch_exist_entries(dvs) + vxlan_obj.create_vlan1(dvs,"Vlan100") + + vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') + vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') + + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, tunnel_map_entry_count = 1) + vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) + + vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name) + + vxlan_obj.create_vrf(dvs, "Vrf-RED") + vxlan_obj.create_vxlan_vrf_tunnel_map(dvs, 'Vrf-RED', '1000') + + vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan100', '7.7.7.7', '1000') + vxlan_obj.check_vlan_extension_not_created(dvs, '100', '7.7.7.7') + + vxlan_obj.remove_vxlan_vrf_tunnel_map(dvs, 'Vrf-RED') + vxlan_obj.remove_vrf(dvs, "Vrf-RED") + vxlan_obj.check_vxlan_dip_tunnel(dvs, tunnel_name, '6.6.6.6', '7.7.7.7') + print("Testing VLAN 100 extension") + vxlan_obj.check_vlan_extension(dvs, '100', '7.7.7.7') + + print("Testing Vlan Extension removal") + vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan100', '7.7.7.7') + vxlan_obj.check_vlan_extension_delete(dvs, '100', '7.7.7.7') + + vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') + vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist) + + print("Testing SIP Tunnel Deletion") + vxlan_obj.remove_evpn_nvo(dvs, 'nvo1') + vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) + vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6') diff --git a/tests/test_evpn_tunnel_p2mp.py b/tests/test_evpn_tunnel_p2mp.py index 1783980b73..bbbb786f9a 100644 --- a/tests/test_evpn_tunnel_p2mp.py +++ 
b/tests/test_evpn_tunnel_p2mp.py @@ -30,7 +30,7 @@ def test_p2mp_tunnel(self, dvs, testlog): vnilist = ['1000', '1001', '1002'] print("Testing SIP Tunnel Creation") - vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist) + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, ignore_bp=False) print("Testing Tunnel Map Entry") vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) @@ -43,7 +43,7 @@ def test_p2mp_tunnel(self, dvs, testlog): print("Testing SIP Tunnel Deletion") vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) - vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6') + vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6', ignore_bp=False) # Test 2 - Vlan extension Tests def test_vlan_extension(self, dvs, testlog): @@ -57,12 +57,15 @@ def test_vlan_extension(self, dvs, testlog): vnilist = ['1000', '1001', '1002'] vxlan_obj.fetch_exist_entries(dvs) + vxlan_obj.create_vlan1(dvs,"Vlan100") + vxlan_obj.create_vlan1(dvs,"Vlan101") + vxlan_obj.create_vlan1(dvs,"Vlan102") vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name_1, '1001', 'Vlan101') vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name_2, '1002', 'Vlan102') - vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist) + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, ignore_bp=False) vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name) @@ -102,6 +105,17 @@ def test_vlan_extension(self, dvs, testlog): vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan100', '8.8.8.8') vxlan_obj.check_vlan_extension_delete_p2mp(dvs, '100', '6.6.6.6', '8.8.8.8') + print("Testing remote endpoint again to 8.8.8.8") + 
vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan100', '8.8.8.8', '1000') + print("Testing remote endpoint creation to 8.8.8.8") + + print("Testing VLAN 100 extension to 8.8.8.8") + vxlan_obj.check_vlan_extension_p2mp(dvs, '100', '6.6.6.6', '8.8.8.8') + + print("Testing Last Vlan removal and remote endpoint delete for 8.8.8.8") + vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan100', '8.8.8.8') + vxlan_obj.check_vlan_extension_delete_p2mp(dvs, '100', '6.6.6.6', '8.8.8.8') + vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name_1, '1001', 'Vlan101') vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name_2, '1002', 'Vlan102') @@ -110,4 +124,93 @@ def test_vlan_extension(self, dvs, testlog): print("Testing SIP Tunnel Deletion") vxlan_obj.remove_evpn_nvo(dvs, 'nvo1') vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) - vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6') + vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6', ignore_bp=False) + vxlan_obj.remove_vlan(dvs, "100") + vxlan_obj.remove_vlan(dvs, "101") + vxlan_obj.remove_vlan(dvs, "102") + + def test_delayed_vlan_vni_map(self, dvs, testlog): + vxlan_obj = self.get_vxlan_obj() + + tunnel_name = 'tunnel_2' + map_name = 'map_1000_100' + map_name_1 = 'map_1001_101' + vlanlist = ['100'] + vnilist = ['1000'] + + vxlan_obj.fetch_exist_entries(dvs) + vxlan_obj.create_vlan1(dvs,"Vlan100") + vxlan_obj.create_vlan1(dvs,"Vlan101") + + vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') + vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') + + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, ignore_bp=False, tunnel_map_entry_count = 1) + vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) + + vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name) + + vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan101', '7.7.7.7', '1001') + 
vxlan_obj.check_vlan_extension_not_created_p2mp(dvs, '101', '6.6.6.6', '7.7.7.7') + vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name_1, '1001', 'Vlan101') + + print("Testing VLAN 101 extension") + vxlan_obj.check_vlan_extension_p2mp(dvs, '101', '6.6.6.6', '7.7.7.7') + + print("Testing Vlan Extension removal") + vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan101', '7.7.7.7') + vxlan_obj.check_vlan_extension_delete_p2mp(dvs, '101', '6.6.6.6', '7.7.7.7') + + vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') + vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name_1, '1001', 'Vlan101') + vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist) + + print("Testing SIP Tunnel Deletion") + vxlan_obj.remove_evpn_nvo(dvs, 'nvo1') + vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) + vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6', ignore_bp=False) + vxlan_obj.remove_vlan(dvs, "100") + vxlan_obj.remove_vlan(dvs, "101") + + def test_invalid_vlan_extension(self, dvs, testlog): + vxlan_obj = self.get_vxlan_obj() + + tunnel_name = 'tunnel_2' + map_name = 'map_1000_100' + map_name_1 = 'map_1001_101' + vlanlist = ['100'] + vnilist = ['1000'] + + vxlan_obj.fetch_exist_entries(dvs) + vxlan_obj.create_vlan1(dvs,"Vlan100") + + vxlan_obj.create_vxlan_tunnel(dvs, tunnel_name, '6.6.6.6') + vxlan_obj.create_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') + + vxlan_obj.check_vxlan_sip_tunnel(dvs, tunnel_name, '6.6.6.6', vlanlist, vnilist, ignore_bp=False, tunnel_map_entry_count = 1) + vxlan_obj.check_vxlan_tunnel_map_entry(dvs, tunnel_name, vlanlist, vnilist) + + vxlan_obj.create_evpn_nvo(dvs, 'nvo1', tunnel_name) + + vxlan_obj.create_vrf(dvs, "Vrf-RED") + vxlan_obj.create_vxlan_vrf_tunnel_map(dvs, 'Vrf-RED', '1000') + + vxlan_obj.create_evpn_remote_vni(dvs, 'Vlan100', '7.7.7.7', '1000') + vxlan_obj.check_vlan_extension_not_created_p2mp(dvs, '100', '6.6.6.6', '7.7.7.7') + + 
vxlan_obj.remove_vxlan_vrf_tunnel_map(dvs, 'Vrf-RED') + vxlan_obj.remove_vrf(dvs, "Vrf-RED") + print("Testing VLAN 100 extension") + vxlan_obj.check_vlan_extension_p2mp(dvs, '100', '6.6.6.6', '7.7.7.7') + + print("Testing Vlan Extension removal") + vxlan_obj.remove_evpn_remote_vni(dvs, 'Vlan100', '7.7.7.7') + vxlan_obj.check_vlan_extension_delete_p2mp(dvs, '100', '6.6.6.6', '7.7.7.7') + + vxlan_obj.remove_vxlan_tunnel_map(dvs, tunnel_name, map_name, '1000', 'Vlan100') + vxlan_obj.check_vxlan_tunnel_map_entry_delete(dvs, tunnel_name, vlanlist, vnilist) + + print("Testing SIP Tunnel Deletion") + vxlan_obj.remove_evpn_nvo(dvs, 'nvo1') + vxlan_obj.remove_vxlan_tunnel(dvs, tunnel_name) + vxlan_obj.check_vxlan_sip_tunnel_delete(dvs, tunnel_name, '6.6.6.6', ignore_bp=False) diff --git a/tests/test_fabric.py b/tests/test_fabric.py new file mode 100644 index 0000000000..2d1ea8c293 --- /dev/null +++ b/tests/test_fabric.py @@ -0,0 +1,83 @@ +from swsscommon import swsscommon +from dvslib.dvs_database import DVSDatabase +import ast +import json + +# Fabric counters +NUMBER_OF_RETRIES = 10 + +counter_group_meta = { + 'fabric_port_counter': { + 'key': 'FABRIC_PORT', + 'group_name': 'FABRIC_PORT_STAT_COUNTER', + 'name_map': 'COUNTERS_FABRIC_PORT_NAME_MAP', + 'post_test': 'post_port_counter_test', + }, + 'fabric_queue_counter': { + 'key': 'FABRIC_QUEUE', + 'group_name': 'FABRIC_QUEUE_STAT_COUNTER', + 'name_map': 'COUNTERS_FABRIC_QUEUE_NAME_MAP', + }, +} + +class TestVirtualChassis(object): + + def wait_for_id_list(self, flex_db, stat, name, oid): + for retry in range(NUMBER_OF_RETRIES): + id_list = flex_db.db_connection.hgetall("FLEX_COUNTER_TABLE:" + stat + ":" + oid).items() + if len(id_list) > 0: + return + else: + time.sleep(1) + + assert False, "No ID list for counter " + str(name) + + def verify_flex_counters_populated(self, flex_db, counters_db, map, stat): + counters_keys = counters_db.db_connection.hgetall(map) + for counter_entry in counters_keys.items(): + name = 
counter_entry[0] + oid = counter_entry[1] + self.wait_for_id_list(flex_db, stat, name, oid) + + def test_voq_switch(self, vst): + """Test VOQ switch objects configuration. + + This test validates configuration of switch creation objects required for + VOQ switches. The switch_type, max_cores and switch_id attributes configuration + are verified. For the System port config list, it is verified that all the + configured system ports are avaiable in the asic db by checking the count. + """ + + if vst is None: + return + + dvss = vst.dvss + for name in dvss.keys(): + dvs = dvss[name] + # Get the config info + config_db = dvs.get_config_db() + metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") + + cfg_switch_type = metatbl.get("switch_type") + if cfg_switch_type == "fabric": + flex_db = dvs.get_flex_db() + counters_db = dvs.get_counters_db() + for ct in counter_group_meta.keys(): + meta_data = counter_group_meta[ct] + counter_key = meta_data['key'] + counter_stat = meta_data['group_name'] + counter_map = meta_data['name_map'] + self.verify_flex_counters_populated(flex_db, counters_db, counter_map, counter_stat) + + port_counters_keys = counters_db.db_connection.hgetall(meta_data['name_map']) + port_counters_stat_keys = flex_db.get_keys("FLEX_COUNTER_TABLE:" + meta_data['group_name']) + for port_stat in port_counters_stat_keys: + assert port_stat in dict(port_counters_keys.items()).values(), "Non port created on PORT_STAT_COUNTER group: {}".format(port_stat) + else: + print( "We do not check switch type:", cfg_switch_type ) + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass + diff --git a/tests/test_fdb.py b/tests/test_fdb.py index 9893a4e3b0..06c79f230f 100644 --- a/tests/test_fdb.py +++ b/tests/test_fdb.py @@ -31,9 +31,10 @@ class TestFdb(object): def test_FdbWarmRestartNotifications(self, dvs, testlog): dvs.setup_db() - 
dvs.runcmd("sonic-clear fdb all") + dvs.clear_fdb() - dvs.runcmd("crm config polling interval 1") + dvs.crm_poll_set("1") + dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_FDB_ENTRY', '1000') time.sleep(2) @@ -225,11 +226,10 @@ def test_FdbWarmRestartNotifications(self, dvs, testlog): assert ok, str(extra) # enable warm restart - (exitcode, result) = dvs.runcmd("config warm_restart enable swss") - assert exitcode == 0 + dvs.warm_restart_swss("true") # freeze orchagent for warm restart - (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check") + (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check", include_stderr=False) assert result == "RESTARTCHECK succeeded\n" time.sleep(2) @@ -317,14 +317,14 @@ def test_FdbWarmRestartNotifications(self, dvs, testlog): finally: # disable warm restart - dvs.runcmd("config warm_restart disable swss") + dvs.warm_restart_swss("false") # slow down crm polling - dvs.runcmd("crm config polling interval 10000") + dvs.crm_poll_set("10000") def test_FdbAddedAfterMemberCreated(self, dvs, testlog): dvs.setup_db() - dvs.runcmd("sonic-clear fdb all") + dvs.clear_fdb() time.sleep(2) # create a FDB entry in Application DB @@ -377,7 +377,7 @@ def test_FdbAddedAfterMemberCreated(self, dvs, testlog): ("SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID", iface_2_bridge_port_id["Ethernet0"])]) assert ok, str(extra) - dvs.runcmd("sonic-clear fdb all") + dvs.clear_fdb() dvs.remove_vlan_member("2", "Ethernet0") dvs.remove_vlan("2") diff --git a/tests/test_fdb_update.py b/tests/test_fdb_update.py index 5daf27804e..128dc3773b 100644 --- a/tests/test_fdb_update.py +++ b/tests/test_fdb_update.py @@ -56,8 +56,7 @@ def get_mac_by_bridge_id(self, dvs, bridge_id): def test_FDBAddedAndUpdated(self, dvs, testlog): dvs.setup_db() - - dvs.runcmd("sonic-clear fdb all") + dvs.clear_fdb() time.sleep(2) # create a FDB entry in Application DB @@ -173,7 +172,7 @@ def test_FDBAddedAndUpdated(self, dvs, testlog): def 
test_FDBLearnedAndUpdated(self, dvs, testlog): dvs.setup_db() - dvs.runcmd("sonic-clear fdb all") + dvs.clear_fdb() # create vlan; create vlan member dvs.create_vlan("6") @@ -261,12 +260,12 @@ def test_FDBLearnedAndUpdated(self, dvs, testlog): dvs.remove_vlan("6") # clear fdb - dvs.runcmd("sonic-clear fdb all") + dvs.clear_fdb() def test_FDBLearnedAndFlushed(self, dvs, testlog): dvs.setup_db() - dvs.runcmd("sonic-clear fdb all") + dvs.clear_fdb() VLAN = "9" VLAN_NAME = "Vlan9" diff --git a/tests/test_fgnhg.py b/tests/test_fgnhg.py index 2fa8a9d890..645853e24c 100644 --- a/tests/test_fgnhg.py +++ b/tests/test_fgnhg.py @@ -216,7 +216,7 @@ def startup_link(dvs, db, port): db.wait_for_field_match("PORT_TABLE", "Ethernet%d" % (port * 4), {"oper_status": "up"}) def run_warm_reboot(dvs): - dvs.runcmd("config warm_restart enable swss") + dvs.warm_restart_swss("true") # Stop swss before modifing the configDB dvs.stop_swss() @@ -280,7 +280,7 @@ def create_interface_n_fg_ecmp_config(dvs, nh_range_start, nh_range_end, fg_nhg_ ip_pref_key = "Ethernet" + str(i*4) + "|10.0.0." + str(i*2) + "/31" create_entry(config_db, IF_TB, if_name_key, fvs_nul) create_entry(config_db, IF_TB, ip_pref_key, fvs_nul) - dvs.runcmd("config interface startup " + if_name_key) + dvs.port_admin_set(if_name_key, "up") shutdown_link(dvs, app_db, i) startup_link(dvs, app_db, i) bank = 1 @@ -300,7 +300,7 @@ def remove_interface_n_fg_ecmp_config(dvs, nh_range_start, nh_range_end, fg_nhg_ ip_pref_key = "Ethernet" + str(i*4) + "|10.0.0." + str(i*2) + "/31" remove_entry(config_db, IF_TB, if_name_key) remove_entry(config_db, IF_TB, ip_pref_key) - dvs.runcmd("config interface shutdown " + if_name_key) + dvs.port_admin_set(if_name_key, "down") shutdown_link(dvs, app_db, i) remove_entry(config_db, FG_NHG_MEMBER, "10.0.0." 
+ str(1 + i*2)) remove_entry(config_db, FG_NHG, fg_nhg_name) @@ -334,7 +334,7 @@ def fine_grained_ecmp_base_test(dvs, match_mode): create_entry(config_db, VLAN_MEMB_TB, vlan_name_key + "|" + if_name_key, fvs) create_entry(config_db, VLAN_IF_TB, vlan_name_key, fvs_nul) create_entry(config_db, VLAN_IF_TB, ip_pref_key, fvs_nul) - dvs.runcmd("config interface startup " + if_name_key) + dvs.port_admin_set(if_name_key, "up") dvs.servers[i].runcmd("ip link set down dev eth0") == 0 dvs.servers[i].runcmd("ip link set up dev eth0") == 0 bank = 0 @@ -619,7 +619,7 @@ def fine_grained_ecmp_base_test(dvs, match_mode): remove_entry(config_db, VLAN_IF_TB, vlan_name_key) remove_entry(config_db, VLAN_MEMB_TB, vlan_name_key + "|" + if_name_key) remove_entry(config_db, VLAN_TB, vlan_name_key) - dvs.runcmd("config interface shutdown " + if_name_key) + dvs.port_admin_set(if_name_key, "down") dvs.servers[i].runcmd("ip link set down dev eth0") == 0 remove_entry(config_db, "FG_NHG_MEMBER", "10.0.0." + str(1 + i*2)) @@ -770,7 +770,7 @@ def test_fgnhg_matchmode_nexthop_multi_route(self, dvs, testlog): ip_pref_key = "Ethernet" + str(i*4) + "|10.0.0." + str(i*2) + "/31" create_entry(config_db, IF_TB, if_name_key, fvs_nul) create_entry(config_db, IF_TB, ip_pref_key, fvs_nul) - dvs.runcmd("config interface startup " + if_name_key) + dvs.port_admin_set(if_name_key, "up") shutdown_link(dvs, app_db, i) startup_link(dvs, app_db, i) dvs.runcmd("arp -s 10.0.0." 
+ str(1 + i*2) + " 00:00:00:00:00:" + str(1 + i*2)) diff --git a/tests/test_flex_counters.py b/tests/test_flex_counters.py index ea950af7c1..eefef8bab4 100644 --- a/tests/test_flex_counters.py +++ b/tests/test_flex_counters.py @@ -3,8 +3,9 @@ from swsscommon import swsscommon -TUNNEL_TYPE_MAP = "COUNTERS_TUNNEL_TYPE_MAP" -NUMBER_OF_RETRIES = 10 +TUNNEL_TYPE_MAP = "COUNTERS_TUNNEL_TYPE_MAP" +ROUTE_TO_PATTERN_MAP = "COUNTERS_ROUTE_TO_PATTERN_MAP" +NUMBER_OF_RETRIES = 10 CPU_PORT_OID = "0x0" counter_group_meta = { @@ -19,6 +20,11 @@ 'group_name': 'QUEUE_STAT_COUNTER', 'name_map': 'COUNTERS_QUEUE_NAME_MAP', }, + 'queue_watermark_counter': { + 'key': 'QUEUE_WATERMARK', + 'group_name': 'QUEUE_WATERMARK_STAT_COUNTER', + 'name_map': 'COUNTERS_QUEUE_NAME_MAP', + }, 'rif_counter': { 'key': 'RIF', 'group_name': 'RIF_STAT_COUNTER', @@ -36,6 +42,11 @@ 'group_name': 'PORT_BUFFER_DROP_STAT', 'name_map': 'COUNTERS_PORT_NAME_MAP', }, + 'pg_drop_counter': { + 'key': 'PG_DROP', + 'group_name': 'PG_DROP_STAT_COUNTER', + 'name_map': 'COUNTERS_PG_NAME_MAP', + }, 'pg_watermark_counter': { 'key': 'PG_WATERMARK', 'group_name': 'PG_WATERMARK_STAT_COUNTER', @@ -60,10 +71,16 @@ 'name_map': 'ACL_COUNTER_RULE_MAP', 'pre_test': 'pre_acl_tunnel_counter_test', 'post_test': 'post_acl_tunnel_counter_test', + }, + 'route_flow_counter': { + 'key': 'FLOW_CNT_ROUTE', + 'group_name': 'ROUTE_FLOW_COUNTER', + 'name_map': 'COUNTERS_ROUTE_NAME_MAP', + 'pre_test': 'pre_route_flow_counter_test', + 'post_test': 'post_route_flow_counter_test', } } - class TestFlexCounters(object): def setup_dbs(self, dvs): @@ -123,6 +140,18 @@ def wait_for_interval_set(self, group, interval): assert False, "Polling interval is not applied to FLEX_COUNTER_GROUP_TABLE for group {}, expect={}, actual={}".format(group, interval, interval_value) + def wait_for_buffer_pg_queue_counter(self, map, port, index, isSet): + for retry in range(NUMBER_OF_RETRIES): + counter_oid = self.counters_db.db_connection.hget(map, port + ':' + index) + 
if (isSet and counter_oid): + return counter_oid + elif (not isSet and not counter_oid): + return None + else: + time.sleep(1) + + assert False, "Counter not {} for port: {}, type: {}, index: {}".format("created" if isSet else "removed", port, map, index) + def verify_no_flex_counters_tables(self, counter_stat): counters_stat_keys = self.flex_db.get_keys("FLEX_COUNTER_TABLE:" + counter_stat) assert len(counters_stat_keys) == 0, "FLEX_COUNTER_TABLE:" + str(counter_stat) + " tables exist before enabling the flex counter group" @@ -157,13 +186,14 @@ def verify_only_phy_ports_created(self, meta_data): for port_stat in port_counters_stat_keys: assert port_stat in dict(port_counters_keys.items()).values(), "Non PHY port created on PORT_STAT_COUNTER group: {}".format(port_stat) - def set_flex_counter_group_status(self, group, map, status='enable'): + def set_flex_counter_group_status(self, group, map, status='enable', check_name_map=True): group_stats_entry = {"FLEX_COUNTER_STATUS": status} self.config_db.create_entry("FLEX_COUNTER_TABLE", group, group_stats_entry) - if status == 'enable': - self.wait_for_table(map) - else: - self.wait_for_table_empty(map) + if check_name_map: + if status == 'enable': + self.wait_for_table(map) + else: + self.wait_for_table_empty(map) def set_flex_counter_group_interval(self, key, group, interval): group_stats_entry = {"POLL_INTERVAL": interval} @@ -184,6 +214,7 @@ def test_flex_counters(self, dvs, counter_type): counter_map = meta_data['name_map'] pre_test = meta_data.get('pre_test') post_test = meta_data.get('post_test') + meta_data['dvs'] = dvs self.verify_no_flex_counters_tables(counter_stat) @@ -225,6 +256,37 @@ def pre_acl_tunnel_counter_test(self, meta_data): } ) + def pre_route_flow_counter_test(self, meta_data): + dvs = meta_data['dvs'] + self.config_db.create_entry('FLOW_COUNTER_ROUTE_PATTERN', '1.1.0.0/16', + { + 'max_match_count': '30' + } + ) + self.config_db.create_entry('FLOW_COUNTER_ROUTE_PATTERN', '2000::/64', + { + 
'max_match_count': '30' + } + ) + + self.create_l3_intf("Ethernet0", "") + self.add_ip_address("Ethernet0", "10.0.0.0/31") + self.set_admin_status("Ethernet0", "up") + dvs.servers[0].runcmd("ip address add 10.0.0.1/31 dev eth0") + dvs.servers[0].runcmd("ip route add default via 10.0.0.0") + dvs.servers[0].runcmd("ping -c 1 10.0.0.1") + dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 1.1.1.0/24 10.0.0.1\"") + + self.create_l3_intf("Ethernet4", "") + self.set_admin_status("Ethernet4", "up") + self.add_ip_address("Ethernet4", "2001::1/64") + dvs.runcmd("sysctl -w net.ipv6.conf.all.forwarding=1") + dvs.servers[1].runcmd("ip -6 address add 2001::2/64 dev eth0") + dvs.servers[1].runcmd("ip -6 route add default via 2001::1") + time.sleep(2) + dvs.servers[1].runcmd("ping -6 -c 1 2001::1") + dvs.runcmd("vtysh -c \"configure terminal\" -c \"ipv6 route 2000::/64 2001::2\"") + def post_rif_counter_test(self, meta_data): self.config_db.db_connection.hdel('INTERFACE|Ethernet0|192.168.0.1/24', "NULL") @@ -241,7 +303,7 @@ def post_trap_flow_counter_test(self, meta_data): meta_data (object): flex counter meta data """ counters_keys = self.counters_db.db_connection.hgetall(meta_data['name_map']) - self.set_flex_counter_group_status(meta_data['key'], meta_data['group_name'], 'disable') + self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map'], 'disable') for counter_entry in counters_keys.items(): self.wait_for_id_list_remove(meta_data['group_name'], counter_entry[0], counter_entry[1]) @@ -258,6 +320,46 @@ def post_acl_tunnel_counter_test(self, meta_data): self.config_db.delete_entry('ACL_RULE', 'DATAACL|RULE0') self.config_db.delete_entry('ACL_TABLE', 'DATAACL') + def post_route_flow_counter_test(self, meta_data): + dvs = meta_data['dvs'] + # Verify prefix to route pattern name map + self.wait_for_table(ROUTE_TO_PATTERN_MAP) + + # Remove route pattern and verify related couters are removed + v4_name_map_key = '1.1.1.0/24' + counter_oid = 
self.counters_db.db_connection.hget(meta_data['name_map'], v4_name_map_key) + assert counter_oid + self.config_db.delete_entry('FLOW_COUNTER_ROUTE_PATTERN', '1.1.0.0/16') + self.wait_for_id_list_remove(meta_data['group_name'], v4_name_map_key, counter_oid) + counter_oid = self.counters_db.db_connection.hget(meta_data['name_map'], v4_name_map_key) + assert not counter_oid + route_pattern = self.counters_db.db_connection.hget(ROUTE_TO_PATTERN_MAP, v4_name_map_key) + assert not route_pattern + + # Disable route flow counter and verify all counters are removed + v6_name_map_key = '2000::/64' + counter_oid = self.counters_db.db_connection.hget(meta_data['name_map'], v6_name_map_key) + assert counter_oid + self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map'], 'disable') + self.wait_for_id_list_remove(meta_data['group_name'], v6_name_map_key, counter_oid) + self.wait_for_table_empty(meta_data['name_map']) + self.wait_for_table_empty(ROUTE_TO_PATTERN_MAP) + + dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ip route {} 10.0.0.1\"".format(v4_name_map_key)) + self.remove_ip_address("Ethernet0", "10.0.0.0/31") + self.remove_l3_intf("Ethernet0") + self.set_admin_status("Ethernet0", "down") + dvs.servers[0].runcmd("ip route del default dev eth0") + dvs.servers[0].runcmd("ip address del 10.0.0.1/31 dev eth0") + + dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ipv6 route 2000::/64 2001::2\"") + self.remove_ip_address("Ethernet4", "2001::1/64") + self.remove_l3_intf("Ethernet4") + self.set_admin_status("Ethernet4", "down") + dvs.servers[1].runcmd("ip -6 route del default dev eth0") + dvs.servers[1].runcmd("ip -6 address del 2001::2/64 dev eth0") + self.config_db.delete_entry('FLOW_COUNTER_ROUTE_PATTERN', '2000::/64') + def test_add_remove_trap(self, dvs): """Test steps: 1. 
Enable trap_flow_counter @@ -320,7 +422,7 @@ def test_add_remove_trap(self, dvs): assert oid, 'Add trap {}, but trap counter is not created'.format(removed_trap) self.wait_for_id_list(meta_data['group_name'], removed_trap, oid) - self.set_flex_counter_group_status(meta_data['key'], meta_data['group_name'], 'disable') + self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map'], 'disable') def test_remove_trap_group(self, dvs): """Remove trap group and verify that all related trap counters are removed @@ -371,4 +473,325 @@ def test_remove_trap_group(self, dvs): for trap_id in trap_ids: assert trap_id not in counters_keys + self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map'], 'disable') + + def test_update_route_pattern(self, dvs): + self.setup_dbs(dvs) + self.config_db.create_entry('FLOW_COUNTER_ROUTE_PATTERN', '1.1.0.0/16', + { + 'max_match_count': '30' + } + ) + self.create_l3_intf("Ethernet0", "") + self.create_l3_intf("Ethernet4", "") + self.add_ip_address("Ethernet0", "10.0.0.0/31") + self.add_ip_address("Ethernet4", "10.0.0.2/31") + self.set_admin_status("Ethernet0", "up") + self.set_admin_status("Ethernet4", "up") + # set ip address and default route + dvs.servers[0].runcmd("ip address add 10.0.0.1/31 dev eth0") + dvs.servers[0].runcmd("ip route add default via 10.0.0.0") + dvs.servers[1].runcmd("ip address add 10.0.0.3/31 dev eth0") + dvs.servers[1].runcmd("ip route add default via 10.0.0.2") + dvs.servers[0].runcmd("ping -c 1 10.0.0.3") + + dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 1.1.1.0/24 10.0.0.1\"") + dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 2.2.2.0/24 10.0.0.3\"") + + meta_data = counter_group_meta['route_flow_counter'] + self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map']) + self.wait_for_table(meta_data['name_map']) + self.wait_for_table(ROUTE_TO_PATTERN_MAP) + counter_oid = self.counters_db.db_connection.hget(meta_data['name_map'], '1.1.1.0/24') + 
self.wait_for_id_list(meta_data['group_name'], '1.1.1.0/24', counter_oid) + assert not self.counters_db.db_connection.hget(meta_data['name_map'], '2.2.2.0/24') + assert not self.counters_db.db_connection.hget(ROUTE_TO_PATTERN_MAP, '2.2.2.0/24') + + self.config_db.delete_entry('FLOW_COUNTER_ROUTE_PATTERN', '1.1.0.0/16') + self.wait_for_id_list_remove(meta_data['group_name'], '1.1.1.0/24', counter_oid) + self.wait_for_table_empty(meta_data['name_map']) + self.wait_for_table_empty(ROUTE_TO_PATTERN_MAP) + assert not self.counters_db.db_connection.hget(meta_data['name_map'], '1.1.1.0/24') + assert not self.counters_db.db_connection.hget(ROUTE_TO_PATTERN_MAP, '1.1.1.0/24') + + self.config_db.create_entry('FLOW_COUNTER_ROUTE_PATTERN', '2.2.0.0/16', + { + 'max_match_count': '30' + } + ) + self.wait_for_table(meta_data['name_map']) + self.wait_for_table(ROUTE_TO_PATTERN_MAP) + counter_oid = self.counters_db.db_connection.hget(meta_data['name_map'], '2.2.2.0/24') + self.wait_for_id_list(meta_data['group_name'], '2.2.2.0/24', counter_oid) + + self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map'], 'disable') + self.wait_for_id_list_remove(meta_data['group_name'], '2.2.2.0/24', counter_oid) + self.wait_for_table_empty(meta_data['name_map']) + self.wait_for_table_empty(ROUTE_TO_PATTERN_MAP) + + self.config_db.delete_entry('FLOW_COUNTER_ROUTE_PATTERN', '2.2.0.0/16') + dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ip route {} 10.0.0.1\"".format('1.1.1.0/24')) + dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ip route {} 10.0.0.3\"".format('2.2.2.0/24')) + + # remove ip address + self.remove_ip_address("Ethernet0", "10.0.0.0/31") + self.remove_ip_address("Ethernet4", "10.0.0.2/31") + + # remove l3 interface + self.remove_l3_intf("Ethernet0") + self.remove_l3_intf("Ethernet4") + + self.set_admin_status("Ethernet0", "down") + self.set_admin_status("Ethernet4", "down") + + # remove ip address and default route + dvs.servers[0].runcmd("ip route del 
default dev eth0") + dvs.servers[0].runcmd("ip address del 10.0.0.1/31 dev eth0") + + dvs.servers[1].runcmd("ip route del default dev eth0") + dvs.servers[1].runcmd("ip address del 10.0.0.3/31 dev eth0") + + def test_add_remove_route_flow_counter(self, dvs): + self.setup_dbs(dvs) + self.config_db.create_entry('FLOW_COUNTER_ROUTE_PATTERN', '1.1.0.0/16', + { + 'max_match_count': '30' + } + ) + meta_data = counter_group_meta['route_flow_counter'] + self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map'], check_name_map=False) + + self.create_l3_intf("Ethernet0", "") + self.add_ip_address("Ethernet0", "10.0.0.0/31") + self.set_admin_status("Ethernet0", "up") + dvs.servers[0].runcmd("ip address add 10.0.0.1/31 dev eth0") + dvs.servers[0].runcmd("ip route add default via 10.0.0.0") + dvs.servers[0].runcmd("ping -c 1 10.0.0.1") + dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 1.1.1.0/24 10.0.0.1\"") + + self.wait_for_table(meta_data['name_map']) + self.wait_for_table(ROUTE_TO_PATTERN_MAP) + counter_oid = self.counters_db.db_connection.hget(meta_data['name_map'], '1.1.1.0/24') + self.wait_for_id_list(meta_data['group_name'], '1.1.1.0/24', counter_oid) + + dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ip route {} 10.0.0.1\"".format('1.1.1.0/24')) + self.wait_for_id_list_remove(meta_data['group_name'], '1.1.1.0/24', counter_oid) + self.wait_for_table_empty(meta_data['name_map']) + self.wait_for_table_empty(ROUTE_TO_PATTERN_MAP) + + self.config_db.delete_entry('FLOW_COUNTER_ROUTE_PATTERN', '1.1.0.0/16') self.set_flex_counter_group_status(meta_data['key'], meta_data['group_name'], 'disable') + + # remove ip address + self.remove_ip_address("Ethernet0", "10.0.0.0/31") + + # remove l3 interface + self.remove_l3_intf("Ethernet0") + + self.set_admin_status("Ethernet0", "down") + + # remove ip address and default route + dvs.servers[0].runcmd("ip route del default dev eth0") + dvs.servers[0].runcmd("ip address del 10.0.0.1/31 dev eth0") + + def 
test_router_flow_counter_max_match_count(self, dvs): + self.setup_dbs(dvs) + self.config_db.create_entry('FLOW_COUNTER_ROUTE_PATTERN', '1.1.0.0/16', + { + 'max_match_count': '1' + } + ) + meta_data = counter_group_meta['route_flow_counter'] + self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map'], check_name_map=False) + self.create_l3_intf("Ethernet0", "") + self.create_l3_intf("Ethernet4", "") + self.add_ip_address("Ethernet0", "10.0.0.0/31") + self.add_ip_address("Ethernet4", "10.0.0.2/31") + self.set_admin_status("Ethernet0", "up") + self.set_admin_status("Ethernet4", "up") + # set ip address and default route + dvs.servers[0].runcmd("ip address add 10.0.0.1/31 dev eth0") + dvs.servers[0].runcmd("ip route add default via 10.0.0.0") + dvs.servers[1].runcmd("ip address add 10.0.0.3/31 dev eth0") + dvs.servers[1].runcmd("ip route add default via 10.0.0.2") + dvs.servers[0].runcmd("ping -c 1 10.0.0.3") + dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 1.1.1.0/24 10.0.0.1\"") + dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 1.1.2.0/24 10.0.0.3\"") + + self.wait_for_table(meta_data['name_map']) + self.wait_for_table(ROUTE_TO_PATTERN_MAP) + counter_oid = self.counters_db.db_connection.hget(meta_data['name_map'], '1.1.1.0/24') + self.wait_for_id_list(meta_data['group_name'], '1.1.1.0/24', counter_oid) + assert not self.counters_db.db_connection.hget(meta_data['name_map'], '1.1.2.0/24') + self.config_db.update_entry('FLOW_COUNTER_ROUTE_PATTERN', '1.1.0.0/16', + { + 'max_match_count': '2' + } + ) + for _ in range(NUMBER_OF_RETRIES): + counter_oid = self.counters_db.db_connection.hget(meta_data['name_map'], '1.1.2.0/24') + if not counter_oid: + time.sleep(1) + else: + break + assert counter_oid + self.wait_for_id_list(meta_data['group_name'], '1.1.2.0/24', counter_oid) + + self.config_db.update_entry('FLOW_COUNTER_ROUTE_PATTERN', '1.1.0.0/16', + { + 'max_match_count': '1' + } + ) + + for _ in range(NUMBER_OF_RETRIES): + 
counters_keys = self.counters_db.db_connection.hgetall(meta_data['name_map']) + if len(counters_keys) == 1: + break + else: + time.sleep(1) + + assert len(counters_keys) == 1 + + to_remove = '1.1.2.0/24' if '1.1.2.0/24' in counters_keys else '1.1.1.0/24' + to_remove_nexthop = '10.0.0.3' if '1.1.2.0/24' in counters_keys else '10.0.0.1' + to_bound = '1.1.2.0/24' if '1.1.1.0/24' == to_remove else '1.1.1.0/24' + to_bound_nexthop = '10.0.0.1' if '1.1.2.0/24' in counters_keys else '10.0.0.3' + + dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ip route {} {}\"".format(to_remove, to_remove_nexthop)) + for _ in range(NUMBER_OF_RETRIES): + counter_oid = self.counters_db.db_connection.hget(meta_data['name_map'], to_bound) + if not counter_oid: + time.sleep(1) + else: + break + assert counter_oid + self.wait_for_id_list(meta_data['group_name'], to_bound, counter_oid) + counters_keys = self.counters_db.db_connection.hgetall(meta_data['name_map']) + assert to_remove not in counters_keys + assert to_bound in counters_keys + counters_keys = self.counters_db.db_connection.hgetall(ROUTE_TO_PATTERN_MAP) + assert to_remove not in counters_keys + assert to_bound in counters_keys + + dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ip route {} {}\"".format(to_bound, to_bound_nexthop)) + + # remove ip address + self.remove_ip_address("Ethernet0", "10.0.0.0/31") + self.remove_ip_address("Ethernet4", "10.0.0.2/31") + + # remove l3 interface + self.remove_l3_intf("Ethernet0") + self.remove_l3_intf("Ethernet4") + + self.set_admin_status("Ethernet0", "down") + self.set_admin_status("Ethernet4", "down") + + # remove ip address and default route + dvs.servers[0].runcmd("ip route del default dev eth0") + dvs.servers[0].runcmd("ip address del 10.0.0.1/31 dev eth0") + + dvs.servers[1].runcmd("ip route del default dev eth0") + dvs.servers[1].runcmd("ip address del 10.0.0.3/31 dev eth0") + self.config_db.delete_entry('FLOW_COUNTER_ROUTE_PATTERN', '1.1.0.0/16') + + def create_l3_intf(self, 
interface, vrf_name): + if len(vrf_name) == 0: + self.config_db.create_entry("INTERFACE", interface, {"NULL": "NULL"}) + else: + self.config_db.create_entry("INTERFACE", interface, {"vrf_name": vrf_name}) + + def remove_l3_intf(self, interface): + self.config_db.delete_entry("INTERFACE", interface) + + def add_ip_address(self, interface, ip): + self.config_db.create_entry("INTERFACE", interface + "|" + ip, {"NULL": "NULL"}) + + def remove_ip_address(self, interface, ip): + self.config_db.delete_entry("INTERFACE", interface + "|" + ip) + + def set_admin_status(self, interface, status): + self.config_db.update_entry("PORT", interface, {"admin_status": status}) + + def test_create_remove_buffer_pg_watermark_counter(self, dvs): + """ + Test steps: + 1. Enable PG flex counters. + 2. Configure new buffer prioriy group for a port + 3. Verify counter is automatically created + 4. Remove the new buffer prioriy group for the port + 5. Verify counter is automatically removed + + Args: + dvs (object): virtual switch object + """ + self.setup_dbs(dvs) + meta_data = counter_group_meta['pg_watermark_counter'] + + self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map']) + + self.config_db.update_entry('BUFFER_PG', 'Ethernet0|1', {'profile': 'ingress_lossy_profile'}) + counter_oid = self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '1', True) + self.wait_for_id_list(meta_data['group_name'], "Ethernet0", counter_oid) + + self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|1') + self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '1', False) + self.wait_for_id_list_remove(meta_data['group_name'], "Ethernet0", counter_oid) + + def test_create_remove_buffer_queue_counter(self, dvs): + """ + Test steps: + 1. Enable Queue flex counters. + 2. Configure new buffer queue for a port + 3. Verify counter is automatically created + 4. Remove the new buffer queue for the port + 5. 
Verify counter is automatically removed + + Args: + dvs (object): virtual switch object + """ + self.setup_dbs(dvs) + meta_data = counter_group_meta['queue_counter'] + + self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map']) + + self.config_db.update_entry('BUFFER_QUEUE', 'Ethernet0|7', {'profile': 'egress_lossless_profile'}) + counter_oid = self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '7', True) + self.wait_for_id_list(meta_data['group_name'], "Ethernet0", counter_oid) + + self.config_db.delete_entry('BUFFER_QUEUE', 'Ethernet0|7') + self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '7', False) + self.wait_for_id_list_remove(meta_data['group_name'], "Ethernet0", counter_oid) + + def test_create_remove_buffer_watermark_queue_pg_counter(self, dvs): + """ + Test steps: + 1. Enable Queue/Watermark/PG-drop flex counters. + 2. Configure new buffer queue for a port + 3. Verify counters is automatically created + 4. Remove the new buffer queue for the port + 5. 
Verify counters is automatically removed + + Args: + dvs (object): virtual switch object + """ + self.setup_dbs(dvs) + + # set flex counter + for counterpoll_type, meta_data in counter_group_meta.items(): + if 'queue' in counterpoll_type or 'pg' in counterpoll_type: + self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map']) + + self.config_db.update_entry('BUFFER_PG', 'Ethernet0|7', {'profile': 'ingress_lossy_profile'}) + self.config_db.update_entry('BUFFER_QUEUE', 'Ethernet0|7', {'profile': 'egress_lossless_profile'}) + + for counterpoll_type, meta_data in counter_group_meta.items(): + if 'queue' in counterpoll_type or 'pg' in counterpoll_type: + counter_oid = self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '7', True) + self.wait_for_id_list(meta_data['group_name'], "Ethernet0", counter_oid) + + self.config_db.delete_entry('BUFFER_QUEUE', 'Ethernet0|7') + self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|7') + for counterpoll_type, meta_data in counter_group_meta.items(): + if 'queue' in counterpoll_type or 'pg' in counterpoll_type: + self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '7', False) + self.wait_for_id_list_remove(meta_data['group_name'], "Ethernet0", counter_oid) diff --git a/tests/test_gearbox.py b/tests/test_gearbox.py index 00a87c2f96..6707213990 100644 --- a/tests/test_gearbox.py +++ b/tests/test_gearbox.py @@ -49,20 +49,20 @@ def __init__(self, dvs): for i in [x for x in intf_table.getKeys() if sr not in x]: (status, fvs) = intf_table.get(i) assert status == True - self.interfaces[i] = {"attrs" : dict(fvs)} + self.interfaces[i] = dict(fvs) - def SanityCheck(self, dvs, testlog): + def SanityCheck(self, testlog): """ Verify data integrity of Gearbox objects in APPL_DB """ for i in self.interfaces: - phy_id = self.interfaces[i]["attrs"]["phy_id"] + phy_id = self.interfaces[i]["phy_id"] assert phy_id in self.phys - assert self.interfaces[i]["attrs"]["index"] in 
self.phys[phy_id]["ports"] + assert self.interfaces[i]["index"] in self.phys[phy_id]["ports"] - for lane in self.interfaces[i]["attrs"]["system_lanes"].split(','): + for lane in self.interfaces[i]["system_lanes"].split(','): assert lane in self.phys[phy_id]["lanes"] - for lane in self.interfaces[i]["attrs"]["line_lanes"].split(','): + for lane in self.interfaces[i]["line_lanes"].split(','): assert lane in self.phys[phy_id]["lanes"] class GBAsic(DVSDatabase): @@ -70,6 +70,7 @@ def __init__(self, db_id: int, connector: str, gearbox: Gearbox): DVSDatabase.__init__(self, db_id, connector) self.gearbox = gearbox self.ports = {} + self.port_oid_to_intf_idx = {} self._wait_for_gb_asic_db_to_initialize() for connector in self.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT_CONNECTOR"): @@ -85,12 +86,34 @@ def __init__(self, db_id: int, connector: str, gearbox: Gearbox): for i in self.gearbox.interfaces: intf = self.gearbox.interfaces[i] - if intf["attrs"]["system_lanes"] == system_lanes: - assert intf["attrs"]["line_lanes"] == line_lanes - self.ports[intf["attrs"]["index"]] = (system_port_oid, line_port_oid) + if intf["system_lanes"] == system_lanes: + assert intf["line_lanes"] == line_lanes + self.ports[intf["index"]] = (system_port_oid, line_port_oid) + self.port_oid_to_intf_idx[system_port_oid] = (i, True) + self.port_oid_to_intf_idx[line_port_oid] = (i, False) assert len(self.ports) == len(self.gearbox.interfaces) + for serdes in self.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT_SERDES"): + fvs = self.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_PORT_SERDES", serdes) + port_oid = fvs.get("SAI_PORT_SERDES_ATTR_PORT_ID") + intf_idx, is_system = self.port_oid_to_intf_idx[port_oid] + intf = self.gearbox.interfaces[ intf_idx ] + appl_db_key_prefix = 'system_' if is_system else 'line_' + for asic_db_key, appl_db_key_suffix in [ + ("SAI_PORT_SERDES_ATTR_TX_FIR_MAIN", "tx_fir_main"), + ("SAI_PORT_SERDES_ATTR_TX_FIR_PRE1", "tx_fir_pre1"), + ("SAI_PORT_SERDES_ATTR_TX_FIR_PRE2", "tx_fir_pre2"), 
+ ("SAI_PORT_SERDES_ATTR_TX_FIR_PRE3", "tx_fir_pre3"), + ("SAI_PORT_SERDES_ATTR_TX_FIR_POST1", "tx_fir_post1"), + ("SAI_PORT_SERDES_ATTR_TX_FIR_POST2", "tx_fir_post2"), + ("SAI_PORT_SERDES_ATTR_TX_FIR_POST3", "tx_fir_post3"), + ]: + if asic_db_key not in fvs: + continue + asic_db_value = fvs.get(asic_db_key).split(":")[-1] + assert intf[appl_db_key_prefix + appl_db_key_suffix] == asic_db_value + def _wait_for_gb_asic_db_to_initialize(self) -> None: """Wait up to 30 seconds for the default fields to appear in ASIC DB.""" def _verify_db_contents(): @@ -112,13 +135,50 @@ def _verify_db_contents(): init_polling_config = PollingConfig(2, 30, strict=True) wait_for_result(_verify_db_contents, init_polling_config) +@pytest.fixture(scope="module") +def gearbox(dvs): + return Gearbox(dvs) + +@pytest.fixture(scope="module") +def gbasic(dvs, gearbox): + return GBAsic(swsscommon.GB_ASIC_DB, dvs.redis_sock, gearbox) + +@pytest.fixture(scope="module") +def enable_port_counter(dvs): + flex_counter_table = swsscommon.Table(dvs.get_config_db().db_connection, + "FLEX_COUNTER_TABLE") + + # Enable port counter + flex_counter_table.hset("PORT", "FLEX_COUNTER_STATUS", "enable") + yield + # Disable port counter + flex_counter_table.hdel("PORT", "FLEX_COUNTER_STATUS") class TestGearbox(object): - def test_GearboxSanity(self, dvs, testlog): - Gearbox(dvs).SanityCheck(dvs, testlog) + def test_GearboxSanity(self, gearbox, testlog): + gearbox.SanityCheck(testlog) + + def test_GearboxCounter(self, dvs, gbasic, enable_port_counter, testlog): + counters_db = DVSDatabase(swsscommon.COUNTERS_DB, dvs.redis_sock) + gb_counters_db = DVSDatabase(swsscommon.GB_COUNTERS_DB, dvs.redis_sock) + + intf = gbasic.gearbox.interfaces["0"] + port_oid = counters_db.get_entry("COUNTERS_PORT_NAME_MAP", "")[intf["name"]] + system_port_oid, line_port_oid = gbasic.ports["0"] + + fvs = gb_counters_db.wait_for_entry("COUNTERS", system_port_oid) + assert fvs.get("SAI_PORT_STAT_IF_OUT_ERRORS") + + fvs = 
gb_counters_db.wait_for_entry("COUNTERS", line_port_oid) + assert fvs.get("SAI_PORT_STAT_IF_IN_ERRORS") + + fvs = counters_db.wait_for_entry("COUNTERS", port_oid) + assert fvs.get("SAI_PORT_STAT_IF_IN_ERRORS") + + fvs = counters_db.wait_for_entry("COUNTERS", port_oid) + assert fvs.get("SAI_PORT_STAT_IF_IN_ERRORS") - def test_GbAsicFEC(self, dvs, testlog): - gbasic = GBAsic(swsscommon.GB_ASIC_DB, dvs.redis_sock, Gearbox(dvs)) + def test_GbAsicFEC(self, gbasic, testlog): # set fec rs on port 0 of phy 1 fvs = swsscommon.FieldValuePairs([("system_fec","rs")]) diff --git a/tests/test_hash.py b/tests/test_hash.py new file mode 100644 index 0000000000..9c08aabb65 --- /dev/null +++ b/tests/test_hash.py @@ -0,0 +1,174 @@ +import pytest +import logging + + +logging.basicConfig(level=logging.INFO) +hashlogger = logging.getLogger(__name__) + + +HASH_FIELD_LIST = [ + "DST_MAC", + "SRC_MAC", + "ETHERTYPE", + "IP_PROTOCOL", + "DST_IP", + "SRC_IP", + "L4_DST_PORT", + "L4_SRC_PORT" +] +INNER_HASH_FIELD_LIST = [ + "INNER_DST_MAC", + "INNER_SRC_MAC", + "INNER_ETHERTYPE", + "INNER_IP_PROTOCOL", + "INNER_DST_IP", + "INNER_SRC_IP", + "INNER_L4_DST_PORT", + "INNER_L4_SRC_PORT" +] +DEFAULT_HASH_FIELD_LIST = [ + "DST_MAC", + "SRC_MAC", + "ETHERTYPE", + "IN_PORT" +] + +SAI_HASH_FIELD_LIST = [ + "SAI_NATIVE_HASH_FIELD_DST_MAC", + "SAI_NATIVE_HASH_FIELD_SRC_MAC", + "SAI_NATIVE_HASH_FIELD_ETHERTYPE", + "SAI_NATIVE_HASH_FIELD_IP_PROTOCOL", + "SAI_NATIVE_HASH_FIELD_DST_IP", + "SAI_NATIVE_HASH_FIELD_SRC_IP", + "SAI_NATIVE_HASH_FIELD_L4_DST_PORT", + "SAI_NATIVE_HASH_FIELD_L4_SRC_PORT" +] +SAI_INNER_HASH_FIELD_LIST = [ + "SAI_NATIVE_HASH_FIELD_INNER_DST_MAC", + "SAI_NATIVE_HASH_FIELD_INNER_SRC_MAC", + "SAI_NATIVE_HASH_FIELD_INNER_ETHERTYPE", + "SAI_NATIVE_HASH_FIELD_INNER_IP_PROTOCOL", + "SAI_NATIVE_HASH_FIELD_INNER_DST_IP", + "SAI_NATIVE_HASH_FIELD_INNER_SRC_IP", + "SAI_NATIVE_HASH_FIELD_INNER_L4_DST_PORT", + "SAI_NATIVE_HASH_FIELD_INNER_L4_SRC_PORT" +] +SAI_DEFAULT_HASH_FIELD_LIST = [ + 
"SAI_NATIVE_HASH_FIELD_DST_MAC", + "SAI_NATIVE_HASH_FIELD_SRC_MAC", + "SAI_NATIVE_HASH_FIELD_ETHERTYPE", + "SAI_NATIVE_HASH_FIELD_IN_PORT" +] + + +@pytest.mark.usefixtures("dvs_hash_manager") +class TestHashBasicFlows: + @pytest.fixture(scope="class") + def hashData(self, dvs_hash_manager): + hashlogger.info("Initialize HASH data") + + hashlogger.info("Verify HASH count") + self.dvs_hash.verify_hash_count(0) + + hashlogger.info("Get ECMP/LAG HASH id") + hashIdList = sorted(self.dvs_hash.get_hash_ids()) + + # Assumption: VS has only two HASH objects: ECMP, LAG + meta_dict = { + "ecmp": hashIdList[0], + "lag": hashIdList[1] + } + + yield meta_dict + + hashlogger.info("Deinitialize HASH data") + + @pytest.mark.parametrize( + "hash,field", [ + pytest.param( + "ecmp", + "ecmp_hash", + id="ecmp-hash" + ), + pytest.param( + "lag", + "lag_hash", + id="lag-hash" + ) + ] + ) + @pytest.mark.parametrize( + "hfList,saiHfList", [ + pytest.param( + ",".join(HASH_FIELD_LIST), + SAI_HASH_FIELD_LIST, + id="outer-frame" + ), + pytest.param( + ",".join(INNER_HASH_FIELD_LIST), + SAI_INNER_HASH_FIELD_LIST, + id="inner-frame" + ) + ] + ) + def test_HashSwitchGlobalConfiguration(self, hash, field, hfList, saiHfList, testlog, hashData): + attr_dict = { + field: hfList + } + + hashlogger.info("Update {} hash".format(hash.upper())) + self.dvs_hash.update_switch_hash( + qualifiers=attr_dict + ) + + hashId = hashData[hash] + sai_attr_dict = { + "SAI_HASH_ATTR_NATIVE_HASH_FIELD_LIST": saiHfList + } + + hashlogger.info("Validate {} hash".format(hash.upper())) + self.dvs_hash.verify_hash_generic( + sai_hash_id=hashId, + sai_qualifiers=sai_attr_dict + ) + + @pytest.mark.parametrize( + "hash,field", [ + pytest.param( + "ecmp", + "ecmp_hash", + id="ecmp-hash" + ), + pytest.param( + "lag", + "lag_hash", + id="lag-hash" + ) + ] + ) + def test_HashDefaultSwitchGlobalConfiguration(self, hash, field, testlog, hashData): + attr_dict = { + field: ",".join(DEFAULT_HASH_FIELD_LIST) + } + + 
hashlogger.info("Update {} hash".format(hash.upper())) + self.dvs_hash.update_switch_hash( + qualifiers=attr_dict + ) + + hashId = hashData[hash] + sai_attr_dict = { + "SAI_HASH_ATTR_NATIVE_HASH_FIELD_LIST": SAI_DEFAULT_HASH_FIELD_LIST + } + + hashlogger.info("Validate {} hash".format(hash.upper())) + self.dvs_hash.verify_hash_generic( + sai_hash_id=hashId, + sai_qualifiers=sai_attr_dict + ) + + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass diff --git a/tests/test_inband_intf_mgmt_vrf.py b/tests/test_inband_intf_mgmt_vrf.py index 05aa1f7389..4b1b8c86ed 100644 --- a/tests/test_inband_intf_mgmt_vrf.py +++ b/tests/test_inband_intf_mgmt_vrf.py @@ -14,7 +14,6 @@ def setup_db(self, dvs): def add_mgmt_vrf(self, dvs): initial_entries = set(self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER")) - #dvs.runcmd("config vrf add mgmt") dvs.runcmd("ip link add mgmt type vrf table 5000") dvs.runcmd("ifconfig mgmt up") time.sleep(2) diff --git a/tests/test_interface.py b/tests/test_interface.py index a57970b1e5..98f1527152 100644 --- a/tests/test_interface.py +++ b/tests/test_interface.py @@ -4,6 +4,8 @@ from swsscommon import swsscommon +VLAN_SUB_INTERFACE_SEPARATOR = '.' 
+ class TestRouterInterface(object): def setup_db(self, dvs): self.pdb = swsscommon.DBConnector(0, dvs.redis_sock, 0) @@ -2193,6 +2195,100 @@ def test_VLanInterfaceIpv6LinkLocalOnly(self, dvs, testlog): # one loopback router interface assert len(intf_entries) == 1 + def set_loopback_action(self, interface, action): + if interface.startswith("PortChannel"): + tbl_name = "PORTCHANNEL_INTERFACE" + elif interface.startswith("Vlan"): + tbl_name = "VLAN_INTERFACE" + else: + sub_intf_sep_idx = interface.find(VLAN_SUB_INTERFACE_SEPARATOR) + if sub_intf_sep_idx != -1: + tbl_name = "VLAN_SUB_INTERFACE" + else: + tbl_name = "INTERFACE" + + fvs = swsscommon.FieldValuePairs([("loopback_action", action)]) + tbl = swsscommon.Table(self.cdb, tbl_name) + tbl.set(interface, fvs) + time.sleep(1) + + def loopback_action_test(self, iface, action): + # create interface + self.create_l3_intf(iface, "") + + # set interface loopback action in config db + self.set_loopback_action(iface, action) + + # check application database + tbl = swsscommon.Table(self.pdb, "INTF_TABLE") + (status, fvs) = tbl.get(iface) + assert status == True + + action_found = False + for fv in fvs: + if fv[0] == "loopback_action": + action_found = True + assert fv[1] == action + assert action_found == True + + # check asic db + tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE") + intf_entries = tbl.getKeys() + + action_map = {"drop": "SAI_PACKET_ACTION_DROP", "forward": "SAI_PACKET_ACTION_FORWARD"} + action_found = False + for key in intf_entries: + (status, fvs) = tbl.get(key) + assert status == True + + for fv in fvs: + if fv[0] == "SAI_ROUTER_INTERFACE_ATTR_LOOPBACK_PACKET_ACTION": + action_found = True + assert fv[1] == action_map[action] + assert action_found == True + + # remove interface + self.remove_l3_intf(iface) + + def test_interfaceLoopbackActionDrop(self, dvs, testlog): + self.setup_db(dvs) + self.loopback_action_test("Ethernet8", "drop") + + def 
test_interfaceLoopbackActionForward(self, dvs, testlog): + self.setup_db(dvs) + self.loopback_action_test("Ethernet8", "forward") + + def test_subInterfaceLoopbackActionDrop(self, dvs, testlog): + self.setup_db(dvs) + self.loopback_action_test("Ethernet8.1", "drop") + + def test_subInterfaceLoopbackActionForward(self, dvs, testlog): + self.setup_db(dvs) + self.loopback_action_test("Ethernet8.1", "forward") + + def test_vlanInterfaceLoopbackActionDrop(self, dvs, testlog): + self.setup_db(dvs) + self.create_vlan("10") + self.loopback_action_test("Vlan10", "drop") + self.remove_vlan("10") + + def test_vlanInterfaceLoopbackActionForward(self, dvs, testlog): + self.setup_db(dvs) + self.create_vlan("20") + self.loopback_action_test("Vlan20", "forward") + self.remove_vlan("20") + + def test_portChannelInterfaceLoopbackActionDrop(self, dvs, testlog): + self.setup_db(dvs) + self.create_port_channel("PortChannel009") + self.loopback_action_test("PortChannel009", "drop") + self.remove_port_channel("PortChannel009") + + def test_portChannelInterfaceLoopbackActionForward(self, dvs, testlog): + self.setup_db(dvs) + self.create_port_channel("PortChannel010") + self.loopback_action_test("PortChannel010", "forward") + self.remove_port_channel("PortChannel010") # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_macsec.py b/tests/test_macsec.py index f74f31c008..9dc5a4ed53 100644 --- a/tests/test_macsec.py +++ b/tests/test_macsec.py @@ -1,7 +1,8 @@ from swsscommon import swsscommon +from swsscommon.swsscommon import CounterTable, MacsecCounter import conftest -import sys +import time import functools import typing import re @@ -89,16 +90,19 @@ def convert_key(self, key: str): StateDBTable.SEPARATOR)) +class ConfigTable(Table): + + def __init__(self, dvs: conftest.DockerVirtualSwitch, table_name: str): + super(ConfigTable, self).__init__(dvs.get_config_db(), table_name) + + def 
gen_sci(macsec_system_identifier: str, macsec_port_identifier: int) -> str: macsec_system_identifier = macsec_system_identifier.translate( str.maketrans("", "", ":.-")) sci = "{}{}".format( macsec_system_identifier, - str(macsec_port_identifier).zfill(4)) - sci = int(sci, 16) - if sys.byteorder == "little": - sci = int.from_bytes(sci.to_bytes(8, 'big'), 'little', signed=False) - return str(sci) + str(macsec_port_identifier).zfill(4)).lower() + return sci def gen_sc_key( @@ -321,6 +325,13 @@ def delete_transmit_sa(self, sai: str): del self.app_transmit_sa_table[sai] self.state_transmit_sa_table.wait_delete(sai) + @macsec_sa() + def set_macsec_pn( + self, + sai: str, + pn: int): + self.app_transmit_sa_table[sai] = {"next_pn": pn} + @macsec_sc() def set_enable_transmit_sa(self, sci: str, an: int, enable: bool): if enable: @@ -387,6 +398,21 @@ def get_macsec_sa( print(info.group(0)) return info.group(0) + @macsec_sa() + def get_macsec_xpn_counter( + self, + sai: str) -> int: + counter_table = CounterTable(self.dvs.get_counters_db().db_connection) + for i in range(3): + r, value = counter_table.hget( + MacsecCounter(), + sai, + "SAI_MACSEC_SA_ATTR_CURRENT_XPN") + if r: return int(value) + time.sleep(1) # wait a moment for polling counter + + return None + class TestMACsec(object): def init_macsec( @@ -475,6 +501,12 @@ def rekey_macsec( auth_key: str, ssci: int, salt: str): + wpa.set_macsec_pn( + port_name, + local_mac_address, + macsec_port_identifier, + an, + 0x00000000C0000000) wpa.create_receive_sa( port_name, peer_mac_address, @@ -650,6 +682,18 @@ def test_macsec_term_orch(self, dvs: conftest.DockerVirtualSwitch, testlog): peer_mac_address, macsec_port_identifier, 0)) + assert( + inspector.get_macsec_xpn_counter( + port_name, + local_mac_address, + macsec_port_identifier, + 0) == packet_number) + assert( + inspector.get_macsec_xpn_counter( + port_name, + peer_mac_address, + macsec_port_identifier, + 0) == packet_number) self.rekey_macsec( wpa, port_name, @@ -675,6 
+719,18 @@ def test_macsec_term_orch(self, dvs: conftest.DockerVirtualSwitch, testlog): peer_mac_address, macsec_port_identifier, 1)) + assert( + inspector.get_macsec_xpn_counter( + port_name, + local_mac_address, + macsec_port_identifier, + 1) == packet_number) + assert( + inspector.get_macsec_xpn_counter( + port_name, + peer_mac_address, + macsec_port_identifier, + 1) == packet_number) assert( not inspector.get_macsec_sa( macsec_port, @@ -687,6 +743,18 @@ def test_macsec_term_orch(self, dvs: conftest.DockerVirtualSwitch, testlog): peer_mac_address, macsec_port_identifier, 0)) + assert( + not inspector.get_macsec_xpn_counter( + port_name, + local_mac_address, + macsec_port_identifier, + 0) == packet_number) + assert( + not inspector.get_macsec_xpn_counter( + port_name, + peer_mac_address, + macsec_port_identifier, + 0) == packet_number) # Exit MACsec port self.deinit_macsec( wpa, @@ -747,6 +815,87 @@ def test_macsec_attribute_change(self, dvs: conftest.DockerVirtualSwitch, testlo macsec_port_identifier, 0) + def test_macsec_with_portchannel(self, dvs: conftest.DockerVirtualSwitch, testlog): + + # Set MACsec enabled on Ethernet0 + ConfigTable(dvs, "PORT")["Ethernet0"] = {"macsec" : "test"} + StateDBTable(dvs, "FEATURE")["macsec"] = {"state": "enabled"} + + # Setup Port-channel + ConfigTable(dvs, "PORTCHANNEL")["PortChannel001"] = {"admin": "up", "mtu": "9100", "oper_status": "up"} + time.sleep(1) + + # create port channel member + ConfigTable(dvs, "PORTCHANNEL_MEMBER")["PortChannel001|Ethernet0"] = {"NULL": "NULL"} + ConfigTable(dvs, "PORTCHANNEL_INTERFACE")["PortChannel001"] = {"NULL": "NULL"} + ConfigTable(dvs, "PORTCHANNEL_INTERFACE")["PortChannel001|40.0.0.0/31"] = {"NULL": "NULL"} + time.sleep(3) + + # Check Portchannel member in ASIC db that shouldn't been created before MACsec enabled + lagmtbl = swsscommon.Table(swsscommon.DBConnector(1, dvs.redis_sock, 0), "ASIC_STATE:SAI_OBJECT_TYPE_LAG_MEMBER") + lagms = lagmtbl.getKeys() + assert len(lagms) == 0 + + # 
Create MACsec session + port_name = "Ethernet0" + local_mac_address = "00-15-5D-78-FF-C1" + peer_mac_address = "00-15-5D-78-FF-C2" + macsec_port_identifier = 1 + macsec_port = "macsec_eth1" + sak = "0" * 32 + auth_key = "0" * 32 + packet_number = 1 + ssci = 1 + salt = "0" * 24 + + wpa = WPASupplicantMock(dvs) + inspector = MACsecInspector(dvs) + + self.init_macsec( + wpa, + port_name, + local_mac_address, + macsec_port_identifier) + self.establish_macsec( + wpa, + port_name, + local_mac_address, + peer_mac_address, + macsec_port_identifier, + 0, + sak, + packet_number, + auth_key, + ssci, + salt) + time.sleep(3) + + # Check Portchannel member in ASIC db that should been created after MACsec enabled + lagmtbl = swsscommon.Table(swsscommon.DBConnector(1, dvs.redis_sock, 0), "ASIC_STATE:SAI_OBJECT_TYPE_LAG_MEMBER") + lagms = lagmtbl.getKeys() + assert len(lagms) == 1 + + self.deinit_macsec( + wpa, + inspector, + port_name, + macsec_port, + local_mac_address, + peer_mac_address, + macsec_port_identifier, + 0) + + # remove port channel member + del ConfigTable(dvs, "PORTCHANNEL_INTERFACE")["PortChannel001"] + del ConfigTable(dvs, "PORTCHANNEL_INTERFACE")["PortChannel001|40.0.0.0/31"] + del ConfigTable(dvs, "PORTCHANNEL_MEMBER")["PortChannel001|Ethernet0"] + + # remove port channel + del ConfigTable(dvs, "PORTCHANNEL")["PortChannel001"] + + # Clear MACsec enabled on Ethernet0 + ConfigTable(dvs, "PORT")["Ethernet0"] = {"macsec" : ""} + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down diff --git a/tests/test_mclag_cfg.py b/tests/test_mclag_cfg.py index 0a79c767da..f93632bd37 100644 --- a/tests/test_mclag_cfg.py +++ b/tests/test_mclag_cfg.py @@ -35,7 +35,37 @@ def check_table_doesnt_exists(db, table, key): return True, error_info - +def create_mclag_domain(dvs, domain_id, source_ip, peer_ip, peer_link): + tbl = swsscommon.Table(dvs.cdb, "MCLAG_DOMAIN") + fvs = swsscommon.FieldValuePairs([("source_ip", 
source_ip), + ("peer_ip", peer_ip), + ("peer_link", peer_link)]) + tbl.set(domain_id, fvs) + time.sleep(1) + +def remove_mclag_domain(dvs, domain_id): + tbl = swsscommon.Table(dvs.cdb, "MCLAG_DOMAIN") + tbl._del(domain_id) + time.sleep(1) + +def add_mclag_domain_field(dvs, domain_id, field, value): + tbl = swsscommon.Table(dvs.cdb, "MCLAG_DOMAIN") + fvs = swsscommon.FieldValuePairs([(field, value)]) + tbl.set(domain_id, fvs) + time.sleep(1) + +def create_mclag_interface(dvs, domain_id, mclag_interface): + tbl = swsscommon.Table(dvs.cdb, "MCLAG_INTERFACE") + fvs = swsscommon.FieldValuePairs([("if_type", "PortChannel")]) + key_string = domain_id + "|" + mclag_interface + tbl.set(key_string, fvs) + time.sleep(1) + +def remove_mclag_interface(dvs, domain_id, mclag_interface): + tbl = swsscommon.Table(dvs.cdb, "MCLAG_INTERFACE") + key_string = domain_id + "|" + mclag_interface + tbl._del(key_string) + time.sleep(1) # Test MCLAG Configs class TestMclagConfig(object): @@ -66,173 +96,115 @@ class TestMclagConfig(object): # Testcase 1 Verify Configuration of MCLAG Domain with src, peer ip and peer link config gets updated in CONFIG_DB @pytest.mark.dev_sanity def test_mclag_cfg_domain_add(self, dvs, testlog): - self.cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + dvs.setup_db() #cleanup existing entries - delete_table_keys(self.cfg_db, self.CFG_MCLAG_DOMAIN_TABLE) - delete_table_keys(self.cfg_db, self.CFG_MCLAG_INTERFACE_TABLE) + delete_table_keys(dvs.cdb, self.CFG_MCLAG_DOMAIN_TABLE) + delete_table_keys(dvs.cdb, self.CFG_MCLAG_INTERFACE_TABLE) - cmd_string ="config mclag add {} {} {} {}".format(self.MCLAG_DOMAIN_ID, self.MCLAG_SRC_IP, self.MCLAG_PEER_IP, self.MCLAG_PEER_LINK) - dvs.runcmd(cmd_string) + create_mclag_domain(dvs, self.MCLAG_DOMAIN_ID, self.MCLAG_SRC_IP, self.MCLAG_PEER_IP, self.MCLAG_PEER_LINK) time.sleep(2) - + #check whether domain cfg table contents are same as configured values - ok,error_info = 
dvs.all_table_entry_has(self.cfg_db, self.CFG_MCLAG_DOMAIN_TABLE, self.MCLAG_DOMAIN_ID, - [ + ok,error_info = dvs.all_table_entry_has(dvs.cdb, self.CFG_MCLAG_DOMAIN_TABLE, self.MCLAG_DOMAIN_ID, + [ ("source_ip",self.MCLAG_SRC_IP), ("peer_ip",self.MCLAG_PEER_IP), ("peer_link",self.MCLAG_PEER_LINK) - ] + ] ) assert ok,error_info - # Testcase 2 Verify that second domain addition fails when there is already a domain configured - @pytest.mark.dev_sanity - def test_mclag_cfg_domain_add_2nd(self, dvs, testlog): - self.cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - cmd_string ="config mclag add {} {} {} {}".format(self.MCLAG_DOMAIN_2, self.MCLAG_SRC_IP, self.MCLAG_PEER_IP, self.MCLAG_PEER_LINK) - dvs.runcmd(cmd_string) - time.sleep(2) - - #check whether second domain config is not added to config db - key_string = self.MCLAG_DOMAIN_2 - ok,error_info = check_table_doesnt_exists(self.cfg_db, self.CFG_MCLAG_DOMAIN_TABLE, key_string) - assert ok,error_info - - # Testcase 3 Verify Configuration of MCLAG Interface to existing domain @pytest.mark.dev_sanity def test_mclag_cfg_intf_add(self, dvs, testlog): - self.cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - cmd_string ="config mclag member add {} {}".format(self.MCLAG_DOMAIN_ID, self.MCLAG_INTERFACE1) - dvs.runcmd(cmd_string) + dvs.setup_db() + + create_mclag_interface(dvs, self.MCLAG_DOMAIN_ID, self.MCLAG_INTERFACE1) time.sleep(2) - + #check whether mclag interface config is reflected key_string = self.MCLAG_DOMAIN_ID + "|" + self.MCLAG_INTERFACE1 - ok,error_info = check_table_exists(self.cfg_db, self.CFG_MCLAG_INTERFACE_TABLE, key_string) + ok,error_info = check_table_exists(dvs.cdb, self.CFG_MCLAG_INTERFACE_TABLE, key_string) assert ok,error_info # Testcase 4 Verify remove and add mclag interface @pytest.mark.dev_sanity def test_mclag_cfg_intf_remove_and_add(self, dvs, testlog): - self.cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + 
dvs.setup_db() - cmd_string ="config mclag member del {} {}".format(self.MCLAG_DOMAIN_ID, self.MCLAG_INTERFACE1) - dvs.runcmd(cmd_string) + remove_mclag_interface(dvs, self.MCLAG_DOMAIN_ID, self.MCLAG_INTERFACE1) time.sleep(2) - + #check whether mclag interface is removed key_string = self.MCLAG_DOMAIN_ID + "|" + self.MCLAG_INTERFACE1 - ok,error_info = check_table_doesnt_exists(self.cfg_db, self.CFG_MCLAG_INTERFACE_TABLE, key_string) + ok,error_info = check_table_doesnt_exists(dvs.cdb, self.CFG_MCLAG_INTERFACE_TABLE, key_string) assert ok,error_info #add different mclag interface - cmd_string ="config mclag member del {} {}".format(self.MCLAG_DOMAIN_ID, self.MCLAG_INTERFACE2) - dvs.runcmd(cmd_string) + create_mclag_interface(dvs, self.MCLAG_DOMAIN_ID, self.MCLAG_INTERFACE2) time.sleep(2) #check whether new mclag interface is added key_string = self.MCLAG_DOMAIN_ID + "|" + self.MCLAG_INTERFACE2 - ok,error_info = check_table_doesnt_exists(self.cfg_db, self.CFG_MCLAG_INTERFACE_TABLE, key_string) + ok,error_info = check_table_exists(dvs.cdb, self.CFG_MCLAG_INTERFACE_TABLE, key_string) assert ok,error_info # Testcase 5 Verify Configuration of valid values for session timeout @pytest.mark.dev_sanity def test_mclag_cfg_session_timeout_valid_values(self, dvs, testlog): - self.cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + dvs.setup_db() for value in self.MCLAG_SESS_TMOUT_VALID_LIST: - cmd_string ="config mclag session-timeout {} {}".format(self.MCLAG_DOMAIN_ID, value) - dvs.runcmd(cmd_string) + add_mclag_domain_field(dvs, self.MCLAG_DOMAIN_ID, "session_timeout", value) + time.sleep(2) - + #check whether domain cfg table contents are same as configured values - ok,error_info = dvs.all_table_entry_has(self.cfg_db, self.CFG_MCLAG_DOMAIN_TABLE, self.MCLAG_DOMAIN_ID, - [ + ok,error_info = dvs.all_table_entry_has(dvs.cdb, self.CFG_MCLAG_DOMAIN_TABLE, self.MCLAG_DOMAIN_ID, + [ ("source_ip",self.MCLAG_SRC_IP), ("peer_ip",self.MCLAG_PEER_IP), 
("peer_link",self.MCLAG_PEER_LINK), ("session_timeout",value) - ] + ] ) assert ok,error_info # Testcase 6 Verify Configuration of valid values for KA timer @pytest.mark.dev_sanity def test_mclag_cfg_ka_valid_values(self, dvs, testlog): - self.cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + dvs.setup_db() for value in self.MCLAG_KA_VALID_LIST: - cmd_string ="config mclag keepalive-interval {} {}".format(self.MCLAG_DOMAIN_ID, value) - dvs.runcmd(cmd_string) + add_mclag_domain_field(dvs, self.MCLAG_DOMAIN_ID, "keepalive_interval", value) time.sleep(2) - #check whether domain cfg table contents are same as configured values - ok,error_info = dvs.all_table_entry_has(self.cfg_db, self.CFG_MCLAG_DOMAIN_TABLE, self.MCLAG_DOMAIN_ID, - [ + ok,error_info = dvs.all_table_entry_has(dvs.cdb, self.CFG_MCLAG_DOMAIN_TABLE, self.MCLAG_DOMAIN_ID, + [ ("source_ip",self.MCLAG_SRC_IP), ("peer_ip",self.MCLAG_PEER_IP), ("peer_link",self.MCLAG_PEER_LINK), ("keepalive_interval",value) - ] + ] ) assert ok,error_info - - # Testcase 7 Verify Configuration of invalid values for KA - @pytest.mark.dev_sanity - def test_mclag_cfg_ka_invalid_values(self, dvs, testlog): - self.cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - - for value in self.MCLAG_KA_INVALID_LIST: - cmd_string ="config mclag keepalive-interval {} {}".format(self.MCLAG_DOMAIN_ID, value) - dvs.runcmd(cmd_string) - time.sleep(2) - - #check whether domain cfg table contents are same as configured values - found,error_info = dvs.all_table_entry_has(self.cfg_db, self.CFG_MCLAG_DOMAIN_TABLE, self.MCLAG_DOMAIN_ID, - [ - ("keepalive_interval",value) - ] - ) - assert found == False, "invalid keepalive value %s written to CONFIG_DB" % value - - # Testcase 8 Verify Configuration of invalid values for session timeout - @pytest.mark.dev_sanity - def test_mclag_cfg_session_invalid_values(self, dvs, testlog): - self.cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) - - 
for value in self.MCLAG_SESS_TMOUT_INVALID_LIST: - cmd_string ="config mclag session-timeout {} {}".format(self.MCLAG_DOMAIN_ID, value) - dvs.runcmd(cmd_string) - time.sleep(2) - - #check whether domain cfg table contents are same as configured values - found,error_info = dvs.all_table_entry_has(self.cfg_db, self.CFG_MCLAG_DOMAIN_TABLE, self.MCLAG_DOMAIN_ID, - [ - ("session_timeout",value) - ] - ) - assert found == False, "invalid keepalive value %s written to CONFIG_DB" % value - - # Testcase 9 Verify Deletion of MCLAG Domain + # Testcase 7 Verify Deletion of MCLAG Domain @pytest.mark.dev_sanity def test_mclag_cfg_domain_del(self, dvs, testlog): - self.cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + dvs.setup_db() - cmd_string ="config mclag del {}".format(self.MCLAG_DOMAIN_ID) - dvs.runcmd(cmd_string) + remove_mclag_domain(dvs, self.MCLAG_DOMAIN_ID) time.sleep(2) - + #check whether domain cfg table contents are same as configured values - ok, error_info = check_table_doesnt_exists(self.cfg_db, self.CFG_MCLAG_DOMAIN_TABLE, self.MCLAG_DOMAIN_ID) + ok, error_info = check_table_doesnt_exists(dvs.cdb, self.CFG_MCLAG_DOMAIN_TABLE, self.MCLAG_DOMAIN_ID) assert ok,error_info #make sure mclag interface tables entries are also deleted when mclag domain is deleted - key_string = self.MCLAG_DOMAIN_ID - ok,error_info = check_table_doesnt_exists(self.cfg_db, self.CFG_MCLAG_INTERFACE_TABLE, key_string) + key_string = self.MCLAG_DOMAIN_ID + ok,error_info = check_table_doesnt_exists(dvs.cdb, self.CFG_MCLAG_INTERFACE_TABLE, key_string) assert ok,error_info - diff --git a/tests/test_mclag_fdb.py b/tests/test_mclag_fdb.py index 5049859437..8252db8421 100644 --- a/tests/test_mclag_fdb.py +++ b/tests/test_mclag_fdb.py @@ -20,6 +20,10 @@ def create_entry_pst(db, table, key, pairs): tbl = swsscommon.ProducerStateTable(db, table) create_entry(tbl, key, pairs) +def delete_entry_tbl(db, table, key): + tbl = swsscommon.Table(db, table) + tbl._del(key) + def 
delete_entry_pst(db, table, key): tbl = swsscommon.ProducerStateTable(db, table) tbl._del(key) @@ -32,6 +36,14 @@ def get_port_oid(dvs, port_name): return k[1] return None +def get_portchannel_oid(dvs, alias): + counters_db = swsscommon.DBConnector(swsscommon.COUNTERS_DB, dvs.redis_sock, 0) + lag_name_map_tbl = swsscommon.Table(counters_db, 'COUNTERS_LAG_NAME_MAP') + for k in lag_name_map_tbl.get('')[1]: + if k[0] == alias: + return k[1] + return None + def get_bridge_port_oid(dvs, port_oid): tbl = swsscommon.Table(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_BRIDGE_PORT") for key in tbl.getKeys(): @@ -76,7 +88,7 @@ def how_many_entries_exist(db, table): @pytest.mark.dev_sanity def test_mclagFdb_basic_config_add(dvs, testlog): dvs.setup_db() - dvs.runcmd("sonic-clear fdb all") + dvs.clear_fdb() time.sleep(2) vlan_before = how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_VLAN") @@ -483,7 +495,77 @@ def test_mclagFdb_static_mac_dynamic_move_reject(dvs, testlog): "MCLAG_FDB_TABLE", "Vlan200:3C:85:99:5E:00:01", ) -# Test-12 Verify cleanup of the basic config. +# Test-12 Verify Remote to Local Move. 
+ +@pytest.mark.dev_sanity +def test_mclagFdb_remote_to_local_mac_move_ntf(dvs, testlog): + dvs.setup_db() + + #Add remote MAC to MCLAG_FDB_TABLE on PortChannel0005 + create_entry_pst( + dvs.pdb, + "MCLAG_FDB_TABLE", "Vlan200:3C:85:99:5E:00:01", + [ + ("port", "PortChannel0005"), + ("type", "dynamic"), + ] + ) + + # check that the FDB entry inserted into ASIC DB + assert how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY") == 1, "The MCLAG fdb entry not inserted to ASIC" + + ok, extra = dvs.is_fdb_entry_exists(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY", + [("mac", "3C:85:99:5E:00:01"), ("bvid", str(dvs.getVlanOid("200")))], + [("SAI_FDB_ENTRY_ATTR_TYPE", "SAI_FDB_ENTRY_TYPE_STATIC"), + ("SAI_FDB_ENTRY_ATTR_ALLOW_MAC_MOVE", "true")] + ) + + assert ok, str(extra) + + mac = "3C:85:99:5E:00:01" + vlan_oid = dvs.getVlanOid("200") + switch_id = dvs.getSwitchOid() + port_oid = get_portchannel_oid(dvs, "PortChannel0008") + bp_port_oid = get_bridge_port_oid(dvs, port_oid) + + # send fdb_event SAI_FDB_EVENT_MOVE + ntf = swsscommon.NotificationProducer(dvs.adb, "NOTIFICATIONS") + fvp = swsscommon.FieldValuePairs() + ntf_data = "[{\"fdb_entry\":\"{\\\"bvid\\\":\\\""+vlan_oid+"\\\",\\\"mac\\\":\\\"3C:85:99:5E:00:01\\\",\\\"switch_id\\\":\\\""+switch_id+"\\\"}\",\"fdb_event\":\"SAI_FDB_EVENT_MOVE\",\"list\":[{\"id\":\"SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID\",\"value\":\""+bp_port_oid+"\"}]}]" + ntf.send("fdb_event", ntf_data, fvp) + + time.sleep(2) + + # check that the FDB entry was inserted into ASIC DB + assert how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY") == 1, "The fdb entry not inserted to ASIC" + ok, extra = dvs.is_fdb_entry_exists(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY", + [("mac", "3C:85:99:5E:00:01"), ("bvid", str(dvs.getVlanOid("200")))], + [("SAI_FDB_ENTRY_ATTR_TYPE", "SAI_FDB_ENTRY_TYPE_DYNAMIC"), + ("SAI_FDB_ENTRY_ATTR_ALLOW_MAC_MOVE", "false")] + ) + assert ok, str(extra) + + delete_entry_tbl( + dvs.sdb, + 
"FDB_TABLE", "Vlan200:3c:85:99:5e:00:01", + ) + + time.sleep(2) + + delete_entry_tbl( + dvs.adb, + "ASIC_STATE", "SAI_OBJECT_TYPE_FDB_ENTRY:{\"bvid\":\""+vlan_oid+"\",\"mac\":\"3C:85:99:5E:00:01\",\"switch_id\":\""+switch_id+"\"}" + ) + + # check that the FDB entry was deleted from ASIC DB + assert how_many_entries_exist(dvs.adb, "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY") == 0, "The MCLAG static fdb entry not deleted" + + delete_entry_pst( + dvs.pdb, + "MCLAG_FDB_TABLE", "Vlan200:3C:85:99:5E:00:01", + ) + +# Test-13 Verify cleanup of the basic config. @pytest.mark.dev_sanity def test_mclagFdb_basic_config_del(dvs, testlog): diff --git a/tests/test_mux.py b/tests/test_mux.py index e9eb027a9d..54340808ea 100644 --- a/tests/test_mux.py +++ b/tests/test_mux.py @@ -2,35 +2,83 @@ import pytest import json +from ipaddress import ip_network, ip_address, IPv4Address from swsscommon import swsscommon +from mux_neigh_miss_tests import * def create_fvs(**kwargs): return swsscommon.FieldValuePairs(list(kwargs.items())) tunnel_nh_id = 0 -class TestMuxTunnelBase(object): +class TestMuxTunnelBase(): APP_MUX_CABLE = "MUX_CABLE_TABLE" + APP_NEIGH_TABLE = "NEIGH_TABLE" + APP_ROUTE_TABLE = "ROUTE_TABLE" APP_TUNNEL_DECAP_TABLE_NAME = "TUNNEL_DECAP_TABLE" + APP_TUNNEL_ROUTE_TABLE_NAME = "TUNNEL_ROUTE_TABLE" ASIC_TUNNEL_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL" ASIC_TUNNEL_TERM_ENTRIES = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_TERM_TABLE_ENTRY" ASIC_RIF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE" ASIC_VRF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER" ASIC_NEIGH_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY" ASIC_NEXTHOP_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP" + ASIC_NHG_MEMBER_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER" ASIC_ROUTE_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY" + ASIC_FDB_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY" + ASIC_SWITCH_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_SWITCH" CONFIG_MUX_CABLE = "MUX_CABLE" - + CONFIG_PEER_SWITCH = 
"PEER_SWITCH" + STATE_FDB_TABLE = "FDB_TABLE" + MUX_TUNNEL_0 = "MuxTunnel0" + PEER_SWITCH_HOST = "peer_switch_hostname" + CONFIG_TUNNEL_TABLE_NAME = "TUNNEL" + ASIC_QOS_MAP_TABLE_KEY = "ASIC_STATE:SAI_OBJECT_TYPE_QOS_MAP" + TUNNEL_QOS_MAP_NAME = "AZURE_TUNNEL" + + SELF_IPV4 = "10.1.0.32" + PEER_IPV4 = "10.1.0.33" SERV1_IPV4 = "192.168.0.100" SERV1_IPV6 = "fc02:1000::100" + SERV1_SOC_IPV4 = "192.168.0.103" SERV2_IPV4 = "192.168.0.101" SERV2_IPV6 = "fc02:1000::101" + SERV3_IPV4 = "192.168.0.102" + SERV3_IPV6 = "fc02:1000::102" + NEIGH1_IPV4 = "192.168.0.200" + NEIGH1_IPV6 = "fc02:1000::200" + NEIGH2_IPV4 = "192.168.0.201" + NEIGH2_IPV6 = "fc02:1000::201" + NEIGH3_IPV4 = "192.168.0.202" + NEIGH3_IPV6 = "fc02:1000::202" IPV4_MASK = "/32" IPV6_MASK = "/128" TUNNEL_NH_ID = 0 ACL_PRIORITY = "999" - + VLAN_1000 = "Vlan1000" + + PING_CMD = "timeout 0.5 ping -c1 -W1 -i0 -n -q {ip}" + + SAI_ROUTER_INTERFACE_ATTR_TYPE = "SAI_ROUTER_INTERFACE_ATTR_TYPE" + SAI_ROUTER_INTERFACE_TYPE_VLAN = "SAI_ROUTER_INTERFACE_TYPE_VLAN" + + DEFAULT_TUNNEL_PARAMS = { + "tunnel_type": "IPINIP", + "dst_ip": SELF_IPV4, + "dscp_mode": "pipe", + "ecn_mode": "standard", + "ttl_mode": "pipe", + "encap_tc_to_queue_map": TUNNEL_QOS_MAP_NAME, + "encap_tc_to_dscp_map": TUNNEL_QOS_MAP_NAME, + "decap_dscp_to_tc_map": TUNNEL_QOS_MAP_NAME, + "decap_tc_to_pg_map": TUNNEL_QOS_MAP_NAME + } + + DEFAULT_PEER_SWITCH_PARAMS = { + "address_ipv4": PEER_IPV4 + } + ecn_modes_map = { "standard" : "SAI_TUNNEL_DECAP_ECN_MODE_STANDARD", "copy_from_outer": "SAI_TUNNEL_DECAP_ECN_MODE_COPY_FROM_OUTER" @@ -45,34 +93,52 @@ class TestMuxTunnelBase(object): "pipe" : "SAI_TUNNEL_TTL_MODE_PIPE_MODEL", "uniform" : "SAI_TUNNEL_TTL_MODE_UNIFORM_MODEL" } + + TC_TO_DSCP_MAP = {str(i):str(i) for i in range(0, 8)} + TC_TO_QUEUE_MAP = {str(i):str(i) for i in range(0, 8)} + DSCP_TO_TC_MAP = {str(i):str(1) for i in range(0, 64)} + TC_TO_PRIORITY_GROUP_MAP = {str(i):str(i) for i in range(0, 8)} + def check_syslog(self, dvs, marker, err_log, 
expected_cnt): + (exitcode, num) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep \"%s\" | wc -l" % (marker, err_log)]) + assert num.strip() >= str(expected_cnt) - def create_vlan_interface(self, confdb, asicdb, dvs): + def create_vlan_interface(self, dvs): + confdb = dvs.get_config_db() fvs = {"vlanid": "1000"} - confdb.create_entry("VLAN", "Vlan1000", fvs) + confdb.create_entry("VLAN", self.VLAN_1000, fvs) fvs = {"tagging_mode": "untagged"} confdb.create_entry("VLAN_MEMBER", "Vlan1000|Ethernet0", fvs) confdb.create_entry("VLAN_MEMBER", "Vlan1000|Ethernet4", fvs) + confdb.create_entry("VLAN_MEMBER", "Vlan1000|Ethernet8", fvs) fvs = {"NULL": "NULL"} - confdb.create_entry("VLAN_INTERFACE", "Vlan1000", fvs) + confdb.create_entry("VLAN_INTERFACE", self.VLAN_1000, fvs) confdb.create_entry("VLAN_INTERFACE", "Vlan1000|192.168.0.1/24", fvs) confdb.create_entry("VLAN_INTERFACE", "Vlan1000|fc02:1000::1/64", fvs) - dvs.runcmd("config interface startup Ethernet0") - dvs.runcmd("config interface startup Ethernet4") - + dvs.port_admin_set("Ethernet0", "up") + dvs.port_admin_set("Ethernet4", "up") + dvs.port_admin_set("Ethernet8", "up") def create_mux_cable(self, confdb): - - fvs = { "server_ipv4":self.SERV1_IPV4+self.IPV4_MASK, "server_ipv6":self.SERV1_IPV6+self.IPV6_MASK } + fvs = { + "server_ipv4":self.SERV1_IPV4 + self.IPV4_MASK, + "server_ipv6":self.SERV1_IPV6 + self.IPV6_MASK, + "soc_ipv4": self.SERV1_SOC_IPV4 + self.IPV4_MASK, + "cable_type": "active-active" # "cable_type" is not used by orchagent, this is a dummy value + } confdb.create_entry(self.CONFIG_MUX_CABLE, "Ethernet0", fvs) - fvs = { "server_ipv4":self.SERV2_IPV4+self.IPV4_MASK, "server_ipv6":self.SERV2_IPV6+self.IPV6_MASK } + fvs = {"server_ipv4": self.SERV2_IPV4+self.IPV4_MASK, + "server_ipv6": self.SERV2_IPV6+self.IPV6_MASK} confdb.create_entry(self.CONFIG_MUX_CABLE, "Ethernet4", fvs) + fvs = {"server_ipv4": self.SERV3_IPV4+self.IPV4_MASK, + "server_ipv6": 
self.SERV3_IPV6+self.IPV6_MASK} + confdb.create_entry(self.CONFIG_MUX_CABLE, "Ethernet8", fvs) def set_mux_state(self, appdb, ifname, state_change): @@ -84,22 +150,74 @@ def set_mux_state(self, appdb, ifname, state_change): time.sleep(1) + def get_switch_oid(self, asicdb): + # Assumes only one switch is ever present + keys = asicdb.wait_for_n_keys(self.ASIC_SWITCH_TABLE, 1) + return keys[0] + + def get_vlan_rif_oid(self, asicdb): + # create_vlan_interface should be called before this method + # Assumes only one VLAN RIF is present + rifs = asicdb.get_keys(self.ASIC_RIF_TABLE) + + vlan_oid = '' + for rif_key in rifs: + entry = asicdb.get_entry(self.ASIC_RIF_TABLE, rif_key) + if entry[self.SAI_ROUTER_INTERFACE_ATTR_TYPE] == self.SAI_ROUTER_INTERFACE_TYPE_VLAN: + vlan_oid = rif_key + break + + return vlan_oid + + def get_nexthop_oid(self, asicdb, nexthop): + # gets nexthop oid + nexthop_keys = asicdb.get_keys(self.ASIC_NEXTHOP_TABLE) + + nexthop_oid = '' + for nexthop_key in nexthop_keys: + entry = asicdb.get_entry(self.ASIC_NEXTHOP_TABLE, nexthop_key) + if entry["SAI_NEXT_HOP_ATTR_IP"] == nexthop: + nexthop_oid = nexthop_key + break - def check_neigh_in_asic_db(self, asicdb, ip, expected=1): + return nexthop_oid + + def get_route_nexthop_oid(self, route_key, asicdb): + # gets nexthop oid + entry = asicdb.get_entry(self.ASIC_ROUTE_TABLE, route_key) + assert 'SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID' in entry - nbr = asicdb.wait_for_n_keys(self.ASIC_NEIGH_TABLE, expected) + return entry['SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID'] - found = False - for key in nbr: - entry = json.loads(key) - if entry["ip"] == ip: - found = True - entry = key - break + def check_tunnel_route_in_app_db(self, dvs, destinations, expected=True): + appdb = dvs.get_app_db() + + if expected: + appdb.wait_for_matching_keys(self.APP_TUNNEL_ROUTE_TABLE_NAME, destinations) + else: + appdb.wait_for_deleted_keys(self.APP_TUNNEL_ROUTE_TABLE_NAME, destinations) + + def check_neigh_in_asic_db(self, asicdb, ip, 
expected=True): + rif_oid = self.get_vlan_rif_oid(asicdb) + switch_oid = self.get_switch_oid(asicdb) + neigh_key_map = { + "ip": ip, + "rif": rif_oid, + "switch_id": switch_oid + } + expected_key = json.dumps(neigh_key_map, sort_keys=True, separators=(',', ':')) + + if expected: + nbr_keys = asicdb.wait_for_matching_keys(self.ASIC_NEIGH_TABLE, [expected_key]) - assert found - return entry + for key in nbr_keys: + if ip in key: + return key + else: + asicdb.wait_for_deleted_keys(self.ASIC_NEIGH_TABLE, [expected_key]) + + return '' def check_tnl_nexthop_in_asic_db(self, asicdb, expected=1): @@ -114,7 +232,6 @@ def check_tnl_nexthop_in_asic_db(self, asicdb, expected=1): assert tunnel_nh_id - def check_nexthop_in_asic_db(self, asicdb, key, standby=False): fvs = asicdb.get_entry(self.ASIC_ROUTE_TABLE, key) @@ -127,7 +244,6 @@ def check_nexthop_in_asic_db(self, asicdb, key, standby=False): else: assert (nhid != tunnel_nh_id) - def check_nexthop_group_in_asic_db(self, asicdb, key, num_tnl_nh=0): fvs = asicdb.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY", key) @@ -144,21 +260,34 @@ def check_nexthop_group_in_asic_db(self, asicdb, key, num_tnl_nh=0): for k in keys: fvs = asicdb.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER", k) assert fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] == nhg_id - + # Count the number of Nexthop member pointing to tunnel if fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] == tunnel_nh_id: - count += 1 + count += 1 assert num_tnl_nh == count + def check_route_nexthop(self, dvs_route, asicdb, route, nexthop, tunnel=False): + route_key = dvs_route.check_asicdb_route_entries([route]) + route_nexthop_oid = self.get_route_nexthop_oid(route_key[0], asicdb) + + if tunnel: + assert route_nexthop_oid == nexthop + return - def add_neighbor(self, dvs, ip, mac, v6=False): + nexthop_oid = self.get_nexthop_oid(asicdb, nexthop) - if v6: + assert route_nexthop_oid == nexthop_oid + + def add_neighbor(self, dvs, ip, mac): + if 
ip_address(ip).version == 6: dvs.runcmd("ip -6 neigh replace " + ip + " lladdr " + mac + " dev Vlan1000") else: dvs.runcmd("ip -4 neigh replace " + ip + " lladdr " + mac + " dev Vlan1000") + def del_neighbor(self, dvs, ip): + cmd = 'ip neigh del {} dev {}'.format(ip, self.VLAN_1000) + dvs.runcmd(cmd) def add_fdb(self, dvs, port, mac): @@ -178,31 +307,48 @@ def del_fdb(self, dvs, mac): time.sleep(1) - def create_and_test_neighbor(self, confdb, appdb, asicdb, dvs, dvs_route): - - self.create_vlan_interface(confdb, asicdb, dvs) + def add_route(self, dvs, route, nexthops, ifaces=[]): + apdb = dvs.get_app_db() + nexthop_str = ",".join(nexthops) + if len(ifaces) == 0: + ifaces = [self.VLAN_1000 for k in range(len(nexthops))] + iface_str = ",".join(ifaces) + ps = swsscommon.ProducerStateTable(apdb.db_connection, self.APP_ROUTE_TABLE) + fvs = swsscommon.FieldValuePairs( + [ + ("nexthop", nexthop_str), + ("ifname", iface_str) + ] + ) + ps.set(route, fvs) + + def del_route(self, dvs, route): + apdb = dvs.get_app_db() + ps = swsscommon.ProducerStateTable(apdb.db_connection, self.APP_ROUTE_TABLE) + ps._del(route) - self.create_mux_cable(confdb) + def create_and_test_neighbor(self, confdb, appdb, asicdb, dvs, dvs_route): self.set_mux_state(appdb, "Ethernet0", "active") self.set_mux_state(appdb, "Ethernet4", "standby") self.add_neighbor(dvs, self.SERV1_IPV4, "00:00:00:00:00:01") - # Broadcast neigh 192.168.0.255 is default added. 
Hence +1 for expected number - srv1_v4 = self.check_neigh_in_asic_db(asicdb, self.SERV1_IPV4, 2) + srv1_v4 = self.check_neigh_in_asic_db(asicdb, self.SERV1_IPV4) - self.add_neighbor(dvs, self.SERV1_IPV6, "00:00:00:00:00:01", True) - srv1_v6 = self.check_neigh_in_asic_db(asicdb, self.SERV1_IPV6, 3) + self.add_neighbor(dvs, self.SERV1_IPV6, "00:00:00:00:00:01") + srv1_v6 = self.check_neigh_in_asic_db(asicdb, self.SERV1_IPV6) existing_keys = asicdb.get_keys(self.ASIC_NEIGH_TABLE) self.add_neighbor(dvs, self.SERV2_IPV4, "00:00:00:00:00:02") - self.add_neighbor(dvs, self.SERV2_IPV6, "00:00:00:00:00:02", True) + self.add_neighbor(dvs, self.SERV2_IPV6, "00:00:00:00:00:02") time.sleep(1) # In standby mode, the entry must not be added to Neigh table but Route asicdb.wait_for_matching_keys(self.ASIC_NEIGH_TABLE, existing_keys) - dvs_route.check_asicdb_route_entries([self.SERV2_IPV4+self.IPV4_MASK, self.SERV2_IPV6+self.IPV6_MASK]) + dvs_route.check_asicdb_route_entries( + [self.SERV2_IPV4+self.IPV4_MASK, self.SERV2_IPV6+self.IPV6_MASK] + ) # The first standby route also creates as tunnel Nexthop self.check_tnl_nexthop_in_asic_db(asicdb, 3) @@ -212,15 +358,47 @@ def create_and_test_neighbor(self, confdb, appdb, asicdb, dvs, dvs_route): asicdb.wait_for_deleted_entry(self.ASIC_NEIGH_TABLE, srv1_v4) asicdb.wait_for_deleted_entry(self.ASIC_NEIGH_TABLE, srv1_v6) - dvs_route.check_asicdb_route_entries([self.SERV1_IPV4+self.IPV4_MASK, self.SERV1_IPV6+self.IPV6_MASK]) + dvs_route.check_asicdb_route_entries( + [self.SERV1_IPV4+self.IPV4_MASK, self.SERV1_IPV6+self.IPV6_MASK] + ) # Change state to Active. 
This will add Neigh and delete Route self.set_mux_state(appdb, "Ethernet4", "active") - dvs_route.check_asicdb_deleted_route_entries([self.SERV2_IPV4+self.IPV4_MASK, self.SERV2_IPV6+self.IPV6_MASK]) - self.check_neigh_in_asic_db(asicdb, self.SERV2_IPV4, 3) - self.check_neigh_in_asic_db(asicdb, self.SERV2_IPV6, 3) + dvs_route.check_asicdb_deleted_route_entries( + [self.SERV2_IPV4+self.IPV4_MASK, self.SERV2_IPV6+self.IPV6_MASK] + ) + self.check_neigh_in_asic_db(asicdb, self.SERV2_IPV4) + self.check_neigh_in_asic_db(asicdb, self.SERV2_IPV6) + + def create_and_test_soc(self, appdb, asicdb, dvs, dvs_route): + + self.set_mux_state(appdb, "Ethernet0", "active") + + self.add_fdb(dvs, "Ethernet0", "00-00-00-00-00-01") + self.add_neighbor(dvs, self.SERV1_SOC_IPV4, "00:00:00:00:00:01") + + time.sleep(1) + + srv1_soc_v4 = self.check_neigh_in_asic_db(asicdb, self.SERV1_SOC_IPV4) + self.check_tunnel_route_in_app_db(dvs, [self.SERV1_SOC_IPV4+self.IPV4_MASK], expected=False) + + self.set_mux_state(appdb, "Ethernet0", "standby") + + asicdb.wait_for_deleted_entry(self.ASIC_NEIGH_TABLE, srv1_soc_v4) + dvs_route.check_asicdb_route_entries( + [self.SERV1_SOC_IPV4+self.IPV4_MASK] + ) + self.check_tunnel_route_in_app_db(dvs, [self.SERV1_SOC_IPV4+self.IPV4_MASK], expected=False) + + marker = dvs.add_log_marker() + + self.set_mux_state(appdb, "Ethernet0", "active") + self.set_mux_state(appdb, "Ethernet0", "active") + self.check_syslog(dvs, marker, "Maintaining current MUX state", 1) + self.set_mux_state(appdb, "Ethernet0", "init") + self.check_syslog(dvs, marker, "State transition from active to init is not-handled", 1) def create_and_test_fdb(self, appdb, asicdb, dvs, dvs_route): @@ -233,27 +411,27 @@ def create_and_test_fdb(self, appdb, asicdb, dvs, dvs_route): ip_1 = "fc02:1000::10" ip_2 = "fc02:1000::11" - self.add_neighbor(dvs, ip_1, "00:00:00:00:00:11", True) - self.add_neighbor(dvs, ip_2, "00:00:00:00:00:12", True) + self.add_neighbor(dvs, ip_1, "00:00:00:00:00:11") + 
self.add_neighbor(dvs, ip_2, "00:00:00:00:00:12") # ip_1 is on Active Mux, hence added to Host table - self.check_neigh_in_asic_db(asicdb, ip_1, 4) + self.check_neigh_in_asic_db(asicdb, ip_1) # ip_2 is on Standby Mux, hence added to Route table dvs_route.check_asicdb_route_entries([ip_2+self.IPV6_MASK]) # Check ip_1 move to standby mux, should be pointing to tunnel - self.add_neighbor(dvs, ip_1, "00:00:00:00:00:12", True) + self.add_neighbor(dvs, ip_1, "00:00:00:00:00:12") # ip_1 moved to standby Mux, hence added to Route table dvs_route.check_asicdb_route_entries([ip_1+self.IPV6_MASK]) # Check ip_2 move to active mux, should be host entry - self.add_neighbor(dvs, ip_2, "00:00:00:00:00:11", True) + self.add_neighbor(dvs, ip_2, "00:00:00:00:00:11") # ip_2 moved to active Mux, hence remove from Route table dvs_route.check_asicdb_deleted_route_entries([ip_2+self.IPV6_MASK]) - self.check_neigh_in_asic_db(asicdb, ip_2, 4) + self.check_neigh_in_asic_db(asicdb, ip_2) # Simulate FDB aging out test case ip_3 = "192.168.0.200" @@ -273,13 +451,18 @@ def create_and_test_fdb(self, appdb, asicdb, dvs, dvs_route): self.set_mux_state(appdb, "Ethernet4", "active") dvs_route.check_asicdb_deleted_route_entries([ip_3+self.IPV4_MASK]) + self.del_fdb(dvs, "00-00-00-00-00-11") + def create_and_test_route(self, appdb, asicdb, dvs, dvs_route): self.set_mux_state(appdb, "Ethernet0", "active") rtprefix = "2.3.4.0/24" - dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route " + rtprefix + " " + self.SERV1_IPV4 + "\"") + dvs.runcmd( + "vtysh -c \"configure terminal\" -c \"ip route " + rtprefix + + " " + self.SERV1_IPV4 + "\"" + ) pdb = dvs.get_app_db() pdb.wait_for_entry("ROUTE_TABLE", rtprefix) @@ -321,81 +504,398 @@ def create_and_test_route(self, appdb, asicdb, dvs, dvs_route): self.set_mux_state(appdb, "Ethernet4", "active") dvs_route.check_asicdb_deleted_route_entries([rtprefix]) - # Test ECMP routes - - self.set_mux_state(appdb, "Ethernet0", "active") - self.set_mux_state(appdb, 
"Ethernet4", "active") + def multi_nexthop_test(self, dvs, dvs_route, asicdb, appdb, route, neighbors, macs): + mux_ports = ["Ethernet0", "Ethernet4"] + starting_states = [(ACTIVE, ACTIVE), (ACTIVE, STANDBY), (STANDBY, ACTIVE), (STANDBY, STANDBY)] - rtprefix = "5.6.7.0/24" + # Set state to active for initial state + for port in mux_ports: + self.set_mux_state(appdb, port, "active") - dvs_route.check_asicdb_deleted_route_entries([rtprefix]) - - ps = swsscommon.ProducerStateTable(pdb.db_connection, "ROUTE_TABLE") + # add neighbors for initial state + for i,neighbor in enumerate(neighbors): + self.add_neighbor(dvs, neighbor, macs[i]) - fvs = swsscommon.FieldValuePairs([("nexthop", self.SERV1_IPV4 + "," + self.SERV2_IPV4), ("ifname", "Vlan1000,Vlan1000")]) - - ps.set(rtprefix, fvs) + try: + # toggle between states and add route in various combos of state + print("Testing add/remove/update of route") + for start in starting_states: + print("Adding route with %s: %s and %s: %s" % (mux_ports[0], start[0], mux_ports[1], start[1])) + self.set_mux_state(appdb, mux_ports[0], start[0]) + self.set_mux_state(appdb, mux_ports[1], start[1]) + self.add_route(dvs, route, neighbors) + if start[0] == "active": + self.check_route_nexthop(dvs_route, asicdb, route, neighbors[0]) + elif start[1] == "active": + self.check_route_nexthop(dvs_route, asicdb, route, neighbors[1]) + else: + self.check_route_nexthop(dvs_route, asicdb, route, tunnel_nh_id, True) + + print("Testing fdb update in %s, %s for %s" % (start[0], start[1], neighbors[0])) + # move neighbor 1 + self.add_neighbor(dvs, neighbors[0], "00:aa:bb:cc:dd:ee") + if start[0] == "active": + self.check_route_nexthop(dvs_route, asicdb, route, neighbors[0]) + elif start[1] == "active": + self.check_route_nexthop(dvs_route, asicdb, route, neighbors[1]) + else: + self.check_route_nexthop(dvs_route, asicdb, route, tunnel_nh_id, True) + + # move neighbor 1 back + self.add_neighbor(dvs, neighbors[0], macs[i]) + + print("Testing fdb update in 
%s, %s for %s" % (start[0], start[1], neighbors[1])) + # move neighbor 2 + self.add_neighbor(dvs, neighbors[0], "00:aa:bb:cc:dd:ee") + if start[0] == "active": + self.check_route_nexthop(dvs_route, asicdb, route, neighbors[0]) + elif start[1] == "active": + self.check_route_nexthop(dvs_route, asicdb, route, neighbors[1]) + else: + self.check_route_nexthop(dvs_route, asicdb, route, tunnel_nh_id, True) + + # move neighbor 2 back + self.add_neighbor(dvs, neighbors[0], macs[i]) + + self.del_route(dvs, route) + + + # toggle mux states to check setState actions + print("Testing toggling state") + for start in starting_states: + self.set_mux_state(appdb, mux_ports[0], start[0]) + self.set_mux_state(appdb, mux_ports[1], start[1]) + self.add_route(dvs, route, neighbors) + + for toggle_index,port in enumerate(mux_ports): + keep_index = (toggle_index + 1) % 2 + + print("keeping %s as %s while toggling %s from %s" % \ + (mux_ports[keep_index], start[keep_index], mux_ports[toggle_index], start[toggle_index])) + + if start[toggle_index] == ACTIVE: + print("setting %s to %s" % (mux_ports[toggle_index], STANDBY)) + self.set_mux_state(appdb, mux_ports[toggle_index], STANDBY) + if start[keep_index] == ACTIVE: + self.check_route_nexthop(dvs_route, asicdb, route, neighbors[keep_index]) + else: + self.check_route_nexthop(dvs_route, asicdb, route, tunnel_nh_id, True) + + print("setting %s to %s" % (mux_ports[toggle_index], ACTIVE)) + self.set_mux_state(appdb, mux_ports[toggle_index], ACTIVE) + if start[keep_index] == ACTIVE: + self.check_route_nexthop(dvs_route, asicdb, route, neighbors[keep_index]) + else: + self.check_route_nexthop(dvs_route, asicdb, route, neighbors[toggle_index]) + else: + print("setting %s to %s" % (mux_ports[toggle_index], ACTIVE)) + self.set_mux_state(appdb, mux_ports[toggle_index], ACTIVE) + if start[keep_index] == ACTIVE: + self.check_route_nexthop(dvs_route, asicdb, route, neighbors[keep_index]) + else: + self.check_route_nexthop(dvs_route, asicdb, route, 
neighbors[toggle_index]) + + print("setting %s to %s" % (mux_ports[toggle_index], STANDBY)) + self.set_mux_state(appdb, mux_ports[toggle_index], STANDBY) + if start[keep_index] == ACTIVE: + self.check_route_nexthop(dvs_route, asicdb, route, neighbors[keep_index]) + else: + self.check_route_nexthop(dvs_route, asicdb, route, tunnel_nh_id, True) + self.del_route(dvs, route) + + for neighbor in neighbors: + self.del_neighbor(dvs, neighbor) + + print("Testing add/remove of neighbors") + for start in starting_states: + print("Testing add/remove of neighbors in %s, %s" % start) + self.set_mux_state(appdb, mux_ports[0], start[0]) + self.set_mux_state(appdb, mux_ports[1], start[1]) + self.add_route(dvs, route, neighbors) + + # add first neighbor + self.add_neighbor(dvs, neighbors[0], macs[0]) + if start[0] == ACTIVE: + self.check_route_nexthop(dvs_route, asicdb, route, neighbors[0]) + + # add second neighbor + self.add_neighbor(dvs, neighbors[1], macs[1]) + time.sleep(1) + if start[0] == ACTIVE: + self.check_route_nexthop(dvs_route, asicdb, route, neighbors[0]) + elif start[1] == ACTIVE: + self.check_route_nexthop(dvs_route, asicdb, route, neighbors[1]) + else: + self.check_route_nexthop(dvs_route, asicdb, route, tunnel_nh_id, True) + + # remove neighbors (shouldn't work) + for neighbor in neighbors: + self.del_neighbor(dvs, neighbor) + if start[0] == ACTIVE: + self.check_route_nexthop(dvs_route, asicdb, route, neighbors[0]) + elif start[1] == ACTIVE: + self.check_route_nexthop(dvs_route, asicdb, route, neighbors[1]) + else: + self.check_route_nexthop(dvs_route, asicdb, route, tunnel_nh_id, True) + + # add neighbor again to trick ip + for i,neighbor in enumerate(neighbors): + self.add_neighbor(dvs, neighbor, macs[i]) + + self.del_route(dvs,route) + for neighbor in neighbors: + self.del_neighbor(dvs, neighbor) + + # add the neighbors + for i,neighbor in enumerate(neighbors): + self.add_neighbor(dvs, neighbor, macs[i]) + + print("Testing multiple routes pointing to one of the 
NH") + r2 = "3.4.5.0/24" + for start in starting_states: + print("Adding routes with %s: %s and %s: %s" % (mux_ports[0], start[0], mux_ports[1], start[1])) + + self.set_mux_state(appdb, mux_ports[0], start[0]) + self.set_mux_state(appdb, mux_ports[1], start[1]) + self.add_route(dvs, route, neighbors) + + # add R2 -> NH1 + # R2 should behave like a normal route. + self.add_route(dvs, r2, [neighbors[0]]) + if start[0] == "active": + self.check_route_nexthop(dvs_route, asicdb, r2, neighbors[0]) + else: + self.check_route_nexthop(dvs_route, asicdb, r2, tunnel_nh_id, True) - # Check if route was propagated to ASIC DB - rtkeys = dvs_route.check_asicdb_route_entries([rtprefix]) + self.del_route(dvs,route) + self.del_route(dvs,r2) - # Check for nexthop group and validate nexthop group member in asic db - self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0]) + # remove the neighbors + for neighbor in neighbors: + self.del_neighbor(dvs, neighbor) - # Step: 1 - Change one NH to standby and verify ecmp route - self.set_mux_state(appdb, "Ethernet0", "standby") - self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 1) + # add one neighbor as MUX other as standalone + self.add_neighbor(dvs, neighbors[0], macs[i]) + self.add_neighbor(dvs, neighbors[1], "00:00:00:00:00:00") - # Step: 2 - Change the other NH to standby and verify ecmp route - self.set_mux_state(appdb, "Ethernet4", "standby") - self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 2) + print("Testing one mux, one standalone neighbor") + for start in starting_states: + print("Testing add/remove of neighbors in %s, %s" % start) + self.set_mux_state(appdb, mux_ports[0], start[0]) + self.set_mux_state(appdb, mux_ports[1], start[1]) + self.add_route(dvs, route, neighbors) - # Step: 3 - Change one NH to back to Active and verify ecmp route + if start[0] == "active": + self.check_route_nexthop(dvs_route, asicdb, route, neighbors[0]) + else: + # N2 should always be standby + self.check_route_nexthop(dvs_route, asicdb, 
route, tunnel_nh_id, True) + + self.del_route(dvs,route) + + # remove the neighbors + for neighbor in neighbors: + self.del_neighbor(dvs, neighbor) + + finally: + # Cleanup + for port in mux_ports: + self.set_mux_state(appdb, port, "active") + self.del_route(dvs,route) + for neighbor in neighbors: + self.del_neighbor(dvs, neighbor) + + def create_and_test_multi_nexthop_routes(self, dvs, dvs_route, appdb, macs, asicdb): + ''' + Tests case where there are multiple nexthops tied to a route + If the nexthops are tied to a mux, then only the first active neighbor will be programmed + If not, the route should point to a regular ECMP group + ''' + + route_ipv4 = "2.3.4.0/24" + route_ipv6 = "2023::/64" + ipv4_neighbors = [self.SERV1_IPV4, self.SERV2_IPV4] + ipv6_neighbors = [self.SERV1_IPV6, self.SERV2_IPV6] + + self.multi_nexthop_test(dvs, dvs_route, asicdb, appdb, route_ipv4, ipv4_neighbors, macs) + self.multi_nexthop_test(dvs, dvs_route, asicdb, appdb, route_ipv6, ipv6_neighbors, macs) + + try: + # neighbor not tied to mux cable case + non_mux_ipv4 = ["11.11.11.11", "12.12.12.12"] + non_mux_ipv6 = ["2222::100", "2222::101"] + non_mux_macs = ["00:aa:bb:cc:dd:ee", "00:aa:bb:cc:dd:ff"] + print("Testing neighbors that are not tied to a mux cable") + + for i in range(2): + self.add_neighbor(dvs, non_mux_ipv4[i], non_mux_macs[i]) + self.add_neighbor(dvs, non_mux_ipv6[i], non_mux_macs[i]) + + self.add_route(dvs, route_ipv4, non_mux_ipv4) + self.add_route(dvs, route_ipv6, non_mux_ipv6) + + # Check for route pointing to first neighbor + self.check_route_nexthop(dvs_route, asicdb, route_ipv4, non_mux_ipv4[0]) + self.check_route_nexthop(dvs_route, asicdb, route_ipv6, non_mux_ipv6[0]) + + # Cleanup + self.del_route(dvs, route_ipv4) + self.del_route(dvs, route_ipv6) + for i in range(2): + self.del_neighbor(dvs, non_mux_ipv4[i]) + self.del_neighbor(dvs, non_mux_ipv6[i]) + + # neighbor not in mux cable case + non_mux_ipv4 = ["11.11.11.11", "12.12.12.12"] + non_mux_ipv6 = ["2222::100", 
"2222::101"] + non_mux_macs = ["00:aa:bb:cc:dd:ee", "00:aa:bb:cc:dd:ff"] + print("Testing neighbors that are not tied to a mux cable") + for i in range(2): + self.add_neighbor(dvs, non_mux_ipv4[i], non_mux_macs[i]) + self.add_neighbor(dvs, non_mux_ipv6[i], non_mux_macs[i]) + + self.add_route(dvs, route_ipv4, non_mux_ipv4) + self.add_route(dvs, route_ipv6, non_mux_ipv6) + + # Check for route pointing to first neighbor + self.check_route_nexthop(dvs_route, asicdb, route_ipv4, non_mux_ipv4[0]) + self.check_route_nexthop(dvs_route, asicdb, route_ipv6, non_mux_ipv6[0]) + + # Cleanup + self.del_route(dvs, route_ipv4) + self.del_route(dvs, route_ipv6) + for i in range(2): + self.del_neighbor(dvs, non_mux_ipv4[i]) + self.del_neighbor(dvs, non_mux_ipv6[i]) + + + # add one neighbor as MUX other as non-mux + mux_ports = ["Ethernet0", "Ethernet4"] + self.set_mux_state(appdb, mux_ports[0], ACTIVE) + self.set_mux_state(appdb, mux_ports[1], ACTIVE) + self.add_neighbor(dvs, ipv4_neighbors[0], macs[0]) + self.add_neighbor(dvs, non_mux_ipv4[0], "00:aa:bb:cc:dd:ee") + + print("Testing one mux, one standalone neighbor ipv4") + starting_states = [(ACTIVE, ACTIVE), (ACTIVE, STANDBY), (STANDBY, ACTIVE), (STANDBY, STANDBY)] + for start in starting_states: + print("Testing add/remove of neighbors in %s, %s" % start) + self.set_mux_state(appdb, mux_ports[0], start[0]) + self.set_mux_state(appdb, mux_ports[1], start[1]) + self.add_route(dvs, route_ipv4, [ipv4_neighbors[0], non_mux_ipv4[0]]) + + #N2 should always be active + self.check_route_nexthop(dvs_route, asicdb, route_ipv4, non_mux_ipv4[0]) + + self.del_route(dvs,route_ipv4) + + self.del_neighbor(dvs, ipv4_neighbors[0]) + self.del_neighbor(dvs, non_mux_ipv4[0]) + + # add one neighbor as MUX other as non-mux for ipv6 + self.set_mux_state(appdb, mux_ports[0], ACTIVE) + self.set_mux_state(appdb, mux_ports[1], ACTIVE) + self.add_neighbor(dvs, ipv6_neighbors[0], macs[0]) + self.add_neighbor(dvs, non_mux_ipv6[0], "00:aa:bb:cc:dd:ee") + + 
print("Testing one mux, one standalone neighbor ipv6") + mux_ports = ["Ethernet0", "Ethernet4"] + starting_states = [(ACTIVE, ACTIVE), (ACTIVE, STANDBY), (STANDBY, ACTIVE), (STANDBY, STANDBY)] + for start in starting_states: + print("Testing add/remove of neighbors in %s, %s" % start) + self.set_mux_state(appdb, mux_ports[0], start[0]) + self.set_mux_state(appdb, mux_ports[1], start[1]) + self.add_route(dvs, route_ipv6, [ipv6_neighbors[0], non_mux_ipv6[0]]) + + #N2 should always be active + self.check_route_nexthop(dvs_route, asicdb, route_ipv6, non_mux_ipv6[0]) + + self.del_route(dvs,route_ipv6) + + self.del_neighbor(dvs, ipv6_neighbors[0]) + self.del_neighbor(dvs, non_mux_ipv6[0]) + + finally: + # Cleanup + self.del_route(dvs, route_ipv4) + self.del_route(dvs, route_ipv6) + for i in range(2): + self.del_neighbor(dvs, non_mux_ipv4[i]) + self.del_neighbor(dvs, non_mux_ipv6[i]) + + def create_and_test_NH_routes(self, appdb, asicdb, dvs, dvs_route, mac): + ''' + Tests case where neighbor is removed in standby and added in active with route + ''' + nh_route = "2.2.2.0/24" + nh_route_ipv6 = "2023::/64" + neigh_ip = self.SERV1_IPV4 + neigh_ipv6 = self.SERV1_IPV6 + apdb = dvs.get_app_db() + + # Setup self.set_mux_state(appdb, "Ethernet0", "active") - self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 1) - - # Step: 4 - Change the other NH to Active and verify ecmp route - self.set_mux_state(appdb, "Ethernet4", "active") - self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0]) - - ps._del(rtprefix) + self.add_neighbor(dvs, neigh_ip, mac) + self.add_neighbor(dvs, neigh_ipv6, mac) + dvs.runcmd( + "vtysh -c \"configure terminal\" -c \"ip route " + nh_route + + " " + neigh_ip + "\"" + ) + dvs.runcmd( + "vtysh -c \"configure terminal\" -c \"ipv6 route " + nh_route_ipv6 + + " " + neigh_ipv6 + "\"" + ) + apdb.wait_for_entry("ROUTE_TABLE", nh_route) + apdb.wait_for_entry("ROUTE_TABLE", nh_route_ipv6) + + rtkeys = dvs_route.check_asicdb_route_entries([nh_route]) + rtkeys_ipv6 
= dvs_route.check_asicdb_route_entries([nh_route_ipv6]) + self.check_nexthop_in_asic_db(asicdb, rtkeys[0]) + self.check_nexthop_in_asic_db(asicdb, rtkeys_ipv6[0]) - # Test IPv6 ECMP routes and start with standby config + # Set state to standby and delete neighbor self.set_mux_state(appdb, "Ethernet0", "standby") - self.set_mux_state(appdb, "Ethernet4", "standby") - - rtprefix = "2020::/64" - - dvs_route.check_asicdb_deleted_route_entries([rtprefix]) - - ps = swsscommon.ProducerStateTable(pdb.db_connection, "ROUTE_TABLE") - - fvs = swsscommon.FieldValuePairs([("nexthop", self.SERV1_IPV6 + "," + self.SERV2_IPV6), ("ifname", "tun0,tun0")]) - - ps.set(rtprefix, fvs) + self.check_nexthop_in_asic_db(asicdb, rtkeys[0], True) + self.check_nexthop_in_asic_db(asicdb, rtkeys_ipv6[0], True) - # Check if route was propagated to ASIC DB - rtkeys = dvs_route.check_asicdb_route_entries([rtprefix]) + self.del_neighbor(dvs, neigh_ip) + self.del_neighbor(dvs, neigh_ipv6) + apdb.wait_for_deleted_entry(self.APP_NEIGH_TABLE, neigh_ip) + apdb.wait_for_deleted_entry(self.APP_NEIGH_TABLE, neigh_ipv6) + asicdb.wait_for_deleted_entry(self.ASIC_NEIGH_TABLE, neigh_ip) + asicdb.wait_for_deleted_entry(self.ASIC_NEIGH_TABLE, neigh_ip) - # Check for nexthop group and validate nexthop group member in asic db - self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 2) + self.check_nexthop_in_asic_db(asicdb, rtkeys[0], True) + self.check_nexthop_in_asic_db(asicdb, rtkeys_ipv6[0], True) - # Step: 1 - Change one NH to active and verify ecmp route + # Set state to active, learn neighbor again self.set_mux_state(appdb, "Ethernet0", "active") - self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 1) - # Step: 2 - Change the other NH to active and verify ecmp route - self.set_mux_state(appdb, "Ethernet4", "active") - self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0]) - - # Step: 3 - Change one NH to back to standby and verify ecmp route - self.set_mux_state(appdb, "Ethernet0", "standby") - 
self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 1) - - # Step: 4 - Change the other NH to standby and verify ecmp route - self.set_mux_state(appdb, "Ethernet4", "standby") - self.check_nexthop_group_in_asic_db(asicdb, rtkeys[0], 2) + self.add_neighbor(dvs, neigh_ip, mac) + self.add_neighbor(dvs, neigh_ipv6, mac) + self.check_neigh_in_asic_db(asicdb, neigh_ip) + self.check_neigh_in_asic_db(asicdb, neigh_ipv6) + self.check_nexthop_in_asic_db(asicdb, rtkeys[0]) + self.check_nexthop_in_asic_db(asicdb, rtkeys_ipv6[0]) + dvs.runcmd( + "ip neigh flush " + neigh_ip + ) + dvs.runcmd( + "ip neigh flush " + neigh_ipv6 + ) + + # Cleanup + dvs.runcmd( + "vtysh -c \"configure terminal\" -c \"no ip route " + nh_route + + " " + neigh_ip + "\"" + ) + dvs.runcmd( + "vtysh -c \"configure terminal\" -c \"no ipv6 route " + nh_route_ipv6 + + " " + neigh_ipv6 + "\"" + ) + self.del_neighbor(dvs, neigh_ip) + self.del_neighbor(dvs, neigh_ipv6) def get_expected_sai_qualifiers(self, portlist, dvs_acl): expected_sai_qualifiers = { @@ -405,59 +905,66 @@ def get_expected_sai_qualifiers(self, portlist, dvs_acl): return expected_sai_qualifiers - - def create_and_test_acl(self, appdb, asicdb, dvs, dvs_acl): + def create_and_test_acl(self, appdb, dvs_acl): # Start with active, verify NO ACL rules exists self.set_mux_state(appdb, "Ethernet0", "active") self.set_mux_state(appdb, "Ethernet4", "active") + self.set_mux_state(appdb, "Ethernet8", "active") dvs_acl.verify_no_acl_rules() - # Set one mux port to standby, verify ACL rule with inport bitmap (1 port) + # Set mux port in active-active cable type, no ACL rules programmed self.set_mux_state(appdb, "Ethernet0", "standby") - sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0"], dvs_acl) + dvs_acl.verify_no_acl_rules() + + # Set one mux port to standby, verify ACL rule with inport bitmap (1 port) + self.set_mux_state(appdb, "Ethernet4", "standby") + sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet4"], dvs_acl) 
dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) # Set two mux ports to standby, verify ACL rule with inport bitmap (2 ports) - self.set_mux_state(appdb, "Ethernet4", "standby") - sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0","Ethernet4"], dvs_acl) + self.set_mux_state(appdb, "Ethernet8", "standby") + sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet4", "Ethernet8"], dvs_acl) dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) - # Set one mux port to active, verify ACL rule with inport bitmap (1 port) self.set_mux_state(appdb, "Ethernet0", "active") - sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet4"], dvs_acl) + sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet4", "Ethernet8"], dvs_acl) dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) - # Set last mux port to active, verify ACL rule is deleted + # Set one mux port to active, verify ACL rule with inport bitmap (1 port) self.set_mux_state(appdb, "Ethernet4", "active") + sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet8"], dvs_acl) + dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) + + # Set last mux port to active, verify ACL rule is deleted + self.set_mux_state(appdb, "Ethernet8", "active") dvs_acl.verify_no_acl_rules() # Set unknown state and verify the behavior as standby - self.set_mux_state(appdb, "Ethernet0", "unknown") - sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0"], dvs_acl) + self.set_mux_state(appdb, "Ethernet4", "unknown") + sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet4"], dvs_acl) dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) # Verify change while setting unknown from active - self.set_mux_state(appdb, "Ethernet4", "unknown") - sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0","Ethernet4"], dvs_acl) + self.set_mux_state(appdb, "Ethernet8", 
"unknown") + sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet4", "Ethernet8"], dvs_acl) dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) - self.set_mux_state(appdb, "Ethernet0", "active") - sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet4"], dvs_acl) + self.set_mux_state(appdb, "Ethernet4", "active") + sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet8"], dvs_acl) dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) - self.set_mux_state(appdb, "Ethernet0", "standby") - sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0","Ethernet4"], dvs_acl) + self.set_mux_state(appdb, "Ethernet4", "standby") + sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet4", "Ethernet8"], dvs_acl) dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) # Verify no change while setting unknown from standby - self.set_mux_state(appdb, "Ethernet0", "unknown") - sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet0","Ethernet4"], dvs_acl) + self.set_mux_state(appdb, "Ethernet4", "unknown") + sai_qualifier = self.get_expected_sai_qualifiers(["Ethernet4", "Ethernet8"], dvs_acl) dvs_acl.verify_acl_rule(sai_qualifier, action="DROP", priority=self.ACL_PRIORITY) - - def create_and_test_metrics(self, appdb, statedb, dvs): + def create_and_test_metrics(self, appdb, statedb): # Set to active and test attributes for start and end time self.set_mux_state(appdb, "Ethernet0", "active") @@ -471,7 +978,7 @@ def create_and_test_metrics(self, appdb, statedb, dvs): assert fvs != {} start = end = False - for f,v in fvs.items(): + for f, _ in fvs.items(): if f == "orch_switch_active_start": start = True elif f == "orch_switch_active_end": @@ -493,7 +1000,7 @@ def create_and_test_metrics(self, appdb, statedb, dvs): assert fvs != {} start = end = False - for f,v in fvs.items(): + for f, v in fvs.items(): if f == "orch_switch_standby_start": start = True elif f == 
"orch_switch_standby_end": @@ -502,26 +1009,17 @@ def create_and_test_metrics(self, appdb, statedb, dvs): assert start assert end - def check_interface_exists_in_asicdb(self, asicdb, sai_oid): asicdb.wait_for_entry(self.ASIC_RIF_TABLE, sai_oid) return True - def check_vr_exists_in_asicdb(self, asicdb, sai_oid): asicdb.wait_for_entry(self.ASIC_VRF_TABLE, sai_oid) return True - - def create_and_test_peer(self, db, asicdb, peer_name, peer_ip, src_ip): + def create_and_test_peer(self, asicdb, tc_to_dscp_map_oid=None, tc_to_queue_map_oid=None): """ Create PEER entry verify all needed enties in ASIC DB exists """ - peer_attrs = { - "address_ipv4": peer_ip - } - - db.create_entry("PEER_SWITCH", peer_name, peer_attrs) - # check asic db table # There will be two tunnels, one P2MP and another P2P tunnels = asicdb.wait_for_n_keys(self.ASIC_TUNNEL_TABLE, 2) @@ -542,13 +1040,18 @@ def create_and_test_peer(self, db, asicdb, peer_name, peer_ip, src_ip): fvs = asicdb.wait_for_entry(self.ASIC_TUNNEL_TABLE, p2p_obj) + if tc_to_dscp_map_oid: + assert "SAI_TUNNEL_ATTR_ENCAP_QOS_TC_AND_COLOR_TO_DSCP_MAP" in fvs + if tc_to_queue_map_oid: + assert "SAI_TUNNEL_ATTR_ENCAP_QOS_TC_TO_QUEUE_MAP" in fvs + for field, value in fvs.items(): if field == "SAI_TUNNEL_ATTR_TYPE": assert value == "SAI_TUNNEL_TYPE_IPINIP" elif field == "SAI_TUNNEL_ATTR_ENCAP_SRC_IP": - assert value == src_ip + assert value == self.SELF_IPV4 elif field == "SAI_TUNNEL_ATTR_ENCAP_DST_IP": - assert value == peer_ip + assert value == self.PEER_IPV4 elif field == "SAI_TUNNEL_ATTR_PEER_MODE": assert value == "SAI_TUNNEL_PEER_MODE_P2P" elif field == "SAI_TUNNEL_ATTR_OVERLAY_INTERFACE": @@ -557,49 +1060,62 @@ def create_and_test_peer(self, db, asicdb, peer_name, peer_ip, src_ip): assert self.check_interface_exists_in_asicdb(asicdb, value) elif field == "SAI_TUNNEL_ATTR_ENCAP_TTL_MODE": assert value == "SAI_TUNNEL_TTL_MODE_PIPE_MODEL" + elif field == "SAI_TUNNEL_ATTR_DECAP_TTL_MODE": + assert value == 
"SAI_TUNNEL_TTL_MODE_PIPE_MODEL" elif field == "SAI_TUNNEL_ATTR_LOOPBACK_PACKET_ACTION": assert value == "SAI_PACKET_ACTION_DROP" + elif field == "SAI_TUNNEL_ATTR_ENCAP_QOS_TC_AND_COLOR_TO_DSCP_MAP": + assert value == tc_to_dscp_map_oid + elif field == "SAI_TUNNEL_ATTR_ENCAP_QOS_TC_TO_QUEUE_MAP": + assert value == tc_to_queue_map_oid + elif field == "SAI_TUNNEL_ATTR_ENCAP_DSCP_MODE": + assert value == "SAI_TUNNEL_DSCP_MODE_PIPE_MODEL" + elif field == "SAI_TUNNEL_ATTR_DECAP_DSCP_MODE": + assert value == "SAI_TUNNEL_DSCP_MODE_PIPE_MODEL" else: assert False, "Field %s is not tested" % field - - def check_tunnel_termination_entry_exists_in_asicdb(self, asicdb, tunnel_sai_oid, dst_ips): + def check_tunnel_termination_entry_exists_in_asicdb(self, asicdb, tunnel_sai_oid, dst_ips, src_ip=None): tunnel_term_entries = asicdb.wait_for_n_keys(self.ASIC_TUNNEL_TERM_ENTRIES, len(dst_ips)) - + expected_term_type = "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2P" if src_ip else "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP" + expected_len = 6 if src_ip else 5 for term_entry in tunnel_term_entries: fvs = asicdb.get_entry(self.ASIC_TUNNEL_TERM_ENTRIES, term_entry) - assert len(fvs) == 5 + assert len(fvs) == expected_len for field, value in fvs.items(): if field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_VR_ID": assert self.check_vr_exists_in_asicdb(asicdb, value) elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TYPE": - assert value == "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP" + assert value == expected_term_type elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TUNNEL_TYPE": assert value == "SAI_TUNNEL_TYPE_IPINIP" elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_ACTION_TUNNEL_ID": assert value == tunnel_sai_oid elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP": assert value in dst_ips + elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_SRC_IP" and src_ip: + assert value == src_ip else: assert False, "Field %s is not tested" % field - - def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): + 
def create_and_test_tunnel(self, db, asicdb, tunnel_name, tunnel_params): """ Create tunnel and verify all needed enties in ASIC DB exists """ - is_symmetric_tunnel = "src_ip" in kwargs; + is_symmetric_tunnel = "src_ip" in tunnel_params - # create tunnel entry in DB - ps = swsscommon.ProducerStateTable(db, self.APP_TUNNEL_DECAP_TABLE_NAME) - - fvs = create_fvs(**kwargs) + # 6 parameters to check in case of decap tunnel + # + 1 (SAI_TUNNEL_ATTR_ENCAP_SRC_IP) in case of symmetric tunnel + expected_len = 7 if is_symmetric_tunnel else 6 - ps.set(tunnel_name, fvs) + if 'decap_tc_to_pg_map_id' in tunnel_params: + expected_len += 1 + decap_tc_to_pg_map_id = tunnel_params.pop('decap_tc_to_pg_map_id') - # wait till config will be applied - time.sleep(1) + if 'decap_dscp_to_tc_map_id' in tunnel_params: + expected_len += 1 + decap_dscp_to_tc_map_id = tunnel_params.pop('decap_dscp_to_tc_map_id') # check asic db table tunnels = asicdb.wait_for_n_keys(self.ASIC_TUNNEL_TABLE, 1) @@ -608,19 +1124,18 @@ def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): fvs = asicdb.wait_for_entry(self.ASIC_TUNNEL_TABLE, tunnel_sai_obj) - # 6 parameters to check in case of decap tunnel - # + 1 (SAI_TUNNEL_ATTR_ENCAP_SRC_IP) in case of symmetric tunnel - assert len(fvs) == 7 if is_symmetric_tunnel else 6 + assert len(fvs) == expected_len + + expected_ecn_mode = self.ecn_modes_map[tunnel_params["ecn_mode"]] + expected_dscp_mode = self.dscp_modes_map[tunnel_params["dscp_mode"]] + expected_ttl_mode = self.ttl_modes_map[tunnel_params["ttl_mode"]] - expected_ecn_mode = self.ecn_modes_map[kwargs["ecn_mode"]] - expected_dscp_mode = self.dscp_modes_map[kwargs["dscp_mode"]] - expected_ttl_mode = self.ttl_modes_map[kwargs["ttl_mode"]] for field, value in fvs.items(): if field == "SAI_TUNNEL_ATTR_TYPE": assert value == "SAI_TUNNEL_TYPE_IPINIP" elif field == "SAI_TUNNEL_ATTR_ENCAP_SRC_IP": - assert value == kwargs["src_ip"] + assert value == tunnel_params["src_ip"] elif field == 
"SAI_TUNNEL_ATTR_DECAP_ECN_MODE": assert value == expected_ecn_mode elif field == "SAI_TUNNEL_ATTR_DECAP_TTL_MODE": @@ -631,11 +1146,16 @@ def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): assert self.check_interface_exists_in_asicdb(asicdb, value) elif field == "SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE": assert self.check_interface_exists_in_asicdb(asicdb, value) + elif field == "SAI_TUNNEL_ATTR_DECAP_QOS_DSCP_TO_TC_MAP": + assert value == decap_dscp_to_tc_map_id + elif field == "SAI_TUNNEL_ATTR_DECAP_QOS_TC_TO_PRIORITY_GROUP_MAP": + assert value == decap_tc_to_pg_map_id else: assert False, "Field %s is not tested" % field - self.check_tunnel_termination_entry_exists_in_asicdb(asicdb, tunnel_sai_obj, kwargs["dst_ip"].split(",")) + src_ip = tunnel_params['src_ip'] if 'src_ip' in tunnel_params else None + self.check_tunnel_termination_entry_exists_in_asicdb(asicdb, tunnel_sai_obj, tunnel_params["dst_ip"].split(","), src_ip) def remove_and_test_tunnel(self, db, asicdb, tunnel_name): """ Removes tunnel and checks that ASIC db is clear""" @@ -650,7 +1170,7 @@ def remove_and_test_tunnel(self, db, asicdb, tunnel_name): status, fvs = tunnel_table.get(tunnel_sai_obj) # get overlay loopback interface oid to check if it is deleted with the tunnel - overlay_infs_id = {f:v for f,v in fvs}["SAI_TUNNEL_ATTR_OVERLAY_INTERFACE"] + overlay_infs_id = {f:v for f, v in fvs}["SAI_TUNNEL_ATTR_OVERLAY_INTERFACE"] ps = swsscommon.ProducerStateTable(db, self.APP_TUNNEL_DECAP_TABLE_NAME) ps.set(tunnel_name, create_fvs(), 'DEL') @@ -663,6 +1183,39 @@ def remove_and_test_tunnel(self, db, asicdb, tunnel_name): assert len(tunnel_app_table.getKeys()) == 0 assert not self.check_interface_exists_in_asicdb(asicdb, overlay_infs_id) + def check_app_db_neigh_table( + self, appdb, intf, neigh_ip, + mac="00:00:00:00:00:00", expect_entry=True + ): + key = "{}:{}".format(intf, neigh_ip) + if isinstance(ip_address(neigh_ip), IPv4Address): + family = 'IPv4' + else: + family = 'IPv6' + + if 
expect_entry: + appdb.wait_for_matching_keys(self.APP_NEIGH_TABLE, [key]) + appdb.wait_for_field_match(self.APP_NEIGH_TABLE, key, {'family': family}) + appdb.wait_for_field_match(self.APP_NEIGH_TABLE, key, {'neigh': mac}) + else: + appdb.wait_for_deleted_keys(self.APP_NEIGH_TABLE, key) + def add_qos_map(self, configdb, asicdb, qos_map_type_name, qos_map_name, qos_map): + current_oids = asicdb.get_keys(self.ASIC_QOS_MAP_TABLE_KEY) + # Apply QoS map to config db + table = swsscommon.Table(configdb.db_connection, qos_map_type_name) + fvs = swsscommon.FieldValuePairs(list(qos_map.items())) + table.set(qos_map_name, fvs) + time.sleep(1) + + diff = set(asicdb.get_keys(self.ASIC_QOS_MAP_TABLE_KEY)) - set(current_oids) + assert len(diff) == 1 + oid = diff.pop() + return oid + + def remove_qos_map(self, configdb, qos_map_type_name, qos_map_oid): + """ Remove the testing qos map""" + table = swsscommon.Table(configdb.db_connection, qos_map_type_name) + table._del(qos_map_oid) def cleanup_left_over(self, db, asicdb): """ Cleanup APP and ASIC tables """ @@ -679,77 +1232,349 @@ def cleanup_left_over(self, db, asicdb): for key in tunnel_app_table.getKeys(): tunnel_table._del(key) + def ping_ip(self, dvs, ip): + dvs.runcmd(self.PING_CMD.format(ip=ip)) + + def check_neighbor_state( + self, dvs, dvs_route, neigh_ip, expect_route=True, + expect_neigh=False, expected_mac='00:00:00:00:00:00' + ): + """ + Checks the status of neighbor entries in APPL and ASIC DB + """ + if expect_route and expect_neigh: + pytest.fail('expect_routes and expect_neigh cannot both be True') + app_db = dvs.get_app_db() + asic_db = dvs.get_asic_db() + prefix = str(ip_network(neigh_ip)) + self.check_app_db_neigh_table( + app_db, self.VLAN_1000, neigh_ip, + mac=expected_mac, expect_entry=expect_route + ) + if expect_route: + self.check_tnl_nexthop_in_asic_db(asic_db) + routes = dvs_route.check_asicdb_route_entries([prefix]) + for route in routes: + self.check_nexthop_in_asic_db(asic_db, route, 
standby=expect_route) + else: + dvs_route.check_asicdb_deleted_route_entries([prefix]) + self.check_neigh_in_asic_db(asic_db, neigh_ip, expected=expect_neigh) + + def execute_action(self, action, dvs, test_info): + if action in (PING_SERV, PING_NEIGH): + self.ping_ip(dvs, test_info[IP]) + elif action in (ACTIVE, STANDBY): + app_db_connector = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + self.set_mux_state(app_db_connector, test_info[INTF], action) + elif action == RESOLVE_ENTRY: + self.add_neighbor(dvs, test_info[IP], test_info[MAC]) + elif action == DELETE_ENTRY: + self.del_neighbor(dvs, test_info[IP]) + else: + pytest.fail('Invalid test action {}'.format(action)) + + @pytest.fixture(scope='module') + def setup_vlan(self, dvs): + self.create_vlan_interface(dvs) + + @pytest.fixture(scope='module') + def setup_mux_cable(self, dvs): + config_db = dvs.get_config_db() + self.create_mux_cable(config_db) + + @pytest.fixture(scope='module') + def setup_tunnel(self, dvs): + app_db_connector = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + ps = swsscommon.ProducerStateTable(app_db_connector, self.APP_TUNNEL_DECAP_TABLE_NAME) + fvs = create_fvs(**self.DEFAULT_TUNNEL_PARAMS) + ps.set(self.MUX_TUNNEL_0, fvs) + + @pytest.fixture + def setup_peer_switch(self, dvs): + config_db = dvs.get_config_db() + config_db.create_entry( + self.CONFIG_PEER_SWITCH, + self.PEER_SWITCH_HOST, + self.DEFAULT_PEER_SWITCH_PARAMS + ) + + @pytest.fixture + def remove_peer_switch(self, dvs): + config_db = dvs.get_config_db() + config_db.delete_entry(self.CONFIG_PEER_SWITCH, self.PEER_SWITCH_HOST) + + @pytest.fixture(params=['IPv4', 'IPv6']) + def ip_version(self, request): + return request.param + + def clear_neighbors(self, dvs): + _, neighs_str = dvs.runcmd('ip neigh show all') + neighs = [entry.split()[0] for entry in neighs_str.split('\n')[:-1]] + + for neigh in neighs: + self.del_neighbor(dvs, neigh) + + @pytest.fixture + def neighbor_cleanup(self, dvs): + """ + 
Ensures that all kernel neighbors are removed before and after tests + """ + self.clear_neighbors(dvs) + yield + self.clear_neighbors(dvs) + + @pytest.fixture + def server_test_ips(self, ip_version): + if ip_version == 'IPv4': + return [self.SERV1_IPV4, self.SERV2_IPV4, self.SERV3_IPV4] + else: + return [self.SERV1_IPV6, self.SERV2_IPV6, self.SERV3_IPV6] + + @pytest.fixture + def neigh_test_ips(self, ip_version): + if ip_version == 'IPv4': + return [self.NEIGH1_IPV4, self.NEIGH2_IPV4, self.NEIGH3_IPV4] + else: + return [self.NEIGH1_IPV6, self.NEIGH2_IPV6, self.NEIGH3_IPV6] + + @pytest.fixture + def ips_for_test(self, server_test_ips, neigh_test_ips, neigh_miss_test_sequence): + # Assumes that each test sequence has at exactly one of + # PING_NEIGH OR PING_SERV as a step + for step in neigh_miss_test_sequence: + if step[TEST_ACTION] == PING_SERV: + return server_test_ips + if step[TEST_ACTION] == PING_NEIGH: + return neigh_test_ips + + # If we got here, the test sequence did not contain a ping command + pytest.fail('No ping command found in test sequence {}'.format(neigh_miss_test_sequence)) + + @pytest.fixture + def ip_to_intf_map(self, server_test_ips, neigh_test_ips): + map = { + server_test_ips[0]: 'Ethernet0', + server_test_ips[1]: 'Ethernet4', + server_test_ips[2]: 'Ethernet8', + neigh_test_ips[0]: 'Ethernet0', + neigh_test_ips[1]: 'Ethernet4', + neigh_test_ips[2]: 'Ethernet8' + } + return map + + @pytest.fixture( + params=NEIGH_MISS_TESTS, + ids=['->'.join([step[TEST_ACTION] for step in scenario]) + for scenario in NEIGH_MISS_TESTS] + ) + def neigh_miss_test_sequence(self, request): + return request.param + + @pytest.fixture + def intf_fdb_map(self, dvs, setup_vlan): + """ + Note: this fixture invokes the setup_vlan fixture so that + the interfaces are brought up before attempting to access FDB information + """ + state_db = dvs.get_state_db() + keys = state_db.get_keys(self.STATE_FDB_TABLE) + + fdb_map = {} + for key in keys: + entry = 
state_db.get_entry(self.STATE_FDB_TABLE, key) + mac = key.replace('{}:'.format(self.VLAN_1000), '') + port = entry['port'] + fdb_map[port] = mac + + return fdb_map + class TestMuxTunnel(TestMuxTunnelBase): """ Tests for Mux tunnel creation and removal """ + @pytest.fixture(scope='class') + def setup(self, dvs): + db = dvs.get_config_db() + asicdb = dvs.get_asic_db() - def test_Tunnel(self, dvs, testlog): - """ test IPv4 Mux tunnel creation """ + tc_to_dscp_map_oid = self.add_qos_map(db, asicdb, swsscommon.CFG_TC_TO_DSCP_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_DSCP_MAP) + tc_to_queue_map_oid = self.add_qos_map(db, asicdb, swsscommon.CFG_TC_TO_QUEUE_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_QUEUE_MAP) + + dscp_to_tc_map_oid = self.add_qos_map(db, asicdb, swsscommon.CFG_DSCP_TO_TC_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.DSCP_TO_TC_MAP) + tc_to_pg_map_oid = self.add_qos_map(db, asicdb, swsscommon.CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_PRIORITY_GROUP_MAP) + yield tc_to_dscp_map_oid, tc_to_queue_map_oid, dscp_to_tc_map_oid, tc_to_pg_map_oid + + self.remove_qos_map(db, swsscommon.CFG_TC_TO_DSCP_MAP_TABLE_NAME, tc_to_dscp_map_oid) + self.remove_qos_map(db, swsscommon.CFG_TC_TO_QUEUE_MAP_TABLE_NAME, tc_to_queue_map_oid) + self.remove_qos_map(db, swsscommon.CFG_DSCP_TO_TC_MAP_TABLE_NAME, dscp_to_tc_map_oid) + self.remove_qos_map(db, swsscommon.CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, tc_to_pg_map_oid) + + + def test_Tunnel(self, dvs, setup_tunnel, testlog, setup): + """ test IPv4 Mux tunnel creation """ db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) asicdb = dvs.get_asic_db() #self.cleanup_left_over(db, asicdb) + _, _, dscp_to_tc_map_oid, tc_to_pg_map_oid = setup + tunnel_params = self.DEFAULT_TUNNEL_PARAMS + tunnel_params["decap_dscp_to_tc_map_id"] = dscp_to_tc_map_oid + tunnel_params["decap_tc_to_pg_map_id"] = tc_to_pg_map_oid # create tunnel IPv4 tunnel - 
self.create_and_test_tunnel(db, asicdb, tunnel_name="MuxTunnel0", tunnel_type="IPINIP", - dst_ip="10.1.0.32", dscp_mode="uniform", - ecn_mode="standard", ttl_mode="pipe") + self.create_and_test_tunnel(db, asicdb, self.MUX_TUNNEL_0, tunnel_params) + def test_Peer(self, dvs, setup_peer_switch, setup_tunnel, setup, testlog): - def test_Peer(self, dvs, testlog): """ test IPv4 Mux tunnel creation """ - db = dvs.get_config_db() asicdb = dvs.get_asic_db() + + encap_tc_to_dscp_map_id, encap_tc_to_queue_map_id, _, _ = setup - self.create_and_test_peer(db, asicdb, "peer", "1.1.1.1", "10.1.0.32") - + self.create_and_test_peer(asicdb, encap_tc_to_dscp_map_id, encap_tc_to_queue_map_id) - def test_Neighbor(self, dvs, dvs_route, testlog): + def test_Neighbor(self, dvs, dvs_route, setup_vlan, setup_mux_cable, testlog): """ test Neighbor entries and mux state change """ confdb = dvs.get_config_db() - appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) asicdb = dvs.get_asic_db() self.create_and_test_neighbor(confdb, appdb, asicdb, dvs, dvs_route) - def test_Fdb(self, dvs, dvs_route, testlog): """ test Fdb entries and mux state change """ - appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) asicdb = dvs.get_asic_db() self.create_and_test_fdb(appdb, asicdb, dvs, dvs_route) - def test_Route(self, dvs, dvs_route, testlog): """ test Route entries and mux state change """ - appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) asicdb = dvs.get_asic_db() self.create_and_test_route(appdb, asicdb, dvs, dvs_route) + def test_NH(self, dvs, dvs_route, intf_fdb_map, setup, setup_mux_cable, + setup_peer_switch, setup_tunnel, testlog): + """ test NH routes and mux state change """ + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, 
dvs.redis_sock, 0) + asicdb = dvs.get_asic_db() + mac = intf_fdb_map["Ethernet0"] + + # get tunnel nexthop + self.check_tnl_nexthop_in_asic_db(asicdb, 5) + + self.create_and_test_NH_routes(appdb, asicdb, dvs, dvs_route, mac) + + def test_multi_nexthop(self, dvs, dvs_route, intf_fdb_map, neighbor_cleanup, testlog): + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + asicdb = dvs.get_asic_db() + macs = [intf_fdb_map["Ethernet0"], intf_fdb_map["Ethernet4"]] + + self.create_and_test_multi_nexthop_routes(dvs, dvs_route, appdb, macs, asicdb) def test_acl(self, dvs, dvs_acl, testlog): """ test acl and mux state change """ - appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) - asicdb = dvs.get_asic_db() + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) - self.create_and_test_acl(appdb, asicdb, dvs, dvs_acl) + try: + self.create_and_test_acl(appdb, dvs_acl) + finally: + self.set_mux_state(appdb, "Ethernet0", "active") + self.set_mux_state(appdb, "Ethernet4", "active") + self.set_mux_state(appdb, "Ethernet8", "active") + dvs_acl.verify_no_acl_rules() def test_mux_metrics(self, dvs, testlog): """ test metrics for mux state change """ - appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) statedb = dvs.get_state_db() - self.create_and_test_metrics(appdb, statedb, dvs) + self.create_and_test_metrics(appdb, statedb) + + def test_neighbor_miss( + self, dvs, dvs_route, ips_for_test, neigh_miss_test_sequence, + ip_to_intf_map, intf_fdb_map, neighbor_cleanup, setup_vlan, + setup_mux_cable, setup_tunnel, setup_peer_switch, testlog + ): + ip = ips_for_test[0] + intf = ip_to_intf_map[ip] + mac = intf_fdb_map[intf] + test_info = { + IP: ip, + INTF: intf, + MAC: mac + } + + for step in neigh_miss_test_sequence: + self.execute_action(step[TEST_ACTION], dvs, test_info) + exp_result = step[EXPECTED_RESULT] + self.check_neighbor_state( + dvs, 
dvs_route, ip, + expect_route=exp_result[EXPECT_ROUTE], + expect_neigh=exp_result[EXPECT_NEIGH], + expected_mac=mac if exp_result[REAL_MAC] else '00:00:00:00:00:00' + ) + + def test_neighbor_miss_no_mux( + self, dvs, dvs_route, setup_vlan, setup_tunnel, setup, + setup_peer_switch, neighbor_cleanup, testlog + ): + config_db = dvs.get_config_db() + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + + test_ip = self.SERV1_SOC_IPV4 + self.ping_ip(dvs, test_ip) + + # no mux present, no standalone tunnel route installed + self.check_neighbor_state(dvs, dvs_route, test_ip, expect_route=False) + + # setup the mux + config_db = dvs.get_config_db() + self.create_mux_cable(config_db) + # tunnel route should be installed immediately after mux setup + self.check_neighbor_state(dvs, dvs_route, test_ip, expect_route=True) + + # set port state as standby + self.set_mux_state(appdb, "Ethernet0", "standby") + self.check_neighbor_state(dvs, dvs_route, test_ip, expect_route=True) + + # set port state as active + self.set_mux_state(appdb, "Ethernet0", "active") + self.check_neighbor_state(dvs, dvs_route, test_ip, expect_route=True) + + # clear the FAILED neighbor + self.clear_neighbors(dvs) + self.check_neighbor_state(dvs, dvs_route, test_ip, expect_route=False) + + def test_neighbor_miss_no_peer( + self, dvs, dvs_route, setup_vlan, setup_mux_cable, setup_tunnel, + remove_peer_switch, neighbor_cleanup, testlog + ): + """ + test neighbor miss with no peer switch configured + No new entries are expected in APPL_DB or ASIC_DB + """ + test_ips = [self.NEIGH3_IPV4, self.SERV3_IPV4, self.NEIGH1_IPV6, self.SERV1_IPV6] + + for ip in test_ips: + self.ping_ip(dvs, ip) + + for ip in test_ips: + self.check_neighbor_state(dvs, dvs_route, ip, expect_route=False) + + def test_soc_ip(self, dvs, dvs_route, setup_vlan, setup_mux_cable, testlog): + appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + asicdb = dvs.get_asic_db() + self.create_and_test_soc(appdb, 
asicdb, dvs, dvs_route) # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_nat.py b/tests/test_nat.py index 9e87b5f54c..1c509e464f 100644 --- a/tests/test_nat.py +++ b/tests/test_nat.py @@ -15,13 +15,10 @@ def setup_db(self, dvs): self.config_db = dvs.get_config_db() def set_interfaces(self, dvs): - fvs = {"NULL": "NULL"} - self.config_db.create_entry("INTERFACE", "Ethernet0|67.66.65.1/24", fvs) - self.config_db.create_entry("INTERFACE", "Ethernet4|18.18.18.1/24", fvs) - self.config_db.create_entry("INTERFACE", "Ethernet0", fvs) - self.config_db.create_entry("INTERFACE", "Ethernet4", fvs) - dvs.runcmd("config interface startup Ethernet0") - dvs.runcmd("config interface startup Ethernet4") + dvs.interface_ip_add("Ethernet0", "67.66.65.1/24") + dvs.interface_ip_add("Ethernet4", "18.18.18.1/24") + dvs.port_admin_set("Ethernet0", "up") + dvs.port_admin_set("Ethernet4", "up") dvs.servers[0].runcmd("ip link set down dev eth0") dvs.servers[0].runcmd("ip link set up dev eth0") @@ -33,7 +30,7 @@ def set_interfaces(self, dvs): dvs.servers[1].runcmd("ifconfig eth0 18.18.18.2/24") dvs.servers[1].runcmd("ip route add default via 18.18.18.1") - dvs.runcmd("config nat add interface Ethernet0 -nat_zone 1") + dvs.set_nat_zone("Ethernet0", "1") time.sleep(1) @@ -48,10 +45,10 @@ def test_NatGlobalTable(self, dvs, testlog): self.setup_db(dvs) # enable NAT feature - dvs.runcmd("config nat feature enable") - dvs.runcmd("config nat set timeout 450") - dvs.runcmd("config nat set udp-timeout 360") - dvs.runcmd("config nat set tcp-timeout 900") + dvs.nat_mode_set("enabled") + dvs.nat_timeout_set("450") + dvs.nat_udp_timeout_set("360") + dvs.nat_tcp_timeout_set("900") # check NAT global values in appdb self.app_db.wait_for_n_keys("NAT_GLOBAL_TABLE", 1) @@ -82,7 +79,7 @@ def test_AddNatStaticEntry(self, dvs, testlog): dvs.servers[0].runcmd("ping -c 1 18.18.18.2") # add a static nat 
entry - dvs.runcmd("config nat add static basic 67.66.65.1 18.18.18.2") + dvs.add_nat_basic_entry("67.66.65.1", "18.18.18.2") # check the entry in the config db self.config_db.wait_for_n_keys("STATIC_NAT", 1) @@ -115,7 +112,7 @@ def test_DelNatStaticEntry(self, dvs, testlog): self.setup_db(dvs) # delete a static nat entry - dvs.runcmd("config nat remove static basic 67.66.65.1 18.18.18.2") + dvs.del_nat_basic_entry("67.66.65.1") # check the entry is no there in the config db self.config_db.wait_for_n_keys("STATIC_NAT", 0) @@ -134,7 +131,7 @@ def test_AddNaPtStaticEntry(self, dvs, testlog): dvs.servers[0].runcmd("ping -c 1 18.18.18.2") # add a static nat entry - dvs.runcmd("config nat add static udp 67.66.65.1 670 18.18.18.2 180") + dvs.add_nat_udp_entry("67.66.65.1", "670", "18.18.18.2", "180") # check the entry in the config db self.config_db.wait_for_n_keys("STATIC_NAPT", 1) @@ -165,7 +162,7 @@ def test_DelNaPtStaticEntry(self, dvs, testlog): self.setup_db(dvs) # delete a static nat entry - dvs.runcmd("config nat remove static udp 67.66.65.1 670 18.18.18.2 180") + dvs.del_nat_udp_entry("67.66.65.1", "670") # check the entry is no there in the config db self.config_db.wait_for_n_keys("STATIC_NAPT", 0) @@ -186,8 +183,8 @@ def test_AddTwiceNatEntry(self, dvs, testlog): dvs.servers[1].runcmd("ping -c 1 67.66.65.2") # add a twice nat entry - dvs.runcmd("config nat add static basic 67.66.65.2 18.18.18.1 -nat_type snat -twice_nat_id 9") - dvs.runcmd("config nat add static basic 67.66.65.1 18.18.18.2 -nat_type dnat -twice_nat_id 9") + dvs.add_twice_nat_basic_entry("67.66.65.2", "18.18.18.1", "snat", "9") + dvs.add_twice_nat_basic_entry("67.66.65.1", "18.18.18.2", "dnat", "9") # check the entry in the config db self.config_db.wait_for_n_keys("STATIC_NAT", 2) @@ -220,8 +217,8 @@ def test_DelTwiceNatStaticEntry(self, dvs, testlog): self.setup_db(dvs) # delete a static nat entry - dvs.runcmd("config nat remove static basic 67.66.65.2 18.18.18.1") - dvs.runcmd("config nat 
remove static basic 67.66.65.1 18.18.18.2") + dvs.del_twice_nat_basic_entry("67.66.65.2") + dvs.del_twice_nat_basic_entry("67.66.65.1") # check the entry is no there in the config db self.config_db.wait_for_n_keys("STATIC_NAT", 0) @@ -241,8 +238,8 @@ def test_AddTwiceNaPtEntry(self, dvs, testlog): dvs.servers[1].runcmd("ping -c 1 67.66.65.2") # add a twice nat entry - dvs.runcmd("config nat add static udp 67.66.65.2 670 18.18.18.1 181 -nat_type snat -twice_nat_id 7") - dvs.runcmd("config nat add static udp 67.66.65.1 660 18.18.18.2 182 -nat_type dnat -twice_nat_id 7") + dvs.add_twice_nat_udp_entry("67.66.65.2", "670", "18.18.18.1", "181", "snat", "7") + dvs.add_twice_nat_udp_entry("67.66.65.1", "660", "18.18.18.2", "182", "dnat", "7") # check the entry in the config db self.config_db.wait_for_n_keys("STATIC_NAPT", 2) @@ -277,8 +274,8 @@ def test_DelTwiceNaPtStaticEntry(self, dvs, testlog): self.setup_db(dvs) # delete a static nat entry - dvs.runcmd("config nat remove static udp 67.66.65.2 670 18.18.18.1 181") - dvs.runcmd("config nat remove static udp 67.66.65.1 660 18.18.18.2 182") + dvs.del_twice_nat_udp_entry("67.66.65.2", "670") + dvs.del_twice_nat_udp_entry("67.66.65.1", "660") # check the entry is not there in the config db self.config_db.wait_for_n_keys("STATIC_NAPT", 0) @@ -294,7 +291,7 @@ def test_VerifyConntrackTimeoutForNatEntry(self, dvs, testlog): dvs.servers[0].runcmd("ping -c 1 18.18.18.2") # add a static nat entry - dvs.runcmd("config nat add static basic 67.66.65.1 18.18.18.2") + dvs.add_nat_basic_entry("67.66.65.1", "18.18.18.2") # check the conntrack timeout for static entry def _check_conntrack_for_static_entry(): @@ -321,7 +318,7 @@ def _check_conntrack_for_static_entry(): wait_for_result(_check_conntrack_for_static_entry) # delete a static nat entry - dvs.runcmd("config nat remove static basic 67.66.65.1 18.18.18.2") + dvs.del_nat_basic_entry("67.66.65.1") def test_DoNotNatAclAction(self, dvs_acl, testlog): @@ -360,7 +357,7 @@ def 
test_CrmSnatAndDnatEntryUsedCount(self, dvs, testlog): dvs.servers[0].runcmd("ping -c 1 18.18.18.2") # set pooling interval to 1 - dvs.runcmd("crm config polling interval 1") + dvs.crm_poll_set("1") dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_SNAT_ENTRY', '1000') dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_DNAT_ENTRY', '1000') @@ -376,7 +373,7 @@ def test_CrmSnatAndDnatEntryUsedCount(self, dvs, testlog): avail_dnat_counter = dvs.getCrmCounterValue('STATS', 'crm_stats_dnat_entry_available') # add a static nat entry - dvs.runcmd("config nat add static basic 67.66.65.1 18.18.18.2") + dvs.add_nat_basic_entry("67.66.65.1", "18.18.18.2") #check the entry in asic db, 3 keys = SNAT, DNAT and DNAT_Pool keys = self.asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NAT_ENTRY", 3) @@ -405,7 +402,7 @@ def test_CrmSnatAndDnatEntryUsedCount(self, dvs, testlog): assert avail_dnat_counter - new_avail_dnat_counter == 1 # delete a static nat entry - dvs.runcmd("config nat remove static basic 67.66.65.1 18.18.18.2") + dvs.del_nat_basic_entry("67.66.65.1") dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_SNAT_ENTRY', '1000') dvs.setReadOnlyAttr('SAI_OBJECT_TYPE_SWITCH', 'SAI_SWITCH_ATTR_AVAILABLE_DNAT_ENTRY', '1000') diff --git a/tests/test_neighbor.py b/tests/test_neighbor.py index 4893faeb21..59618c75ce 100644 --- a/tests/test_neighbor.py +++ b/tests/test_neighbor.py @@ -6,6 +6,14 @@ class TestNeighbor(object): + CONFIG_PEER_SWITCH = "PEER_SWITCH" + PEER_SWITCH_HOST = "peer_switch_hostname" + PEER_IPV4 = "10.1.0.33" + + DEFAULT_PEER_SWITCH_PARAMS = { + "address_ipv4": PEER_IPV4 + } + def setup_db(self, dvs): self.pdb = swsscommon.DBConnector(0, dvs.redis_sock, 0) self.adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) @@ -413,6 +421,108 @@ def test_FlushResolveNeighborIpv4(self, dvs, testlog): (exitcode, output) = dvs.runcmd(['sh', '-c', "supervisorctl status nbrmgrd | awk '{print $2}'"]) assert 
output == "RUNNING\n" + def test_Ipv4LinkLocalNeighbor(self, dvs, testlog): + self.setup_db(dvs) + + # bring up interface + self.set_admin_status("Ethernet8", "up") + + # create interface + self.create_l3_intf("Ethernet8", "") + + # assign IP to interface + self.add_ip_address("Ethernet8", "10.0.0.1/24") + + # add neighbor + self.add_neighbor("Ethernet8", "169.254.0.0", "00:01:02:03:04:05") + + # check application database + tbl = swsscommon.Table(self.pdb, "NEIGH_TABLE:Ethernet8") + intf_entries = tbl.getKeys() + assert len(intf_entries) == 1 + + # check ASIC neighbor database + tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY") + intf_entries = tbl.getKeys() + assert len(intf_entries) == 1 + + # remove neighbor + self.remove_neighbor("Ethernet8", "169.254.0.0") + + # remove IP from interface + self.remove_ip_address("Ethernet8", "10.0.0.1/24") + + # remove interface + self.remove_l3_intf("Ethernet8") + + # bring down interface + self.set_admin_status("Ethernet8", "down") + + # check application database + tbl = swsscommon.Table(self.pdb, "NEIGH_TABLE:Ethernet8") + intf_entries = tbl.getKeys() + assert len(intf_entries) == 0 + + # check ASIC neighbor database + tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY") + intf_entries = tbl.getKeys() + assert len(intf_entries) == 0 + + def test_Ipv4LinkLocalNeighborWithDualToR(self, dvs, testlog, setup_peer_switch): + self.setup_db(dvs) + + # bring up interface + self.set_admin_status("Ethernet8", "up") + + # create interface + self.create_l3_intf("Ethernet8", "") + + # assign IP to interface + self.add_ip_address("Ethernet8", "10.0.0.1/24") + + # add neighbor + self.add_neighbor("Ethernet8", "169.254.0.0", "00:01:02:03:04:05") + + # check application database + tbl = swsscommon.Table(self.pdb, "NEIGH_TABLE:Ethernet8") + intf_entries = tbl.getKeys() + assert len(intf_entries) == 0 + + # check ASIC neighbor database + tbl = swsscommon.Table(self.adb, 
"ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY") + intf_entries = tbl.getKeys() + assert len(intf_entries) == 0 + + # remove neighbor + self.remove_neighbor("Ethernet8", "169.254.0.0") + + # remove IP from interface + self.remove_ip_address("Ethernet8", "10.0.0.1/24") + + # remove interface + self.remove_l3_intf("Ethernet8") + + # bring down interface + self.set_admin_status("Ethernet8", "down") + + # check application database + tbl = swsscommon.Table(self.pdb, "NEIGH_TABLE:Ethernet8") + intf_entries = tbl.getKeys() + assert len(intf_entries) == 0 + + # check ASIC neighbor database + tbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY") + intf_entries = tbl.getKeys() + assert len(intf_entries) == 0 + + @pytest.fixture + def setup_peer_switch(self, dvs): + config_db = dvs.get_config_db() + config_db.create_entry( + self.CONFIG_PEER_SWITCH, + self.PEER_SWITCH_HOST, + self.DEFAULT_PEER_SWITCH_PARAMS + ) # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_nhg.py b/tests/test_nhg.py index 2d004f6c1b..6647a8d0de 100644 --- a/tests/test_nhg.py +++ b/tests/test_nhg.py @@ -128,14 +128,14 @@ def peer_ip(self, i): return "10.0.0." 
+ str(i * 2 + 1) def port_mac(self, i): - return "00:00:00:00:00:0" + str(i) + return "00:00:00:00:00:0" + str(i + 1) def config_intf(self, i): fvs = {'NULL': 'NULL'} self.config_db.create_entry("INTERFACE", self.port_name(i), fvs) self.config_db.create_entry("INTERFACE", "{}|{}".format(self.port_name(i), self.port_ipprefix(i)), fvs) - self.dvs.runcmd("config interface startup " + self.port_name(i)) + self.dvs.port_admin_set(self.port_name(i), "up") self.dvs.runcmd("arp -s {} {}".format(self.peer_ip(i), self.port_mac(i))) assert self.dvs.servers[i].runcmd("ip link set down dev eth0") == 0 assert self.dvs.servers[i].runcmd("ip link set up dev eth0") == 0 @@ -149,6 +149,41 @@ def flap_intf(self, i, status): assert bool(fvs) assert fvs["oper_status"] == status + # BFD utilities for static route BFD and ecmp acceleration -- begin + def get_exist_bfd_session(self): + return set(self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION")) + + def create_bfd_session(self, key, pairs): + tbl = swsscommon.ProducerStateTable(self.app_db.db_connection, "BFD_SESSION_TABLE") + fvs = swsscommon.FieldValuePairs(list(pairs.items())) + tbl.set(key, fvs) + + def remove_bfd_session(self, key): + tbl = swsscommon.ProducerStateTable(self.app_db.db_connection, "BFD_SESSION_TABLE") + tbl._del(key) + + def check_asic_bfd_session_value(self, key, expected_values): + fvs = self.asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", key) + for k, v in expected_values.items(): + assert fvs[k] == v + + def check_state_bfd_session_value(self, key, expected_values): + fvs = self.state_db.get_entry("BFD_SESSION_TABLE", key) + for k, v in expected_values.items(): + assert fvs[k] == v + + def update_bfd_session_state(self, dvs, session, state): + bfd_sai_state = {"Admin_Down": "SAI_BFD_SESSION_STATE_ADMIN_DOWN", + "Down": "SAI_BFD_SESSION_STATE_DOWN", + "Init": "SAI_BFD_SESSION_STATE_INIT", + "Up": "SAI_BFD_SESSION_STATE_UP"} + + ntf = 
swsscommon.NotificationProducer(self.asic_db.db_connection, "NOTIFICATIONS") + fvp = swsscommon.FieldValuePairs() + ntf_data = "[{\"bfd_session_id\":\""+session+"\",\"session_state\":\""+bfd_sai_state[state]+"\"}]" + ntf.send("bfd_session_state_change", ntf_data, fvp) + # BFD utilities for static route BFD and ecmp acceleration -- end + def init_test(self, dvs, num_intfs): self.dvs = dvs self.app_db = self.dvs.get_app_db() @@ -952,6 +987,57 @@ def test_route_nhg(self, ordered_ecmp, dvs, dvs_route, testlog): else: assert fvs.get("SAI_NEXT_HOP_GROUP_MEMBER_ATTR_SEQUENCE_ID") is None + # BFD: test validate/invalidate nexthop group member when bfd state changes -- begin + bfdSessions = self.get_exist_bfd_session() + # Create BFD session + fieldValues = {"local_addr": "10.0.0.2"} + self.create_bfd_session("default:default:10.0.0.3", fieldValues) + time.sleep(1) + + # Checked created BFD session in ASIC_DB + createdSessions = self.get_exist_bfd_session() - bfdSessions + assert len(createdSessions) == 1 + session = createdSessions.pop() + + expected_adb_values = { + "SAI_BFD_SESSION_ATTR_SRC_IP_ADDRESS": "10.0.0.2", + "SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS": "10.0.0.3", + "SAI_BFD_SESSION_ATTR_TYPE": "SAI_BFD_SESSION_TYPE_ASYNC_ACTIVE", + "SAI_BFD_SESSION_ATTR_IPHDR_VERSION": "4" + } + self.check_asic_bfd_session_value(session, expected_adb_values) + + # Check STATE_DB entry related to the BFD session + expected_sdb_values = {"state": "Down", "type": "async_active", "local_addr" : "10.0.0.2"} + self.check_state_bfd_session_value("default|default|10.0.0.3", expected_sdb_values) + + # Send BFD session state notification to update BFD session state + self.update_bfd_session_state(dvs, session, "Down") + time.sleep(1) + # Confirm BFD session state in STATE_DB is updated as expected + expected_sdb_values["state"] = "Down" + self.check_state_bfd_session_value("default|default|10.0.0.3", expected_sdb_values) + + #check nexthop group member is removed + keys = 
self.asic_db.get_keys(self.ASIC_NHGM_STR) + assert len(keys) == 2 + + # Send BFD session state notification to update BFD session state + self.update_bfd_session_state(dvs, session, "Up") + time.sleep(1) + # Confirm BFD session state in STATE_DB is updated as expected + expected_sdb_values["state"] = "Up" + self.check_state_bfd_session_value("default|default|10.0.0.3", expected_sdb_values) + + #check nexthop group member is added back + keys = self.asic_db.get_keys(self.ASIC_NHGM_STR) + assert len(keys) == 3 + + # Remove the BFD session + self.remove_bfd_session("default:default:10.0.0.3") + self.asic_db.wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION", session) + # BFD: test validate/invalidate nexthop group member when bfd state changes -- end + # Remove route 2.2.2.0/24 self.rt_ps._del(rtprefix) diff --git a/tests/test_nvgre_tunnel.py b/tests/test_nvgre_tunnel.py new file mode 100644 index 0000000000..90fe560141 --- /dev/null +++ b/tests/test_nvgre_tunnel.py @@ -0,0 +1,381 @@ +import time +import json +import random +import time +import pytest + + +from swsscommon import swsscommon +from pprint import pprint + + +NVGRE_TUNNEL = 'NVGRE_TUNNEL' +NVGRE_TUNNEL_MAP = 'NVGRE_TUNNEL_MAP' + + +SAI_OBJECT_TYPE_TUNNEL = 'ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL' +SAI_OBJECT_TYPE_TUNNEL_MAP = 'ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP' +SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY = 'ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY' + + +def create_entry(tbl, key, pairs): + fvs = swsscommon.FieldValuePairs(pairs) + tbl.set(key, fvs) + time.sleep(1) + + +def create_entry_tbl(db, table, separator, key, pairs): + tbl = swsscommon.Table(db, table) + create_entry(tbl, key, pairs) + + +def delete_entry_tbl(db, table, key): + tbl = swsscommon.Table(db, table) + tbl._del(key) + time.sleep(1) + + +def get_all_created_entries(db, table, existed_entries): + tbl = swsscommon.Table(db, table) + entries = set(tbl.getKeys()) + new_entries = list(entries - existed_entries) + assert len(new_entries) >= 
0, "DB entries was't created" + new_entries.sort() + return new_entries + + +def get_created_entries(db, table, existed_entries, count): + new_entries = get_all_created_entries(db, table, existed_entries) + assert len(new_entries) == count, "Wrong number of created entries." + return new_entries + + +def get_exist_entries(dvs, table): + db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + tbl = swsscommon.Table(db, table) + return set(tbl.getKeys()) + + +def get_created_entry(db, table, existed_entries): + tbl = swsscommon.Table(db, table) + entries = set(tbl.getKeys()) + new_entries = list(entries - existed_entries) + assert len(new_entries) == 1, "Wrong number of created entries." + return new_entries[0] + + +def how_many_entries_exist(db, table): + tbl = swsscommon.Table(db, table) + return len(tbl.getKeys()) + + +def get_lo(dvs): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + tbl = swsscommon.Table(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE') + + entries = tbl.getKeys() + lo_id = None + for entry in entries: + status, fvs = tbl.get(entry) + assert status, "Got an error when get a key" + for key, value in fvs: + if key == 'SAI_ROUTER_INTERFACE_ATTR_TYPE' and value == 'SAI_ROUTER_INTERFACE_TYPE_LOOPBACK': + lo_id = entry + break + else: + assert False, 'Don\'t found loopback id' + + return lo_id + + +def check_object(db, table, key, expected_attributes): + tbl = swsscommon.Table(db, table) + keys = tbl.getKeys() + assert key in keys, "The desired key is not presented" + + status, fvs = tbl.get(key) + assert status, "Got an error when get a key" + + assert len(fvs) == len(expected_attributes), "Unexpected number of attributes" + + attr_keys = {entry[0] for entry in fvs} + + for name, value in fvs: + assert expected_attributes[name] == value, "Wrong value %s for the attribute %s = %s" % \ + (value, name, expected_attributes[name]) + + +loopback_id = 0 + + +class NvgreTunnel(object): + tunnel_ids = set() + 
tunnel_map_ids = set() + tunnel_map_entry_ids = set() + tunnel_map_map = {} + tunnel = {} + + + def fetch_exist_entries(self, dvs): + self.tunnel_ids = get_exist_entries(dvs, SAI_OBJECT_TYPE_TUNNEL) + self.tunnel_map_ids = get_exist_entries(dvs, SAI_OBJECT_TYPE_TUNNEL_MAP) + self.tunnel_map_entry_ids = get_exist_entries(dvs, SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY) + + global loopback_id + if not loopback_id: + loopback_id = get_lo(dvs) + + + def create_nvgre_tunnel(self, dvs, tunnel_name, src_ip): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + create_entry_tbl(conf_db, NVGRE_TUNNEL, '|', tunnel_name, [ ('src_ip', src_ip) ]) + time.sleep(1) + + + def check_nvgre_tunnel(self, dvs, tunnel_name, src_ip): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + global loopback_id + + tunnel_id = get_created_entry(asic_db, SAI_OBJECT_TYPE_TUNNEL, self.tunnel_ids) + tunnel_map_ids = get_created_entries(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP, self.tunnel_map_ids, 4) + + assert how_many_entries_exist(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP) == (len(self.tunnel_map_ids) + 4), "The TUNNEL_MAP wasn't created" + assert how_many_entries_exist(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY) == len(self.tunnel_map_entry_ids), "The TUNNEL_MAP_ENTRY is created too early" + assert how_many_entries_exist(asic_db, SAI_OBJECT_TYPE_TUNNEL) == (len(self.tunnel_ids) + 1), "The TUNNEL wasn't created" + + check_object(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP, tunnel_map_ids[0], { 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VLAN_ID_TO_VSID' }) + check_object(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP, tunnel_map_ids[1], { 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_BRIDGE_IF_TO_VSID' }) + check_object(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP, tunnel_map_ids[2], { 'SAI_TUNNEL_MAP_ATTR_TYPE': 'SAI_TUNNEL_MAP_TYPE_VSID_TO_VLAN_ID' }) + check_object(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP, tunnel_map_ids[3], { 'SAI_TUNNEL_MAP_ATTR_TYPE': 
'SAI_TUNNEL_MAP_TYPE_VSID_TO_BRIDGE_IF' }) + + check_object(asic_db, SAI_OBJECT_TYPE_TUNNEL, tunnel_id, + { + 'SAI_TUNNEL_ATTR_TYPE': 'SAI_TUNNEL_TYPE_NVGRE', + 'SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE': loopback_id, + 'SAI_TUNNEL_ATTR_DECAP_MAPPERS': f'2:{tunnel_map_ids[2]},{tunnel_map_ids[3]}', + 'SAI_TUNNEL_ATTR_ENCAP_MAPPERS': f'2:{tunnel_map_ids[0]},{tunnel_map_ids[1]}', + 'SAI_TUNNEL_ATTR_ENCAP_SRC_IP': src_ip + } + ) + + self.tunnel_map_ids.update(tunnel_map_ids) + self.tunnel_ids.add(tunnel_id) + self.tunnel_map_map[tunnel_name] = tunnel_map_ids + self.tunnel[tunnel_name] = tunnel_id + + + def check_invalid_nvgre_tunnel(self, dvs): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + assert how_many_entries_exist(asic_db, SAI_OBJECT_TYPE_TUNNEL) == len(self.tunnel_ids), "Invalid TUNNEL was created" + assert how_many_entries_exist(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP) == len(self.tunnel_map_ids), "Invalid TUNNEL_MAP was created" + + + def remove_nvgre_tunnel(self, dvs, tunnel_name): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + delete_entry_tbl(conf_db, NVGRE_TUNNEL, tunnel_name) + time.sleep(1) + + + def check_remove_nvgre_tunnel(self, dvs, tunnel_name): + self.fetch_exist_entries(dvs) + self.tunnel.pop(tunnel_name, None) + self.tunnel_map_map.pop(tunnel_name, None) + + + def create_nvgre_tunnel_map_entry(self, dvs, tunnel_name, tunnel_map_entry_name, vlan_id, vsid): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + create_entry_tbl( + conf_db, + NVGRE_TUNNEL_MAP, '|', f'{tunnel_name}|{tunnel_map_entry_name}', + [ + ('vsid', vsid), + ('vlan_id', f'Vlan{vlan_id}'), + ], + ) + time.sleep(1) + + + def check_nvgre_tunnel_map_entry(self, dvs, tunnel_name, vlan_id, vsid): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + if (self.tunnel_map_map.get(tunnel_name) is None): + tunnel_map_ids = get_created_entries(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP, 
self.tunnel_map_ids, 4) + else: + tunnel_map_ids = self.tunnel_map_map[tunnel_name] + + tunnel_map_entry_id = get_created_entries(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY, self.tunnel_map_entry_ids, 1) + + assert how_many_entries_exist(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY) == (len(self.tunnel_map_entry_ids) + 1), "The TUNNEL_MAP_ENTRY is created too early" + + check_object(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY, tunnel_map_entry_id[0], + { + 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP_TYPE': 'SAI_TUNNEL_MAP_TYPE_VSID_TO_VLAN_ID', + 'SAI_TUNNEL_MAP_ENTRY_ATTR_TUNNEL_MAP': tunnel_map_ids[2], + 'SAI_TUNNEL_MAP_ENTRY_ATTR_VSID_ID_KEY': vsid, + 'SAI_TUNNEL_MAP_ENTRY_ATTR_VLAN_ID_VALUE': vlan_id, + } + ) + + self.tunnel_map_entry_ids.update(tunnel_map_entry_id) + + + def check_invalid_nvgre_tunnel_map_entry(self, dvs): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + assert how_many_entries_exist(asic_db, SAI_OBJECT_TYPE_TUNNEL_MAP_ENTRY) == len(self.tunnel_map_entry_ids), "Invalid TUNNEL_MAP_ENTRY was created" + + + def remove_nvgre_tunnel_map_entry(self, dvs, tunnel_name, tunnel_map_entry_name): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + delete_entry_tbl(conf_db, NVGRE_TUNNEL_MAP, f'{tunnel_name}|{tunnel_map_entry_name}') + time.sleep(1) + + + def check_remove_nvgre_tunnel_map_entry(self, dvs): + self.fetch_exist_entries(dvs) + + +@pytest.mark.usefixtures('dvs_vlan_manager') +class TestNvgreTunnel(object): + + def get_nvgre_tunnel_obj(self): + return NvgreTunnel() + + + def test_nvgre_create_tunnel_map_entry(self, dvs, testlog): + try: + tunnel_name = 'tunnel_1' + tunnel_map_entry_name = 'entry_1' + src_ip = '10.0.0.1' + vlan_id = '500' + vsid = '850' + + nvgre_obj = self.get_nvgre_tunnel_obj() + nvgre_obj.fetch_exist_entries(dvs) + + self.dvs_vlan.create_vlan(vlan_id) + + nvgre_obj.create_nvgre_tunnel(dvs, tunnel_name, src_ip) + nvgre_obj.check_nvgre_tunnel(dvs, tunnel_name, src_ip) + + 
nvgre_obj.create_nvgre_tunnel_map_entry(dvs, tunnel_name, tunnel_map_entry_name, vlan_id, vsid) + nvgre_obj.check_nvgre_tunnel_map_entry(dvs, tunnel_name, vlan_id, vsid) + finally: + nvgre_obj.remove_nvgre_tunnel_map_entry(dvs, tunnel_name, tunnel_map_entry_name) + nvgre_obj.check_remove_nvgre_tunnel_map_entry(dvs) + + nvgre_obj.remove_nvgre_tunnel(dvs, tunnel_name) + nvgre_obj.check_remove_nvgre_tunnel(dvs, tunnel_name) + + self.dvs_vlan.remove_vlan(vlan_id) + + + def test_multiple_nvgre_tunnels_entries(self, dvs, testlog): + try: + tunnel_name_1 = 'tunnel_1' + tunnel_name_2 = 'tunnel_2' + tunnel_name_3 = 'tunnel_3' + entry_1 = 'entry_1' + entry_2 = 'entry_2' + entry_3 = 'entry_3' + entry_4 = 'entry_4' + + nvgre_obj = self.get_nvgre_tunnel_obj() + nvgre_obj.fetch_exist_entries(dvs) + + self.dvs_vlan.create_vlan('501') + self.dvs_vlan.create_vlan('502') + self.dvs_vlan.create_vlan('503') + self.dvs_vlan.create_vlan('504') + + nvgre_obj.create_nvgre_tunnel(dvs, tunnel_name_1, '10.0.0.1') + nvgre_obj.check_nvgre_tunnel(dvs, tunnel_name_1, '10.0.0.1') + + nvgre_obj.create_nvgre_tunnel_map_entry(dvs, tunnel_name_1, entry_1, '501', '801') + nvgre_obj.check_nvgre_tunnel_map_entry(dvs, tunnel_name_1, '501', '801') + + nvgre_obj.create_nvgre_tunnel(dvs, tunnel_name_2, '10.0.0.2') + nvgre_obj.check_nvgre_tunnel(dvs, tunnel_name_2, '10.0.0.2') + + nvgre_obj.create_nvgre_tunnel_map_entry(dvs, tunnel_name_2, entry_2, '502', '802') + nvgre_obj.check_nvgre_tunnel_map_entry(dvs, tunnel_name_2, '502', '802') + + nvgre_obj.create_nvgre_tunnel(dvs, tunnel_name_3, '10.0.0.3') + nvgre_obj.check_nvgre_tunnel(dvs, tunnel_name_3, '10.0.0.3') + + nvgre_obj.create_nvgre_tunnel_map_entry(dvs, tunnel_name_3, entry_3, '503', '803') + nvgre_obj.check_nvgre_tunnel_map_entry(dvs, tunnel_name_3, '503', '803') + + nvgre_obj.create_nvgre_tunnel_map_entry(dvs, tunnel_name_3, entry_4, '504', '804') + nvgre_obj.check_nvgre_tunnel_map_entry(dvs, tunnel_name_3, '504', '804') + finally: + 
nvgre_obj.remove_nvgre_tunnel_map_entry(dvs, tunnel_name_1, entry_1) + nvgre_obj.check_remove_nvgre_tunnel_map_entry(dvs) + + nvgre_obj.remove_nvgre_tunnel(dvs, tunnel_name_1) + nvgre_obj.check_remove_nvgre_tunnel(dvs, tunnel_name_1) + + nvgre_obj.remove_nvgre_tunnel_map_entry(dvs, tunnel_name_2, entry_2) + nvgre_obj.check_remove_nvgre_tunnel_map_entry(dvs) + + nvgre_obj.remove_nvgre_tunnel(dvs, tunnel_name_2) + nvgre_obj.check_remove_nvgre_tunnel(dvs, tunnel_name_2) + + nvgre_obj.remove_nvgre_tunnel_map_entry(dvs, tunnel_name_3, entry_3) + nvgre_obj.check_remove_nvgre_tunnel_map_entry(dvs) + + nvgre_obj.remove_nvgre_tunnel_map_entry(dvs, tunnel_name_3, entry_4) + nvgre_obj.check_remove_nvgre_tunnel_map_entry(dvs) + + nvgre_obj.remove_nvgre_tunnel(dvs, tunnel_name_3) + nvgre_obj.check_remove_nvgre_tunnel(dvs, tunnel_name_3) + + self.dvs_vlan.remove_vlan('501') + self.dvs_vlan.remove_vlan('502') + self.dvs_vlan.remove_vlan('503') + self.dvs_vlan.remove_vlan('504') + + + def test_invalid_nvgre_tunnel(self, dvs, testlog): + nvgre_obj = self.get_nvgre_tunnel_obj() + nvgre_obj.fetch_exist_entries(dvs) + + nvgre_obj.create_nvgre_tunnel(dvs, 'tunnel_1', '1111.1111.1111.1111') + nvgre_obj.check_invalid_nvgre_tunnel(dvs) + + + def test_invalid_nvgre_tunnel_map_entry(self, dvs, testlog): + try: + tunnel_name = 'tunnel_1' + tunnel_map_entry_name = 'entry_1' + src_ip = '10.0.0.1' + vlan_id = '500' + vsid = 'INVALID' + + nvgre_obj = self.get_nvgre_tunnel_obj() + nvgre_obj.fetch_exist_entries(dvs) + + self.dvs_vlan.create_vlan(vlan_id) + + nvgre_obj.create_nvgre_tunnel(dvs, tunnel_name, src_ip) + nvgre_obj.check_nvgre_tunnel(dvs, tunnel_name, src_ip) + + nvgre_obj.create_nvgre_tunnel_map_entry(dvs, tunnel_name, tunnel_map_entry_name, vlan_id, vsid) + nvgre_obj.check_invalid_nvgre_tunnel_map_entry(dvs) + finally: + nvgre_obj.remove_nvgre_tunnel(dvs, tunnel_name) + nvgre_obj.check_remove_nvgre_tunnel(dvs, tunnel_name) + + self.dvs_vlan.remove_vlan(vlan_id) + + +# Add Dummy 
always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass diff --git a/tests/test_pbh.py b/tests/test_pbh.py index 328e8231bc..65401a3ea9 100644 --- a/tests/test_pbh.py +++ b/tests/test_pbh.py @@ -1,6 +1,8 @@ import pytest import logging +import test_flex_counters as flex_counter_module + PBH_HASH_FIELD_NAME = "inner_ip_proto" PBH_HASH_FIELD_HASH_FIELD = "INNER_IP_PROTOCOL" @@ -128,6 +130,7 @@ def test_PbhTablePortChannelBinding(self, testlog): self.dvs_lag.get_and_verify_port_channel(0) +@pytest.mark.usefixtures("dvs_hash_manager") class TestPbhBasicFlows: def test_PbhHashFieldCreationDeletion(self, testlog): try: @@ -160,12 +163,12 @@ def test_PbhHashCreationDeletion(self, testlog): hash_name=PBH_HASH_NAME, hash_field_list=PBH_HASH_HASH_FIELD_LIST ) - self.dvs_pbh.verify_pbh_hash_count(1) + self.dvs_hash.verify_hash_count(1) finally: # PBH hash pbhlogger.info("Remove PBH hash: {}".format(PBH_HASH_NAME)) self.dvs_pbh.remove_pbh_hash(PBH_HASH_NAME) - self.dvs_pbh.verify_pbh_hash_count(0) + self.dvs_hash.verify_hash_count(0) # PBH hash field pbhlogger.info("Remove PBH hash field: {}".format(PBH_HASH_FIELD_NAME)) @@ -203,7 +206,76 @@ def test_PbhRuleCreationDeletion(self, testlog): hash_name=PBH_HASH_NAME, hash_field_list=PBH_HASH_HASH_FIELD_LIST ) - self.dvs_pbh.verify_pbh_hash_count(1) + self.dvs_hash.verify_hash_count(1) + + # PBH table + pbhlogger.info("Create PBH table: {}".format(PBH_TABLE_NAME)) + self.dvs_pbh.create_pbh_table( + table_name=PBH_TABLE_NAME, + interface_list=PBH_TABLE_INTERFACE_LIST, + description=PBH_TABLE_DESCRIPTION + ) + self.dvs_acl.verify_acl_table_count(1) + + # PBH rule + attr_dict = { + "ether_type": PBH_RULE_ETHER_TYPE, + "ip_protocol": PBH_RULE_IP_PROTOCOL, + "gre_key": PBH_RULE_GRE_KEY, + "inner_ether_type": PBH_RULE_INNER_ETHER_TYPE + } + + pbhlogger.info("Create PBH rule: {}".format(PBH_RULE_NAME)) + self.dvs_pbh.create_pbh_rule( + 
table_name=PBH_TABLE_NAME, + rule_name=PBH_RULE_NAME, + priority=PBH_RULE_PRIORITY, + qualifiers=attr_dict, + hash_name=PBH_RULE_HASH + ) + self.dvs_acl.verify_acl_rule_count(1) + finally: + # PBH rule + pbhlogger.info("Remove PBH rule: {}".format(PBH_RULE_NAME)) + self.dvs_pbh.remove_pbh_rule(PBH_TABLE_NAME, PBH_RULE_NAME) + self.dvs_acl.verify_acl_rule_count(0) + + # PBH table + pbhlogger.info("Remove PBH table: {}".format(PBH_TABLE_NAME)) + self.dvs_pbh.remove_pbh_table(PBH_TABLE_NAME) + self.dvs_acl.verify_acl_table_count(0) + + # PBH hash + pbhlogger.info("Remove PBH hash: {}".format(PBH_HASH_NAME)) + self.dvs_pbh.remove_pbh_hash(PBH_HASH_NAME) + self.dvs_hash.verify_hash_count(0) + + # PBH hash field + pbhlogger.info("Remove PBH hash field: {}".format(PBH_HASH_FIELD_NAME)) + self.dvs_pbh.remove_pbh_hash_field(PBH_HASH_FIELD_NAME) + self.dvs_pbh.verify_pbh_hash_field_count(0) + + +@pytest.mark.usefixtures("dvs_hash_manager") +class TestPbhBasicEditFlows: + def test_PbhRuleUpdate(self, testlog): + try: + # PBH hash field + pbhlogger.info("Create PBH hash field: {}".format(PBH_HASH_FIELD_NAME)) + self.dvs_pbh.create_pbh_hash_field( + hash_field_name=PBH_HASH_FIELD_NAME, + hash_field=PBH_HASH_FIELD_HASH_FIELD, + sequence_id=PBH_HASH_FIELD_SEQUENCE_ID + ) + self.dvs_pbh.verify_pbh_hash_field_count(1) + + # PBH hash + pbhlogger.info("Create PBH hash: {}".format(PBH_HASH_NAME)) + self.dvs_pbh.create_pbh_hash( + hash_name=PBH_HASH_NAME, + hash_field_list=PBH_HASH_HASH_FIELD_LIST + ) + self.dvs_hash.verify_hash_count(1) # PBH table pbhlogger.info("Create PBH table: {}".format(PBH_TABLE_NAME)) @@ -231,6 +303,43 @@ def test_PbhRuleCreationDeletion(self, testlog): hash_name=PBH_RULE_HASH ) self.dvs_acl.verify_acl_rule_count(1) + + attr_dict = { + "ether_type": "0x86dd", + "ipv6_next_header": "0x2f", + "inner_ether_type": "0x0800" + } + + pbhlogger.info("Update PBH rule: {}".format(PBH_RULE_NAME)) + self.dvs_pbh.update_pbh_rule( + table_name=PBH_TABLE_NAME, + 
rule_name=PBH_RULE_NAME, + priority="100", + qualifiers=attr_dict, + hash_name=PBH_RULE_HASH, + packet_action="SET_LAG_HASH", + flow_counter="ENABLED" + ) + + hash_id = self.dvs_hash.get_hash_ids(1)[0] + counter_id = self.dvs_acl.get_acl_counter_ids(1)[0] + + sai_attr_dict = { + "SAI_ACL_ENTRY_ATTR_PRIORITY": self.dvs_acl.get_simple_qualifier_comparator("100"), + "SAI_ACL_ENTRY_ATTR_FIELD_ETHER_TYPE": self.dvs_acl.get_simple_qualifier_comparator("34525&mask:0xffff"), + "SAI_ACL_ENTRY_ATTR_FIELD_IP_PROTOCOL": self.dvs_acl.get_simple_qualifier_comparator("disabled"), + "SAI_ACL_ENTRY_ATTR_FIELD_IPV6_NEXT_HEADER": self.dvs_acl.get_simple_qualifier_comparator("47&mask:0xff"), + "SAI_ACL_ENTRY_ATTR_FIELD_GRE_KEY": self.dvs_acl.get_simple_qualifier_comparator("disabled"), + "SAI_ACL_ENTRY_ATTR_FIELD_INNER_ETHER_TYPE": self.dvs_acl.get_simple_qualifier_comparator("2048&mask:0xffff"), + "SAI_ACL_ENTRY_ATTR_ACTION_SET_ECMP_HASH_ID": self.dvs_acl.get_simple_qualifier_comparator("disabled"), + "SAI_ACL_ENTRY_ATTR_ACTION_SET_LAG_HASH_ID": self.dvs_acl.get_simple_qualifier_comparator(hash_id), + "SAI_ACL_ENTRY_ATTR_ACTION_COUNTER": self.dvs_acl.get_simple_qualifier_comparator(counter_id) + } + + self.dvs_acl.verify_acl_rule_generic( + sai_qualifiers=sai_attr_dict + ) + finally: # PBH rule pbhlogger.info("Remove PBH rule: {}".format(PBH_RULE_NAME)) @@ -245,7 +354,7 @@ def test_PbhRuleCreationDeletion(self, testlog): # PBH hash pbhlogger.info("Remove PBH hash: {}".format(PBH_HASH_NAME)) self.dvs_pbh.remove_pbh_hash(PBH_HASH_NAME) - self.dvs_pbh.verify_pbh_hash_count(0) + self.dvs_hash.verify_hash_count(0) # PBH hash field pbhlogger.info("Remove PBH hash field: {}".format(PBH_HASH_FIELD_NAME)) @@ -253,6 +362,122 @@ def test_PbhRuleCreationDeletion(self, testlog): self.dvs_pbh.verify_pbh_hash_field_count(0) + def test_PbhRuleUpdateFlowCounter(self, dvs, testlog): + try: + # PBH hash field + pbhlogger.info("Create PBH hash field: {}".format(PBH_HASH_FIELD_NAME)) + 
self.dvs_pbh.create_pbh_hash_field( + hash_field_name=PBH_HASH_FIELD_NAME, + hash_field=PBH_HASH_FIELD_HASH_FIELD, + sequence_id=PBH_HASH_FIELD_SEQUENCE_ID + ) + self.dvs_pbh.verify_pbh_hash_field_count(1) + + # PBH hash + pbhlogger.info("Create PBH hash: {}".format(PBH_HASH_NAME)) + self.dvs_pbh.create_pbh_hash( + hash_name=PBH_HASH_NAME, + hash_field_list=PBH_HASH_HASH_FIELD_LIST + ) + self.dvs_hash.verify_hash_count(1) + + # PBH table + pbhlogger.info("Create PBH table: {}".format(PBH_TABLE_NAME)) + self.dvs_pbh.create_pbh_table( + table_name=PBH_TABLE_NAME, + interface_list=PBH_TABLE_INTERFACE_LIST, + description=PBH_TABLE_DESCRIPTION + ) + self.dvs_acl.verify_acl_table_count(1) + + # Prepare ACL FLEX Counter environment + meta_data = flex_counter_module.counter_group_meta['acl_counter'] + counter_key = meta_data['key'] + counter_stat = meta_data['group_name'] + counter_map = meta_data['name_map'] + + test_flex_counters = flex_counter_module.TestFlexCounters() + test_flex_counters.setup_dbs(dvs) + test_flex_counters.verify_no_flex_counters_tables(counter_stat) + + # PBH rule + pbhlogger.info("Create PBH rule: {}".format(PBH_RULE_NAME)) + + attr_dict = { + "ether_type": PBH_RULE_ETHER_TYPE, + "ip_protocol": PBH_RULE_IP_PROTOCOL, + "gre_key": PBH_RULE_GRE_KEY, + "inner_ether_type": PBH_RULE_INNER_ETHER_TYPE + } + + self.dvs_pbh.create_pbh_rule( + table_name=PBH_TABLE_NAME, + rule_name=PBH_RULE_NAME, + priority=PBH_RULE_PRIORITY, + qualifiers=attr_dict, + hash_name=PBH_RULE_HASH, + packet_action="SET_ECMP_HASH", + flow_counter="ENABLED" + ) + self.dvs_acl.verify_acl_rule_count(1) + self.dvs_acl.get_acl_counter_ids(1) + + pbhlogger.info("Enable a ACL FLEX counter") + test_flex_counters.set_flex_counter_group_status(counter_key, counter_map) + test_flex_counters.set_flex_counter_group_interval(counter_key, counter_stat, '1000') + test_flex_counters.verify_flex_counters_populated(counter_map, counter_stat) + + pbhlogger.info("Disable a flow counter for PBH rule: 
{}".format(PBH_RULE_NAME)) + self.dvs_pbh.update_pbh_rule( + table_name=PBH_TABLE_NAME, + rule_name=PBH_RULE_NAME, + priority=PBH_RULE_PRIORITY, + qualifiers=attr_dict, + hash_name=PBH_RULE_HASH, + packet_action="SET_ECMP_HASH", + flow_counter="DISABLED" + ) + self.dvs_acl.get_acl_counter_ids(0) + + pbhlogger.info("Enable a flow counter for PBH rule: {}".format(PBH_RULE_NAME)) + self.dvs_pbh.update_pbh_rule( + table_name=PBH_TABLE_NAME, + rule_name=PBH_RULE_NAME, + priority=PBH_RULE_PRIORITY, + qualifiers=attr_dict, + hash_name=PBH_RULE_HASH, + packet_action="SET_ECMP_HASH", + flow_counter="ENABLED" + ) + self.dvs_acl.get_acl_counter_ids(1) + + finally: + # PBH rule + pbhlogger.info("Remove PBH rule: {}".format(PBH_RULE_NAME)) + self.dvs_pbh.remove_pbh_rule(PBH_TABLE_NAME, PBH_RULE_NAME) + self.dvs_acl.verify_acl_rule_count(0) + + # PBH table + pbhlogger.info("Remove PBH table: {}".format(PBH_TABLE_NAME)) + self.dvs_pbh.remove_pbh_table(PBH_TABLE_NAME) + self.dvs_acl.verify_acl_table_count(0) + + # PBH hash + pbhlogger.info("Remove PBH hash: {}".format(PBH_HASH_NAME)) + self.dvs_pbh.remove_pbh_hash(PBH_HASH_NAME) + self.dvs_hash.verify_hash_count(0) + + # PBH hash field + pbhlogger.info("Remove PBH hash field: {}".format(PBH_HASH_FIELD_NAME)) + self.dvs_pbh.remove_pbh_hash_field(PBH_HASH_FIELD_NAME) + self.dvs_pbh.verify_pbh_hash_field_count(0) + + # ACL FLEX counter + pbhlogger.info("Disable ACL FLEX counter") + test_flex_counters.post_trap_flow_counter_test(meta_data) + + +@pytest.mark.usefixtures("dvs_hash_manager") @pytest.mark.usefixtures("dvs_lag_manager") class TestPbhExtendedFlows: class PbhRefCountHelper(object): @@ -374,13 +599,13 @@ def create_hash(self, meta_dict, pbh_ref_count): hash_field_list=meta_dict["hash_field_list"] ) pbh_ref_count.incPbhHashCount() - self.dvs_pbh.verify_pbh_hash_count(pbh_ref_count.getPbhHashCount()) + self.dvs_hash.verify_hash_count(pbh_ref_count.getPbhHashCount()) def remove_hash(self, meta_dict, pbh_ref_count): 
pbhlogger.info("Remove PBH hash: {}".format(meta_dict["name"])) self.dvs_pbh.remove_pbh_hash(meta_dict["name"]) pbh_ref_count.decPbhHashCount() - self.dvs_pbh.verify_pbh_hash_count(pbh_ref_count.getPbhHashCount()) + self.dvs_hash.verify_hash_count(pbh_ref_count.getPbhHashCount()) def create_table(self, meta_dict, pbh_ref_count): pbhlogger.info("Create PBH table: {}".format(meta_dict["name"])) @@ -687,6 +912,7 @@ def test_PbhNvgreVxlanConfiguration(self, testlog, pbh_nvgre, pbh_vxlan): pass +@pytest.mark.usefixtures("dvs_hash_manager") class TestPbhDependencyFlows: def test_PbhHashCreationDeletionWithDependencies(self, testlog): try: @@ -696,7 +922,7 @@ def test_PbhHashCreationDeletionWithDependencies(self, testlog): hash_name=PBH_HASH_NAME, hash_field_list=PBH_HASH_HASH_FIELD_LIST ) - self.dvs_pbh.verify_pbh_hash_count(0) + self.dvs_hash.verify_hash_count(0) # PBH hash field pbhlogger.info("Create PBH hash field: {}".format(PBH_HASH_FIELD_NAME)) @@ -706,7 +932,7 @@ def test_PbhHashCreationDeletionWithDependencies(self, testlog): sequence_id=PBH_HASH_FIELD_SEQUENCE_ID ) self.dvs_pbh.verify_pbh_hash_field_count(1) - self.dvs_pbh.verify_pbh_hash_count(1) + self.dvs_hash.verify_hash_count(1) finally: # PBH hash field pbhlogger.info("Remove PBH hash field: {}".format(PBH_HASH_FIELD_NAME)) @@ -716,7 +942,7 @@ def test_PbhHashCreationDeletionWithDependencies(self, testlog): # PBH hash pbhlogger.info("Remove PBH hash: {}".format(PBH_HASH_NAME)) self.dvs_pbh.remove_pbh_hash(PBH_HASH_NAME) - self.dvs_pbh.verify_pbh_hash_count(0) + self.dvs_hash.verify_hash_count(0) self.dvs_pbh.verify_pbh_hash_field_count(0) def test_PbhRuleCreationDeletionWithDependencies(self, testlog): @@ -727,7 +953,7 @@ def test_PbhRuleCreationDeletionWithDependencies(self, testlog): hash_name=PBH_HASH_NAME, hash_field_list=PBH_HASH_HASH_FIELD_LIST ) - self.dvs_pbh.verify_pbh_hash_count(0) + self.dvs_hash.verify_hash_count(0) # PBH hash field pbhlogger.info("Create PBH hash field: 
{}".format(PBH_HASH_FIELD_NAME)) @@ -737,7 +963,7 @@ def test_PbhRuleCreationDeletionWithDependencies(self, testlog): sequence_id=PBH_HASH_FIELD_SEQUENCE_ID ) self.dvs_pbh.verify_pbh_hash_field_count(1) - self.dvs_pbh.verify_pbh_hash_count(1) + self.dvs_hash.verify_hash_count(1) # PBH rule attr_dict = { @@ -787,7 +1013,7 @@ def test_PbhRuleCreationDeletionWithDependencies(self, testlog): # PBH hash pbhlogger.info("Remove PBH hash: {}".format(PBH_HASH_NAME)) self.dvs_pbh.remove_pbh_hash(PBH_HASH_NAME) - self.dvs_pbh.verify_pbh_hash_count(0) + self.dvs_hash.verify_hash_count(0) self.dvs_pbh.verify_pbh_hash_field_count(0) diff --git a/tests/test_pfcwd.py b/tests/test_pfcwd.py index 78cd851574..c88b6f6e96 100644 --- a/tests/test_pfcwd.py +++ b/tests/test_pfcwd.py @@ -103,7 +103,7 @@ def setup_test(self, dvs): # set cable len to non zero value. if port is down, default cable len is 0 self.set_cable_len(port, "5m") # startup port - dvs.runcmd("config interface startup {}".format(port)) + dvs.port_admin_set(port, "up") # enable pfcwd self.set_flex_counter_status("PFCWD", "enable") @@ -120,7 +120,7 @@ def teardown_test(self, dvs): if self.orig_cable_len: self.set_cable_len(port, self.orig_cable_len[port]) # shutdown port - dvs.runcmd("config interface shutdown {}".format(port)) + dvs.port_admin_set(port, "down") def get_db_handle(self, dvs): self.app_db = dvs.get_app_db() @@ -148,9 +148,11 @@ def _get_bitmask(self, queues): return str(mask) def set_ports_pfc(self, status='enable', pfc_queues=[3,4]): + keyname = 'pfcwd_sw_enable' for port in self.test_ports: if 'enable' in status: - fvs = {'pfc_enable': ",".join([str(q) for q in pfc_queues])} + queues = ",".join([str(q) for q in pfc_queues]) + fvs = {keyname: queues, 'pfc_enable': queues} self.config_db.create_entry("PORT_QOS_MAP", port, fvs) else: self.config_db.delete_entry("PORT_QOS_MAP", port) @@ -212,7 +214,7 @@ def set_storm_state(self, queues, state="enabled"): queue_name = port + ":" + str(queue) 
self.counters_db.update_entry("COUNTERS", self.queue_oids[queue_name], fvs) - def test_pfcwd_single_queue(self, dvs, setup_teardown_test): + def test_pfcwd_software_single_queue(self, dvs, setup_teardown_test): try: # enable PFC on queues test_queues = [3, 4] @@ -253,7 +255,7 @@ def test_pfcwd_single_queue(self, dvs, setup_teardown_test): self.reset_pfcwd_counters(storm_queue) self.stop_pfcwd_on_ports() - def test_pfcwd_multi_queue(self, dvs, setup_teardown_test): + def test_pfcwd_software_multi_queue(self, dvs, setup_teardown_test): try: # enable PFC on queues test_queues = [3, 4] diff --git a/tests/test_pg_drop_counter.py b/tests/test_pg_drop_counter.py index 1cdd834747..6d97af5f5c 100644 --- a/tests/test_pg_drop_counter.py +++ b/tests/test_pg_drop_counter.py @@ -2,7 +2,6 @@ import re import time import json -import pytest import redis from swsscommon import swsscommon @@ -58,14 +57,11 @@ def verify_value(self, dvs, obj_ids, entry_name, expected_value): assert found, "entry name %s not found" % (entry_name) def set_up_flex_counter(self): - pg_stats_entry = {"PG_COUNTER_ID_LIST": "{}".format(pg_drop_attr)} - for pg in self.pgs: - self.flex_db.create_entry("FLEX_COUNTER_TABLE", "PG_DROP_STAT_COUNTER:{}".format(pg), pg_stats_entry) - fc_status_enable = {"FLEX_COUNTER_STATUS": "enable"} - self.config_db.create_entry("FLEX_COUNTER_TABLE", "PG_DROP", fc_status_enable) self.config_db.create_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK", fc_status_enable) + # Wait for DB's to populate by orchagent + time.sleep(2) def clear_flex_counter(self): for pg in self.pgs: @@ -73,14 +69,15 @@ def clear_flex_counter(self): self.config_db.delete_entry("FLEX_COUNTER_TABLE", "PG_DROP") self.config_db.delete_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK") - - + def test_pg_drop_counters(self, dvs): self.setup_dbs(dvs) - self.pgs = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP") - try: - self.set_up_flex_counter() + self.set_up_flex_counter() + # Get all configured 
counters OID's + self.pgs = self.counters_db.db_connection.hgetall("COUNTERS_PG_NAME_MAP").values() + assert self.pgs is not None and len(self.pgs) > 0 + try: self.populate_asic(dvs, "0") time.sleep(self.DEFAULT_POLL_INTERVAL) self.verify_value(dvs, self.pgs, pg_drop_attr, "0") @@ -94,3 +91,4 @@ def test_pg_drop_counters(self, dvs): self.verify_value(dvs, self.pgs, pg_drop_attr, "123") finally: self.clear_flex_counter() + diff --git a/tests/test_port.py b/tests/test_port.py index 4766c87deb..c880a88c5a 100644 --- a/tests/test_port.py +++ b/tests/test_port.py @@ -59,11 +59,11 @@ def test_PortMtu(self, dvs, testlog): assert fv[1] == "9100" def test_PortNotification(self, dvs, testlog): - dvs.runcmd("config interface startup Ethernet0") - dvs.runcmd("config interface ip add Ethernet0 10.0.0.0/31") + dvs.port_admin_set("Ethernet0", "up") + dvs.interface_ip_add("Ethernet0", "10.0.0.0/31") - dvs.runcmd("config interface startup Ethernet4") - dvs.runcmd("config interface ip add Ethernet4 10.0.0.2/31") + dvs.port_admin_set("Ethernet4", "up") + dvs.interface_ip_add("Ethernet4", "10.0.0.2/31") dvs.servers[0].runcmd("ip link set down dev eth0") == 0 @@ -126,11 +126,11 @@ def test_PortFecForce(self, dvs, testlog): adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) def test_PortFec(self, dvs, testlog): - dvs.runcmd("config interface startup Ethernet0") - dvs.runcmd("config interface ip add Ethernet0 10.0.0.0/31") + dvs.port_admin_set("Ethernet0", "up") + dvs.interface_ip_add("Ethernet0", "10.0.0.0/31") - dvs.runcmd("config interface startup Ethernet4") - dvs.runcmd("config interface ip add Ethernet4 10.0.0.2/31") + dvs.port_admin_set("Ethernet4", "up") + dvs.interface_ip_add("Ethernet4", "10.0.0.2/31") dvs.servers[0].runcmd("ip link set down dev eth0") == 0 @@ -277,6 +277,17 @@ def test_PortIpredriver(self, dvs, testlog): if fv[0] == "SAI_PORT_ATTR_SERDES_IPREDRIVER": assert fv[1] == ipre_val_asic + def test_PortHostif(self, dvs): + adb = 
swsscommon.DBConnector(1, dvs.redis_sock, 0) + atbl = swsscommon.Table(adb, "ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF") + host_intfs = atbl.getKeys() + for intf in host_intfs: + status, fvs = atbl.get(intf) + assert status, "Error getting value for key" + attributes = dict(fvs) + hostif_queue = attributes.get("SAI_HOSTIF_ATTR_QUEUE") + assert hostif_queue == "7" + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_port_add_remove.py b/tests/test_port_add_remove.py new file mode 100644 index 0000000000..54cd6599c9 --- /dev/null +++ b/tests/test_port_add_remove.py @@ -0,0 +1,525 @@ +import pytest +import time +import buffer_model +from dvslib.dvs_common import PollingConfig + +# the port to be removed and add +PORT_A = "Ethernet64" +PORT_B = "Ethernet68" + +""" +DELETE_CREATE_ITERATIONS defines the number of iteration of delete and create to ports, +we add different timeouts between delete/create to catch potential race condition that can lead to system crush + +Add \ Remove of Buffers can be done only when the model is dynamic. 
+""" +DELETE_CREATE_ITERATIONS = 10 + +@pytest.yield_fixture +def dynamic_buffer(dvs): + buffer_model.enable_dynamic_buffer(dvs.get_config_db(), dvs.runcmd) + yield + buffer_model.disable_dynamic_buffer(dvs.get_config_db(), dvs.runcmd) + + +@pytest.mark.usefixtures('dvs_port_manager') +@pytest.mark.usefixtures("dynamic_buffer") +class TestPortAddRemove(object): + + def set_mmu(self,dvs): + state_db = dvs.get_state_db() + # set mmu size + fvs = {"mmu_size": "12766208"} + state_db.create_entry("BUFFER_MAX_PARAM_TABLE", "global", fvs) + + + def test_remove_add_remove_port_with_buffer_cfg(self, dvs, testlog): + config_db = dvs.get_config_db() + asic_db = dvs.get_asic_db() + state_db = dvs.get_state_db() + app_db = dvs.get_app_db() + + # set mmu size + self.set_mmu(dvs) + + # Startup interface + dvs.port_admin_set(PORT_A, 'up') + + # get port info + port_info = config_db.get_entry("PORT", PORT_A) + + # get the number of ports before removal + num_of_ports = len(asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT")) + + # remove buffer pg cfg for the port (record the buffer pgs before removing them) + pgs = config_db.get_keys('BUFFER_PG') + buffer_pgs = {} + for key in pgs: + if PORT_A in key: + buffer_pgs[key] = config_db.get_entry('BUFFER_PG', key) + config_db.delete_entry('BUFFER_PG', key) + app_db.wait_for_deleted_entry("BUFFER_PG_TABLE", key) + + # modify buffer queue entry to egress_lossless_profile instead of egress_lossy_profile + config_db.update_entry("BUFFER_QUEUE", "%s|0-2"%PORT_A, {"profile": "egress_lossless_profile"}) + + # remove buffer queue cfg for the port + queues = config_db.get_keys('BUFFER_QUEUE') + buffer_queues = {} + for key in queues: + if PORT_A in key: + buffer_queues[key] = config_db.get_entry('BUFFER_QUEUE', key) + config_db.delete_entry('BUFFER_QUEUE', key) + app_db.wait_for_deleted_entry('BUFFER_QUEUE_TABLE', key) + + # Shutdown interface + dvs.port_admin_set(PORT_A, 'down') + + # try to remove this port + config_db.delete_entry('PORT', 
PORT_A) + num = asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", + num_of_ports-1, + polling_config = PollingConfig(polling_interval = 1, timeout = 5.00, strict = True)) + + # verify that the port was removed properly since all buffer configuration was removed also + assert len(num) == num_of_ports - 1 + + # set back the port + config_db.update_entry("PORT", PORT_A, port_info) + + # verify that the port has been readded + num = asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", + num_of_ports, + polling_config = PollingConfig(polling_interval = 1, timeout = 5.00, strict = True)) + + assert len(num) == num_of_ports + + # re-add buffer pg and queue cfg to the port + for key, pg in buffer_pgs.items(): + config_db.update_entry("BUFFER_PG", key, pg) + + for key, queue in buffer_queues.items(): + config_db.update_entry("BUFFER_QUEUE", key, queue) + + time.sleep(5) + + # Remove the port with buffer configuration + config_db.delete_entry('PORT', PORT_A) + num = asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", + num_of_ports-1, + polling_config = PollingConfig(polling_interval = 1, timeout = 5.00, strict = False)) + + # verify that the port wasn't removed since we still have buffer cfg + assert len(num) == num_of_ports + + # Remove buffer pgs + for key in buffer_pgs.keys(): + config_db.delete_entry('BUFFER_PG', key) + app_db.wait_for_deleted_entry("BUFFER_PG_TABLE", key) + + num = asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", + num_of_ports-1, + polling_config = PollingConfig(polling_interval = 1, timeout = 5.00, strict = False)) + + # verify that the port wasn't removed since we still have buffer cfg + assert len(num) == num_of_ports + + # Remove buffer queue + for key in buffer_queues.keys(): + config_db.delete_entry('BUFFER_QUEUE', key) + app_db.wait_for_deleted_entry('BUFFER_QUEUE_TABLE', key) + + num = asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", + num_of_ports-1, + polling_config = PollingConfig(polling_interval = 
1, timeout = 5.00, strict = True)) + + # verify that the port wasn't removed since we still have buffer cfg + assert len(num) == num_of_ports - 1 + + # set back the port as it is required for next test + config_db.update_entry("PORT", PORT_A, port_info) + + + + @pytest.mark.parametrize("scenario", ["one_port", "all_ports"]) + def test_add_remove_all_the_ports(self, dvs, testlog, scenario): + config_db = dvs.get_config_db() + state_db = dvs.get_state_db() + asic_db = dvs.get_asic_db() + app_db = dvs.get_app_db() + + # set mmu size + self.set_mmu(dvs) + + # get the number of ports before removal + num_of_ports = len(asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT")) + + # remove buffer pg cfg for the port + if scenario == "all_ports": + ports = config_db.get_keys('PORT') + elif scenario == "one_port": + ports = [PORT_A] + else: + assert False + + # delete all PGs and QUEUEs from the relevant ports + pgs = config_db.get_keys('BUFFER_PG') + queues = config_db.get_keys('BUFFER_QUEUE') + + for port in ports: + for key in pgs: + if port in key: + config_db.delete_entry('BUFFER_PG', key) + app_db.wait_for_deleted_entry('BUFFER_PG_TABLE', key) + + for key in queues: + if port in key: + config_db.delete_entry('BUFFER_QUEUE', key) + app_db.wait_for_deleted_entry('BUFFER_QUEUE_TABLE', key) + + ports_info = {} + + for key in ports: + # read port info and save it + ports_info[key] = config_db.get_entry("PORT", key) + + + for i in range(DELETE_CREATE_ITERATIONS): + # remove ports + for key in ports: + config_db.delete_entry('PORT',key) + app_db.wait_for_deleted_entry("PORT_TABLE", key) + + # verify remove port + num = asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", + num_of_ports-len(ports)) + + assert len(num) == num_of_ports-len(ports) + + # add port + """ + DELETE_CREATE_ITERATIONS defines the number of iteration of delete and create to ports, + we add different timeouts between delete/create to catch potential race condition that can lead to system crush. 
+ """ + time.sleep(i%3) + for key in ports: + config_db.update_entry("PORT", key, ports_info[key]) + app_db.wait_for_entry('PORT_TABLE',key) + + # verify add port + num = asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", + num_of_ports) + + assert len(num) == num_of_ports + + time.sleep((i%2)+1) + + # run ping + dvs.setup_db() + + dvs.create_vlan("6") + dvs.create_vlan_member("6", PORT_A) + dvs.create_vlan_member("6", PORT_B) + + port_entry_a = state_db.get_entry("PORT_TABLE",PORT_A) + port_entry_b = state_db.get_entry("PORT_TABLE",PORT_B) + port_admin_a = port_entry_a['admin_status'] + port_admin_b = port_entry_b['admin_status'] + + dvs.set_interface_status("Vlan6", "up") + dvs.add_ip_address("Vlan6", "6.6.6.1/24") + dvs.set_interface_status(PORT_A, "up") + dvs.set_interface_status(PORT_B, "up") + + dvs.servers[16].runcmd("ifconfig eth0 6.6.6.6/24 up") + dvs.servers[16].runcmd("ip route add default via 6.6.6.1") + dvs.servers[17].runcmd("ifconfig eth0 6.6.6.7/24 up") + dvs.servers[17].runcmd("ip route add default via 6.6.6.1") + + time.sleep(2) + + rc = dvs.servers[16].runcmd("ping -c 1 6.6.6.7") + assert rc == 0 + + rc = dvs.servers[17].runcmd("ping -c 1 6.6.6.6") + assert rc == 0 + + dvs.set_interface_status(PORT_A, port_admin_a) + dvs.set_interface_status(PORT_B, port_admin_b) + dvs.remove_vlan_member("6", PORT_A) + dvs.remove_vlan_member("6", PORT_B) + dvs.remove_ip_address("Vlan6", "6.6.6.1/24") + dvs.remove_vlan("6") + + +@pytest.mark.usefixtures("dynamic_buffer") +@pytest.mark.usefixtures("dvs_port_manager") +class TestPortAddRemoveDup(object): + def test_add_remove_with_dup_lanes(self, testlog, dvs): + config_db = dvs.get_config_db() + app_db = dvs.get_app_db() + state_db = dvs.get_state_db() + + # set mmu size + fvs = {"mmu_size": "12766208"} + state_db.create_entry("BUFFER_MAX_PARAM_TABLE", "global", fvs) + + # get port count + port_count = len(self.dvs_port.get_port_ids()) + + # get port info + port_info = config_db.get_entry("PORT", PORT_A) + 
+ # remove buffer pg cfg for the port + pgs = config_db.get_keys("BUFFER_PG") + buffer_pgs = {} + for key in pgs: + if PORT_A in key: + buffer_pgs[key] = config_db.get_entry("BUFFER_PG", key) + config_db.delete_entry("BUFFER_PG", key) + app_db.wait_for_deleted_entry("BUFFER_PG_TABLE", key.replace(config_db.separator, app_db.separator)) + + # remove buffer queue cfg for the port + queues = config_db.get_keys("BUFFER_QUEUE") + buffer_queues = {} + for key in queues: + if PORT_A in key: + buffer_queues[key] = config_db.get_entry("BUFFER_QUEUE", key) + config_db.delete_entry("BUFFER_QUEUE", key) + app_db.wait_for_deleted_entry("BUFFER_QUEUE_TABLE", key.replace(config_db.separator, app_db.separator)) + + # shutdown port + dvs.port_admin_set(PORT_A, "down") + + # remove port + self.dvs_port.remove_port_generic(PORT_A) + self.dvs_port.verify_port_count(port_count-1) + + # make port config with duplicate lanes + dup_lanes = port_info["lanes"] + dup_lanes += ",{}".format(port_info["lanes"].split(",")[-1]) + + # add port + self.dvs_port.create_port_generic(PORT_A, dup_lanes, port_info["speed"]) + self.dvs_port.verify_port_count(port_count) + + # shutdown port + dvs.port_admin_set(PORT_A, "down") + + # remove port + self.dvs_port.remove_port_generic(PORT_A) + self.dvs_port.verify_port_count(port_count-1) + + # make port config + port_lanes = port_info.pop("lanes") + port_speed = port_info.pop("speed") + + # re-add port + self.dvs_port.create_port_generic(PORT_A, port_lanes, port_speed, port_info) + self.dvs_port.verify_port_count(port_count) + + # re-add buffer pg and queue cfg to the port + for key, pg in buffer_pgs.items(): + config_db.update_entry("BUFFER_PG", key, pg) + app_db.wait_for_entry("BUFFER_PG_TABLE", key.replace(config_db.separator, app_db.separator)) + + for key, queue in buffer_queues.items(): + config_db.update_entry("BUFFER_QUEUE", key, queue) + app_db.wait_for_entry("BUFFER_QUEUE_TABLE", key.replace(config_db.separator, app_db.separator)) + + 
+@pytest.mark.usefixtures("dvs_port_manager") +class TestPortAddRemoveInvalidMandatoryParam(object): + @pytest.mark.parametrize( + "port,lanes,speed", [ + pytest.param("Ethernet1000", "", "10000", id="empty-lanes-list"), + pytest.param("Ethernet1004", "1004,x,1006,1007", "10000", id="invalid-lanes-list"), + pytest.param("Ethernet1008", "1008,1009,1010,1011", "", id="empty-speed"), + pytest.param("Ethernet1012", "1012,1013,1014,1015", "invalid", id="invalid-speed"), + pytest.param("Ethernet1016", "1016,1017,1018,1019", "0", id="out-of-range-speed") + ] + ) + def test_add_remove_neg(self, testlog, port, lanes, speed): + # get port count + port_asicdb_count = len(self.dvs_port.get_port_ids(dbid=self.dvs_port.ASIC_DB)) + port_appdb_count = len(self.dvs_port.get_port_ids(dbid=self.dvs_port.APPL_DB)) + + # add port + self.dvs_port.create_port_generic(port, lanes, speed) + self.dvs_port.verify_port_count(port_appdb_count+1, self.dvs_port.APPL_DB) + self.dvs_port.verify_port_count(port_asicdb_count, self.dvs_port.ASIC_DB) + + # remove port + self.dvs_port.remove_port_generic(port) + self.dvs_port.verify_port_count(port_appdb_count, self.dvs_port.APPL_DB) + self.dvs_port.verify_port_count(port_asicdb_count, self.dvs_port.ASIC_DB) + + +@pytest.mark.usefixtures("dvs_port_manager") +class TestPortAddRemoveInvalidSerdesParam(object): + @pytest.fixture(scope="class") + def port_attr(self): + meta_dict = { + "port": "Ethernet1000", + "lanes": "1000,1001,1002,1003", + "speed": "100000", + "port_asicdb_count": len(self.dvs_port.get_port_ids(dbid=self.dvs_port.ASIC_DB)), + "port_appdb_count": len(self.dvs_port.get_port_ids(dbid=self.dvs_port.APPL_DB)) + } + yield meta_dict + + def verify_add_remove(self, attr, qualifiers): + # add port + self.dvs_port.create_port_generic(attr["port"], attr["lanes"], attr["speed"], qualifiers) + self.dvs_port.verify_port_count(attr["port_appdb_count"]+1, self.dvs_port.APPL_DB) + self.dvs_port.verify_port_count(attr["port_asicdb_count"], 
self.dvs_port.ASIC_DB) + + # remove port + self.dvs_port.remove_port_generic(attr["port"]) + self.dvs_port.verify_port_count(attr["port_appdb_count"], self.dvs_port.APPL_DB) + self.dvs_port.verify_port_count(attr["port_asicdb_count"], self.dvs_port.ASIC_DB) + + @pytest.mark.parametrize( + "serdes", [ + pytest.param("preemphasis", id="preemphasis"), + pytest.param("idriver", id="idriver"), + pytest.param("ipredriver", id="ipredriver"), + pytest.param("pre1", id="pre1"), + pytest.param("pre2", id="pre2"), + pytest.param("pre3", id="pre3"), + pytest.param("main", id="main"), + pytest.param("post1", id="post1"), + pytest.param("post2", id="post2"), + pytest.param("post3", id="post3"), + pytest.param("attn", id="attn") + ] + ) + def test_add_remove_neg(self, testlog, port_attr, serdes): + qualifiers = { serdes: "" } + self.verify_add_remove(port_attr, qualifiers) + + qualifiers = { serdes: "invalid" } + self.verify_add_remove(port_attr, qualifiers) + + +@pytest.mark.usefixtures("dvs_port_manager") +class TestPortAddRemoveInvalidParam(object): + def verify_add_remove(self, qualifiers): + port = "Ethernet1000" + lanes = "1000,1001,1002,1003" + speed = "100000" + + # get port count + port_asicdb_count = len(self.dvs_port.get_port_ids(dbid=self.dvs_port.ASIC_DB)) + port_appdb_count = len(self.dvs_port.get_port_ids(dbid=self.dvs_port.APPL_DB)) + + # add port + self.dvs_port.create_port_generic(port, lanes, speed, qualifiers) + self.dvs_port.verify_port_count(port_appdb_count+1, self.dvs_port.APPL_DB) + self.dvs_port.verify_port_count(port_asicdb_count, self.dvs_port.ASIC_DB) + + # remove port + self.dvs_port.remove_port_generic(port) + self.dvs_port.verify_port_count(port_appdb_count, self.dvs_port.APPL_DB) + self.dvs_port.verify_port_count(port_asicdb_count, self.dvs_port.ASIC_DB) + + def test_add_remove_neg_alias(self, testlog): + qualifiers = { "alias": "" } + self.verify_add_remove(qualifiers) + + def test_add_remove_neg_index(self, testlog): + qualifiers = { "index": "" 
} + self.verify_add_remove(qualifiers) + + qualifiers = { "index": "invalid" } + self.verify_add_remove(qualifiers) + + def test_add_remove_neg_autoneg(self, testlog): + qualifiers = { "autoneg": "" } + self.verify_add_remove(qualifiers) + + qualifiers = { "autoneg": "invalid" } + self.verify_add_remove(qualifiers) + + def test_add_remove_neg_adv_speeds(self, testlog): + qualifiers = { "adv_speeds": "" } + self.verify_add_remove(qualifiers) + + qualifiers = { "adv_speeds": "0" } + self.verify_add_remove(qualifiers) + + qualifiers = { "adv_speeds": "invalid" } + self.verify_add_remove(qualifiers) + + def test_add_remove_neg_interface_type(self, testlog): + qualifiers = { "interface_type": "" } + self.verify_add_remove(qualifiers) + + qualifiers = { "interface_type": "invalid" } + self.verify_add_remove(qualifiers) + + def test_add_remove_neg_adv_interface_types(self, testlog): + qualifiers = { "adv_interface_types": "" } + self.verify_add_remove(qualifiers) + + qualifiers = { "adv_interface_types": "invalid" } + self.verify_add_remove(qualifiers) + + def test_add_remove_neg_fec(self, testlog): + qualifiers = { "fec": "" } + self.verify_add_remove(qualifiers) + + qualifiers = { "fec": "invalid" } + self.verify_add_remove(qualifiers) + + def test_add_remove_neg_mtu(self, testlog): + qualifiers = { "mtu": "" } + self.verify_add_remove(qualifiers) + + qualifiers = { "mtu": "0" } + self.verify_add_remove(qualifiers) + + qualifiers = { "mtu": "invalid" } + self.verify_add_remove(qualifiers) + + def test_add_remove_neg_tpid(self, testlog): + qualifiers = { "tpid": "" } + self.verify_add_remove(qualifiers) + + qualifiers = { "tpid": "invalid" } + self.verify_add_remove(qualifiers) + + def test_add_remove_neg_pfc_asym(self, testlog): + qualifiers = { "pfc_asym": "" } + self.verify_add_remove(qualifiers) + + qualifiers = { "pfc_asym": "invalid" } + self.verify_add_remove(qualifiers) + + def test_add_remove_neg_learn_mode(self, testlog): + qualifiers = { "learn_mode": "" } + 
self.verify_add_remove(qualifiers) + + qualifiers = { "learn_mode": "invalid" } + self.verify_add_remove(qualifiers) + + def test_add_remove_neg_link_training(self, testlog): + qualifiers = { "link_training": "" } + self.verify_add_remove(qualifiers) + + qualifiers = { "link_training": "invalid" } + self.verify_add_remove(qualifiers) + + def test_add_remove_neg_role(self, testlog): + qualifiers = { "role": "" } + self.verify_add_remove(qualifiers) + + qualifiers = { "role": "invalid" } + self.verify_add_remove(qualifiers) + + def test_add_remove_neg_admin_status(self, testlog): + qualifiers = { "admin_status": "" } + self.verify_add_remove(qualifiers) + + qualifiers = { "admin_status": "invalid" } + self.verify_add_remove(qualifiers) diff --git a/tests/test_port_an.py b/tests/test_port_an.py index 93add09b9a..5356d2e837 100644 --- a/tests/test_port_an.py +++ b/tests/test_port_an.py @@ -254,12 +254,11 @@ def test_PortAutoNegWarm(self, dvs, testlog): cfvs = swsscommon.FieldValuePairs([("admin_status", "up")]) ctbl.set("Ethernet0", cfvs) - # enable warm restart - (exitcode, result) = dvs.runcmd("config warm_restart enable swss") - assert exitcode == 0 + + dvs.warm_restart_swss("true") # freeze orchagent for warm restart - (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check") + (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check", include_stderr=False) assert result == "RESTARTCHECK succeeded\n" time.sleep(2) @@ -290,10 +289,69 @@ def test_PortAutoNegWarm(self, dvs, testlog): finally: # disable warm restart - dvs.runcmd("config warm_restart disable swss") + dvs.warm_restart_swss("disable") # slow down crm polling - dvs.runcmd("crm config polling interval 10000") + dvs.crm_poll_set("10000") + + def test_PortAutoNegRemoteAdvSpeeds(self, dvs, testlog): + + cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0) + sdb = swsscommon.DBConnector(6, dvs.redis_sock, 0) + + ctbl = swsscommon.Table(cdb, "PORT") + stbl = swsscommon.Table(sdb, "PORT_TABLE") + 
+ # set autoneg = true and admin_status = up + fvs = swsscommon.FieldValuePairs([("autoneg","on"),("admin_status","up")]) + ctbl.set("Ethernet0", fvs) + + time.sleep(10) + + (status, fvs) = stbl.get("Ethernet0") + assert status == True + assert "rmt_adv_speeds" in [fv[0] for fv in fvs] + + def test_PortAdvWithoutAutoneg(self, dvs, testlog): + + db = swsscommon.DBConnector(0, dvs.redis_sock, 0) + cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0) + sdb = swsscommon.DBConnector(6, dvs.redis_sock, 0) + + tbl = swsscommon.ProducerStateTable(db, "PORT_TABLE") + ctbl = swsscommon.Table(cdb, "PORT") + stbl = swsscommon.Table(sdb, "PORT_TABLE") + + # set autoneg = off + fvs = swsscommon.FieldValuePairs([("autoneg", "off")]) + ctbl.set("Ethernet0", fvs) + + time.sleep(1) + fvs = swsscommon.FieldValuePairs([("adv_speeds", "100,1000"), + ("adv_interface_types", "CR2,CR4")]) + ctbl.set("Ethernet0", fvs) + + time.sleep(1) + + adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) + atbl = swsscommon.Table(adb, "ASIC_STATE:SAI_OBJECT_TYPE_PORT") + (status, fvs) = atbl.get(dvs.asicdb.portnamemap["Ethernet0"]) + assert status == True + + assert "SAI_PORT_ATTR_AUTO_NEG_MODE" in [fv[0] for fv in fvs] + assert "SAI_PORT_ATTR_ADVERTISED_SPEED" in [fv[0] for fv in fvs] + assert "SAI_PORT_ATTR_ADVERTISED_INTERFACE_TYPE" in [fv[0] for fv in fvs] + for fv in fvs: + if fv[0] == "SAI_PORT_ATTR_AUTO_NEG_MODE": + assert fv[1] == "false" + elif fv[0] == "SAI_PORT_ATTR_ADVERTISED_SPEED": + assert fv[1] == "2:100,1000" + elif fv[0] == "SAI_PORT_ATTR_ADVERTISED_INTERFACE_TYPE": + assert fv[1] == "2:SAI_PORT_INTERFACE_TYPE_CR2,SAI_PORT_INTERFACE_TYPE_CR4" + + # set admin up + cfvs = swsscommon.FieldValuePairs([("admin_status", "up")]) + ctbl.set("Ethernet0", cfvs) # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_port_config.py b/tests/test_port_config.py index d584899e97..b6f51e4e86 100644 --- 
a/tests/test_port_config.py +++ b/tests/test_port_config.py @@ -7,7 +7,7 @@ from dvslib.dvs_common import wait_for_result, PollingConfig -@pytest.yield_fixture +@pytest.fixture def port_config(request, dvs): file_name = "/usr/share/sonic/hwsku/port_config.ini" dvs.runcmd("cp %s %s.bak" % (file_name, file_name)) diff --git a/tests/test_port_dpb_vlan.py b/tests/test_port_dpb_vlan.py index df03a5ecf9..e6f89beb1a 100644 --- a/tests/test_port_dpb_vlan.py +++ b/tests/test_port_dpb_vlan.py @@ -52,6 +52,7 @@ def test_dependency(self, dvs): self.dvs_vlan.remove_vlan(vlan) self.dvs_vlan.get_and_verify_vlan_ids(0) + @pytest.mark.skip(reason="Failing. Under investigation") def test_one_port_one_vlan(self, dvs): dpb = DPB() vlan = "100" @@ -117,6 +118,7 @@ def test_one_port_one_vlan(self, dvs): self.dvs_vlan.remove_vlan(vlan) self.dvs_vlan.get_and_verify_vlan_ids(0) + @pytest.mark.skip(reason="Failing. Under investigation") def test_one_port_multiple_vlan(self, dvs): dpb = DPB() @@ -182,6 +184,7 @@ def test_one_port_multiple_vlan(self, dvs): self.dvs_vlan.remove_vlan("102") self.dvs_vlan.get_and_verify_vlan_ids(0) + @pytest.mark.skip(reason="Failing. 
Under investigation") def test_all_port_10_vlans(self, dvs): num_vlans = 10 start_vlan = 100 diff --git a/tests/test_port_lt.py b/tests/test_port_lt.py new file mode 100644 index 0000000000..0da6abb071 --- /dev/null +++ b/tests/test_port_lt.py @@ -0,0 +1,139 @@ +import time +import os +import pytest + +from swsscommon import swsscommon + + +class TestPortLinkTraining(object): + def test_PortLinkTrainingForce(self, dvs, testlog): + + db = swsscommon.DBConnector(0, dvs.redis_sock, 0) + adb = dvs.get_asic_db() + + tbl = swsscommon.ProducerStateTable(db, "PORT_TABLE") + fvs = swsscommon.FieldValuePairs([("link_training","off")]) + tbl.set("Ethernet0", fvs) + + tbl = swsscommon.ProducerStateTable(db, "PORT_TABLE") + fvs = swsscommon.FieldValuePairs([("link_training","on")]) + tbl.set("Ethernet4", fvs) + + # validate if link_training false is pushed to asic db when set first time + port_oid = adb.port_name_map["Ethernet0"] + expected_fields = {"SAI_PORT_ATTR_LINK_TRAINING_ENABLE":"false"} + adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + + # validate if link_training true is pushed to asic db when set first time + port_oid = adb.port_name_map["Ethernet4"] + expected_fields = {"SAI_PORT_ATTR_LINK_TRAINING_ENABLE":"true"} + adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + + def test_PortLinkTrainingCold(self, dvs, testlog): + db = swsscommon.DBConnector(0, dvs.redis_sock, 0) + + tbl = swsscommon.ProducerStateTable(db, "PORT_TABLE") + + # set link_training = true + fvs = swsscommon.FieldValuePairs([("link_training","on")]) + + tbl.set("Ethernet0", fvs) + + time.sleep(1) + + adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) + + atbl = swsscommon.Table(adb, "ASIC_STATE:SAI_OBJECT_TYPE_PORT") + (status, fvs) = atbl.get(dvs.asicdb.portnamemap["Ethernet0"]) + assert status == True + + assert "SAI_PORT_ATTR_LINK_TRAINING_ENABLE" in [fv[0] for fv in fvs] + for fv in fvs: + if fv[0] == 
"SAI_PORT_ATTR_LINK_TRAINING_ENABLE": + assert fv[1] == "true" + + # change link_training to false + fvs = swsscommon.FieldValuePairs([("link_training","off")]) + + tbl.set("Ethernet0", fvs) + + time.sleep(1) + + (status, fvs) = atbl.get(dvs.asicdb.portnamemap["Ethernet0"]) + assert status == True + + assert "SAI_PORT_ATTR_LINK_TRAINING_ENABLE" in [fv[0] for fv in fvs] + for fv in fvs: + if fv[0] == "SAI_PORT_ATTR_LINK_TRAINING_ENABLE": + assert fv[1] == "false" + + def test_PortLinkTrainingWarm(self, dvs, testlog): + + db = swsscommon.DBConnector(0, dvs.redis_sock, 0) + cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0) + sdb = swsscommon.DBConnector(6, dvs.redis_sock, 0) + + tbl = swsscommon.ProducerStateTable(db, "PORT_TABLE") + ctbl = swsscommon.Table(cdb, "PORT") + stbl = swsscommon.Table(sdb, "PORT_TABLE") + + # set link_training = true + fvs = swsscommon.FieldValuePairs([("link_training","on")]) + ctbl.set("Ethernet0", fvs) + + time.sleep(1) + + adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) + + atbl = swsscommon.Table(adb, "ASIC_STATE:SAI_OBJECT_TYPE_PORT") + (status, fvs) = atbl.get(dvs.asicdb.portnamemap["Ethernet0"]) + assert status == True + + assert "SAI_PORT_ATTR_LINK_TRAINING_ENABLE" in [fv[0] for fv in fvs] + for fv in fvs: + if fv[0] == "SAI_PORT_ATTR_AUTO_NEG_MODE": + assert fv[1] == "true" + + # set admin up + cfvs = swsscommon.FieldValuePairs([("admin_status", "up")]) + ctbl.set("Ethernet0", cfvs) + + # enable warm restart + (exitcode, result) = dvs.runcmd("config warm_restart enable swss") + assert exitcode == 0 + + # freeze orchagent for warm restart + (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check", include_stderr=False) + assert result == "RESTARTCHECK succeeded\n" + time.sleep(2) + + try: + # restart orchagent + # clean port state + dvs.stop_swss() + ports = stbl.getKeys() + for port in ports: + stbl._del(port) + dvs.start_swss() + time.sleep(2) + + # check ASIC DB after warm restart + (status, fvs) = 
atbl.get(dvs.asicdb.portnamemap["Ethernet0"]) + assert status == True + + assert "SAI_PORT_ATTR_LINK_TRAINING_ENABLE" in [fv[0] for fv in fvs] + for fv in fvs: + if fv[0] == "SAI_PORT_ATTR_LINK_TRAINING_ENABLE": + assert fv[1] == "true" + + finally: + # disable warm restart + dvs.runcmd("config warm_restart disable swss") + # slow down crm polling + dvs.runcmd("crm config polling interval 10000") + + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass diff --git a/tests/test_portchannel.py b/tests/test_portchannel.py index ee612ec46d..0a922e6936 100644 --- a/tests/test_portchannel.py +++ b/tests/test_portchannel.py @@ -1,3 +1,4 @@ +import pytest import time import re import json @@ -6,6 +7,7 @@ from swsscommon import swsscommon +@pytest.mark.usefixtures('dvs_lag_manager') class TestPortchannel(object): def test_Portchannel(self, dvs, testlog): @@ -89,6 +91,28 @@ def test_Portchannel(self, dvs, testlog): lagms = lagmtbl.getKeys() assert len(lagms) == 0 + @pytest.mark.parametrize("fast_rate", [False, True]) + def test_Portchannel_fast_rate(self, dvs, testlog, fast_rate): + po_id = "0003" + po_member = "Ethernet16" + + # Create PortChannel + self.dvs_lag.create_port_channel(po_id, fast_rate=fast_rate) + self.dvs_lag.get_and_verify_port_channel(1) + + # Add member to PortChannel + self.dvs_lag.create_port_channel_member(po_id, po_member) + self.dvs_lag.get_and_verify_port_channel_members(1) + + # test fast rate configuration + self.dvs_lag.get_and_verify_port_channel_fast_rate(po_id, fast_rate) + + # remove PortChannel + self.dvs_lag.create_port_channel_member(po_id, po_member) + self.dvs_lag.remove_port_channel(po_id) + self.dvs_lag.get_and_verify_port_channel(0) + + def test_Portchannel_lacpkey(self, dvs, testlog): portchannelNamesAuto = [("PortChannel001", "Ethernet0", 1001), ("PortChannel002", "Ethernet4", 1002), @@ -108,7 +132,7 @@ def 
test_Portchannel_lacpkey(self, dvs, testlog): for portchannel in portchannelNamesAuto: tbl.set(portchannel[0], fvs) - + fvs_no_lacp_key = swsscommon.FieldValuePairs( [("admin_status", "up"), ("mtu", "9100"), ("oper_status", "up")]) tbl.set(portchannelNames[0][0], fvs_no_lacp_key) @@ -382,6 +406,63 @@ def test_Portchannel_tpid(self, dvs, testlog): tbl._del("PortChannel0002") time.sleep(1) + def test_portchannel_member_netdev_oper_status(self, dvs, testlog): + config_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + + # create port-channel + tbl = swsscommon.Table(config_db, "PORTCHANNEL") + fvs = swsscommon.FieldValuePairs([("admin_status", "up"),("mtu", "9100"),("oper_status", "up")]) + tbl.set("PortChannel111", fvs) + + # set port-channel oper status + tbl = swsscommon.ProducerStateTable(app_db, "LAG_TABLE") + fvs = swsscommon.FieldValuePairs([("admin_status", "up"),("mtu", "9100"),("oper_status", "up")]) + tbl.set("PortChannel111", fvs) + + # add members to port-channel + tbl = swsscommon.Table(config_db, "PORTCHANNEL_MEMBER") + fvs = swsscommon.FieldValuePairs([("NULL", "NULL")]) + tbl.set("PortChannel111|Ethernet0", fvs) + tbl.set("PortChannel111|Ethernet4", fvs) + + # wait for port-channel netdev creation + time.sleep(1) + + # set netdev oper status + (exitcode, _) = dvs.runcmd("ip link set up dev Ethernet0") + assert exitcode == 0, "ip link set failed" + + (exitcode, _) = dvs.runcmd("ip link set up dev Ethernet4") + assert exitcode == 0, "ip link set failed" + + (exitcode, _) = dvs.runcmd("ip link set dev PortChannel111 carrier on") + assert exitcode == 0, "ip link set failed" + + # verify port-channel members netdev oper status + tbl = swsscommon.Table(state_db, "PORT_TABLE") + status, fvs = tbl.get("Ethernet0") + assert status is True + fvs = dict(fvs) + assert fvs['netdev_oper_status'] == 
'up' + + status, fvs = tbl.get("Ethernet4") + assert status is True + fvs = dict(fvs) + assert fvs['netdev_oper_status'] == 'up' + + # remove port-channel members + tbl = swsscommon.Table(config_db, "PORTCHANNEL_MEMBER") + tbl._del("PortChannel111|Ethernet0") + tbl._del("PortChannel111|Ethernet4") + + # remove port-channel + tbl = swsscommon.Table(config_db, "PORTCHANNEL") + tbl._del("PortChannel111") + + # wait for port-channel deletion + time.sleep(1) # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_qos_map.py b/tests/test_qos_map.py index 301bd3c6d6..905b0dacaa 100644 --- a/tests/test_qos_map.py +++ b/tests/test_qos_map.py @@ -3,6 +3,32 @@ from swsscommon import swsscommon +CFG_TC_TO_DSCP_MAP_TABLE_NAME = "TC_TO_DSCP_MAP" +CFG_TC_TO_DSCP_MAP_KEY = "AZURE" +TC_TO_DSCP_MAP = { + "0": "20", + "1": "16", + "2": "5", + "3": "43", + "4": "34", + "5": "52", + "6": "61", + "7": "17", +} + +CFG_TC_TO_DOT1P_MAP_TABLE_NAME = "TC_TO_DOT1P_MAP" +CFG_TC_TO_DOT1P_MAP_KEY = "AZURE" +TC_TO_DOT1P_MAP = { + "0": "0", + "1": "6", + "2": "5", + "3": "3", + "4": "4", + "5": "2", + "6": "1", + "7": "7", +} + CFG_DOT1P_TO_TC_MAP_TABLE_NAME = "DOT1P_TO_TC_MAP" CFG_DOT1P_TO_TC_MAP_KEY = "AZURE" DOT1P_TO_TC_MAP = { @@ -32,9 +58,166 @@ CFG_PORT_QOS_MAP_TABLE_NAME = "PORT_QOS_MAP" CFG_PORT_QOS_DOT1P_MAP_FIELD = "dot1p_to_tc_map" CFG_PORT_QOS_MPLS_TC_MAP_FIELD = "mpls_tc_to_tc_map" +CFG_PORT_QOS_TC_DOT1P_MAP_FIELD = "tc_to_dot1p_map" +CFG_PORT_QOS_TC_DSCP_MAP_FIELD = "tc_to_dscp_map" CFG_PORT_TABLE_NAME = "PORT" +#Tests for TC-to-DSCP qos map configuration +class TestTcDscp(object): + def connect_dbs(self, dvs): + self.asic_db = swsscommon.DBConnector(1, dvs.redis_sock, 0) + self.config_db = swsscommon.DBConnector(4, dvs.redis_sock, 0) + + def create_tc_dscp_profile(self): + tbl = swsscommon.Table(self.config_db, CFG_TC_TO_DSCP_MAP_TABLE_NAME) + fvs = 
swsscommon.FieldValuePairs(list(TC_TO_DSCP_MAP.items())) + tbl.set(CFG_TC_TO_DSCP_MAP_KEY, fvs) + time.sleep(1) + + def find_tc_dscp_profile(self): + found = False + tc_dscp_map_raw = None + tbl = swsscommon.Table(self.asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_QOS_MAP") + keys = tbl.getKeys() + for key in keys: + (status, fvs) = tbl.get(key) + assert status == True + + for fv in fvs: + if fv[0] == "SAI_QOS_MAP_ATTR_MAP_TO_VALUE_LIST": + tc_dscp_map_raw = fv[1] + elif fv[0] == "SAI_QOS_MAP_ATTR_TYPE" and fv[1] == "SAI_QOS_MAP_TYPE_TC_AND_COLOR_TO_DSCP": + found = True + + if found: + break + + assert found == True + + return (key, tc_dscp_map_raw) + + def apply_tc_dscp_profile_on_all_ports(self): + tbl = swsscommon.Table(self.config_db, CFG_PORT_QOS_MAP_TABLE_NAME) + fvs = swsscommon.FieldValuePairs([(CFG_PORT_QOS_TC_DSCP_MAP_FIELD, CFG_TC_TO_DSCP_MAP_KEY)]) + ports = swsscommon.Table(self.config_db, CFG_PORT_TABLE_NAME).getKeys() + for port in ports: + tbl.set(port, fvs) + + time.sleep(1) + + + def test_tc_dscp_cfg(self, dvs): + self.connect_dbs(dvs) + self.create_tc_dscp_profile() + _, tc_dscp_map_raw = self.find_tc_dscp_profile() + + tc_dscp_map = json.loads(tc_dscp_map_raw) + for tc2dscp in tc_dscp_map['list']: + tc_val = str(tc2dscp['key']['tc']) + dscp_val = str(tc2dscp['value']['dscp']) + assert dscp_val == TC_TO_DSCP_MAP[tc_val] + + def test_port_tc_dscp(self, dvs): + self.connect_dbs(dvs) + self.create_tc_dscp_profile() + oid, _ = self.find_tc_dscp_profile() + + self.apply_tc_dscp_profile_on_all_ports() + + cnt = 0 + tbl = swsscommon.Table(self.asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_PORT") + keys = tbl.getKeys() + for key in keys: + (status, fvs) = tbl.get(key) + assert status == True + + for fv in fvs: + if fv[0] == "SAI_PORT_ATTR_QOS_TC_AND_COLOR_TO_DSCP_MAP": + cnt += 1 + assert fv[1] == oid + + port_cnt = len(swsscommon.Table(self.config_db, CFG_PORT_TABLE_NAME).getKeys()) + assert port_cnt == cnt + + +#Tests for TC-to-Dot1p qos map configuration +class 
TestTcDot1p(object): + def connect_dbs(self, dvs): + self.asic_db = swsscommon.DBConnector(1, dvs.redis_sock, 0) + self.config_db = swsscommon.DBConnector(4, dvs.redis_sock, 0) + + def create_tc_dot1p_profile(self): + tbl = swsscommon.Table(self.config_db, CFG_TC_TO_DOT1P_MAP_TABLE_NAME) + fvs = swsscommon.FieldValuePairs(list(TC_TO_DOT1P_MAP.items())) + tbl.set(CFG_TC_TO_DOT1P_MAP_KEY, fvs) + time.sleep(1) + + def find_tc_dot1p_profile(self): + found = False + tc_dot1p_map_raw = None + tbl = swsscommon.Table(self.asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_QOS_MAP") + keys = tbl.getKeys() + for key in keys: + (status, fvs) = tbl.get(key) + assert status == True + + for fv in fvs: + if fv[0] == "SAI_QOS_MAP_ATTR_MAP_TO_VALUE_LIST": + tc_dot1p_map_raw = fv[1] + elif fv[0] == "SAI_QOS_MAP_ATTR_TYPE" and fv[1] == "SAI_QOS_MAP_TYPE_TC_AND_COLOR_TO_DOT1P": + found = True + + if found: + break + + assert found == True + + return (key, tc_dot1p_map_raw) + + def apply_tc_dot1p_profile_on_all_ports(self): + tbl = swsscommon.Table(self.config_db, CFG_PORT_QOS_MAP_TABLE_NAME) + fvs = swsscommon.FieldValuePairs([(CFG_PORT_QOS_TC_DOT1P_MAP_FIELD, CFG_TC_TO_DOT1P_MAP_KEY)]) + ports = swsscommon.Table(self.config_db, CFG_PORT_TABLE_NAME).getKeys() + for port in ports: + tbl.set(port, fvs) + + time.sleep(1) + + + def test_tc_dot1p_cfg(self, dvs): + self.connect_dbs(dvs) + self.create_tc_dot1p_profile() + _, tc_dot1p_map_raw = self.find_tc_dot1p_profile() + + tc_dot1p_map = json.loads(tc_dot1p_map_raw) + for tc2dot1p in tc_dot1p_map['list']: + tc_val = str(tc2dot1p['key']['tc']) + dot1p_val = str(tc2dot1p['value']['dot1p']) + assert dot1p_val == TC_TO_DOT1P_MAP[tc_val] + + def test_port_tc_dot1p(self, dvs): + self.connect_dbs(dvs) + self.create_tc_dot1p_profile() + oid, _ = self.find_tc_dot1p_profile() + + self.apply_tc_dot1p_profile_on_all_ports() + cnt = 0 + tbl = swsscommon.Table(self.asic_db, "ASIC_STATE:SAI_OBJECT_TYPE_PORT") + keys = tbl.getKeys() + for key in keys: + (status, fvs) 
= tbl.get(key) + assert status == True + + for fv in fvs: + if fv[0] == "SAI_PORT_ATTR_QOS_TC_AND_COLOR_TO_DOT1P_MAP": + cnt += 1 + assert fv[1] == oid + + port_cnt = len(swsscommon.Table(self.config_db, CFG_PORT_TABLE_NAME).getKeys()) + assert port_cnt == cnt + +#Tests for Dot1p-to-TC qos map configuration class TestDot1p(object): def connect_dbs(self, dvs): self.asic_db = swsscommon.DBConnector(1, dvs.redis_sock, 0) @@ -370,6 +553,73 @@ def test_port_mpls_tc(self, dvs): port_cnt = len(swsscommon.Table(self.config_db, CFG_PORT_TABLE_NAME).getKeys()) assert port_cnt == cnt +class TestDscpToTcMap(object): + ASIC_QOS_MAP_STR = "ASIC_STATE:SAI_OBJECT_TYPE_QOS_MAP" + ASIC_PORT_STR = "ASIC_STATE:SAI_OBJECT_TYPE_PORT" + ASIC_SWITCH_STR = "ASIC_STATE:SAI_OBJECT_TYPE_SWITCH" + + def init_test(self, dvs): + dvs.setup_db() + self.asic_db = dvs.get_asic_db() + self.config_db = dvs.get_config_db() + self.asic_qos_map_ids = self.asic_db.get_keys(self.ASIC_QOS_MAP_STR) + self.asic_qos_map_count = len(self.asic_qos_map_ids) + self.dscp_to_tc_table = swsscommon.Table(self.config_db.db_connection, swsscommon.CFG_DSCP_TO_TC_MAP_TABLE_NAME) + self.port_qos_table = swsscommon.Table(self.config_db.db_connection, swsscommon.CFG_PORT_QOS_MAP_TABLE_NAME) + + def get_qos_id(self): + diff = set(self.asic_db.get_keys(self.ASIC_QOS_MAP_STR)) - set(self.asic_qos_map_ids) + assert len(diff) <= 1 + return None if len(diff) == 0 else diff.pop() + + def test_dscp_to_tc_map_applied_to_switch(self, dvs): + self.init_test(dvs) + dscp_to_tc_map_id = None + created_new_map = False + try: + existing_map = self.dscp_to_tc_table.getKeys() + if "AZURE" not in existing_map: + # Create a DSCP_TO_TC map + dscp_to_tc_map = [(str(i), str(i)) for i in range(0, 63)] + self.dscp_to_tc_table.set("AZURE", swsscommon.FieldValuePairs(dscp_to_tc_map)) + + self.asic_db.wait_for_n_keys(self.ASIC_QOS_MAP_STR, self.asic_qos_map_count + 1) + + # Get the DSCP_TO_TC map ID + dscp_to_tc_map_id = self.get_qos_id() + 
assert(dscp_to_tc_map_id is not None) + + # Assert the expected values + fvs = self.asic_db.get_entry(self.ASIC_QOS_MAP_STR, dscp_to_tc_map_id) + assert(fvs.get("SAI_QOS_MAP_ATTR_TYPE") == "SAI_QOS_MAP_TYPE_DSCP_TO_TC") + created_new_map = True + else: + for id in self.asic_qos_map_ids: + fvs = self.asic_db.get_entry(self.ASIC_QOS_MAP_STR, id) + if fvs.get("SAI_QOS_MAP_ATTR_TYPE") == "SAI_QOS_MAP_TYPE_DSCP_TO_TC": + dscp_to_tc_map_id = id + break + switch_oid = dvs.getSwitchOid() + + # Insert switch level map entry + self.port_qos_table.set("global", [("dscp_to_tc_map", "AZURE")]) + time.sleep(1) + + # Check the switch level DSCP_TO_TC_MAP is applied + fvs = self.asic_db.get_entry(self.ASIC_SWITCH_STR, switch_oid) + assert(fvs.get("SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP") == dscp_to_tc_map_id) + + # Remove the global level DSCP_TO_TC_MAP + self.port_qos_table._del("global") + time.sleep(1) + + # Check the global level DSCP_TO_TC_MAP is set to SAI_ + fvs = self.asic_db.get_entry(self.ASIC_SWITCH_STR, switch_oid) + assert(fvs.get("SAI_SWITCH_ATTR_QOS_DSCP_TO_TC_MAP") == "oid:0x0") + finally: + if created_new_map: + self.dscp_to_tc_table._del("AZURE") + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_route.py b/tests/test_route.py index 9c56ef52a8..dfa6d04cc4 100644 --- a/tests/test_route.py +++ b/tests/test_route.py @@ -604,6 +604,7 @@ def test_RouteAddRemoveIpv6RouteUnresolvedNeigh(self, dvs, testlog): dvs.servers[1].runcmd("ip -6 route del default dev eth0") dvs.servers[1].runcmd("ip -6 address del 2001::2/64 dev eth0") + @pytest.mark.skip(reason="Failing. 
Under investigation") def test_RouteAddRemoveIpv4RouteWithVrf(self, dvs, testlog): self.setup_db(dvs) @@ -1034,6 +1035,84 @@ def test_PerfAddRemoveRoute(self, dvs, testlog): dvs.servers[1].runcmd("ip route del default dev eth0") dvs.servers[1].runcmd("ip address del 10.0.0.3/31 dev eth0") +class TestFpmSyncResponse(TestRouteBase): + @pytest.fixture + def setup(self, dvs): + self.setup_db(dvs) + + # create l3 interface + self.create_l3_intf("Ethernet0", "") + # set ip address + self.add_ip_address("Ethernet0", "10.0.0.0/31") + # bring up interface + self.set_admin_status("Ethernet0", "up") + + # set ip address and default route + dvs.servers[0].runcmd("ip address add 10.0.0.1/31 dev eth0") + dvs.servers[0].runcmd("ip route add default via 10.0.0.0") + + dvs.runcmd("ping -c 1 10.0.0.1") + + yield + + # remove ip address and default route + dvs.servers[0].runcmd("ip route del default dev eth0") + dvs.servers[0].runcmd("ip address del 10.0.0.1/31 dev eth0") + + # bring interface down + self.set_admin_status("Ethernet0", "down") + # remove ip address + self.remove_ip_address("Ethernet0", "10.0.0.0/31") + # remove l3 interface + self.remove_l3_intf("Ethernet0") + + def is_offloaded(self, dvs, route): + rc, output = dvs.runcmd(f"vtysh -c 'show ip route {route} json'") + assert rc == 0 + + route_entry = json.loads(output) + return bool(route_entry[route][0].get('offloaded')) + + @pytest.mark.xfail(reason="Requires VS docker update in https://github.com/sonic-net/sonic-buildimage/pull/12853") + @pytest.mark.parametrize("suppress_state", ["enabled", "disabled"]) + def test_offload(self, suppress_state, setup, dvs): + route = "1.1.1.0/24" + + # enable route suppression + rc, _ = dvs.runcmd(f"config suppress-fib-pending {suppress_state}") + assert rc == 0, "Failed to configure suppress-fib-pending" + + time.sleep(5) + + try: + rc, _ = dvs.runcmd("bash -c 'kill -SIGSTOP $(pidof orchagent)'") + assert rc == 0, "Failed to suspend orchagent" + + rc, _ = dvs.runcmd(f"ip 
route add {route} via 10.0.0.1 proto bgp") + assert rc == 0, "Failed to configure route" + + time.sleep(5) + + if suppress_state == 'disabled': + assert self.is_offloaded(dvs,route), f"{route} is expected to be offloaded (suppression is {suppress_state})" + return + + assert not self.is_offloaded(dvs, route), f"{route} is expected to be not offloaded (suppression is {suppress_state})" + + rc, _ = dvs.runcmd("bash -c 'kill -SIGCONT $(pidof orchagent)'") + assert rc == 0, "Failed to resume orchagent" + + def check_offloaded(): + return (self.is_offloaded(dvs, route), None) + + wait_for_result(check_offloaded, failure_message=f"{route} is expected to be offloaded after orchagent resume") + finally: + dvs.runcmd("bash -c 'kill -SIGCONT $(pidof orchagent)'") + dvs.runcmd(f"ip route del {route}") + + # make sure route suppression is disabled + dvs.runcmd("config suppress-fib-pending disabled") + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying def test_nonflaky_dummy(): diff --git a/tests/test_sflow.py b/tests/test_sflow.py index e3c95a6946..25e3a8eaf9 100644 --- a/tests/test_sflow.py +++ b/tests/test_sflow.py @@ -146,7 +146,6 @@ def test_SamplingRatePortCfgUpdate(self, dvs, testlog): ''' self.setup_sflow(dvs) appldb = dvs.get_app_db() - #dvs.runcmd("portconfig -p {} -s {}".format("Ethernet0", "25000")) self.cdb.update_entry("PORT", "Ethernet0", {'speed' : "25000"}) expected_fields = {"sample_rate": self.speed_rate_table["25000"]} appldb.wait_for_field_match("SFLOW_SESSION_TABLE", "Ethernet0", expected_fields) @@ -254,6 +253,93 @@ def test_Teardown(self, dvs, testlog): self.cdb.delete_entry("SFLOW", "global") self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_SAMPLEPACKET", 0) + def test_globalSetSampleDir(self, dvs, testlog): + self.setup_sflow(dvs) + + # Verify that the session is up first + port_oid = self.adb.port_name_map["Ethernet0"] + expected_fields = 
{"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"} + expected_fields_egr = {"SAI_PORT_ATTR_EGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"} + + self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + + self.cdb.update_entry("SFLOW", "global", {"sample_direction": "both"}) + self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields_egr) + + self.cdb.update_entry("SFLOW", "global", {"sample_direction": "tx"}) + self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields_egr) + + self.cdb.delete_entry("SFLOW", "global") + self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields_egr) + + def test_globalAllSetDir(self, dvs, testlog): + self.setup_sflow(dvs) + # Verify that the session is up first + port_oid = self.adb.port_name_map["Ethernet0"] + self.cdb.update_entry("SFLOW_SESSION", "all", {"sample_direction": "both"}) + expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"} + expected_fields_egr = {"SAI_PORT_ATTR_EGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"} + self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields_egr) + + self.cdb.update_entry("SFLOW_SESSION", "all", {"sample_direction": "tx"}) + self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields_egr) + + self.cdb.delete_entry("SFLOW", "global") + 
self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields_egr) + + def test_InterfaceSetDir(self, dvs, testlog): + self.setup_sflow(dvs) + + # Get the global session info as a baseline + port_oid = self.adb.port_name_map["Ethernet0"] + expected_fields = ["SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE"] + fvs = self.adb.wait_for_fields("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + global_session = fvs["SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE"] + + # Then create the interface session + session_params = {"admin_state": "up", "sample_rate": "1000", "sample_direction": "both"} + self.cdb.create_entry("SFLOW_SESSION", "Ethernet0", session_params) + + # Verify that the new interface session has been created and is different from the global one + port_oid = self.adb.port_name_map["Ethernet0"] + expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": global_session} + fvs = self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + + expected_fields_egr = {"SAI_PORT_ATTR_EGRESS_SAMPLEPACKET_ENABLE": global_session} + fvs = self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields_egr) + + local_ing_session = fvs["SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE"] + local_egr_session = fvs["SAI_PORT_ATTR_EGRESS_SAMPLEPACKET_ENABLE"] + + self.cdb.update_entry("SFLOW_SESSION", "Ethernet0", {"sample_direction": "tx"}) + + expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"} + expected_fields_egr = {"SAI_PORT_ATTR_EGRESS_SAMPLEPACKET_ENABLE": local_egr_session} + fvs = self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + fvs = self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields_egr) + + self.cdb.update_entry("SFLOW_SESSION", "Ethernet0", 
{"sample_direction": "rx"}) + + expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": local_ing_session} + expected_fields_egr = {"SAI_PORT_ATTR_EGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"} + fvs = self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + fvs = self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields_egr) + + # interface config higher precedence then global/all. Changing all sample-dir should not affect existing interface config + self.cdb.create_entry("SFLOW_SESSION", "all", {"admin_state": "up", "sample_direction": "both"}) + fvs = self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields) + fvs = self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields_egr) + + # interface delete will set fallback to all (sample-direction) if enabled. + self.cdb.delete_entry("SFLOW_SESSION", "Ethernet0") + fvs = self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": local_ing_session}) + fvs = self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": local_egr_session}) # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_srv6.py b/tests/test_srv6.py index 0d134acc2b..68bee80a52 100644 --- a/tests/test_srv6.py +++ b/tests/test_srv6.py @@ -56,6 +56,8 @@ def test_mysid(self, dvs, testlog): # create MySID entries mysid1='16:8:8:8:baba:2001:10::' mysid2='16:8:8:8:baba:2001:20::' + mysid3='16:8:8:8:fcbb:bb01:800::' + mysid4='16:8:8:8:baba:2001:40::' # create MySID END fvs = swsscommon.FieldValuePairs([('action', 'end')]) @@ -90,25 +92,61 @@ def test_mysid(self, dvs, testlog): elif fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT46" + # 
create MySID uN + fvs = swsscommon.FieldValuePairs([('action', 'un')]) + key = self.create_mysid(mysid3, fvs) + + # check ASIC MySID database + mysid = json.loads(key) + assert mysid["sid"] == "fcbb:bb01:800::" + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UN" + elif fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR_FLAVOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD" + + # create MySID END.DT4 with default vrf + fvs = swsscommon.FieldValuePairs([('action', 'end.dt4'), ('vrf', 'default')]) + key = self.create_mysid(mysid4, fvs) + + # check ASIC MySID database + mysid = json.loads(key) + assert mysid["sid"] == "baba:2001:40::" + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_VRF": + assert True + elif fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT4" + # delete MySID self.remove_mysid(mysid1) self.remove_mysid(mysid2) + self.remove_mysid(mysid3) + self.remove_mysid(mysid4) # remove vrf self.remove_vrf("VrfDt46") - class TestSrv6(object): def setup_db(self, dvs): self.pdb = dvs.get_app_db() self.adb = dvs.get_asic_db() self.cdb = dvs.get_config_db() - def create_sidlist(self, segname, ips): + def create_sidlist(self, segname, ips, type=None): table = "ASIC_STATE:SAI_OBJECT_TYPE_SRV6_SIDLIST" existed_entries = get_exist_entries(self.adb.db_connection, table) - fvs=swsscommon.FieldValuePairs([('path', ips)]) + if type is None: + fvs=swsscommon.FieldValuePairs([('path', ips)]) + else: + fvs=swsscommon.FieldValuePairs([('path', ips), ('type', type)]) segtbl = 
swsscommon.ProducerStateTable(self.pdb.db_connection, "SRV6_SID_LIST_TABLE") segtbl.set(segname, fvs) @@ -222,9 +260,30 @@ def test_srv6(self, dvs, testlog): # create 2nd seg lists - self.create_sidlist('seg2', 'baba:2002:10::,baba:2002:20::') - # create 3rd seg lists - self.create_sidlist('seg3', 'baba:2003:10::,baba:2003:20::') + sidlist_id = self.create_sidlist('seg2', 'baba:2002:10::,baba:2002:20::', 'insert.red') + + # check ASIC SAI_OBJECT_TYPE_SRV6_SIDLIST database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_SRV6_SIDLIST") + (status, fvs) = tbl.get(sidlist_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_SRV6_SIDLIST_ATTR_SEGMENT_LIST": + assert fv[1] == "2:baba:2002:10::,baba:2002:20::" + elif fv[0] == "SAI_SRV6_SIDLIST_ATTR_TYPE": + assert fv[1] == "SAI_SRV6_SIDLIST_TYPE_INSERT_RED" + + # create 3rd seg lists with unsupported or wrong naming of sid list type, for this case, it will use default type: ENCAPS_RED + sidlist_id = self.create_sidlist('seg3', 'baba:2003:10::,baba:2003:20::', 'reduced') + + # check ASIC SAI_OBJECT_TYPE_SRV6_SIDLIST database + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_SRV6_SIDLIST") + (status, fvs) = tbl.get(sidlist_id) + assert status == True + for fv in fvs: + if fv[0] == "SAI_SRV6_SIDLIST_ATTR_SEGMENT_LIST": + assert fv[1] == "2:baba:2003:10::,baba:2003:20::" + elif fv[0] == "SAI_SRV6_SIDLIST_ATTR_TYPE": + assert fv[1] == "SAI_SRV6_SIDLIST_TYPE_ENCAPS_RED" # create 2nd v4 route with single sidlists self.create_srv6_route('20.20.20.21/32','seg2','1001:2000::1') diff --git a/tests/test_storm_control.py b/tests/test_storm_control.py new file mode 100644 index 0000000000..ec4da04917 --- /dev/null +++ b/tests/test_storm_control.py @@ -0,0 +1,316 @@ +from swsscommon import swsscommon +import os +import sys +import time +import json +from distutils.version import StrictVersion +import pytest + +class TestStormControl(object): + def setup_db(self,dvs): + 
self.pdb = swsscommon.DBConnector(0, dvs.redis_sock, 0) + self.adb = swsscommon.DBConnector(1, dvs.redis_sock, 0) + self.cdb = swsscommon.DBConnector(4, dvs.redis_sock, 0) + self.sdb = swsscommon.DBConnector(6, dvs.redis_sock, 0) + dvs.runcmd(['sh', '-c', "echo 0 > /var/log/syslog"]) + + def create_port_channel(self, dvs, lag_name): + dvs.runcmd("config portchannel add " + lag_name) + time.sleep(1) + + def delete_port_channel(self, dvs, lag_name): + dvs.runcmd("config portchannel del " + lag_name) + time.sleep(1) + + def add_port_channel_member(self, dvs, lag_name, member): + dvs.runcmd("config portchannel member add "+ lag_name + " "+ member) + time.sleep(1) + + def remove_port_channel_member(self, dvs, lag_name, member): + dvs.runcmd("config portchannel member del "+ lag_name + " "+ member) + time.sleep(1) + + def create_vlan(self, dvs, vlan): + dvs.runcmd("config vlan add " + vlan) + time.sleep(1) + + def delete_vlan(self, dvs, vlan): + dvs.runcmd("config vlan del " + vlan) + time.sleep(1) + + def add_vlan_member(self, dvs, vlan, interface): + dvs.runcmd("config vlan member add " + vlan + " " + interface) + time.sleep(1) + + def remove_vlan_member(self, dvs, vlan, interface): + dvs.runcmd("config vlan member del " + vlan + " " + interface) + time.sleep(1) + + def add_storm_session(self, if_name, storm_type, kbps_value): + tbl = swsscommon.Table(self.cdb, "PORT_STORM_CONTROL") + fvs = swsscommon.FieldValuePairs([("kbps", str(kbps_value))]) + key = if_name + "|" + storm_type + tbl.set(key,fvs) + time.sleep(1) + + def delete_storm_session(self, if_name, storm_type): + tbl = swsscommon.Table(self.cdb, "PORT_STORM_CONTROL") + key = if_name + "|" + storm_type + tbl._del(key) + time.sleep(1) + + def test_bcast_storm(self,dvs,testlog): + self.setup_db(dvs) + + if_name = "Ethernet0" + storm_type = "broadcast" + #User input is Kbps + #Orchagent converts the value to CIR as below and programs the ASIC DB + #kbps_value * 1000 / 8 + kbps_value = 1000000 + 
self.add_storm_control_on_interface(dvs,if_name,storm_type,kbps_value) + self.del_storm_control(dvs,if_name,storm_type) + + def del_storm_control(self, dvs, if_name, storm_type): + self.setup_db(dvs) + port_oid = dvs.asicdb.portnamemap[if_name] + atbl = swsscommon.Table(self.adb, "ASIC_STATE:SAI_OBJECT_TYPE_PORT") + status, fvs = atbl.get(dvs.asicdb.portnamemap[if_name]) + assert status == True + + storm_type_port_attr = self.get_port_attr_for_storm_type(storm_type) + + policer_oid = 0 + for fv in fvs: + if fv[0] == storm_type_port_attr: + policer_oid = fv[1] + + self.delete_storm_session(if_name, storm_type) + tbl = swsscommon.Table(self.cdb, "PORT_STORM_CONTROL") + (status,fvs) = tbl.get(if_name+"|"+storm_type) + assert status == False + + atbl = swsscommon.Table(self.adb,"ASIC_STATE:SAI_OBJECT_TYPE_PORT") + status, fvs = atbl.get(dvs.asicdb.portnamemap[if_name]) + assert status == True + + for fv in fvs: + if fv[0] == storm_type_port_attr: + assert fv[1] == "oid:0x0" + + if policer_oid != 0: + atbl = swsscommon.Table(self.adb,"ASIC_STATE:SAI_OBJECT_TYPE_POLICER") + status, fvs = atbl.get(policer_oid) + assert status == False + + def test_uucast_storm(self,dvs,testlog): + self.setup_db(dvs) + + if_name = "Ethernet0" + storm_type = "unknown-unicast" + #User input is Kbps + #Orchagent converts the value to CIR as below and programs the ASIC DB + #kbps_value * 1000 / 8 + kbps_value = 1000000 + + self.add_storm_control_on_interface(dvs,if_name,storm_type,kbps_value) + self.del_storm_control(dvs,if_name,storm_type) + + def test_umcast_storm(self,dvs,testlog): + self.setup_db(dvs) + + if_name = "Ethernet0" + storm_type = "unknown-multicast" + #User input is Kbps + #Orchagent converts the value to CIR as below and programs the ASIC DB + #kbps_value * 1000 / 8 + kbps_value = 1000000 + + self.add_storm_control_on_interface(dvs,if_name,storm_type,kbps_value) + self.del_storm_control(dvs,if_name,storm_type) + + def get_port_attr_for_storm_type(self,storm_type): + port_attr 
= "" + if storm_type == "broadcast": + port_attr = "SAI_PORT_ATTR_BROADCAST_STORM_CONTROL_POLICER_ID" + elif storm_type == "unknown-unicast": + port_attr = "SAI_PORT_ATTR_FLOOD_STORM_CONTROL_POLICER_ID" + elif storm_type == "unknown-multicast": + port_attr = "SAI_PORT_ATTR_MULTICAST_STORM_CONTROL_POLICER_ID" + + return port_attr + + def check_storm_control_on_interface(self,dvs,if_name,storm_type,kbps_value): + print ("interface {} storm_type {} kbps {}".format(if_name,storm_type, kbps_value)) + tbl = swsscommon.Table(self.cdb,"PORT_STORM_CONTROL") + (status,fvs) = tbl.get(if_name+"|"+storm_type) + + assert status == True + assert len(fvs) > 0 + + port_oid = dvs.asicdb.portnamemap[if_name] + + atbl = swsscommon.Table(self.adb,"ASIC_STATE:SAI_OBJECT_TYPE_PORT") + status, fvs = atbl.get(dvs.asicdb.portnamemap[if_name]) + assert status == True + + policer_oid = 0 + + storm_type_port_attr = self.get_port_attr_for_storm_type(storm_type) + + for fv in fvs: + if fv[0] == storm_type_port_attr: + assert fv[1] != "oid:0x0" + policer_oid = fv[1] + + if policer_oid != 0: + atbl = swsscommon.Table(self.adb,"ASIC_STATE:SAI_OBJECT_TYPE_POLICER") + status, fvs = atbl.get(policer_oid) + assert status == True + + bps = 0 + + for fv in fvs: + if fv[0] == "SAI_POLICER_ATTR_CIR": + bps = fv[1] + + #Retrieved value of bps from ASIC_DB is converted back to user input kbps + kbps = int(int(bps) / int(1000) * 8) + print ("Kbps value {}".format(kbps)) + + assert str(kbps) == str(kbps_value) + + + def add_storm_control_on_interface(self,dvs,if_name,storm_type,kbps_value): + print ("interface {} storm_type {} kbps {}".format(if_name,storm_type,kbps_value)) + self.add_storm_session(if_name, storm_type, kbps_value) + self.check_storm_control_on_interface(dvs,if_name,storm_type,kbps_value) + + def test_add_storm_all_interfaces(self,dvs,testlog): + self.setup_db(dvs) + + tbl = swsscommon.Table(self.cdb,"PORT") + for key in tbl.getKeys(): + 
self.add_storm_control_on_interface(dvs,key,"broadcast",1000000) + self.add_storm_control_on_interface(dvs,key,"unknown-unicast",2000000) + self.add_storm_control_on_interface(dvs,key,"unknown-multicast",3000000) + self.del_storm_control(dvs,key,"broadcast") + self.del_storm_control(dvs,key,"unknown-unicast") + self.del_storm_control(dvs,key,"unknown-multicast") + + def test_warm_restart_all_interfaces(self,dvs,testlog): + self.setup_db(dvs) + + tbl = swsscommon.Table(self.cdb,"PORT") + for key in tbl.getKeys(): + self.add_storm_control_on_interface(dvs,key,"broadcast",1000000) + self.add_storm_control_on_interface(dvs,key,"unknown-unicast",2000000) + self.add_storm_control_on_interface(dvs,key,"unknown-multicast",3000000) + #dvs.runcmd("config save -y") + # enable warm restart + dvs.warm_restart_swss("true") + + # freeze orchagent for warm restart + (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check", include_stderr=False) + assert result == "RESTARTCHECK succeeded\n" + time.sleep(2) + + dvs.stop_swss() + time.sleep(10) + dvs.start_swss() + time.sleep(10) + + for key in tbl.getKeys(): + self.check_storm_control_on_interface(dvs,key,"broadcast",1000000) + self.check_storm_control_on_interface(dvs,key,"unknown-unicast",2000000) + self.check_storm_control_on_interface(dvs,key,"unknown-multicast",3000000) + self.del_storm_control(dvs,key,"broadcast") + self.del_storm_control(dvs,key,"unknown-unicast") + self.del_storm_control(dvs,key,"unknown-multicast") + # disable warm restart + dvs.warm_restart_swss("false") + + def test_add_storm_lag_interface(self,dvs,testlog): + self.setup_db(dvs) + lag_name = "PortChannel10" + member_interface = "Ethernet0" + kbps_value = 1000000 + storm_list = ["broadcast","unknown-unicast","unknown-multicast"] + kbps_value_list = [1000000,2000000,3000000] + + #Create LAG interface and add member + self.create_port_channel(dvs,lag_name) + self.add_port_channel_member(dvs,lag_name,member_interface) + + #click CLI verification + 
#for storm_type in storm_list: + # dvs.runcmd("config interface storm-control add "+lag_name+" "+storm_type+" "+str(kbps_value)) + # tbl = swsscommon.Table(self.cdb,"PORT_STORM_CONTROL") + # (status,fvs) = tbl.get(lag_name+"|"+storm_type) + # assert status == False + # assert len(fvs) == 0 + + #Orchagent verification + storm_list_db = ["broadcast","unknown-unicast","unknown-multicast"] + for storm_type,kbps_value in zip(storm_list_db,kbps_value_list): + #Cleanup syslog + dvs.runcmd(['sh', '-c', "echo 0 > /var/log/syslog"]) + time.sleep(1) + print ("storm type: {} kbps value: {}".format(storm_type,kbps_value)) + #Add storm entry to config DB directly + self.add_storm_session(lag_name,storm_type,kbps_value) + tbl = swsscommon.Table(self.cdb,"PORT_STORM_CONTROL") + (status,fvs) = tbl.get(lag_name+"|"+storm_type) + assert status == True + assert len(fvs) > 0 + time.sleep(1) + #grep for error message in syslog + (exitcode,num) = dvs.runcmd(['sh', '-c', 'cat /var/log/syslog | grep -i "handlePortStormControlTable: {}: Unsupported / Invalid interface PortChannel10"'.format(storm_type)]) + time.sleep(1) + assert exitcode == 0 + self.delete_storm_session(lag_name, storm_type) + self.remove_port_channel_member(dvs,lag_name,member_interface) + self.delete_port_channel(dvs,lag_name) + + def test_add_storm_vlan_interface(self,dvs,testlog): + self.setup_db(dvs) + vlan_id = 99 + member_interface = "Ethernet4" + kbps_value = 1000000 + storm_list = ["broadcast","unknown-unicast","unknown-multicast"] + kbps_value_list = [1000000,2000000,3000000] + vlan_name = "Vlan"+str(vlan_id) + + #Create VLAN interface and add member + self.create_vlan(dvs,str(vlan_id)) + self.add_vlan_member(dvs,str(vlan_id),member_interface) + + #click CLI verification + #for storm_type in storm_list: + # dvs.runcmd("config interface storm-control add Vlan"+str(vlan_id)+" "+storm_type+" "+str(kbps_value)) + # tbl = swsscommon.Table(self.cdb,"PORT_STORM_CONTROL") + # (status,fvs) = 
tbl.get("Vlan"+str(vlan_id)+"|"+storm_type) + # assert status == False + # assert len(fvs) == 0 + + #Orchagent verification + storm_list_db = ["broadcast","unknown-unicast","unknown-multicast"] + for storm_type,kbps_value in zip(storm_list_db,kbps_value_list): + #Cleanup syslog + dvs.runcmd(['sh', '-c', "echo 0 > /var/log/syslog"]) + time.sleep(1) + print ("storm type: {} kbps value: {}".format(storm_type,kbps_value)) + #Add storm entry to config DB directly + self.add_storm_session(vlan_name,storm_type,kbps_value) + tbl = swsscommon.Table(self.cdb,"PORT_STORM_CONTROL") + (status,fvs) = tbl.get(vlan_name+"|"+storm_type) + assert status == True + assert len(fvs) > 0 + time.sleep(1) + #grep for error message in syslog + (exitcode,num) = dvs.runcmd(['sh', '-c', 'cat /var/log/syslog | grep -i "handlePortStormControlTable: {}: Unsupported / Invalid interface {}"'.format(storm_type,vlan_name)]) + time.sleep(1) + assert exitcode == 0 + self.delete_storm_session(vlan_name, storm_type) + self.remove_vlan_member(dvs,str(vlan_id),member_interface) + self.delete_vlan(dvs,str(vlan_id)) diff --git a/tests/test_sub_port_intf.py b/tests/test_sub_port_intf.py index 748e680e2a..ec76ec13bb 100644 --- a/tests/test_sub_port_intf.py +++ b/tests/test_sub_port_intf.py @@ -184,11 +184,12 @@ def set_parent_port_admin_status(self, dvs, port_name, status): self.config_db.create_entry(tbl_name, port_name, fvs) time.sleep(1) - if port_name.startswith(ETHERNET_PREFIX): - self.set_parent_port_oper_status(dvs, port_name, "down") - self.set_parent_port_oper_status(dvs, port_name, "up") - else: - self.set_parent_port_oper_status(dvs, port_name, "up") + if status == "up": + if port_name.startswith(ETHERNET_PREFIX): + self.set_parent_port_oper_status(dvs, port_name, "down") + self.set_parent_port_oper_status(dvs, port_name, "up") + else: + self.set_parent_port_oper_status(dvs, port_name, "up") def create_vxlan_tunnel(self, tunnel_name, vtep_ip): fvs = { @@ -763,7 +764,7 @@ def 
test_sub_port_intf_appl_db_proc_seq(self, dvs): self._test_sub_port_intf_appl_db_proc_seq(dvs, self.LAG_SUB_PORT_INTERFACE_UNDER_TEST, admin_up=True, vrf_name=self.VNET_UNDER_TEST) self._test_sub_port_intf_appl_db_proc_seq(dvs, self.LAG_SUB_PORT_INTERFACE_UNDER_TEST, admin_up=False, vrf_name=self.VNET_UNDER_TEST) - def _test_sub_port_intf_admin_status_change(self, dvs, sub_port_intf_name, vrf_name=None): + def _test_sub_port_intf_admin_status_change(self, dvs, sub_port_intf_name, vrf_name=None, defer_parent_adminup=False): substrs = sub_port_intf_name.split(VLAN_SUB_INTERFACE_SEPARATOR) parent_port = substrs[0] parent_port = self.get_parent_port(sub_port_intf_name) @@ -771,7 +772,16 @@ def _test_sub_port_intf_admin_status_change(self, dvs, sub_port_intf_name, vrf_n vrf_oid = self.default_vrf_oid old_rif_oids = self.get_oids(ASIC_RIF_TABLE) + if defer_parent_adminup: + self.set_parent_port_admin_status(dvs, parent_port, "down") + _, oa_pid = dvs.runcmd("pgrep orchagent") + oa_pid = oa_pid.strip() + # This is to block orchagent daemon in order to simulate the scenario that + # there are a large number of items pending in orchagent's m_toSync queue + dvs.runcmd("kill -s SIGSTOP {}".format(oa_pid)) + self.set_parent_port_admin_status(dvs, parent_port, "up") + if vrf_name: self.create_vrf(vrf_name) vrf_oid = self.get_newly_created_oid(ASIC_VIRTUAL_ROUTER_TABLE, [vrf_oid]) @@ -781,6 +791,10 @@ def _test_sub_port_intf_admin_status_change(self, dvs, sub_port_intf_name, vrf_n if vrf_name is None or not vrf_name.startswith(VNET_PREFIX): self.add_sub_port_intf_ip_addr(sub_port_intf_name, self.IPV6_ADDR_UNDER_TEST) + if defer_parent_adminup: + dvs.runcmd("kill -s SIGCONT {}".format(oa_pid)) + time.sleep(1) + fv_dict = { ADMIN_STATUS: "up", } @@ -871,6 +885,7 @@ def test_sub_port_intf_admin_status_change(self, dvs): self._test_sub_port_intf_admin_status_change(dvs, self.SUB_PORT_INTERFACE_UNDER_TEST) self._test_sub_port_intf_admin_status_change(dvs, 
self.LAG_SUB_PORT_INTERFACE_UNDER_TEST) + self._test_sub_port_intf_admin_status_change(dvs, self.LAG_SUB_PORT_INTERFACE_UNDER_TEST, defer_parent_adminup=True) self._test_sub_port_intf_admin_status_change(dvs, self.SUB_PORT_INTERFACE_UNDER_TEST, self.VRF_UNDER_TEST) self._test_sub_port_intf_admin_status_change(dvs, self.LAG_SUB_PORT_INTERFACE_UNDER_TEST, self.VRF_UNDER_TEST) diff --git a/tests/test_tunnel.py b/tests/test_tunnel.py index b69e6b6b73..4b96eb5060 100644 --- a/tests/test_tunnel.py +++ b/tests/test_tunnel.py @@ -7,13 +7,15 @@ def create_fvs(**kwargs): return swsscommon.FieldValuePairs(list(kwargs.items())) - class TestTunnelBase(object): APP_TUNNEL_DECAP_TABLE_NAME = "TUNNEL_DECAP_TABLE" ASIC_TUNNEL_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL" ASIC_TUNNEL_TERM_ENTRIES = "ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_TERM_TABLE_ENTRY" ASIC_RIF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE" ASIC_VRF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER" + ASIC_QOS_MAP_TABLE_KEY = "ASIC_STATE:SAI_OBJECT_TYPE_QOS_MAP" + TUNNEL_QOS_MAP_NAME = "AZURE_TUNNEL" + CONFIG_TUNNEL_TABLE_NAME = "TUNNEL" ecn_modes_map = { "standard" : "SAI_TUNNEL_DECAP_ECN_MODE_STANDARD", @@ -30,6 +32,9 @@ class TestTunnelBase(object): "uniform" : "SAI_TUNNEL_TTL_MODE_UNIFORM_MODEL" } + # Define 2 dummy maps + DSCP_TO_TC_MAP = {str(i):str(1) for i in range(0, 64)} + TC_TO_PRIORITY_GROUP_MAP = {str(i):str(i) for i in range(0, 8)} def check_interface_exists_in_asicdb(self, asicdb, sai_oid): if_table = swsscommon.Table(asicdb, self.ASIC_RIF_TABLE) @@ -41,43 +46,59 @@ def check_vr_exists_in_asicdb(self, asicdb, sai_oid): status, fvs = vfr_table.get(sai_oid) return status - def check_tunnel_termination_entry_exists_in_asicdb(self, asicdb, tunnel_sai_oid, dst_ips): + def check_tunnel_termination_entry_exists_in_asicdb(self, asicdb, tunnel_sai_oid, dst_ips, src_ip=None): tunnel_term_table = swsscommon.Table(asicdb, self.ASIC_TUNNEL_TERM_ENTRIES) tunnel_term_entries = tunnel_term_table.getKeys() assert 
len(tunnel_term_entries) == len(dst_ips) + expected_term_type = "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2P" if src_ip else "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP" + expected_len = 6 if src_ip else 5 for term_entry in tunnel_term_entries: status, fvs = tunnel_term_table.get(term_entry) assert status == True - assert len(fvs) == 5 + assert len(fvs) == expected_len for field, value in fvs: if field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_VR_ID": assert self.check_vr_exists_in_asicdb(asicdb, value) elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TYPE": - assert value == "SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP" + assert value == expected_term_type elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TUNNEL_TYPE": assert value == "SAI_TUNNEL_TYPE_IPINIP" elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_ACTION_TUNNEL_ID": assert value == tunnel_sai_oid elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP": assert value in dst_ips + elif field == "SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_SRC_IP" and src_ip: + assert value == src_ip else: assert False, "Field %s is not tested" % field def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): """ Create tunnel and verify all needed enties in ASIC DB exists """ - is_symmetric_tunnel = "src_ip" in kwargs; - - # create tunnel entry in DB - ps = swsscommon.ProducerStateTable(db, self.APP_TUNNEL_DECAP_TABLE_NAME) - - fvs = create_fvs(**kwargs) - - ps.set(tunnel_name, fvs) + is_symmetric_tunnel = "src_ip" in kwargs + + decap_dscp_to_tc_map_oid = None + decap_tc_to_pg_map_oid = None + skip_tunnel_creation = False + + if "decap_dscp_to_tc_map_oid" in kwargs: + decap_dscp_to_tc_map_oid = kwargs.pop("decap_dscp_to_tc_map_oid") + + if "decap_tc_to_pg_map_oid" in kwargs: + decap_tc_to_pg_map_oid = kwargs.pop("decap_tc_to_pg_map_oid") + + if "skip_tunnel_creation" in kwargs: + skip_tunnel_creation = kwargs.pop("skip_tunnel_creation") + + if not skip_tunnel_creation: + fvs = create_fvs(**kwargs) + # create tunnel entry in DB + ps = 
swsscommon.ProducerStateTable(db, self.APP_TUNNEL_DECAP_TABLE_NAME) + ps.set(tunnel_name, fvs) # wait till config will be applied time.sleep(1) @@ -95,11 +116,18 @@ def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): assert status == True # 6 parameters to check in case of decap tunnel # + 1 (SAI_TUNNEL_ATTR_ENCAP_SRC_IP) in case of symmetric tunnel - assert len(fvs) == 7 if is_symmetric_tunnel else 6 + expected_len = 7 if is_symmetric_tunnel else 6 expected_ecn_mode = self.ecn_modes_map[kwargs["ecn_mode"]] expected_dscp_mode = self.dscp_modes_map[kwargs["dscp_mode"]] expected_ttl_mode = self.ttl_modes_map[kwargs["ttl_mode"]] + + if decap_dscp_to_tc_map_oid: + expected_len += 1 + if decap_tc_to_pg_map_oid: + expected_len += 1 + + assert len(fvs) == expected_len for field, value in fvs: if field == "SAI_TUNNEL_ATTR_TYPE": @@ -116,10 +144,14 @@ def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs): assert self.check_interface_exists_in_asicdb(asicdb, value) elif field == "SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE": assert self.check_interface_exists_in_asicdb(asicdb, value) + elif field == "SAI_TUNNEL_ATTR_DECAP_QOS_DSCP_TO_TC_MAP": + assert value == decap_dscp_to_tc_map_oid + elif field == "SAI_TUNNEL_ATTR_DECAP_QOS_TC_TO_PRIORITY_GROUP_MAP": + assert value == decap_tc_to_pg_map_oid else: assert False, "Field %s is not tested" % field - - self.check_tunnel_termination_entry_exists_in_asicdb(asicdb, tunnel_sai_obj, kwargs["dst_ip"].split(",")) + src_ip = kwargs["src_ip"] if "src_ip" in kwargs else None + self.check_tunnel_termination_entry_exists_in_asicdb(asicdb, tunnel_sai_obj, kwargs["dst_ip"].split(","), src_ip) def remove_and_test_tunnel(self, db, asicdb, tunnel_name): """ Removes tunnel and checks that ASIC db is clear""" @@ -147,6 +179,26 @@ def remove_and_test_tunnel(self, db, asicdb, tunnel_name): assert len(tunnel_app_table.getKeys()) == 0 assert not self.check_interface_exists_in_asicdb(asicdb, overlay_infs_id) + def 
add_qos_map(self, configdb, asicdb, qos_map_type_name, qos_map_name, qos_map): + """ Add qos map for testing""" + qos_table = swsscommon.Table(asicdb, self.ASIC_QOS_MAP_TABLE_KEY) + current_oids = qos_table.getKeys() + + # Apply QoS map to config db + table = swsscommon.Table(configdb, qos_map_type_name) + fvs = swsscommon.FieldValuePairs(list(qos_map.items())) + table.set(qos_map_name, fvs) + time.sleep(1) + + diff = set(qos_table.getKeys()) - set(current_oids) + assert len(diff) == 1 + oid = diff.pop() + return oid + + def remove_qos_map(self, configdb, qos_map_type_name, qos_map_name): + """ Remove the testing qos map""" + table = swsscommon.Table(configdb, qos_map_type_name) + table._del(qos_map_name) def cleanup_left_over(self, db, asicdb): """ Cleanup APP and ASIC tables """ @@ -159,10 +211,9 @@ def cleanup_left_over(self, db, asicdb): for key in tunnel_term_table.getKeys(): tunnel_term_table._del(key) - tunnel_app_table = swsscommon.Table(asicdb, self.APP_TUNNEL_DECAP_TABLE_NAME) + tunnel_app_table = swsscommon.Table(db, self.APP_TUNNEL_DECAP_TABLE_NAME) for key in tunnel_app_table.getKeys(): - tunnel_table._del(key) - + tunnel_app_table._del(key) class TestDecapTunnel(TestTunnelBase): """ Tests for decap tunnel creation and removal """ @@ -194,7 +245,84 @@ def test_TunnelDecap_v6(self, dvs, testlog): dst_ip="2::2,3::3", dscp_mode="pipe", ecn_mode="copy_from_outer", ttl_mode="uniform") self.remove_and_test_tunnel(db, asicdb,"IPINIPv6Decap") + + def test_TunnelDecap_MuxTunnel(self, dvs, testlog): + """ Test MuxTunnel creation. 
""" + db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + asicdb = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + configdb = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + self.cleanup_left_over(db, asicdb) + + dscp_to_tc_map_oid = self.add_qos_map(configdb, asicdb, swsscommon.CFG_DSCP_TO_TC_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.DSCP_TO_TC_MAP) + tc_to_pg_map_oid = self.add_qos_map(configdb, asicdb, swsscommon.CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_PRIORITY_GROUP_MAP) + + # Create MuxTunnel0 with QoS remapping attributes + params = { + "tunnel_type": "IPINIP", + "src_ip": "1.1.1.1", + "dst_ip": "1.1.1.2", + "dscp_mode": "pipe", + "ecn_mode": "copy_from_outer", + "ttl_mode": "uniform", + "decap_dscp_to_tc_map": "AZURE_TUNNEL", + "decap_dscp_to_tc_map_oid": dscp_to_tc_map_oid, + "decap_tc_to_pg_map": "AZURE_TUNNEL", + "decap_tc_to_pg_map_oid": tc_to_pg_map_oid + } + self.create_and_test_tunnel(db, asicdb, tunnel_name="MuxTunnel0", **params) + + # Remove Tunnel first + self.remove_and_test_tunnel(db, asicdb,"MuxTunnel0") + + self.remove_qos_map(configdb, swsscommon.CFG_DSCP_TO_TC_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME) + self.remove_qos_map(configdb, swsscommon.CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME) + + def test_TunnelDecap_MuxTunnel_with_retry(self, dvs, testlog): + """ Test MuxTunnel creation. 
""" + db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + asicdb = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + configdb = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + + self.cleanup_left_over(db, asicdb) + + # Create MuxTunnel0 with QoS remapping attributes + params = { + "tunnel_type": "IPINIP", + "src_ip": "1.1.1.1", + "dst_ip": "1.1.1.2", + "dscp_mode": "pipe", + "ecn_mode": "copy_from_outer", + "ttl_mode": "uniform", + "decap_dscp_to_tc_map": "AZURE_TUNNEL", + "decap_tc_to_pg_map": "AZURE_TUNNEL", + } + # Verify tunnel is not created when decap_dscp_to_tc_map/decap_tc_to_pg_map is specified while oid is not ready in qosorch + fvs = create_fvs(**params) + # create tunnel entry in DB + ps = swsscommon.ProducerStateTable(db, self.APP_TUNNEL_DECAP_TABLE_NAME) + ps.set("MuxTunnel0", fvs) + + time.sleep(1) + # check asic db table + tunnel_table = swsscommon.Table(asicdb, self.ASIC_TUNNEL_TABLE) + tunnels = tunnel_table.getKeys() + assert len(tunnels) == 0 + + #Verify tunneldecaporch creates tunnel when qos map is available + dscp_to_tc_map_oid = self.add_qos_map(configdb, asicdb, swsscommon.CFG_DSCP_TO_TC_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.DSCP_TO_TC_MAP) + tc_to_pg_map_oid = self.add_qos_map(configdb, asicdb, swsscommon.CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, self.TUNNEL_QOS_MAP_NAME, self.TC_TO_PRIORITY_GROUP_MAP) + params.update({ + "decap_dscp_to_tc_map_oid": dscp_to_tc_map_oid, + "decap_tc_to_pg_map_oid": tc_to_pg_map_oid, + "skip_tunnel_creation": True + }) + self.create_and_test_tunnel(db, asicdb, tunnel_name="MuxTunnel0", **params) + + # Cleanup + self.remove_and_test_tunnel(db, asicdb,"MuxTunnel0") + self.remove_qos_map(configdb, swsscommon.CFG_DSCP_TO_TC_MAP_TABLE_NAME, dscp_to_tc_map_oid) + self.remove_qos_map(configdb, swsscommon.CFG_TC_TO_PRIORITY_GROUP_MAP_TABLE_NAME, tc_to_pg_map_oid) class TestSymmetricTunnel(TestTunnelBase): """ Tests for symmetric tunnel creation and removal """ 
diff --git a/tests/test_virtual_chassis.py b/tests/test_virtual_chassis.py index 9f4d6ddedb..6f95944c79 100644 --- a/tests/test_virtual_chassis.py +++ b/tests/test_virtual_chassis.py @@ -1,6 +1,9 @@ from swsscommon import swsscommon from dvslib.dvs_database import DVSDatabase import ast +import time +import pytest +import buffer_model class TestVirtualChassis(object): @@ -38,7 +41,7 @@ def config_inbandif_port(self, vct, ibport): # Configure only for line cards if cfg_switch_type == "voq": - dvs.runcmd(f"config interface startup {ibport}") + dvs.port_admin_set(f"{ibport}", "up") config_db.create_entry("VOQ_INBAND_INTERFACE", f"{ibport}", {"inband_type": "port"}) def del_inbandif_port(self, vct, ibport): @@ -135,6 +138,7 @@ def test_voq_switch(self, vct): spcfg = ast.literal_eval(value) assert spcfg['count'] == sp_count, "Number of systems ports configured is invalid" + @pytest.mark.skip(reason="Failing. Under investigation") def test_chassis_app_db_sync(self, vct): """Test chassis app db syncing. @@ -155,6 +159,7 @@ def test_chassis_app_db_sync(self, vct): keys = chassis_app_db.get_keys("SYSTEM_INTERFACE") assert len(keys), "No chassis app db syncing is done" + @pytest.mark.skip(reason="Failing. Under investigation") def test_chassis_system_interface(self, vct): """Test RIF record creation in ASIC_DB for remote interfaces. @@ -211,6 +216,7 @@ def test_chassis_system_interface(self, vct): # Remote system ports's switch id should not match local switch id assert spcfginfo["attached_switch_id"] != lc_switch_id, "RIF system port with wrong switch_id" + @pytest.mark.skip(reason="Failing. Under investigation") def test_chassis_system_neigh(self, vct): """Test neigh record create/delete and syncing to chassis app db. 
@@ -239,156 +245,175 @@ def test_chassis_system_neigh(self, vct): # Test neighbor on Ethernet4 since Ethernet0 is used as Inband port test_neigh_dev = "Ethernet4" test_neigh_ip = "10.8.104.3" - test_neigh_mac = "00:01:02:03:04:05" - dvss = vct.dvss - print("name {}".format(dvss.keys())) - for name in dvss.keys(): - dvs = dvss[name] - - config_db = dvs.get_config_db() - metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") - - cfg_switch_type = metatbl.get("switch_type") - - # Neighbor record verifiation done in line card - if cfg_switch_type == "voq": - lc_switch_id = metatbl.get("switch_id") - assert lc_switch_id != "", "Got error in getting switch_id from CONFIG_DB DEVICE_METADATA" - if lc_switch_id == "0": - - # Add a static neighbor - _, res = dvs.runcmd(['sh', "-c", "ip neigh show"]) - _, res = dvs.runcmd(['sh', "-c", f"ip neigh add {test_neigh_ip} lladdr {test_neigh_mac} dev {test_neigh_dev}"]) - assert res == "", "Error configuring static neigh" - - asic_db = dvs.get_asic_db() - asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY", 1) - neighkeys = asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY") - assert len(neighkeys), "No neigh entries in ASIC_DB" - - # Check for presence of the neighbor in ASIC_DB - test_neigh = "" - for nkey in neighkeys: - ne = ast.literal_eval(nkey) - if ne['ip'] == test_neigh_ip: - test_neigh = nkey - break + # Grouping together the checks done during neighbor entry create into a function chassis_system_neigh_create() + # if action is "add" it creates a new neighbor entry in local asic + # if action is "change" it updates an existing neighbor entry with the mac_address + def chassis_system_neigh_create(): + dvss = vct.dvss + print("name {}".format(dvss.keys())) + for name in dvss.keys(): + dvs = dvss[name] - assert test_neigh != "", "Neigh not found in ASIC_DB" + config_db = dvs.get_config_db() + metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") + + cfg_switch_type = 
metatbl.get("switch_type") + + # Neighbor record verifiation done in line card + if cfg_switch_type == "voq": + lc_switch_id = metatbl.get("switch_id") + assert lc_switch_id != "", "Got error in getting switch_id from CONFIG_DB DEVICE_METADATA" + if lc_switch_id == "0": + + # Add a static neighbor + _, res = dvs.runcmd(['sh', "-c", "ip neigh show"]) + _, res = dvs.runcmd(['sh', "-c", f"ip neigh {action} {test_neigh_ip} lladdr {mac_address} dev {test_neigh_dev}"]) + assert res == "", "Error configuring static neigh" + + asic_db = dvs.get_asic_db() + asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY", 1) + neighkeys = asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY") + assert len(neighkeys), "No neigh entries in ASIC_DB" + + # Check for presence of the neighbor in ASIC_DB + test_neigh = "" + for nkey in neighkeys: + ne = ast.literal_eval(nkey) + if ne['ip'] == test_neigh_ip: + test_neigh = nkey + break - # Preserve test neigh asic db key for delete verification later - test_neigh_asic_db_key = test_neigh + assert test_neigh != "", "Neigh not found in ASIC_DB" - # Check for presence of encap index, retrieve and store it for sync verification - test_neigh_entry = asic_db.wait_for_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY", test_neigh) - test_neigh_entry_attrs = asic_db.wait_for_fields("ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY", test_neigh, ["SAI_NEIGHBOR_ENTRY_ATTR_ENCAP_INDEX"]) - print(test_neigh) - print(test_neigh_entry) - print(test_neigh_entry_attrs) - encap_index = test_neigh_entry_attrs["SAI_NEIGHBOR_ENTRY_ATTR_ENCAP_INDEX"] - assert encap_index != "" and encap_index != None, "VOQ encap index is not programmed in ASIC_DB" + # Preserve test neigh asic db key for delete verification later + test_neigh_asic_db_key = test_neigh - break + # Check for presence of encap index, retrieve and store it for sync verification + test_neigh_entry = asic_db.wait_for_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY", test_neigh) + 
test_neigh_entry_attrs = asic_db.wait_for_fields("ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY", test_neigh, ["SAI_NEIGHBOR_ENTRY_ATTR_ENCAP_INDEX"]) + print(test_neigh) + print(test_neigh_entry) + print(test_neigh_entry_attrs) + encap_index = test_neigh_entry_attrs["SAI_NEIGHBOR_ENTRY_ATTR_ENCAP_INDEX"] + assert encap_index != "" and encap_index != None, "VOQ encap index is not programmed in ASIC_DB" - # Verify neighbor record syncing with encap index - for name in dvss.keys(): - if name.startswith("supervisor"): - dvs = dvss[name] - chassis_app_db = DVSDatabase(swsscommon.CHASSIS_APP_DB, dvs.redis_chassis_sock) - chassis_app_db.wait_for_n_keys("SYSTEM_NEIGH", 1) - sysneighkeys = chassis_app_db.get_keys("SYSTEM_NEIGH") - - print(sysneighkeys) - test_sysneigh = "" - for sysnk in sysneighkeys: - sysnk_tok = sysnk.split("|") - assert len(sysnk_tok) == 3, "Invalid system neigh key in chassis app db" - if sysnk_tok[2] == test_neigh_ip: - test_sysneigh = sysnk break - assert test_sysneigh != "", "Neigh is not sync-ed to chassis app db" + # Verify neighbor record syncing with encap index + for name in dvss.keys(): + if name.startswith("supervisor"): + dvs = dvss[name] + chassis_app_db = DVSDatabase(swsscommon.CHASSIS_APP_DB, dvs.redis_chassis_sock) + chassis_app_db.wait_for_n_keys("SYSTEM_NEIGH", 1) + sysneighkeys = chassis_app_db.get_keys("SYSTEM_NEIGH") + + print(sysneighkeys) + test_sysneigh = "" + for sysnk in sysneighkeys: + sysnk_tok = sysnk.split("|") + assert len(sysnk_tok) == 3, "Invalid system neigh key in chassis app db" + if sysnk_tok[2] == test_neigh_ip: + test_sysneigh = sysnk + break - # Preserve test sys neigh chassis app db key for delete verification later - test_sysneigh_chassis_app_db_key = test_sysneigh + assert test_sysneigh != "", "Neigh is not sync-ed to chassis app db" - test_sysneigh_entry = chassis_app_db.get_entry("SYSTEM_NEIGH", test_sysneigh) - sys_neigh_encap_index = test_sysneigh_entry.get("encap_index") - assert sys_neigh_encap_index != "", 
"System neigh in chassis app db does not have encap index" + # Preserve test sys neigh chassis app db key for delete verification later + test_sysneigh_chassis_app_db_key = test_sysneigh - assert encap_index == sys_neigh_encap_index, "Encap index not sync-ed correctly" + test_sysneigh_entry = chassis_app_db.get_entry("SYSTEM_NEIGH", test_sysneigh) + sys_neigh_encap_index = test_sysneigh_entry.get("encap_index") + assert sys_neigh_encap_index != "", "System neigh in chassis app db does not have encap index" - break + assert encap_index == sys_neigh_encap_index, "Encap index not sync-ed correctly" - # Verify programming of remote neighbor in asic db and programming of static route and static - # neigh in the kernel for the remote neighbor. The neighbor created in linecard 1 will be a - # remote neighbor in other linecards. Verity existence of the test neighbor in linecards other - # than linecard 1 - for name in dvss.keys(): - dvs = dvss[name] + break - config_db = dvs.get_config_db() - metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") + # Add a delay for the programming of neighbor in remote LC + time.sleep(10) - cfg_switch_type = metatbl.get("switch_type") + # Verify programming of remote neighbor in asic db and programming of static route and static + # neigh in the kernel for the remote neighbor. The neighbor created in linecard 1 will be a + # remote neighbor in other linecards. 
Verity existence of the test neighbor in linecards other + # than linecard 1 + for name in dvss.keys(): + dvs = dvss[name] - # Neighbor record verifiation done in line card - if cfg_switch_type == "voq": - lc_switch_id = metatbl.get("switch_id") - assert lc_switch_id != "", "Got error in getting switch_id from CONFIG_DB DEVICE_METADATA" - if lc_switch_id != "0": - # Linecard other than linecard 1 - asic_db = dvs.get_asic_db() - asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY", 1) - neighkeys = asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY") - assert len(neighkeys), "No neigh entries in ASIC_DB" - - # Check for presence of the remote neighbor in ASIC_DB - remote_neigh = "" - for nkey in neighkeys: - ne = ast.literal_eval(nkey) - if ne['ip'] == test_neigh_ip: - remote_neigh = nkey - break + config_db = dvs.get_config_db() + metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") + + cfg_switch_type = metatbl.get("switch_type") + + # Neighbor record verifiation done in line card + if cfg_switch_type == "voq": + lc_switch_id = metatbl.get("switch_id") + assert lc_switch_id != "", "Got error in getting switch_id from CONFIG_DB DEVICE_METADATA" + if lc_switch_id != "0": + # Linecard other than linecard 1 + asic_db = dvs.get_asic_db() + asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY", 1) + neighkeys = asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY") + assert len(neighkeys), "No neigh entries in ASIC_DB" + + # Check for presence of the remote neighbor in ASIC_DB + remote_neigh = "" + for nkey in neighkeys: + ne = ast.literal_eval(nkey) + if ne['ip'] == test_neigh_ip: + remote_neigh = nkey + break - assert remote_neigh != "", "Remote neigh not found in ASIC_DB" + assert remote_neigh != "", "Remote neigh not found in ASIC_DB" - # Preserve remote neigh asic db neigh key for delete verification later - test_remote_neigh_asic_db_key = remote_neigh + # Preserve remote neigh asic db neigh key for delete 
verification later + test_remote_neigh_asic_db_key = remote_neigh - # Check for kernel entries + # Check for kernel entries - _, output = dvs.runcmd("ip neigh show") - assert f"{test_neigh_ip} dev {inband_port}" in output, "Kernel neigh not found for remote neighbor" + _, output = dvs.runcmd("ip neigh show") + assert f"{test_neigh_ip} dev {inband_port}" in output, "Kernel neigh not found for remote neighbor" - _, output = dvs.runcmd("ip route show") - assert f"{test_neigh_ip} dev {inband_port} scope link" in output, "Kernel route not found for remote neighbor" + _, output = dvs.runcmd("ip route show") + assert f"{test_neigh_ip} dev {inband_port} scope link" in output, "Kernel route not found for remote neighbor" - # Check for ASIC_DB entries. + # Check for ASIC_DB entries. - # Check for presence of encap index, retrieve and store it for sync verification - remote_neigh_entry = asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY", remote_neigh) + # Check for presence of encap index, retrieve and store it for sync verification + remote_neigh_entry = asic_db.get_entry("ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY", remote_neigh) - # Validate encap index - remote_encap_index = remote_neigh_entry.get("SAI_NEIGHBOR_ENTRY_ATTR_ENCAP_INDEX") - assert remote_encap_index != "", "VOQ encap index is not programmed for remote neigh in ASIC_DB" - assert remote_encap_index == encap_index, "Encap index of remote neigh mismatch with allocated encap index" + # Validate encap index + remote_encap_index = remote_neigh_entry.get("SAI_NEIGHBOR_ENTRY_ATTR_ENCAP_INDEX") + assert remote_encap_index != "", "VOQ encap index is not programmed for remote neigh in ASIC_DB" + assert remote_encap_index == encap_index, "Encap index of remote neigh mismatch with allocated encap index" - # Validate MAC - mac = remote_neigh_entry.get("SAI_NEIGHBOR_ENTRY_ATTR_DST_MAC_ADDRESS") - assert mac != "", "MAC address is not programmed for remote neigh in ASIC_DB" - assert mac == test_neigh_mac, "Encap 
index of remote neigh mismatch with allocated encap index" + # Validate MAC + mac = remote_neigh_entry.get("SAI_NEIGHBOR_ENTRY_ATTR_DST_MAC_ADDRESS") + assert mac != "", "MAC address is not programmed for remote neigh in ASIC_DB" + assert mac == mac_address, "Encap index of remote neigh mismatch with allocated encap index" - # Check for other mandatory attributes - # For remote neighbors, is_local must be "false" - is_local = remote_neigh_entry.get("SAI_NEIGHBOR_ENTRY_ATTR_IS_LOCAL") - assert is_local != "", "is_local attribute is not programmed for remote neigh in ASIC_DB" - assert is_local == "false", "is_local attribute is true for remote neigh" + # Check for other mandatory attributes + # For remote neighbors, is_local must be "false" + is_local = remote_neigh_entry.get("SAI_NEIGHBOR_ENTRY_ATTR_IS_LOCAL") + assert is_local != "", "is_local attribute is not programmed for remote neigh in ASIC_DB" + assert is_local == "false", "is_local attribute is true for remote neigh" - break + break + + return test_neigh_asic_db_key, test_sysneigh_chassis_app_db_key, test_remote_neigh_asic_db_key + + # First step is to add a new neighbor and check local/chassis_db/remote entry creation + mac_address = "00:01:02:03:04:77" + action = "add" + chassis_system_neigh_create() + + # Second step to update the mac address and check local/chassis_db/remote entry creation + mac_address = "00:01:02:03:04:05" + action = "change" + test_neigh_asic_db_key, test_sysneigh_chassis_app_db_key, test_remote_neigh_asic_db_key = chassis_system_neigh_create() # Verify system neighbor delete and clearing + dvss = vct.dvss for name in dvss.keys(): dvs = dvss[name] @@ -462,6 +487,7 @@ def test_chassis_system_neigh(self, vct): # Cleanup inband if configuration self.del_inbandif_port(vct, inband_port) + @pytest.mark.skip(reason="Failing. Under investigation") def test_chassis_system_lag(self, vct): """Test PortChannel in VOQ based chassis systems. 
@@ -598,6 +624,7 @@ def test_chassis_system_lag(self, vct): break + @pytest.mark.skip(reason="Failing. Under investigation") def test_chassis_system_lag_id_allocator_table_full(self, vct): """Test lag id allocator table full. @@ -675,6 +702,7 @@ def test_chassis_system_lag_id_allocator_table_full(self, vct): break + @pytest.mark.skip(reason="Failing. Under investigation") def test_chassis_system_lag_id_allocator_del_id(self, vct): """Test lag id allocator's release id and re-use id processing. @@ -825,7 +853,93 @@ def test_chassis_system_lag_id_allocator_del_id(self, vct): assert len(lagmemberkeys) == 0, "Stale system lag member entries in asic db" break - + + def test_chassis_add_remove_ports(self, vct): + """Test removing and adding a port in a VOQ chassis. + + Test validates that when a port is created the port is removed from the default vlan. + """ + dvss = vct.dvss + for name in dvss.keys(): + dvs = dvss[name] + buffer_model.enable_dynamic_buffer(dvs.get_config_db(), dvs.runcmd) + + config_db = dvs.get_config_db() + app_db = dvs.get_app_db() + asic_db = dvs.get_asic_db() + metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") + cfg_switch_type = metatbl.get("switch_type") + + if cfg_switch_type == "voq": + num_ports = len(asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT")) + # Get the port info we'll flap + port = config_db.get_keys('PORT')[0] + port_info = config_db.get_entry("PORT", port) + + # Remove port's other configs + pgs = config_db.get_keys('BUFFER_PG') + queues = config_db.get_keys('BUFFER_QUEUE') + for key in pgs: + if port in key: + config_db.delete_entry('BUFFER_PG', key) + app_db.wait_for_deleted_entry('BUFFER_PG_TABLE', key) + + for key in queues: + if port in key: + config_db.delete_entry('BUFFER_QUEUE', key) + app_db.wait_for_deleted_entry('BUFFER_QUEUE_TABLE', key) + + # Remove port + config_db.delete_entry('PORT', port) + app_db.wait_for_deleted_entry('PORT_TABLE', port) + num = 
asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", + num_ports-1) + assert len(num) == num_ports-1 + + marker = dvs.add_log_marker() + + # Create port + config_db.update_entry("PORT", port, port_info) + app_db.wait_for_entry("PORT_TABLE", port) + num = asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", + num_ports) + assert len(num) == num_ports + + # Check that we see the logs for removing default vlan + matching_log = "removeDefaultVlanMembers: Remove 0 VLAN members from default VLAN" + _, logSeen = dvs.runcmd( [ "sh", "-c", + "awk '/{}/,ENDFILE {{print;}}' /var/log/syslog | grep '{}' | wc -l".format( marker, matching_log ) ] ) + assert logSeen.strip() == "1" + + buffer_model.disable_dynamic_buffer(dvs.get_config_db(), dvs.runcmd) + + def test_voq_egress_queue_counter(self, vct): + if vct is None: + return + dvss = vct.dvss + dvs = None + for name in dvss.keys(): + if "supervisor" in name: + continue + dvs = dvss[name] + break + assert dvs + _, _ = dvs.runcmd("counterpoll queue enable") + + num_voqs_per_port = 8 + # vs-switch creates 20 queues per port. 
+ num_queues_per_local_port = 20 + num_ports_per_linecard = 32 + num_local_ports = 32 + num_linecards = 3 + num_sysports = num_ports_per_linecard * num_linecards + num_egress_queues = num_local_ports * num_queues_per_local_port + num_voqs = ( num_ports_per_linecard * num_voqs_per_port * num_linecards ) + num_queues_to_be_polled = num_voqs + num_egress_queues + + flex_db = dvs.get_flex_db() + flex_db.wait_for_n_keys("FLEX_COUNTER_TABLE:QUEUE_STAT_COUNTER", num_queues_to_be_polled) + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying def test_nonflaky_dummy(): diff --git a/tests/test_vlan.py b/tests/test_vlan.py index 6e43227a56..28d3de3a29 100644 --- a/tests/test_vlan.py +++ b/tests/test_vlan.py @@ -2,7 +2,7 @@ import pytest from distutils.version import StrictVersion -from dvslib.dvs_common import PollingConfig +from dvslib.dvs_common import PollingConfig, wait_for_result @pytest.mark.usefixtures("testlog") @pytest.mark.usefixtures('dvs_vlan_manager') @@ -436,6 +436,104 @@ def test_VlanHostIf(self, dvs): self.dvs_vlan.get_and_verify_vlan_ids(0) self.dvs_vlan.get_and_verify_vlan_hostif_ids(len(dvs.asic_db.hostif_name_map) - 1) + def test_VlanGratArp(self, dvs): + def arp_accept_enabled(): + rc, res = dvs.runcmd("cat /proc/sys/net/ipv4/conf/Vlan{}/arp_accept".format(vlan)) + return (res.strip("\n") == "1", res) + + def arp_accept_disabled(): + rc, res = dvs.runcmd("cat /proc/sys/net/ipv4/conf/Vlan{}/arp_accept".format(vlan)) + return (res.strip("\n") == "0", res) + + vlan = "2" + self.dvs_vlan.create_vlan(vlan) + self.dvs_vlan.create_vlan_interface(vlan) + self.dvs_vlan.set_vlan_intf_property(vlan, "grat_arp", "enabled") + + wait_for_result(arp_accept_enabled, PollingConfig(), "IPv4 arp_accept not enabled") + + # Not currently possible to test `accept_untracked_na` as it doesn't exist in the kernel for + # our test VMs (only present in kernels 5.19 and above) + + 
self.dvs_vlan.set_vlan_intf_property(vlan, "grat_arp", "disabled") + + wait_for_result(arp_accept_disabled, PollingConfig(), "IPv4 arp_accept not disabled") + + self.dvs_vlan.remove_vlan_interface(vlan) + self.dvs_vlan.remove_vlan(vlan) + self.dvs_vlan.get_and_verify_vlan_ids(0) + + def test_VlanProxyArp(self, dvs): + + def proxy_arp_enabled(): + rc, proxy_arp_res = dvs.runcmd("cat /proc/sys/net/ipv4/conf/Vlan{}/proxy_arp".format(vlan)) + rc, pvlan_res = dvs.runcmd("cat /proc/sys/net/ipv4/conf/Vlan{}/proxy_arp_pvlan".format(vlan)) + + return (proxy_arp_res.strip("\n") == "1" and pvlan_res.strip("\n") == "1", (proxy_arp_res, pvlan_res)) + + def proxy_arp_disabled(): + rc, proxy_arp_res = dvs.runcmd("cat /proc/sys/net/ipv4/conf/Vlan{}/proxy_arp".format(vlan)) + rc, pvlan_res = dvs.runcmd("cat /proc/sys/net/ipv4/conf/Vlan{}/proxy_arp_pvlan".format(vlan)) + + return (proxy_arp_res.strip("\n") == "0" and pvlan_res.strip("\n") == "0", (proxy_arp_res, pvlan_res)) + + vlan = "2" + self.dvs_vlan.create_vlan(vlan) + self.dvs_vlan.create_vlan_interface(vlan) + self.dvs_vlan.set_vlan_intf_property(vlan, "proxy_arp", "enabled") + + wait_for_result(proxy_arp_enabled, PollingConfig(), 'IPv4 proxy_arp or proxy_arp_pvlan not enabled') + + self.dvs_vlan.set_vlan_intf_property(vlan, "proxy_arp", "disabled") + + wait_for_result(proxy_arp_disabled, PollingConfig(), 'IPv4 proxy_arp or proxy_arp_pvlan not disabled') + + self.dvs_vlan.remove_vlan_interface(vlan) + self.dvs_vlan.remove_vlan(vlan) + self.dvs_vlan.get_and_verify_vlan_ids(0) + + def test_VlanMemberLinkDown(self, dvs): + + # TODO: add_ip_address has a dependency on cdb within dvs, + # so we still need to setup the db. This should be refactored. 
+ dvs.setup_db() + + vlan = "1000" + vlan_ip = "192.168.0.1/21" + interface = "Ethernet0" + vlan_interface = "Vlan%s" % vlan + server_ip = "192.168.0.100" + vlan_intf_sysctl_param_path = "/proc/sys/net/ipv4/conf/%s/arp_evict_nocarrier" % vlan_interface + + self.dvs_vlan.create_vlan(vlan) + vlan_oid = self.dvs_vlan.get_and_verify_vlan_ids(1)[0] + self.dvs_vlan.verify_vlan(vlan_oid, vlan) + self.dvs_vlan.create_vlan_member(vlan, interface) + self.dvs_vlan.verify_vlan_member(vlan_oid, interface) + dvs.set_interface_status(interface, "up") + dvs.add_ip_address(vlan_interface, vlan_ip) + dvs.runcmd("ip neigh replace %s lladdr 11:22:33:44:55:66 dev %s nud stale" % (server_ip, vlan_interface)) + + neigh_oid = self.dvs_vlan.app_db.wait_for_n_keys("NEIGH_TABLE", 1)[0] + assert vlan_interface in neigh_oid and server_ip in neigh_oid + + # NOTE: arp_evict_nocarrier is available for kernel >= v5.16 and current + # docker-sonic-vs is based on kernel v5.4.0, so test only if this sysctl + # param is present + rc, res = dvs.runcmd("cat %s" % vlan_intf_sysctl_param_path) + if rc == 0: + assert res.strip() == "0" + dvs.set_interface_status(interface, "down") + neigh_oid = self.dvs_vlan.app_db.wait_for_n_keys("NEIGH_TABLE", 1)[0] + assert vlan_interface in neigh_oid and server_ip in neigh_oid + + dvs.runcmd("ip neigh flush all") + dvs.remove_ip_address(vlan_interface, vlan_ip) + self.dvs_vlan.remove_vlan_member(vlan, interface) + self.dvs_vlan.get_and_verify_vlan_member_ids(0) + self.dvs_vlan.remove_vlan(vlan) + self.dvs_vlan.get_and_verify_vlan_ids(0) + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying def test_nonflaky_dummy(): diff --git a/tests/test_vnet.py b/tests/test_vnet.py index 41217de92e..3b1ef6efd9 100644 --- a/tests/test_vnet.py +++ b/tests/test_vnet.py @@ -64,7 +64,7 @@ def get_created_entry(db, table, existed_entries): def get_all_created_entries(db, table, existed_entries): tbl = 
swsscommon.Table(db, table) entries = set(tbl.getKeys()) - new_entries = list(entries - existed_entries) + new_entries = list(entries - set(existed_entries)) assert len(new_entries) >= 0, "Get all could be no new created entries." new_entries.sort() return new_entries @@ -140,11 +140,11 @@ def delete_vnet_local_routes(dvs, prefix, vnet_name): time.sleep(2) -def create_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor=""): - set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac=mac, vni=vni, ep_monitor=ep_monitor) +def create_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor="", profile="", primary="", monitoring="", adv_prefix=""): + set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac=mac, vni=vni, ep_monitor=ep_monitor, profile=profile, primary=primary, monitoring=monitoring, adv_prefix=adv_prefix) -def set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor=""): +def set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor="", profile="", primary="", monitoring="", adv_prefix=""): conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) attrs = [ @@ -160,6 +160,18 @@ def set_vnet_routes(dvs, prefix, vnet_name, endpoint, mac="", vni=0, ep_monitor= if ep_monitor: attrs.append(('endpoint_monitor', ep_monitor)) + if profile: + attrs.append(('profile', profile)) + + if primary: + attrs.append(('primary', primary)) + + if monitoring: + attrs.append(('monitoring', monitoring)) + + if adv_prefix: + attrs.append(('adv_prefix', adv_prefix)) + tbl = swsscommon.Table(conf_db, "VNET_ROUTE_TUNNEL") fvs = swsscommon.FieldValuePairs(attrs) tbl.set("%s|%s" % (vnet_name, prefix), fvs) @@ -314,7 +326,7 @@ def delete_phy_interface(dvs, ifname, ipaddr): time.sleep(2) -def create_vnet_entry(dvs, name, tunnel, vni, peer_list, scope="", advertise_prefix=False): +def create_vnet_entry(dvs, name, tunnel, vni, peer_list, scope="", advertise_prefix=False, overlay_dmac=""): conf_db = 
swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) @@ -330,6 +342,9 @@ def create_vnet_entry(dvs, name, tunnel, vni, peer_list, scope="", advertise_pre if advertise_prefix: attrs.append(('advertise_prefix', 'true')) + if overlay_dmac: + attrs.append(('overlay_dmac', overlay_dmac)) + # create the VXLAN tunnel Term entry in Config DB create_entry_tbl( conf_db, @@ -362,6 +377,9 @@ def create_vxlan_tunnel(dvs, name, src_ip): attrs, ) +def delete_vxlan_tunnel(dvs, name): + conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + delete_entry_tbl(conf_db, "VXLAN_TUNNEL", name) def create_vxlan_tunnel_map(dvs, tunnel_name, tunnel_map_entry_name, vlan, vni_id): conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) @@ -438,6 +456,15 @@ def update_bfd_session_state(dvs, addr, state): ntf_data = "[{\"bfd_session_id\":\""+bfd_id+"\",\"session_state\":\""+bfd_sai_state[state]+"\"}]" ntf.send("bfd_session_state_change", ntf_data, fvp) +def update_monitor_session_state(dvs, addr, monitor, state): + state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) + create_entry_tbl( + state_db, + "VNET_MONITOR_TABLE", '|', "%s|%s" % (monitor,addr), + [ + ("state", state), + ] + ) def get_bfd_session_id(dvs, addr): asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) @@ -447,7 +474,7 @@ def get_bfd_session_id(dvs, addr): status, fvs = tbl.get(entry) fvs = dict(fvs) assert status, "Got an error when get a key" - if fvs["SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS"] == addr: + if fvs["SAI_BFD_SESSION_ATTR_DST_IP_ADDRESS"] == addr and fvs["SAI_BFD_SESSION_ATTR_MULTIHOP"] == "true": return entry return None @@ -487,13 +514,19 @@ def check_remove_state_db_routes(dvs, vnet, prefix): assert vnet + '|' + prefix not in keys -def check_routes_advertisement(dvs, prefix): +def check_routes_advertisement(dvs, prefix, profile=""): state_db = 
swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) tbl = swsscommon.Table(state_db, "ADVERTISE_NETWORK_TABLE") keys = tbl.getKeys() assert prefix in keys + if profile: + status, fvs = tbl.get(prefix) + assert status, "Got an error when get a key" + fvs = dict(fvs) + assert fvs['profile'] == profile + def check_remove_routes_advertisement(dvs, prefix): state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) @@ -503,6 +536,11 @@ def check_remove_routes_advertisement(dvs, prefix): assert prefix not in keys +def check_syslog(dvs, marker, err_log): + (exitcode, num) = dvs.runcmd(['sh', '-c', "awk \'/%s/,ENDFILE {print;}\' /var/log/syslog | grep \"%s\" | wc -l" % (marker, err_log)]) + assert num.strip() == "0" + + loopback_id = 0 def_vr_id = 0 switch_mac = None @@ -522,6 +560,7 @@ class VnetVxlanVrfTunnel(object): ASIC_NEXT_HOP_GROUP = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP" ASIC_NEXT_HOP_GROUP_MEMBER = "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP_GROUP_MEMBER" ASIC_BFD_SESSION = "ASIC_STATE:SAI_OBJECT_TYPE_BFD_SESSION" + APP_VNET_MONITOR = "VNET_MONITOR_TABLE" def __init__(self): self.tunnel_map_ids = set() @@ -621,6 +660,18 @@ def check_vxlan_tunnel(self, dvs, tunnel_name, src_ip): self.tunnel_map_map[tunnel_name] = tunnel_map_id self.tunnel[tunnel_name] = tunnel_id + def check_del_vxlan_tunnel(self, dvs): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + old_tunnel = get_deleted_entries(asic_db, self.ASIC_TUNNEL_TABLE, self.tunnel_ids, 1) + check_deleted_object(asic_db, self.ASIC_TUNNEL_TABLE, old_tunnel[0]) + self.tunnel_ids.remove(old_tunnel[0]) + + old_tunnel_maps = get_deleted_entries(asic_db, self.ASIC_TUNNEL_MAP, self.tunnel_map_ids, 4) + for old_tunnel_map in old_tunnel_maps: + check_deleted_object(asic_db, self.ASIC_TUNNEL_MAP, old_tunnel_map) + self.tunnel_map_ids.remove(old_tunnel_map) + def check_vxlan_tunnel_entry(self, dvs, tunnel_name, vnet_name, vni_id): asic_db = 
swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) @@ -848,6 +899,26 @@ def check_next_hop_group_member(self, dvs, nhg, ordered_ecmp, expected_endpoint, assert self.serialize_endpoint_group(endpoints) == expected_endpoint_str + def get_nexthop_groups(self, dvs, nhg): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + tbl_nhgm = swsscommon.Table(asic_db, self.ASIC_NEXT_HOP_GROUP_MEMBER) + tbl_nh = swsscommon.Table(asic_db, self.ASIC_NEXT_HOP) + nhg_data = {} + nhg_data['id'] = nhg + entries = set(tbl_nhgm.getKeys()) + nhg_data['endpoints'] = [] + for entry in entries: + status, fvs = tbl_nhgm.get(entry) + fvs = dict(fvs) + assert status, "Got an error when get a key" + if fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_GROUP_ID"] == nhg: + nh_key = fvs["SAI_NEXT_HOP_GROUP_MEMBER_ATTR_NEXT_HOP_ID"] + status, nh_fvs = tbl_nh.get(nh_key) + nh_fvs = dict(nh_fvs) + assert status, "Got an error when get a key" + endpoint = nh_fvs["SAI_NEXT_HOP_ATTR_IP"] + nhg_data['endpoints'].append(endpoint) + return nhg_data def check_vnet_ecmp_routes(self, dvs, name, endpoints, tunnel, mac=[], vni=[], route_ids=[], nhg="", ordered_ecmp="false", nh_seq_id=None): asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) endpoint_str = name + "|" + self.serialize_endpoint_group(endpoints) @@ -911,6 +982,74 @@ def check_vnet_ecmp_routes(self, dvs, name, endpoints, tunnel, mac=[], vni=[], r return new_route, new_nhg + def check_priority_vnet_ecmp_routes(self, dvs, name, endpoints_primary, tunnel, mac=[], vni=[], route_ids=[], count =1, prefix =""): + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + endpoint_str_primary = name + "|" + self.serialize_endpoint_group(endpoints_primary) + new_nhgs = [] + expected_attrs_primary = {} + for idx, endpoint in enumerate(endpoints_primary): + expected_attr = { + "SAI_NEXT_HOP_ATTR_TYPE": 
"SAI_NEXT_HOP_TYPE_TUNNEL_ENCAP", + "SAI_NEXT_HOP_ATTR_IP": endpoint, + "SAI_NEXT_HOP_ATTR_TUNNEL_ID": self.tunnel[tunnel], + } + if vni and vni[idx]: + expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_VNI': vni[idx]}) + if mac and mac[idx]: + expected_attr.update({'SAI_NEXT_HOP_ATTR_TUNNEL_MAC': mac[idx]}) + expected_attrs_primary[endpoint] = expected_attr + + if len(endpoints_primary) == 1: + if route_ids: + new_route = route_ids + else: + new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count) + return new_route + else : + new_nhgs = get_all_created_entries(asic_db, self.ASIC_NEXT_HOP_GROUP, self.nhgs) + found_match = False + + for nhg in new_nhgs: + nhg_data = self.get_nexthop_groups(dvs, nhg) + eplist = self.serialize_endpoint_group(nhg_data['endpoints']) + if eplist == self.serialize_endpoint_group(endpoints_primary): + self.nhg_ids[endpoint_str_primary] = nhg + found_match = True + + assert found_match, "the expected Nexthop group was not found." + + # Check routes in ingress VRF + expected_nhg_attr = { + "SAI_NEXT_HOP_GROUP_ATTR_TYPE": "SAI_NEXT_HOP_GROUP_TYPE_DYNAMIC_UNORDERED_ECMP", + } + for nhg in new_nhgs: + check_object(asic_db, self.ASIC_NEXT_HOP_GROUP, nhg, expected_nhg_attr) + + # Check nexthop group member + self.check_next_hop_group_member(dvs, self.nhg_ids[endpoint_str_primary], "false", endpoints_primary, expected_attrs_primary) + + if route_ids: + new_route = route_ids + else: + new_route = get_created_entries(asic_db, self.ASIC_ROUTE_ENTRY, self.routes, count) + + #Check if the route is in expected VRF + active_nhg = self.nhg_ids[endpoint_str_primary] + for idx in range(count): + if prefix != "" and prefix not in new_route[idx] : + continue + check_object(asic_db, self.ASIC_ROUTE_ENTRY, new_route[idx], + { + "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": active_nhg, + } + ) + rt_key = json.loads(new_route[idx]) + + + self.routes.update(new_route) + del self.nhg_ids[endpoint_str_primary] + return new_route + def 
check_del_vnet_routes(self, dvs, name, prefixes=[]): # TODO: Implement for VRF VNET @@ -924,12 +1063,73 @@ def _access_function(): return True + def check_custom_monitor_app_db(self, dvs, prefix, endpoint, packet_type, overlay_dmac): + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + key = endpoint + ':' + prefix + check_object(app_db, self.APP_VNET_MONITOR, key, + { + "packet_type": packet_type, + "overlay_dmac" : overlay_dmac + } + ) + return True + + def check_custom_monitor_deleted(self, dvs, prefix, endpoint): + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + key = endpoint + ':' + prefix + check_deleted_object(app_db, self.APP_VNET_MONITOR, key) class TestVnetOrch(object): def get_vnet_obj(self): return VnetVxlanVrfTunnel() + def setup_db(self, dvs): + self.pdb = dvs.get_app_db() + self.adb = dvs.get_asic_db() + self.cdb = dvs.get_config_db() + self.sdb = dvs.get_state_db() + + def clear_srv_config(self, dvs): + dvs.servers[0].runcmd("ip address flush dev eth0") + dvs.servers[1].runcmd("ip address flush dev eth0") + dvs.servers[2].runcmd("ip address flush dev eth0") + dvs.servers[3].runcmd("ip address flush dev eth0") + + def set_admin_status(self, interface, status): + self.cdb.update_entry("PORT", interface, {"admin_status": status}) + + def create_l3_intf(self, interface, vrf_name): + if len(vrf_name) == 0: + self.cdb.create_entry("INTERFACE", interface, {"NULL": "NULL"}) + else: + self.cdb.create_entry("INTERFACE", interface, {"vrf_name": vrf_name}) + + def add_ip_address(self, interface, ip): + self.cdb.create_entry("INTERFACE", interface + "|" + ip, {"NULL": "NULL"}) + + def remove_ip_address(self, interface, ip): + self.cdb.delete_entry("INTERFACE", interface + "|" + ip) + + def create_route_entry(self, key, pairs): + tbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "ROUTE_TABLE") + fvs = swsscommon.FieldValuePairs(list(pairs.items())) + tbl.set(key, fvs) + + def remove_route_entry(self, key): 
+ tbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "ROUTE_TABLE") + tbl._del(key) + + def check_route_entries(self, destinations): + def _access_function(): + route_entries = self.adb.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY") + route_destinations = [json.loads(route_entry)["dest"] + for route_entry in route_entries] + return (all(destination in route_destinations for destination in destinations), None) + + wait_for_result(_access_function) + + @pytest.fixture(params=["true", "false"]) def ordered_ecmp(self, dvs, request): @@ -1049,6 +1249,9 @@ def test_vnet_orch_1(self, dvs, testlog): delete_vnet_entry(dvs, 'Vnet_2000') vnet_obj.check_del_vnet_entry(dvs, 'Vnet_2000') + delete_vxlan_tunnel(dvs, tunnel_name) + vnet_obj.check_del_vxlan_tunnel(dvs) + ''' Test 2 - Two VNets, One HSMs per VNet ''' @@ -1175,6 +1378,9 @@ def test_vnet_orch_2(self, dvs, testlog): delete_vnet_entry(dvs, 'Vnet_2') vnet_obj.check_del_vnet_entry(dvs, 'Vnet_2') + delete_vxlan_tunnel(dvs, tunnel_name) + vnet_obj.check_del_vxlan_tunnel(dvs) + ''' Test 3 - Two VNets, One HSMs per VNet, Peering ''' @@ -1255,6 +1461,9 @@ def test_vnet_orch_3(self, dvs, testlog): delete_vnet_entry(dvs, 'Vnet_20') vnet_obj.check_del_vnet_entry(dvs, 'Vnet_20') + delete_vxlan_tunnel(dvs, tunnel_name) + vnet_obj.check_del_vxlan_tunnel(dvs) + ''' Test 4 - IPv6 Vxlan tunnel test ''' @@ -1396,6 +1605,9 @@ def test_vnet_orch_4(self, dvs, testlog): delete_vnet_entry(dvs, 'Vnet3001') vnet_obj.check_del_vnet_entry(dvs, 'Vnet3001') + delete_vxlan_tunnel(dvs, tunnel_name) + vnet_obj.check_del_vxlan_tunnel(dvs) + ''' Test 5 - Default VNet test ''' @@ -1412,6 +1624,9 @@ def test_vnet_orch_5(self, dvs, testlog): vnet_obj.check_default_vnet_entry(dvs, 'Vnet_5') vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_5', '4789') + delete_vnet_entry(dvs, 'Vnet_5') + vnet_obj.check_default_vnet_entry(dvs, 'Vnet_5') + ''' Test 6 - Test VxLAN tunnel with multiple maps ''' @@ -1431,6 +1646,10 @@ def 
test_vnet_vxlan_multi_map(self, dvs, testlog): create_vxlan_tunnel_map(dvs, tunnel_name, 'map_1', 'Vlan1000', '1000') + delete_vnet_entry(dvs, 'Vnet1') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet1') + delete_vxlan_tunnel(dvs, tunnel_name) + ''' Test 7 - Test for vnet tunnel routes with ECMP nexthop group ''' @@ -1499,6 +1718,7 @@ def test_vnet_orch_7(self, dvs, ordered_ecmp, testlog): delete_vnet_entry(dvs, vnet_name) vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) ''' Test 8 - Test for ipv6 vnet tunnel routes with ECMP nexthop group @@ -1585,6 +1805,7 @@ def test_vnet_orch_8(self, dvs, ordered_ecmp, testlog): delete_vnet_entry(dvs, vnet_name) vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) ''' @@ -1716,6 +1937,7 @@ def test_vnet_orch_9(self, dvs, ordered_ecmp, testlog): delete_vnet_entry(dvs, vnet_name) vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) ''' @@ -1852,7 +2074,7 @@ def test_vnet_orch_10(self, dvs, ordered_ecmp, testlog): delete_vnet_entry(dvs, vnet_name) vnet_obj.check_del_vnet_entry(dvs, vnet_name) - + delete_vxlan_tunnel(dvs, tunnel_name) ''' Test 11 - Test for vnet tunnel routes with both single endpoint and ECMP group with endpoint health monitor @@ -1960,6 +2182,7 @@ def test_vnet_orch_11(self, dvs, ordered_ecmp, testlog): delete_vnet_entry(dvs, vnet_name) vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) ''' @@ -1981,7 +2204,7 @@ def test_vnet_orch_12(self, dvs, testlog): vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '12.12.12.12') vnet_obj.fetch_exist_entries(dvs) - create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet12', '12.0.0.1,12.0.0.2,12.0.0.3', ep_monitor='12.1.0.1,12.1.0.2,12.1.0.3') + create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet12', '12.0.0.1,12.0.0.2,12.0.0.3', ep_monitor='12.1.0.1,12.1.0.2,12.1.0.3', profile="test_profile") # default bfd status is down, route should not be programmed in this 
status vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.1/32"]) @@ -1995,14 +2218,14 @@ def test_vnet_orch_12(self, dvs, testlog): time.sleep(2) route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.2', '12.0.0.3'], tunnel_name) check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.2', '12.0.0.3']) - check_routes_advertisement(dvs, "100.100.1.1/32") + check_routes_advertisement(dvs, "100.100.1.1/32", "test_profile") # Remove endpoint from group if it goes down update_bfd_session_state(dvs, '12.1.0.2', 'Down') time.sleep(2) route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.3']) - check_routes_advertisement(dvs, "100.100.1.1/32") + check_routes_advertisement(dvs, "100.100.1.1/32", "test_profile") # Create another tunnel route with endpoint group overlapped with route1 vnet_obj.fetch_exist_entries(dvs) @@ -2024,15 +2247,15 @@ def test_vnet_orch_12(self, dvs, testlog): route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1'], tunnel_name, route_ids=route1, nhg=nhg1_1) check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1']) - check_routes_advertisement(dvs, "100.100.1.1/32") + check_routes_advertisement(dvs, "100.100.1.1/32", "test_profile") # Set the route1 to a new group - set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet12', '12.0.0.1,12.0.0.2,12.0.0.3,12.0.0.4', ep_monitor='12.1.0.1,12.1.0.2,12.1.0.3,12.1.0.4') + set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet12', '12.0.0.1,12.0.0.2,12.0.0.3,12.0.0.4', ep_monitor='12.1.0.1,12.1.0.2,12.1.0.3,12.1.0.4', profile="test_profile2") update_bfd_session_state(dvs, '12.1.0.4', 'Up') time.sleep(2) route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.4'], tunnel_name, route_ids=route1) check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', 
'12.0.0.4']) - check_routes_advertisement(dvs, "100.100.1.1/32") + check_routes_advertisement(dvs, "100.100.1.1/32", "test_profile2") # Check the previous nexthop group is removed vnet_obj.fetch_exist_entries(dvs) @@ -2043,7 +2266,7 @@ def test_vnet_orch_12(self, dvs, testlog): time.sleep(2) route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet12', ['12.0.0.1', '12.0.0.2', '12.0.0.4'], tunnel_name, route_ids=route1, nhg=nhg1_2) check_state_db_routes(dvs, 'Vnet12', "100.100.1.1/32", ['12.0.0.1', '12.0.0.2', '12.0.0.4']) - check_routes_advertisement(dvs, "100.100.1.1/32") + check_routes_advertisement(dvs, "100.100.1.1/32", "test_profile2") # Set all endpoint to down state update_bfd_session_state(dvs, '12.1.0.1', 'Down') @@ -2089,7 +2312,1121 @@ def test_vnet_orch_12(self, dvs, testlog): delete_vnet_entry(dvs, 'Vnet12') vnet_obj.check_del_vnet_entry(dvs, 'Vnet12') + delete_vxlan_tunnel(dvs, tunnel_name) + + ''' + Test 13 - Test for configuration idempotent behaviour + ''' + def test_vnet_orch_13(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_13' + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') + create_vnet_entry(dvs, 'Vnet13', tunnel_name, '10008', "") + + vnet_obj.check_vnet_entry(dvs, 'Vnet13') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet13', '10008') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') + + # Create an ECMP tunnel route + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet13', 'fd:8:1::1,fd:8:1::2,fd:8:1::3') + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet13', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3'], tunnel_name) + check_state_db_routes(dvs, 'Vnet13', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + + # readd same tunnel again + set_vnet_routes(dvs, "fd:8:10::32/128", 
'Vnet13', 'fd:8:1::1,fd:8:1::2,fd:8:1::3') + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet13', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, 'Vnet13', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + # Check only one group is present + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 in vnet_obj.nhgs + assert len(vnet_obj.nhgs) == 1 + assert nhg1_1 == nhg1_2 + + # Remove one of the tunnel routes + delete_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet13') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet13', ["fd:8:10::32/128"]) + check_remove_state_db_routes(dvs, 'Vnet13', "fd:8:10::32/128") + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + + # Check the nexthop group still exists + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_2 not in vnet_obj.nhgs + assert len(vnet_obj.nhgs) == 0 + delete_vnet_entry(dvs, 'Vnet13') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet13') + + ''' + Test 14 - Test for configuration idempotent behaviour 2 + ''' + def test_vnet_orch_14(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_14' + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') + create_vnet_entry(dvs, 'Vnet14', tunnel_name, '10008', "") + + vnet_obj.check_vnet_entry(dvs, 'Vnet14') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet14', '10008') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') + + # Create an ECMP tunnel route + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet14', 'fd:8:1::1,fd:8:1::2,fd:8:1::3') + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet14', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3'], tunnel_name) + check_state_db_routes(dvs, 'Vnet14', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3']) + # The default Vnet setting does 
not advertise prefix + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + + # readd same tunnel again + set_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet14', 'fd:8:1::1,fd:8:1::2,fd:8:1::3') + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet14', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, 'Vnet14', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + + #update nexthops for the same tunnel. + set_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet14', 'fd:8:1::1,fd:8:1::2,fd:8:1::3,fd:8:1::4') + route1, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet14', ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, 'Vnet14', "fd:8:10::32/128", ['fd:8:1::1', 'fd:8:1::2', 'fd:8:1::3', 'fd:8:1::4']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + + # Check the previous nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 not in vnet_obj.nhgs + assert nhg1_2 in vnet_obj.nhgs + + # Remove the tunnel route + delete_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet14') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet14', ["fd:8:10::32/128"]) + check_remove_state_db_routes(dvs, 'Vnet14', "fd:8:10::32/128") + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + # Remove the tunnel route + delete_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet14') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet14', ["fd:8:10::32/128"]) + check_remove_state_db_routes(dvs, 'Vnet14', "fd:8:10::32/128") + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + + # Check the nexthop group still exists + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_2 not in vnet_obj.nhgs + assert nhg1_1 not in vnet_obj.nhgs + + delete_vnet_entry(dvs, 'Vnet14') + 
vnet_obj.check_del_vnet_entry(dvs, 'Vnet14') + delete_vxlan_tunnel(dvs, tunnel_name) + + ''' + Test 15 - Test for configuration idempotent behaviour single endpoint + ''' + def test_vnet_orch_15(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_15' + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') + create_vnet_entry(dvs, 'Vnet15', tunnel_name, '10008', "") + + vnet_obj.check_vnet_entry(dvs, 'Vnet15') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet15', '10008') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:8::32') + + # Create an tunnel route + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet15', 'fd:8:1::1') + route1 = vnet_obj.check_vnet_routes(dvs, 'Vnet15', 'fd:8:1::1', tunnel_name) + check_state_db_routes(dvs, 'Vnet15', "fd:8:10::32/128", ['fd:8:1::1']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + + # readd same tunnel again + set_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet15', 'fd:8:1::1') + route1 = vnet_obj.check_vnet_routes(dvs, 'Vnet15', 'fd:8:1::1', tunnel_name, route_ids=route1) + check_state_db_routes(dvs, 'Vnet15', "fd:8:10::32/128", ['fd:8:1::1']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + # Check only one group is present + vnet_obj.fetch_exist_entries(dvs) + assert len(vnet_obj.nhops) == 1 + + # Remove one of the tunnel routes + delete_vnet_routes(dvs, "fd:8:10::32/128", 'Vnet15') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet15', ["fd:8:10::32/128"]) + check_remove_state_db_routes(dvs, 'Vnet15', "fd:8:10::32/128") + check_remove_routes_advertisement(dvs, "fd:8:10::32/128") + + # Check the nexthop group still exists + vnet_obj.fetch_exist_entries(dvs) + assert len(vnet_obj.nhops) == 0 + delete_vnet_entry(dvs, 'Vnet15') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet15') + 
delete_vxlan_tunnel(dvs, tunnel_name) + + ''' + Test 16 - Test for configuration idempotent behaviour single endpoint with BFD + ''' + def test_vnet_orch_16(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_16' + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, 'fd:8::33') + create_vnet_entry(dvs, 'Vnet16', tunnel_name, '10008', "") + + vnet_obj.check_vnet_entry(dvs, 'Vnet16') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet16', '10008') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:8::33') + + # Create a tunnel route + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "fd:8:11::32/128", 'Vnet16', 'fd:8:2::1', ep_monitor='fd:8:2::1') + update_bfd_session_state(dvs, 'fd:8:2::1', 'Up') + time.sleep(2) + + route1 = vnet_obj.check_vnet_routes(dvs, 'Vnet16', 'fd:8:2::1', tunnel_name) + check_state_db_routes(dvs, 'Vnet16', "fd:8:11::32/128", ['fd:8:2::1']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:8:11::32/128") + + # readd same tunnel again + set_vnet_routes(dvs, "fd:8:11::32/128", 'Vnet16', 'fd:8:2::1', ep_monitor='fd:8:2::1') + route1 = vnet_obj.check_vnet_routes(dvs, 'Vnet16', 'fd:8:2::1', tunnel_name, route_ids=route1) + check_state_db_routes(dvs, 'Vnet16', "fd:8:11::32/128", ['fd:8:2::1']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:8:11::32/128") + # Check only one group is present + vnet_obj.fetch_exist_entries(dvs) + assert len(vnet_obj.nhops) == 1 + + update_bfd_session_state(dvs, 'fd:8:2::1', 'Down') + time.sleep(2) + # readd same tunnel again + set_vnet_routes(dvs, "fd:8:11::32/128", 'Vnet16', 'fd:8:2::1', ep_monitor='fd:8:2::1') + + update_bfd_session_state(dvs, 'fd:8:2::1', 'Up') + time.sleep(2) + + route1 = vnet_obj.check_vnet_routes(dvs, 'Vnet16', 'fd:8:2::1', tunnel_name,route_ids=route1) + check_state_db_routes(dvs, 'Vnet16', "fd:8:11::32/128", 
['fd:8:2::1']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "fd:8:11::32/128") + + + # Remove one of the tunnel routes + delete_vnet_routes(dvs, "fd:8:11::32/128", 'Vnet16') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet16', ["fd:8:11::32/128"]) + check_remove_state_db_routes(dvs, 'Vnet16', "fd:8:11::32/128") + check_remove_routes_advertisement(dvs, "fd:8:11::32/128") + + # Check the nexthop group still exists + vnet_obj.fetch_exist_entries(dvs) + assert len(vnet_obj.nhops) == 0 + delete_vnet_entry(dvs, 'Vnet16') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet16') + delete_vxlan_tunnel(dvs, tunnel_name) + + ''' + Test 17 - Test for configuration idempotent behaviour multiple endpoint with BFD + ''' + def test_vnet_orch_17(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_17' + + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + create_vnet_entry(dvs, 'Vnet17', tunnel_name, '10017', "") + + vnet_obj.check_vnet_entry(dvs, 'Vnet17') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet17', '10017') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.1.1/32", 'Vnet17', '9.0.0.1,9.0.0.2,9.0.0.3', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') + + # default bfd status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, 'Vnet17', ["100.100.1.1/32"]) + check_state_db_routes(dvs, 'Vnet17', "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + #readd the route + set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet17', '9.0.0.1,9.0.0.2,9.0.0.3',ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet17', ["100.100.1.1/32"]) + check_state_db_routes(dvs, 'Vnet17', "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Route should be properly configured when all bfd 
session states go up + update_bfd_session_state(dvs, '9.1.0.1', 'Up') + update_bfd_session_state(dvs, '9.1.0.2', 'Up') + update_bfd_session_state(dvs, '9.1.0.3', 'Up') + time.sleep(2) + + route1, nhg1_1 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet17', ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name) + check_state_db_routes(dvs, 'Vnet17', "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + #readd the active route + set_vnet_routes(dvs, "100.100.1.1/32", 'Vnet17', '9.0.0.1,9.0.0.2,9.0.0.3',ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3') + route2, nhg1_2 = vnet_obj.check_vnet_ecmp_routes(dvs, 'Vnet17', ['9.0.0.1', '9.0.0.2', '9.0.0.3'], tunnel_name, route_ids=route1, nhg=nhg1_1) + check_state_db_routes(dvs, 'Vnet17', "100.100.1.1/32", ['9.0.0.1', '9.0.0.2', '9.0.0.3']) + # The default Vnet setting does not advertise prefix + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + assert nhg1_1 == nhg1_2 + assert len(vnet_obj.nhgs) == 1 + + # Remove tunnel route + delete_vnet_routes(dvs, "100.100.1.1/32", 'Vnet17') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet17', ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, 'Vnet17', "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + + # Check the corresponding nexthop group is removed + vnet_obj.fetch_exist_entries(dvs) + assert nhg1_1 not in vnet_obj.nhgs + # Check the BFD session specific to the endpoint group is removed while others exist + check_del_bfd_session(dvs, ['9.1.0.1', '9.1.0.2', '9.1.0.3']) + + delete_vnet_entry(dvs, 'Vnet17') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet17') + delete_vxlan_tunnel(dvs, tunnel_name) + + ''' + Test 18 - Test for priority vnet tunnel routes with ECMP nexthop group. test primary secondary switchover. 
+ ''' + def test_vnet_orch_18(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + tunnel_name = 'tunnel_18' + vnet_name = 'vnet18' + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + create_vnet_entry(dvs, vnet_name, tunnel_name, '10018', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10018') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4', primary ='9.1.0.1,9.1.0.2', monitoring='custom', adv_prefix='100.100.1.0/24') + + # default monitor status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.0/24") + + # Route should be properly configured when all monitor session states go up. Only primary Endpoints should be in use. + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'up') + + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") + + # Remove first primary endpoint from group. 
+ update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'down') + time.sleep(2) + route1= vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") + + # Switch to secondary if both primary down + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'down') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3','9.1.0.4'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.3','9.1.0.4']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") + + # removing first endpoint of secondary. route should remain on secondary NHG + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'down') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.4'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.4']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") + + # removing last endpoint of secondary. route should be removed + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'down') + time.sleep(2) + + new_nhgs = get_all_created_entries(asic_db, vnet_obj.ASIC_NEXT_HOP_GROUP, []) + assert len(new_nhgs) == 0 + check_remove_routes_advertisement(dvs, "100.100.1.0/24") + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + + #Route should come up with secondary endpoints. 
+ update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'up') + + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3','9.1.0.4'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.3','9.1.0.4']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") + + #Route should be switched to the primary endpoint. + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'up') + time.sleep(2) + route1= vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") + + #Route should be updated with the second primary endpoint. + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'up') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") + + #Route should not be impacted by secondary endpoints going down. + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'down') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'down') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") + + #Route should not be impacted by secondary endpoints coming back up. 
+ update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'up') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24") + + # Remove tunnel route 1 + delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) + time.sleep(2) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.0/24") + + # Confirm the monitor sessions are removed + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.1") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.2") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.3") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.4") + + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) + + ''' + Test 19 - Test for 2 priority vnet tunnel routes with overlapping primary secondary ECMP nexthop group. 
+ ''' + def test_vnet_orch_19(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + tunnel_name = 'tunnel_19' + vnet_name = 'Vnet19' + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.19') + create_vnet_entry(dvs, vnet_name, tunnel_name, '10019', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10019') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.19') + + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4', profile="Test_profile", primary ='9.1.0.1,9.1.0.2', monitoring='custom', adv_prefix='100.100.1.0/24') + create_vnet_routes(dvs, "200.100.1.1/32", vnet_name, '9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4', ep_monitor='9.1.0.1,9.1.0.2,9.1.0.3,9.1.0.4', primary ='9.1.0.3,9.1.0.4', monitoring='custom', adv_prefix='200.100.1.0/24') + + # default monitor session status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.0/24") + + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["200.100.1.1/32"]) + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "200.100.1.0/24") + + # Route should be properly configured when all monitor session states go up. Only primary Endpoints should be in use. 
+ update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'up') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1'], tunnel_name, prefix="100.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + + update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.1', 'up') + time.sleep(2) + route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1'], tunnel_name, route_ids=route1, prefix="200.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.1']) + check_routes_advertisement(dvs, "200.100.1.0/24", "") + + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'up') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1, prefix="100.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + + update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.2', 'up') + time.sleep(2) + route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1, prefix="200.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.1','9.1.0.2']) + check_routes_advertisement(dvs, "200.100.1.0/24", "") + + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'up') + update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.3', 'up') + time.sleep(2) + + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1, prefix="100.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + + route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3'], 
tunnel_name, route_ids=route1, prefix="200.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.3']) + check_routes_advertisement(dvs, "200.100.1.0/24", "") + + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'up') + update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.4', 'up') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.1','9.1.0.2'], tunnel_name, route_ids=route1, prefix="100.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1','9.1.0.2']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + + route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3','9.1.0.4'], tunnel_name, route_ids=route1, prefix="200.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.3','9.1.0.4']) + check_routes_advertisement(dvs, "200.100.1.0/24", "") + + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'down') + update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.1', 'down') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.2'], tunnel_name, route_ids=route1, prefix="100.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.2']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + + route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3','9.1.0.4'], tunnel_name, route_ids=route1, prefix="200.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.3','9.1.0.4']) + check_routes_advertisement(dvs, "200.100.1.0/24", "") + + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'down') + update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.2', 'down') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3','9.1.0.4'], tunnel_name, route_ids=route1, prefix="100.100.1.1/32") + check_state_db_routes(dvs, 
vnet_name, "100.100.1.1/32", ['9.1.0.3','9.1.0.4']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + + route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.3','9.1.0.4'], tunnel_name, route_ids=route1, prefix="200.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.3','9.1.0.4']) + check_routes_advertisement(dvs, "200.100.1.0/24", "") + + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.3', 'down') + update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.3', 'down') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.4'], tunnel_name, route_ids=route1, prefix="100.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.4']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + + route2 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['9.1.0.4'], tunnel_name, route_ids=route1, prefix="200.100.1.1/32") + check_state_db_routes(dvs, vnet_name, "200.100.1.1/32", ['9.1.0.4']) + check_routes_advertisement(dvs, "200.100.1.0/24", "") + + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.4', 'down') + update_monitor_session_state(dvs, '200.100.1.1/32', '9.1.0.4', 'down') + time.sleep(2) + + #we should still have two NHGs but no active route + new_nhgs = get_all_created_entries(asic_db, vnet_obj.ASIC_NEXT_HOP_GROUP, vnet_obj.nhgs) + assert len(new_nhgs) == 0 + check_remove_routes_advertisement(dvs, "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "200.100.1.1/32") + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["200.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + check_remove_state_db_routes(dvs, vnet_name, "200.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.0/24") + check_remove_routes_advertisement(dvs, "200.100.1.0/24") + + # Remove tunnel route 1 + 
delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) + delete_vnet_routes(dvs, "200.100.1.1/32", vnet_name) + + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["200.100.1.1/32"]) + + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + check_remove_state_db_routes(dvs, vnet_name, "200.100.1.1/32") + + check_remove_routes_advertisement(dvs, "100.100.1.0/24") + check_remove_routes_advertisement(dvs, "200.100.1.0/24") + + + # Confirm the monitor sessions are removed + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.1") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.2") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.3") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.4") + + vnet_obj.check_custom_monitor_deleted(dvs, "200.100.1.1/32", "9.1.0.1") + vnet_obj.check_custom_monitor_deleted(dvs, "200.100.1.1/32", "9.1.0.2") + vnet_obj.check_custom_monitor_deleted(dvs, "200.100.1.1/32", "9.1.0.3") + vnet_obj.check_custom_monitor_deleted(dvs, "200.100.1.1/32", "9.1.0.4") + + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) + + ''' + Test 20 - Test for Single endpoint priority vnet tunnel routes. Test primary secondary switchover.
+ ''' + def test_vnet_orch_20(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + tunnel_name = 'tunnel_20' + vnet_name = 'Vnet20' + asic_db = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0) + + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + create_vnet_entry(dvs, vnet_name, tunnel_name, '10020', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10020') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.9') + + vnet_obj.fetch_exist_entries(dvs) + create_vnet_routes(dvs, "100.100.1.1/32", vnet_name, '9.1.0.1,9.1.0.2', ep_monitor='9.1.0.1,9.1.0.2', primary ='9.1.0.1', profile="Test_profile", monitoring='custom', adv_prefix='100.100.1.0/24') + + # default monitor session status is down, route should not be programmed in this status + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", []) + check_remove_routes_advertisement(dvs, "100.100.1.0/24") + + # Route should be properly configured when all monitor session states go up. Only primary Endpoints should be in use. 
+ update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'up') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'up') + time.sleep(2) + nhids = get_all_created_entries(asic_db, vnet_obj.ASIC_NEXT_HOP,set()) + tbl_nh = swsscommon.Table(asic_db, vnet_obj.ASIC_NEXT_HOP) + nexthops = dict() + for nhid in nhids: + status, nh_fvs = tbl_nh.get(nhid) + nh_fvs = dict(nh_fvs) + for key in nh_fvs.keys(): + if key == 'SAI_NEXT_HOP_ATTR_IP': + nexthops[nh_fvs[key]] = nhid + assert len(nexthops.keys()) == 1 + + route = get_created_entries(asic_db, vnet_obj.ASIC_ROUTE_ENTRY, vnet_obj.routes, 1) + check_object(asic_db, vnet_obj.ASIC_ROUTE_ENTRY, route[0], + { + "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": nexthops['9.1.0.1'], + } + ) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'down') + time.sleep(2) + + route = get_created_entries(asic_db, vnet_obj.ASIC_ROUTE_ENTRY, vnet_obj.routes, 1) + check_object(asic_db, vnet_obj.ASIC_ROUTE_ENTRY, route[0], + { + "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": nexthops['9.1.0.1'], + } + ) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'down') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'up') + + time.sleep(2) + + nhids = get_all_created_entries(asic_db, vnet_obj.ASIC_NEXT_HOP,set()) + tbl_nh = swsscommon.Table(asic_db, vnet_obj.ASIC_NEXT_HOP) + nexthops = dict() + for nhid in nhids: + status, nh_fvs = tbl_nh.get(nhid) + nh_fvs = dict(nh_fvs) + for key in nh_fvs.keys(): + if key == 'SAI_NEXT_HOP_ATTR_IP': + nexthops[nh_fvs[key]] = nhid + assert len(nexthops.keys()) == 1 + + route = get_created_entries(asic_db, vnet_obj.ASIC_ROUTE_ENTRY, vnet_obj.routes, 1) + check_object(asic_db, 
vnet_obj.ASIC_ROUTE_ENTRY, route[0], + { + "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": nexthops['9.1.0.2'], + } + ) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.2']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'up') + time.sleep(2) + + nhids = get_all_created_entries(asic_db, vnet_obj.ASIC_NEXT_HOP,set()) + tbl_nh = swsscommon.Table(asic_db, vnet_obj.ASIC_NEXT_HOP) + nexthops = dict() + for nhid in nhids: + status, nh_fvs = tbl_nh.get(nhid) + nh_fvs = dict(nh_fvs) + for key in nh_fvs.keys(): + if key == 'SAI_NEXT_HOP_ATTR_IP': + nexthops[nh_fvs[key]] = nhid + assert len(nexthops.keys()) == 1 + + route = get_created_entries(asic_db, vnet_obj.ASIC_ROUTE_ENTRY, vnet_obj.routes, 1) + check_object(asic_db, vnet_obj.ASIC_ROUTE_ENTRY, route[0], + { + "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID": nexthops['9.1.0.1'], + } + ) + check_state_db_routes(dvs, vnet_name, "100.100.1.1/32", ['9.1.0.1']) + check_routes_advertisement(dvs, "100.100.1.0/24", "Test_profile") + + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.1', 'down') + update_monitor_session_state(dvs, '100.100.1.1/32', '9.1.0.2', 'down') + + time.sleep(2) + + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "200.100.1.0/24") + + + # Remove tunnel route 1 + delete_vnet_routes(dvs, "100.100.1.1/32", vnet_name) + + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.1/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.1/32") + check_remove_routes_advertisement(dvs, "100.100.1.0/24") + + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.1") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.1/32", "9.1.0.2") + + delete_vnet_entry(dvs, vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) + + ''' + Test 21 - 
Test for priority vxlan tunnel with adv_prefix, adv profile. test route re-addition, route update, primary seocndary swap. + ''' + def test_vnet_orch_21(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_21' + vnet_name = "Vnet21" + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32') + create_vnet_entry(dvs, vnet_name, tunnel_name, '10021', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10021') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, 'fd:10::32') + vnet_obj.fetch_exist_entries(dvs) + + #Add first Route + create_vnet_routes(dvs, "fd:10:10::1/128", vnet_name, 'fd:10:1::1,fd:10:1::2,fd:10:1::3,fd:10:1::4', ep_monitor='fd:10:2::1,fd:10:2::2,fd:10:2::3,fd:10:2::4', profile = "test_prf", primary ='fd:10:1::3,fd:10:1::4',monitoring='custom', adv_prefix="fd:10:10::/64") + update_monitor_session_state(dvs, 'fd:10:10::1/128', 'fd:10:2::1', 'up') + update_monitor_session_state(dvs, 'fd:10:10::1/128', 'fd:10:2::2', 'up') + + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['fd:10:1::1','fd:10:1::2'], tunnel_name, prefix="fd:10:10::1/128") + check_state_db_routes(dvs, vnet_name, "fd:10:10::1/128", ['fd:10:1::1,fd:10:1::2']) + check_routes_advertisement(dvs, "fd:10:10::/64", "test_prf") + + #add 2nd route + create_vnet_routes(dvs, "fd:10:10::21/128", vnet_name, 'fd:11:1::1,fd:11:1::2,fd:11:1::3,fd:11:1::4', ep_monitor='fd:11:2::1,fd:11:2::2,fd:11:2::3,fd:11:2::4', profile = "test_prf", primary ='fd:11:1::1,fd:11:1::2',monitoring='custom', adv_prefix='fd:10:10::/64') + update_monitor_session_state(dvs, 'fd:10:10::21/128', 'fd:11:2::1', 'up') + update_monitor_session_state(dvs, 'fd:10:10::21/128', 'fd:11:2::2', 'up') + update_monitor_session_state(dvs, 'fd:10:10::21/128', 'fd:11:2::3', 'up') + update_monitor_session_state(dvs, 'fd:10:10::21/128', 
'fd:11:2::4', 'up') + + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['fd:11:1::1','fd:11:1::2'], tunnel_name, route_ids=route1, prefix="fd:10:10::21/128") + check_state_db_routes(dvs, vnet_name, "fd:10:10::21/128", ['fd:11:1::1,fd:11:1::2']) + check_routes_advertisement(dvs, "fd:10:10::/64", "test_prf") + + #remove first route + delete_vnet_routes(dvs, "fd:10:10::1/128", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["fd:10:10::1/128"]) + check_remove_state_db_routes(dvs, 'Vnet12', "fd:10:10::1/128") + + #adv should still be up. + check_routes_advertisement(dvs, "fd:10:10::/64") + + #add 3rd route + create_vnet_routes(dvs, "fd:10:10::31/128", vnet_name, 'fd:11:1::1,fd:11:1::2,fd:11:1::3,fd:11:1::4', ep_monitor='fd:11:2::1,fd:11:2::2,fd:11:2::3,fd:11:2::4', profile = "test_prf", primary ='fd:11:1::1,fd:11:1::2',monitoring='custom', adv_prefix='fd:10:10::/64') + update_monitor_session_state(dvs, 'fd:10:10::31/128', 'fd:11:2::1', 'up') + update_monitor_session_state(dvs, 'fd:10:10::31/128', 'fd:11:2::2', 'up') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['fd:11:1::1','fd:11:1::2'], tunnel_name, route_ids=route1, prefix="fd:10:10::31/128") + check_state_db_routes(dvs, vnet_name, "fd:10:10::31/128", ['fd:11:1::1,fd:11:1::2']) + check_routes_advertisement(dvs, "fd:10:10::/64", "test_prf") + + #delete 2nd route + delete_vnet_routes(dvs, "fd:10:10::21/128", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["fd:10:10::21/128"]) + check_remove_state_db_routes(dvs, 'Vnet12', "fd:10:10::21/128") + + #adv should still be up. + check_routes_advertisement(dvs, "fd:10:10::/64") + + #remove 3rd route + delete_vnet_routes(dvs, "fd:10:10::31/128", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["fd:10:10::31/128"]) + check_remove_state_db_routes(dvs, 'Vnet12', "fd:10:10::31/128") + + #adv should be gone. 
+ check_remove_routes_advertisement(dvs, "fd:10:10::/64") + delete_vnet_entry(dvs,vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) + + ''' + Test 22 - Test for vxlan custom monitoring with adv_prefix. Add route twice and change nexthops case + ''' + def test_vnet_orch_22(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_22' + vnet_name = "Vnet22" + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.3') + create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10022') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.3') + + vnet_obj.fetch_exist_entries(dvs) + #Add first Route + create_vnet_routes(dvs, "100.100.1.11/32", vnet_name, '19.0.0.1,19.0.0.2,19.0.0.3', ep_monitor='19.1.0.1,19.1.0.2,19.1.0.3', profile = "test_prf", primary ='19.0.0.1',monitoring='custom', adv_prefix='100.100.1.0/24') + update_monitor_session_state(dvs, '100.100.1.11/32', '19.1.0.1', 'up') + time.sleep(2) + vnet_obj.check_vnet_routes(dvs, vnet_name, '19.0.0.1', tunnel_name) + check_state_db_routes(dvs, vnet_name, "100.100.1.11/32", ['19.0.0.1']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + + #Add first Route again + create_vnet_routes(dvs, "100.100.1.11/32", vnet_name, '19.0.0.1,19.0.0.2,19.0.0.3', ep_monitor='19.1.0.1,19.1.0.2,19.1.0.3', profile = "test_prf", primary ='19.0.0.1',monitoring='custom', adv_prefix='100.100.1.0/24') + check_state_db_routes(dvs, vnet_name, "100.100.1.11/32", ['19.0.0.1']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + + #remove first route + delete_vnet_routes(dvs, "100.100.1.11/32", vnet_name) + 
vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.11/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.11/32") + + #adv should be gone. + check_remove_routes_advertisement(dvs, "100.100.1.0/24") + + #add 2nd route + create_vnet_routes(dvs, "100.100.1.57/32", vnet_name, '5.0.0.1,5.0.0.2,5.0.0.3,5.0.0.4', ep_monitor='5.1.0.1,5.1.0.2,5.1.0.3,5.1.0.4', profile = "test_prf", primary ='5.0.0.1,5.0.0.2',monitoring='custom', adv_prefix='100.100.1.0/24') + update_monitor_session_state(dvs, '100.100.1.57/32', '5.1.0.1', 'up') + update_monitor_session_state(dvs, '100.100.1.57/32', '5.1.0.2', 'up') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['5.0.0.1','5.0.0.2'], tunnel_name, prefix="100.100.1.57/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.57/32", ['5.0.0.1,5.0.0.2']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + + #modify 2nd route switch primary with secondary + create_vnet_routes(dvs, "100.100.1.57/32", vnet_name, '5.0.0.1,5.0.0.2,5.0.0.3,5.0.0.4', ep_monitor='5.1.0.1,5.1.0.2,5.1.0.3,5.1.0.4', profile = "test_prf", primary ='5.0.0.3,5.0.0.4',monitoring='custom', adv_prefix='100.100.1.0/24') + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['5.0.0.1','5.0.0.2'], tunnel_name, route_ids=route1, prefix="100.100.1.57/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.57/32", ['5.0.0.1','5.0.0.2']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + + #delete 2nd route + delete_vnet_routes(dvs, "100.100.1.57/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.57/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.57/32") + #adv should be gone.
+ check_remove_routes_advertisement(dvs, "100.100.1.0/24") + + #add 3rd route + create_vnet_routes(dvs, "100.100.1.67/32", vnet_name, '5.0.0.1,5.0.0.2,5.0.0.3,5.0.0.4', ep_monitor='5.1.0.1,5.1.0.2,5.1.0.3,5.1.0.4', profile = "test_prf", primary ='5.0.0.1,5.0.0.2',monitoring='custom', adv_prefix='100.100.1.0/24') + update_monitor_session_state(dvs, '100.100.1.67/32', '5.1.0.1', 'up') + update_monitor_session_state(dvs, '100.100.1.67/32', '5.1.0.2', 'up') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['5.0.0.1','5.0.0.2'], tunnel_name, prefix="100.100.1.67/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.67/32", ['5.0.0.1,5.0.0.2']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + + #modify 3rd route next hops to secondary + create_vnet_routes(dvs, "100.100.1.67/32", vnet_name, '5.0.0.1,5.0.0.2,5.0.0.3,5.0.0.4', ep_monitor='5.1.0.1,5.1.0.2,5.1.0.3,5.1.0.4', profile = "test_prf", primary ='5.0.0.3,5.0.0.4',monitoring='custom', adv_prefix='100.100.1.0/24') + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['5.0.0.1','5.0.0.2'], tunnel_name, route_ids=route1, prefix="100.100.1.67/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.67/32", ['5.0.0.1','5.0.0.2']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + + #modify 3rd route next hops to a new set. 
+ create_vnet_routes(dvs, "100.100.1.67/32", vnet_name, '5.0.0.5,5.0.0.6,5.0.0.7,5.0.0.8', ep_monitor='5.1.0.5,5.1.0.6,5.1.0.7,5.1.0.8', profile = "test_prf", primary ='5.0.0.5,5.0.0.6',monitoring='custom', adv_prefix='100.100.1.0/24') + update_monitor_session_state(dvs, '100.100.1.67/32', '5.1.0.5', 'up') + update_monitor_session_state(dvs, '100.100.1.67/32', '5.1.0.6', 'up') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['5.0.0.5','5.0.0.6'], tunnel_name, route_ids=route1, prefix="100.100.1.67/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.67/32", ['5.0.0.5,5.0.0.6']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + + update_monitor_session_state(dvs, '100.100.1.67/32', '5.1.0.7', 'up') + update_monitor_session_state(dvs, '100.100.1.67/32', '5.1.0.8', 'up') + + create_vnet_routes(dvs, "100.100.1.67/32", vnet_name, '5.0.0.5,5.0.0.6,5.0.0.7,5.0.0.8', ep_monitor='5.1.0.5,5.1.0.6,5.1.0.7,5.1.0.8', profile = "test_prf", primary ='5.0.0.7,5.0.0.8',monitoring='custom', adv_prefix='100.100.1.0/24') + time.sleep(2) + route1 = vnet_obj.check_priority_vnet_ecmp_routes(dvs, vnet_name, ['5.0.0.7','5.0.0.8'], tunnel_name, route_ids=route1, prefix="100.100.1.67/32") + check_state_db_routes(dvs, vnet_name, "100.100.1.67/32", ['5.0.0.7,5.0.0.8']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + + #delete 3rd route + delete_vnet_routes(dvs, "100.100.1.67/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.67/32"]) + check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.67/32") + #adv should be gone. 
+ check_remove_routes_advertisement(dvs, "100.100.1.0/24") + + #Add priority route with no secondary endpoints + create_vnet_routes(dvs, "100.100.1.71/32", vnet_name, '19.0.0.1,19.0.0.2', ep_monitor='19.0.0.1,19.0.0.2', profile = "test_prf", primary ='19.0.0.1,19.0.0.2',monitoring='custom', adv_prefix='100.100.1.0/24') + update_monitor_session_state(dvs, '100.100.1.71/32', '19.0.0.1', 'up') + update_monitor_session_state(dvs, '100.100.1.71/32', '19.0.0.2', 'up') + + #verify that no BFD sessions are created. + check_del_bfd_session(dvs, ['19.0.0.1']) + check_del_bfd_session(dvs, ['19.0.0.2']) + time.sleep(2) + check_state_db_routes(dvs, vnet_name, "100.100.1.71/32", ['19.0.0.1,19.0.0.2']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + + update_monitor_session_state(dvs, '100.100.1.71/32', '19.0.0.1', 'down') + check_state_db_routes(dvs, vnet_name, "100.100.1.71/32", ['19.0.0.2']) + # The default Vnet setting does not advertise prefix + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + + update_monitor_session_state(dvs, '100.100.1.71/32', '19.0.0.2', 'down') + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.71/32") + + #remove first route + delete_vnet_routes(dvs, "100.100.1.71/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, vnet_name, ["100.100.1.71/32"]) + check_remove_state_db_routes(dvs, vnet_name, "100.100.1.71/32") + + delete_vnet_entry(dvs,vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) + + ''' + Test 23 - Test for vxlan custom monitoring. Changing the overlay_dmac of the Vnet on the fly.
+ ''' + def test_vnet_orch_23(self, dvs, testlog): + vnet_obj = self.get_vnet_obj() + + tunnel_name = 'tunnel_22' + vnet_name = "Vnet22" + vnet_obj.fetch_exist_entries(dvs) + + create_vxlan_tunnel(dvs, tunnel_name, '9.9.9.3') + create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + delete_vnet_entry(dvs,vnet_name) + + create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:77") + delete_vnet_entry(dvs,vnet_name) + + #update the Dmac of the vnet before adding any routes. + create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:77") + + vnet_obj.check_vnet_entry(dvs, vnet_name) + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, vnet_name, '10022') + + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '9.9.9.3') + + vnet_obj.fetch_exist_entries(dvs) + #Add first Route + create_vnet_routes(dvs, "100.100.1.11/32", vnet_name, '19.0.0.1,19.0.0.2,19.0.0.3', ep_monitor='19.1.0.1,19.1.0.2,19.1.0.3', profile = "test_prf", primary ='19.0.0.1',monitoring='custom', adv_prefix='100.100.1.0/24') + #verify the appdb entries. + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.1", "vxlan", "22:33:33:44:44:77") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.2", "vxlan", "22:33:33:44:44:77") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.3", "vxlan", "22:33:33:44:44:77") + + #update the Dmac after a route is added. + create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:88") + + #verify the appdb entries. 
+ vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.1", "vxlan", "22:33:33:44:44:88") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.2", "vxlan", "22:33:33:44:44:88") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.3", "vxlan", "22:33:33:44:44:88") + + #bring up an enpoint. + update_monitor_session_state(dvs, '100.100.1.11/32', '19.1.0.1', 'up') + + #verify the appdb entries. + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.1", "vxlan", "22:33:33:44:44:88") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.2", "vxlan", "22:33:33:44:44:88") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.3", "vxlan", "22:33:33:44:44:88") + + #update the Dmac to empty. This should have no impact. + create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="") + + #verify the appdb entries. + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.1", "vxlan", "22:33:33:44:44:88") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.2", "vxlan", "22:33:33:44:44:88") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.3", "vxlan", "22:33:33:44:44:88") + + #remove first route + delete_vnet_routes(dvs, "100.100.1.11/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.11/32"]) + check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.11/32") + + #make sure that the app db entries are removed. + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.1") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.2") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.3") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.4") + time.sleep(2) + + #bring down an enpoint. 
+ update_monitor_session_state(dvs, '100.100.1.11/32', '19.1.0.1', 'down') + + create_vnet_entry(dvs, vnet_name, tunnel_name, '10022', "", advertise_prefix=True, overlay_dmac="22:33:33:44:44:66") + + #Add first Route again + create_vnet_routes(dvs, "100.100.1.11/32", vnet_name, '19.0.0.1,19.0.0.2,19.0.0.3', ep_monitor='19.1.0.1,19.1.0.2,19.1.0.3', profile = "test_prf", primary ='19.0.0.1',monitoring='custom', adv_prefix='100.100.1.0/24') + + #bring up the endpoint. + update_monitor_session_state(dvs, '100.100.1.11/32', '19.1.0.1', 'up') + + # The default Vnet setting advertises the prefix. + check_routes_advertisement(dvs, "100.100.1.0/24", "test_prf") + + #verify the appdb entries. + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.1", "vxlan", "22:33:33:44:44:66") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.2", "vxlan", "22:33:33:44:44:66") + vnet_obj.check_custom_monitor_app_db(dvs, "100.100.1.11/32", "19.1.0.3", "vxlan", "22:33:33:44:44:66") + + #remove first route + delete_vnet_routes(dvs, "100.100.1.11/32", vnet_name) + vnet_obj.check_del_vnet_routes(dvs, 'Vnet12', ["100.100.1.11/32"]) + check_remove_state_db_routes(dvs, 'Vnet12', "100.100.1.11/32") + + #make sure that the app db entries are removed. + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.1") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.2") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.3") + vnet_obj.check_custom_monitor_deleted(dvs, "100.100.1.11/32", "19.1.0.4") + time.sleep(2) + delete_vnet_entry(dvs,vnet_name) + vnet_obj.check_del_vnet_entry(dvs, vnet_name) + delete_vxlan_tunnel(dvs, tunnel_name) + + ''' + Test 24 - Test duplicate route addition and removal. 
+ ''' + def test_vnet_orch_24(self, dvs, testlog): + self.setup_db(dvs) + self.clear_srv_config(dvs) + + vnet_obj = self.get_vnet_obj() + vnet_obj.fetch_exist_entries(dvs) + + # create vxlan tunnel and vnet in default vrf + tunnel_name = 'tunnel_24' + create_vxlan_tunnel(dvs, tunnel_name, '10.10.10.10') + create_vnet_entry(dvs, 'Vnet_2000', tunnel_name, '2000', "", 'default') + + vnet_obj.check_default_vnet_entry(dvs, 'Vnet_2000') + vnet_obj.check_vxlan_tunnel_entry(dvs, tunnel_name, 'Vnet_2000', '2000') + vnet_obj.check_vxlan_tunnel(dvs, tunnel_name, '10.10.10.10') + vnet_obj.fetch_exist_entries(dvs) + + # create vnet route + create_vnet_routes(dvs, "100.100.1.0/24", 'Vnet_2000', '10.10.10.3') + vnet_obj.check_vnet_routes(dvs, 'Vnet_2000', '10.10.10.3', tunnel_name) + check_state_db_routes(dvs, 'Vnet_2000', "100.100.1.0/24", ['10.10.10.3']) + time.sleep(2) + + # create l3 interface + self.create_l3_intf("Ethernet0", "") + + # set ip address + self.add_ip_address("Ethernet0", "10.10.10.1/24") + + # bring up interface + self.set_admin_status("Ethernet0", "up") + + # set ip address and default route + dvs.servers[0].runcmd("ip address add 10.10.10.3/24 dev eth0") + dvs.servers[0].runcmd("ip route add default via 10.10.10.1") + + marker = dvs.add_log_marker("/var/log/syslog") + time.sleep(2) + + # add another route for same prefix as vnet route + dvs.runcmd("vtysh -c \"configure terminal\" -c \"ip route 100.100.1.0/24 10.10.10.3\"") + + # check application database + self.pdb.wait_for_entry("ROUTE_TABLE", "100.100.1.0/24") + + # check ASIC route database + self.check_route_entries(["100.100.1.0/24"]) + + log_string = "Encountered failure in create operation, exiting orchagent, SAI API: SAI_API_ROUTE, status: SAI_STATUS_NOT_EXECUTED" + # check for absence of log_string in syslog + check_syslog(dvs, marker, log_string) + + # remove route entry + dvs.runcmd("vtysh -c \"configure terminal\" -c \"no ip route 100.100.1.0/24 10.10.10.3\"") + + # delete vnet route + 
delete_vnet_routes(dvs, "100.100.1.0/24", 'Vnet_2000') + vnet_obj.check_del_vnet_routes(dvs, 'Vnet_2000') + check_remove_state_db_routes(dvs, 'Vnet_2000', "100.100.1.0/24") + + # delete vnet + delete_vnet_entry(dvs, 'Vnet_2000') + vnet_obj.check_del_vnet_entry(dvs, 'Vnet_2000') + # delete vxlan tunnel + delete_vxlan_tunnel(dvs, tunnel_name) # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_vxlan_tunnel.py b/tests/test_vxlan_tunnel.py index 14fe28261f..d296fcc741 100644 --- a/tests/test_vxlan_tunnel.py +++ b/tests/test_vxlan_tunnel.py @@ -26,6 +26,18 @@ def create_entry_pst(db, table, separator, key, pairs): create_entry(tbl, key, pairs) +def delete_entry_pst(db, table, key): + tbl = swsscommon.ProducerStateTable(db, table) + tbl._del(key) + time.sleep(1) + + +def delete_entry_tbl(db, table, key): + tbl = swsscommon.Table(db, table) + tbl._del(key) + time.sleep(1) + + def how_many_entries_exist(db, table): tbl = swsscommon.Table(db, table) return len(tbl.getKeys()) @@ -324,6 +336,66 @@ def test_vxlan_term_orch(self, dvs, testlog): create_vxlan_tunnel_entry(dvs, 'tunnel_4', 'entry_2', tunnel_map_map, 'Vlan57', '857', tunnel_map_ids, tunnel_map_entry_ids, tunnel_ids, tunnel_term_ids) +def apply_test_vnet_cfg(cfg_db): + + # create VXLAN Tunnel + create_entry_tbl( + cfg_db, + "VXLAN_TUNNEL", '|', "tunnel1", + [ + ("src_ip", "1.1.1.1") + ], + ) + + # create VNET + create_entry_tbl( + cfg_db, + "VNET", '|', "tunnel1", + [ + ("vxlan_tunnel", "tunnel1"), + ("vni", "1") + ], + ) + + return + + +@pytest.fixture +def env_setup(dvs): + cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + app_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) + + create_entry_pst( + app_db, + "SWITCH_TABLE", ':', "switch", + [ + ("vxlan_router_mac", "00:01:02:03:04:05") + ], + ) + + apply_test_vnet_cfg(cfg_db) + + yield + + delete_entry_pst(app_db, 
"SWITCH_TABLE", "switch") + delete_entry_tbl(cfg_db, "VXLAN_TUNNEL", "tunnel1") + delete_entry_tbl(cfg_db, "VNET", "Vnet1") + +def test_vnet_cleanup_config_reload(dvs, env_setup): + + # Restart vxlanmgrd Process + dvs.runcmd(["systemctl", "restart", "vxlanmgrd"]) + + # Reapply cfg to simulate cfg reload + cfg_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) + apply_test_vnet_cfg(cfg_db) + + time.sleep(0.5) + + # Check if the netdevices is created as expected + ret, stdout = dvs.runcmd(["ip", "link", "show"]) + assert "Vxlan1" in stdout + assert "Brvxlan1" in stdout # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_warm_reboot.py b/tests/test_warm_reboot.py index 36028dfc69..b2edc42587 100644 --- a/tests/test_warm_reboot.py +++ b/tests/test_warm_reboot.py @@ -118,12 +118,14 @@ def how_many_entries_exist(db, table): def stop_neighsyncd(dvs): dvs.runcmd(['sh', '-c', 'pkill -x neighsyncd']) + time.sleep(1) def start_neighsyncd(dvs): dvs.runcmd(['sh', '-c', 'supervisorctl start neighsyncd']) def stop_restore_neighbors(dvs): dvs.runcmd(['sh', '-c', 'pkill -x restore_neighbors']) + time.sleep(1) def start_restore_neighbors(dvs): dvs.runcmd(['sh', '-c', 'supervisorctl start restore_neighbors']) @@ -237,6 +239,20 @@ def ping_new_ips(dvs): dvs.runcmd(['sh', '-c', "ping -c 1 -W 0 -q {}.0.0.{} > /dev/null 2>&1".format(i*4, j+NUM_NEIGH_PER_INTF+2)]) dvs.runcmd(['sh', '-c', "ping6 -c 1 -W 0 -q {}00::{} > /dev/null 2>&1".format(i*4, j+NUM_NEIGH_PER_INTF+2)]) +def warm_restart_set(dvs, app, enable): + db = swsscommon.DBConnector(6, dvs.redis_sock, 0) + tbl = swsscommon.Table(db, "WARM_RESTART_ENABLE_TABLE") + fvs = swsscommon.FieldValuePairs([("enable",enable)]) + tbl.set(app, fvs) + time.sleep(1) + + +def warm_restart_timer_set(dvs, app, timer, val): + db = swsscommon.DBConnector(4, dvs.redis_sock, 0) + tbl = swsscommon.Table(db, "WARM_RESTART") + fvs = 
swsscommon.FieldValuePairs([(timer, val)]) + tbl.set(app, fvs) + time.sleep(1) class TestWarmReboot(object): def test_PortSyncdWarmRestart(self, dvs, testlog): @@ -245,10 +261,10 @@ def test_PortSyncdWarmRestart(self, dvs, testlog): appl_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) - dvs.runcmd("config warm_restart enable swss") + dvs.warm_restart_swss("true") - dvs.runcmd("config interface startup Ethernet16") - dvs.runcmd("config interface startup Ethernet20") + dvs.port_admin_set("Ethernet16", "up") + dvs.port_admin_set("Ethernet20", "up") time.sleep(1) @@ -259,8 +275,8 @@ def test_PortSyncdWarmRestart(self, dvs, testlog): intf_tbl.set("Ethernet20|11.0.0.9/29", fvs) intf_tbl.set("Ethernet16", fvs) intf_tbl.set("Ethernet20", fvs) - dvs.runcmd("config interface startup Ethernet16") - dvs.runcmd("config interface startup Ethernet20") + dvs.port_admin_set("Ethernet16", "up") + dvs.port_admin_set("Ethernet20", "up") dvs.servers[4].runcmd("ip link set down dev eth0") == 0 dvs.servers[4].runcmd("ip link set up dev eth0") == 0 @@ -293,6 +309,7 @@ def test_PortSyncdWarmRestart(self, dvs, testlog): # restart portsyncd dvs.runcmd(['sh', '-c', 'pkill -x portsyncd']) + time.sleep(1) pubsub = dvs.SubscribeAsicDbObject("SAI_OBJECT_TYPE") dvs.runcmd(['sh', '-c', 'supervisorctl start portsyncd']) @@ -329,7 +346,6 @@ def test_PortSyncdWarmRestart(self, dvs, testlog): intf_tbl._del("Ethernet20") time.sleep(2) - def test_VlanMgrdWarmRestart(self, dvs, testlog): conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) @@ -339,12 +355,12 @@ def test_VlanMgrdWarmRestart(self, dvs, testlog): dvs.runcmd("ifconfig Ethernet16 0") dvs.runcmd("ifconfig Ethernet20 0") - dvs.runcmd("config interface startup Ethernet16 ") - dvs.runcmd("config interface startup Ethernet20 ") + dvs.port_admin_set("Ethernet16", "up") + dvs.port_admin_set("Ethernet20", "up") time.sleep(1) - 
dvs.runcmd("config warm_restart enable swss") + dvs.warm_restart_swss("true") # create vlan create_entry_tbl( @@ -387,8 +403,6 @@ def test_VlanMgrdWarmRestart(self, dvs, testlog): intf_tbl.set("Vlan20|11.0.0.9/29", fvs) intf_tbl.set("Vlan16", fvs) intf_tbl.set("Vlan20", fvs) - dvs.runcmd("config interface startup Vlan16") - dvs.runcmd("config interface startup Vlan20") dvs.servers[4].runcmd("ifconfig eth0 11.0.0.2/29") dvs.servers[4].runcmd("ip route add default via 11.0.0.1") @@ -415,6 +429,7 @@ def test_VlanMgrdWarmRestart(self, dvs, testlog): restore_count = swss_get_RestoreCount(dvs, state_db) dvs.runcmd(['sh', '-c', 'pkill -x vlanmgrd']) + time.sleep(1) pubsub = dvs.SubscribeAsicDbObject("SAI_OBJECT_TYPE") @@ -453,7 +468,7 @@ def test_IntfMgrdWarmRestartNoInterfaces(self, dvs, testlog): state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) restore_count = swss_get_RestoreCount(dvs, state_db) - dvs.runcmd("config warm_restart enable swss") + dvs.warm_restart_swss("true") dvs.runcmd("supervisorctl restart intfmgrd") reached_desired_state = False @@ -474,7 +489,7 @@ def test_swss_neighbor_syncup(self, dvs, testlog): conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) - dvs.runcmd("config warm_restart enable swss") + dvs.warm_restart_swss("true") # # Testcase1: @@ -503,8 +518,8 @@ def test_swss_neighbor_syncup(self, dvs, testlog): intf_tbl.set("{}".format(intfs[1]), fvs) intf_tbl.set("{}".format(intfs[0]), fvs) intf_tbl.set("{}".format(intfs[1]), fvs) - dvs.runcmd("config interface startup {}".format(intfs[0])) - dvs.runcmd("config interface startup {}".format(intfs[1])) + dvs.port_admin_set(intfs[0], "up") + dvs.port_admin_set(intfs[1], "up") ips = ["24.0.0.2", "24.0.0.3", "28.0.0.2", "28.0.0.3"] v6ips = ["2400::2", "2400::3", "2800::2", "2800::3"] @@ -748,7 +763,7 @@ def test_swss_neighbor_syncup(self, dvs, testlog): # setup timer in configDB 
timer_value = "15" - dvs.runcmd("config warm_restart neighsyncd_timer {}".format(timer_value)) + warm_restart_timer_set(dvs, "swss", "neighsyncd_timer", timer_value) # get restore_count restore_count = swss_get_RestoreCount(dvs, state_db) @@ -847,7 +862,7 @@ def test_OrchagentWarmRestartReadyCheck(self, dvs, testlog): time.sleep(1) - dvs.runcmd("config warm_restart enable swss") + dvs.warm_restart_swss("true") config_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) intf_tbl = swsscommon.Table(config_db, "INTERFACE") @@ -856,8 +871,8 @@ def test_OrchagentWarmRestartReadyCheck(self, dvs, testlog): intf_tbl.set("Ethernet4|10.0.0.2/31", fvs) intf_tbl.set("Ethernet0", fvs) intf_tbl.set("Ethernet4", fvs) - dvs.runcmd("config interface startup Ethernet0") - dvs.runcmd("config interface startup Ethernet4") + dvs.port_admin_set("Ethernet0", "up") + dvs.port_admin_set("Ethernet4", "up") dvs.servers[0].runcmd("ifconfig eth0 10.0.0.1/31") dvs.servers[0].runcmd("ip route add default via 10.0.0.0") @@ -876,23 +891,23 @@ def test_OrchagentWarmRestartReadyCheck(self, dvs, testlog): time.sleep(1) # Should fail, since neighbor for next 20.0.0.1 has not been not resolved yet - (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check") + (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check", include_stderr=False) assert result == "RESTARTCHECK failed\n" # Should succeed, the option for skipPendingTaskCheck -s and noFreeze -n have been provided. # Wait up to 500 milliseconds for response from orchagent. Default wait time is 1000 milliseconds. 
- (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check -n -s -w 500") + (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check -n -s -w 500", include_stderr=False) assert result == "RESTARTCHECK succeeded\n" # Remove unfinished routes ps._del("3.3.3.0/24") time.sleep(1) - (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check") + (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check", include_stderr=False) assert result == "RESTARTCHECK succeeded\n" # Should fail since orchagent has been frozen at last step. - (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check -n -s -w 500") + (exitcode, result) = dvs.runcmd("/usr/bin/orchagent_restart_check -n -s -w 500", include_stderr=False) assert result == "RESTARTCHECK failed\n" # Cleaning previously pushed route-entry to ease life of subsequent testcases. @@ -916,7 +931,7 @@ def test_swss_port_state_syncup(self, dvs, testlog): conf_db = swsscommon.DBConnector(swsscommon.CONFIG_DB, dvs.redis_sock, 0) state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) - dvs.runcmd("config warm_restart enable swss") + dvs.warm_restart_swss("true") tbl = swsscommon.Table(appl_db, swsscommon.APP_PORT_TABLE_NAME) @@ -931,9 +946,9 @@ def test_swss_port_state_syncup(self, dvs, testlog): intf_tbl.set("Ethernet0", fvs) intf_tbl.set("Ethernet4", fvs) intf_tbl.set("Ethernet8", fvs) - dvs.runcmd("config interface startup Ethernet0") - dvs.runcmd("config interface startup Ethernet4") - dvs.runcmd("config interface startup Ethernet8") + dvs.port_admin_set("Ethernet0", "up") + dvs.port_admin_set("Ethernet4", "up") + dvs.port_admin_set("Ethernet8", "up") dvs.runcmd("arp -s 10.0.0.1 00:00:00:00:00:01") dvs.runcmd("arp -s 10.0.0.3 00:00:00:00:00:02") @@ -1063,7 +1078,6 @@ def test_swss_port_state_syncup(self, dvs, testlog): # ################################################################################ - def test_routing_WarmRestart(self, dvs, testlog): appl_db = 
swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) @@ -1102,9 +1116,9 @@ def test_routing_WarmRestart(self, dvs, testlog): intf_tbl.set("{}".format(intfs[1]), fvs) intf_tbl.set("{}".format(intfs[2]), fvs) intf_tbl.set("{}".format(intfs[2]), fvs) - dvs.runcmd("config interface startup {}".format(intfs[0])) - dvs.runcmd("config interface startup {}".format(intfs[1])) - dvs.runcmd("config interface startup {}".format(intfs[2])) + dvs.port_admin_set(intfs[0], "up") + dvs.port_admin_set(intfs[1], "up") + dvs.port_admin_set(intfs[2], "up") time.sleep(1) @@ -1199,8 +1213,8 @@ def test_routing_WarmRestart(self, dvs, testlog): # The following two instructions will be substituted by the commented ones # once the later ones are added to sonic-utilities repo. - dvs.runcmd("config warm_restart enable bgp") - dvs.runcmd("config warm_restart bgp_timer {}".format(restart_timer)) + warm_restart_set(dvs, "bgp", "true") + warm_restart_timer_set(dvs, "bgp", "bgp_timer", str(restart_timer)) time.sleep(1) @@ -1249,7 +1263,8 @@ def test_routing_WarmRestart(self, dvs, testlog): rt_key = json.loads(addobjs[0]['key']) rt_val = json.loads(addobjs[0]['vals']) assert rt_key == "192.168.100.0/24" - assert rt_val == {"ifname": "Ethernet0", "nexthop": "111.0.0.2"} + assert rt_val.get("ifname") == "Ethernet0" + assert rt_val.get("nexthop") == "111.0.0.2" # Verify the changed prefix is seen in sairedis (addobjs, delobjs) = dvs.GetSubscribedAsicDbObjects(pubsubAsicDB) @@ -1321,7 +1336,8 @@ def test_routing_WarmRestart(self, dvs, testlog): rt_key = json.loads(addobjs[0]['key']) rt_val = json.loads(addobjs[0]['vals']) assert rt_key == "192.168.200.0/24" - assert rt_val == {"ifname": "Ethernet0,Ethernet4,Ethernet8", "nexthop": "111.0.0.2,122.0.0.2,133.0.0.2"} + assert rt_val.get("ifname") == "Ethernet0,Ethernet4,Ethernet8" + assert rt_val.get("nexthop") == "111.0.0.2,122.0.0.2,133.0.0.2" # Verify the changed prefix is seen in sairedis (addobjs, delobjs) = 
dvs.GetSubscribedAsicDbObjects(pubsubAsicDB) @@ -1394,7 +1410,8 @@ def test_routing_WarmRestart(self, dvs, testlog): rt_key = json.loads(addobjs[0]['key']) rt_val = json.loads(addobjs[0]['vals']) assert rt_key == "192.168.1.3" - assert rt_val == {"ifname": "Ethernet0,Ethernet4,Ethernet8", "nexthop": "111.0.0.2,122.0.0.2,133.0.0.2"} + assert rt_val.get("ifname") == "Ethernet0,Ethernet4,Ethernet8" + assert rt_val.get("nexthop") == "111.0.0.2,122.0.0.2,133.0.0.2" # Verify the changed prefix is seen in sairedis (addobjs, delobjs) = dvs.GetSubscribedAsicDbObjects(pubsubAsicDB) @@ -1432,7 +1449,8 @@ def test_routing_WarmRestart(self, dvs, testlog): rt_key = json.loads(addobjs[0]['key']) rt_val = json.loads(addobjs[0]['vals']) assert rt_key == "192.168.1.3" - assert rt_val == {"ifname": "Ethernet0,Ethernet4", "nexthop": "111.0.0.2,122.0.0.2"} + assert rt_val.get("ifname") == "Ethernet0,Ethernet4" + assert rt_val.get("nexthop") == "111.0.0.2,122.0.0.2" # Verify the changed prefix is seen in sairedis (addobjs, delobjs) = dvs.GetSubscribedAsicDbObjects(pubsubAsicDB) @@ -1469,7 +1487,8 @@ def test_routing_WarmRestart(self, dvs, testlog): rt_key = json.loads(addobjs[0]['key']) rt_val = json.loads(addobjs[0]['vals']) assert rt_key == "fc00:4:4::1" - assert rt_val == {"ifname": "Ethernet0", "nexthop": "1110::2"} + assert rt_val.get("ifname") == "Ethernet0" + assert rt_val.get("nexthop") == "1110::2" # Verify the changed prefix is seen in sairedis (addobjs, delobjs) = dvs.GetSubscribedAsicDbObjects(pubsubAsicDB) @@ -1567,7 +1586,8 @@ def test_routing_WarmRestart(self, dvs, testlog): rt_key = json.loads(addobjs[0]['key']) rt_val = json.loads(addobjs[0]['vals']) assert rt_key == "192.168.100.0/24" - assert rt_val == {"ifname": "Ethernet0", "nexthop": "111.0.0.2"} + assert rt_val.get("ifname") == "Ethernet0" + assert rt_val.get("nexthop") == "111.0.0.2" # Verify the changed prefix is seen in sairedis (addobjs, delobjs) = dvs.GetSubscribedAsicDbObjects(pubsubAsicDB) @@ -1679,7 
+1699,8 @@ def test_routing_WarmRestart(self, dvs, testlog): rt_key = json.loads(addobjs[0]['key']) rt_val = json.loads(addobjs[0]['vals']) assert rt_key == "192.168.100.0/24" - assert rt_val == {"ifname": "Ethernet4", "nexthop": "122.0.0.2"} + assert rt_val.get("ifname") == "Ethernet4" + assert rt_val.get("nexthop") == "122.0.0.2" # Verify the changed prefix is seen in sairedis (addobjs, delobjs) = dvs.GetSubscribedAsicDbObjects(pubsubAsicDB) @@ -1711,7 +1732,7 @@ def test_routing_WarmRestart(self, dvs, testlog): del_entry_tbl(state_db, "BGP_STATE_TABLE", "IPv4|eoiu") del_entry_tbl(state_db, "BGP_STATE_TABLE", "IPv6|eoiu") - dvs.runcmd("config warm_restart bgp_timer {}".format(restart_timer)) + warm_restart_timer_set(dvs, "bgp", "bgp_timer", str(restart_timer)) # Restart zebra dvs.stop_zebra() dvs.start_zebra() @@ -1854,7 +1875,7 @@ def test_system_warmreboot_neighbor_syncup(self, dvs, testlog): flush_neigh_entries(dvs) time.sleep(5) - dvs.runcmd("config warm_restart enable system") + warm_restart_set(dvs, "system", "true") # Test neighbors on NUM_INTF (e,g 8) interfaces # Ethernet32/36/.../60, with ip: 32.0.0.1/24... 
60.0.0.1/24 @@ -1877,7 +1898,7 @@ def test_system_warmreboot_neighbor_syncup(self, dvs, testlog): intf_tbl.set("Ethernet{}|{}00::1/64".format(i*4, i*4), fvs) intf_tbl.set("Ethernet{}".format(i*4, i*4), fvs) intf_tbl.set("Ethernet{}".format(i*4, i*4), fvs) - dvs.runcmd("config interface startup Ethernet{}".format(i*4, i*4)) + dvs.port_admin_set("Ethernet{}".format(i*4), "up") dvs.servers[i].runcmd("ip link set up dev eth0") dvs.servers[i].runcmd("ip addr flush dev eth0") #result = dvs.servers[i].runcmd_output("ifconfig eth0 | grep HWaddr | awk '{print $NF}'") @@ -2103,7 +2124,7 @@ def test_system_warmreboot_neighbor_syncup(self, dvs, testlog): swss_app_check_RestoreCount_single(state_db, restore_count, "neighsyncd") # disable system warm restart - dvs.runcmd("config warm_restart disable system") + warm_restart_set(dvs, "system", "false") for i in range(8, 8+NUM_INTF): intf_tbl._del("Ethernet{}|{}.0.0.1/24".format(i*4, i*4)) @@ -2117,11 +2138,11 @@ def test_VrfMgrdWarmRestart(self, dvs, testlog): appl_db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) state_db = swsscommon.DBConnector(swsscommon.STATE_DB, dvs.redis_sock, 0) - dvs.runcmd("config warm_restart enable swss") + dvs.warm_restart_swss("true") # bring up interface - dvs.runcmd("config interface startup Ethernet0 ") - dvs.runcmd("config interface startup Ethernet4 ") + dvs.port_admin_set("Ethernet0", "up") + dvs.port_admin_set("Ethernet4", "up") # create vrf create_entry_tbl(conf_db, "VRF", "Vrf_1", [('empty', 'empty')]) @@ -2160,6 +2181,7 @@ def test_VrfMgrdWarmRestart(self, dvs, testlog): (exitcode, vrf_before) = dvs.runcmd(['sh', '-c', "ip link show | grep Vrf"]) dvs.runcmd(['sh', '-c', 'pkill -x vrfmgrd']) + time.sleep(1) pubsub = dvs.SubscribeAsicDbObject("SAI_OBJECT_TYPE") @@ -2285,7 +2307,7 @@ def test_MirrorSessionWarmReboot(self, dvs): # Monitor port should not change b/c routes are ECMP state_db.wait_for_field_match("MIRROR_SESSION_TABLE", "test_session", {"monitor_port": 
"Ethernet12"}) - dvs.runcmd("config warm_restart enable swss") + dvs.warm_restart_swss("true") dvs.stop_swss() dvs.start_swss() @@ -2332,7 +2354,7 @@ def test_EverflowWarmReboot(self, dvs, dvs_acl): asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_ACL_ENTRY", 1 + len(asic_db.default_acl_entries)) # Execute the warm reboot - dvs.runcmd("config warm_restart enable swss") + dvs.warm_restart_swss("true") dvs.stop_swss() dvs.start_swss() @@ -2365,6 +2387,35 @@ def test_EverflowWarmReboot(self, dvs, dvs_acl): dvs.start_swss() dvs.check_swss_ready() + def test_TunnelMgrdWarmRestart(self, dvs): + tunnel_name = "MuxTunnel0" + tunnel_table = "TUNNEL_DECAP_TABLE" + tunnel_params = { + "tunnel_type": "IPINIP", + "dst_ip": "10.1.0.32", + "dscp_mode": "uniform", + "ecn_mode": "standard", + "ttl_mode": "pipe" + } + + pubsub = dvs.SubscribeAppDbObject(tunnel_table) + + dvs.runcmd("config warm_restart enable swss") + config_db = dvs.get_config_db() + config_db.create_entry("TUNNEL", tunnel_name, tunnel_params) + + app_db = dvs.get_app_db() + app_db.wait_for_matching_keys(tunnel_table, [tunnel_name]) + + nadd, ndel = dvs.CountSubscribedObjects(pubsub) + assert nadd == len(tunnel_params) + assert ndel == 1 # Expect 1 deletion as part of table creation + + dvs.runcmd("supervisorctl restart tunnelmgrd") + dvs.check_services_ready() + nadd, ndel = dvs.CountSubscribedObjects(pubsub) + assert nadd == 0 + assert ndel == 0 # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_watermark.py b/tests/test_watermark.py index 6d7c993125..a8cee70aa1 100644 --- a/tests/test_watermark.py +++ b/tests/test_watermark.py @@ -104,22 +104,8 @@ def verify_value(self, dvs, obj_ids, table_name, watermark_name, expected_value) assert found, "no such watermark found" def set_up_flex_counter(self, dvs): - for q in self.qs: - self.flex_db.create_entry("FLEX_COUNTER_TABLE", - 
"QUEUE_WATERMARK_STAT_COUNTER:{}".format(q), - WmFCEntry.queue_stats_entry) - - for pg in self.pgs: - self.flex_db.create_entry("FLEX_COUNTER_TABLE", - "PG_WATERMARK_STAT_COUNTER:{}".format(pg), - WmFCEntry.pg_stats_entry) - - for buffer in self.buffers: - self.flex_db.create_entry("FLEX_COUNTER_TABLE", - "BUFFER_POOL_WATERMARK_STAT_COUNTER:{}".format(buffer), - WmFCEntry.buffer_stats_entry) - fc_status_enable = {"FLEX_COUNTER_STATUS": "enable"} + self.config_db.create_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK", fc_status_enable) @@ -130,7 +116,8 @@ def set_up_flex_counter(self, dvs): "BUFFER_POOL_WATERMARK", fc_status_enable) - self.populate_asic_all(dvs, "0") + # Wait for DB's to populate by orchagent + time.sleep(2) def clear_flex_counter(self, dvs): for q in self.qs: @@ -150,10 +137,14 @@ def clear_flex_counter(self, dvs): self.config_db.delete_entry("FLEX_COUNTER_TABLE", "BUFFER_POOL_WATERMARK") def set_up(self, dvs): - self.qs = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_QUEUE") - self.pgs = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP") + self.pgs = self.counters_db.db_connection.hgetall("COUNTERS_PG_NAME_MAP").values() + assert self.pgs is not None and len(self.pgs) > 0 + self.qs = self.counters_db.db_connection.hgetall("COUNTERS_QUEUE_NAME_MAP").values() + assert self.qs is not None and len(self.pgs) > 0 self.buffers = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_BUFFER_POOL") + self.populate_asic_all(dvs, "0") + db = swsscommon.DBConnector(swsscommon.COUNTERS_DB, dvs.redis_sock, 0) tbl = swsscommon.Table(db, "COUNTERS_QUEUE_TYPE_MAP") @@ -172,11 +163,17 @@ def set_up(self, dvs): tbl.set('', [(q, "SAI_QUEUE_TYPE_ALL")]) self.all_q.append(q) + def clear_watermark(self, dvs, data): + adb = swsscommon.DBConnector(0, dvs.redis_sock, 0) + msg = json.dumps(data, separators=(',',':')) + adb.publish('WATERMARK_CLEAR_REQUEST', msg) + time.sleep(1) + def test_telemetry_period(self, dvs): self.setup_dbs(dvs) + 
self.set_up_flex_counter(dvs) self.set_up(dvs) try: - self.set_up_flex_counter(dvs) self.enable_unittests(dvs, "true") self.populate_asic_all(dvs, "100") @@ -191,7 +188,10 @@ def test_telemetry_period(self, dvs): self.populate_asic_all(dvs, "123") - dvs.runcmd("config watermark telemetry interval {}".format(5)) + interval = {"interval": "5"} + self.config_db.create_entry("WATERMARK_TABLE", + "TELEMETRY_INTERVAL", + interval) time.sleep(self.DEFAULT_TELEMETRY_INTERVAL + 1) time.sleep(self.NEW_INTERVAL + 1) @@ -257,10 +257,7 @@ def test_clear(self, dvs): # clear pg shared watermark, and verify that headroom watermark and persistent watermarks are not affected - exitcode, output = dvs.runcmd("sonic-clear priority-group watermark shared") - time.sleep(1) - assert exitcode == 0, "CLI failure: %s" % output - # make sure it cleared + self.clear_watermark(dvs, ["USER", "PG_SHARED"]) self.verify_value(dvs, self.pgs, WmTables.user, SaiWmStats.pg_shared, "0") # make sure the rest is untouched @@ -271,9 +268,7 @@ def test_clear(self, dvs): # clear queue unicast persistent watermark, and verify that multicast watermark and user watermarks are not affected - exitcode, output = dvs.runcmd("sonic-clear queue persistent-watermark unicast") - time.sleep(1) - assert exitcode == 0, "CLI failure: %s" % output + self.clear_watermark(dvs, ["PERSISTENT", "Q_SHARED_UNI"]) # make sure it cleared self.verify_value(dvs, self.uc_q, WmTables.persistent, SaiWmStats.queue_shared, "0") @@ -289,16 +284,14 @@ def test_clear(self, dvs): # clear queue all watermark, and verify that multicast and unicast watermarks are not affected # clear persistent all watermark - exitcode, output = dvs.runcmd("sonic-clear queue persistent-watermark all") - time.sleep(1) - assert exitcode == 0, "CLI failure: %s" % output + self.clear_watermark(dvs, ["PERSISTENT", "Q_SHARED_ALL"]) + # make sure it cleared self.verify_value(dvs, self.all_q, WmTables.persistent, SaiWmStats.queue_shared, "0") # clear user all watermark - 
exitcode, output = dvs.runcmd("sonic-clear queue watermark all") - time.sleep(1) - assert exitcode == 0, "CLI failure: %s" % output + self.clear_watermark(dvs, ["USER", "Q_SHARED_ALL"]) + # make sure it cleared self.verify_value(dvs, self.all_q, WmTables.user, SaiWmStats.queue_shared, "0") diff --git a/tests/virtual_chassis/8/default_config.json b/tests/virtual_chassis/8/default_config.json new file mode 100644 index 0000000000..523ab8e450 --- /dev/null +++ b/tests/virtual_chassis/8/default_config.json @@ -0,0 +1,13 @@ +{ + "DEVICE_METADATA": { + "localhost": { + "hostname": "supervisor", + "chassis_db_address" : "10.8.1.200", + "inband_address" : "10.8.1.200/24", + "switch_type": "fabric", + "sub_role" : "BackEnd", + "start_chassis_db" : "1", + "comment" : "default_config for a vs that runs chassis_db" + } + } +} diff --git a/tests/virtual_chassis/chassis_supervisor.json b/tests/virtual_chassis/chassis_supervisor.json new file mode 100644 index 0000000000..373b44f257 --- /dev/null +++ b/tests/virtual_chassis/chassis_supervisor.json @@ -0,0 +1,5 @@ +{ + "VIRTUAL_TOPOLOGY": { + "chassis_instances" : [ "8", "1", "2", "3" ] + } +} diff --git a/tlm_teamd/Makefile.am b/tlm_teamd/Makefile.am index 6bf7574a8f..46ddfd22f5 100644 --- a/tlm_teamd/Makefile.am +++ b/tlm_teamd/Makefile.am @@ -1,4 +1,4 @@ -INCLUDES = -I $(top_srcdir) +INCLUDES = -I $(top_srcdir) -I$(top_srcdir)/lib bin_PROGRAMS = tlm_teamd @@ -10,10 +10,15 @@ endif tlm_teamd_SOURCES = main.cpp teamdctl_mgr.cpp values_store.cpp -tlm_teamd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) -tlm_teamd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(JANSSON_CFLAGS) -tlm_teamd_LDADD = -lhiredis -lswsscommon -lteamdctl $(JANSSON_LIBS) +tlm_teamd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_ASAN) +tlm_teamd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(JANSSON_CFLAGS) $(CFLAGS_ASAN) +tlm_teamd_LDADD = $(LDFLAGS_ASAN) -lhiredis -lswsscommon -lteamdctl $(JANSSON_LIBS) if GCOV_ENABLED 
tlm_teamd_LDADD += -lgcovpreload endif + +if ASAN_ENABLED +tlm_teamd_SOURCES += $(top_srcdir)/lib/asan.cpp +endif + diff --git a/tlm_teamd/main.cpp b/tlm_teamd/main.cpp index 3a464e8a67..291b044eda 100644 --- a/tlm_teamd/main.cpp +++ b/tlm_teamd/main.cpp @@ -9,6 +9,7 @@ #include "teamdctl_mgr.h" #include "values_store.h" +#include "subintf.h" bool g_run = true; @@ -30,6 +31,11 @@ void update_interfaces(swss::SubscriberStateTable & table, TeamdCtlMgr & mgr) const auto & lag_name = kfvKey(entry); const auto & op = kfvOp(entry); + if (lag_name.find(VLAN_SUB_INTERFACE_SEPARATOR) != std::string::npos) + { + SWSS_LOG_INFO("Skip subintf %s statedb event", lag_name.c_str()); + continue; + } if (op == "SET") { mgr.add_lag(lag_name); diff --git a/warmrestart/warmRestartAssist.cpp b/warmrestart/warmRestartAssist.cpp index 988f8279db..9b1a8dfddd 100644 --- a/warmrestart/warmRestartAssist.cpp +++ b/warmrestart/warmRestartAssist.cpp @@ -208,10 +208,31 @@ void AppRestartAssist::insertToMap(string tableName, string key, vectorsecond, SAME); + auto state = getCacheEntryState(found->second); + /* + * In case an entry has been updated for more than once with the same value but different from the stored one, + * keep the state as NEW. + * Eg. + * Assume the entry's value that is restored from last warm reboot is V0. + * 1. The first update with value V1 is received and handled by the above `if (found != appTableCacheMap[tableName].end())` branch, + * - state is set to NEW + * - value is updated to V1 + * 2. 
The second update with the same value V1 is received and handled by this branch + * - Originally, state was set to SAME, which is wrong because V1 is different from the stored value V0 + * - The correct logic should be: set the state to same only if the state is not NEW + * This is a very rare case because in most of times the entry won't be updated for multiple times + */ + if (state == NEW) + { + SWSS_LOG_NOTICE("%s, found key: %s, it has been updated for the second time, keep state as NEW", + tableName.c_str(), key.c_str()); + } + else + { + SWSS_LOG_INFO("%s, found key: %s, same value", tableName.c_str(), key.c_str()); + // mark as SAME flag + setCacheEntryState(found->second, SAME); + } } } else