diff --git a/.github/actions/publish-docker-image/action.yml b/.github/actions/publish-docker-image/action.yml index 206e13d4c..a252bf108 100644 --- a/.github/actions/publish-docker-image/action.yml +++ b/.github/actions/publish-docker-image/action.yml @@ -38,11 +38,13 @@ inputs: docker_token: required: false description: "DockerHub Token. No push is done if omitted" + docker_tag: + required: false + description: 'additional docker tags' runs: using: "composite" steps: - - name: Checkout - uses: actions/checkout@v3 + - uses: actions/checkout@v3.3.0 ##################### # Login to DockerHub @@ -56,12 +58,7 @@ runs: ##################### # Build JAR file ##################### - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + - uses: ./.github/actions/setup-java - name: Build Controlplane shell: bash run: |- @@ -78,9 +75,7 @@ runs: images: | ${{ inputs.namespace }}/${{ inputs.imagename }} tags: | - type=ref,event=branch - type=ref,event=pr - type=semver,pattern={{version}} + type=semver,pattern={{version}},value=${{ inputs.docker_tag }} type=semver,pattern={{major}} type=semver,pattern={{major}}.{{minor}} type=semver,pattern={{raw}} diff --git a/.github/actions/run-deployment-test/action.yml b/.github/actions/run-deployment-test/action.yml index ed720b4be..9f4b40d58 100644 --- a/.github/actions/run-deployment-test/action.yml +++ b/.github/actions/run-deployment-test/action.yml @@ -42,8 +42,7 @@ inputs: runs: using: "composite" steps: - - name: Checkout - uses: actions/checkout@v3.3.0 + - uses: actions/checkout@v3.3.0 - name: Cache ContainerD Image Layers uses: actions/cache@v3 @@ -51,12 +50,7 @@ runs: path: /var/lib/containerd/io.containerd.snapshotter.v1.overlayfs key: ${{ runner.os }}-io.containerd.snapshotter.v1.overlayfs - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '11' - distribution: 'temurin' - cache: 'gradle' + - uses: ./.github/actions/setup-java - name: Build docker images shell: bash diff --git a/.github/actions/setup-java/action.yml b/.github/actions/setup-java/action.yml new file mode 100644 index 000000000..ed03fafb3 --- /dev/null +++ b/.github/actions/setup-java/action.yml @@ -0,0 +1,32 @@ +# +# Copyright (c) 2023 Bayerische Motoren Werke Aktiengesellschaft (BMW AG) +# Copyright (c) 2023 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License, Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + +--- +name: "Setup JDK 17" +description: "Setup JDK 17" +runs: + using: "composite" + steps: + - name: Setup JDK 17 + uses: actions/setup-java@v3.11.0 + with: + java-version: '17' + distribution: 'temurin' + cache: 'gradle' \ No newline at end of file diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 2c2dda9c2..fd3fb7a93 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -60,10 +60,10 @@ jobs: - name: Check whether secrets exist id: secret-presence run: | - [ ! -z "${{ secrets.SONAR_TOKEN }}" ] && echo "::set-output name=SONAR_TOKEN::true" - [ ! -z "${{ secrets.GPG_PRIVATE_KEY }}" ] && echo "::set-output name=GPG_PRIVATE_KEY::true" - [ ! -z "${{ secrets.GPG_PASSPHRASE }}" ] && echo "::set-output name=GPG_PASSPHRASE::true" - [ ! -z "${{ secrets.DOCKER_HUB_TOKEN }}" ] && echo "::set-output name=DOCKER_HUB_TOKEN::true" + [ ! -z "${{ secrets.SONAR_TOKEN }}" ] && echo "SONAR_TOKEN=true" >> $GITHUB_OUTPUT + [ ! -z "${{ secrets.GPG_PRIVATE_KEY }}" ] && echo "GPG_PRIVATE_KEY=true" >> $GITHUB_OUTPUT + [ ! -z "${{ secrets.GPG_PASSPHRASE }}" ] && echo "GPG_PASSPHRASE=true" >> $GITHUB_OUTPUT + [ ! -z "${{ secrets.DOCKER_HUB_TOKEN }}" ] && echo "DOCKER_HUB_TOKEN=true" >> $GITHUB_OUTPUT exit 0 build-extensions: @@ -71,14 +71,8 @@ jobs: needs: [ secret-presence ] steps: # Set-Up - - name: Checkout - uses: actions/checkout@v3.3.0 - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + - uses: actions/checkout@v3.3.0 + - uses: ./.github/actions/setup-java # Build - name: Build Extensions run: |- @@ -87,34 +81,8 @@ jobs: GITHUB_PACKAGE_USERNAME: ${{ github.actor }} GITHUB_PACKAGE_PASSWORD: ${{ secrets.GITHUB_TOKEN }} - build-controlplane: - name: "Create Docker Images for the ControlPlane" - runs-on: ubuntu-latest - needs: [ secret-presence ] - if: | - needs.secret-presence.outputs.DOCKER_HUB_TOKEN - strategy: - fail-fast: false - matrix: - name: - - edc-runtime-memory - - edc-controlplane-memory-hashicorp-vault - - edc-controlplane-postgresql - - edc-controlplane-postgresql-hashicorp-vault - permissions: - contents: write - steps: - - name: Checkout - uses: actions/checkout@v3 - - uses: ./.github/actions/publish-docker-image - with: - rootDir: edc-controlplane/${{ matrix.name }} - imagename: ${{ matrix.name }} - docker_user: ${{ secrets.DOCKER_HUB_USER }} - docker_token: ${{ secrets.DOCKER_HUB_TOKEN }} - - build-dataplane: - name: "Create Docker Images for the DataPlane" + build-docker-images: + name: "Create Docker Images" runs-on: ubuntu-latest needs: [ secret-presence ] if: | @@ -122,18 +90,22 @@ jobs: strategy: fail-fast: false matrix: - name: - - edc-dataplane-azure-vault - - edc-dataplane-hashicorp-vault + variant: [ { dir: edc-controlplane, img: edc-runtime-memory }, + { dir: edc-controlplane, img: edc-controlplane-memory-hashicorp-vault }, + { dir: edc-controlplane, img: edc-controlplane-postgresql-hashicorp-vault }, + { dir: edc-controlplane, img: edc-controlplane-postgresql }, + { dir: edc-dataplane, img: edc-dataplane-azure-vault }, + { dir: edc-dataplane, img: edc-dataplane-hashicorp-vault } ] permissions: contents: write steps: - - name: Checkout - uses: actions/checkout@v3 + - uses: actions/checkout@v3.3.0 - uses: ./.github/actions/publish-docker-image + name: Publish ${{ matrix.variant.img }} with: - rootDir: edc-dataplane/${{ matrix.name }} - imagename: ${{ matrix.name }} + docker_tag: ${{ 
needs.release-version.outputs.RELEASE_VERSION }} + rootDir: ${{ matrix.variant.dir }}/${{ matrix.variant.img }} + imagename: ${{ matrix.variant.img }} docker_user: ${{ secrets.DOCKER_HUB_USER }} docker_token: ${{ secrets.DOCKER_HUB_TOKEN }} @@ -149,15 +121,9 @@ jobs: needs.secret-presence.outputs.GPG_PASSPHRASE && needs.secret-presence.outputs.GPG_PRIVATE_KEY && github.event_name != 'pull_request' && github.ref != 'refs/heads/releases' steps: # Set-Up - - name: Checkout - uses: actions/checkout@v3.3.0 + - uses: actions/checkout@v3.3.0 - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + - uses: ./.github/actions/setup-java - name: Import GPG Key uses: crazy-max/ghaction-import-gpg@v5 with: @@ -173,4 +139,3 @@ jobs: REPO: ${{ github.repository }} GITHUB_PACKAGE_USERNAME: ${{ github.actor }} GITHUB_PACKAGE_PASSWORD: ${{ secrets.GITHUB_TOKEN }} - diff --git a/.github/workflows/business-tests.yaml b/.github/workflows/business-tests.yaml index 39caaadb1..cbf0f3767 100644 --- a/.github/workflows/business-tests.yaml +++ b/.github/workflows/business-tests.yaml @@ -50,15 +50,9 @@ jobs: ### Set-Up ### ############## - - name: Checkout uses: actions/checkout@v3.3.0 - - name: Set-Up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + uses: ./.github/actions/setup-java - name: Cache ContainerD Image Layers uses: actions/cache@v3 @@ -185,6 +179,7 @@ jobs: --set dataplane.image.repository=docker.io/library/edc-dataplane-hashicorp-vault \ --set controlplane.debug.enabled=true \ --set controlplane.suspendOnStart=false \ + --set controlplane.businesspartnervalidation.log.agreement.validation=true \ --set postgresql.enabled=true \ --set postgresql.username=user \ --set postgresql.password=password \ @@ -218,6 +213,7 @@ jobs: --set dataplane.image.repository=docker.io/library/edc-dataplane-hashicorp-vault \ --set controlplane.debug.enabled=true \ --set controlplane.suspendOnStart=false \ + --set controlplane.businesspartnervalidation.log.agreement.validation=true \ --set postgresql.enabled=true \ --set postgresql.username=user \ --set postgresql.password=password \ diff --git a/.github/workflows/deployment-test.yaml b/.github/workflows/deployment-test.yaml index 7d38b24ac..8e75ae31e 100644 --- a/.github/workflows/deployment-test.yaml +++ b/.github/workflows/deployment-test.yaml @@ -47,8 +47,7 @@ jobs: deployment-test-memory: runs-on: ubuntu-latest steps: - - name: Checkout - uses: actions/checkout@v3.3.0 + - uses: actions/checkout@v3.3.0 - uses: ./.github/actions/run-deployment-test name: "Run deployment test using KinD and Helm" with: diff --git a/.github/workflows/draft-new-release.yaml b/.github/workflows/draft-new-release.yaml index 248f61bc4..98e3c956a 100644 --- a/.github/workflows/draft-new-release.yaml +++ b/.github/workflows/draft-new-release.yaml @@ -33,12 +33,7 @@ jobs: git config user.name "GitHub actions" git config user.email noreply@github.com - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + uses: ./.github/actions/setup-java - name: Bump version in gradle.properties run: |- @@ -68,7 +63,7 @@ jobs: git add CHANGELOG.md gradle.properties $(find charts -name Chart.yaml) $(find charts -name README.md) git commit --message "Prepare release ${{ github.event.inputs.version }}" - echo "::set-output name=commit::$(git rev-parse HEAD)" + echo "commit=$(git rev-parse 
HEAD)" >> $GITHUB_OUTPUT - name: Push new branch run: git push origin release/${{ github.event.inputs.version }} diff --git a/.github/workflows/helm-chart-release.yaml b/.github/workflows/helm-chart-release.yaml index bd5e55302..f19c841b9 100644 --- a/.github/workflows/helm-chart-release.yaml +++ b/.github/workflows/helm-chart-release.yaml @@ -38,8 +38,7 @@ jobs: steps: # fetch-depth: 0 is required to determine differences in chart(s) - - name: Checkout - uses: actions/checkout@v3 + - uses: actions/checkout@v3.3.0 with: fetch-depth: 0 diff --git a/.github/workflows/helm-lint.yaml b/.github/workflows/helm-lint.yaml index ae94c84a7..0b5a70f1f 100644 --- a/.github/workflows/helm-lint.yaml +++ b/.github/workflows/helm-lint.yaml @@ -26,7 +26,6 @@ jobs: ### Set-Up ### ############## - - name: Checkout uses: actions/checkout@v3.3.0 with: fetch-depth: 0 @@ -52,7 +51,7 @@ jobs: run: | changed=$(ct list-changed --config ct.yaml --target-branch main) if [[ -n "$changed" ]]; then - echo "::set-output name=changed::true" + echo "changed=true" >> $GITHUB_OUTPUT fi - name: chart-testing (lint) diff --git a/.github/workflows/publish-docker.yaml b/.github/workflows/publish-docker.yaml index 794d15061..bbe7a5d10 100644 --- a/.github/workflows/publish-docker.yaml +++ b/.github/workflows/publish-docker.yaml @@ -28,6 +28,9 @@ on: description: 'The namespace (=repo) in DockerHub' required: false default: "tractusx" + docker_tag: + description: 'Explicitly specify the Docker tag. Note that SHA and latest are added automatically.' + required: false concurrency: # cancel only running jobs on pull requests @@ -35,51 +38,29 @@ concurrency: cancel-in-progress: true jobs: - create-docker-image-controlplane: + create-docker-image: name: "Create Docker Images for the ControlPlane" runs-on: ubuntu-latest strategy: fail-fast: false matrix: - name: - - edc-runtime-memory - - edc-controlplane-memory-hashicorp-vault - - edc-controlplane-postgresql - - edc-controlplane-postgresql-hashicorp-vault + variant: [ { dir: edc-controlplane, img: edc-runtime-memory }, + { dir: edc-controlplane, img: edc-controlplane-memory-hashicorp-vault }, + { dir: edc-controlplane, img: edc-controlplane-postgresql-hashicorp-vault }, + { dir: edc-controlplane, img: edc-controlplane-postgresql }, + { dir: edc-dataplane, img: edc-dataplane-azure-vault }, + { dir: edc-dataplane, img: edc-dataplane-hashicorp-vault } ] permissions: contents: write packages: write steps: - - name: Checkout - uses: actions/checkout@v3 + - uses: actions/checkout@v3.3.0 - uses: ./.github/actions/publish-docker-image + name: Publish ${{ matrix.variant.img }} with: - rootDir: edc-controlplane/${{ matrix.name }} - imagename: ${{ matrix.name }} + docker_tag: ${{ needs.release-version.outputs.RELEASE_VERSION }} + rootDir: ${{ matrix.variant.dir }}/${{ matrix.variant.img }} + imagename: ${{ matrix.variant.img }} namespace: ${{ inputs.namespace }} docker_user: ${{ secrets.DOCKER_HUB_USER }} docker_token: ${{ secrets.DOCKER_HUB_TOKEN }} - - - create-docker-image-dataplane: - name: "Create Docker Images for the DataPlane" - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - name: - - edc-dataplane-azure-vault - - edc-dataplane-hashicorp-vault - permissions: - contents: write - packages: write - steps: - - name: Checkout - uses: actions/checkout@v3 - - uses: ./.github/actions/publish-docker-image - with: - rootDir: edc-dataplane/${{ matrix.name }} - imagename: ${{ matrix.name }} - namespace: ${{ inputs.namespace }} - docker_user: ${{ secrets.DOCKER_HUB_USER }} - 
docker_token: ${{ secrets.DOCKER_HUB_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/publish-new-release.yml b/.github/workflows/publish-new-release.yml index 373c892e7..0da6f5da5 100644 --- a/.github/workflows/publish-new-release.yml +++ b/.github/workflows/publish-new-release.yml @@ -37,7 +37,7 @@ jobs: name: Output release version id: release-version run: | - echo "::set-output name=RELEASE_VERSION::${{ env.RELEASE_VERSION }}" + echo "RELEASE_VERSION=${{ env.RELEASE_VERSION }}" >> $GITHUB_OUTPUT # Release: Maven Artifacts maven-release: @@ -54,19 +54,13 @@ jobs: run: | echo "RELEASE_VERSION=${{ needs.release-version.outputs.RELEASE_VERSION }}" >> $GITHUB_ENV - - name: Checkout uses: actions/checkout@v3.3.0 - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + uses: ./.github/actions/setup-java - name: Import GPG Key uses: crazy-max/ghaction-import-gpg@v5 - env: + with: gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} passphrase: ${{ secrets.GPG_PASSPHRASE }} @@ -79,6 +73,35 @@ jobs: GITHUB_PACKAGE_USERNAME: ${{ github.actor }} GITHUB_PACKAGE_PASSWORD: ${{ secrets.GITHUB_TOKEN }} + docker-release: + name: Publish Docker images + runs-on: ubuntu-latest + needs: [ release-version ] + permissions: + contents: write + if: github.event.pull_request.merged == true && needs.release-version.outputs.RELEASE_VERSION + + strategy: + fail-fast: false + matrix: + variant: [{dir: edc-controlplane, img: edc-runtime-memory}, + {dir: edc-controlplane, img: edc-controlplane-memory-hashicorp-vault}, + {dir: edc-controlplane, img: edc-controlplane-postgresql-hashicorp-vault}, + {dir: edc-controlplane, img: edc-controlplane-postgresql}, + {dir: edc-dataplane, img: edc-dataplane-azure-vault}, + {dir: edc-dataplane, img: edc-dataplane-hashicorp-vault}] + + steps: + - uses: actions/checkout@v3.3.0 + - uses: ./.github/actions/publish-docker-image + name: Publish ${{ matrix.variant.img }} + with: + docker_tag: ${{ needs.release-version.outputs.RELEASE_VERSION }} + rootDir: ${{ matrix.variant.dir }}/${{ matrix.variant.img }} + imagename: ${{ matrix.variant.img }} + docker_user: ${{ secrets.DOCKER_HUB_USER }} + docker_token: ${{ secrets.DOCKER_HUB_TOKEN }} + # Release: Helm Charts helm-release: name: Publish new helm release @@ -96,7 +119,6 @@ jobs: run: | echo "RELEASE_VERSION=${{ needs.release-version.outputs.RELEASE_VERSION }}" >> $GITHUB_ENV - - name: Checkout uses: actions/checkout@v3.3.0 with: fetch-depth: 0 @@ -144,7 +166,6 @@ jobs: run: | echo "RELEASE_VERSION=${{ needs.release-version.outputs.RELEASE_VERSION }}" >> $GITHUB_ENV - - name: Checkout uses: actions/checkout@v3.3.0 with: # 0 to fetch the full history due to upcoming merge of releases into main branch @@ -177,12 +198,7 @@ jobs: draft: false prerelease: false - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + uses: ./.github/actions/setup-java - name: Merge releases back into main and set new snapshot version if: github.event.pull_request.base.ref == 'releases' diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index c315e8a07..2fe44c399 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -27,7 +27,7 @@ jobs: - name: Resolve git 7-chars sha id: git-sha7 run: | - echo "::set-output name=SHA7::${GITHUB_SHA::7}" + echo "SHA7=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT trivy-analyze-config: runs-on: ubuntu-latest @@ -36,8 +36,7 @@ jobs: 
      contents: read
      security-events: write
    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v3.3.0
+      - uses: actions/checkout@v3.3.0
      - name: Run Trivy vulnerability scanner in repo mode
        uses: aquasecurity/trivy-action@master
        with:
@@ -72,10 +71,18 @@
          - edc-dataplane-azure-vault
          - edc-dataplane-hashicorp-vault
    steps:
-      - name: Checkout
-        uses: actions/checkout@v3.3.0
+      - uses: actions/checkout@v3.3.0
+
+      ## This step will fail if the Docker image is not found
+      - name: "Check if image exists"
+        id: imageCheck
+        run: |
+          docker manifest inspect tractusx/${{ matrix.image }}:sha-${{ needs.git-sha7.outputs.value }}
+        continue-on-error: true
+
+      ## The next two steps only execute if the image existence check was successful
      - name: Run Trivy vulnerability scanner
-        if: always()
+        if: success() && steps.imageCheck.outcome != 'failure'
        uses: aquasecurity/trivy-action@master
        with:
          image-ref: "tractusx/${{ matrix.image }}:sha-${{ needs.git-sha7.outputs.value }}"
@@ -85,7 +92,7 @@
          severity: "CRITICAL,HIGH"
          timeout: "10m0s"
      - name: Upload Trivy scan results to GitHub Security tab
-        if: always()
+        if: success() && steps.imageCheck.outcome != 'failure'
        uses: github/codeql-action/upload-sarif@v2
        with:
          sarif_file: "trivy-results-${{ matrix.image }}.sarif"
diff --git a/.github/workflows/veracode.yaml b/.github/workflows/veracode.yaml
index bba9df1b5..b8900971c 100644
--- a/.github/workflows/veracode.yaml
+++ b/.github/workflows/veracode.yaml
@@ -16,73 +16,21 @@ jobs:
      - name: Check whether secrets exist
        id: secret-presence
        run: |
-          [ ! -z "${{ secrets.ORG_VERACODE_API_ID }}" ] && echo "::set-output name=ORG_VERACODE_API_ID::true"
-          [ ! -z "${{ secrets.ORG_VERACODE_API_KEY }}" ] && echo "::set-output name=ORG_VERACODE_API_KEY::true"
+          [ ! -z "${{ secrets.ORG_VERACODE_API_ID }}" ] && echo "ORG_VERACODE_API_ID=true" >> $GITHUB_OUTPUT
+          [ !
-z "${{ secrets.ORG_VERACODE_API_KEY }}" ] && echo "ORG_VERACODE_API_KEY=true" >> $GITHUB_OUTPUT exit 0 verify-formatting: runs-on: ubuntu-latest steps: - - name: Checkout - uses: actions/checkout@v3.3.0 + - uses: actions/checkout@v3.3.0 with: fetch-depth: 0 - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + - uses: ./.github/actions/setup-java - name: Verify proper formatting run: ./gradlew spotlessCheck - build-controlplane: - runs-on: ubuntu-latest - needs: [ secret-presence, verify-formatting ] - permissions: - contents: read - strategy: - fail-fast: false - matrix: - name: - - edc-runtime-memory - - edc-controlplane-memory-hashicorp-vault - - edc-controlplane-postgresql - - edc-controlplane-postgresql-hashicorp-vault - steps: - # Set-Up - - name: Checkout - uses: actions/checkout@v3.3.0 - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' - # Build - - name: Build Controlplane - run: |- - ./gradlew -p edc-controlplane/${{ matrix.name }} shadowJar - env: - GITHUB_PACKAGE_USERNAME: ${{ github.actor }} - GITHUB_PACKAGE_PASSWORD: ${{ secrets.GITHUB_TOKEN }} - - name: Tar gzip files for veracode upload - run: |- - tar -czvf edc-controlplane/${{ matrix.name }}/build/libs/${{ matrix.name }}.tar.gz edc-controlplane/${{ matrix.name }}/build/libs/${{ matrix.name }}.jar - - name: Veracode Upload And Scan - uses: veracode/veracode-uploadandscan-action@v1.0 - if: | - needs.secret-presence.outputs.ORG_VERACODE_API_ID && needs.secret-presence.outputs.ORG_VERACODE_API_KEY - continue-on-error: true - with: - appname: product-edc/${{ matrix.name }} - createprofile: true - version: ${{ matrix.name }}-${{ github.sha }} - filepath: edc-controlplane/${{ matrix.name }}/build/libs/${{ matrix.name }}.tar.gz - vid: ${{ secrets.ORG_VERACODE_API_ID }} - vkey: ${{ secrets.ORG_VERACODE_API_KEY }} - - build-dataplane: + build: runs-on: ubuntu-latest needs: [ secret-presence, verify-formatting ] permissions: @@ -90,39 +38,35 @@ jobs: strategy: fail-fast: false matrix: - name: - - edc-dataplane-azure-vault - - edc-dataplane-hashicorp-vault + variant: [ { dir: edc-controlplane, name: edc-runtime-memory }, + { dir: edc-controlplane, name: edc-controlplane-memory-hashicorp-vault }, + { dir: edc-controlplane, name: edc-controlplane-postgresql-hashicorp-vault }, + { dir: edc-controlplane, name: edc-controlplane-postgresql }, + { dir: edc-dataplane, name: edc-dataplane-azure-vault }, + { dir: edc-dataplane, name: edc-dataplane-hashicorp-vault } ] steps: # Set-Up - - name: Checkout - uses: actions/checkout@v3.3.0 - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + - uses: actions/checkout@v3.3.0 + - uses: ./.github/actions/setup-java # Build - - name: Build Dataplane + - name: Build ${{ matrix.variant.name }} run: |- - ./gradlew -p edc-dataplane/${{ matrix.name }} shadowJar + ./gradlew -p ${{ matrix.variant.dir }}/${{ matrix.variant.name }} shadowJar env: GITHUB_PACKAGE_USERNAME: ${{ github.actor }} GITHUB_PACKAGE_PASSWORD: ${{ secrets.GITHUB_TOKEN }} - name: Tar gzip files for veracode upload run: |- - tar -czvf edc-dataplane/${{ matrix.name }}/build/libs/${{ matrix.name }}.tar.gz edc-dataplane/${{ matrix.name }}/build/libs/${{ matrix.name }}.jar + tar -czvf ${{ matrix.variant.dir }}/${{ matrix.variant.name }}/build/libs/${{ matrix.variant.name }}.tar.gz ${{ matrix.variant.dir }}/${{ 
matrix.variant.name }}/build/libs/${{ matrix.variant.name }}.jar - name: Veracode Upload And Scan uses: veracode/veracode-uploadandscan-action@v1.0 if: | needs.secret-presence.outputs.ORG_VERACODE_API_ID && needs.secret-presence.outputs.ORG_VERACODE_API_KEY continue-on-error: true with: - appname: product-edc/${{ matrix.name }} + appname: tractusx-edc/${{ matrix.variant.name }} createprofile: true - version: ${{ matrix.name }}-${{ github.sha }} - filepath: edc-dataplane/${{ matrix.name }}/build/libs/${{ matrix.name }}.tar.gz + version: ${{ matrix.variant.name }}-${{ github.sha }} + filepath: ${{ matrix.variant.dir }}/${{ matrix.variant.name }}/build/libs/${{ matrix.variant.name }}.tar.gz vid: ${{ secrets.ORG_VERACODE_API_ID }} vkey: ${{ secrets.ORG_VERACODE_API_KEY }} - diff --git a/.github/workflows/verify.yaml b/.github/workflows/verify.yaml index d9dda3844..2cd0432f8 100644 --- a/.github/workflows/verify.yaml +++ b/.github/workflows/verify.yaml @@ -52,21 +52,15 @@ jobs: - name: Check whether secrets exist id: secret-presence run: | - [ ! -z "${{ secrets.SONAR_TOKEN }}" ] && echo "::set-output name=SONAR_TOKEN::true" + [ ! -z "${{ secrets.SONAR_TOKEN }}" ] && echo "SONAR_TOKEN=true" >> $GITHUB_OUTPUT exit 0 verify-formatting: runs-on: ubuntu-latest steps: - - name: Checkout - uses: actions/checkout@v3.3.0 + - uses: actions/checkout@v3.3.0 - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + - uses: ./.github/actions/setup-java - name: Verify proper formatting run: ./gradlew spotlessCheck @@ -78,7 +72,7 @@ jobs: markdown-lint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v3.3.0 - name: Install mardkdownlint run: npm install -g markdownlint-cli2 @@ -91,15 +85,9 @@ jobs: runs-on: ubuntu-latest needs: [ verify-formatting ] steps: - - name: Checkout - uses: actions/checkout@v3.3.0 + - uses: actions/checkout@v3.3.0 - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + - uses: ./.github/actions/setup-java - name: Run Unit tests run: ./gradlew test @@ -108,15 +96,9 @@ jobs: runs-on: ubuntu-latest needs: [ verify-formatting ] steps: - - name: Checkout - uses: actions/checkout@v3.3.0 + - uses: actions/checkout@v3.3.0 - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + - uses: ./.github/actions/setup-java - name: Run Integration tests run: ./gradlew test -DincludeTags="ComponentTest" @@ -125,15 +107,9 @@ jobs: runs-on: ubuntu-latest needs: [ verify-formatting ] steps: - - name: Checkout - uses: actions/checkout@v3.3.0 + - uses: actions/checkout@v3.3.0 - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + - uses: ./.github/actions/setup-java - name: Run API tests run: ./gradlew test -DincludeTags="ApiTest" @@ -142,15 +118,9 @@ jobs: runs-on: ubuntu-latest needs: [ verify-formatting ] steps: - - name: Checkout - uses: actions/checkout@v3.3.0 + - uses: actions/checkout@v3.3.0 - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + - uses: ./.github/actions/setup-java - name: Run E2E tests run: ./gradlew :edc-tests:runtime:build test -DincludeTags="EndToEndTest" @@ -162,16 +132,10 @@ jobs: runs-on: ubuntu-latest steps: # Set-Up - - name: Checkout - uses: 
actions/checkout@v3.3.0 + - uses: actions/checkout@v3.3.0 with: fetch-depth: 0 - - name: Set up JDK 11 - uses: actions/setup-java@v3.11.0 - with: - java-version: '17' - distribution: 'temurin' - cache: 'gradle' + - uses: ./.github/actions/setup-java - name: Cache SonarCloud packages uses: actions/cache@v3 with: diff --git a/CHANGELOG.md b/CHANGELOG.md index e84846211..7ce0ca990 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,17 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.3.3] - 2023-04-19 + +### Fixed + +- Config values for the data plane part of the helm chart +- Contract Validity + +### Added + +- A log line whenever a policy evaluation of the BPN number was performed + ## [0.3.2] - 2023-03-30 ### Fixed @@ -270,9 +281,11 @@ corresponding [documentation](/docs/migration/Version_0.0.x_0.1.x.md). ## [0.0.1] - 2022-05-13 -[Unreleased]: https://github.com/catenax-ng/tx-tractusx-edc/compare/0.3.2...HEAD +[Unreleased]: https://github.com/eclipse-tractusx/tractusx-edc/compare/0.3.3...HEAD + +[0.3.3]: https://github.com/eclipse-tractusx/tractusx-edc/compare/0.3.2...0.3.3 -[0.3.2]: https://github.com/catenax-ng/tx-tractusx-edc/compare/0.3.1...0.3.2 +[0.3.2]: https://github.com/eclipse-tractusx/tractusx-edc/compare/0.3.1...0.3.2 [0.3.1]: https://github.com/eclipse-tractusx/tractusx-edc/compare/0.3.0...0.3.1 diff --git a/charts/tractusx-connector-memory/Chart.yaml b/charts/tractusx-connector-memory/Chart.yaml index 42b139a55..cb0a06b72 100644 --- a/charts/tractusx-connector-memory/Chart.yaml +++ b/charts/tractusx-connector-memory/Chart.yaml @@ -34,12 +34,12 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.3.2 +version: 0.3.3 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "0.3.2" +appVersion: "0.3.3" home: https://github.com/eclipse-tractusx/tractusx-edc/tree/main/charts/tractusx-connector-memory sources: - https://github.com/eclipse-tractusx/tractusx-edc/tree/main/charts/tractusx-connector-memory diff --git a/charts/tractusx-connector-memory/README.md b/charts/tractusx-connector-memory/README.md index 1e37bc286..872827664 100644 --- a/charts/tractusx-connector-memory/README.md +++ b/charts/tractusx-connector-memory/README.md @@ -1,241 +1,147 @@ -# tractusx-connector +# tractusx-connector-memory -![Version: 0.3.2](https://img.shields.io/badge/Version-0.3.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.3.2](https://img.shields.io/badge/AppVersion-0.3.2-informational?style=flat-square) +![Version: 0.3.3](https://img.shields.io/badge/Version-0.3.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.3.3](https://img.shields.io/badge/AppVersion-0.3.3-informational?style=flat-square) -A Helm chart for Tractus-X Eclipse Data Space Connector +A Helm chart for Tractus-X Eclipse Data Space Connector based on memory -**Homepage:** +**Homepage:** ## TL;DR ```shell helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev -helm install my-release tractusx-edc/tractusx-connector --version 0.3.2 +helm install my-release tractusx-edc/tractusx-connector-memory --version 0.3.3 ``` ## Source Code -* +* ## Values -| Key | Type | Default | Description | -|---------------------------------------------------------|--------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| backendService.httpProxyTokenReceiverUrl | string | `""` | | -| runtime.affinity | object | `{}` | | -| runtime.autoscaling.enabled | bool | `false` | Enables [horizontal pod autoscaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) | -| runtime.autoscaling.maxReplicas | int | `100` | Maximum replicas if resource consumption exceeds resource threshholds | -| runtime.autoscaling.minReplicas | int | `1` | Minimal replicas if resource consumption falls below resource threshholds | -| runtime.autoscaling.targetCPUUtilizationPercentage | int | `80` | targetAverageUtilization of cpu provided to a pod | -| runtime.autoscaling.targetMemoryUtilizationPercentage | int | `80` | targetAverageUtilization of memory provided to a pod | -| runtime.debug.enabled | bool | `false` | | -| runtime.debug.port | int | `1044` | | -| runtime.debug.suspendOnStart | bool | `false` | | -| runtime.endpoints | object | 
`{"control":{"path":"/control","port":8083},"data":{"authKey":"","path":"/data","port":8081},"default":{"path":"/api","port":8080},"ids":{"path":"/api/v1/ids","port":8084},"metrics":{"path":"/metrics","port":9090},"observability":{"insecure":true,"path":"/observability","port":8085},"validation":{"path":"/validation","port":8082}}` | endpoints of the control plane | -| runtime.endpoints.control | object | `{"path":"/control","port":8083}` | control api, used for internal control calls. can be added to the internal ingress, but should probably not | -| runtime.endpoints.control.path | string | `"/control"` | path for incoming api calls | -| runtime.endpoints.control.port | int | `8083` | port for incoming api calls | -| runtime.endpoints.data | object | `{"authKey":"","path":"/data","port":8081}` | data management api, used by internal users, can be added to an ingress and must not be internet facing | -| runtime.endpoints.data.authKey | string | `""` | authentication key, must be attached to each 'X-Api-Key' request header | -| runtime.endpoints.data.path | string | `"/data"` | path for incoming api calls | -| runtime.endpoints.data.port | int | `8081` | port for incoming api calls | -| runtime.endpoints.default | object | `{"path":"/api","port":8080}` | default api for health checks, should not be added to any ingress | -| runtime.endpoints.default.path | string | `"/api"` | path for incoming api calls | -| runtime.endpoints.default.port | int | `8080` | port for incoming api calls | -| runtime.endpoints.ids | object | `{"path":"/api/v1/ids","port":8084}` | ids api, used for inter connector communication and must be internet facing | -| runtime.endpoints.ids.path | string | `"/api/v1/ids"` | path for incoming api calls | -| runtime.endpoints.ids.port | int | `8084` | port for incoming api calls | -| runtime.endpoints.metrics | object | `{"path":"/metrics","port":9090}` | metrics api, used for application metrics, must not be internet facing | -| runtime.endpoints.metrics.path | string | `"/metrics"` | path for incoming api calls | -| runtime.endpoints.metrics.port | int | `9090` | port for incoming api calls | -| runtime.endpoints.observability | object | `{"insecure":true,"path":"/observability","port":8085}` | observability api with unsecured access, must not be internet facing | -| runtime.endpoints.observability.insecure | bool | `true` | allow or disallow insecure access, i.e. access without authentication | -| runtime.endpoints.observability.path | string | `"/observability"` | observability api, provides /health /readiness and /liveness endpoints | -| runtime.endpoints.observability.port | int | `8085` | port for incoming API calls | -| runtime.endpoints.validation | object | `{"path":"/validation","port":8082}` | validation api, only used by the data plane and should not be added to any ingress | -| runtime.endpoints.validation.path | string | `"/validation"` | path for incoming api calls | -| runtime.endpoints.validation.port | int | `8082` | port for incoming api calls | -| runtime.env | object | `{}` | | -| runtime.envConfigMapNames | list | `[]` | | -| runtime.envSecretNames | list | `[]` | | -| runtime.envValueFrom | object | `{}` | | -| runtime.image.pullPolicy | string | `"IfNotPresent"` | [Kubernetes image pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) to use | -| runtime.image.repository | string | `""` | Which derivate of the control plane to use. 
when left empty the deployment will select the correct image automatically | -| runtime.image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion | -| runtime.ingresses[0].annotations | object | `{}` | Additional ingress annotations to add | -| runtime.ingresses[0].certManager.clusterIssuer | string | `""` | If preset enables certificate generation via cert-manager cluster-wide issuer | -| runtime.ingresses[0].certManager.issuer | string | `""` | If preset enables certificate generation via cert-manager namespace scoped issuer | -| runtime.ingresses[0].className | string | `""` | Defines the [ingress class](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class) to use | -| runtime.ingresses[0].enabled | bool | `false` | | -| runtime.ingresses[0].endpoints | list | `["ids"]` | EDC endpoints exposed by this ingress resource | -| runtime.ingresses[0].hostname | string | `"edc-control.local"` | The hostname to be used to precisely map incoming traffic onto the underlying network service | -| runtime.ingresses[0].tls | object | `{"enabled":false,"secretName":""}` | TLS [tls class](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) applied to the ingress resource | -| runtime.ingresses[0].tls.enabled | bool | `false` | Enables TLS on the ingress resource | -| runtime.ingresses[0].tls.secretName | string | `""` | If present overwrites the default secret name | -| runtime.ingresses[1].annotations | object | `{}` | Additional ingress annotations to add | -| runtime.ingresses[1].certManager.clusterIssuer | string | `""` | If preset enables certificate generation via cert-manager cluster-wide issuer | -| runtime.ingresses[1].certManager.issuer | string | `""` | If preset enables certificate generation via cert-manager namespace scoped issuer | -| runtime.ingresses[1].className | string | `""` | Defines the [ingress class](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class) to use | -| runtime.ingresses[1].enabled | bool | `false` | | -| runtime.ingresses[1].endpoints | list | `["data","control"]` | EDC endpoints exposed by this ingress resource | -| runtime.ingresses[1].hostname | string | `"edc-control.intranet"` | The hostname to be used to precisely map incoming traffic onto the underlying network service | -| runtime.ingresses[1].tls | object | `{"enabled":false,"secretName":""}` | TLS [tls class](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) applied to the ingress resource | -| runtime.ingresses[1].tls.enabled | bool | `false` | Enables TLS on the ingress resource | -| runtime.ingresses[1].tls.secretName | string | `""` | If present overwrites the default secret name | -| runtime.initContainers | list | `[]` | | -| runtime.internationalDataSpaces.catalogId | string | `"TXDC-Catalog"` | | -| runtime.internationalDataSpaces.curator | string | `""` | | -| runtime.internationalDataSpaces.description | string | `"Tractus-X Eclipse IDS Data Space Connector"` | | -| runtime.internationalDataSpaces.id | string | `"TXDC"` | | -| runtime.internationalDataSpaces.maintainer | string | `""` | | -| runtime.internationalDataSpaces.title | string | `""` | | -| runtime.livenessProbe.enabled | bool | `true` | Whether to enable kubernetes [liveness-probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) | -| runtime.livenessProbe.failureThreshold | int | `6` | when a probe fails kubernetes will try 6 times before giving up | -| 
runtime.livenessProbe.initialDelaySeconds | int | `30` | seconds to wait before performing the first liveness check | -| runtime.livenessProbe.periodSeconds | int | `10` | this fields specifies that kubernetes should perform a liveness check every 10 seconds | -| runtime.livenessProbe.successThreshold | int | `1` | number of consecutive successes for the probe to be considered successful after having failed | -| runtime.livenessProbe.timeoutSeconds | int | `5` | number of seconds after which the probe times out | -| runtime.logging | string | `".level=INFO\norg.eclipse.edc.level=ALL\nhandlers=java.util.logging.ConsoleHandler\njava.util.logging.ConsoleHandler.formatter=java.util.logging.SimpleFormatter\njava.util.logging.ConsoleHandler.level=ALL\njava.util.logging.SimpleFormatter.format=[%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS] [%4$-7s] %5$s%6$s%n"` | configuration of the [Java Util Logging Facade](https://docs.oracle.com/javase/7/docs/technotes/guides/logging/overview.html) | -| runtime.nodeSelector | object | `{}` | | -| runtime.opentelemetry | string | `"otel.javaagent.enabled=false\notel.javaagent.debug=false"` | configuration of the [Open Telemetry Agent](https://opentelemetry.io/docs/instrumentation/java/automatic/agent-config/) to collect and expose metrics | -| runtime.podAnnotations | object | `{}` | additional annotations for the pod | -| runtime.podLabels | object | `{}` | additional labels for the pod | -| runtime.podSecurityContext | object | `{"fsGroup":10001,"runAsGroup":10001,"runAsUser":10001,"seccompProfile":{"type":"RuntimeDefault"}}` | The [pod security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod) defines privilege and access control settings for a Pod within the deployment | -| runtime.podSecurityContext.fsGroup | int | `10001` | The owner for volumes and any files created within volumes will belong to this guid | -| runtime.podSecurityContext.runAsGroup | int | `10001` | Processes within a pod will belong to this guid | -| runtime.podSecurityContext.runAsUser | int | `10001` | Runs all processes within a pod with a special uid | -| runtime.podSecurityContext.seccompProfile.type | string | `"RuntimeDefault"` | Restrict a Container's Syscalls with seccomp | -| runtime.readinessProbe.enabled | bool | `true` | Whether to enable kubernetes [readiness-probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) | -| runtime.readinessProbe.failureThreshold | int | `6` | when a probe fails kubernetes will try 6 times before giving up | -| runtime.readinessProbe.initialDelaySeconds | int | `30` | seconds to wait before performing the first readiness check | -| runtime.readinessProbe.periodSeconds | int | `10` | this fields specifies that kubernetes should perform a readiness check every 10 seconds | -| runtime.readinessProbe.successThreshold | int | `1` | number of consecutive successes for the probe to be considered successful after having failed | -| runtime.readinessProbe.timeoutSeconds | int | `5` | number of seconds after which the probe times out | -| runtime.replicaCount | int | `1` | | -| runtime.resources | object | `{}` | [resource management](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for the container | -| runtime.securityContext.allowPrivilegeEscalation | bool | `false` | Controls [Privilege Escalation](https://kubernetes.io/docs/concepts/security/pod-security-policy/#privilege-escalation) enabling setuid 
binaries changing the effective user ID | -| runtime.securityContext.capabilities.add | list | `[]` | Specifies which capabilities to add to issue specialized syscalls | -| runtime.securityContext.capabilities.drop | list | `["ALL"]` | Specifies which capabilities to drop to reduce syscall attack surface | -| runtime.securityContext.readOnlyRootFilesystem | bool | `true` | Whether the root filesystem is mounted in read-only mode | -| runtime.securityContext.runAsNonRoot | bool | `true` | Requires the container to run without root privileges | -| runtime.securityContext.runAsUser | int | `10001` | The container's process will run with the specified uid | -| runtime.service.annotations | object | `{}` | | -| runtime.service.type | string | `"ClusterIP"` | [Service type](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) to expose the running application on a set of Pods as a network service. | -| runtime.tolerations | list | `[]` | | -| runtime.url.ids | string | `""` | Explicitly declared url for reaching the ids api (e.g. if ingresses not used) | -| runtime.volumeMounts | list | `[]` | declare where to mount [volumes](https://kubernetes.io/docs/concepts/storage/volumes/) into the container | -| runtime.volumes | list | `[]` | [volume](https://kubernetes.io/docs/concepts/storage/volumes/) directories | -| customLabels | object | `{}` | | -| daps.clientId | string | `""` | | -| daps.paths.jwks | string | `"/jwks.json"` | | -| daps.paths.token | string | `"/token"` | | -| daps.url | string | `""` | | -| dataplane.affinity | object | `{}` | | -| dataplane.autoscaling.enabled | bool | `false` | Enables [horizontal pod autoscaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) | -| dataplane.autoscaling.maxReplicas | int | `100` | Maximum replicas if resource consumption exceeds resource threshholds | -| dataplane.autoscaling.minReplicas | int | `1` | Minimal replicas if resource consumption falls below resource threshholds | -| dataplane.autoscaling.targetCPUUtilizationPercentage | int | `80` | targetAverageUtilization of cpu provided to a pod | -| dataplane.autoscaling.targetMemoryUtilizationPercentage | int | `80` | targetAverageUtilization of memory provided to a pod | -| dataplane.aws.accessKeyId | string | `""` | | -| dataplane.aws.endpointOverride | string | `""` | | -| dataplane.aws.secretAccessKey | string | `""` | | -| dataplane.debug.enabled | bool | `false` | | -| dataplane.debug.port | int | `1044` | | -| dataplane.debug.suspendOnStart | bool | `false` | | -| dataplane.endpoints.control.path | string | `"/api/dataplane/control"` | | -| dataplane.endpoints.control.port | int | `8083` | | -| dataplane.endpoints.default.path | string | `"/api"` | | -| dataplane.endpoints.default.port | int | `8080` | | -| dataplane.endpoints.metrics.path | string | `"/metrics"` | | -| dataplane.endpoints.metrics.port | int | `9090` | | -| dataplane.endpoints.public.path | string | `"/api/public"` | | -| dataplane.endpoints.public.port | int | `8081` | | -| dataplane.endpoints.validation.path | string | `"/validation"` | | -| dataplane.endpoints.validation.port | int | `8082` | | -| dataplane.env | object | `{}` | | -| dataplane.envConfigMapNames | list | `[]` | | -| dataplane.envSecretNames | list | `[]` | | -| dataplane.envValueFrom | object | `{}` | | -| dataplane.image.pullPolicy | string | `"IfNotPresent"` | [Kubernetes image pull 
policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) to use | -| dataplane.image.repository | string | `""` | Which derivate of the data plane to use. when left empty the deployment will select the correct image automatically | -| dataplane.image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion | -| dataplane.ingresses[0].annotations | object | `{}` | Additional ingress annotations to add | -| dataplane.ingresses[0].certManager.clusterIssuer | string | `""` | If preset enables certificate generation via cert-manager cluster-wide issuer | -| dataplane.ingresses[0].certManager.issuer | string | `""` | If preset enables certificate generation via cert-manager namespace scoped issuer | -| dataplane.ingresses[0].className | string | `""` | Defines the [ingress class](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class) to use | -| dataplane.ingresses[0].enabled | bool | `false` | | -| dataplane.ingresses[0].endpoints | list | `["public"]` | EDC endpoints exposed by this ingress resource | -| dataplane.ingresses[0].hostname | string | `"edc-data.local"` | The hostname to be used to precisely map incoming traffic onto the underlying network service | -| dataplane.ingresses[0].tls | object | `{"enabled":false,"secretName":""}` | TLS [tls class](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) applied to the ingress resource | -| dataplane.ingresses[0].tls.enabled | bool | `false` | Enables TLS on the ingress resource | -| dataplane.ingresses[0].tls.secretName | string | `""` | If present overwrites the default secret name | -| dataplane.initContainers | list | `[]` | | -| dataplane.livenessProbe.enabled | bool | `true` | Whether to enable kubernetes [liveness-probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) | -| dataplane.livenessProbe.failureThreshold | int | `6` | when a probe fails kubernetes will try 6 times before giving up | -| dataplane.livenessProbe.initialDelaySeconds | int | `30` | seconds to wait before performing the first liveness check | -| dataplane.livenessProbe.periodSeconds | int | `10` | this fields specifies that kubernetes should perform a liveness check every 10 seconds | -| dataplane.livenessProbe.successThreshold | int | `1` | number of consecutive successes for the probe to be considered successful after having failed | -| dataplane.livenessProbe.timeoutSeconds | int | `5` | number of seconds after which the probe times out | -| dataplane.logging | string | `".level=INFO\norg.eclipse.edc.level=ALL\nhandlers=java.util.logging.ConsoleHandler\njava.util.logging.ConsoleHandler.formatter=java.util.logging.SimpleFormatter\njava.util.logging.ConsoleHandler.level=ALL\njava.util.logging.SimpleFormatter.format=[%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS] [%4$-7s] %5$s%6$s%n"` | configuration of the [Java Util Logging Facade](https://docs.oracle.com/javase/7/docs/technotes/guides/logging/overview.html) | -| dataplane.nodeSelector | object | `{}` | | -| dataplane.opentelemetry | string | `"otel.javaagent.enabled=false\notel.javaagent.debug=false"` | configuration of the [Open Telemetry Agent](https://opentelemetry.io/docs/instrumentation/java/automatic/agent-config/) to collect and expose metrics | -| dataplane.podAnnotations | object | `{}` | additional annotations for the pod | -| dataplane.podLabels | object | `{}` | additional labels for the pod | -| dataplane.podSecurityContext | object | 
`{"fsGroup":10001,"runAsGroup":10001,"runAsUser":10001,"seccompProfile":{"type":"RuntimeDefault"}}` | The [pod security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod) defines privilege and access control settings for a Pod within the deployment | -| dataplane.podSecurityContext.fsGroup | int | `10001` | The owner for volumes and any files created within volumes will belong to this guid | -| dataplane.podSecurityContext.runAsGroup | int | `10001` | Processes within a pod will belong to this guid | -| dataplane.podSecurityContext.runAsUser | int | `10001` | Runs all processes within a pod with a special uid | -| dataplane.podSecurityContext.seccompProfile.type | string | `"RuntimeDefault"` | Restrict a Container's Syscalls with seccomp | -| dataplane.readinessProbe.enabled | bool | `true` | Whether to enable kubernetes [readiness-probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) | -| dataplane.readinessProbe.failureThreshold | int | `6` | when a probe fails kubernetes will try 6 times before giving up | -| dataplane.readinessProbe.initialDelaySeconds | int | `30` | seconds to wait before performing the first readiness check | -| dataplane.readinessProbe.periodSeconds | int | `10` | this fields specifies that kubernetes should perform a liveness check every 10 seconds | -| dataplane.readinessProbe.successThreshold | int | `1` | number of consecutive successes for the probe to be considered successful after having failed | -| dataplane.readinessProbe.timeoutSeconds | int | `5` | number of seconds after which the probe times out | -| dataplane.replicaCount | int | `1` | | -| dataplane.resources | object | `{}` | [resource management](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for the container | -| dataplane.securityContext.allowPrivilegeEscalation | bool | `false` | Controls [Privilege Escalation](https://kubernetes.io/docs/concepts/security/pod-security-policy/#privilege-escalation) enabling setuid binaries changing the effective user ID | -| dataplane.securityContext.capabilities.add | list | `[]` | Specifies which capabilities to add to issue specialized syscalls | -| dataplane.securityContext.capabilities.drop | list | `["ALL"]` | Specifies which capabilities to drop to reduce syscall attack surface | -| dataplane.securityContext.readOnlyRootFilesystem | bool | `true` | Whether the root filesystem is mounted in read-only mode | -| dataplane.securityContext.runAsNonRoot | bool | `true` | Requires the container to run without root privileges | -| dataplane.securityContext.runAsUser | int | `10001` | The container's process will run with the specified uid | -| dataplane.service.port | int | `80` | | -| dataplane.service.type | string | `"ClusterIP"` | [Service type](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) to expose the running application on a set of Pods as a network service. | -| dataplane.tolerations | list | `[]` | | -| dataplane.url.public | string | `""` | Explicitly declared url for reaching the public api (e.g. 
if ingresses not used) | -| dataplane.volumeMounts | list | `[]` | declare where to mount [volumes](https://kubernetes.io/docs/concepts/storage/volumes/) into the container | -| dataplane.volumes | list | `[]` | [volume](https://kubernetes.io/docs/concepts/storage/volumes/) directories | -| fullnameOverride | string | `""` | | -| imagePullSecrets | list | `[]` | Existing image pull secret to use to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) | -| nameOverride | string | `""` | | -| postgresql.enabled | bool | `false` | | -| postgresql.jdbcUrl | string | `""` | | -| postgresql.password | string | `""` | | -| postgresql.username | string | `""` | | -| serviceAccount.annotations | object | `{}` | | -| serviceAccount.create | bool | `true` | | -| serviceAccount.imagePullSecrets | list | `[]` | Existing image pull secret bound to the service account to use to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) | -| serviceAccount.name | string | `""` | | -| vault.azure.certificate | string | `nil` | | -| vault.azure.client | string | `""` | | -| vault.azure.enabled | bool | `false` | | -| vault.azure.name | string | `""` | | -| vault.azure.secret | string | `nil` | | -| vault.azure.tenant | string | `""` | | -| vault.hashicorp.enabled | bool | `false` | | -| vault.hashicorp.healthCheck.enabled | bool | `true` | | -| vault.hashicorp.healthCheck.standbyOk | bool | `true` | | -| vault.hashicorp.paths.health | string | `"/v1/sys/health"` | | -| vault.hashicorp.paths.secret | string | `"/v1/secret"` | | -| vault.hashicorp.timeout | int | `30` | | -| vault.hashicorp.token | string | `""` | | -| vault.hashicorp.url | string | `""` | | -| vault.secretNames.dapsPrivateKey | string | `"daps-private-key"` | | -| vault.secretNames.dapsPublicKey | string | `"daps-public-key"` | | -| vault.secretNames.transferProxyTokenEncryptionAesKey | string | `"transfer-proxy-token-encryption-aes-key"` | | -| vault.secretNames.transferProxyTokenSignerPrivateKey | string | `"transfer-proxy-token-signer-private-key"` | | -| vault.secretNames.transferProxyTokenSignerPublicKey | string | `"transfer-proxy-token-signer-public-key"` | | +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| backendService.httpProxyTokenReceiverUrl | string | `""` | | +| customLabels | object | `{}` | | +| daps.clientId | string | `""` | | +| daps.paths.jwks | string | `"/jwks.json"` | | +| daps.paths.token | string | `"/token"` | | +| daps.url | string | `""` | | +| fullnameOverride | string | `""` | | +| imagePullSecrets | list | `[]` | Existing image pull secret to use to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) | +| nameOverride | string | `""` | | +| runtime.affinity | object | `{}` | | +| runtime.autoscaling.enabled | bool | `false` | Enables [horizontal pod autoscaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) | +| runtime.autoscaling.maxReplicas | int | `100` | Maximum replicas if resource consumption exceeds resource threshholds | +| runtime.autoscaling.minReplicas | int | `1` | Minimal replicas if resource consumption falls below resource threshholds | +| runtime.autoscaling.targetCPUUtilizationPercentage | int | `80` | 
targetAverageUtilization of cpu provided to a pod | +| runtime.autoscaling.targetMemoryUtilizationPercentage | int | `80` | targetAverageUtilization of memory provided to a pod | +| runtime.businessPartnerValidation.log.agreementValidation | bool | `true` | | +| runtime.debug.enabled | bool | `false` | | +| runtime.debug.port | int | `1044` | | +| runtime.debug.suspendOnStart | bool | `false` | | +| runtime.endpoints | object | `{"control":{"path":"/control","port":8083},"data":{"authKey":"","path":"/data","port":8081},"default":{"path":"/api","port":8080},"ids":{"path":"/api/v1/ids","port":8084},"observability":{"insecure":true,"path":"/observability","port":8085},"public":{"path":"/api/public","port":8086},"validation":{"path":"/validation","port":8082}}` | endpoints of the control plane | +| runtime.endpoints.control | object | `{"path":"/control","port":8083}` | control api, used for internal control calls. can be added to the internal ingress, but should probably not | +| runtime.endpoints.control.path | string | `"/control"` | path for incoming api calls | +| runtime.endpoints.control.port | int | `8083` | port for incoming api calls | +| runtime.endpoints.data | object | `{"authKey":"","path":"/data","port":8081}` | data management api, used by internal users, can be added to an ingress and must not be internet facing | +| runtime.endpoints.data.authKey | string | `""` | authentication key, must be attached to each 'X-Api-Key' request header | +| runtime.endpoints.data.path | string | `"/data"` | path for incoming api calls | +| runtime.endpoints.data.port | int | `8081` | port for incoming api calls | +| runtime.endpoints.default | object | `{"path":"/api","port":8080}` | default api for health checks, should not be added to any ingress | +| runtime.endpoints.default.path | string | `"/api"` | path for incoming api calls | +| runtime.endpoints.default.port | int | `8080` | port for incoming api calls | +| runtime.endpoints.ids | object | `{"path":"/api/v1/ids","port":8084}` | ids api, used for inter connector communication and must be internet facing | +| runtime.endpoints.ids.path | string | `"/api/v1/ids"` | path for incoming api calls | +| runtime.endpoints.ids.port | int | `8084` | port for incoming api calls | +| runtime.endpoints.observability | object | `{"insecure":true,"path":"/observability","port":8085}` | observability api with unsecured access, must not be internet facing | +| runtime.endpoints.observability.insecure | bool | `true` | allow or disallow insecure access, i.e. 
access without authentication | +| runtime.endpoints.observability.path | string | `"/observability"` | observability api, provides /health /readiness and /liveness endpoints | +| runtime.endpoints.observability.port | int | `8085` | port for incoming API calls | +| runtime.endpoints.validation | object | `{"path":"/validation","port":8082}` | validation api, only used by the data plane and should not be added to any ingress | +| runtime.endpoints.validation.path | string | `"/validation"` | path for incoming api calls | +| runtime.endpoints.validation.port | int | `8082` | port for incoming api calls | +| runtime.env | object | `{}` | | +| runtime.envConfigMapNames | list | `[]` | | +| runtime.envSecretNames | list | `[]` | | +| runtime.envValueFrom | object | `{}` | | +| runtime.image.pullPolicy | string | `"IfNotPresent"` | [Kubernetes image pull policy](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy) to use | +| runtime.image.repository | string | `""` | | +| runtime.image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion | +| runtime.ingresses[0].annotations | object | `{}` | Additional ingress annotations to add | +| runtime.ingresses[0].certManager.clusterIssuer | string | `""` | If preset enables certificate generation via cert-manager cluster-wide issuer | +| runtime.ingresses[0].certManager.issuer | string | `""` | If preset enables certificate generation via cert-manager namespace scoped issuer | +| runtime.ingresses[0].className | string | `""` | Defines the [ingress class](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class) to use | +| runtime.ingresses[0].enabled | bool | `false` | | +| runtime.ingresses[0].endpoints | list | `["ids"]` | EDC endpoints exposed by this ingress resource | +| runtime.ingresses[0].hostname | string | `"edc-control.local"` | The hostname to be used to precisely map incoming traffic onto the underlying network service | +| runtime.ingresses[0].tls | object | `{"enabled":false,"secretName":""}` | TLS [tls class](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) applied to the ingress resource | +| runtime.ingresses[0].tls.enabled | bool | `false` | Enables TLS on the ingress resource | +| runtime.ingresses[0].tls.secretName | string | `""` | If present overwrites the default secret name | +| runtime.ingresses[1].annotations | object | `{}` | Additional ingress annotations to add | +| runtime.ingresses[1].certManager.clusterIssuer | string | `""` | If preset enables certificate generation via cert-manager cluster-wide issuer | +| runtime.ingresses[1].certManager.issuer | string | `""` | If preset enables certificate generation via cert-manager namespace scoped issuer | +| runtime.ingresses[1].className | string | `""` | Defines the [ingress class](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class) to use | +| runtime.ingresses[1].enabled | bool | `false` | | +| runtime.ingresses[1].endpoints | list | `["data","control"]` | EDC endpoints exposed by this ingress resource | +| runtime.ingresses[1].hostname | string | `"edc-control.intranet"` | The hostname to be used to precisely map incoming traffic onto the underlying network service | +| runtime.ingresses[1].tls | object | `{"enabled":false,"secretName":""}` | TLS [tls class](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) applied to the ingress resource | +| runtime.ingresses[1].tls.enabled | bool | `false` | Enables TLS on the ingress resource 
| +| runtime.ingresses[1].tls.secretName | string | `""` | If present overwrites the default secret name | +| runtime.initContainers | list | `[]` | | +| runtime.internationalDataSpaces.catalogId | string | `"TXDC-Catalog"` | | +| runtime.internationalDataSpaces.curator | string | `""` | | +| runtime.internationalDataSpaces.description | string | `"Tractus-X Eclipse IDS Data Space Connector"` | | +| runtime.internationalDataSpaces.id | string | `"TXDC"` | | +| runtime.internationalDataSpaces.maintainer | string | `""` | | +| runtime.internationalDataSpaces.title | string | `""` | | +| runtime.livenessProbe.enabled | bool | `true` | Whether to enable kubernetes [liveness-probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) | +| runtime.livenessProbe.failureThreshold | int | `6` | when a probe fails kubernetes will try 6 times before giving up | +| runtime.livenessProbe.initialDelaySeconds | int | `30` | seconds to wait before performing the first liveness check | +| runtime.livenessProbe.periodSeconds | int | `10` | this fields specifies that kubernetes should perform a liveness check every 10 seconds | +| runtime.livenessProbe.successThreshold | int | `1` | number of consecutive successes for the probe to be considered successful after having failed | +| runtime.livenessProbe.timeoutSeconds | int | `5` | number of seconds after which the probe times out | +| runtime.logging | string | `".level=INFO\norg.eclipse.edc.level=ALL\nhandlers=java.util.logging.ConsoleHandler\njava.util.logging.ConsoleHandler.formatter=java.util.logging.SimpleFormatter\njava.util.logging.ConsoleHandler.level=ALL\njava.util.logging.SimpleFormatter.format=[%1$tY-%1$tm-%1$td %1$tH:%1$tM:%1$tS] [%4$-7s] %5$s%6$s%n"` | configuration of the [Java Util Logging Facade](https://docs.oracle.com/javase/7/docs/technotes/guides/logging/overview.html) | +| runtime.nodeSelector | object | `{}` | | +| runtime.podAnnotations | object | `{}` | additional annotations for the pod | +| runtime.podLabels | object | `{}` | additional labels for the pod | +| runtime.podSecurityContext | object | `{"fsGroup":10001,"runAsGroup":10001,"runAsUser":10001,"seccompProfile":{"type":"RuntimeDefault"}}` | The [pod security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod) defines privilege and access control settings for a Pod within the deployment | +| runtime.podSecurityContext.fsGroup | int | `10001` | The owner for volumes and any files created within volumes will belong to this guid | +| runtime.podSecurityContext.runAsGroup | int | `10001` | Processes within a pod will belong to this guid | +| runtime.podSecurityContext.runAsUser | int | `10001` | Runs all processes within a pod with a special uid | +| runtime.podSecurityContext.seccompProfile.type | string | `"RuntimeDefault"` | Restrict a Container's Syscalls with seccomp | +| runtime.readinessProbe.enabled | bool | `true` | Whether to enable kubernetes [readiness-probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) | +| runtime.readinessProbe.failureThreshold | int | `6` | when a probe fails kubernetes will try 6 times before giving up | +| runtime.readinessProbe.initialDelaySeconds | int | `30` | seconds to wait before performing the first readiness check | +| runtime.readinessProbe.periodSeconds | int | `10` | this fields specifies that kubernetes should perform a readiness check every 10 seconds | +| 
runtime.readinessProbe.successThreshold | int | `1` | number of consecutive successes for the probe to be considered successful after having failed | +| runtime.readinessProbe.timeoutSeconds | int | `5` | number of seconds after which the probe times out | +| runtime.replicaCount | int | `1` | | +| runtime.resources | object | `{}` | [resource management](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) for the container | +| runtime.securityContext.allowPrivilegeEscalation | bool | `false` | Controls [Privilege Escalation](https://kubernetes.io/docs/concepts/security/pod-security-policy/#privilege-escalation) enabling setuid binaries changing the effective user ID | +| runtime.securityContext.capabilities.add | list | `[]` | Specifies which capabilities to add to issue specialized syscalls | +| runtime.securityContext.capabilities.drop | list | `["ALL"]` | Specifies which capabilities to drop to reduce syscall attack surface | +| runtime.securityContext.readOnlyRootFilesystem | bool | `true` | Whether the root filesystem is mounted in read-only mode | +| runtime.securityContext.runAsNonRoot | bool | `true` | Requires the container to run without root privileges | +| runtime.securityContext.runAsUser | int | `10001` | The container's process will run with the specified uid | +| runtime.service.annotations | object | `{}` | | +| runtime.service.type | string | `"ClusterIP"` | [Service type](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) to expose the running application on a set of Pods as a network service. | +| runtime.tolerations | list | `[]` | | +| runtime.url.ids | string | `""` | Explicitly declared url for reaching the ids api (e.g. if ingresses not used) | +| runtime.url.public | string | `""` | | +| runtime.url.readiness | string | `""` | | +| runtime.volumeMounts | list | `[]` | declare where to mount [volumes](https://kubernetes.io/docs/concepts/storage/volumes/) into the container | +| runtime.volumes | list | `[]` | [volume](https://kubernetes.io/docs/concepts/storage/volumes/) directories | +| serviceAccount.annotations | object | `{}` | | +| serviceAccount.create | bool | `true` | | +| serviceAccount.imagePullSecrets | list | `[]` | Existing image pull secret bound to the service account to use to [obtain the container image from private registries](https://kubernetes.io/docs/concepts/containers/images/#using-a-private-registry) | +| serviceAccount.name | string | `""` | | +| vault.secretNames.dapsPrivateKey | string | `"daps-private-key"` | | +| vault.secretNames.dapsPublicKey | string | `"daps-public-key"` | | +| vault.secretNames.transferProxyTokenEncryptionAesKey | string | `"transfer-proxy-token-encryption-aes-key"` | | +| vault.secretNames.transferProxyTokenSignerPrivateKey | string | `"transfer-proxy-token-signer-private-key"` | | +| vault.secretNames.transferProxyTokenSignerPublicKey | string | `"transfer-proxy-token-signer-public-key"` | | +| vault.secrets | string | `""` | | ---------------------------------------------- Autogenerated from chart metadata using [helm-docs v1.10.0](https://github.com/norwoodj/helm-docs/releases/v1.10.0) diff --git a/charts/tractusx-connector-memory/README.md.gotmpl b/charts/tractusx-connector-memory/README.md.gotmpl index b1671f5a2..44500d3d1 100644 --- a/charts/tractusx-connector-memory/README.md.gotmpl +++ b/charts/tractusx-connector-memory/README.md.gotmpl @@ -12,7 +12,7 @@ ```shell helm repo add tractusx-edc 
https://eclipse-tractusx.github.io/charts/dev -helm install my-release tractusx-edc/tractusx-connector --version {{ .Version }} +helm install my-release tractusx-edc/tractusx-connector-memory --version {{ .Version }} ``` {{ template "chart.maintainersSection" . }} diff --git a/charts/tractusx-connector-memory/templates/deployment-runtime.yaml b/charts/tractusx-connector-memory/templates/deployment-runtime.yaml index 04386678c..82d162ad8 100644 --- a/charts/tractusx-connector-memory/templates/deployment-runtime.yaml +++ b/charts/tractusx-connector-memory/templates/deployment-runtime.yaml @@ -63,7 +63,7 @@ spec: {{- if .Values.runtime.image.repository }} image: "{{ .Values.runtime.image.repository }}:{{ .Values.runtime.image.tag | default .Chart.AppVersion }}" {{- else }} - image: "ghcr.io/catenax-ng/tx-tractusx-edc/edc-runtime-memory:{{ .Values.runtime.image.tag | default .Chart.AppVersion }}" + image: "tractusx/edc-runtime-memory:{{ .Values.runtime.image.tag | default .Chart.AppVersion }}" {{- end }} imagePullPolicy: {{ .Values.runtime.image.pullPolicy }} @@ -253,6 +253,12 @@ spec: value: "0" - name: "EDC_CP_ADAPTER_REUSE_CONTRACT_AGREEMENT" value: "0" + + ########################### + ## BUSINESS PARTNER NUMBER VALIDATION EXTENSION ## + ########################### + - name: "TRACTUSX_BUSINESSPARTNERVALIDATION_LOG_AGREEMENT_VALIDATION" + value: {{ .Values.runtime.businessPartnerValidation.log.agreementValidation | quote }} ###################################### ## Additional environment variables ## diff --git a/charts/tractusx-connector-memory/values.yaml b/charts/tractusx-connector-memory/values.yaml index 66fa1b7fe..83ce92818 100644 --- a/charts/tractusx-connector-memory/values.yaml +++ b/charts/tractusx-connector-memory/values.yaml @@ -121,6 +121,9 @@ runtime: public: port: 8086 path: /api/public + businessPartnerValidation: + log: + agreementValidation: true service: # -- [Service type](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) to expose the running application on a set of Pods as a network service. type: ClusterIP diff --git a/charts/tractusx-connector/Chart.yaml b/charts/tractusx-connector/Chart.yaml index f9e4322c6..696e94396 100644 --- a/charts/tractusx-connector/Chart.yaml +++ b/charts/tractusx-connector/Chart.yaml @@ -36,12 +36,12 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.3.2 +version: 0.3.3 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "0.3.2" +appVersion: "0.3.3" home: https://github.com/eclipse-tractusx/tractusx-edc/tree/main/charts/tractusx-connector sources: - https://github.com/eclipse-tractusx/tractusx-edc/tree/main/charts/tractusx-connector diff --git a/charts/tractusx-connector/README.md b/charts/tractusx-connector/README.md index af53087c9..12c45b649 100644 --- a/charts/tractusx-connector/README.md +++ b/charts/tractusx-connector/README.md @@ -1,6 +1,6 @@ # tractusx-connector -![Version: 0.3.2](https://img.shields.io/badge/Version-0.3.2-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.3.2](https://img.shields.io/badge/AppVersion-0.3.2-informational?style=flat-square) +![Version: 0.3.3](https://img.shields.io/badge/Version-0.3.3-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.3.3](https://img.shields.io/badge/AppVersion-0.3.3-informational?style=flat-square) A Helm chart for Tractus-X Eclipse Data Space Connector @@ -10,7 +10,7 @@ A Helm chart for Tractus-X Eclipse Data Space Connector ```shell helm repo add tractusx-edc https://eclipse-tractusx.github.io/charts/dev -helm install my-release tractusx-edc/tractusx-connector --version 0.3.2 +helm install my-release tractusx-edc/tractusx-connector --version 0.3.3 ``` ## Source Code @@ -28,23 +28,21 @@ helm install my-release tractusx-edc/tractusx-connector --version 0.3.2 | controlplane.autoscaling.minReplicas | int | `1` | Minimal replicas if resource consumption falls below resource threshholds | | controlplane.autoscaling.targetCPUUtilizationPercentage | int | `80` | targetAverageUtilization of cpu provided to a pod | | controlplane.autoscaling.targetMemoryUtilizationPercentage | int | `80` | targetAverageUtilization of memory provided to a pod | +| controlplane.businessPartnerValidation.log.agreementValidation | bool | `true` | | | controlplane.debug.enabled | bool | `false` | | | controlplane.debug.port | int | `1044` | | | controlplane.debug.suspendOnStart | bool | `false` | | -| controlplane.endpoints | object | `{"control":{"path":"/control","port":8083},"data":{"authKey":"","path":"/data","port":8081},"default":{"path":"/api","port":8080},"ids":{"path":"/api/v1/ids","port":8084},"metrics":{"path":"/metrics","port":9090},"observability":{"insecure":true,"path":"/observability","port":8085},"validation":{"path":"/validation","port":8082}}` | endpoints of the control plane | +| controlplane.endpoints | object | `{"control":{"path":"/control","port":8083},"default":{"path":"/api","port":8080},"management":{"authKey":"","path":"/management","port":8081},"metrics":{"path":"/metrics","port":9090},"observability":{"insecure":true,"path":"/observability","port":8085},"protocol":{"path":"/api/v1/ids","port":8084}}` | endpoints of the control plane | | controlplane.endpoints.control | object | `{"path":"/control","port":8083}` | control api, used for internal control calls. 
can be added to the internal ingress, but should probably not | | controlplane.endpoints.control.path | string | `"/control"` | path for incoming api calls | | controlplane.endpoints.control.port | int | `8083` | port for incoming api calls | -| controlplane.endpoints.data | object | `{"authKey":"","path":"/data","port":8081}` | data management api, used by internal users, can be added to an ingress and must not be internet facing | -| controlplane.endpoints.data.authKey | string | `""` | authentication key, must be attached to each 'X-Api-Key' request header | -| controlplane.endpoints.data.path | string | `"/data"` | path for incoming api calls | -| controlplane.endpoints.data.port | int | `8081` | port for incoming api calls | | controlplane.endpoints.default | object | `{"path":"/api","port":8080}` | default api for health checks, should not be added to any ingress | | controlplane.endpoints.default.path | string | `"/api"` | path for incoming api calls | | controlplane.endpoints.default.port | int | `8080` | port for incoming api calls | -| controlplane.endpoints.ids | object | `{"path":"/api/v1/ids","port":8084}` | ids api, used for inter connector communication and must be internet facing | -| controlplane.endpoints.ids.path | string | `"/api/v1/ids"` | path for incoming api calls | -| controlplane.endpoints.ids.port | int | `8084` | port for incoming api calls | +| controlplane.endpoints.management | object | `{"authKey":"","path":"/management","port":8081}` | data management api, used by internal users, can be added to an ingress and must not be internet facing | +| controlplane.endpoints.management.authKey | string | `""` | authentication key, must be attached to each 'X-Api-Key' request header | +| controlplane.endpoints.management.path | string | `"/management"` | path for incoming api calls | +| controlplane.endpoints.management.port | int | `8081` | port for incoming api calls | | controlplane.endpoints.metrics | object | `{"path":"/metrics","port":9090}` | metrics api, used for application metrics, must not be internet facing | | controlplane.endpoints.metrics.path | string | `"/metrics"` | path for incoming api calls | | controlplane.endpoints.metrics.port | int | `9090` | port for incoming api calls | @@ -52,9 +50,9 @@ helm install my-release tractusx-edc/tractusx-connector --version 0.3.2 | controlplane.endpoints.observability.insecure | bool | `true` | allow or disallow insecure access, i.e. 
access without authentication | | controlplane.endpoints.observability.path | string | `"/observability"` | observability api, provides /health /readiness and /liveness endpoints | | controlplane.endpoints.observability.port | int | `8085` | port for incoming API calls | -| controlplane.endpoints.validation | object | `{"path":"/validation","port":8082}` | validation api, only used by the data plane and should not be added to any ingress | -| controlplane.endpoints.validation.path | string | `"/validation"` | path for incoming api calls | -| controlplane.endpoints.validation.port | int | `8082` | port for incoming api calls | +| controlplane.endpoints.protocol | object | `{"path":"/api/v1/ids","port":8084}` | ids api, used for inter connector communication and must be internet facing | +| controlplane.endpoints.protocol.path | string | `"/api/v1/ids"` | path for incoming api calls | +| controlplane.endpoints.protocol.port | int | `8084` | port for incoming api calls | | controlplane.env | object | `{}` | | | controlplane.envConfigMapNames | list | `[]` | | | controlplane.envSecretNames | list | `[]` | | @@ -77,7 +75,7 @@ helm install my-release tractusx-edc/tractusx-connector --version 0.3.2 | controlplane.ingresses[1].certManager.issuer | string | `""` | If preset enables certificate generation via cert-manager namespace scoped issuer | | controlplane.ingresses[1].className | string | `""` | Defines the [ingress class](https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class) to use | | controlplane.ingresses[1].enabled | bool | `false` | | -| controlplane.ingresses[1].endpoints | list | `["data","control"]` | EDC endpoints exposed by this ingress resource | +| controlplane.ingresses[1].endpoints | list | `["management","control"]` | EDC endpoints exposed by this ingress resource | | controlplane.ingresses[1].hostname | string | `"edc-control.intranet"` | The hostname to be used to precisely map incoming traffic onto the underlying network service | | controlplane.ingresses[1].tls | object | `{"enabled":false,"secretName":""}` | TLS [tls class](https://kubernetes.io/docs/concepts/services-networking/ingress/#tls) applied to the ingress resource | | controlplane.ingresses[1].tls.enabled | bool | `false` | Enables TLS on the ingress resource | @@ -148,10 +146,11 @@ helm install my-release tractusx-edc/tractusx-connector --version 0.3.2 | dataplane.endpoints.default.port | int | `8080` | | | dataplane.endpoints.metrics.path | string | `"/metrics"` | | | dataplane.endpoints.metrics.port | int | `9090` | | +| dataplane.endpoints.observability.insecure | bool | `true` | allow or disallow insecure access, i.e. 
access without authentication | +| dataplane.endpoints.observability.path | string | `"/observability"` | observability api, provides /health /readiness and /liveness endpoints | +| dataplane.endpoints.observability.port | int | `8085` | port for incoming API calls | | dataplane.endpoints.public.path | string | `"/api/public"` | | | dataplane.endpoints.public.port | int | `8081` | | -| dataplane.endpoints.validation.path | string | `"/validation"` | | -| dataplane.endpoints.validation.port | int | `8082` | | | dataplane.env | object | `{}` | | | dataplane.envConfigMapNames | list | `[]` | | | dataplane.envSecretNames | list | `[]` | | diff --git a/charts/tractusx-connector/templates/deployment-controlplane.yaml b/charts/tractusx-connector/templates/deployment-controlplane.yaml index 6eded494c..daab957e4 100644 --- a/charts/tractusx-connector/templates/deployment-controlplane.yaml +++ b/charts/tractusx-connector/templates/deployment-controlplane.yaml @@ -331,6 +331,12 @@ spec: - name: "EDC_CP_ADAPTER_REUSE_CONTRACT_AGREEMENT" value: "0" + ########################### + ## BUSINESS PARTNER NUMBER VALIDATION EXTENSION ## + ########################### + - name: "TRACTUSX_BUSINESSPARTNERVALIDATION_LOG_AGREEMENT_VALIDATION" + value: {{ .Values.controlplane.businessPartnerValidation.log.agreementValidation | quote }} + ###################################### ## Additional environment variables ## ###################################### diff --git a/charts/tractusx-connector/values.yaml b/charts/tractusx-connector/values.yaml index aebd45481..21acfc20b 100644 --- a/charts/tractusx-connector/values.yaml +++ b/charts/tractusx-connector/values.yaml @@ -122,6 +122,9 @@ controlplane: path: /observability # -- allow or disallow insecure access, i.e. access without authentication insecure: true + businessPartnerValidation: + log: + agreementValidation: true service: # -- [Service type](https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types) to expose the running application on a set of Pods as a network service. type: ClusterIP diff --git a/docs/development/decision-records/2023-04-20_conventional_commits/README.md b/docs/development/decision-records/2023-04-20_conventional_commits/README.md new file mode 100644 index 000000000..d58b55c61 --- /dev/null +++ b/docs/development/decision-records/2023-04-20_conventional_commits/README.md @@ -0,0 +1,43 @@ +# Using Conventional Commit messages + +## Decision + +From now on, TractusX-EDC will use only conventional commit messages. The specification can be +found [here](https://www.conventionalcommits.org/en/v1.0.0/#summary) + +## Rationale + +Conventional commits create a structured, explicit and unambiguous commit history, that is easy to read and to +interpret. Conventional commits are widely used in the world of open source development. +On top of that, there +is [extensive tooling](https://www.conventionalcommits.org/en/about/#tooling-for-conventional-commits) to support the +creation, interpretation and enforcement of conventional commits. + +## Approach + +As a first step, we enforce conventional commits as part of our CI pipeline. TractusX-EDC is using +Squash-Rebase-merging, and the PR title is used as commit message. We will not dictate how people structure their +commits during the _development_ phase of their PR, but we _will_ enforce, that PR titles (and thus: merge commit +messages) are in the conventional commit format. 
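As a rough illustration of that convention (hypothetical examples only, not taken from this repository's history), squash-merge commit messages that satisfy the format would look like the following; the regex check described next enforces exactly this shape on PR titles:

```shell
# Hypothetical PR titles / squash-merge commit messages in conventional commit form.
# The type prefix (feat, fix, docs, ...) is required; the scope in parentheses is optional.
git commit -m "feat(controlplane): make business partner agreement logging configurable"
git commit -m "fix(charts): use the correct chart name in the install instructions"
git commit -m "docs: add decision record on conventional commits"
```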
+ +To do that, we can use a very simple regex check on the PR title: + +```yaml +- uses: deepakputhraya/action-pr-title@master + with: + regex: '^(build|chore|ci|docs|feat|fix|perf|refactor|revert|style|test)(\(\w+((,|\/|\\)?\s?\w+)+\))?!?: [\S ]{1,80}[^\.]$' + allowed_prefixes: 'build,chore,ci,docs,feat,fix,perf,refactor,revert,style,test' + prefix_case_sensitive: true +``` + +That way, we can catch malformed PR titles early, which would result in malformed _merge commit messages_. In addition, +we can +use any of the tools linked above to ensure commit messages, e.g. when merge commits are altered manually, etc. + +## Future outlook + +Once we have a structured commit history done in the conventional commit format, we can auto-generate changelogs, link +to (auto-generated) documentation, render visually appealing version information, etc. Essentially, we can use any +number of tooling on top of cc's. +One key aspect would be to get rid of the manual changelog, +see [this discussion](https://github.com/eclipse-tractusx/tractusx-edc/discussions/253). diff --git a/docs/development/postman/collection.json b/docs/development/postman/collection.json index 50d0c5ab7..26de5c7d2 100644 --- a/docs/development/postman/collection.json +++ b/docs/development/postman/collection.json @@ -1,6 +1,6 @@ { "info": { - "_postman_id": "b61c0075-e360-45df-9756-c9bc432fe76a", + "_postman_id": "fcea09d2-13d9-49ce-8c44-d3cb3078eb82", "name": "EDC", "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json", "_exporter_id": "6134257" @@ -12,12 +12,11 @@ "method": "GET", "header": [], "url": { - "raw": "{{PROVIDER_DATAMGMT_URL}}/data/assets/{{ASSET_ID}}", + "raw": "{{PROVIDER_MANAGEMENT_URL}}/assets/{{ASSET_ID}}", "host": [ - "{{PROVIDER_DATAMGMT_URL}}" + "{{PROVIDER_MANAGEMENT_URL}}" ], "path": [ - "data", "assets", "{{ASSET_ID}}" ] @@ -31,12 +30,11 @@ "method": "GET", "header": [], "url": { - "raw": "{{PROVIDER_DATAMGMT_URL}}/data/assets", + "raw": "{{PROVIDER_MANAGEMENT_URL}}/assets", "host": [ - "{{PROVIDER_DATAMGMT_URL}}" + "{{PROVIDER_MANAGEMENT_URL}}" ], "path": [ - "data", "assets" ] } @@ -71,12 +69,11 @@ } }, "url": { - "raw": "{{PROVIDER_DATAMGMT_URL}}/data/assets", + "raw": "{{PROVIDER_MANAGEMENT_URL}}/assets", "host": [ - "{{PROVIDER_DATAMGMT_URL}}" + "{{PROVIDER_MANAGEMENT_URL}}" ], "path": [ - "data", "assets" ] } @@ -89,12 +86,11 @@ "method": "DELETE", "header": [], "url": { - "raw": "{{PROVIDER_DATAMGMT_URL}}/data/assets/{{ASSET_ID}}", + "raw": "{{PROVIDER_MANAGEMENT_URL}}/assets/{{ASSET_ID}}", "host": [ - "{{PROVIDER_DATAMGMT_URL}}" + "{{PROVIDER_MANAGEMENT_URL}}" ], "path": [ - "data", "assets", "{{ASSET_ID}}" ] @@ -120,12 +116,11 @@ } }, "url": { - "raw": "{{PROVIDER_DATAMGMT_URL}}/data/policydefinitions/{{POLICY_ID}}", + "raw": "{{PROVIDER_MANAGEMENT_URL}}/policydefinitions/{{POLICY_ID}}", "host": [ - "{{PROVIDER_DATAMGMT_URL}}" + "{{PROVIDER_MANAGEMENT_URL}}" ], "path": [ - "data", "policydefinitions", "{{POLICY_ID}}" ] @@ -151,12 +146,11 @@ } }, "url": { - "raw": "{{PROVIDER_DATAMGMT_URL}}/data/policydefinitions", + "raw": "{{PROVIDER_MANAGEMENT_URL}}/policydefinitions", "host": [ - "{{PROVIDER_DATAMGMT_URL}}" + "{{PROVIDER_MANAGEMENT_URL}}" ], "path": [ - "data", "policydefinitions" ] } @@ -178,12 +172,11 @@ } }, "url": { - "raw": "{{PROVIDER_DATAMGMT_URL}}/data/policydefinitions", + "raw": "{{PROVIDER_MANAGEMENT_URL}}/policydefinitions", "host": [ - "{{PROVIDER_DATAMGMT_URL}}" + "{{PROVIDER_MANAGEMENT_URL}}" ], "path": [ - "data", "policydefinitions" ] } @@ 
-205,12 +198,11 @@ } }, "url": { - "raw": "{{PROVIDER_DATAMGMT_URL}}/data/policydefinitions", + "raw": "{{PROVIDER_MANAGEMENT_URL}}/policydefinitions", "host": [ - "{{PROVIDER_DATAMGMT_URL}}" + "{{PROVIDER_MANAGEMENT_URL}}" ], "path": [ - "data", "policydefinitions" ] } @@ -232,12 +224,11 @@ } }, "url": { - "raw": "{{PROVIDER_DATAMGMT_URL}}/data/policydefinitions", + "raw": "{{PROVIDER_MANAGEMENT_URL}}/policydefinitions", "host": [ - "{{PROVIDER_DATAMGMT_URL}}" + "{{PROVIDER_MANAGEMENT_URL}}" ], "path": [ - "data", "policydefinitions" ] } @@ -259,12 +250,11 @@ } }, "url": { - "raw": "{{PROVIDER_DATAMGMT_URL}}/data/policydefinitions/{{POLICY_ID}}", + "raw": "{{PROVIDER_MANAGEMENT_URL}}/policydefinitions/{{POLICY_ID}}", "host": [ - "{{PROVIDER_DATAMGMT_URL}}" + "{{PROVIDER_MANAGEMENT_URL}}" ], "path": [ - "data", "policydefinitions", "{{POLICY_ID}}" ] @@ -290,12 +280,11 @@ } }, "url": { - "raw": "{{PROVIDER_DATAMGMT_URL}}/data/contractdefinitions/{{POLICY_ID}}", + "raw": "{{PROVIDER_MANAGEMENT_URL}}/contractdefinitions/{{POLICY_ID}}", "host": [ - "{{PROVIDER_DATAMGMT_URL}}" + "{{PROVIDER_MANAGEMENT_URL}}" ], "path": [ - "data", "contractdefinitions", "{{POLICY_ID}}" ] @@ -321,12 +310,11 @@ } }, "url": { - "raw": "{{PROVIDER_DATAMGMT_URL}}/data/contractdefinitions", + "raw": "{{PROVIDER_MANAGEMENT_URL}}/contractdefinitions", "host": [ - "{{PROVIDER_DATAMGMT_URL}}" + "{{PROVIDER_MANAGEMENT_URL}}" ], "path": [ - "data", "contractdefinitions" ] } @@ -348,12 +336,11 @@ } }, "url": { - "raw": "{{PROVIDER_DATAMGMT_URL}}/data/contractdefinitions", + "raw": "{{PROVIDER_MANAGEMENT_URL}}/contractdefinitions", "host": [ - "{{PROVIDER_DATAMGMT_URL}}" + "{{PROVIDER_MANAGEMENT_URL}}" ], "path": [ - "data", "contractdefinitions" ] } @@ -375,12 +362,11 @@ } }, "url": { - "raw": "{{PROVIDER_DATAMGMT_URL}}/data/contractdefinitions/{{POLICY_ID}}", + "raw": "{{PROVIDER_MANAGEMENT_URL}}/contractdefinitions/{{POLICY_ID}}", "host": [ - "{{PROVIDER_DATAMGMT_URL}}" + "{{PROVIDER_MANAGEMENT_URL}}" ], "path": [ - "data", "contractdefinitions", "{{POLICY_ID}}" ] @@ -394,18 +380,17 @@ "method": "GET", "header": [], "url": { - "raw": "{{CONSUMER_DATAMGMT_URL}}/data/catalog?providerUrl={{PROVIDER_IDS_URL}}/api/v1/ids/data&size=50", + "raw": "{{CONSUMER_MANAGEMENT_URL}}/catalog?providerUrl={{PROVIDER_PROTOCOL_URL}}/api/v1/ids/data&size=50", "host": [ - "{{CONSUMER_DATAMGMT_URL}}" + "{{CONSUMER_MANAGEMENT_URL}}" ], "path": [ - "data", "catalog" ], "query": [ { "key": "providerUrl", - "value": "{{PROVIDER_IDS_URL}}/api/v1/ids/data" + "value": "{{PROVIDER_PROTOCOL_URL}}/api/v1/ids/data" }, { "key": "size", @@ -423,7 +408,7 @@ "header": [], "body": { "mode": "raw", - "raw": "{\r\n \"providerUrl\": \"{{PROVIDER_IDS_URL}}/api/v1/ids/data\",\r\n \"querySpec\": {\r\n \"offset\": 0,\r\n \"limit\": 100,\r\n \"filter\": \"\",\r\n \"range\": {\r\n \"from\": 0,\r\n \"to\": 100\r\n },\r\n \"sortOrder\": \"ASC\",\r\n \"sortField\": \"\"\r\n }\r\n}", + "raw": "{\r\n \"providerUrl\": \"{{PROVIDER_PROTOCOL_URL}}/api/v1/ids/data\",\r\n \"querySpec\": {\r\n \"offset\": 0,\r\n \"limit\": 100,\r\n \"sort\": \"ASC\",\r\n \"sortField\": \"\"\r\n }\r\n}", "options": { "raw": { "language": "json" @@ -431,12 +416,11 @@ } }, "url": { - "raw": "{{CONSUMER_DATAMGMT_URL}}/data/catalog/request", + "raw": "{{CONSUMER_MANAGEMENT_URL}}/catalog/request", "host": [ - "{{CONSUMER_DATAMGMT_URL}}" + "{{CONSUMER_MANAGEMENT_URL}}" ], "path": [ - "data", "catalog", "request" ] @@ -445,46 +429,48 @@ "response": [] }, { - "name": "Negotation", + "name": "Negotation 
(Public)", "event": [ - { - "listen": "prerequest", - "script": { - "exec": [ - "" - ], - "type": "text/javascript" - } - }, { "listen": "test", "script": { "exec": [ - "" + "pm.test(\"Body matches string\", function () {", + " var jsonData = pm.response.json();", + " pm.collectionVariables.set(\"NEGOTIATION_ID\", jsonData.id);", + "", + "});" ], "type": "text/javascript" } } ], "request": { - "method": "GET", + "method": "POST", "header": [], + "body": { + "mode": "raw", + "raw": "{\n \"connectorId\": \"foo\",\n \"connectorAddress\": \"{{PROVIDER_PROTOCOL_URL}}/api/v1/ids/data\",\n \"offer\": {\n \"offerId\": \"{{CONTRACT_DEFINITION_ID}}:foo\",\n \"assetId\": \"{{ASSET_ID}}\",\n \"policy\": {\n \"prohibitions\": [],\n \"obligations\": [],\n \"permissions\": [\n {\n \"edctype\": \"dataspaceconnector:permission\",\n \"action\": {\n \"type\": \"USE\"\n },\n \"target\": \"{{ASSET_ID}}\",\n \"constraints\": []\n }\n ]\n }\n }\n}", + "options": { + "raw": { + "language": "json" + } + } + }, "url": { - "raw": "{{CONSUMER_DATAMGMT_URL}}/data/contractnegotiations/{{NEGOTIATION_ID}}", + "raw": "{{CONSUMER_MANAGEMENT_URL}}/contractnegotiations", "host": [ - "{{CONSUMER_DATAMGMT_URL}}" + "{{CONSUMER_MANAGEMENT_URL}}" ], "path": [ - "data", - "contractnegotiations", - "{{NEGOTIATION_ID}}" + "contractnegotiations" ] } }, "response": [] }, { - "name": "Negotation (Public)", + "name": "Negotation (Properties)", "event": [ { "listen": "test", @@ -505,7 +491,7 @@ "header": [], "body": { "mode": "raw", - "raw": "{\n \"connectorId\": \"foo\",\n \"connectorAddress\": \"{{PROVIDER_IDS_URL}}/api/v1/ids/data\",\n \"offer\": {\n \"offerId\": \"{{CONTRACT_DEFINITION_ID}}:foo\",\n \"assetId\": \"{{ASSET_ID}}\",\n \"policy\": {\n \"prohibitions\": [],\n \"obligations\": [],\n \"permissions\": [\n {\n \"edctype\": \"dataspaceconnector:permission\",\n \"action\": {\n \"type\": \"USE\"\n },\n \"target\": \"{{ASSET_ID}}\",\n \"constraints\": []\n }\n ]\n }\n }\n}", + "raw": "{\n \"connectorId\": \"foo\",\n \"connectorAddress\": \"{{PROVIDER_PROTOCOL_URL}}/api/v1/ids/data\",\n \"offer\": {\n \"offerId\": \"{{POLICY_ID}}:foo\",\n \"assetId\": \"{{ASSET_ID}}\",\n \"policy\": {\n \"prohibitions\": [],\n \"obligations\": [],\n \"permissions\": [\n {\n \"edctype\": \"dataspaceconnector:permission\",\n \"action\": {\n \"type\": \"USE\"\n },\n \"target\": \"{{ASSET_ID}}\",\n \"constraints\": []\n }\n ],\n \"extensibleProperties\": {\n \"foo\": \"bar\"\n }\n }\n }\n}", "options": { "raw": { "language": "json" @@ -513,12 +499,11 @@ } }, "url": { - "raw": "{{CONSUMER_DATAMGMT_URL}}/data/contractnegotiations", + "raw": "{{CONSUMER_MANAGEMENT_URL}}/contractnegotiations", "host": [ - "{{CONSUMER_DATAMGMT_URL}}" + "{{CONSUMER_MANAGEMENT_URL}}" ], "path": [ - "data", "contractnegotiations" ] } @@ -526,7 +511,7 @@ "response": [] }, { - "name": "Negotation (Properties)", + "name": "Negotation (BPN)", "event": [ { "listen": "test", @@ -547,7 +532,7 @@ "header": [], "body": { "mode": "raw", - "raw": "{\n \"connectorId\": \"foo\",\n \"connectorAddress\": \"{{PROVIDER_IDS_URL}}/api/v1/ids/data\",\n \"offer\": {\n \"offerId\": \"{{POLICY_ID}}:foo\",\n \"assetId\": \"{{ASSET_ID}}\",\n \"policy\": {\n \"prohibitions\": [],\n \"obligations\": [],\n \"permissions\": [\n {\n \"edctype\": \"dataspaceconnector:permission\",\n \"action\": {\n \"type\": \"USE\"\n },\n \"target\": \"{{ASSET_ID}}\",\n \"constraints\": []\n }\n ],\n \"extensibleProperties\": {\n \"foo\": \"bar\"\n }\n }\n }\n}", + "raw": "{\n \"connectorId\": \"foo\",\n 
\"connectorAddress\": \"{{PROVIDER_PROTOCOL_URL}}/api/v1/ids/data\",\n \"offer\": {\n \"offerId\": \"{{POLICY_ID}}:foo\",\n \"assetId\": \"{{ASSET_ID}}\",\n \"policy\": {\n \"prohibitions\": [],\n \"obligations\": [],\n \"permissions\": [\n {\n \"edctype\": \"dataspaceconnector:permission\",\n \"action\": {\n \"type\": \"USE\"\n },\n \"target\": \"{{ASSET_ID}}\",\n \"constraints\": [\n {\n \"edctype\": \"AtomicConstraint\",\n \"leftExpression\": {\n \"edctype\": \"dataspaceconnector:literalexpression\",\n \"value\": \"BusinessPartnerNumber\"\n },\n \"rightExpression\": {\n \"edctype\": \"dataspaceconnector:literalexpression\",\n \"value\": \"{{POLICY_BPN}}\"\n },\n \"operator\": \"EQ\"\n }\n ]\n }\n ]\n }\n }\n}", "options": { "raw": { "language": "json" @@ -555,12 +540,11 @@ } }, "url": { - "raw": "{{CONSUMER_DATAMGMT_URL}}/data/contractnegotiations", + "raw": "{{CONSUMER_MANAGEMENT_URL}}/contractnegotiations", "host": [ - "{{CONSUMER_DATAMGMT_URL}}" + "{{CONSUMER_MANAGEMENT_URL}}" ], "path": [ - "data", "contractnegotiations" ] } @@ -568,16 +552,24 @@ "response": [] }, { - "name": "Negotation (BPN)", + "name": "Negotation (init AGREEMENT_ID)", "event": [ + { + "listen": "prerequest", + "script": { + "exec": [ + "" + ], + "type": "text/javascript" + } + }, { "listen": "test", "script": { "exec": [ - "pm.test(\"Body matches string\", function () {", - " var jsonData = pm.response.json();", - " pm.collectionVariables.set(\"NEGOTIATION_ID\", jsonData.id);", - "", + "pm.test(\"Body matches string\", function () {\r", + " var jsonData = pm.response.json();\r", + " pm.collectionVariables.set(\"AGREEMENT_ID\", jsonData.contractAgreementId);\r", "});" ], "type": "text/javascript" @@ -585,25 +577,16 @@ } ], "request": { - "method": "POST", + "method": "GET", "header": [], - "body": { - "mode": "raw", - "raw": "{\n \"connectorId\": \"foo\",\n \"connectorAddress\": \"{{PROVIDER_IDS_URL}}/api/v1/ids/data\",\n \"offer\": {\n \"offerId\": \"{{POLICY_ID}}:foo\",\n \"assetId\": \"{{ASSET_ID}}\",\n \"policy\": {\n \"prohibitions\": [],\n \"obligations\": [],\n \"permissions\": [\n {\n \"edctype\": \"dataspaceconnector:permission\",\n \"action\": {\n \"type\": \"USE\"\n },\n \"target\": \"{{ASSET_ID}}\",\n \"constraints\": [\n {\n \"edctype\": \"AtomicConstraint\",\n \"leftExpression\": {\n \"edctype\": \"dataspaceconnector:literalexpression\",\n \"value\": \"BusinessPartnerNumber\"\n },\n \"rightExpression\": {\n \"edctype\": \"dataspaceconnector:literalexpression\",\n \"value\": \"{{POLICY_BPN}}\"\n },\n \"operator\": \"EQ\"\n }\n ]\n }\n ]\n }\n }\n}", - "options": { - "raw": { - "language": "json" - } - } - }, "url": { - "raw": "{{CONSUMER_DATAMGMT_URL}}/data/contractnegotiations", + "raw": "{{CONSUMER_MANAGEMENT_URL}}/contractnegotiations/{{NEGOTIATION_ID}}", "host": [ - "{{CONSUMER_DATAMGMT_URL}}" + "{{CONSUMER_MANAGEMENT_URL}}" ], "path": [ - "data", - "contractnegotiations" + "contractnegotiations", + "{{NEGOTIATION_ID}}" ] } }, @@ -639,7 +622,7 @@ "header": [], "body": { "mode": "raw", - "raw": "{ \"id\": \"{{TRANSFER_ID}}\",\n \"connectorId\": \"foo\", \n \"connectorAddress\": \"{{PROVIDER_IDS_URL}}/api/v1/ids/data\", \n \"contractId\": \"{{AGREEMENT_ID}}\", \n \"assetId\": \"{{ASSET_ID}}\",\n \"managedResources\": \"false\", \n \"dataDestination\": { \"type\": \"HttpProxy\" }\n}", + "raw": "{ \"id\": \"{{TRANSFER_ID}}\",\n \"connectorId\": \"foo\", \n \"connectorAddress\": \"{{PROVIDER_PROTOCOL_URL}}/api/v1/ids/data\", \n \"contractId\": \"{{AGREEMENT_ID}}\", \n \"assetId\": \"{{ASSET_ID}}\",\n 
\"managedResources\": \"false\", \n \"dataDestination\": { \"type\": \"HttpProxy\" }\n}", "options": { "raw": { "language": "json" @@ -647,12 +630,11 @@ } }, "url": { - "raw": "{{CONSUMER_DATAMGMT_URL}}/data/transferprocess", + "raw": "{{CONSUMER_MANAGEMENT_URL}}/transferprocess", "host": [ - "{{CONSUMER_DATAMGMT_URL}}" + "{{CONSUMER_MANAGEMENT_URL}}" ], "path": [ - "data", "transferprocess" ] } @@ -689,7 +671,7 @@ "header": [], "body": { "mode": "raw", - "raw": "{ \"id\": \"{{TRANSFER_ID}}\",\n \"connectorId\": \"foo\", \n \"connectorAddress\": \"{{PROVIDER_IDS_URL}}/api/v1/ids/data\", \n \"contractId\": \"{{AGREEMENT_ID}}\", \n \"assetId\": \"{{ASSET_ID}}\",\n \"managedResources\": \"false\", \n \"dataDestination\": { \"type\": \"HttpProxy\" },\n \"properties\": {\n \"receiver.http.endpoint\": \"{{BACKEND_SERVICE}}\"\n }\n}", + "raw": "{ \"id\": \"{{TRANSFER_ID}}\",\n \"connectorId\": \"foo\", \n \"connectorAddress\": \"{{PROVIDER_PROTOCOL_URL}}/api/v1/ids/data\", \n \"contractId\": \"{{AGREEMENT_ID}}\", \n \"assetId\": \"{{ASSET_ID}}\",\n \"managedResources\": \"false\", \n \"dataDestination\": { \"type\": \"HttpProxy\" },\n \"properties\": {\n \"receiver.http.endpoint\": \"{{BACKEND_SERVICE}}\"\n }\n}", "options": { "raw": { "language": "json" @@ -697,12 +679,11 @@ } }, "url": { - "raw": "{{CONSUMER_DATAMGMT_URL}}/data/transferprocess", + "raw": "{{CONSUMER_MANAGEMENT_URL}}/transferprocess", "host": [ - "{{CONSUMER_DATAMGMT_URL}}" + "{{CONSUMER_MANAGEMENT_URL}}" ], "path": [ - "data", "transferprocess" ] } @@ -735,18 +716,61 @@ "method": "GET", "header": [], "url": { - "raw": "{{CONSUMER_DATAMGMT_URL}}/data/transferprocess/{{TRANSFER_PROCESS_ID}}", + "raw": "{{CONSUMER_MANAGEMENT_URL}}/transferprocess/{{TRANSFER_PROCESS_ID}}", "host": [ - "{{CONSUMER_DATAMGMT_URL}}" + "{{CONSUMER_MANAGEMENT_URL}}" ], "path": [ - "data", "transferprocess", "{{TRANSFER_PROCESS_ID}}" ] } }, "response": [] + }, + { + "name": "CPA (getData)", + "event": [ + { + "listen": "test", + "script": { + "exec": [ + "pm.test(\"Body matches string\", function () {\r", + " var jsonData = pm.response.json();\r", + " pm.collectionVariables.set(\"authCode\", jsonData.authCode);\r", + "});" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "GET", + "header": [], + "url": { + "raw": "{{CONSUMER_MANAGEMENT_URL}}/adapter/asset/sync/{{ASSET_ID}}?providerUrl={{PROVIDER_PROTOCOL_URL}}/api/v1/ids/data&contractAgreementReuse=false", + "host": [ + "{{CONSUMER_MANAGEMENT_URL}}" + ], + "path": [ + "adapter", + "asset", + "sync", + "{{ASSET_ID}}" + ], + "query": [ + { + "key": "providerUrl", + "value": "{{PROVIDER_PROTOCOL_URL}}/api/v1/ids/data" + }, + { + "key": "contractAgreementReuse", + "value": "false" + } + ] + } + }, + "response": [] } ], "auth": { @@ -786,16 +810,16 @@ ], "variable": [ { - "key": "CONSUMER_DATAMGMT_URL", - "value": "https://sokrates-txdc.int.demo.catena-x.net" + "key": "CONSUMER_MANAGEMENT_URL", + "value": "https://sokrates-txdc.int.demo.catena-x.net/management" }, { - "key": "PROVIDER_IDS_URL", + "key": "PROVIDER_PROTOCOL_URL", "value": "https://plato-txdc.int.demo.catena-x.net" }, { - "key": "PROVIDER_DATAMGMT_URL", - "value": "https://plato-txdc.int.demo.catena-x.net" + "key": "PROVIDER_MANAGEMENT_URL", + "value": "https://plato-txdc.int.demo.catena-x.net/management" }, { "key": "ASSET_ID", @@ -847,6 +871,14 @@ "key": "BACKEND_SERVICE", "value": "http://backend:8080", "type": "string" + }, + { + "key": "AGREEMENT-ID", + "value": "" + }, + { + "key": "authCode", + "value": "" } ] } \ 
No newline at end of file diff --git a/docs/development/postman/images/screenshot.png b/docs/development/postman/images/screenshot.png deleted file mode 100644 index 8a9d231c6..000000000 Binary files a/docs/development/postman/images/screenshot.png and /dev/null differ diff --git a/edc-controlplane/edc-controlplane-postgresql/build.gradle.kts b/edc-controlplane/edc-controlplane-postgresql/build.gradle.kts index 5888c34c4..78b5e253f 100644 --- a/edc-controlplane/edc-controlplane-postgresql/build.gradle.kts +++ b/edc-controlplane/edc-controlplane-postgresql/build.gradle.kts @@ -11,6 +11,11 @@ dependencies { runtimeOnly(project(":edc-controlplane:edc-controlplane-base")) runtimeOnly(project(":edc-extensions:postgresql-migration")) runtimeOnly(edc.azure.vault) + constraints { + implementation("net.minidev:json-smart:2.4.10") { + because("version 2.4.8 has vulnerabilities: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-1370.") + } + } runtimeOnly(edc.bundles.sqlstores) runtimeOnly(edc.transaction.local) runtimeOnly(edc.sql.pool) diff --git a/edc-dataplane/edc-dataplane-azure-vault/build.gradle.kts b/edc-dataplane/edc-dataplane-azure-vault/build.gradle.kts index 02d29b7db..020dc0512 100644 --- a/edc-dataplane/edc-dataplane-azure-vault/build.gradle.kts +++ b/edc-dataplane/edc-dataplane-azure-vault/build.gradle.kts @@ -8,6 +8,11 @@ plugins { dependencies { implementation(project(":edc-dataplane:edc-dataplane-base")) implementation(edc.azure.vault) + constraints { + implementation("net.minidev:json-smart:2.4.10") { + because("version 2.4.8 has vulnerabilities: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-1370.") + } + } implementation(edc.azure.identity) implementation("com.azure:azure-security-keyvault-secrets:4.6.0") } diff --git a/edc-extensions/business-partner-validation/build.gradle.kts b/edc-extensions/business-partner-validation/build.gradle.kts index 53cb11e31..198886d9a 100644 --- a/edc-extensions/business-partner-validation/build.gradle.kts +++ b/edc-extensions/business-partner-validation/build.gradle.kts @@ -1,4 +1,3 @@ - plugins { `java-library` `maven-publish` @@ -7,5 +6,6 @@ plugins { dependencies { api(edc.spi.core) implementation(edc.spi.policy) + implementation(edc.spi.contract) implementation(edc.spi.policyengine) } diff --git a/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/BusinessPartnerValidationExtension.java b/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/BusinessPartnerValidationExtension.java index ee076406f..d88293a72 100644 --- a/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/BusinessPartnerValidationExtension.java +++ b/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/BusinessPartnerValidationExtension.java @@ -26,6 +26,7 @@ import org.eclipse.edc.policy.model.Permission; import org.eclipse.edc.policy.model.Prohibition; import org.eclipse.edc.runtime.metamodel.annotation.Inject; +import org.eclipse.edc.runtime.metamodel.annotation.Setting; import org.eclipse.edc.spi.monitor.Monitor; import org.eclipse.edc.spi.system.ServiceExtension; import org.eclipse.edc.spi.system.ServiceExtensionContext; @@ -37,60 +38,73 @@ public class BusinessPartnerValidationExtension implements ServiceExtension { - /** - * The key for business partner numbers constraints. Must be used as left operand when declaring - * constraints. 
-   *
-   * <p>Example:
-   *
-   * <pre>{@code
-   * {
-   *     "constraint": {
-   *         "leftOperand": "BusinessPartnerNumber",
-   *         "operator": "EQ",
-   *         "rightOperand": "BPNLCDQ90000X42KU"
-   *     }
-   * }
-   * }</pre>
- */ - public static final String BUSINESS_PARTNER_CONSTRAINT_KEY = "BusinessPartnerNumber"; - - public BusinessPartnerValidationExtension() {} - - public BusinessPartnerValidationExtension( - final RuleBindingRegistry ruleBindingRegistry, final PolicyEngine policyEngine) { - this.ruleBindingRegistry = ruleBindingRegistry; - this.policyEngine = policyEngine; - } - - @Inject private RuleBindingRegistry ruleBindingRegistry; - - @Inject private PolicyEngine policyEngine; - - @Override - public String name() { - return "Business Partner Validation Extension"; - } - - @Override - public void initialize(ServiceExtensionContext context) { - - final Monitor monitor = context.getMonitor(); - - final BusinessPartnerDutyFunction dutyFunction = new BusinessPartnerDutyFunction(monitor); - final BusinessPartnerPermissionFunction permissionFunction = - new BusinessPartnerPermissionFunction(monitor); - final BusinessPartnerProhibitionFunction prohibitionFunction = - new BusinessPartnerProhibitionFunction(monitor); - - ruleBindingRegistry.bind("USE", ALL_SCOPES); - ruleBindingRegistry.bind(BUSINESS_PARTNER_CONSTRAINT_KEY, ALL_SCOPES); - - policyEngine.registerFunction( - ALL_SCOPES, Duty.class, BUSINESS_PARTNER_CONSTRAINT_KEY, dutyFunction); - policyEngine.registerFunction( - ALL_SCOPES, Permission.class, BUSINESS_PARTNER_CONSTRAINT_KEY, permissionFunction); - policyEngine.registerFunction( - ALL_SCOPES, Prohibition.class, BUSINESS_PARTNER_CONSTRAINT_KEY, prohibitionFunction); - } + /** + * The key for business partner numbers constraints. Must be used as left operand when declaring + * constraints. + * + *

<p>Example:
+     *
+     * <pre>{@code
+     * {
+     *     "constraint": {
+     *         "leftOperand": "BusinessPartnerNumber",
+     *         "operator": "EQ",
+     *         "rightOperand": "BPNLCDQ90000X42KU"
+     *     }
+     * }
+     * }</pre>
+ */ + public static final String BUSINESS_PARTNER_CONSTRAINT_KEY = "BusinessPartnerNumber"; + + public static final String DEFAULT_LOG_AGREEMENT_EVALUATION = "true"; + + + @Setting(value = "Enable logging when evaluating the business partner constraints in the agreement validation", type = "boolean", defaultValue = DEFAULT_LOG_AGREEMENT_EVALUATION) + public static final String BUSINESS_PARTNER_VALIDATION_LOG_AGREEMENT_VALIDATION = "tractusx.businesspartnervalidation.log.agreement.validation"; + @Inject + private RuleBindingRegistry ruleBindingRegistry; + @Inject + private PolicyEngine policyEngine; + + public BusinessPartnerValidationExtension() { + } + + public BusinessPartnerValidationExtension( + final RuleBindingRegistry ruleBindingRegistry, final PolicyEngine policyEngine) { + this.ruleBindingRegistry = ruleBindingRegistry; + this.policyEngine = policyEngine; + } + + @Override + public String name() { + return "Business Partner Validation Extension"; + } + + @Override + public void initialize(ServiceExtensionContext context) { + + final Monitor monitor = context.getMonitor(); + + var logAgreementEvaluation = logAgreementEvaluationSetting(context); + + final BusinessPartnerDutyFunction dutyFunction = new BusinessPartnerDutyFunction(monitor, logAgreementEvaluation); + final BusinessPartnerPermissionFunction permissionFunction = + new BusinessPartnerPermissionFunction(monitor, logAgreementEvaluation); + final BusinessPartnerProhibitionFunction prohibitionFunction = + new BusinessPartnerProhibitionFunction(monitor, logAgreementEvaluation); + + ruleBindingRegistry.bind("USE", ALL_SCOPES); + ruleBindingRegistry.bind(BUSINESS_PARTNER_CONSTRAINT_KEY, ALL_SCOPES); + + policyEngine.registerFunction( + ALL_SCOPES, Duty.class, BUSINESS_PARTNER_CONSTRAINT_KEY, dutyFunction); + policyEngine.registerFunction( + ALL_SCOPES, Permission.class, BUSINESS_PARTNER_CONSTRAINT_KEY, permissionFunction); + policyEngine.registerFunction( + ALL_SCOPES, Prohibition.class, BUSINESS_PARTNER_CONSTRAINT_KEY, prohibitionFunction); + } + + private Boolean logAgreementEvaluationSetting(ServiceExtensionContext context) { + return Boolean.parseBoolean(context.getSetting(BUSINESS_PARTNER_VALIDATION_LOG_AGREEMENT_VALIDATION, DEFAULT_LOG_AGREEMENT_EVALUATION)); + } } diff --git a/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/AbstractBusinessPartnerValidation.java b/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/AbstractBusinessPartnerValidation.java index 55cb0d52b..ecb5b81ef 100644 --- a/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/AbstractBusinessPartnerValidation.java +++ b/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/AbstractBusinessPartnerValidation.java @@ -20,132 +20,147 @@ package org.eclipse.tractusx.edc.validation.businesspartner.functions; -import java.util.Map; -import java.util.Objects; +import org.eclipse.edc.connector.contract.spi.types.agreement.ContractAgreement; import org.eclipse.edc.policy.engine.spi.PolicyContext; import org.eclipse.edc.policy.model.Operator; import org.eclipse.edc.spi.agent.ParticipantAgent; import org.eclipse.edc.spi.monitor.Monitor; +import java.util.Map; +import java.util.Objects; + +import static java.lang.String.format; + /** * Abstract class for BusinessPartnerNumber validation. 
This class may be inherited from the EDC * policy enforcing functions for duties, permissions and prohibitions. */ public abstract class AbstractBusinessPartnerValidation { - // Developer Note: - // Problems reported to the policy context are not logged. Therefore, everything - // that is reported to the policy context should be logged, too. - - private static final String FAIL_EVALUATION_BECAUSE_RIGHT_VALUE_NOT_STRING = - "Failing evaluation because of invalid BusinessPartnerNumber constraint. For operator 'EQ' right value must be of type 'String'. Unsupported type: '%s'"; - private static final String FAIL_EVALUATION_BECAUSE_UNSUPPORTED_OPERATOR = - "Failing evaluation because of invalid BusinessPartnerNumber constraint. As operator only 'EQ' is supported. Unsupported operator: '%s'"; - - private final Monitor monitor; - - protected AbstractBusinessPartnerValidation(Monitor monitor) { - this.monitor = Objects.requireNonNull(monitor); - } - - /** - * Name of the claim that contains the Business Partner Number. - * - *

Please note: At the time of writing (April 2022) the business partner - * number is part of the 'referringConnector' claim in the IDS DAT token. This will probably - * change for the next release. - */ - private static final String REFERRING_CONNECTOR_CLAIM = "referringConnector"; - - /** - * Evaluation funtion to decide whether a claim belongs to a specific business partner. - * - * @param operator operator of the constraint - * @param rightValue right value fo the constraint, that contains the business partner number - * (e.g. BPNLCDQ90000X42KU) - * @param policyContext context of the policy with claims - * @return true if claims are from the constrained business partner - */ - protected boolean evaluate( - final Operator operator, final Object rightValue, final PolicyContext policyContext) { - - if (policyContext.hasProblems() && !policyContext.getProblems().isEmpty()) { - String problems = String.join(", ", policyContext.getProblems()); - String message = - String.format( - "BusinessPartnerNumberValidation: Rejecting PolicyContext with problems. Problems: %s", - problems); - monitor.debug(message); - return false; + // Developer Note: + // Problems reported to the policy context are not logged. Therefore, everything + // that is reported to the policy context should be logged, too. + + private static final String FAIL_EVALUATION_BECAUSE_RIGHT_VALUE_NOT_STRING = + "Failing evaluation because of invalid BusinessPartnerNumber constraint. For operator 'EQ' right value must be of type 'String'. Unsupported type: '%s'"; + private static final String FAIL_EVALUATION_BECAUSE_UNSUPPORTED_OPERATOR = + "Failing evaluation because of invalid BusinessPartnerNumber constraint. As operator only 'EQ' is supported. Unsupported operator: '%s'"; + /** + * Name of the claim that contains the Business Partner Number. + * + *

Please note: At the time of writing (April 2022) the business partner + * number is part of the 'referringConnector' claim in the IDS DAT token. This will probably + * change for the next release. + */ + private static final String REFERRING_CONNECTOR_CLAIM = "referringConnector"; + private final Monitor monitor; + private final boolean logAgreementEvaluation; + + protected AbstractBusinessPartnerValidation(Monitor monitor, boolean logAgreementEvaluation) { + this.monitor = Objects.requireNonNull(monitor); + this.logAgreementEvaluation = logAgreementEvaluation; } - final ParticipantAgent participantAgent = policyContext.getParticipantAgent(); - final Map claims = participantAgent.getClaims(); - - if (!claims.containsKey(REFERRING_CONNECTOR_CLAIM)) { - return false; + /** + * At the time of writing (11. April 2022) the business partner number is part of the + * 'referringConnector' claim, which contains a connector URL. As the CX projects are not further + * aligned about the URL formatting, the enforcement can only be done by checking whether the URL + * _contains_ the number. As this introduces some insecurities when validation business partner + * numbers, this should be addresses in the long term. + * + * @param referringConnectorClaim describing URL with business partner number + * @param businessPartnerNumber of the constraint + * @return true if claim contains the business partner number + */ + private static boolean isCorrectBusinessPartner( + String referringConnectorClaim, String businessPartnerNumber) { + return referringConnectorClaim.contains(businessPartnerNumber); } - Object referringConnectorClaimObject = claims.get(REFERRING_CONNECTOR_CLAIM); - String referringConnectorClaim = null; - - if (referringConnectorClaimObject instanceof String) { - referringConnectorClaim = (String) referringConnectorClaimObject; + public boolean isLogAgreementEvaluation() { + return logAgreementEvaluation; } - if (referringConnectorClaim == null || referringConnectorClaim.isEmpty()) { - return false; + /** + * Evaluation funtion to decide whether a claim belongs to a specific business partner. + * + * @param operator operator of the constraint + * @param rightValue right value fo the constraint, that contains the business partner number + * (e.g. BPNLCDQ90000X42KU) + * @param policyContext context of the policy with claims + * @return true if claims are from the constrained business partner + */ + protected boolean evaluate( + final Operator operator, final Object rightValue, final PolicyContext policyContext) { + + if (policyContext.hasProblems() && !policyContext.getProblems().isEmpty()) { + String problems = String.join(", ", policyContext.getProblems()); + String message = + format( + "BusinessPartnerNumberValidation: Rejecting PolicyContext with problems. 
Problems: %s", + problems); + monitor.debug(message); + return false; + } + + final ParticipantAgent participantAgent = policyContext.getParticipantAgent(); + final Map claims = participantAgent.getClaims(); + + if (!claims.containsKey(REFERRING_CONNECTOR_CLAIM)) { + return false; + } + + Object referringConnectorClaimObject = claims.get(REFERRING_CONNECTOR_CLAIM); + String referringConnectorClaim = null; + + if (referringConnectorClaimObject instanceof String) { + referringConnectorClaim = (String) referringConnectorClaimObject; + } + + if (referringConnectorClaim == null || referringConnectorClaim.isEmpty()) { + return false; + } + + if (operator == Operator.EQ) { + return isBusinessPartnerNumber(referringConnectorClaim, rightValue, policyContext); + } else { + final String message = format(FAIL_EVALUATION_BECAUSE_UNSUPPORTED_OPERATOR, operator); + monitor.warning(message); + policyContext.reportProblem(message); + return false; + } } - if (operator == Operator.EQ) { - return isBusinessPartnerNumber(referringConnectorClaim, rightValue, policyContext); - } else { - final String message = String.format(FAIL_EVALUATION_BECAUSE_UNSUPPORTED_OPERATOR, operator); - monitor.warning(message); - policyContext.reportProblem(message); - return false; - } - } - - /** - * @param referringConnectorClaim of the participant - * @param businessPartnerNumber object - * @return true if object is string and successfully evaluated against the claim - */ - private boolean isBusinessPartnerNumber( - String referringConnectorClaim, Object businessPartnerNumber, PolicyContext policyContext) { - if (businessPartnerNumber == null) { - final String message = String.format(FAIL_EVALUATION_BECAUSE_RIGHT_VALUE_NOT_STRING, "null"); - monitor.warning(message); - policyContext.reportProblem(message); - return false; + /** + * @param referringConnectorClaim of the participant + * @param businessPartnerNumber object + * @return true if object is string and successfully evaluated against the claim + */ + private boolean isBusinessPartnerNumber( + String referringConnectorClaim, Object businessPartnerNumber, PolicyContext policyContext) { + if (businessPartnerNumber == null) { + final String message = format(FAIL_EVALUATION_BECAUSE_RIGHT_VALUE_NOT_STRING, "null"); + monitor.warning(message); + policyContext.reportProblem(message); + return false; + } + if (!(businessPartnerNumber instanceof String)) { + final String message = + format( + FAIL_EVALUATION_BECAUSE_RIGHT_VALUE_NOT_STRING, + businessPartnerNumber.getClass().getName()); + monitor.warning(message); + policyContext.reportProblem(message); + return false; + } + + var businessPartnerNumberStr = (String) businessPartnerNumber; + var agreement = policyContext.getContextData(ContractAgreement.class); + var isCorrectBusinessPartner = isCorrectBusinessPartner(referringConnectorClaim, businessPartnerNumberStr); + + if (agreement != null && logAgreementEvaluation) { + monitor.info(format("Evaluated policy access for referringConnectorClaim: %s and contract id: %s with result: %s", referringConnectorClaim, agreement.getId(), isCorrectBusinessPartner)); + } + return isCorrectBusinessPartner; } - if (!(businessPartnerNumber instanceof String)) { - final String message = - String.format( - FAIL_EVALUATION_BECAUSE_RIGHT_VALUE_NOT_STRING, - businessPartnerNumber.getClass().getName()); - monitor.warning(message); - policyContext.reportProblem(message); - return false; - } - - return isCorrectBusinessPartner(referringConnectorClaim, (String) businessPartnerNumber); - } - - /** - * At 
the time of writing (11. April 2022) the business partner number is part of the - * 'referringConnector' claim, which contains a connector URL. As the CX projects are not further - * aligned about the URL formatting, the enforcement can only be done by checking whether the URL - * _contains_ the number. As this introduces some insecurities when validation business partner - * numbers, this should be addresses in the long term. - * - * @param referringConnectorClaim describing URL with business partner number - * @param businessPartnerNumber of the constraint - * @return true if claim contains the business partner number - */ - private static boolean isCorrectBusinessPartner( - String referringConnectorClaim, String businessPartnerNumber) { - return referringConnectorClaim.contains(businessPartnerNumber); - } } diff --git a/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/BusinessPartnerDutyFunction.java b/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/BusinessPartnerDutyFunction.java index f53ba3cbc..061d7fd7d 100644 --- a/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/BusinessPartnerDutyFunction.java +++ b/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/BusinessPartnerDutyFunction.java @@ -26,16 +26,18 @@ import org.eclipse.edc.policy.model.Operator; import org.eclipse.edc.spi.monitor.Monitor; -/** AtomicConstraintFunction to validate business partner numbers for edc duties. */ +/** + * AtomicConstraintFunction to validate business partner numbers for edc duties. + */ public class BusinessPartnerDutyFunction extends AbstractBusinessPartnerValidation - implements AtomicConstraintFunction { + implements AtomicConstraintFunction { - public BusinessPartnerDutyFunction(Monitor monitor) { - super(monitor); - } + public BusinessPartnerDutyFunction(Monitor monitor, boolean shouldLogOnAgreementEvaluation) { + super(monitor, shouldLogOnAgreementEvaluation); + } - @Override - public boolean evaluate(Operator operator, Object rightValue, Duty rule, PolicyContext context) { - return evaluate(operator, rightValue, context); - } + @Override + public boolean evaluate(Operator operator, Object rightValue, Duty rule, PolicyContext context) { + return evaluate(operator, rightValue, context); + } } diff --git a/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/BusinessPartnerPermissionFunction.java b/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/BusinessPartnerPermissionFunction.java index 07bda765e..b6713c477 100644 --- a/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/BusinessPartnerPermissionFunction.java +++ b/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/BusinessPartnerPermissionFunction.java @@ -26,17 +26,19 @@ import org.eclipse.edc.policy.model.Permission; import org.eclipse.edc.spi.monitor.Monitor; -/** AtomicConstraintFunction to validate business partner numbers for edc permissions. */ +/** + * AtomicConstraintFunction to validate business partner numbers for edc permissions. 
+ */ public class BusinessPartnerPermissionFunction extends AbstractBusinessPartnerValidation - implements AtomicConstraintFunction { + implements AtomicConstraintFunction { - public BusinessPartnerPermissionFunction(Monitor monitor) { - super(monitor); - } + public BusinessPartnerPermissionFunction(Monitor monitor, boolean shouldLogOnAgreementEvaluation) { + super(monitor, shouldLogOnAgreementEvaluation); + } - @Override - public boolean evaluate( - Operator operator, Object rightValue, Permission rule, PolicyContext context) { - return evaluate(operator, rightValue, context); - } + @Override + public boolean evaluate( + Operator operator, Object rightValue, Permission rule, PolicyContext context) { + return evaluate(operator, rightValue, context); + } } diff --git a/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/BusinessPartnerProhibitionFunction.java b/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/BusinessPartnerProhibitionFunction.java index f3cddf9fe..79e318741 100644 --- a/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/BusinessPartnerProhibitionFunction.java +++ b/edc-extensions/business-partner-validation/src/main/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/BusinessPartnerProhibitionFunction.java @@ -26,17 +26,19 @@ import org.eclipse.edc.policy.model.Prohibition; import org.eclipse.edc.spi.monitor.Monitor; -/** AtomicConstraintFunction to validate business partner numbers for edc prohibitions. */ +/** + * AtomicConstraintFunction to validate business partner numbers for edc prohibitions. + */ public class BusinessPartnerProhibitionFunction extends AbstractBusinessPartnerValidation - implements AtomicConstraintFunction { + implements AtomicConstraintFunction { - public BusinessPartnerProhibitionFunction(Monitor monitor) { - super(monitor); - } + public BusinessPartnerProhibitionFunction(Monitor monitor, boolean shouldLogOnAgreementEvaluation) { + super(monitor, shouldLogOnAgreementEvaluation); + } - @Override - public boolean evaluate( - Operator operator, Object rightValue, Prohibition rule, PolicyContext context) { - return evaluate(operator, rightValue, context); - } + @Override + public boolean evaluate( + Operator operator, Object rightValue, Prohibition rule, PolicyContext context) { + return evaluate(operator, rightValue, context); + } } diff --git a/edc-extensions/business-partner-validation/src/test/java/org/eclipse/tractusx/edc/validation/businesspartner/BusinessPartnerValidationExtensionTest.java b/edc-extensions/business-partner-validation/src/test/java/org/eclipse/tractusx/edc/validation/businesspartner/BusinessPartnerValidationExtensionTest.java index 0240dc9ef..dcea3be41 100644 --- a/edc-extensions/business-partner-validation/src/test/java/org/eclipse/tractusx/edc/validation/businesspartner/BusinessPartnerValidationExtensionTest.java +++ b/edc-extensions/business-partner-validation/src/test/java/org/eclipse/tractusx/edc/validation/businesspartner/BusinessPartnerValidationExtensionTest.java @@ -27,10 +27,13 @@ import org.eclipse.edc.policy.model.Prohibition; import org.eclipse.edc.spi.monitor.Monitor; import org.eclipse.edc.spi.system.ServiceExtensionContext; +import org.eclipse.tractusx.edc.validation.businesspartner.functions.BusinessPartnerPermissionFunction; import org.junit.jupiter.api.BeforeEach; import 
org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; import org.mockito.Mockito; +import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; @@ -105,4 +108,24 @@ void testRegisterProhibitionFunction() { eq(BusinessPartnerValidationExtension.BUSINESS_PARTNER_CONSTRAINT_KEY), any()); } + + @Test + void testLogConfiguration() { + + when(serviceExtensionContext.getSetting(BusinessPartnerValidationExtension.BUSINESS_PARTNER_VALIDATION_LOG_AGREEMENT_VALIDATION, "true")).thenReturn("false"); + + var captor = ArgumentCaptor.forClass(BusinessPartnerPermissionFunction.class); + // invoke + extension.initialize(serviceExtensionContext); + + // verify + verify(policyEngine) + .registerFunction( + anyString(), + eq(Permission.class), + eq(BusinessPartnerValidationExtension.BUSINESS_PARTNER_CONSTRAINT_KEY), + captor.capture()); + + assertThat(captor.getValue().isLogAgreementEvaluation()).isFalse(); + } } diff --git a/edc-extensions/business-partner-validation/src/test/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/AbstractBusinessPartnerValidationTest.java b/edc-extensions/business-partner-validation/src/test/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/AbstractBusinessPartnerValidationTest.java index e8909c04e..2bc0738b0 100644 --- a/edc-extensions/business-partner-validation/src/test/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/AbstractBusinessPartnerValidationTest.java +++ b/edc-extensions/business-partner-validation/src/test/java/org/eclipse/tractusx/edc/validation/businesspartner/functions/AbstractBusinessPartnerValidationTest.java @@ -20,10 +20,10 @@ package org.eclipse.tractusx.edc.validation.businesspartner.functions; -import java.util.Collections; -import java.util.List; +import org.eclipse.edc.connector.contract.spi.types.agreement.ContractAgreement; import org.eclipse.edc.policy.engine.spi.PolicyContext; import org.eclipse.edc.policy.model.Operator; +import org.eclipse.edc.policy.model.Policy; import org.eclipse.edc.spi.agent.ParticipantAgent; import org.eclipse.edc.spi.monitor.Monitor; import org.junit.jupiter.api.Assertions; @@ -31,143 +31,180 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.EnumSource; +import org.mockito.ArgumentCaptor; import org.mockito.Mockito; +import java.util.Collections; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.ArgumentMatchers.eq; + class AbstractBusinessPartnerValidationTest { - private AbstractBusinessPartnerValidation validation; + private AbstractBusinessPartnerValidation validation; + + // mocks + private Monitor monitor; + private PolicyContext policyContext; + private ParticipantAgent participantAgent; + + @BeforeEach + void BeforeEach() { + this.monitor = Mockito.mock(Monitor.class); + this.policyContext = Mockito.mock(PolicyContext.class); + this.participantAgent = Mockito.mock(ParticipantAgent.class); + + Mockito.when(policyContext.getParticipantAgent()).thenReturn(participantAgent); + + validation = new AbstractBusinessPartnerValidation(monitor, true) { + }; + } + + @ParameterizedTest + @EnumSource(Operator.class) + void testFailsOnUnsupportedOperations(Operator operator) { - // mocks - private Monitor monitor; - private PolicyContext policyContext; - private ParticipantAgent 
participantAgent; + if (operator == Operator.EQ) { // only allowed operator + return; + } - @BeforeEach - void BeforeEach() { - this.monitor = Mockito.mock(Monitor.class); - this.policyContext = Mockito.mock(PolicyContext.class); - this.participantAgent = Mockito.mock(ParticipantAgent.class); + // prepare + prepareContextProblems(null); + prepareBusinessPartnerClaim("yes"); - Mockito.when(policyContext.getParticipantAgent()).thenReturn(participantAgent); + // invoke & assert + Assertions.assertFalse(validation.evaluate(operator, "foo", policyContext)); + } + + @Test + void testFailsOnUnsupportedRightValue() { + + // prepare + prepareContextProblems(null); + prepareBusinessPartnerClaim("yes"); + + // invoke & assert + Assertions.assertFalse(validation.evaluate(Operator.EQ, 1, policyContext)); + } + + @Test + void testValidationFailsWhenClaimMissing() { - validation = new AbstractBusinessPartnerValidation(monitor) {}; - } + // prepare + prepareContextProblems(null); - @ParameterizedTest - @EnumSource(Operator.class) - void testFailsOnUnsupportedOperations(Operator operator) { + // invoke + final boolean isValid = validation.evaluate(Operator.EQ, "foo", policyContext); - if (operator == Operator.EQ) { // only allowed operator - return; + // assert + Assertions.assertFalse(isValid); } - // prepare - prepareContextProblems(null); - prepareBusinessPartnerClaim("yes"); + @Test + void testValidationSucceedsWhenClaimContainsValue() { - // invoke & assert - Assertions.assertFalse(validation.evaluate(operator, "foo", policyContext)); - } + // prepare + prepareContextProblems(null); - @Test - void testFailsOnUnsupportedRightValue() { + // prepare equals + prepareBusinessPartnerClaim("foo"); + final boolean isEqualsTrue = validation.evaluate(Operator.EQ, "foo", policyContext); - // prepare - prepareContextProblems(null); - prepareBusinessPartnerClaim("yes"); + // prepare contains + prepareBusinessPartnerClaim("foobar"); + final boolean isContainedTrue = validation.evaluate(Operator.EQ, "foo", policyContext); - // invoke & assert - Assertions.assertFalse(validation.evaluate(Operator.EQ, 1, policyContext)); - } + // assert + Assertions.assertTrue(isEqualsTrue); + Assertions.assertTrue(isContainedTrue); + } - @Test - void testValidationFailsWhenClaimMissing() { + @Test + void testValidationWhenParticipantHasProblems() { - // prepare - prepareContextProblems(null); + // prepare + prepareContextProblems(Collections.singletonList("big problem")); + prepareBusinessPartnerClaim("foo"); - // invoke - final boolean isValid = validation.evaluate(Operator.EQ, "foo", policyContext); + // invoke + final boolean isValid = validation.evaluate(Operator.EQ, "foo", policyContext); - // assert - Assertions.assertFalse(isValid); - } + // Mockito.verify(monitor.debug(Mockito.anyString()); + Assertions.assertFalse(isValid); + } - @Test - void testValidationSucceedsWhenClaimContainsValue() { + @Test + void testValidationWhenSingleParticipantIsValid() { - // prepare - prepareContextProblems(null); + // prepare + prepareContextProblems(null); + prepareBusinessPartnerClaim("foo"); - // prepare equals - prepareBusinessPartnerClaim("foo"); - final boolean isEqualsTrue = validation.evaluate(Operator.EQ, "foo", policyContext); + // invoke + final boolean isContainedTrue = validation.evaluate(Operator.EQ, "foo", policyContext); - // prepare contains - prepareBusinessPartnerClaim("foobar"); - final boolean isContainedTrue = validation.evaluate(Operator.EQ, "foo", policyContext); + // Mockito.verify(monitor.debug(Mockito.anyString()); 
+ Assertions.assertTrue(isContainedTrue); + } - // assert - Assertions.assertTrue(isEqualsTrue); - Assertions.assertTrue(isContainedTrue); - } + @Test + void testValidationWhenSingleParticipantIsValidWithAgreement() { - @Test - void testValidationWhenParticipantHasProblems() { + // prepare + prepareContextProblems(null); + prepareBusinessPartnerClaim("foo"); - // prepare - prepareContextProblems(Collections.singletonList("big problem")); - prepareBusinessPartnerClaim("foo"); + var captor = ArgumentCaptor.forClass(String.class); - // invoke - final boolean isValid = validation.evaluate(Operator.EQ, "foo", policyContext); + var agreement = ContractAgreement.Builder.newInstance() + .id("agreementId") + .providerAgentId("provider") + .consumerAgentId("consumer") + .assetId("assetId") + .policy(Policy.Builder.newInstance().build()) + .build(); - // Mockito.verify(monitor.debug(Mockito.anyString()); - Assertions.assertFalse(isValid); - } + Mockito.when(policyContext.getContextData(eq(ContractAgreement.class))).thenReturn(agreement); - @Test - void testValidationWhenSingleParticipantIsValid() { + // invoke + final boolean isContainedTrue = validation.evaluate(Operator.EQ, "foo", policyContext); - // prepare - prepareContextProblems(null); - prepareBusinessPartnerClaim("foo"); + Assertions.assertTrue(isContainedTrue); - // invoke - final boolean isContainedTrue = validation.evaluate(Operator.EQ, "foo", policyContext); + Mockito.verify(monitor).info(captor.capture()); - // Mockito.verify(monitor.debug(Mockito.anyString()); - Assertions.assertTrue(isContainedTrue); - } + assertThat(captor.getValue()).contains(agreement.getId()).contains("foo"); + } - // In the past it was possible to use the 'IN' constraint with multiple BPNs as - // a list. This is no longer supported. - // The EDC must now always decline this kind of BPN format. - @Test - void testValidationForMultipleParticipants() { + // In the past it was possible to use the 'IN' constraint with multiple BPNs as + // a list. This is no longer supported. + // The EDC must now always decline this kind of BPN format. 
+ @Test + void testValidationForMultipleParticipants() { - // prepare - prepareContextProblems(null); - prepareBusinessPartnerClaim("foo"); + // prepare + prepareContextProblems(null); + prepareBusinessPartnerClaim("foo"); - // invoke & verify - Assertions.assertFalse(validation.evaluate(Operator.IN, List.of("foo", "bar"), policyContext)); - Assertions.assertFalse(validation.evaluate(Operator.IN, List.of(1, "foo"), policyContext)); - Assertions.assertFalse(validation.evaluate(Operator.IN, List.of("bar", "bar"), policyContext)); - } + // invoke & verify + Assertions.assertFalse(validation.evaluate(Operator.IN, List.of("foo", "bar"), policyContext)); + Assertions.assertFalse(validation.evaluate(Operator.IN, List.of(1, "foo"), policyContext)); + Assertions.assertFalse(validation.evaluate(Operator.IN, List.of("bar", "bar"), policyContext)); + } - private void prepareContextProblems(List problems) { - Mockito.when(policyContext.getProblems()).thenReturn(problems); + private void prepareContextProblems(List problems) { + Mockito.when(policyContext.getProblems()).thenReturn(problems); - if (problems == null || problems.isEmpty()) { - Mockito.when(policyContext.hasProblems()).thenReturn(false); - } else { - Mockito.when(policyContext.hasProblems()).thenReturn(true); + if (problems == null || problems.isEmpty()) { + Mockito.when(policyContext.hasProblems()).thenReturn(false); + } else { + Mockito.when(policyContext.hasProblems()).thenReturn(true); + } } - } - private void prepareBusinessPartnerClaim(String businessPartnerNumber) { - Mockito.when(participantAgent.getClaims()) - .thenReturn(Collections.singletonMap("referringConnector", businessPartnerNumber)); - } + private void prepareBusinessPartnerClaim(String businessPartnerNumber) { + Mockito.when(participantAgent.getClaims()) + .thenReturn(Collections.singletonMap("referringConnector", businessPartnerNumber)); + } } diff --git a/edc-extensions/control-plane-adapter/build.gradle.kts b/edc-extensions/control-plane-adapter/build.gradle.kts index fe34a0866..c205d5cb0 100644 --- a/edc-extensions/control-plane-adapter/build.gradle.kts +++ b/edc-extensions/control-plane-adapter/build.gradle.kts @@ -8,7 +8,14 @@ plugins { dependencies { implementation(edc.spi.core) implementation(edc.spi.policy) + implementation(edc.api.management) + constraints { + implementation("org.yaml:snakeyaml:2.0") { + because("version 1.33 has vulnerabilities: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-1471.") + } + } + implementation(edc.spi.catalog) implementation(edc.spi.transactionspi) implementation(edc.spi.transaction.datasource) diff --git a/edc-extensions/postgresql-migration/build.gradle.kts b/edc-extensions/postgresql-migration/build.gradle.kts index 8d7b1fa05..cb04877c0 100644 --- a/edc-extensions/postgresql-migration/build.gradle.kts +++ b/edc-extensions/postgresql-migration/build.gradle.kts @@ -11,5 +11,5 @@ dependencies { implementation(edc.sql.assetindex) implementation(edc.sql.core) - implementation("org.flywaydb:flyway-core:9.15.2") + implementation("org.flywaydb:flyway-core:9.16.3") } diff --git a/edc-tests/cucumber/build.gradle.kts b/edc-tests/cucumber/build.gradle.kts index f2e40439d..2628ce71e 100644 --- a/edc-tests/cucumber/build.gradle.kts +++ b/edc-tests/cucumber/build.gradle.kts @@ -16,7 +16,7 @@ dependencies { implementation(project(":edc-extensions:transferprocess-sftp-provisioner")) - testImplementation("com.google.code.gson:gson:2.10") + testImplementation("com.google.code.gson:gson:2.10.1") 
testImplementation("org.apache.httpcomponents:httpclient:4.5.14") testImplementation("org.junit.platform:junit-platform-suite:1.9.2") testImplementation("io.cucumber:cucumber-java:7.11.2") diff --git a/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/MultiRuntimeTest.java b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/MultiRuntimeTest.java index a28a2610d..5bf2a2417 100644 --- a/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/MultiRuntimeTest.java +++ b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/MultiRuntimeTest.java @@ -69,6 +69,7 @@ public class MultiRuntimeTest { put("edc.dataplane.selector.httpplane.destinationtypes", "HttpProxy"); put("edc.dataplane.selector.httpplane.properties", "{\"publicApiUrl\":\"http://localhost:" + SOKRATES_PUBLIC_API_PORT + "/api/public\"}"); put("edc.receiver.http.dynamic.endpoint", "http://localhost:" + SOKRATES_CONNECTOR_PORT + "/api/consumer/datareference"); + put("tractusx.businesspartnervalidation.log.agreement.validation", "true"); } }); @@ -98,6 +99,7 @@ public class MultiRuntimeTest { put("edc.dataplane.selector.httpplane.sourcetypes", "HttpData"); put("edc.dataplane.selector.httpplane.destinationtypes", "HttpProxy"); put("edc.dataplane.selector.httpplane.properties", "{\"publicApiUrl\":\"http://localhost:" + PLATO_PUBLIC_API_PORT + "/api/public\"}"); + put("tractusx.businesspartnervalidation.log.agreement.validation", "true"); } }); } diff --git a/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/Participant.java b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/Participant.java index dd43d4f90..bd1546111 100644 --- a/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/Participant.java +++ b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/lifecycle/Participant.java @@ -26,6 +26,7 @@ import org.eclipse.edc.connector.api.management.transferprocess.model.TransferRequestDto; import org.eclipse.edc.connector.policy.spi.PolicyDefinition; import org.eclipse.edc.junit.extensions.EdcRuntimeExtension; +import org.eclipse.edc.policy.model.PolicyRegistrationTypes; import org.eclipse.edc.spi.asset.AssetSelectorExpression; import org.eclipse.edc.spi.iam.IdentityService; import org.eclipse.edc.spi.system.ServiceExtension; @@ -76,6 +77,9 @@ public Participant(String moduleName, String runtimeName, Map pr this.bpn = runtimeName + "-BPN"; this.backend = properties.get("edc.receiver.http.dynamic.endpoint"); this.registerServiceMock(IdentityService.class, new MockDapsService(getBpn())); + + typeManager.registerTypes(PolicyRegistrationTypes.TYPES.toArray(Class[]::new)); + } @Override diff --git a/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/tests/HttpConsumerPullWithProxyTest.java b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/tests/HttpConsumerPullWithProxyTest.java index 78f3d2013..1f93ae5e7 100644 --- a/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/tests/HttpConsumerPullWithProxyTest.java +++ b/edc-tests/e2e-tests/src/test/java/org/eclipse/tractusx/edc/tests/HttpConsumerPullWithProxyTest.java @@ -38,7 +38,7 @@ import static org.awaitility.Awaitility.await; import static org.awaitility.pollinterval.FibonacciPollInterval.fibonacci; import static org.eclipse.edc.connector.transfer.dataplane.spi.TransferDataPlaneConstants.HTTP_PROXY; -import static org.eclipse.tractusx.edc.policy.PolicyHelperFunctions.noConstraintPolicy; +import static 
org.eclipse.tractusx.edc.policy.PolicyHelperFunctions.businessPartnerNumberPolicy; @EndToEndTest public class HttpConsumerPullWithProxyTest extends MultiRuntimeTest { @@ -61,8 +61,8 @@ void transferData_privateBackend() throws IOException, InterruptedException { .authKey(authCodeHeaderName) .authCode(authCode) .build()); - plato.createPolicy(noConstraintPolicy("policy-1")); - plato.createPolicy(noConstraintPolicy("policy-2")); + plato.createPolicy(businessPartnerNumberPolicy("policy-1", sokrates.getBpn())); + plato.createPolicy(businessPartnerNumberPolicy("policy-2", sokrates.getBpn())); plato.createContractDefinition(assetId, "def-1", "policy-1", "policy-2", ONE_WEEK); var negotiationId = sokrates.negotiateContract(plato, assetId); diff --git a/gradle.properties b/gradle.properties index 66b31427b..6b3bcce1e 100644 --- a/gradle.properties +++ b/gradle.properties @@ -1,5 +1,5 @@ groupId=org.eclipse.tractusx.edc -version=0.3.3-SNAPSHOT +version=0.3.4-SNAPSHOT javaVersion=11 # configure the build: diff --git a/settings.gradle.kts b/settings.gradle.kts index e22d4aacc..a4158ef8c 100644 --- a/settings.gradle.kts +++ b/settings.gradle.kts @@ -58,7 +58,7 @@ dependencyResolutionManagement { } // create version catalog for all EDC modules create("edc") { - version("edc", "0.0.1-20230220-SNAPSHOT") + version("edc", "0.0.1-20230220.patch1") library("spi-catalog", "org.eclipse.edc", "catalog-spi").versionRef("edc") library("spi-auth", "org.eclipse.edc", "auth-spi").versionRef("edc") library("spi-transfer", "org.eclipse.edc", "transfer-spi").versionRef("edc") @@ -137,34 +137,38 @@ dependencyResolutionManagement { library("micrometer-jersey", "org.eclipse.edc", "jersey-micrometer").versionRef("edc") library("micrometer-jetty", "org.eclipse.edc", "jetty-micrometer").versionRef("edc") library("monitor-jdklogger", "org.eclipse.edc", "monitor-jdk-logger").versionRef("edc") - library("transfer.dynamicreceiver", "org.eclipse.edc", "transfer-pull-http-dynamic-receiver").versionRef("edc") + library( + "transfer.dynamicreceiver", + "org.eclipse.edc", + "transfer-pull-http-dynamic-receiver" + ).versionRef("edc") library("transfer.receiver", "org.eclipse.edc", "transfer-pull-http-receiver").versionRef("edc") bundle( - "connector", - listOf("boot", "core-connector", "core-jersey", "core-controlplane", "api-observability") + "connector", + listOf("boot", "core-connector", "core-jersey", "core-controlplane", "api-observability") ) bundle( - "dpf", - listOf("dpf-transfer", "dpf-selector-core", "dpf-selector-client", "spi-dataplane-selector") + "dpf", + listOf("dpf-transfer", "dpf-selector-core", "dpf-selector-client", "spi-dataplane-selector") ) bundle( - "sqlstores", - listOf( - "sql-assetindex", - "sql-contract-definition", - "sql-contract-negotiation", - "sql-transferprocess", - "sql-policydef" - ) + "sqlstores", + listOf( + "sql-assetindex", + "sql-contract-definition", + "sql-contract-negotiation", + "sql-transferprocess", + "sql-policydef" + ) ) bundle( - "monitoring", - listOf("micrometer-core", "micrometer-jersey", "micrometer-jetty") + "monitoring", + listOf("micrometer-core", "micrometer-jersey", "micrometer-jetty") // listOf("micrometer-core", "micrometer-jersey", "micrometer-jetty", "monitor-jdklogger") ) }
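Note for reviewers: below is a minimal, standalone Java sketch of the evaluation semantics this patch gives AbstractBusinessPartnerValidation, assuming only what the diff above shows: the EQ operator is the only one accepted, the business partner number merely has to be contained in the 'referringConnector' claim value, and, when a contract agreement is in the policy context and the new log flag (driven by the tractusx.businesspartnervalidation.log.agreement.validation setting) is enabled, the evaluation result is logged. The class, method and parameter names here (SimpleBpnEvaluation, a String operator, a Consumer<String> standing in for the EDC Monitor, the example claim URL) are illustrative only and are not part of the patch or of the EDC SPI.

import java.util.List;
import java.util.Map;
import java.util.function.Consumer;

/**
 * Illustrative-only sketch (not the patched classes) of the behaviour added to
 * AbstractBusinessPartnerValidation: EQ-only operator support, substring matching
 * of the BPN against the 'referringConnector' claim, and optional logging of the
 * agreement evaluation result.
 */
public final class SimpleBpnEvaluation {

    private static final String REFERRING_CONNECTOR_CLAIM = "referringConnector";

    private final boolean logAgreementEvaluation;
    private final Consumer<String> log; // stands in for the EDC Monitor

    public SimpleBpnEvaluation(boolean logAgreementEvaluation, Consumer<String> log) {
        this.logAgreementEvaluation = logAgreementEvaluation;
        this.log = log;
    }

    /**
     * Mirrors the patched evaluate(): any operator other than EQ fails, a missing or
     * non-string claim or right value fails, and the BPN only needs to be contained
     * in the referringConnector URL rather than equal to it.
     */
    public boolean evaluate(String operator, Object rightValue, Map<String, Object> claims, String agreementId) {
        if (!"EQ".equals(operator) || !(rightValue instanceof String)) {
            return false;
        }
        Object claim = claims.get(REFERRING_CONNECTOR_CLAIM);
        if (!(claim instanceof String) || ((String) claim).isEmpty()) {
            return false;
        }
        boolean isCorrectBusinessPartner = ((String) claim).contains((String) rightValue);
        if (agreementId != null && logAgreementEvaluation) {
            // corresponds to the monitor.info(...) call that is guarded by the new
            // tractusx.businesspartnervalidation.log.agreement.validation setting
            log.accept("Evaluated policy access for referringConnectorClaim: " + claim
                    + " and contract id: " + agreementId + " with result: " + isCorrectBusinessPartner);
        }
        return isCorrectBusinessPartner;
    }

    public static void main(String[] args) {
        var evaluation = new SimpleBpnEvaluation(true, System.out::println);
        var claims = Map.<String, Object>of(REFERRING_CONNECTOR_CLAIM, "http://sokrates-controlplane/BPNLCDQ90000X42KU");
        // substring match on the claim succeeds and the agreement evaluation is logged
        System.out.println(evaluation.evaluate("EQ", "BPNLCDQ90000X42KU", claims, "agreement-1")); // true
        // lists with the 'IN' operator are rejected, matching testValidationForMultipleParticipants
        System.out.println(evaluation.evaluate("IN", List.of("BPNLCDQ90000X42KU", "OTHER"), claims, null)); // false
    }
}

The e2e runtimes in MultiRuntimeTest exercise the logging path by setting tractusx.businesspartnervalidation.log.agreement.validation to "true", as shown in the diff above; the extension test covers the opposite case by returning "false" for that setting and asserting isLogAgreementEvaluation() is false.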