diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS deleted file mode 100644 index 94c0655bad..0000000000 --- a/.github/CODEOWNERS +++ /dev/null @@ -1,172 +0,0 @@ -# CODEOWNERS Best Practices -# 1. Per Github docs: "Order is important; the last matching pattern takes the most precedence." -# Please define less specific codeowner paths before more specific codeowner paths in order for the more specific rule to have priority - -# Root -* @smartcontractkit/ccip - -# Chains -/common @smartcontractkit/bix-framework -/core/chains/ @smartcontractkit/bix-framework - -# Services -/core/services/directrequest @smartcontractkit/keepers -/core/services/feeds @smartcontractkit/op-core @eutopian @yevshev -/core/services/synchronization/telem @smartcontractkit/realtime -/core/capabilities/ccip @smartcontractkit/ccip-offchain - -# To be deprecated in Chainlink V3 -/core/services/fluxmonitorv2 @smartcontractkit/foundations -/core/services/job @smartcontractkit/ccip -/core/services/keystore @smartcontractkit/keepers -/core/services/ocr* @smartcontractkit/foundations -/core/services/periodicbackup @smartcontractkit/foundations -/core/services/pg @smartcontractkit/foundations @samsondav -/core/services/pipeline @smartcontractkit/foundations @smartcontractkit/bix-framework -/core/services/telemetry @smartcontractkit/realtime -/core/services/relay/evm/mercury @smartcontractkit/data-streams-engineers -/core/services/webhook @smartcontractkit/foundations @smartcontractkit/bix-framework -/core/services/llo @smartcontractkit/data-streams-engineers - -# VRF-related services -/core/services/vrf @smartcontractkit/vrf-team -/core/services/blockhashstore @smartcontractkit/vrf-team -/core/services/blockheaderfeeder @smartcontractkit/vrf-team -/core/services/pipeline/task.vrf.go @smartcontractkit/vrf-team -/core/services/pipeline/task.vrfv2.go @smartcontractkit/vrf-team -/core/services/pipeline/task.vrfv2plus.go @smartcontractkit/vrf-team - -# Keeper/Automation-related services -/core/services/keeper @smartcontractkit/keepers -/core/services/ocr2/plugins/ocr2keeper @smartcontractkit/keepers - -# Chainlink Functions -core/services/functions @smartcontractkit/functions -core/services/ocr2/plugins/functions @smartcontractkit/functions -core/services/s4 @smartcontractkit/functions -core/service/ocr2/plugins/s4 @smartcontractkit/functions -core/services/ocr2/plugins/threshold @smartcontractkit/functions -core/services/relay/evm/functions @smartcontractkit/functions -core/services/relay/evm/functions @smartcontractkit/functions -core/scripts/functions @smartcontractkit/functions -core/scripts/gateway @smartcontractkit/functions - -# Contracts -/contracts/ @RensR @matYang @RayXpub @elatoskinas - -# First we match on project names to catch files like the compilation scripts, -# gas snapshots and other files not places in the project directories. -# This could give some false positives, so afterwards we match on the project directories -# to ensure the entire directory is always owned by the correct team. 
- -/contracts/**/*keeper* @smartcontractkit/keepers -/contracts/**/*upkeep* @smartcontractkit/keepers -/contracts/**/*automation* @smartcontractkit/keepers -/contracts/gas-snapshots/automation.gas-snapshot @smartcontractkit/keepers -/contracts/**/ccip/ @smartcontractkit/ccip-onchain @makramkd -/contracts/**/*Functions* @smartcontractkit/functions - -/contracts/src/v0.8/functions @smartcontractkit/functions -/contracts/**/*functions* @smartcontractkit/functions -/contracts/**/*llo-feeds* @smartcontractkit/data-streams-engineers -/contracts/**/*vrf* @smartcontractkit/vrf-team -/contracts/**/*l2ep* @smartcontractkit/bix-ship -/contracts/**/*keystone* @smartcontractkit/keystone - -/contracts/src/v0.8/automation @smartcontractkit/keepers -/contracts/src/v0.8/functions @smartcontractkit/functions -# TODO: interfaces folder, folder should be removed and files moved to the correct folders -/contracts/src/v0.8/l2ep @chris-de-leon-cll -/contracts/src/v0.8/llo-feeds @smartcontractkit/data-streams-engineers -# TODO: mocks folder, folder should be removed and files moved to the correct folders -/contracts/src/v0.8/operatorforwarder @smartcontractkit/data-feeds-engineers -/contracts/src/v0.8/shared @RensR @matYang @RayXpub @elatoskinas -# TODO: tests folder, folder should be removed and files moved to the correct folders -# TODO: transmission folder, owner should be found -/contracts/src/v0.8/vrf @smartcontractkit/vrf-team - - -# At the end, match any files missed by the patterns above -/contracts/scripts/native_solc_compile_all_events_mock @smartcontractkit/functions -# Remove changeset files from the codeowners -/contracts/.changeset - - -# Tests -/integration-tests/ @smartcontractkit/test-tooling-team -/integration-tests/ccip-tests @smartcontractkit/ccip-offchain -/integration-tests/**/*keeper* @smartcontractkit/keepers -/integration-tests/**/*automation* @smartcontractkit/keepers -/integration-tests/**/*lm_* @smartcontractkit/liquidity-manager - -# Deployment tooling -# Initially the common structures owned by CCIP -/integration-tests/deployment @smartcontractkit/ccip -/integration-tests/deployment/ccip @smartcontractkit/ccip -# TODO: As more products add their deployment logic here, add the team as an owner - -# CI/CD -/.github/** @smartcontractkit/releng @smartcontractkit/test-tooling-team @jasonmci @smartcontractkit/ccip -/.github/workflows/integration-tests.yml @smartcontractkit/test-tooling-team @jasonmci -/.github/workflows/**-tests.yml @smartcontractkit/test-tooling-team @jasonmci -/.github/workflows/integration-chaos-tests.yml @smartcontractkit/test-tooling-team @jasonmci -/.github/workflows/integration-tests-publish.yml @smartcontractkit/test-tooling-team @jasonmci -/.github/workflows/performance-tests.yml @smartcontractkit/test-tooling-team @jasonmci - -/.github/workflows/automation-ondemand-tests.yml @smartcontractkit/keepers -/.github/workflows/automation-benchmark-tests.yml @smartcontractkit/keepers -/.github/workflows/automation-load-tests.yml @smartcontractkit/keepers -/.github/workflows/automation-nightly-tests.yml @smartcontractkit/keepers - -/core/chainlink.Dockerfile @smartcontractkit/prodsec-public - -# Dependencies -contracts/scripts/requirements.txt @smartcontractkit/prodsec-public -.tool-versions @smartcontractkit/prodsec-public -.nvmrc @smartcontractkit/prodsec-public -contracts/package.json @smartcontractkit/prodsec-public -contracts/pnpm.lock @smartcontractkit/prodsec-public -go.mod @smartcontractkit/prodsec-public @smartcontractkit/releng @smartcontractkit/foundations 
-go.sum @smartcontractkit/prodsec-public @smartcontractkit/releng @smartcontractkit/foundations -integration-tests/go.mod @smartcontractkit/prodsec-public -integration-tests/go.sum @smartcontractkit/prodsec-public -flake.nix @smartcontractkit/prodsec-public -flake.lock @smartcontractkit/prodsec-public - -# Config -./core/config @samsondav @jmank88 - -# LOOP Plugins -/plugins @jmank88 @krehermann - -# Config -./docs/CONFIG.md @smartcontractkit/foundations @smartcontractkit/devrel -./internal/config/docs.toml @smartcontractkit/foundations @smartcontractkit/devrel - - -# CCIP override -/core/ @smartcontractkit/ccip -/core/scripts/ccip/manual-execution @smartcontractkit/ccip-offchain -/contracts/ @smartcontractkit/ccip-onchain @makramkd @elatoskinas @RayXpub -go.mod @smartcontractkit/ccip @smartcontractkit/prodsec-public @smartcontractkit/releng @smartcontractkit/foundations -go.sum @smartcontractkit/ccip @smartcontractkit/prodsec-public @smartcontractkit/releng @smartcontractkit/foundations -integration-tests/go.mod @smartcontractkit/ccip @smartcontractkit/prodsec-public -integration-tests/go.sum @smartcontractkit/ccip @smartcontractkit/prodsec-public - -# leave snapshots & changeset as ownerless -/contracts/gas-snapshots/ -/contracts/.changeset/ - -# CCIP LM -/core/**/liquiditymanager/ @smartcontractkit/liquidity-manager -/core/services/relay/evm/liquidity_manager.go @smartcontractkit/liquidity-manager -/contracts/**/liquiditymanager/ @smartcontractkit/liquidity-manager - -# CCIP RMN -/contracts/src/v0.8/ccip/RMN.sol @smartcontractkit/rmn -/contracts/src/v0.8/ccip/ARMProxy.sol @smartcontractkit/rmn -/contracts/src/v0.8/ccip/interfaces/IRMN.sol @smartcontractkit/rmn -/contracts/src/v0.8/ccip/test/arm @smartcontractkit/rmn - -# CCIP Capabilities -/core/capabilities/ccip @smartcontractkit/ccip-offchain diff --git a/.github/E2E_TESTS_ON_GITHUB_CI.md b/.github/E2E_TESTS_ON_GITHUB_CI.md deleted file mode 100644 index 02144eff64..0000000000 --- a/.github/E2E_TESTS_ON_GITHUB_CI.md +++ /dev/null @@ -1,58 +0,0 @@ -# E2E Tests on GitHub CI - -E2E tests are executed on GitHub CI using the [E2E Tests Reusable Workflow](#about-the-reusable-workflow) or dedicated workflows. - -## Automatic workflows - -These workflows are designed to run automatically at crucial stages of the software development process, such as on every commit in a PR, nightly or before release. - -### PR E2E Tests - -Run on every commit in a PR to ensure changes do not introduce regressions. - -[Link](https://github.com/smartcontractkit/chainlink/blob/develop/.github/workflows/integration-tests.yml) - -### Nightly E2E Tests - -Conducted nightly to catch issues that may develop over time or with accumulated changes. - -[Link](https://github.com/smartcontractkit/chainlink/blob/develop/.github/workflows/run-nightly-e2e-tests.yml) - -### Release E2E Tests - -This section contains automatic workflows triggering E2E tests at release. - -#### Client Compatibility Tests - -[Link](https://github.com/smartcontractkit/chainlink/actions/workflows/client-compatibility-tests.yml) - -## On-Demand Workflows - -Triggered manually by QA for specific testing needs. 
- -**Examples:** - -- [On-Demand Automation Tests](https://github.com/smartcontractkit/chainlink/actions/workflows/automation-ondemand-tests.yml) -- [CCIP Chaos Tests](https://github.com/smartcontractkit/chainlink/actions/workflows/ccip-chaos-tests.yml) -- [OCR Soak Tests](https://github.com/smartcontractkit/chainlink/actions/workflows/on-demand-ocr-soak-test.yml) -- [VRFv2Plus Smoke Tests](https://github.com/smartcontractkit/chainlink/actions/workflows/on-demand-vrfv2plus-smoke-tests.yml) - -## Test Configs - -E2E tests utilize TOML files to define their parameters. Each test is equipped with a default TOML config, which can be overridden by specifying an alternative TOML config. This allows for running tests with varied parameters, such as on a non-default blockchain network. For tests executed on GitHub CI, both the default configs and any override configs must reside within the git repository. The `test_config_override_path` workflow input is used to provide a path to an override config. - -Config overrides should be stored in `testconfig/*/overrides/*.toml`. Placing files here will not trigger a rebuild of the test runner image. - -**Important Note:** The use of `base64Config` input is deprecated in favor of `test_config_override_path`. For more details, refer to [the decision log](https://smartcontract-it.atlassian.net/wiki/spaces/TT/pages/927596563/Storing+All+Test+Configs+In+Git). - -To learn more about test configs see [CTF Test Config](https://github.com/smartcontractkit/chainlink-testing-framework/blob/main/lib/config/README.md). - -## Test Secrets - -For security reasons, test secrets and sensitive information are not stored directly within the test config TOML files. Instead, these secrets are securely injected into tests using environment variables. For a detailed explanation on managing test secrets, refer to our [Test Secrets documentation](https://github.com/smartcontractkit/chainlink-testing-framework/blob/main/lib/config/README.md#test-secrets). - -If you need to run a GitHub workflow using custom secrets, please refer to the [guide on running GitHub workflows with your test secrets](https://github.com/smartcontractkit/chainlink-testing-framework/blob/main/lib/config/README.md#run-github-workflow-with-your-test-secrets). - -## About the E2E Test Reusable Workflow - -For information on the E2E Test Reusable Workflow, visit the documentation in the [smartcontractkit/.github repository](https://github.com/smartcontractkit/.github/blob/main/.github/workflows/README.md). diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml deleted file mode 100644 index c934b6b945..0000000000 --- a/.github/ISSUE_TEMPLATE/config.yml +++ /dev/null @@ -1,5 +0,0 @@ -blank_issues_enabled: true -contact_links: - - name: Question - url: https://stackoverflow.com/questions/tagged/chainlink - about: Please ask and answer questions here. diff --git a/.github/ISSUE_TEMPLATE/development.md b/.github/ISSUE_TEMPLATE/development.md deleted file mode 100644 index 45baaccf24..0000000000 --- a/.github/ISSUE_TEMPLATE/development.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -name: Development Issue -about: Report an issue encountered while working on code found in this repository. 
-title: '[DEVEL] ' -labels: 'Development' -assignees: '' ---- - -**Description** -[replace this line with a clear and concise description of the development issue you are experiencing] - -**Your Environment** -[replace this line with basic information about your environment, such as your operating system and the versions of any relevant tools you are using (e.g. Go, Docker)] - -**Basic Information** -[replace this line with basic information about the issue you are experiencing, including but not limited to the names of the files you are working with and any relevant error messages] - -**Steps to Reproduce** -[replace this line with detailed steps to reproduce the issue you are experiencing] - -**Additional Information** -[replace this line with any additional information you would like to provide, such as screenshots illustrating the issue] diff --git a/.github/ISSUE_TEMPLATE/faucet.md b/.github/ISSUE_TEMPLATE/faucet.md deleted file mode 100644 index 47d82b0148..0000000000 --- a/.github/ISSUE_TEMPLATE/faucet.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -name: Faucet Issue -about: Report an issue with a Chainlink LINK Faucet. -title: '[FAUC] ' -labels: 'Faucet' -assignees: '' ---- - -**Description** -[replace this line with a clear and concise description of the Chainlink LINK Faucet issue you are experiencing] - -**Basic Information** -[replace this line with basic information about the issue you are experiencing, including but not limited to your testnet address, the name and version of your web browser and wallet, and the link to the faucet transaction on Etherscan] - -**Steps to Reproduce** -[replace this line with detailed steps to reproduce the issue you are experiencing] - -**Additional Information** -[replace this line with any additional information you would like to provide, such as screenshots illustrating the issue] diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md deleted file mode 100644 index 0535501239..0000000000 --- a/.github/ISSUE_TEMPLATE/feature-request.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -name: Feature Request -about: Request a feature. Help us make Chainlink better! -title: '[FEAT] ' -labels: 'Feature Request' -assignees: '' ---- - -**Description** -[replace this line with a clear and concise description of the feature you are requesting] - -**Motivation** -[replace this line with a clear and concise explanation of _why_ you are requesting this feature] - -**Justification** -[replace this line with a clear and concise explanation of _why_ the feature you are requesting is the best way to approach this issue and list other approaches you considered] - -**Additional Information** -[replace this line with any additional information you would like to provide, such as examples or screenshots of similar features] diff --git a/.github/ISSUE_TEMPLATE/node-operator.md b/.github/ISSUE_TEMPLATE/node-operator.md deleted file mode 100644 index 5857679e5e..0000000000 --- a/.github/ISSUE_TEMPLATE/node-operator.md +++ /dev/null @@ -1,30 +0,0 @@ ---- -name: Node Operator Issue -about: Report an issue encountered while operating a Chainlink node. 
-title: '[NODE] ' -labels: 'Node Operator' -assignees: '' ---- - -**Description** -[replace this line with a clear and concise description of the issue you are experiencing] - -**Basic Information** -[replace this line with basic information about the issue you are experiencing, including but not limited to all relevant logs and any other relevant information, such as if you are using a Docker container to run the node, job specification, oracle contract address, transaction IDs, etc.] - -- Network: [e.g. Ethereum Mainnet, Ropsten] -- Blockchain Client: [name and version of blockchain client e.g. Geth v1.9.6] -- Go Version: [e.g. v1.12] -- Operating System: [name and version of operating system running Chainlink node] -- Commit: [log INFO line when starting node] -- Hosting Provider: [e.g. AWS, GCP, self-hosted] -- Startup Command: [e.g. `docker run smartcontract/chainlink local n`] - -**Environment Variables** -[replace this line with the output of the environment variables when running the node in debug mode] - -**Steps to Reproduce** -[replace this line with detailed steps to reproduce the issue you are experiencing] - -**Additional Information** -[replace this line with any additional information you would like to provide, such as screenshots illustrating the issue] diff --git a/.github/ISSUE_TEMPLATE/smart-contract.md b/.github/ISSUE_TEMPLATE/smart-contract.md deleted file mode 100644 index e4b9b97bf7..0000000000 --- a/.github/ISSUE_TEMPLATE/smart-contract.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -name: Smart Contract Issue -about: Report an issue with smart contracts found in this repository. -title: '[SMRT] ' -labels: 'Smart Contract' -assignees: '' ---- - -**Description** -[replace this line with a clear and concise description of the smart contract issue you are experiencing] - -**Basic Information** -[replace this line with basic information about the issue you are experiencing, including but not limited to the names of the smart contract files and the version of the Chainlink software repository in which they are found, contract addresses, transaction IDs, etc.] 
- -**Steps to Reproduce** -[replace this line with detailed steps to reproduce the issue you are experiencing] - -**Additional Information** -[replace this line with any additional information you would like to provide, such as screenshots illustrating the issue] diff --git a/.github/actions/build-chainlink-image/action.yml b/.github/actions/build-chainlink-image/action.yml deleted file mode 100644 index 7381d887a1..0000000000 --- a/.github/actions/build-chainlink-image/action.yml +++ /dev/null @@ -1,54 +0,0 @@ -name: Build Chainlink Image -description: A composite action that allows building and publishing the Chainlink image for integration testing - -inputs: - tag_suffix: - description: The suffix to append to the image tag (usually blank or "-plugins") - default: "" - dockerfile: - description: The path to the Dockerfile to use (usually core/chainlink.Dockerfile or plugins/chainlink.Dockerfile) - default: core/chainlink.Dockerfile - git_commit_sha: - description: The git commit sha to use for the image tag - default: ${{ github.sha }} - AWS_REGION: - description: "AWS region to use for ECR" - AWS_ROLE_TO_ASSUME: - description: "AWS role to assume for ECR" - dep_evm_sha: - description: The chainlink-evm commit sha to use in go deps - required: false - check_image_exists: - description: "Check if the image exists in ECR before building" - required: false - default: 'false' - -runs: - using: composite - steps: - - name: Check if image exists - if: ${{ inputs.dep_evm_sha != '' || inputs.check_image_exists == 'true'}} - id: check-image - uses: smartcontractkit/chainlink-github-actions/docker/image-exists@75a9005952a9e905649cfb5a6971fd9429436acd # v2.3.25 - with: - repository: chainlink - tag: ${{ inputs.git_commit_sha }}${{ inputs.tag_suffix }} - AWS_REGION: ${{ inputs.AWS_REGION }} - AWS_ROLE_TO_ASSUME: ${{ inputs.AWS_ROLE_TO_ASSUME }} - - name: Build Image - if: steps.check-image.outputs.exists != 'true' - uses: smartcontractkit/.github/actions/ctf-build-image@1a26fe378d7ebdc34ab1fe31ec4a6d1c376199f8 # ctf-build-image@0.0.0 - with: - cl_repo: smartcontractkit/ccip - cl_ref: ${{ inputs.git_commit_sha }} - cl_dockerfile: ${{ inputs.dockerfile }} - push_tag: ${{ env.CHAINLINK_IMAGE }}:${{ inputs.git_commit_sha }}${{ inputs.tag_suffix }} - QA_AWS_REGION: ${{ inputs.AWS_REGION }} - QA_AWS_ROLE_TO_ASSUME: ${{ inputs.AWS_ROLE_TO_ASSUME }} - GO_COVER_FLAG: true - dep_evm_sha: ${{ inputs.dep_evm_sha }} - - name: Print Chainlink Image Built - shell: sh - run: | - echo "### Chainlink node image tag used for this test run :link:" >>$GITHUB_STEP_SUMMARY - echo "\`${{inputs.git_commit_sha}}\`" >>$GITHUB_STEP_SUMMARY diff --git a/.github/actions/build-sign-publish-chainlink/action.yml b/.github/actions/build-sign-publish-chainlink/action.yml deleted file mode 100644 index 302f436390..0000000000 --- a/.github/actions/build-sign-publish-chainlink/action.yml +++ /dev/null @@ -1,270 +0,0 @@ -name: Build and Publish Chainlink - -description: A composite action that allows building and publishing signed chainlink images. 
- -inputs: - # Inputs for publishing - publish: - description: When set to the string boolean value of "true", the resulting built image will be published - default: "false" - required: false - - dockerfile: - description: Path to the Dockerfile (relative to the repo root) - default: core/chainlink.Dockerfile - required: false - dockerhub_username: - description: Username for Docker Hub to avoid rate limits when pulling public images - required: false - dockerhub_password: - description: Password for Docker Hub to avoid rate limits when pulling public images - required: false - ecr-hostname: - description: The ECR registry scope - default: public.ecr.aws - required: false - ecr-image-name: - description: | - The image name with path, in the format of `[registry]/repository`. For private ECR repos the registry name is optional, where for public repos, it is required. - Eg. Public ECR repo `chainlink` and registry alias `chainlinklabs` should be `chainlinklabs/chainlink`. For a private ECR repo `chainlink` the image name should be `chainlink` - default: chainlink/chainlink - required: false - ecr-tag-suffix: - description: Docker image tag suffix - required: false - git-commit-sha: - description: Git commit SHA used as metadata when building the application (appears in logs) - default: ${{ github.event.pull_request.head.sha || github.sha }} - required: false - aws-role-to-assume: - description: The AWS role to assume as the CD user, if any. Used in configuring the docker/login-action - required: false - aws-role-duration-seconds: - description: The duration of the role assumed - required: false - aws-region: - description: The AWS region the ECR repository is located in, should only be needed for public ECR repositories, used in configuring docker/login-action - required: false - - # Inputs for signing - sign-images: - description: When set to the string boolean value of "true", the resulting build image will be signed - default: "false" - required: false - verify-signature: - description: When set to the string boolean value of "true", the resulting build image signature will be verified - default: "false" - required: false - -outputs: - docker-image-tag: - description: The docker image tag that was built and pushed - value: ${{ steps.save-non-root-image-name-env.outputs.image-tag }} - docker-image-digest: - description: The docker image digest that was built and pushed - value: ${{ steps.save-non-root-image-name-env.outputs.image-digest }} - -runs: - using: composite - steps: - - name: Set shared variables - shell: bash - # See https://docs.github.com/en/actions/learn-github-actions/workflow-commands-for-github-actions#multiline-strings - run: | - SHARED_IMAGES=${{ inputs.ecr-hostname }}/${{ inputs.ecr-image-name }} - OIDC_ISSUER=https://token.actions.githubusercontent.com - OIDC_IDENTITY=https://github.com/smartcontractkit/chainlink/.github/workflows/build-publish.yml@${{ github.ref }} - - SHARED_TAG_LIST=$(cat << EOF - type=ref,event=branch,suffix=${{ inputs.ecr-tag-suffix }} - type=semver,pattern={{version}},suffix=${{ inputs.ecr-tag-suffix }} - type=sha,format=short,suffix=${{ inputs.ecr-tag-suffix }} - EOF - ) - - SHARED_BUILD_ARGS=$(cat << EOF - COMMIT_SHA=${{ inputs.git-commit-sha }} - EOF - ) - - echo "shared-images<> $GITHUB_ENV - echo "$SHARED_IMAGES" >> $GITHUB_ENV - echo "EOF" >> $GITHUB_ENV - - echo "oidc-issuer=${OIDC_ISSUER}" >> $GITHUB_ENV - echo "oidc-identity=${OIDC_IDENTITY}" >> $GITHUB_ENV - - echo "shared-tag-list<> $GITHUB_ENV - echo "$SHARED_TAG_LIST" >> 
$GITHUB_ENV - echo "EOF" >> $GITHUB_ENV - - echo "shared-build-args<> $GITHUB_ENV - echo "$SHARED_BUILD_ARGS" >> $GITHUB_ENV - echo "EOF" >> $GITHUB_ENV - - - if: inputs.publish == 'true' - # Log in to AWS for publish to ECR - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 - with: - role-to-assume: ${{ inputs.aws-role-to-assume }} - role-duration-seconds: ${{ inputs.aws-role-duration-seconds }} - aws-region: ${{ inputs.aws-region }} - mask-aws-account-id: true - role-session-name: build-sign-publish-chainlink - - - if: inputs.publish == 'true' - name: Login to ECR - uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0 - with: - registry: ${{ inputs.ecr-hostname }} - - - name: Setup Docker Buildx - uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0 - - - name: Generate docker metadata for root image - id: meta-root - uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 # v5.5.1 - env: - DOCKER_METADATA_PR_HEAD_SHA: "true" - with: - # list of Docker images to use as base name for tags - images: ${{ env.shared-images }} - # XXX: DO NOT USE SHARED TAGS HERE - tags: | - type=ref,event=branch,suffix=${{ inputs.ecr-tag-suffix }}-root - type=semver,pattern={{version}},suffix=${{ inputs.ecr-tag-suffix }}-root - type=sha,format=short,suffix=${{ inputs.ecr-tag-suffix }}-root - - # To avoid rate limiting from Docker Hub, we login with a paid user account. - - name: Login to Docker Hub - if: inputs.dockerhub_username && inputs.dockerhub_password - uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0 - with: - username: ${{ inputs.dockerhub_username }} - password: ${{ inputs.dockerhub_password }} - - - name: Build and push root docker image - id: buildpush-root - uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 # v5.3.0 - with: - push: ${{ inputs.publish }} - context: . - load: ${{ contains(inputs.publish, false) }} - tags: ${{ steps.meta-root.outputs.tags }} - labels: ${{ steps.meta-root.outputs.labels }} - file: ${{ inputs.dockerfile }} - build-args: | - CHAINLINK_USER=root - ${{ env.shared-build-args }} - - - name: Save root image name in GITHUB_ENV - id: save-root-image-name-env - shell: sh - run: | - IMAGES_NAME_RAW=${{ fromJSON(steps.buildpush-root.outputs.metadata)['image.name'] }} - IMAGE_NAME=$(echo "$IMAGES_NAME_RAW" | cut -d"," -f1) - IMAGE_DIGEST=${{ fromJSON(steps.buildpush-root.outputs.metadata)['containerimage.digest'] }} - echo "root_image_name=${IMAGE_NAME}" >> $GITHUB_ENV - echo "root_image_digest=${IMAGE_DIGEST}" >> $GITHUB_ENV - - - name: Generate docker metadata for non-root image - id: meta-nonroot - uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 # v5.5.1 - env: - DOCKER_METADATA_PR_HEAD_SHA: "true" - with: - flavor: | - latest=auto - prefix= - suffix= - images: ${{ env.shared-images }} - tags: ${{ env.shared-tag-list }} - - # To avoid rate limiting from Docker Hub, we login with a paid user account. 
- - name: Login to Docker Hub - if: inputs.dockerhub_username && inputs.dockerhub_password - uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0 - with: - username: ${{ inputs.dockerhub_username }} - password: ${{ inputs.dockerhub_password }} - - - name: Build and push non-root docker image - id: buildpush-nonroot - uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 # v5.3.0 - with: - push: ${{ inputs.publish }} - context: . - load: ${{ contains(inputs.publish, false) }} - tags: ${{ steps.meta-nonroot.outputs.tags }} - labels: ${{ steps.meta-nonroot.outputs.labels }} - file: ${{ inputs.dockerfile }} - build-args: | - CHAINLINK_USER=chainlink - ${{ env.shared-build-args }} - - - name: Save non-root image name in GITHUB_ENV and GITHUB_STEP_SUMMARY - id: save-non-root-image-name-env - shell: sh - run: | - IMAGES_NAME_RAW=${{ fromJSON(steps.buildpush-nonroot.outputs.metadata)['image.name'] }} - IMAGE_DIGEST=${{ fromJSON(steps.buildpush-nonroot.outputs.metadata)['containerimage.digest'] }} - IMAGE_NAME=$(echo "$IMAGES_NAME_RAW" | cut -d"," -f1) - IMAGE_TAG=$(echo "$IMAGES_NAME_RAW" | cut -d":" -f2) - echo "nonroot_image_name=${IMAGE_NAME}" >> $GITHUB_ENV - echo "nonroot_image_digest=${IMAGE_DIGEST}" >> $GITHUB_ENV - echo '### Docker Image' >> $GITHUB_STEP_SUMMARY - echo "Image Name: ${IMAGE_NAME}" >> $GITHUB_STEP_SUMMARY - echo "Image Digest: ${IMAGE_DIGEST}" >> $GITHUB_STEP_SUMMARY - echo "image-tag=${IMAGE_TAG}" >> $GITHUB_OUTPUT - echo "image-digest=${IMAGE_DIGEST}" >> $GITHUB_OUTPUT - - - name: Check if non-root image runs as root - id: check-nonroot-runs-root - shell: sh - env: - PUBLISH: ${{ inputs.publish }} - run: | - echo "Fail build if non-root image runs as user: root" - # if we're publishing the image, it doesn't get loaded into the local docker daemon - # so we need to pull the image into our daemon - if [ $PUBLISH = "true" ]; then - docker pull "${nonroot_image_name}" - fi - docker inspect "${nonroot_image_name}" | jq -r '.[].Config.User' | ( ! 
grep "root" ) - - - if: inputs.sign-images == 'true' - name: Install cosign - uses: sigstore/cosign-installer@4959ce089c160fddf62f7b42464195ba1a56d382 # v3.6.0 - with: - cosign-release: "v2.4.0" - - # This automatically signs the image with the correct OIDC provider from Github - - if: inputs.sign-images == 'true' - name: Sign the published root Docker image using keyless method - shell: sh - run: | - cosign sign "${{ env.root_image_name }}" --yes - - - if: inputs.verify-signature == 'true' - name: Verify the signature of the published root Docker image using keyless - shell: sh - run: | - cosign verify "${{ env.root_image_name }}" \ - --certificate-oidc-issuer ${{ env.oidc-issuer }} \ - --certificate-identity "${{ env.oidc-identity }}" - - # This automatically signs the image with the correct OIDC provider from Github - - if: inputs.sign-images == 'true' - name: Sign the published non-root Docker image using keyless method - shell: sh - run: | - cosign sign "${{ env.nonroot_image_name }}" --yes - - - if: inputs.verify-signature == 'true' - name: Verify the signature of the published non-root Docker image using keyless - shell: sh - run: | - cosign verify "${{ env.nonroot_image_name }}" \ - --certificate-oidc-issuer ${{ env.oidc-issuer }} \ - --certificate-identity "${{ env.oidc-identity }}" diff --git a/.github/actions/delete-deployments/action.yml b/.github/actions/delete-deployments/action.yml deleted file mode 100644 index eaf7e0f61b..0000000000 --- a/.github/actions/delete-deployments/action.yml +++ /dev/null @@ -1,59 +0,0 @@ -name: Delete Deployments -description: Delete deployments by env and ref -inputs: - environment: - required: true - description: The Github environment to filter deployments by - ref: - required: true - description: The ref to filter deployments by - dry-run: - required: false - description: Whether to actually delete deployments or not - github-token: - description: "The Github token to use for authentication" - required: true - default: ${{ github.token }} - num-of-pages: - required: false - description: The number of pages (of 100 per page) to fetch deployments from, set to 'all' to fetch all deployments - default: "all" - starting-page: - required: false - description: The page to start fetching deployments from, only valid if num-of-pages is set to a number - repository: - required: false - description: The owner and repository name to delete deployments from, defaults to the current repository, ex. 
'smartcontractkit/chainlink' - default: ${{ github.repository }} - -runs: - using: composite - steps: - - uses: pnpm/action-setup@a3252b78c470c02df07e9d59298aecedc3ccdd6d # v3.0.0 - with: - version: ^9.0.0 - - - uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 - with: - node-version: "20" - cache: "pnpm" - cache-dependency-path: "./.github/actions/delete-deployments/pnpm-lock.yaml" - - - name: Install dependencies - shell: bash - run: pnpm i --prod - working-directory: "./.github/actions/delete-deployments" - - - name: Run deployment deleter - shell: bash - run: pnpm start - env: - NUM_OF_PAGES: ${{ inputs.num-of-pages }} - STARTING_PAGE: ${{ inputs.starting-page }} - GITHUB_TOKEN: ${{ inputs.github-token }} - ENVIRONMENT: ${{ inputs.environment }} - REF: ${{ inputs.ref }} - DRY_RUN: ${{ inputs.dry-run }} - OWNER: ${{ inputs.owner }} - REPOSITORY: ${{ inputs.repository }} - working-directory: "./.github/actions/delete-deployments" diff --git a/.github/actions/delete-deployments/index.ts b/.github/actions/delete-deployments/index.ts deleted file mode 100644 index e38f1957d2..0000000000 --- a/.github/actions/delete-deployments/index.ts +++ /dev/null @@ -1,232 +0,0 @@ -import { Octokit } from "@octokit/action"; -import { info, warning, isDebug } from "@actions/core"; -import { throttling } from "@octokit/plugin-throttling"; -import { retry } from "@octokit/plugin-retry"; - -async function main() { - const { - dryRun, - environment, - numOfPages, - owner, - ref, - repo, - debug, - startingPage, - } = getInputs(); - const octokit = getOctokit(debug); - - const deployments = await getDeployments({ - octokit, - owner, - repo, - environment, - ref, - paginateOptions: { - numOfPages, - startingPage, - }, - }); - const deploymentIds = deployments.map((d) => d.id); - if (dryRun) { - info(`Dry run: would delete deployments (${deploymentIds.length})`); - return; - } - - info(`Deleting deployments (${deploymentIds.length})`); - const deleteDeployments = deploymentIds.map(async (id) => { - const sharedArgs = { - owner, - repo, - deployment_id: id, - request: { - retries: 0, - }, - }; - - const setStatus = await octokit.repos - .createDeploymentStatus({ - ...sharedArgs, - state: "inactive", - }) - .then(() => true) - .catch((e) => { - warning( - `Marking deployment id ${id} to "inactive" failed: ${e.message}` - ); - return false; - }); - if (!setStatus) return false; - - return octokit.repos - .deleteDeployment({ - ...sharedArgs, - }) - .then(() => true) - .catch((e) => { - warning(`Deleting deployment id ${id} failed: ${e.message}`); - return false; - }); - }); - - const processed = await Promise.all(deleteDeployments); - const succeeded = processed.filter((p) => !!p); - info( - `Successfully deleted ${succeeded.length}/${processed.length} deployments` - ); -} -main(); - -function getInputs() { - const debug = !!(process.env.DEBUG || isDebug()); - - const dryRun = process.env.DRY_RUN === "true"; - - const environment = process.env.ENVIRONMENT; - if (!environment) throw new Error("ENVIRONMENT not set"); - - const ref = process.env.REF; - - const repository = process.env.REPOSITORY; - if (!repository) throw new Error("REPOSITORY not set"); - const [owner, repo] = repository.split("/"); - - const rawStartingPage = process.env.STARTING_PAGE; - - let startingPage: number | undefined; - if (rawStartingPage) { - startingPage = parseInt(rawStartingPage); - if (isNaN(startingPage)) { - throw new Error(`STARTING_PAGE is not a number: ${rawStartingPage}`); - } - if (startingPage < 0) { - 
throw new Error( - `STARTING_PAGE must be a positive integer or zero: ${rawStartingPage}` - ); - } - info(`Starting from page ${startingPage}`); - } - - const rawNumOfPages = process.env.NUM_OF_PAGES; - let numOfPages: "all" | number = "all"; - if (rawNumOfPages === "all") { - info("Fetching all pages of deployments"); - } else { - const parsedPages = parseInt(rawNumOfPages || ""); - if (isNaN(parsedPages)) { - throw new Error(`NUM_OF_PAGES is not a number: ${rawNumOfPages}`); - } - if (parsedPages < 1) { - throw new Error(`NUM_OF_PAGES must be greater than 0: ${rawNumOfPages}`); - } - numOfPages = parsedPages; - } - - if (numOfPages === "all" && startingPage) { - throw new Error(`Cannot use STARTING_PAGE with NUM_OF_PAGES=all`); - } - - const parsedInputs = { - environment, - ref, - owner, - repo, - numOfPages, - startingPage, - dryRun, - debug, - }; - info(`Configuration: ${JSON.stringify(parsedInputs)}`); - return parsedInputs; -} - -function getOctokit(debug: boolean) { - const OctokitAPI = Octokit.plugin(throttling, retry); - const octokit = new OctokitAPI({ - log: debug ? console : undefined, - throttle: { - onRateLimit: (retryAfter, options, octokit, retryCount) => { - octokit.log.warn( - // Types are busted from octokit - //@ts-expect-error - `Request quota exhausted for request ${options.method} ${options.url}` - ); - - octokit.log.info(`Retrying after ${retryAfter} seconds!`); - return true; - }, - onSecondaryRateLimit: (_retryAfter, options, octokit) => { - octokit.log.warn( - // Types are busted from octokit - //@ts-expect-error - `SecondaryRateLimit detected for request ${options.method} ${options.url}` - ); - return true; - }, - }, - }); - - return octokit; -} - -async function getDeployments({ - octokit, - owner, - repo, - environment, - ref, - paginateOptions, -}: { - octokit: ReturnType; - owner: string; - repo: string; - environment: string; - ref?: string; - paginateOptions: { - numOfPages: number | "all"; - startingPage?: number; - }; -}) { - const listDeploymentsSharedArgs: Parameters< - typeof octokit.repos.listDeployments - >[0] = { - owner, - repo, - environment, - ref, - per_page: 100, - request: { - retries: 20, - }, - }; - - if (paginateOptions.numOfPages === "all") { - info(`Fetching all deployments`); - const deployments = await octokit.paginate(octokit.repos.listDeployments, { - ...listDeploymentsSharedArgs, - }); - - return deployments; - } else { - info( - `Fetching ${ - paginateOptions.numOfPages * listDeploymentsSharedArgs.per_page! 
- } deployments` - ); - const deployments: Awaited< - ReturnType - >["data"] = []; - - const offset = paginateOptions.startingPage || 0; - for (let i = offset; i < paginateOptions.numOfPages + offset; i++) { - const deploymentPage = await octokit.repos.listDeployments({ - ...listDeploymentsSharedArgs, - page: i, - }); - - deployments.push(...deploymentPage.data); - } - - return deployments; - } -} diff --git a/.github/actions/delete-deployments/package.json b/.github/actions/delete-deployments/package.json deleted file mode 100644 index a2d6f83908..0000000000 --- a/.github/actions/delete-deployments/package.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "name": "delete-deployments", - "version": "1.0.0", - "description": "", - "main": "index.ts", - "scripts": { - "start": "ts-node -T .", - "test": "echo \"Error: no test specified\" && exit 1" - }, - "keywords": [], - "author": "", - "license": "ISC", - "engines": { - "node": ">=18", - "pnpm": ">=9" - }, - "dependencies": { - "@actions/core": "^1.10.1", - "@octokit/action": "^6.1.0", - "@octokit/plugin-retry": "^6.0.1", - "@octokit/plugin-throttling": "^7.0.0", - "ts-node": "^10.9.2" - }, - "devDependencies": { - "@octokit/types": "^11.1.0", - "@types/node": "^20.12.10", - "typescript": "^5.4.5" - } -} diff --git a/.github/actions/delete-deployments/pnpm-lock.yaml b/.github/actions/delete-deployments/pnpm-lock.yaml deleted file mode 100644 index d9f25b3f80..0000000000 --- a/.github/actions/delete-deployments/pnpm-lock.yaml +++ /dev/null @@ -1,408 +0,0 @@ -lockfileVersion: '9.0' - -settings: - autoInstallPeers: true - excludeLinksFromLockfile: false - -importers: - - .: - dependencies: - '@actions/core': - specifier: ^1.10.1 - version: 1.10.1 - '@octokit/action': - specifier: ^6.1.0 - version: 6.1.0 - '@octokit/plugin-retry': - specifier: ^6.0.1 - version: 6.0.1(@octokit/core@5.0.0) - '@octokit/plugin-throttling': - specifier: ^7.0.0 - version: 7.0.0(@octokit/core@5.0.0) - ts-node: - specifier: ^10.9.2 - version: 10.9.2(@types/node@20.12.10)(typescript@5.4.5) - devDependencies: - '@octokit/types': - specifier: ^11.1.0 - version: 11.1.0 - '@types/node': - specifier: ^20.12.10 - version: 20.12.10 - typescript: - specifier: ^5.4.5 - version: 5.4.5 - -packages: - - '@actions/core@1.10.1': - resolution: {integrity: sha512-3lBR9EDAY+iYIpTnTIXmWcNbX3T2kCkAEQGIQx4NVQ0575nk2k3GRZDTPQG+vVtS2izSLmINlxXf0uLtnrTP+g==} - - '@actions/http-client@2.1.1': - resolution: {integrity: sha512-qhrkRMB40bbbLo7gF+0vu+X+UawOvQQqNAA/5Unx774RS8poaOhThDOG6BGmxvAnxhQnDp2BG/ZUm65xZILTpw==} - - '@cspotcode/source-map-support@0.8.1': - resolution: {integrity: sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==} - engines: {node: '>=12'} - - '@jridgewell/resolve-uri@3.1.1': - resolution: {integrity: sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==} - engines: {node: '>=6.0.0'} - - '@jridgewell/sourcemap-codec@1.4.15': - resolution: {integrity: sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==} - - '@jridgewell/trace-mapping@0.3.9': - resolution: {integrity: sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==} - - '@octokit/action@6.1.0': - resolution: {integrity: sha512-lo+nHx8kAV86bxvOVOI3vFjX3gXPd/L7guAUbvs3pUvnR2KC+R7yjBkA1uACt4gYhs4LcWP3AXSGQzsbeN2XXw==} - engines: {node: '>= 18'} - - '@octokit/auth-action@4.0.0': - resolution: {integrity: 
sha512-sMm9lWZdiX6e89YFaLrgE9EFs94k58BwIkvjOtozNWUqyTmsrnWFr/M5LolaRzZ7Kmb5FbhF9hi7FEeE274SoQ==} - engines: {node: '>= 18'} - - '@octokit/auth-token@4.0.0': - resolution: {integrity: sha512-tY/msAuJo6ARbK6SPIxZrPBms3xPbfwBrulZe0Wtr/DIY9lje2HeV1uoebShn6mx7SjCHif6EjMvoREj+gZ+SA==} - engines: {node: '>= 18'} - - '@octokit/core@5.0.0': - resolution: {integrity: sha512-YbAtMWIrbZ9FCXbLwT9wWB8TyLjq9mxpKdgB3dUNxQcIVTf9hJ70gRPwAcqGZdY6WdJPZ0I7jLaaNDCiloGN2A==} - engines: {node: '>= 18'} - - '@octokit/endpoint@9.0.0': - resolution: {integrity: sha512-szrQhiqJ88gghWY2Htt8MqUDO6++E/EIXqJ2ZEp5ma3uGS46o7LZAzSLt49myB7rT+Hfw5Y6gO3LmOxGzHijAQ==} - engines: {node: '>= 18'} - - '@octokit/graphql@7.0.1': - resolution: {integrity: sha512-T5S3oZ1JOE58gom6MIcrgwZXzTaxRnxBso58xhozxHpOqSTgDS6YNeEUvZ/kRvXgPrRz/KHnZhtb7jUMRi9E6w==} - engines: {node: '>= 18'} - - '@octokit/openapi-types@18.0.0': - resolution: {integrity: sha512-V8GImKs3TeQRxRtXFpG2wl19V7444NIOTDF24AWuIbmNaNYOQMWRbjcGDXV5B+0n887fgDcuMNOmlul+k+oJtw==} - - '@octokit/openapi-types@20.0.0': - resolution: {integrity: sha512-EtqRBEjp1dL/15V7WiX5LJMIxxkdiGJnabzYx5Apx4FkQIFgAfKumXeYAqqJCj1s+BMX4cPFIFC4OLCR6stlnA==} - - '@octokit/plugin-paginate-rest@9.2.1': - resolution: {integrity: sha512-wfGhE/TAkXZRLjksFXuDZdmGnJQHvtU/joFQdweXUgzo1XwvBCD4o4+75NtFfjfLK5IwLf9vHTfSiU3sLRYpRw==} - engines: {node: '>= 18'} - peerDependencies: - '@octokit/core': '5' - - '@octokit/plugin-rest-endpoint-methods@10.4.1': - resolution: {integrity: sha512-xV1b+ceKV9KytQe3zCVqjg+8GTGfDYwaT1ATU5isiUyVtlVAO3HNdzpS4sr4GBx4hxQ46s7ITtZrAsxG22+rVg==} - engines: {node: '>= 18'} - peerDependencies: - '@octokit/core': '5' - - '@octokit/plugin-retry@6.0.1': - resolution: {integrity: sha512-SKs+Tz9oj0g4p28qkZwl/topGcb0k0qPNX/i7vBKmDsjoeqnVfFUquqrE/O9oJY7+oLzdCtkiWSXLpLjvl6uog==} - engines: {node: '>= 18'} - peerDependencies: - '@octokit/core': '>=5' - - '@octokit/plugin-throttling@7.0.0': - resolution: {integrity: sha512-KL2k/d0uANc8XqP5S64YcNFCudR3F5AaKO39XWdUtlJIjT9Ni79ekWJ6Kj5xvAw87udkOMEPcVf9xEge2+ahew==} - engines: {node: '>= 18'} - peerDependencies: - '@octokit/core': ^5.0.0 - - '@octokit/request-error@5.0.0': - resolution: {integrity: sha512-1ue0DH0Lif5iEqT52+Rf/hf0RmGO9NWFjrzmrkArpG9trFfDM/efx00BJHdLGuro4BR/gECxCU2Twf5OKrRFsQ==} - engines: {node: '>= 18'} - - '@octokit/request@8.1.1': - resolution: {integrity: sha512-8N+tdUz4aCqQmXl8FpHYfKG9GelDFd7XGVzyN8rc6WxVlYcfpHECnuRkgquzz+WzvHTK62co5di8gSXnzASZPQ==} - engines: {node: '>= 18'} - - '@octokit/types@11.1.0': - resolution: {integrity: sha512-Fz0+7GyLm/bHt8fwEqgvRBWwIV1S6wRRyq+V6exRKLVWaKGsuy6H9QFYeBVDV7rK6fO3XwHgQOPxv+cLj2zpXQ==} - - '@octokit/types@12.6.0': - resolution: {integrity: sha512-1rhSOfRa6H9w4YwK0yrf5faDaDTb+yLyBUKOCV4xtCDB5VmIPqd/v9yr9o6SAzOAlRxMiRiCic6JVM1/kunVkw==} - - '@tsconfig/node10@1.0.9': - resolution: {integrity: sha512-jNsYVVxU8v5g43Erja32laIDHXeoNvFEpX33OK4d6hljo3jDhCBDhx5dhCCTMWUojscpAagGiRkBKxpdl9fxqA==} - - '@tsconfig/node12@1.0.11': - resolution: {integrity: sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==} - - '@tsconfig/node14@1.0.3': - resolution: {integrity: sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==} - - '@tsconfig/node16@1.0.4': - resolution: {integrity: sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==} - - '@types/node@20.12.10': - resolution: {integrity: sha512-Eem5pH9pmWBHoGAT8Dr5fdc5rYA+4NAovdM4EktRPVAAiJhmWWfQrA0cFhAbOsQdSfIHjAud6YdkbL69+zSKjw==} - - 
acorn-walk@8.2.0: - resolution: {integrity: sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==} - engines: {node: '>=0.4.0'} - - acorn@8.10.0: - resolution: {integrity: sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==} - engines: {node: '>=0.4.0'} - hasBin: true - - arg@4.1.3: - resolution: {integrity: sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==} - - before-after-hook@2.2.3: - resolution: {integrity: sha512-NzUnlZexiaH/46WDhANlyR2bXRopNg4F/zuSA3OpZnllCUgRaOF2znDioDWrmbNVsuZk6l9pMquQB38cfBZwkQ==} - - bottleneck@2.19.5: - resolution: {integrity: sha512-VHiNCbI1lKdl44tGrhNfU3lup0Tj/ZBMJB5/2ZbNXRCPuRCO7ed2mgcK4r17y+KB2EfuYuRaVlwNbAeaWGSpbw==} - - create-require@1.1.1: - resolution: {integrity: sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==} - - deprecation@2.3.1: - resolution: {integrity: sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ==} - - diff@4.0.2: - resolution: {integrity: sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==} - engines: {node: '>=0.3.1'} - - is-plain-object@5.0.0: - resolution: {integrity: sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==} - engines: {node: '>=0.10.0'} - - make-error@1.3.6: - resolution: {integrity: sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==} - - once@1.4.0: - resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} - - ts-node@10.9.2: - resolution: {integrity: sha512-f0FFpIdcHgn8zcPSbf1dRevwt047YMnaiJM3u2w2RewrB+fob/zePZcrOyQoLMMO7aBIddLcQIEK5dYjkLnGrQ==} - hasBin: true - peerDependencies: - '@swc/core': '>=1.2.50' - '@swc/wasm': '>=1.2.50' - '@types/node': '*' - typescript: '>=2.7' - peerDependenciesMeta: - '@swc/core': - optional: true - '@swc/wasm': - optional: true - - tunnel@0.0.6: - resolution: {integrity: sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg==} - engines: {node: '>=0.6.11 <=0.7.0 || >=0.7.3'} - - typescript@5.4.5: - resolution: {integrity: sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ==} - engines: {node: '>=14.17'} - hasBin: true - - undici-types@5.26.5: - resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==} - - undici@6.16.0: - resolution: {integrity: sha512-HQfVddOTb5PJtfLnJ1Px8bNGyIg/z7WTj1hjUSna1Itsv59Oca9JdclIU08ToNqvWWXjFLRzc9rqjnpfw5UWcQ==} - engines: {node: '>=18.17'} - - universal-user-agent@6.0.0: - resolution: {integrity: sha512-isyNax3wXoKaulPDZWHQqbmIx1k2tb9fb3GGDBRxCscfYV2Ch7WxPArBsFEG8s/safwXTT7H4QGhaIkTp9447w==} - - uuid@8.3.2: - resolution: {integrity: sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==} - hasBin: true - - v8-compile-cache-lib@3.0.1: - resolution: {integrity: sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==} - - wrappy@1.0.2: - resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} - - yn@3.1.1: - resolution: {integrity: sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==} - engines: {node: '>=6'} - 
-snapshots: - - '@actions/core@1.10.1': - dependencies: - '@actions/http-client': 2.1.1 - uuid: 8.3.2 - - '@actions/http-client@2.1.1': - dependencies: - tunnel: 0.0.6 - - '@cspotcode/source-map-support@0.8.1': - dependencies: - '@jridgewell/trace-mapping': 0.3.9 - - '@jridgewell/resolve-uri@3.1.1': {} - - '@jridgewell/sourcemap-codec@1.4.15': {} - - '@jridgewell/trace-mapping@0.3.9': - dependencies: - '@jridgewell/resolve-uri': 3.1.1 - '@jridgewell/sourcemap-codec': 1.4.15 - - '@octokit/action@6.1.0': - dependencies: - '@octokit/auth-action': 4.0.0 - '@octokit/core': 5.0.0 - '@octokit/plugin-paginate-rest': 9.2.1(@octokit/core@5.0.0) - '@octokit/plugin-rest-endpoint-methods': 10.4.1(@octokit/core@5.0.0) - '@octokit/types': 12.6.0 - undici: 6.16.0 - - '@octokit/auth-action@4.0.0': - dependencies: - '@octokit/auth-token': 4.0.0 - '@octokit/types': 11.1.0 - - '@octokit/auth-token@4.0.0': {} - - '@octokit/core@5.0.0': - dependencies: - '@octokit/auth-token': 4.0.0 - '@octokit/graphql': 7.0.1 - '@octokit/request': 8.1.1 - '@octokit/request-error': 5.0.0 - '@octokit/types': 11.1.0 - before-after-hook: 2.2.3 - universal-user-agent: 6.0.0 - - '@octokit/endpoint@9.0.0': - dependencies: - '@octokit/types': 11.1.0 - is-plain-object: 5.0.0 - universal-user-agent: 6.0.0 - - '@octokit/graphql@7.0.1': - dependencies: - '@octokit/request': 8.1.1 - '@octokit/types': 11.1.0 - universal-user-agent: 6.0.0 - - '@octokit/openapi-types@18.0.0': {} - - '@octokit/openapi-types@20.0.0': {} - - '@octokit/plugin-paginate-rest@9.2.1(@octokit/core@5.0.0)': - dependencies: - '@octokit/core': 5.0.0 - '@octokit/types': 12.6.0 - - '@octokit/plugin-rest-endpoint-methods@10.4.1(@octokit/core@5.0.0)': - dependencies: - '@octokit/core': 5.0.0 - '@octokit/types': 12.6.0 - - '@octokit/plugin-retry@6.0.1(@octokit/core@5.0.0)': - dependencies: - '@octokit/core': 5.0.0 - '@octokit/request-error': 5.0.0 - '@octokit/types': 12.6.0 - bottleneck: 2.19.5 - - '@octokit/plugin-throttling@7.0.0(@octokit/core@5.0.0)': - dependencies: - '@octokit/core': 5.0.0 - '@octokit/types': 11.1.0 - bottleneck: 2.19.5 - - '@octokit/request-error@5.0.0': - dependencies: - '@octokit/types': 11.1.0 - deprecation: 2.3.1 - once: 1.4.0 - - '@octokit/request@8.1.1': - dependencies: - '@octokit/endpoint': 9.0.0 - '@octokit/request-error': 5.0.0 - '@octokit/types': 11.1.0 - is-plain-object: 5.0.0 - universal-user-agent: 6.0.0 - - '@octokit/types@11.1.0': - dependencies: - '@octokit/openapi-types': 18.0.0 - - '@octokit/types@12.6.0': - dependencies: - '@octokit/openapi-types': 20.0.0 - - '@tsconfig/node10@1.0.9': {} - - '@tsconfig/node12@1.0.11': {} - - '@tsconfig/node14@1.0.3': {} - - '@tsconfig/node16@1.0.4': {} - - '@types/node@20.12.10': - dependencies: - undici-types: 5.26.5 - - acorn-walk@8.2.0: {} - - acorn@8.10.0: {} - - arg@4.1.3: {} - - before-after-hook@2.2.3: {} - - bottleneck@2.19.5: {} - - create-require@1.1.1: {} - - deprecation@2.3.1: {} - - diff@4.0.2: {} - - is-plain-object@5.0.0: {} - - make-error@1.3.6: {} - - once@1.4.0: - dependencies: - wrappy: 1.0.2 - - ts-node@10.9.2(@types/node@20.12.10)(typescript@5.4.5): - dependencies: - '@cspotcode/source-map-support': 0.8.1 - '@tsconfig/node10': 1.0.9 - '@tsconfig/node12': 1.0.11 - '@tsconfig/node14': 1.0.3 - '@tsconfig/node16': 1.0.4 - '@types/node': 20.12.10 - acorn: 8.10.0 - acorn-walk: 8.2.0 - arg: 4.1.3 - create-require: 1.1.1 - diff: 4.0.2 - make-error: 1.3.6 - typescript: 5.4.5 - v8-compile-cache-lib: 3.0.1 - yn: 3.1.1 - - tunnel@0.0.6: {} - - typescript@5.4.5: {} - - undici-types@5.26.5: {} 
- - undici@6.16.0: {} - - universal-user-agent@6.0.0: {} - - uuid@8.3.2: {} - - v8-compile-cache-lib@3.0.1: {} - - wrappy@1.0.2: {} - - yn@3.1.1: {} diff --git a/.github/actions/delete-deployments/test.sh b/.github/actions/delete-deployments/test.sh deleted file mode 100755 index 18b7726088..0000000000 --- a/.github/actions/delete-deployments/test.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/sh -export NUM_OF_PAGES=all -export ENVIRONMENT=integration -export DRY_RUN=false -export REPOSITORY=smartcontractkit/chainlink -export REF=fix/golint -export GITHUB_ACTION=true - -pnpm start diff --git a/.github/actions/delete-deployments/tsconfig.json b/.github/actions/delete-deployments/tsconfig.json deleted file mode 100644 index 4b36d4a178..0000000000 --- a/.github/actions/delete-deployments/tsconfig.json +++ /dev/null @@ -1,104 +0,0 @@ -{ - "compilerOptions": { - /* Visit https://aka.ms/tsconfig to read more about this file */ - - /* Projects */ - // "incremental": true, /* Save .tsbuildinfo files to allow for incremental compilation of projects. */ - // "composite": true, /* Enable constraints that allow a TypeScript project to be used with project references. */ - // "tsBuildInfoFile": "./.tsbuildinfo", /* Specify the path to .tsbuildinfo incremental compilation file. */ - // "disableSourceOfProjectReferenceRedirect": true, /* Disable preferring source files instead of declaration files when referencing composite projects. */ - // "disableSolutionSearching": true, /* Opt a project out of multi-project reference checking when editing. */ - // "disableReferencedProjectLoad": true, /* Reduce the number of projects loaded automatically by TypeScript. */ - - /* Language and Environment */ - "target": "ESNext" /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */, - // "lib": [], /* Specify a set of bundled library declaration files that describe the target runtime environment. */ - // "jsx": "preserve", /* Specify what JSX code is generated. */ - // "experimentalDecorators": true, /* Enable experimental support for TC39 stage 2 draft decorators. */ - // "emitDecoratorMetadata": true, /* Emit design-type metadata for decorated declarations in source files. */ - // "jsxFactory": "", /* Specify the JSX factory function used when targeting React JSX emit, e.g. 'React.createElement' or 'h'. */ - // "jsxFragmentFactory": "", /* Specify the JSX Fragment reference used for fragments when targeting React JSX emit e.g. 'React.Fragment' or 'Fragment'. */ - // "jsxImportSource": "", /* Specify module specifier used to import the JSX factory functions when using 'jsx: react-jsx*'. */ - // "reactNamespace": "", /* Specify the object invoked for 'createElement'. This only applies when targeting 'react' JSX emit. */ - // "noLib": true, /* Disable including any library files, including the default lib.d.ts. */ - // "useDefineForClassFields": true, /* Emit ECMAScript-standard-compliant class fields. */ - // "moduleDetection": "auto", /* Control what method is used to detect module-format JS files. */ - - /* Modules */ - "module": "NodeNext" /* Specify what module code is generated. */, - // "rootDir": "./", /* Specify the root folder within your source files. */ - "moduleResolution": "NodeNext" /* Specify how TypeScript looks up a file from a given module specifier. */, - // "baseUrl": "./", /* Specify the base directory to resolve non-relative module names. */ - // "paths": {}, /* Specify a set of entries that re-map imports to additional lookup locations. 
*/ - // "rootDirs": [], /* Allow multiple folders to be treated as one when resolving modules. */ - // "typeRoots": [], /* Specify multiple folders that act like './node_modules/@types'. */ - // "types": [], /* Specify type package names to be included without being referenced in a source file. */ - // "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. */ - // "moduleSuffixes": [], /* List of file name suffixes to search when resolving a module. */ - // "resolveJsonModule": true, /* Enable importing .json files. */ - // "noResolve": true, /* Disallow 'import's, 'require's or ''s from expanding the number of files TypeScript should add to a project. */ - - /* JavaScript Support */ - // "allowJs": true, /* Allow JavaScript files to be a part of your program. Use the 'checkJS' option to get errors from these files. */ - // "checkJs": true, /* Enable error reporting in type-checked JavaScript files. */ - // "maxNodeModuleJsDepth": 1, /* Specify the maximum folder depth used for checking JavaScript files from 'node_modules'. Only applicable with 'allowJs'. */ - - /* Emit */ - // "declaration": true, /* Generate .d.ts files from TypeScript and JavaScript files in your project. */ - // "declarationMap": true, /* Create sourcemaps for d.ts files. */ - // "emitDeclarationOnly": true, /* Only output d.ts files and not JavaScript files. */ - // "sourceMap": true, /* Create source map files for emitted JavaScript files. */ - // "outFile": "./", /* Specify a file that bundles all outputs into one JavaScript file. If 'declaration' is true, also designates a file that bundles all .d.ts output. */ - // "outDir": "./", /* Specify an output folder for all emitted files. */ - // "removeComments": true, /* Disable emitting comments. */ - "noEmit": true /* Disable emitting files from a compilation. */, - // "importHelpers": true, /* Allow importing helper functions from tslib once per project, instead of including them per-file. */ - // "importsNotUsedAsValues": "remove", /* Specify emit/checking behavior for imports that are only used for types. */ - // "downlevelIteration": true, /* Emit more compliant, but verbose and less performant JavaScript for iteration. */ - // "sourceRoot": "", /* Specify the root path for debuggers to find the reference source code. */ - // "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */ - // "inlineSourceMap": true, /* Include sourcemap files inside the emitted JavaScript. */ - // "inlineSources": true, /* Include source code in the sourcemaps inside the emitted JavaScript. */ - // "emitBOM": true, /* Emit a UTF-8 Byte Order Mark (BOM) in the beginning of output files. */ - // "newLine": "crlf", /* Set the newline character for emitting files. */ - // "stripInternal": true, /* Disable emitting declarations that have '@internal' in their JSDoc comments. */ - // "noEmitHelpers": true, /* Disable generating custom helper functions like '__extends' in compiled output. */ - // "noEmitOnError": true, /* Disable emitting files if any type checking errors are reported. */ - // "preserveConstEnums": true, /* Disable erasing 'const enum' declarations in generated code. */ - // "declarationDir": "./", /* Specify the output directory for generated declaration files. */ - // "preserveValueImports": true, /* Preserve unused imported values in the JavaScript output that would otherwise be removed. 
*/ - - /* Interop Constraints */ - // "isolatedModules": true, /* Ensure that each file can be safely transpiled without relying on other imports. */ - // "allowSyntheticDefaultImports": true, /* Allow 'import x from y' when a module doesn't have a default export. */ - "esModuleInterop": true /* Emit additional JavaScript to ease support for importing CommonJS modules. This enables 'allowSyntheticDefaultImports' for type compatibility. */, - // "preserveSymlinks": true, /* Disable resolving symlinks to their realpath. This correlates to the same flag in node. */ - "forceConsistentCasingInFileNames": true /* Ensure that casing is correct in imports. */, - - /* Type Checking */ - "strict": true /* Enable all strict type-checking options. */, - // "noImplicitAny": true, /* Enable error reporting for expressions and declarations with an implied 'any' type. */ - // "strictNullChecks": true, /* When type checking, take into account 'null' and 'undefined'. */ - // "strictFunctionTypes": true, /* When assigning functions, check to ensure parameters and the return values are subtype-compatible. */ - // "strictBindCallApply": true, /* Check that the arguments for 'bind', 'call', and 'apply' methods match the original function. */ - // "strictPropertyInitialization": true, /* Check for class properties that are declared but not set in the constructor. */ - // "noImplicitThis": true, /* Enable error reporting when 'this' is given the type 'any'. */ - // "useUnknownInCatchVariables": true, /* Default catch clause variables as 'unknown' instead of 'any'. */ - // "alwaysStrict": true, /* Ensure 'use strict' is always emitted. */ - // "noUnusedLocals": true, /* Enable error reporting when local variables aren't read. */ - // "noUnusedParameters": true, /* Raise an error when a function parameter isn't read. */ - // "exactOptionalPropertyTypes": true, /* Interpret optional property types as written, rather than adding 'undefined'. */ - // "noImplicitReturns": true, /* Enable error reporting for codepaths that do not explicitly return in a function. */ - // "noFallthroughCasesInSwitch": true, /* Enable error reporting for fallthrough cases in switch statements. */ - // "noUncheckedIndexedAccess": true, /* Add 'undefined' to a type when accessed using an index. */ - // "noImplicitOverride": true, /* Ensure overriding members in derived classes are marked with an override modifier. */ - // "noPropertyAccessFromIndexSignature": true, /* Enforces using indexed accessors for keys declared using an indexed type. */ - // "allowUnusedLabels": true, /* Disable error reporting for unused labels. */ - // "allowUnreachableCode": true, /* Disable error reporting for unreachable code. */ - - /* Completeness */ - // "skipDefaultLibCheck": true, /* Skip type checking .d.ts files that are included with TypeScript. */ - "skipLibCheck": false /* Skip type checking all .d.ts files. */ - }, - "include": ["src", "test"] -} diff --git a/.github/actions/detect-solidity-file-changes/action.yml b/.github/actions/detect-solidity-file-changes/action.yml deleted file mode 100644 index b86c91dbb4..0000000000 --- a/.github/actions/detect-solidity-file-changes/action.yml +++ /dev/null @@ -1,21 +0,0 @@ -name: 'Detect Solidity File Changes Composite Action' -description: 'Detects changes in solidity files and outputs the result.' 
-outputs: - changes: - description: 'Whether or not changes were detected' - value: ${{ steps.changed_files.outputs.src }} -runs: - using: 'composite' - steps: - - - name: Filter paths - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 - id: changed_files - with: - list-files: 'csv' - filters: | - src: - - 'contracts/**/*' - - '.github/workflows/solidity.yml' - - '.github/workflows/solidity-foundry.yml' - - '.github/workflows/solidity-wrappers.yml' diff --git a/.github/actions/detect-solidity-foundry-version/action.yml b/.github/actions/detect-solidity-foundry-version/action.yml deleted file mode 100644 index b37f1e2509..0000000000 --- a/.github/actions/detect-solidity-foundry-version/action.yml +++ /dev/null @@ -1,26 +0,0 @@ -name: 'Detect Foundry version in GNUmakefile' -description: 'Detects Foundry version in GNUmakefile' -inputs: - working-directory: - description: 'The GNUmakefile directory' - required: false - default: 'contracts' -outputs: - foundry-version: - description: 'Foundry version found in GNUmakefile' - value: ${{ steps.extract-foundry-version.outputs.foundry-version }} -runs: - using: 'composite' - steps: - - name: Extract Foundry version - id: extract-foundry-version - shell: bash - working-directory: ${{ inputs.working-directory }} - run: | - foundry_version=$(grep -Eo "foundryup --version [^ ]+" GNUmakefile | awk '{print $3}') - if [ -z "$foundry_version" ]; then - echo "::error::Foundry version not found in GNUmakefile" - exit 1 - fi - echo "Foundry version found: $foundry_version" - echo "foundry-version=$foundry_version" >> $GITHUB_OUTPUT diff --git a/.github/actions/detect-solidity-readonly-file-changes/action.yml b/.github/actions/detect-solidity-readonly-file-changes/action.yml deleted file mode 100644 index faca16d53f..0000000000 --- a/.github/actions/detect-solidity-readonly-file-changes/action.yml +++ /dev/null @@ -1,31 +0,0 @@ -name: 'Detect Solidity Readonly Files Changes Composite Action' -description: 'Detects changes in readonly solidity files and fails if they are modified.' -outputs: - changes: - description: 'Whether or not changes were detected' - value: ${{ steps.changed_files.outputs.src }} -runs: - using: 'composite' - steps: - - - name: Filter paths - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 - id: changed_files - with: - list-files: 'csv' - filters: | - read_only_sol: - - 'contracts/src/v0.8/interfaces/**/*' - - 'contracts/src/v0.8/automation/v1_2/**/*' - - 'contracts/src/v0.8/automation/v1_3/**/*' - - 'contracts/src/v0.8/automation/v2_0/**/*' - - - name: Fail if read-only files have changed - if: ${{ steps.changed_files.outputs.read_only_sol == 'true' }} - shell: bash - run: | - echo "One or more read-only Solidity file(s) has changed." - for file in ${{ steps.changed_files.outputs.read_only_sol_files }}; do - echo "$file was changed" - done - exit 1 diff --git a/.github/actions/golangci-lint/action.yml b/.github/actions/golangci-lint/action.yml deleted file mode 100644 index 3ada575877..0000000000 --- a/.github/actions/golangci-lint/action.yml +++ /dev/null @@ -1,83 +0,0 @@ -name: CI lint for Golang -description: Runs CI lint for Golang -inputs: - # general inputs - id: - description: Unique metrics collection id - required: true - name: - description: Name of the lint action - required: true - go-directory: - description: Go directory to run commands from - default: "." 
- # setup-go inputs - only-modules: - description: Set to 'true' to only cache modules - default: "false" - cache-version: - description: Set this to cache bust - default: "1" - go-version-file: - description: Set where the go version file is located at - default: "go.mod" - go-module-file: - description: Set where the go module file is located at - default: "go.sum" - # grafana inputs - gc-host: - description: "grafana hostname" - gc-basic-auth: - description: "grafana basic auth" - gc-org-id: - description: "grafana org id" - -runs: - using: composite - steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - name: Setup Go - uses: ./.github/actions/setup-go - with: - only-modules: ${{ inputs.only-modules }} - cache-version: ${{ inputs.cache-version }} - go-version-file: ${{ inputs.go-version-file }} - go-module-file: ${{ inputs.go-module-file }} - - name: Touching core/web/assets/index.html - shell: bash - run: mkdir -p core/web/assets && touch core/web/assets/index.html - - name: Build binary - working-directory: ${{ inputs.go-directory }} - shell: bash - run: go build ./... - - name: golangci-lint - uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # v4.0.0 - with: - version: v1.59.1 - # We already cache these directories in setup-go - skip-pkg-cache: true - skip-build-cache: true - # only-new-issues is only applicable to PRs, otherwise it is always set to false - only-new-issues: false # disabled for PRs due to unreliability - args: --out-format colored-line-number,checkstyle:golangci-lint-report.xml - working-directory: ${{ inputs.go-directory }} - - name: Print lint report artifact - if: failure() - shell: bash - run: cat ${{ inputs.go-directory }}/golangci-lint-report.xml - - name: Store lint report artifact - if: always() - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 - with: - name: golangci-lint-report - path: ${{ inputs.go-directory }}/golangci-lint-report.xml - - name: Collect Metrics - if: always() - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: chainlink-golang-ci-${{ inputs.id }} - basic-auth: ${{ inputs.gc-basic-auth }} - hostname: ${{ inputs.gc-host }} - org-id: ${{ inputs.gc-org-id }} - this-job-name: ${{ inputs.name }} - continue-on-error: true \ No newline at end of file diff --git a/.github/actions/goreleaser-build-sign-publish/README.md b/.github/actions/goreleaser-build-sign-publish/README.md deleted file mode 100644 index 07bb644c00..0000000000 --- a/.github/actions/goreleaser-build-sign-publish/README.md +++ /dev/null @@ -1,79 +0,0 @@ -# goreleaser-build-sign-publish - -> goreleaser wrapper action - -## workflows - -### build publish - -```yaml -name: goreleaser - -on: - push: - tags: - - "v*" - -jobs: - goreleaser: - runs-on: ubuntu-latest - environment: release - permissions: - id-token: write - contents: read - steps: - - name: Checkout repository - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - name: Configure aws credentials - uses: aws-actions/configure-aws-credentials@010d0da01d0b5a38af31e9c3470dbfdabdecca3a # v4.0.1 - with: - role-to-assume: ${{ secrets.aws-role-arn }} - role-duration-seconds: ${{ secrets.aws-role-dur-sec }} - aws-region: ${{ secrets.aws-region }} - - name: Build, sign, and publish - uses: ./.github/actions/goreleaser-build-sign-publish - with: - docker-registry: ${{ secrets.aws-ecr-registry }} - goreleaser-config: .goreleaser.yaml - env: - GITHUB_TOKEN: 
${{ secrets.gh-token }} -``` - -### snapshot release - -```yaml -- name: Build, sign, and publish image - uses: ./.github/actions/goreleaser-build-sign-publish - with: - docker-registry: ${{ secrets.aws-ecr-registry }} - goreleaser-config: .goreleaser.yaml -``` - -## customizing - -### inputs - -Following inputs can be used as `step.with` keys - -| Name | Type | Default | Description | -| ---------------------------- | ------ | ------------------ | ----------------------------------------------------------------------- | -| `goreleaser-version` | String | `~> v2` | `goreleaser` version | -| `docker-registry` | String | `localhost:5001` | Docker registry | -| `docker-image-tag` | String | `develop` | Docker image tag | -| `goreleaser-config` | String | `.goreleaser.yaml` | The goreleaser configuration yaml | - -## testing - -- bring up local docker registry - -```sh -docker run -d --restart=always -p "127.0.0.1:5001:5000" --name registry registry:2 -``` - -- run snapshot release, publish to local docker registry - -```sh -GORELEASER_CONFIG=".goreleaser.yaml" \ -DOCKER_MANIFEST_EXTRA_ARGS="--insecure" \ -./.github/actions/goreleaser-build-sign-publish/action_utils goreleaser_release -``` diff --git a/.github/actions/goreleaser-build-sign-publish/action.yml b/.github/actions/goreleaser-build-sign-publish/action.yml deleted file mode 100644 index 94387ce3fa..0000000000 --- a/.github/actions/goreleaser-build-sign-publish/action.yml +++ /dev/null @@ -1,69 +0,0 @@ -name: Build and Publish with Goreleaser -description: A composite action that allows building and publishing signed chainlink artifacts (binaries + images) -inputs: - goreleaser-version: - description: The goreleaser version - default: "~> v2" - required: false - goreleaser-key: - description: The goreleaser key - required: false - # publishing inputs - docker-registry: - description: The docker registry - default: localhost:5001 - required: false - docker-image-tag: - description: The docker image tag - default: develop - required: false - # goreleaser inputs - goreleaser-release-type: - description: The goreleaser release type, it can be either "nightly", "merge", "snapshot", "release" - default: "snapshot" - required: false - goreleaser-config: - description: "The goreleaser configuration yaml" - default: ".goreleaser.yaml" - required: false -runs: - using: composite - steps: - - # We need QEMU to test the cross architecture builds after they're built. 
- name: Set up QEMU - uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 - - name: Setup docker buildx - uses: docker/setup-buildx-action@2b51285047da1547ffb1b2203d8be4c0af6b1f20 # v3.2.0 - - name: Setup go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 - with: - go-version-file: "go.mod" - - name: Setup goreleaser - uses: goreleaser/goreleaser-action@286f3b13b1b49da4ac219696163fb8c1c93e1200 # v6.0.0 - with: - distribution: goreleaser-pro - install-only: true - version: ${{ inputs.goreleaser-version }} - env: - GORELEASER_KEY: ${{ inputs.goreleaser-key }} - - - name: Login to docker registry - uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0 - with: - registry: ${{ inputs.docker-registry }} - - - name: Install syft - uses: anchore/sbom-action/download-syft@61119d458adab75f756bc0b9e4bde25725f86a7a # v0.17.2 - - - name: Run goreleaser release - shell: bash - env: - GORELEASER_CONFIG: ${{ inputs.goreleaser-config }} - RELEASE_TYPE: ${{ inputs.goreleaser-release-type }} - IMAGE_PREFIX: ${{ inputs.docker-registry }} - IMAGE_TAG: ${{ inputs.docker-image-tag }} - GORELEASER_KEY: ${{ inputs.goreleaser-key }} - GITHUB_TOKEN: ${{ github.token }} - run: | - # https://github.com/orgs/community/discussions/24950 - ${GITHUB_ACTION_PATH}/release.js diff --git a/.github/actions/goreleaser-build-sign-publish/release.js b/.github/actions/goreleaser-build-sign-publish/release.js deleted file mode 100755 index 0dbd58ca6c..0000000000 --- a/.github/actions/goreleaser-build-sign-publish/release.js +++ /dev/null @@ -1,243 +0,0 @@ -#!/usr/bin/env node -const { execSync } = require("child_process"); -const fs = require("fs"); -const path = require("path"); - -function main() { - const args = process.argv.slice(2); - const useExistingDist = args.includes("--use-existing-dist"); - const chainlinkVersion = getVersion(); - - if (!useExistingDist) { - const goreleaserConfig = mustGetEnv("GORELEASER_CONFIG"); - const releaseType = mustGetEnv("RELEASE_TYPE"); - const command = constructGoreleaserCommand( - releaseType, - chainlinkVersion, - goreleaserConfig - ); - - if (process.env.DRY_RUN) { - console.log(`Generated command: ${command}`); - console.log("Dry run enabled. Exiting without executing the command."); - return; - } else { - console.log(`Executing command: ${command}`); - execSync(command, { stdio: "inherit" }); - } - } else { - console.log( - "Skipping Goreleaser command execution as '--use-existing-dist' is set." 
- ); - } - - const artifacts = getArtifacts(); - const dockerImages = extractDockerImages(artifacts); - const repoSha = execSync("git rev-parse HEAD", { encoding: "utf-8" }).trim(); - - const results = dockerImages.map((image) => { - try { - console.log( - `Checking version for image: ${image}, expected version: ${chainlinkVersion}, expected SHA: ${repoSha}` - ); - const versionOutput = execSync(`docker run --rm ${image} --version`, { - encoding: "utf-8", - }); - console.log(`Output from image ${image}: ${versionOutput}`); - - const cleanedOutput = versionOutput - .replace("chainlink version ", "") - .trim(); - const [version, sha] = cleanedOutput.split("@"); - if (!version || !sha) { - throw new Error("Version or SHA not found in output."); - } - - if (sha.trim() !== repoSha) { - throw new Error(`SHA mismatch: Expected ${repoSha}, got ${sha.trim()}`); - } - if (version.trim() !== chainlinkVersion) { - throw new Error( - `Version mismatch: Expected ${chainlinkVersion}, got ${version.trim()}` - ); - } - - return { image, success: true, message: "Version check passed." }; - } catch (error) { - return { image, success: false, message: error.message }; - } - }); - - printSummary(results); - if (results.some((result) => !result.success)) { - process.exit(1); - } -} - -function printSummary(results) { - const passed = results.filter((result) => result.success); - const failed = results.filter((result) => !result.success); - - console.log("\nSummary:"); - console.log(`Total images checked: ${results.length}`); - console.log(`Passed: ${passed.length}`); - console.log(`Failed: ${failed.length}`); - - if (passed.length > 0) { - console.log("\nPassed images:"); - passed.forEach((result) => - console.log(`${result.image}:\n${result.message}`) - ); - } - - if (failed.length > 0) { - console.log("\nFailed images:"); - failed.forEach((result) => - console.log(`${result.image}:\n${result.message}`) - ); - } -} - -function getArtifacts() { - const distDir = path.resolve(process.cwd(), "dist"); - const files = []; - - function findJsonFiles(dir) { - const items = fs.readdirSync(dir, { withFileTypes: true }); - for (const item of items) { - const fullPath = path.join(dir, item.name); - if (item.isDirectory()) { - // Skip child directories if an artifacts.json exists in the current directory - const parentArtifacts = path.join(dir, "artifacts.json"); - if (fs.existsSync(parentArtifacts)) { - console.log( - `Skipping child directory: ${fullPath} because a parent artifacts.json exists at: ${parentArtifacts}` - ); - } else { - findJsonFiles(fullPath); - } - } else if (item.isFile() && item.name === "artifacts.json") { - console.log(`Found artifacts.json at: ${fullPath}`); - files.push(fullPath); - } - } - } - - findJsonFiles(distDir); - - if (files.length === 0) { - console.error("Error: No artifacts.json found in /dist."); - process.exit(1); - } - - // Merge all artifacts.json files into one - let mergedArtifacts = []; - - for (const file of files) { - const artifactsJson = JSON.parse(fs.readFileSync(file, "utf-8")); - mergedArtifacts = mergedArtifacts.concat(artifactsJson); - } - - // Remove duplicate Docker images based on the artifact name - const uniqueArtifacts = Array.from( - new Map( - mergedArtifacts.map((artifact) => [artifact.name, artifact]) - ).values() - ); - - return uniqueArtifacts; -} - -function extractDockerImages(artifacts) { - const dockerImages = artifacts - .filter( - (artifact) => - artifact.type === "Docker Image" || - artifact.type === "Published Docker Image" - ) - .map((artifact) => 
artifact.name); - - if (dockerImages.length === 0) { - console.error("Error: No Docker images found in artifacts.json."); - process.exit(1); - } - - console.log(`Found Docker images:\n - ${dockerImages.join("\n - ")}`); - return dockerImages; -} - -function constructGoreleaserCommand(releaseType, version, goreleaserConfig) { - const flags = []; - - checkReleaseType(releaseType); - - let subCmd = "release"; - const splitArgs = ["--split", "--clean"]; - - switch (releaseType) { - case "release": - flags.push(...splitArgs); - break; - case "nightly": - flags.push("--nightly", ...splitArgs); - break; - case "snapshot": - flags.push("--snapshot", ...splitArgs); - break; - case "merge": - flags.push("--merge"); - subCmd = "continue"; - break; - } - - const flagsStr = flags.join(" "); - if (releaseType === "merge") { - return `CHAINLINK_VERSION=${version} goreleaser ${subCmd} ${flagsStr}`; - } else { - return `CHAINLINK_VERSION=${version} goreleaser ${subCmd} --config ${goreleaserConfig} ${flagsStr}`; - } -} - -function checkReleaseType(releaseType) { - const VALID_RELEASE_TYPES = ["nightly", "merge", "snapshot", "release"]; - - if (!VALID_RELEASE_TYPES.includes(releaseType)) { - const validReleaseTypesStr = VALID_RELEASE_TYPES.join(", "); - console.error( - `Error: Invalid release type: ${releaseType}. Must be one of: ${validReleaseTypesStr}` - ); - process.exit(1); - } -} - -function mustGetEnv(key) { - const val = process.env[key]; - if (!val || val.trim() === "") { - console.error(`Error: Environment variable ${key} is not set or empty.`); - process.exit(1); - } - - return val.trim(); -} - -function getVersion() { - try { - const pkgPath = path.resolve(process.cwd(), "package.json"); - console.log("Looking for chainlink version in package.json at: ", pkgPath); - const packageJson = require(pkgPath); - if (!packageJson.version) { - console.error( - 'Error: "version" field is missing or empty in package.json.' - ); - process.exit(1); - } - console.log("Resolved version: ", packageJson.version); - - return packageJson.version; - } catch (err) { - console.error(`Error reading package.json: ${err.message}`); - process.exit(1); - } -} - -main(); diff --git a/.github/actions/notify-slack-jobs-result/README.md b/.github/actions/notify-slack-jobs-result/README.md deleted file mode 100644 index 298930c0d9..0000000000 --- a/.github/actions/notify-slack-jobs-result/README.md +++ /dev/null @@ -1,37 +0,0 @@ -# Notify Slack Jobs Result - -Sends a Slack message to a specified channel detailing the results of one to many GHA job results using a regex. The job results will be grouped by the `github_job_name_regex` and displayed underneath the `message_title`, with the regex matching group displayed as an individual result. This is primarily designed for when you have test groups running in a matrix, and would like condensed reporting on their status by group. It's often accompanied by posting a Slack message before to start a thread, then attaching all the results to that thread like we do in the reporting section of the [live-testnet-test.yml workflow](../../workflows/live-testnet-tests.yml). Check out the example below, where we post an initial summary message, then use this action to thread together specific results: - -```yaml -message_title: Optimism Goerli -github_job_name_regex: ^Optimism Goerli (?.*?) 
Tests$ # Note that the regex MUST have a capturing group named "cap"
-```
-
-![example](image.png)
-
-## Inputs
-
-```yaml
-inputs:
-  github_token:
-    description: "The GitHub token to use for authentication (usually ${{ github.token }})"
-    required: true
-  github_repository:
-    description: "The GitHub owner/repository to use for authentication (usually ${{ github.repository }})"
-    required: true
-  workflow_run_id:
-    description: "The workflow run ID to get the results from (usually ${{ github.run_id }})"
-    required: true
-  github_job_name_regex:
-    description: "The regex to use to match 1..many job name(s) to collect results from. Should include a capture group named 'cap' for the part of the job name you want to display in the Slack message (e.g. ^Client Compatibility Test (?<cap>.*?)$)"
-    required: true
-  message_title:
-    description: "The title of the Slack message"
-    required: true
-  slack_channel_id:
-    description: "The Slack channel ID to post the message to"
-    required: true
-  slack_thread_ts:
-    description: "The Slack thread timestamp to post the message to, handy for keeping multiple related results in a single thread"
-    required: false
-```
diff --git a/.github/actions/notify-slack-jobs-result/action.yml b/.github/actions/notify-slack-jobs-result/action.yml
deleted file mode 100644
index d8fda4fb90..0000000000
--- a/.github/actions/notify-slack-jobs-result/action.yml
+++ /dev/null
@@ -1,126 +0,0 @@
-name: Notify Slack Jobs Result
-description: Will send a notification in Slack for the result of a GitHub action run, typically for test results
-inputs:
-  github_token:
-    description: "The GitHub token to use for authentication (usually github.token)"
-    required: true
-  github_repository:
-    description: "The GitHub owner/repository to use for authentication (usually github.repository)"
-    required: true
-  workflow_run_id:
-    description: "The workflow run ID to get the results from (usually github.run_id)"
-    required: true
-  github_job_name_regex:
-    description: "The regex to use to match 1..many job name(s) to collect results from. Should include a capture group named 'cap' for the part of the job name you want to display in the Slack message (e.g. ^Client Compatibility Test (?<cap>.*?)$)"
-    required: true
-  message_title:
-    description: "The title of the Slack message"
-    required: true
-  slack_channel_id:
-    description: "The Slack channel ID to post the message to"
-    required: true
-  slack_bot_token:
-    description: "The Slack bot token to use for authentication which needs permission and an installed app in the channel"
-    required: true
-  slack_thread_ts:
-    description: "The Slack thread timestamp to post the message to, handy for keeping multiple related results in a single thread"
-    required: false
-  base64_parsed_results:
-    description: "Base64 encoded parsed results to use"
-    required: false
-
-runs:
-  using: composite
-  steps:
-    - name: Get Results
-      shell: bash
-      id: test-results
-      run: |
-        if [ -n "${{ inputs.base64_parsed_results }}" ]; then
-          echo "Using base64 parsed results"
-          PARSED_RESULTS=$(echo "${{ inputs.base64_parsed_results }}" | base64 -d)
-        else
-          go install github.com/smartcontractkit/chainlink-testing-framework/tools/workflowresultparser@v1.0.0
-          PATH=$PATH:$(go env GOPATH)/bin
-          export PATH
-
-          workflowresultparser -workflowRunID ${{ inputs.workflow_run_id }} -githubToken ${{ inputs.github_token }} -githubRepo "${{ inputs.github_repository }}" -jobNameRegex "${{ inputs.github_job_name_regex }}" -outputFile=output.json
-
-          if [ !
-f output.json ]; then - PARSED_RESULTS='""' - else - PARSED_RESULTS=$(cat output.json | jq -c "select(has(\"results\")) | .results[]") - fi - - fi - - echo "Parsed Results:" - echo $PARSED_RESULTS - - ALL_SUCCESS=true - - if [ "$PARSED_RESULTS" != '""' ]; then - echo "Checking for failures" - echo "$PARSED_RESULTS" | jq -s | jq -r '.[] | select(.conclusion != ":white_check_mark:")' - for row in $(echo "$PARSED_RESULTS" | jq -s | jq -r '.[] | select(.conclusion != ":white_check_mark:")'); do - ALL_SUCCESS=false - break - done - echo "Success: $ALL_SUCCESS" - - echo all_success=$ALL_SUCCESS >> $GITHUB_OUTPUT - - FORMATTED_RESULTS=$(echo $PARSED_RESULTS | jq -s '[.[] - | { - conclusion: .conclusion, - cap: .cap, - html_url: .html_url - } - ] - | map("{\"type\": \"section\", \"text\": {\"type\": \"mrkdwn\", \"text\": \"<\(.html_url)|\(.cap)>: \(.conclusion)\"}}") - | join(",")') - else - echo "Nothing to post, no results found" - exit 0 - fi - - echo "Formatted Results:" - echo $FORMATTED_RESULTS - - # Cleans out backslashes and quotes from jq - CLEAN_RESULTS=$(echo "$FORMATTED_RESULTS" | sed 's/\\\"/"/g' | sed 's/^"//;s/"$//') - - echo "Clean Results" - echo $CLEAN_RESULTS - - echo results=$CLEAN_RESULTS >> $GITHUB_OUTPUT - - name: Post Results - uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0 - if: steps.test-results.outputs.results != '' - env: - SLACK_BOT_TOKEN: ${{ inputs.slack_bot_token }} - with: - channel-id: ${{ inputs.slack_channel_id }} - payload: | - { - "thread_ts": "${{ inputs.slack_thread_ts }}", - "attachments": [ - { - "color": "${{ steps.test-results.outputs.all_success == 'true' && '#2E7D32' || '#C62828' }}", - "blocks": [ - { - "type": "header", - "text": { - "type": "plain_text", - "text": "${{ inputs.message_title }} ${{ steps.test-results.outputs.all_success == 'true' && ':white_check_mark:' || ':x:'}}", - "emoji": true - } - }, - { - "type": "divider" - }, - ${{ steps.test-results.outputs.results }} - ] - } - ] - } diff --git a/.github/actions/notify-slack-jobs-result/image.png b/.github/actions/notify-slack-jobs-result/image.png deleted file mode 100644 index 3bd398101f..0000000000 Binary files a/.github/actions/notify-slack-jobs-result/image.png and /dev/null differ diff --git a/.github/actions/setup-create-base64-config-ccip/action.yml b/.github/actions/setup-create-base64-config-ccip/action.yml deleted file mode 100644 index e741a2dac0..0000000000 --- a/.github/actions/setup-create-base64-config-ccip/action.yml +++ /dev/null @@ -1,151 +0,0 @@ -name: Create Base64 Config for CCIP Tests -description: A composite action that creates a base64-encoded config to be used by ccip integration tests - -inputs: - runId: - description: The run id - existingNamespace: - description: If test needs to run against already deployed namespace - testLogCollect: - description: Whether to always collect logs, even for passing tests - default: "false" - selectedNetworks: - description: The networks to run tests against - chainlinkVersion: - description: The git commit sha to use for the image tag - upgradeVersion: - description: The git commit sha to use for the image tag - logstreamLogTargets: - description: Where to send logs (e.g. file, loki) - customEvmNodes: - description: Custom EVM nodes to use in key=value format, where key is chain id and value is docker image to use. 
If they are provided the number of networksSelected must be equal to the number of customEvmNodes - evmNodeLogLevel: - description: Log level for the custom EVM nodes - default: "info" -outputs: - base64_config: - description: The base64-encoded config - value: ${{ steps.base64_config_override.outputs.base64_config }} - -runs: - using: composite - steps: - - name: Prepare Base64 TOML override - shell: bash - id: base64_config_override - env: - RUN_ID: ${{ inputs.runId }} - SELECTED_NETWORKS: ${{ inputs.selectedNetworks }} - EXISTING_NAMESPACE: ${{ inputs.existingNamespace }} - TEST_LOG_COLLECT: ${{ inputs.testLogCollect }} - CHAINLINK_VERSION: ${{ inputs.chainlinkVersion }} - UPGRADE_VERSION: ${{ inputs.upgradeVersion }} - LOGSTREAM_LOG_TARGETS: ${{ inputs.logstreamLogTargets }} - CUSTOM_EVM_NODES: ${{ inputs.customEvmNodes }} - EVM_NODE_LOG_LEVEL: ${{ inputs.evmNodeLogLevel }} - run: | - function convert_to_toml_array() { - local IFS=',' - local input_array=($1) - local toml_array_format="[" - - for element in "${input_array[@]}"; do - toml_array_format+="\"$element\"," - done - - toml_array_format="${toml_array_format%,}]" - echo "$toml_array_format" - } - - selected_networks=$(convert_to_toml_array "$SELECTED_NETWORKS") - log_targets=$(convert_to_toml_array "$LOGSTREAM_LOG_TARGETS") - - if [ -n "$TEST_LOG_COLLECT" ]; then - test_log_collect=true - else - test_log_collect=false - fi - - # make sure the number of networks and nodes match - IFS=',' read -r -a networks_array <<< "$SELECTED_NETWORKS" - IFS=',' read -r -a nodes_array <<< "$CUSTOM_EVM_NODES" - - networks_count=${#networks_array[@]} - nodes_count=${#nodes_array[@]} - - # Initialize or clear CONFIG_TOML environment variable - custom_nodes_toml="" - - # Check if the number of CUSTOM_EVM_NODES is zero - if [ $nodes_count -eq 0 ]; then - echo "The number of CUSTOM_EVM_NODES is zero, won't output any custom private Ethereum network configurations." - else - if [ $networks_count -ne $nodes_count ]; then - echo "The number of elements in SELECTED_NETWORKS (${networks_count}) and CUSTOM_EVM_NODES does not match (${nodes_count})." 
- exit 1 - else - for i in "${!networks_array[@]}"; do - IFS='=' read -r chain_id docker_image <<< "${nodes_array[i]}" - custom_nodes_toml+=" - [CCIP.Env.PrivateEthereumNetworks.${networks_array[i]}] - ethereum_version=\"\" - execution_layer=\"\" - - [CCIP.Env.PrivateEthereumNetworks.${networks_array[i]}.EthereumChainConfig] - seconds_per_slot=3 - slots_per_epoch=2 - genesis_delay=15 - validator_count=4 - chain_id=${chain_id} - addresses_to_fund=[\"0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266\", \"0x70997970C51812dc3A010C7d01b50e0d17dc79C8\"] - node_log_level=\"${EVM_NODES_LOG_LEVEL}\" - - [CCIP.Env.PrivateEthereumNetworks.${networks_array[i]}.EthereumChainConfig.HardForkEpochs] - Deneb=500 - - [CCIP.Env.PrivateEthereumNetworks.${networks_array[i]}.CustomDockerImages] - execution_layer=\"${docker_image}\" - " - done - fi - fi - - cat << EOF > config.toml - [CCIP] - [CCIP.Env] - EnvToConnect="$EXISTING_NAMESPACE" - [CCIP.Env.Network] - selected_networks = $selected_networks - [CCIP.Env.NewCLCluster] - [CCIP.Env.NewCLCluster.Common] - [CCIP.Env.NewCLCluster.Common.ChainlinkImage] - version="$CHAINLINK_VERSION" - - $custom_nodes_toml - - [CCIP.Env.Logging] - test_log_collect=$test_log_collect - run_id="$RUN_ID" - - [CCIP.Env.Logging.LogStream] - log_targets=$log_targets - - [CCIP.Groups.load] - TestRunName = '$EXISTING_NAMESPACE' - - [CCIP.Groups.smoke] - TestRunName = '$EXISTING_NAMESPACE' - - EOF - - # Check if UPGRADE_VERSION is not empty and append to config.toml - if [ -n "$UPGRADE_VERSION" ]; then - cat << EOF >> config.toml - [CCIP.Env.NewCLCluster.Common.ChainlinkUpgradeImage] - version="$UPGRADE_VERSION" - EOF - fi - - BASE64_CONFIG=$(cat config.toml | base64 -w 0) - echo ::add-mask::$BASE64_CONFIG - echo "base64_config=$BASE64_CONFIG" >> $GITHUB_OUTPUT diff --git a/.github/actions/setup-go/action.yml b/.github/actions/setup-go/action.yml deleted file mode 100644 index 6514f533ef..0000000000 --- a/.github/actions/setup-go/action.yml +++ /dev/null @@ -1,64 +0,0 @@ -name: Setup Go -description: Setup Golang with efficient caching -inputs: - only-modules: - description: Set to 'true' to only cache modules - default: "false" - cache-version: - description: Set this to cache bust - default: "1" - go-version-file: - description: Set where the go version file is located at - default: "go.mod" - go-module-file: - description: Set where the go module file is located at - default: "go.sum" - -runs: - using: composite - steps: - - name: Set up Go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 - with: - go-version-file: ${{ inputs.go-version-file }} - cache: false - - - name: Get branch name - if: ${{ inputs.only-modules == 'false' }} - id: branch-name - uses: tj-actions/branch-names@6871f53176ad61624f978536bbf089c574dc19a2 # v8.0.1 - - - name: Set go cache keys - shell: bash - id: go-cache-dir - run: | - echo "gomodcache=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT - echo "gobuildcache=$(go env GOCACHE)" >> $GITHUB_OUTPUT - - - name: Set go module path - id: go-module-path - shell: bash - run: echo "path=./${{ inputs.go-module-file }}" >> $GITHUB_OUTPUT - - - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 - name: Cache Go Modules - with: - path: | - ${{ steps.go-cache-dir.outputs.gomodcache }} - # The lifetime of go modules is much higher than the build outputs, so we increase cache efficiency - # here by not having the primary key contain the branch name - key: ${{ runner.os }}-gomod-${{ inputs.cache-version }}-${{ 
hashFiles(steps.go-module-path.outputs.path) }}
-        restore-keys: |
-          ${{ runner.os }}-gomod-${{ inputs.cache-version }}-
-
-    - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
-      if: ${{ inputs.only-modules == 'false' }}
-      name: Cache Go Build Outputs
-      with:
-        path: |
-          ${{ steps.go-cache-dir.outputs.gobuildcache }}
-        # The lifetime of go build outputs is pretty short, so we make our primary cache key be the branch name
-        key: ${{ runner.os }}-gobuild-${{ inputs.cache-version }}-${{ hashFiles(steps.go-module-path.outputs.path) }}-${{ steps.branch-name.outputs.current_branch }}
-        restore-keys: |
-          ${{ runner.os }}-gobuild-${{ inputs.cache-version }}-${{ hashFiles(steps.go-module-path.outputs.path) }}-
-          ${{ runner.os }}-gobuild-${{ inputs.cache-version }}-
diff --git a/.github/actions/setup-hardhat/action.yaml b/.github/actions/setup-hardhat/action.yaml
deleted file mode 100644
index 189c821002..0000000000
--- a/.github/actions/setup-hardhat/action.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-name: Setup NodeJS
-inputs:
-  namespace:
-    required: true
-    description: A cache namespace to add
-  cache-version:
-    default: "6"
-    description: Change to bust cache
-description: Setup pnpm for contracts
-runs:
-  using: composite
-  steps:
-    - name: Cache Compilers
-      uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
-      with:
-        path: ~/.cache/hardhat-nodejs/
-        key: contracts-compilers-${{ runner.os }}-${{ inputs.cache-version }}-${{ hashFiles('contracts/pnpm-lock.yaml', 'contracts/hardhat.config.ts') }}
-
-    - name: Cache contracts build outputs
-      uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
-      with:
-        path: |
-          contracts/cache/
-          contracts/artifacts/
-          contracts/typechain/
-        key: ${{ format('contracts-{0}-{1}-{2}-{3}', runner.os, inputs.cache-version, inputs.namespace, hashFiles('contracts/pnpm-lock.yaml', 'contracts/hardhat.config.ts', 'contracts/src/**/*.sol')) }}
-
-    - name: Compile contracts
-      shell: bash
-      run: pnpm compile
-      working-directory: contracts
diff --git a/.github/actions/setup-postgres/.env b/.github/actions/setup-postgres/.env
deleted file mode 100644
index 47ed8d9bcd..0000000000
--- a/.github/actions/setup-postgres/.env
+++ /dev/null
@@ -1,5 +0,0 @@
-POSTGRES_USER=postgres
-POSTGRES_OPTIONS="-c max_connections=1000 -c shared_buffers=2GB -c log_lock_waits=true"
-POSTGRES_PASSWORD=postgres
-POSTGRES_DB=chainlink_test
-POSTGRES_HOST_AUTH_METHOD=trust
diff --git a/.github/actions/setup-postgres/action.yml b/.github/actions/setup-postgres/action.yml
deleted file mode 100644
index 45bfba5965..0000000000
--- a/.github/actions/setup-postgres/action.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-name: Setup Postgresql
-description: Setup postgres docker container via docker-compose, allowing usage of a custom command, see https://github.com/orgs/community/discussions/26688
-inputs:
-  base-path:
-    description: Path to the base of the repo
-    required: false
-    default: .
-runs: - using: composite - steps: - - name: Start postgres service - run: docker compose up -d - shell: bash - working-directory: ${{ inputs.base-path }}/.github/actions/setup-postgres - - name: Wait for postgres service to be healthy - run: ./wait-for-healthy-postgres.sh - shell: bash - working-directory: ${{ inputs.base-path }}/.github/actions/setup-postgres diff --git a/.github/actions/setup-postgres/bin/pg_dump b/.github/actions/setup-postgres/bin/pg_dump deleted file mode 100755 index d8135ad824..0000000000 --- a/.github/actions/setup-postgres/bin/pg_dump +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash -# -# This script acts as a docker replacement around pg_dump so that developers can -# run DB involved tests locally without having postgres installed. -# -# Installation: -# - Make sure that your PATH doesn't already contain a postgres installation -# - Add this script to your PATH -# -# Usage: -# You should be able to setup your test db via: -# - go build -o chainlink.test . # Build the chainlink binary to run test db prep commands from -# - export CL_DATABASE_URL="postgresql://postgres:postgres@localhost:5432/chainlink_test?sslmode=disable" -# - pushd .github/actions/setup-postgres/ # Navigate to the setup-postgres action so we can spin up a docker postgres -# instance -# - docker compose up # Spin up postgres -# - ./chainlink.test local db preparetest # Run the db migration, which will shell out to our pg_dump wrapper too. -# - popd -# - go test -timeout 30s ./core/services/workflows/... -v # Run tests that use the database - -cd "$(dirname "$0")" || exit - -docker compose exec -T postgres pg_dump "$@" diff --git a/.github/actions/setup-postgres/docker-compose.yml b/.github/actions/setup-postgres/docker-compose.yml deleted file mode 100644 index 23f8d82b91..0000000000 --- a/.github/actions/setup-postgres/docker-compose.yml +++ /dev/null @@ -1,15 +0,0 @@ -name: gha_postgres -services: - postgres: - ports: - - "5432:5432" - container_name: cl_pg - image: postgres:14-alpine - command: postgres ${POSTGRES_OPTIONS} - env_file: - - .env - healthcheck: - test: "pg_isready -d ${POSTGRES_DB} -U ${POSTGRES_USER}" - interval: 2s - timeout: 5s - retries: 5 diff --git a/.github/actions/setup-postgres/wait-for-healthy-postgres.sh b/.github/actions/setup-postgres/wait-for-healthy-postgres.sh deleted file mode 100755 index 438cfbaff3..0000000000 --- a/.github/actions/setup-postgres/wait-for-healthy-postgres.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash -RETRIES=10 - -until [ $RETRIES -eq 0 ]; do - DOCKER_OUTPUT=$(docker compose ps postgres --status running --format json) - JSON_TYPE=$(echo "$DOCKER_OUTPUT" | jq -r 'type') - - if [ "$JSON_TYPE" == "array" ]; then - HEALTH_STATUS=$(echo "$DOCKER_OUTPUT" | jq -r '.[0].Health') - elif [ "$JSON_TYPE" == "object" ]; then - HEALTH_STATUS=$(echo "$DOCKER_OUTPUT" | jq -r '.Health') - else - HEALTH_STATUS="Unknown JSON type: $JSON_TYPE" - fi - - echo "postgres health status: $HEALTH_STATUS" - if [ "$HEALTH_STATUS" == "healthy" ]; then - exit 0 - fi - - echo "Waiting for postgres server, $((RETRIES--)) remaining attempts..." - sleep 2 -done - -exit 1 diff --git a/.github/actions/setup-slither/action.yaml b/.github/actions/setup-slither/action.yaml deleted file mode 100644 index b8bef38575..0000000000 --- a/.github/actions/setup-slither/action.yaml +++ /dev/null @@ -1,10 +0,0 @@ -name: Setup Slither -description: Installs Slither 0.10.3 for contract analysis. Requires Python 3.6 or higher. 
-runs: - using: composite - steps: - - name: Install Slither - shell: bash - run: | - python -m pip install --upgrade pip - pip install slither-analyzer==0.10.3 diff --git a/.github/actions/setup-solana/action.yml b/.github/actions/setup-solana/action.yml deleted file mode 100644 index 02a0b85ca8..0000000000 --- a/.github/actions/setup-solana/action.yml +++ /dev/null @@ -1,27 +0,0 @@ -name: Setup Solana CLI -description: Setup solana CLI -inputs: - base-path: - description: Path to the base of the repo - required: false - default: . -runs: - using: composite - steps: - - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 - id: cache - name: Cache solana CLI - with: - path: | - ~/.local/share/solana/install/active_release/bin - key: ${{ runner.os }}-solana-cli-${{ hashFiles('${{ inputs.base-path }}tools/ci/install_solana') }} - - - if: ${{ steps.cache.outputs.cache-hit != 'true' }} - name: Install solana cli - shell: bash - working-directory: ${{ inputs.base-path }} - run: ./tools/ci/install_solana - - - name: Export solana path to env - shell: bash - run: echo "PATH=$HOME/.local/share/solana/install/active_release/bin:$PATH" >> $GITHUB_ENV diff --git a/.github/actions/setup-solc-select/action.yaml b/.github/actions/setup-solc-select/action.yaml deleted file mode 100644 index b74ffae018..0000000000 --- a/.github/actions/setup-solc-select/action.yaml +++ /dev/null @@ -1,30 +0,0 @@ -name: Setup Solc Select -description: Installs Solc Select, required versions and selects the version to use. Requires Python 3.6 or higher. -inputs: - to_install: - description: Comma-separated list of solc versions to install - required: true - to_use: - description: Solc version to use - required: true - -runs: - using: composite - steps: - - name: Install solc-select and solc - shell: bash - run: | - pip3 install solc-select - sudo ln -s /usr/local/bin/solc-select /usr/bin/solc-select - - IFS=',' read -ra versions <<< "${{ inputs.to_install }}" - for version in "${versions[@]}"; do - solc-select install $version - if [ $? -ne 0 ]; then - echo "Failed to install Solc $version" - exit 1 - fi - done - - solc-select install ${{ inputs.to_use }} - solc-select use ${{ inputs.to_use }} diff --git a/.github/actions/setup-wasmd/action.yml b/.github/actions/setup-wasmd/action.yml deleted file mode 100644 index ae31cf2395..0000000000 --- a/.github/actions/setup-wasmd/action.yml +++ /dev/null @@ -1,28 +0,0 @@ -name: Setup Cosmos wasmd -description: Setup Cosmos wasmd, used for integration tests -inputs: - base-path: - description: Path to the base of the repo - required: false - default: . -runs: - using: composite - steps: - - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 - id: cache - name: Cache wasmd-build - with: - path: ~/wasmd-build - # this caching works without cloning the repo because the install_wasmd contains - # the commit hash. 
- key: ${{ runner.os }}-wasmd-cli-${{ hashFiles('${{ inputs.base-path }}/tools/ci/install_wasmd') }} - - - if: ${{ steps.cache.outputs.cache-hit != 'true' }} - name: Install wasmd - shell: bash - working-directory: ${{ inputs.base-path }} - run: ./tools/ci/install_wasmd - - - name: Export wasmd path to env - shell: bash - run: echo "PATH=$HOME/wasmd-build/bin:$PATH" >> $GITHUB_ENV diff --git a/.github/actions/validate-artifact-scope/action.yaml b/.github/actions/validate-artifact-scope/action.yaml deleted file mode 100644 index 7440efc63a..0000000000 --- a/.github/actions/validate-artifact-scope/action.yaml +++ /dev/null @@ -1,103 +0,0 @@ -name: Validate Artifact Scope -description: Checks there are any modified Solidity files outside of the specified scope. If so, it prints a warning message, but does not fail the workflow. -inputs: - product: - description: The product for which the artifacts are being generated - required: true - sol_files: - description: Comma-separated (CSV) or space-separated (shell) list of Solidity files to check - required: true - -runs: - using: composite - steps: - - name: Transform input array - id: transform_input_array - shell: bash - run: | - is_csv_format() { - local input="$1" - if [[ "$input" =~ "," ]]; then - return 0 - else - return 1 - fi - } - - is_space_separated_string() { - local input="$1" - if [[ "$input" =~ ^[^[:space:]]+([[:space:]][^[:space:]]+)*$ ]]; then - return 0 - else - return 1 - fi - } - - array="${{ inputs.sol_files }}" - - if is_csv_format "$array"; then - echo "::debug::CSV format detected, nothing to do" - echo "sol_files=$array" >> $GITHUB_OUTPUT - exit 0 - fi - - if is_space_separated_string "$array"; then - echo "::debug::Space-separated format detected, converting to CSV" - csv_array="${array// /,}" - echo "sol_files=$csv_array" >> $GITHUB_OUTPUT - exit 0 - fi - - echo "::error::Invalid input format for sol_files. Please provide a comma-separated (CSV) or space-separated (shell) list of Solidity files" - exit 1 - - - name: Check for changes outside of artifact scope - shell: bash - run: | - echo "::debug::All modified contracts:" - echo "${{ steps.transform_input_array.outputs.sol_files }}" | tr ',' '\n' - if [ "${{ inputs.product }}" = "shared" ]; then - excluded_paths_pattern="!/^contracts\/src\/v0\.8\/interfaces/ && !/^contracts\/src\/v0\.8\/${{ inputs.product }}/ && !/^contracts\/src\/v0\.8\/[^\/]+\.sol$/" - else - excluded_paths_pattern="!/^contracts\/src\/v0\.8\/${{ inputs.product }}/" - fi - echo "::debug::Excluded paths: $excluded_paths_pattern" - unexpected_files=$(echo "${{ steps.transform_input_array.outputs.sol_files }}" | tr ',' '\n' | awk "$excluded_paths_pattern") - missing_files="" - set -e - set -o pipefail - if [[ -n "$unexpected_files" ]]; then - products=() - productsStr="" - IFS=$'\n' read -r -d '' -a files <<< "$unexpected_files" || true - echo "Files: ${files[@]}" - - for file in "${files[@]}"; do - missing_files+="$file," - - product=$(echo "$file" | awk -F'src/v0.8/' '{if ($2 ~ /\//) print substr($2, 1, index($2, "/")-1); else print "shared"}') - if [[ ! 
" ${products[@]} " =~ " ${product} " ]]; then - products+=("$product") - productsStr+="$product, " - fi - done - productsStr=${productsStr%, } - - set +e - set +o pipefail - - missing_files=$(echo $missing_files | tr ',' '\n') - - echo "Error: Found modified contracts outside of the expected scope: ${{ inputs.product }}" - echo "Files:" - echo "$missing_files" - echo "Action required: If you want to generate artifacts for other products ($productsStr) run this workflow again with updated configuration" - - echo "# Warning!" >> $GITHUB_STEP_SUMMARY - echo "## Reason: Found modified contracts outside of the expected scope: ${{ inputs.product }}" >> $GITHUB_STEP_SUMMARY - echo "### Files:" >> $GITHUB_STEP_SUMMARY - echo "$missing_files" >> $GITHUB_STEP_SUMMARY - echo "## Action required: If you want to generate artifacts for other products ($productsStr) run this workflow again with updated configuration" >> $GITHUB_STEP_SUMMARY - else - echo "No unexpected files found." - fi diff --git a/.github/actions/validate-solidity-artifacts/action.yaml b/.github/actions/validate-solidity-artifacts/action.yaml deleted file mode 100644 index 5357a87f96..0000000000 --- a/.github/actions/validate-solidity-artifacts/action.yaml +++ /dev/null @@ -1,115 +0,0 @@ -name: Validate Solidity Artifacts -description: Checks whether Slither reports and UML diagrams were generated for all necessary files. If not, a warning is printed in job summary, but the job is not marked as failed. -inputs: - slither_reports_path: - description: Path to the Slither reports directory (without trailing slash) - required: true - uml_diagrams_path: - description: Path to the UML diagrams directory (without trailing slash) - required: true - validate_slither_reports: - description: Whether Slither reports should be validated - required: true - validate_uml_diagrams: - description: Whether UML diagrams should be validated - required: true - sol_files: - description: Comma-separated (CSV) or space-separated (shell) list of Solidity files to check - required: true - -runs: - using: composite - steps: - - name: Transform input array - id: transform_input_array - shell: bash - run: | - is_csv_format() { - local input="$1" - if [[ "$input" =~ "," ]]; then - return 0 - else - return 1 - fi - } - - is_space_separated_string() { - local input="$1" - if [[ "$input" =~ ^[^[:space:]]+([[:space:]][^[:space:]]+)*$ ]]; then - return 0 - else - return 1 - fi - } - - array="${{ inputs.sol_files }}" - - if is_csv_format "$array"; then - echo "::debug::CSV format detected, nothing to do" - echo "sol_files=$array" >> $GITHUB_OUTPUT - exit 0 - fi - - if is_space_separated_string "$array"; then - echo "::debug::Space-separated format detected, converting to CSV" - csv_array="${array// /,}" - echo "sol_files=$csv_array" >> $GITHUB_OUTPUT - exit 0 - fi - - echo "::error::Invalid input format for sol_files. Please provide a comma-separated (CSV) or space-separated (shell) list of Solidity files" - exit 1 - - - name: Validate UML diagrams - if: ${{ inputs.validate_uml_diagrams == 'true' }} - shell: bash - run: | - echo "Validating UML diagrams" - IFS=',' read -r -a modified_files <<< "${{ steps.transform_input_array.outputs.sol_files }}" - missing_svgs=() - for file in "${modified_files[@]}"; do - svg_file="$(basename "${file%.sol}").svg" - if [ ! 
-f "${{ inputs.uml_diagrams_path }}/$svg_file" ]; then - echo "Error: UML diagram for $file not found" - missing_svgs+=("$file") - fi - done - - if [ ${#missing_svgs[@]} -gt 0 ]; then - echo "Error: Missing UML diagrams for files: ${missing_svgs[@]}" - echo "# Warning!" >> $GITHUB_STEP_SUMMARY - echo "## Reason: Missing UML diagrams for files:" >> $GITHUB_STEP_SUMMARY - for file in "${missing_svgs[@]}"; do - echo " $file" >> $GITHUB_STEP_SUMMARY - done - echo "## Action required: Please try to generate artifacts for them locally or using a different tool" >> $GITHUB_STEP_SUMMARY - else - echo "All UML diagrams generated successfully" - fi - - - name: Validate Slither reports - if: ${{ inputs.validate_slither_reports == 'true' }} - shell: bash - run: | - echo "Validating Slither reports" - IFS=',' read -r -a modified_files <<< "${{ steps.transform_input_array.outputs.sol_files }}" - missing_reports=() - for file in "${modified_files[@]}"; do - report_file="$(basename "${file%.sol}")-slither-report.md" - if [ ! -f "${{ inputs.slither_reports_path }}/$report_file" ]; then - echo "Error: Slither report for $file not found" - missing_reports+=("$file") - fi - done - - if [ ${#missing_reports[@]} -gt 0 ]; then - echo "Error: Missing Slither reports for files: ${missing_reports[@]}" - echo "# Warning!" >> $GITHUB_STEP_SUMMARY - echo "## Reason: Missing Slither reports for files:" >> $GITHUB_STEP_SUMMARY - for file in "${missing_reports[@]}"; do - echo " $file" >> $GITHUB_STEP_SUMMARY - done - echo "## Action required: Please try to generate artifacts for them locally" >> $GITHUB_STEP_SUMMARY - else - echo "All Slither reports generated successfully" - fi diff --git a/.github/actions/version-file-bump/action.yml b/.github/actions/version-file-bump/action.yml deleted file mode 100644 index eb8d5c1742..0000000000 --- a/.github/actions/version-file-bump/action.yml +++ /dev/null @@ -1,51 +0,0 @@ -name: version-file-bump -description: "Ensure that the VERSION file has been bumped since the last release." -inputs: - github-token: - description: "Github access token" - default: ${{ github.token }} - required: true -outputs: - result: - value: ${{ steps.compare.outputs.result }} - description: "Result of the comparison" -runs: - using: composite - steps: - - name: Get latest release version - id: get-latest-version - shell: bash - run: | - untrimmed_ver=$( - curl --header "Authorization: token ${{ inputs.github-token }}" \ - --request GET \ - "https://api.github.com/repos/${{ github.repository }}/releases/latest?draft=false&prerelease=false" \ - | jq -r .name - ) - latest_version="${untrimmed_ver:1}" - echo "latest_version=${latest_version}" | tee -a "$GITHUB_OUTPUT" - - name: Get current version - id: get-current-version - shell: bash - run: | - current_version=$(jq -r '.version' ./package.json) - echo "current_version=${current_version}" | tee -a "$GITHUB_OUTPUT" - - name: Compare semantic versions - uses: smartcontractkit/chainlink-github-actions/semver-compare@75a9005952a9e905649cfb5a6971fd9429436acd # v2.3.25 - id: compare - with: - version1: ${{ steps.get-current-version.outputs.current_version }} - operator: eq - version2: ${{ steps.get-latest-version.outputs.latest_version }} - - name: Fail if version not bumped - # XXX: The reason we are not checking if the current is greater than the - # latest release is to account for hot fixes which may have been branched - # from a previous tag. 
- shell: bash - env: - VERSION_NOT_BUMPED: ${{ steps.compare.outputs.result }} - run: | - if [[ "${VERSION_NOT_BUMPED:-}" = "true" ]]; then - echo "The version in `package.json` has not bumped since the last release. Please fix by running `pnpm changeset version`." - exit 1 - fi diff --git a/.github/e2e-tests.yml b/.github/e2e-tests.yml deleted file mode 100644 index 02ab3ee101..0000000000 --- a/.github/e2e-tests.yml +++ /dev/null @@ -1,1152 +0,0 @@ -# This file specifies the GitHub runner for each E2E test and is utilized by all E2E CI workflows. -# -# Each entry in this file includes the following: -# - The GitHub runner (runs_on field) that will execute tests. -# - The tests that will be run by the runner. -# - The triggers (e.g., Run PR E2E Tests, Nightly E2E Tests) that should trigger these tests. -# -runner-test-matrix: - - # START: OCR tests - - # Example of 1 runner for all tests in integration-tests/smoke/ocr_test.go - - id: smoke/ocr_test.go:* - path: integration-tests/smoke/ocr_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/ && go test smoke/ocr_test.go -timeout 30m -count=1 -test.parallel=2 -json - pyroscope_env: ci-smoke-ocr-evm-simulated - - - id: soak/ocr_test.go:TestOCRv1Soak - path: integration-tests/soak/ocr_test.go - test_env_type: k8s-remote-runner - runs_on: ubuntu-latest - test_cmd: cd integration-tests/ && go test soak/ocr_test.go -v -test.run ^TestOCRv1Soak$ -test.parallel=1 -timeout 900h -count=1 -json - test_cmd_opts: 2>&1 | tee /tmp/gotest.log | gotestloghelper -ci -singlepackage -hidepassingtests=false - test_secrets_required: true - test_env_vars: - TEST_SUITE: soak - - - id: soak/ocr_test.go:TestOCRv2Soak - path: integration-tests/soak/ocr_test.go - test_env_type: k8s-remote-runner - runs_on: ubuntu-latest - test_cmd: cd integration-tests/ && go test soak/ocr_test.go -v -test.run ^TestOCRv2Soak$ -test.parallel=1 -timeout 900h -count=1 -json - test_cmd_opts: 2>&1 | tee /tmp/gotest.log | gotestloghelper -ci -singlepackage -hidepassingtests=false - test_secrets_required: true - test_env_vars: - TEST_SUITE: soak - - - id: soak/ocr_test.go:TestOCRv2Soak_WemixTestnet - path: integration-tests/soak/ocr_test.go - test_env_type: k8s-remote-runner - runs_on: ubuntu-latest - test_cmd: cd integration-tests/ && go test soak/ocr_test.go -v -test.run ^TestOCRv2Soak$ -test.parallel=1 -timeout 900h -count=1 -json - test_config_override_path: integration-tests/testconfig/ocr2/overrides/wemix_testnet.toml - test_secrets_required: true - test_env_vars: - TEST_SUITE: soak - - - id: soak/ocr_test.go:TestForwarderOCRv1Soak - path: integration-tests/soak/ocr_test.go - test_env_type: k8s-remote-runner - runs_on: ubuntu-latest - test_cmd: cd integration-tests/ && go test soak/ocr_test.go -v -test.run ^TestForwarderOCRv1Soak$ -test.parallel=1 -timeout 900h -count=1 -json - test_secrets_required: true - test_env_vars: - TEST_SUITE: soak - - - id: soak/ocr_test.go:TestForwarderOCRv2Soak - path: integration-tests/soak/ocr_test.go - test_env_type: k8s-remote-runner - runs_on: ubuntu-latest - test_cmd: cd integration-tests/ && go test soak/ocr_test.go -v -test.run ^TestForwarderOCRv2Soak$ -test.parallel=1 -timeout 900h -count=1 -json - test_secrets_required: true - test_env_vars: - TEST_SUITE: soak - - - id: soak/ocr_test.go:TestOCRSoak_GethReorgBelowFinality_FinalityTagDisabled - path: integration-tests/soak/ocr_test.go - test_env_type: k8s-remote-runner - 
runs_on: ubuntu-latest - test_cmd: cd integration-tests/ && go test soak/ocr_test.go -v -test.run TestOCRSoak_GethReorgBelowFinality_FinalityTagDisabled -test.parallel=1 -timeout 900h -count=1 -json - test_secrets_required: true - test_env_vars: - TEST_SUITE: soak - - - id: soak/ocr_test.go:TestOCRSoak_GethReorgBelowFinality_FinalityTagEnabled - path: integration-tests/soak/ocr_test.go - test_env_type: k8s-remote-runner - runs_on: ubuntu-latest - test_cmd: cd integration-tests/ && go test soak/ocr_test.go -v -test.run ^TestOCRSoak_GethReorgBelowFinality_FinalityTagEnabled$ -test.parallel=1 -timeout 900h -count=1 -json - test_secrets_required: true - test_env_vars: - TEST_SUITE: soak - - - id: soak/ocr_test.go:TestOCRSoak_GasSpike - path: integration-tests/soak/ocr_test.go - test_env_type: k8s-remote-runner - runs_on: ubuntu-latest - test_cmd: cd integration-tests/ && go test soak/ocr_test.go -v -test.run ^TestOCRSoak_GasSpike$ -test.parallel=1 -timeout 900h -count=1 -json - test_secrets_required: true - test_env_vars: - TEST_SUITE: soak - - - id: soak/ocr_test.go:TestOCRSoak_ChangeBlockGasLimit - path: integration-tests/soak/ocr_test.go - test_env_type: k8s-remote-runner - runs_on: ubuntu-latest - test_cmd: cd integration-tests/ && go test soak/ocr_test.go -v -test.run ^TestOCRSoak_ChangeBlockGasLimit$ -test.parallel=1 -timeout 900h -count=1 -json - test_secrets_required: true - test_env_vars: - TEST_SUITE: soak - - - id: soak/ocr_test.go:TestOCRSoak_RPCDownForAllCLNodes - path: integration-tests/soak/ocr_test.go - test_env_type: k8s-remote-runner - runs_on: ubuntu-latest - test_cmd: cd integration-tests/ && go test soak/ocr_test.go -v -test.run ^TestOCRSoak_RPCDownForAllCLNodes$ -test.parallel=1 -timeout 900h -count=1 -json - test_secrets_required: true - test_env_vars: - TEST_SUITE: soak - - - id: soak/ocr_test.go:TestOCRSoak_RPCDownForHalfCLNodes - path: integration-tests/soak/ocr_test.go - test_env_type: k8s-remote-runner - runs_on: ubuntu-latest - test_cmd: cd integration-tests/ && go test soak/ocr_test.go -v -test.run ^TestOCRSoak_RPCDownForHalfCLNodes$ -test.parallel=1 -timeout 900h -count=1 -json - test_secrets_required: true - test_env_vars: - TEST_SUITE: soak - - - id: smoke/forwarder_ocr_test.go:* - path: integration-tests/smoke/forwarder_ocr_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/ && go test smoke/forwarder_ocr_test.go -timeout 30m -count=1 -test.parallel=2 -json - pyroscope_env: ci-smoke-forwarder-ocr-evm-simulated - - - id: smoke/forwarders_ocr2_test.go:* - path: integration-tests/smoke/forwarders_ocr2_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/ && go test smoke/forwarders_ocr2_test.go -timeout 30m -count=1 -test.parallel=2 -json - pyroscope_env: ci-smoke-forwarder-ocr-evm-simulated - - - id: smoke/ocr2_test.go:* - path: integration-tests/smoke/ocr2_test.go - test_env_type: docker - runs_on: ubuntu22.04-16cores-64GB - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/ && go test smoke/ocr2_test.go -timeout 30m -count=1 -test.parallel=6 -json - pyroscope_env: ci-smoke-ocr2-evm-simulated - test_env_vars: - E2E_TEST_CHAINLINK_VERSION: '{{ env.DEFAULT_CHAINLINK_PLUGINS_VERSION }}' # This is the chainlink version that has the plugins - - - id: 
smoke/ocr2_test.go:*-plugins - path: integration-tests/smoke/ocr2_test.go - test_env_type: docker - runs_on: ubuntu22.04-16cores-64GB - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/ && go test smoke/ocr2_test.go -timeout 30m -count=1 -test.parallel=6 -json - pyroscope_env: ci-smoke-ocr2-plugins-evm-simulated - test_env_vars: - E2E_TEST_CHAINLINK_VERSION: '{{ env.DEFAULT_CHAINLINK_PLUGINS_VERSION }}' # This is the chainlink version that has the plugins - ENABLE_OTEL_TRACES: true - - - id: chaos/ocr_chaos_test.go - path: integration-tests/chaos/ocr_chaos_test.go - test_env_type: k8s-remote-runner - runs_on: ubuntu-latest - triggers: - - Automation On Demand Tests - - E2E Chaos Tests - test_cmd: cd integration-tests/chaos && DETACH_RUNNER=false go test -test.run "^TestOCRChaos$" -v -test.parallel=10 -timeout 60m -count=1 -json - test_env_vars: - TEST_SUITE: chaos - - # END: OCR tests - - # START: Automation tests - - - id: smoke/automation_test.go:^TestAutomationBasic/registry_2_0|TestAutomationBasic/registry_2_1_conditional|TestAutomationBasic/registry_2_1_logtrigger$ - path: integration-tests/smoke/automation_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run "^TestAutomationBasic/registry_2_0|TestAutomationBasic/registry_2_1_conditional|TestAutomationBasic/registry_2_1_logtrigger$" -test.parallel=3 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-automation-evm-simulated - - - id: smoke/automation_test.go:^TestAutomationBasic/registry_2_1_with_mercury_v02|TestAutomationBasic/registry_2_1_with_mercury_v03|TestAutomationBasic/registry_2_1_with_logtrigger_and_mercury_v02$ - path: integration-tests/smoke/automation_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run "^TestAutomationBasic/registry_2_1_with_mercury_v02|TestAutomationBasic/registry_2_1_with_mercury_v03|TestAutomationBasic/registry_2_1_with_logtrigger_and_mercury_v02$" -test.parallel=3 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-automation-evm-simulated - - - id: smoke/automation_test.go:^TestAutomationBasic/registry_2_2_conditional|TestAutomationBasic/registry_2_2_logtrigger|TestAutomationBasic/registry_2_2_with_mercury_v02$ - path: integration-tests/smoke/automation_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run "^TestAutomationBasic/registry_2_2_conditional|TestAutomationBasic/registry_2_2_logtrigger|TestAutomationBasic/registry_2_2_with_mercury_v02$" -test.parallel=3 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-automation-evm-simulated - - - id: smoke/automation_test.go:^TestAutomationBasic/registry_2_2_with_mercury_v03|TestAutomationBasic/registry_2_2_with_logtrigger_and_mercury_v02$ - path: integration-tests/smoke/automation_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run "^TestAutomationBasic/registry_2_2_with_mercury_v03|TestAutomationBasic/registry_2_2_with_logtrigger_and_mercury_v02$" -test.parallel=2 -timeout 30m 
-count=1 -json - pyroscope_env: ci-smoke-automation-evm-simulated - - - id: smoke/automation_test.go:^TestAutomationBasic/registry_2_3_conditional_native|TestAutomationBasic/registry_2_3_conditional_link$ - path: integration-tests/smoke/automation_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run "^TestAutomationBasic/registry_2_3_conditional_native|TestAutomationBasic/registry_2_3_conditional_link$" -test.parallel=2 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-automation-evm-simulated - - - id: smoke/automation_test.go:^TestAutomationBasic/registry_2_3_logtrigger_native|TestAutomationBasic/registry_2_3_logtrigger_link$ - path: integration-tests/smoke/automation_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run "^TestAutomationBasic/registry_2_3_logtrigger_native|TestAutomationBasic/registry_2_3_logtrigger_link$" -test.parallel=2 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-automation-evm-simulated - - - id: smoke/automation_test.go:^TestAutomationBasic/registry_2_3_with_mercury_v03_link|TestAutomationBasic/registry_2_3_with_logtrigger_and_mercury_v02_link$ - path: integration-tests/smoke/automation_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run "^TestAutomationBasic/registry_2_3_with_mercury_v03_link|TestAutomationBasic/registry_2_3_with_logtrigger_and_mercury_v02_link$" -test.parallel=2 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-automation-evm-simulated - - - id: smoke/automation_test.go:^TestSetUpkeepTriggerConfig$ - path: integration-tests/smoke/automation_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestSetUpkeepTriggerConfig$ -test.parallel=2 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-automation-evm-simulated - - - id: smoke/automation_test.go:^TestAutomationAddFunds$ - path: integration-tests/smoke/automation_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestAutomationAddFunds$ -test.parallel=3 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-automation-evm-simulated - - - id: smoke/automation_test.go:^TestAutomationPauseUnPause$ - path: integration-tests/smoke/automation_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestAutomationPauseUnPause$ -test.parallel=3 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-automation-evm-simulated - - - id: smoke/automation_test.go:^TestAutomationRegisterUpkeep$ - path: integration-tests/smoke/automation_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestAutomationRegisterUpkeep$ -test.parallel=3 -timeout 30m -count=1 
-json - pyroscope_env: ci-smoke-automation-evm-simulated - - - id: smoke/automation_test.go:^TestAutomationPauseRegistry$ - path: integration-tests/smoke/automation_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestAutomationPauseRegistry$ -test.parallel=3 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-automation-evm-simulated - - - id: smoke/automation_test.go:^TestAutomationKeeperNodesDown$ - path: integration-tests/smoke/automation_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestAutomationKeeperNodesDown$ -test.parallel=3 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-automation-evm-simulated - - - id: smoke/automation_test.go:^TestAutomationPerformSimulation$ - path: integration-tests/smoke/automation_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestAutomationPerformSimulation$ -test.parallel=3 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-automation-evm-simulated - - - id: smoke/automation_test.go:^TestAutomationCheckPerformGasLimit$ - path: integration-tests/smoke/automation_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestAutomationCheckPerformGasLimit$ -test.parallel=3 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-automation-evm-simulated - - - id: smoke/automation_test.go:^TestUpdateCheckData$ - path: integration-tests/smoke/automation_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestUpdateCheckData$ -test.parallel=3 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-automation-evm-simulated - - - id: smoke/automation_test.go:^TestSetOffchainConfigWithMaxGasPrice$ - path: integration-tests/smoke/automation_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestSetOffchainConfigWithMaxGasPrice$ -test.parallel=2 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-automation-evm-simulated - - - id: smoke/keeper_test.go:^TestKeeperBasicSmoke$ - path: integration-tests/smoke/keeper_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestKeeperBasicSmoke$ -test.parallel=3 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-keeper-evm-simulated - - - id: smoke/keeper_test.go:^TestKeeperBlockCountPerTurn$ - path: integration-tests/smoke/keeper_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestKeeperBlockCountPerTurn$ -test.parallel=3 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-keeper-evm-simulated - - - id: 
smoke/keeper_test.go:^TestKeeperSimulation$ - path: integration-tests/smoke/keeper_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestKeeperSimulation$ -test.parallel=2 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-keeper-evm-simulated - - - id: smoke/keeper_test.go:^TestKeeperCheckPerformGasLimit$ - path: integration-tests/smoke/keeper_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestKeeperCheckPerformGasLimit$ -test.parallel=2 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-keeper-evm-simulated - - - id: smoke/keeper_test.go:^TestKeeperRegisterUpkeep$ - path: integration-tests/smoke/keeper_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestKeeperRegisterUpkeep$ -test.parallel=3 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-keeper-evm-simulated - - - id: smoke/keeper_test.go:^TestKeeperAddFunds$ - path: integration-tests/smoke/keeper_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestKeeperAddFunds$ -test.parallel=3 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-keeper-evm-simulated - - - id: smoke/keeper_test.go:^TestKeeperRemove$ - path: integration-tests/smoke/keeper_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestKeeperRemove$ -test.parallel=3 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-keeper-evm-simulated - - - id: smoke/keeper_test.go:^TestKeeperPauseRegistry$ - path: integration-tests/smoke/keeper_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestKeeperPauseRegistry$ -test.parallel=2 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-keeper-evm-simulated - - - id: smoke/keeper_test.go:^TestKeeperMigrateRegistry$ - path: integration-tests/smoke/keeper_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestKeeperMigrateRegistry$ -test.parallel=1 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-keeper-evm-simulated - - - id: smoke/keeper_test.go:^TestKeeperNodeDown$ - path: integration-tests/smoke/keeper_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestKeeperNodeDown$ -test.parallel=3 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-keeper-evm-simulated - - - id: smoke/keeper_test.go:^TestKeeperPauseUnPauseUpkeep$ - path: integration-tests/smoke/keeper_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestKeeperPauseUnPauseUpkeep$ -test.parallel=1 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-keeper-evm-simulated - - - id: 
smoke/keeper_test.go:^TestKeeperUpdateCheckData$ - path: integration-tests/smoke/keeper_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestKeeperUpdateCheckData$ -test.parallel=1 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-keeper-evm-simulated - - - id: smoke/keeper_test.go:^TestKeeperJobReplacement$ - path: integration-tests/smoke/keeper_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestKeeperJobReplacement$ -test.parallel=1 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-keeper-evm-simulated - - - id: load/automationv2_1/automationv2_1_test.go:TestLogTrigger - path: integration-tests/load/automationv2_1/automationv2_1_test.go - runs_on: ubuntu-latest - test_env_type: k8s-remote-runner - test_cmd: cd integration-tests/load/automationv2_1 && go test -test.run TestLogTrigger -test.parallel=1 -timeout 60m -count=1 -json - remote_runner_memory: 4Gi - test_secrets_required: true - test_env_vars: - TEST_LOG_LEVEL: info - TEST_SUITE: automationv2_1 - pyroscope_env: automation-load-test - - - id: smoke/automation_upgrade_test.go:^TestAutomationNodeUpgrade/registry_2_0 - path: integration-tests/smoke/automation_upgrade_test.go - test_env_type: docker - runs_on: ubuntu22.04-8cores-32GB - triggers: - - Automation Nightly Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestAutomationNodeUpgrade/registry_2_0 -test.parallel=1 -timeout 60m -count=1 -json - test_env_vars: - E2E_TEST_CHAINLINK_IMAGE: public.ecr.aws/chainlink/chainlink - E2E_TEST_CHAINLINK_VERSION: latest - E2E_TEST_CHAINLINK_UPGRADE_IMAGE: '{{ env.QA_CHAINLINK_IMAGE }}' - E2E_TEST_CHAINLINK_UPGRADE_VERSION: '{{ env.DEFAULT_CHAINLINK_UPGRADE_VERSION }}' - pyroscope_env: ci-smoke-automation-upgrade-tests - - - id: smoke/automation_upgrade_test.go:^TestAutomationNodeUpgrade/registry_2_1 - path: integration-tests/smoke/automation_upgrade_test.go - test_env_type: docker - runs_on: ubuntu22.04-8cores-32GB - triggers: - - Automation Nightly Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestAutomationNodeUpgrade/registry_2_1 -test.parallel=5 -timeout 60m -count=1 -json - test_env_vars: - E2E_TEST_CHAINLINK_IMAGE: public.ecr.aws/chainlink/chainlink - E2E_TEST_CHAINLINK_VERSION: latest - E2E_TEST_CHAINLINK_UPGRADE_IMAGE: '{{ env.QA_CHAINLINK_IMAGE }}' - E2E_TEST_CHAINLINK_UPGRADE_VERSION: '{{ env.DEFAULT_CHAINLINK_UPGRADE_VERSION }}' - pyroscope_env: ci-smoke-automation-upgrade-tests - - - id: smoke/automation_upgrade_test.go:^TestAutomationNodeUpgrade/registry_2_2 - path: integration-tests/smoke/automation_upgrade_test.go - test_env_type: docker - runs_on: ubuntu22.04-8cores-32GB - triggers: - - Automation Nightly Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestAutomationNodeUpgrade/registry_2_2 -test.parallel=5 -timeout 60m -count=1 -json - test_env_vars: - E2E_TEST_CHAINLINK_IMAGE: public.ecr.aws/chainlink/chainlink - E2E_TEST_CHAINLINK_VERSION: latest - E2E_TEST_CHAINLINK_UPGRADE_IMAGE: '{{ env.QA_CHAINLINK_IMAGE }}' - E2E_TEST_CHAINLINK_UPGRADE_VERSION: '{{ env.DEFAULT_CHAINLINK_UPGRADE_VERSION }}' - pyroscope_env: ci-smoke-automation-upgrade-tests - - - id: reorg/automation_reorg_test.go^TestAutomationReorg/registry_2_0 - path: integration-tests/reorg/automation_reorg_test.go - runs_on: 
ubuntu-latest - test_env_type: docker - test_env_vars: - TEST_SUITE: reorg - triggers: - - Automation On Demand Tests - test_cmd: cd integration-tests/reorg && DETACH_RUNNER=false go test -v -test.run ^TestAutomationReorg/registry_2_0 -test.parallel=1 -timeout 30m -count=1 -json - pyroscope_env: ci-automation-on-demand-reorg - - - id: reorg/automation_reorg_test.go^TestAutomationReorg/registry_2_1 - path: integration-tests/reorg/automation_reorg_test.go - runs_on: ubuntu-latest - test_env_type: docker - test_env_vars: - TEST_SUITE: reorg - triggers: - - Automation On Demand Tests - test_cmd: cd integration-tests/reorg && DETACH_RUNNER=false go test -v -test.run ^TestAutomationReorg/registry_2_1 -test.parallel=2 -timeout 30m -count=1 -json - pyroscope_env: ci-automation-on-demand-reorg - - - id: reorg/automation_reorg_test.go^TestAutomationReorg/registry_2_2 - path: integration-tests/reorg/automation_reorg_test.go - runs_on: ubuntu-latest - test_env_type: docker - test_env_vars: - TEST_SUITE: reorg - triggers: - - Automation On Demand Tests - test_cmd: cd integration-tests/reorg && DETACH_RUNNER=false go test -v -test.run ^TestAutomationReorg/registry_2_2 -test.parallel=2 -timeout 30m -count=1 -json - pyroscope_env: ci-automation-on-demand-reorg - - - id: reorg/automation_reorg_test.go^TestAutomationReorg/registry_2_3 - path: integration-tests/reorg/automation_reorg_test.go - runs_on: ubuntu-latest - test_env_type: docker - test_env_vars: - TEST_SUITE: reorg - triggers: - - Automation On Demand Tests - test_cmd: cd integration-tests/reorg && DETACH_RUNNER=false go test -v -test.run ^TestAutomationReorg/registry_2_3 -test.parallel=2 -timeout 30m -count=1 -json - pyroscope_env: ci-automation-on-demand-reorg - - - id: chaos/automation_chaos_test.go - path: integration-tests/chaos/automation_chaos_test.go - test_env_type: k8s-remote-runner - runs_on: ubuntu-latest - triggers: - - Automation On Demand Tests - - E2E Chaos Tests - test_cmd: cd integration-tests/chaos && DETACH_RUNNER=false go test -v -test.run ^TestAutomationChaos$ -test.parallel=20 -timeout 60m -count=1 -json - pyroscope_env: ci-automation-on-demand-chaos - test_env_vars: - TEST_SUITE: chaos - - - id: benchmark/automation_test.go:TestAutomationBenchmark - path: integration-tests/benchmark/automation_test.go - test_env_type: k8s-remote-runner - remote_runner_memory: 4Gi - runs_on: ubuntu-latest - # triggers: - # - Nightly E2E Tests - test_cmd: cd integration-tests/benchmark && go test -v -test.run ^TestAutomationBenchmark$ -test.parallel=1 -timeout 30m -count=1 -json - pyroscope_env: ci-benchmark-automation-nightly - test_env_vars: - TEST_LOG_LEVEL: info - TEST_SUITE: benchmark - TEST_TYPE: benchmark - - - id: soak/automation_test.go:TestAutomationBenchmark - path: integration-tests/benchmark/automation_test.go - test_env_type: k8s-remote-runner - remote_runner_memory: 4Gi - runs_on: ubuntu-latest - # triggers: - # - Nightly E2E Tests - test_cmd: cd integration-tests/benchmark && go test -v -test.run ^TestAutomationBenchmark$ -test.parallel=1 -timeout 30m -count=1 -json - pyroscope_env: ci-benchmark-automation-nightly - test_env_vars: - TEST_LOG_LEVEL: info - TEST_SUITE: benchmark - TEST_TYPE: soak - - # END: Automation tests - - # START: VRF tests - - - id: smoke/vrfv2_test.go:TestVRFv2Basic - path: integration-tests/smoke/vrfv2_test.go - runs_on: ubuntu22.04-8cores-32GB - test_env_type: docker - test_cmd: cd integration-tests/smoke && go test -v -test.run TestVRFv2Basic -test.parallel=1 -timeout 30m -count=1 -json - 
test_secrets_required: true - triggers: - - On Demand VRFV2 Smoke Test (Ethereum clients) - - - id: load/vrfv2plus/vrfv2plus_test.go:^TestVRFV2PlusPerformance$Smoke - path: integration-tests/load/vrfv2plus/vrfv2plus_test.go - runs_on: ubuntu22.04-8cores-32GB - test_env_type: docker - test_cmd: cd integration-tests/load/vrfv2plus && go test -v -test.run ^TestVRFV2PlusPerformance$ -test.parallel=1 -timeout 24h -count=1 -json - test_config_override_required: true - test_secrets_required: true - test_env_vars: - TEST_TYPE: Smoke - triggers: - - On Demand VRFV2 Plus Performance Test - - - id: load/vrfv2plus/vrfv2plus_test.go:^TestVRFV2PlusBHSPerformance$Smoke - path: integration-tests/load/vrfv2plus/vrfv2plus_test.go - runs_on: ubuntu22.04-8cores-32GB - test_env_type: docker - test_cmd: cd integration-tests/load/vrfv2plus && go test -v -test.run ^TestVRFV2PlusBHSPerformance$ -test.parallel=1 -timeout 24h -count=1 -json - test_config_override_required: true - test_secrets_required: true - test_env_vars: - TEST_TYPE: Smoke - triggers: - - On Demand VRFV2 Plus Performance Test - - - id: load/vrfv2/vrfv2_test.go:^TestVRFV2Performance$Smoke - path: integration-tests/load/vrfv2/vrfv2_test.go - runs_on: ubuntu22.04-8cores-32GB - test_env_type: docker - test_cmd: cd integration-tests/load/vrfv2 && go test -v -test.run ^TestVRFV2Performance$ -test.parallel=1 -timeout 24h -count=1 -json - test_config_override_required: true - test_secrets_required: true - test_env_vars: - TEST_TYPE: Smoke - triggers: - - On Demand VRFV2 Performance Test - - - id: load/vrfv2/vrfv2_test.go:^TestVRFV2PlusBHSPerformance$Smoke - path: integration-tests/load/vrfv2/vrfv2_test.go - runs_on: ubuntu22.04-8cores-32GB - test_env_type: docker - test_cmd: cd integration-tests/load/vrfv2 && go test -v -test.run ^TestVRFV2PlusBHSPerformance$ -test.parallel=1 -timeout 24h -count=1 -json - test_config_override_required: true - test_secrets_required: true - test_env_vars: - TEST_TYPE: Smoke - triggers: - - On Demand VRFV2 Performance Test - - - id: smoke/vrf_test.go:* - path: integration-tests/smoke/vrf_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/ && go test smoke/vrf_test.go -timeout 30m -count=1 -test.parallel=2 -json - pyroscope_env: ci-smoke-vrf-evm-simulated - - - id: smoke/vrfv2_test.go:* - path: integration-tests/smoke/vrfv2_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/ && go test smoke/vrfv2_test.go -timeout 30m -count=1 -test.parallel=6 -json - pyroscope_env: ci-smoke-vrf2-evm-simulated - - - id: smoke/vrfv2plus_test.go:* - path: integration-tests/smoke/vrfv2plus_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/ && go test smoke/vrfv2plus_test.go -timeout 30m -count=1 -test.parallel=9 -json - pyroscope_env: ci-smoke-vrf2plus-evm-simulated - - # VRFv2Plus tests on any live testnet (test_config_override_path required) - # Tests have to run in sequence because of a single private key used - - id: TestVRFv2Plus_LiveTestnets - path: integration-tests/smoke/vrfv2plus_test.go - runs_on: ubuntu-latest - test_env_type: docker - test_cmd: cd integration-tests/smoke && go test -v -test.run 
"TestVRFv2Plus$/(Link_Billing|Native_Billing|Direct_Funding)|TestVRFV2PlusWithBHS" -test.parallel=1 -timeout 2h -count=1 -json - - # VRFv2Plus release tests on Sepolia testnet - - id: TestVRFv2Plus_Release_Sepolia - path: integration-tests/smoke/vrfv2plus_test.go - runs_on: ubuntu-latest - test_env_type: docker - test_cmd: cd integration-tests/smoke && go test -v -test.run "TestVRFv2Plus$/(Link_Billing|Native_Billing|Direct_Funding)|TestVRFV2PlusWithBHS" -test.parallel=1 -timeout 2h -count=1 -json - test_config_override_path: integration-tests/testconfig/vrfv2plus/overrides/new_env/sepolia_new_env_test_config.toml - triggers: - - VRF E2E Release Tests - - # END: VRF tests - - # START: LogPoller tests - - - id: smoke/log_poller_test.go:^TestLogPollerFewFiltersFixedDepth$ - path: integration-tests/smoke/log_poller_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestLogPollerFewFiltersFixedDepth$ -test.parallel=1 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-log_poller-evm-simulated - - - id: smoke/log_poller_test.go:^TestLogPollerFewFiltersFinalityTag$ - path: integration-tests/smoke/log_poller_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestLogPollerFewFiltersFinalityTag$ -test.parallel=1 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-log_poller-evm-simulated - - - id: smoke/log_poller_test.go:^TestLogPollerWithChaosFixedDepth$ - path: integration-tests/smoke/log_poller_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestLogPollerWithChaosFixedDepth$ -test.parallel=1 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-log_poller-evm-simulated - - - id: smoke/log_poller_test.go:^TestLogPollerWithChaosFinalityTag$ - path: integration-tests/smoke/log_poller_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestLogPollerWithChaosFinalityTag$ -test.parallel=1 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-log_poller-evm-simulated - - - id: smoke/log_poller_test.go:^TestLogPollerWithChaosPostgresFinalityTag$ - path: integration-tests/smoke/log_poller_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestLogPollerWithChaosPostgresFinalityTag$ -test.parallel=1 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-log_poller-evm-simulated - - - id: smoke/log_poller_test.go:^TestLogPollerWithChaosPostgresFixedDepth$ - path: integration-tests/smoke/log_poller_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestLogPollerWithChaosPostgresFixedDepth$ -test.parallel=1 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-log_poller-evm-simulated - - - id: smoke/log_poller_test.go:^TestLogPollerReplayFixedDepth$ - path: 
integration-tests/smoke/log_poller_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestLogPollerReplayFixedDepth$ -test.parallel=1 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-log_poller-evm-simulated - - - id: smoke/log_poller_test.go:^TestLogPollerReplayFinalityTag$ - path: integration-tests/smoke/log_poller_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/smoke && go test -test.run ^TestLogPollerReplayFinalityTag$ -test.parallel=1 -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-log_poller-evm-simulated - - # END: LogPoller tests - - # START: Other tests - - - id: smoke/runlog_test.go:* - path: integration-tests/smoke/runlog_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/ && go test smoke/runlog_test.go -timeout 30m -test.parallel=2 -count=1 -json - pyroscope_env: ci-smoke-runlog-evm-simulated - - - id: smoke/cron_test.go:* - path: integration-tests/smoke/cron_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/ && go test smoke/cron_test.go -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-cron-evm-simulated - - - id: smoke/flux_test.go:* - path: integration-tests/smoke/flux_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/ && go test smoke/flux_test.go -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-flux-evm-simulated - - - id: smoke/reorg_above_finality_test.go:* - path: integration-tests/smoke/reorg_above_finality_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/ && go test smoke/reorg_above_finality_test.go -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-reorg-above-finality-evm-simulated - - - id: migration/upgrade_version_test.go:* - path: integration-tests/migration/upgrade_version_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/migration && go test upgrade_version_test.go -timeout 30m -count=1 -test.parallel=2 -json - test_env_vars: - E2E_TEST_CHAINLINK_IMAGE: public.ecr.aws/w0i8p0z9/chainlink-ccip - E2E_TEST_CHAINLINK_VERSION: '{{ env.LATEST_CHAINLINK_RELEASE_VERSION }}' - E2E_TEST_CHAINLINK_UPGRADE_IMAGE: '{{ env.QA_CHAINLINK_IMAGE }}' - E2E_TEST_CHAINLINK_UPGRADE_VERSION: '{{ env.DEFAULT_CHAINLINK_VERSION }}' - - - id: smoke/job_distributor_test.go:* - path: integration-tests/smoke/job_distributor_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E Core Tests - - Merge Queue E2E Core Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/ && go test smoke/job_distributor_test.go -timeout 30m -count=1 -json - pyroscope_env: ci-smoke-jd-evm-simulated - - # END: Other tests - - # START: CCIP tests - - - id: ccip-smoke - path: integration-tests/ccip-tests/smoke/ccip_test.go - test_env_type: docker - runs_on: 
ubuntu-latest - triggers: - - PR E2E CCIP Tests - - Merge Queue E2E CCIP Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/ccip-tests/smoke && go test ccip_test.go -test.run ^TestSmokeCCIPForBidirectionalLane$ -timeout 30m -count=1 -test.parallel=1 -json - test_env_vars: - E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2 - - - id: ccip-smoke-1.4-pools - path: integration-tests/ccip-tests/smoke/ccip_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E CCIP Tests - - Merge Queue E2E CCIP Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/ccip-tests/smoke && go test ccip_test.go -test.run ^TestSmokeCCIPForBidirectionalLane$ -timeout 30m -count=1 -test.parallel=1 -json - test_env_vars: - E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2 - test_config_override_path: integration-tests/ccip-tests/testconfig/tomls/contract-version1.4.toml - - - id: ccip-smoke-usdc - path: integration-tests/ccip-tests/smoke/ccip_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E CCIP Tests - - Merge Queue E2E CCIP Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/ccip-tests/smoke && go test ccip_test.go -test.run ^TestSmokeCCIPForBidirectionalLane$ -timeout 30m -count=1 -test.parallel=1 -json - test_env_vars: - E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2 - test_config_override_path: integration-tests/ccip-tests/testconfig/tomls/usdc_mock_deployment.toml - - - id: ccip-smoke-db-compatibility - path: integration-tests/ccip-tests/smoke/ccip_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E CCIP Tests - - Merge Queue E2E CCIP Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/ccip-tests/smoke && go test ccip_test.go -test.run ^TestSmokeCCIPForBidirectionalLane$ -timeout 30m -count=1 -test.parallel=1 -json - test_env_vars: - E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2 - test_config_override_path: integration-tests/ccip-tests/testconfig/tomls/db-compatibility.toml - - - id: ccip-smoke-leader-lane - path: integration-tests/ccip-tests/smoke/ccip_test.go - test_env_type: docker - runs_on: ubuntu-latest - # Leader lane test is flakey in Core repo - Need to fix CCIP-3074 to enable it. 
- triggers: - # - PR E2E CCIP Tests - # - Nightly E2E Tests - test_cmd: cd integration-tests/ccip-tests/smoke && go test ccip_test.go -test.run ^TestSmokeCCIPForBidirectionalLane$ -timeout 30m -count=1 -test.parallel=1 -json - test_env_vars: - E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2 - test_config_override_path: integration-tests/ccip-tests/testconfig/tomls/leader-lane.toml - - - id: ccip-tests/smoke/ccip_test.go:^TestSmokeCCIPTokenPoolRateLimits$ - path: integration-tests/ccip-tests/smoke/ccip_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E CCIP Tests - - Merge Queue E2E CCIP Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/ccip-tests/smoke && go test ccip_test.go -test.run ^TestSmokeCCIPTokenPoolRateLimits$ -timeout 30m -count=1 -test.parallel=1 -json - test_env_vars: - E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2 - - - id: ccip-tests/smoke/ccip_test.go:^TestSmokeCCIPMulticall$ - path: integration-tests/ccip-tests/smoke/ccip_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E CCIP Tests - - Merge Queue E2E CCIP Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/ccip-tests/smoke && go test ccip_test.go -test.run ^TestSmokeCCIPMulticall$ -timeout 30m -count=1 -test.parallel=1 -json - test_env_vars: - E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2 - - - id: ccip-tests/smoke/ccip_test.go:^TestSmokeCCIPManuallyExecuteAfterExecutionFailingDueToInsufficientGas$ - path: integration-tests/ccip-tests/smoke/ccip_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E CCIP Tests - - Merge Queue E2E CCIP Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/ccip-tests/smoke && go test ccip_test.go -test.run ^TestSmokeCCIPManuallyExecuteAfterExecutionFailingDueToInsufficientGas$ -timeout 30m -count=1 -test.parallel=1 -json - test_env_vars: - E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2 - - - id: ccip-tests/smoke/ccip_test.go:^TestSmokeCCIPOnRampLimits$ - path: integration-tests/ccip-tests/smoke/ccip_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E CCIP Tests - - Merge Queue E2E CCIP Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/ccip-tests/smoke && go test ccip_test.go -test.run ^TestSmokeCCIPOnRampLimits$ -timeout 30m -count=1 -test.parallel=1 -json - test_env_vars: - E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2 - - - id: ccip-tests/smoke/ccip_test.go:^TestSmokeCCIPOffRampCapacityLimit$ - path: integration-tests/ccip-tests/smoke/ccip_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - Nightly E2E Tests - test_cmd: cd integration-tests/ccip-tests/smoke && go test ccip_test.go -test.run ^TestSmokeCCIPOffRampCapacityLimit$ -timeout 30m -count=1 -test.parallel=1 -json - test_env_vars: - E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2 - - - id: ccip-tests/smoke/ccip_test.go:^TestSmokeCCIPOffRampAggRateLimit$ - path: integration-tests/ccip-tests/smoke/ccip_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - Nightly E2E Tests - test_cmd: cd integration-tests/ccip-tests/smoke && go test ccip_test.go -test.run ^TestSmokeCCIPOffRampAggRateLimit$ -timeout 30m -count=1 -test.parallel=1 -json - test_env_vars: - E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2 - - - id: ccip-tests/smoke/ccip_test.go:^TestSmokeCCIPReorgBelowFinality$ - path: integration-tests/ccip-tests/smoke/ccip_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E CCIP Tests - - Merge 
Queue E2E CCIP Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/ccip-tests/smoke && go test ccip_test.go -test.run ^TestSmokeCCIPReorgBelowFinality$ -timeout 30m -count=1 -test.parallel=1 -json - test_env_vars: - E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2 - test_config_override_path: integration-tests/ccip-tests/testconfig/tomls/ccip-reorg.toml - - - id: ccip-tests/smoke/ccip_test.go:^TestSmokeCCIPReorgAboveFinalityAtDestination$ - path: integration-tests/ccip-tests/smoke/ccip_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E CCIP Tests - - Merge Queue E2E CCIP Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/ccip-tests/smoke && go test ccip_test.go -test.run ^TestSmokeCCIPReorgAboveFinalityAtDestination$ -timeout 30m -count=1 -test.parallel=1 -json - test_env_vars: - E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2 - test_config_override_path: integration-tests/ccip-tests/testconfig/tomls/ccip-reorg.toml - - - id: ccip-tests/smoke/ccip_test.go:^TestSmokeCCIPReorgAboveFinalityAtSource$ - path: integration-tests/ccip-tests/smoke/ccip_test.go - test_env_type: docker - runs_on: ubuntu-latest - triggers: - - PR E2E CCIP Tests - - Merge Queue E2E CCIP Tests - - Nightly E2E Tests - test_cmd: cd integration-tests/ccip-tests/smoke && go test ccip_test.go -test.run ^TestSmokeCCIPReorgAboveFinalityAtSource$ -timeout 30m -count=1 -test.parallel=1 -json - test_env_vars: - E2E_TEST_SELECTED_NETWORK: SIMULATED_1,SIMULATED_2 - test_config_override_path: integration-tests/ccip-tests/testconfig/tomls/ccip-reorg.toml - - - id: integration-tests/ccip-tests/load/ccip_test.go:TestLoadCCIPStableRPS - path: integration-tests/ccip-tests/load/ccip_test.go - test_env_type: k8s-remote-runner - runs_on: ubuntu-latest - test_cmd: cd integration-tests/ccip-tests/load && DETACH_RUNNER=false go test -test.run ^TestLoadCCIPStableRPS$ -timeout 70m -count=1 -test.parallel=1 -json - test_env_vars: - TEST_SUITE: ccip-load - E2E_TEST_GRAFANA_DASHBOARD_URL: "/d/6vjVx-1V8/ccip-long-running-tests" - triggers: - - E2E CCIP Load Tests - test_artifacts_on_failure: - - ./integration-tests/load/logs/payload_ccip.json - - # Enable when CCIP-2277 is resolved - # - # - id: integration-tests/ccip-tests/load/ccip_test.go:TestLoadCCIPStableRPSAfterARMCurseAndUncurse - # path: integration-tests/ccip-tests/load/ccip_test.go - # test_env_type: k8s-remote-runner - # runs_on: ubuntu-latest - # test_cmd: cd integration-tests/ccip-tests/load && DETACH_RUNNER=false go test -test.run $TestLoadCCIPStableRPSAfterARMCurseAndUncurse$ -timeout 70m -count=1 -test.parallel=1 -json - # test_config_override_path: integration-tests/ccip-tests/testconfig/tomls/load-with-arm-curse-uncurse.toml - # test_env_vars: - # E2E_TEST_GRAFANA_DASHBOARD_URL: "/d/6vjVx-1V8/ccip-long-running-tests" - # triggers: - # - E2E CCIP Load Tests - # test_artifacts_on_failure: - # - ./integration-tests/load/logs/payload_ccip.json - - - id: ccip-tests/chaos/ccip_test.go - path: integration-tests/ccip-tests/chaos/ccip_test.go - test_env_type: k8s-remote-runner - runs_on: ubuntu-latest - triggers: - - E2E CCIP Chaos Tests - test_cmd: cd integration-tests/ccip-tests/chaos && DETACH_RUNNER=false go test ccip_test.go -v -test.parallel=11 -timeout 60m -count=1 -json - test_env_vars: - TEST_SUITE: chaos - TEST_TRIGGERED_BY: ccip-cron-chaos-eth - TEST_LOG_LEVEL: debug - - - id: ccip-tests/load/ccip_test.go:^TestLoadCCIPStableWithPodChaosDiffCommitAndExec - path: integration-tests/ccip-tests/load/ccip_test.go - test_env_type: 
k8s-remote-runner - runs_on: ubuntu-latest - triggers: - # Disabled until CCIP-2555 is resolved - # - E2E CCIP Chaos Tests - test_cmd: cd integration-tests/ccip-tests/load && DETACH_RUNNER=false go test -run '^TestLoadCCIPStableWithPodChaosDiffCommitAndExec' -v -test.parallel=4 -timeout 120m -count=1 -json - test_env_vars: - TEST_SUITE: chaos - TEST_TRIGGERED_BY: ccip-cron-chaos-eth - TEST_LOG_LEVEL: debug - E2E_TEST_GRAFANA_DASHBOARD_URL: /d/6vjVx-1V8/ccip-long-running-tests - - # END: CCIP tests \ No newline at end of file diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md deleted file mode 100644 index e77921d0dd..0000000000 --- a/.github/pull_request_template.md +++ /dev/null @@ -1,4 +0,0 @@ -## Motivation - - -## Solution diff --git a/.github/scripts/check-changeset-tags.sh b/.github/scripts/check-changeset-tags.sh deleted file mode 100755 index 5447cb7915..0000000000 --- a/.github/scripts/check-changeset-tags.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/bin/bash - -# This checks for if at least one tag exists from a list of tags provided in a changeset file -# -# TAG LIST: -# #nops : For any feature that is NOP facing and needs to be in the official Release Notes for the release. -# #added : For any new functionality added. -# #changed : For any change to the existing functionality. -# #removed : For any functionality/config that is removed. -# #updated : For any functionality that is updated. -# #deprecation_notice : For any upcoming deprecation functionality. -# #breaking_change : For any functionality that requires manual action for the node to boot. -# #db_update : For any feature that introduces updates to database schema. -# #wip : For any change that is not ready yet and external communication about it should be held off till it is feature complete. -# #bugfix - For bug fixes. -# #internal - For changesets that need to be excluded from the final changelog. - -if [ $# -eq 0 ]; then - echo "Error: No changeset file path provided." - exit 1 -fi - -CHANGESET_FILE_PATH=$1 -tags_list=( "#nops" "#added" "#changed" "#removed" "#updated" "#deprecation_notice" "#breaking_change" "#db_update" "#wip" "#bugfix" "#internal" ) -has_tags=false -found_tags=() - -if [[ ! -f "$CHANGESET_FILE_PATH" ]]; then - echo "Error: File '$CHANGESET_FILE_PATH' does not exist." - exit 1 -fi - -changeset_content=$(sed -n '/^---$/,/^---$/{ /^---$/!p; }' $CHANGESET_FILE_PATH) -semvar_value=$(echo "$changeset_content" | awk -F": " '/"ccip"/ {print $2}') - -if [[ "$semvar_value" != "major" && "$semvar_value" != "minor" && "$semvar_value" != "patch" ]]; then - echo "Invalid changeset semvar value for 'ccip'. Must be 'major', 'minor', or 'patch'." - exit 1 -fi - -while IFS= read -r line; do - for tag in "${tags_list[@]}"; do - if [[ "$line" == *"$tag"* ]]; then - found_tags+=("$tag") - echo "Found tag: $tag in $CHANGESET_FILE_PATH" - has_tags=true - fi - done -done < "$CHANGESET_FILE_PATH" - -if [[ "$has_tags" == false ]]; then - echo "Error: No tags found in $CHANGESET_FILE_PATH" -fi - -echo "has_tags=$has_tags" >> $GITHUB_OUTPUT -echo "found_tags=$(jq -jR 'split(" ") | join(",")' <<< "${found_tags[*]}")" >> $GITHUB_OUTPUT diff --git a/.github/tracing/README.md b/.github/tracing/README.md deleted file mode 100644 index 06b2eef665..0000000000 --- a/.github/tracing/README.md +++ /dev/null @@ -1,112 +0,0 @@ -# Distributed Tracing - -As part of the LOOP plugin effort, we've added distributed tracing to the core node. 
This is helpful for initial development and maintenance of LOOPs, but will also empower product teams building on top of core. - -## Dev environment - -One way to generate traces locally today is with the OCR2 basic smoke test. - -1. navigate to `.github/tracing/` and then run `docker compose --file local-smoke-docker-compose.yaml up` -2. set up a local docker registry at `127.0.0.1:5000` (https://www.docker.com/blog/how-to-use-your-own-registry-2/) -3. run `make build_push_plugin_docker_image` in `chainlink/integration-tests/Makefile` -4. prepare your `overrides.toml` file with the selected network and CL image name and version, and place it anywhere -inside the `integration-tests` directory. Sample `overrides.toml` file: -```toml -[ChainlinkImage] -image="127.0.0.1:5000/chainlink" -version="develop" - -[Network] -selected_networks=["simulated"] -``` -5. run `go test -run TestOCRv2Basic ./smoke/ocr2_test.go` -6. navigate to `localhost:3000/explore` in a web browser to query for traces - -Core and the median plugins are instrumented with OpenTelemetry traces, which are sent to the OTEL collector and forwarded to the Tempo backend. The Grafana UI can then read the trace data from the Tempo backend. - - -## CI environment - -Another way to generate traces is by enabling traces for PRs. This will instrument traces for `TestOCRv2Basic` in the CI run. - -1. Cut a PR in the core repo -2. Add the `enable tracing` label to the PR -3. Navigate to `Integration Tests / ETH Smoke Tests ocr2-plugins (pull_request)` details -4. Navigate to the summary of the integration tests -5. After the test completes, the generated trace data will be saved as an artifact, currently called `trace-data` -6. Download the artifact to this directory (`chainlink/.github/tracing`) -7. Run `docker compose --file local-smoke-docker-compose.yaml up` -8. Run `sh replay.sh` to replay those traces to the otel-collector container that was spun up in the last step. -9. Navigate to `localhost:3000/explore` in a web browser to query for traces - -The artifact is not a single JSON document - each individual line is a well-formed and complete JSON object. - - -## Production and NOPs environments - -In a production environment, we suggest coupling the lifecycle of nodes and otel-collectors. A best practice is to deploy the otel-collector alongside your node, using infrastructure as code (IaC) to automate deployments and certificate lifecycles. While there are valid use cases for `Tracing.Mode = unencrypted`, we have set the default encryption setting to `Tracing.Mode = tls`. Externally deployed otel-collectors cannot be used with `Tracing.Mode = unencrypted`; if `Tracing.Mode = unencrypted` and an external URI is detected for `Tracing.CollectorTarget`, node configuration will fail to validate and the node will not boot. The node requires a valid encryption mode and collector target to send traces. A minimal sketch of these node-side settings is shown below, after the Configuration section. - -Once traces reach the otel-collector, the rest of the observability pipeline is flexible. We recommend deploying (through automation) centrally managed Grafana Tempo and Grafana UI instances to receive from one or many otel-collector instances. Always use networking best practices and encrypt trace data, especially at network boundaries. - -## Configuration -This folder contains the following config files: -* otel-collector-ci.yaml -* otel-collector-dev.yaml -* tempo.yaml -* grafana-datasources.yaml - -These config files are for an OTEL collector, Grafana Tempo, and a Grafana UI instance to run as containers on the same network.
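For reference, the node-side settings mentioned in the Production and NOPs section live in the node's TOML configuration under the `Tracing` table implied by `Tracing.Mode` and `Tracing.CollectorTarget`. The sketch below is illustrative only: `Mode` and `CollectorTarget` are the settings discussed above, while the `Enabled` key and the collector address are assumptions for the example rather than values taken from this repository.

```toml
# Hypothetical node-side tracing configuration (sketch only, not authoritative).
[Tracing]
Enabled = true                            # assumed on/off switch for tracing
Mode = "tls"                              # default; "unencrypted" fails validation with an external CollectorTarget
CollectorTarget = "otel-collector:4317"   # assumed OTLP gRPC endpoint of a co-deployed collector
```

Port 4317 is used here because it is the OTLP gRPC port exposed by the otel-collector service in the docker-compose and collector configs later in this folder.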
-`otel-collector-dev.yaml` is the configuration for dev (i.e. your local machine) environments, and forwards traces from the otel collector to the Grafana Tempo instance on the same network. -`otel-collector-ci.yaml` is the configuration for the CI runs, and exports the trace data to the artifact from the GitHub run. - -## Adding Traces to Plugins and to core - -Adding traces requires identifying an observability gap in a related group of code executions or a critical path in your application. This is intuitive for the developer: - -- "What's the flow of component interaction in this distributed system?" -- "What's the behavior of the JobProcessorOne component when jobs with [x, y, z] attributes are processed?" -- "Is this critical path workflow behaving the way we expect?" - -The developer will measure a flow of execution from end to end in one trace. Each logically separate measure of this flow is called a span. A span has either one parent span or none, and can have multiple child spans. The relationships between parent and child spans in aggregate form a directed acyclic graph. The trace begins at the root of this graph. - -The most trivial application of a span is measuring top-level performance in one critical path. There is much more you can do, including creating human-readable and timestamped events within a span (useful for monitoring concurrent access to resources), recording errors, linking parent and child spans through large parts of an application, and even extending a span beyond a single process. - -Spans are created by `tracers` and passed through Go applications by `Context`s. A tracer must be initialized first. Both core and plugin developers will initialize a tracer from the globally registered trace provider: - -``` -tracer := otel.GetTracerProvider().Tracer("example.com/foo") -``` - -The globally registered tracer provider is available for plugins after they are initialized, and available in core after configuration is processed (`initGlobals`). - -Add spans by: -``` - func interestingFunc() { - // Assuming there is an appropriate parentContext - ctx, span := tracer.Start(parentContext, "hello-span") - defer span.End() - - // do some work to track with hello-span - } -``` -As implied by the example, `span` is a child of its parent span captured by `parentContext`. - - -Note that in certain situations, there are 3rd party libraries that will set up spans.
For instance: - -``` -import ( - "github.com/gin-gonic/gin" - "go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin" -) - -router := gin.Default() -router.Use(otelgin.Middleware("service-name")) -``` - -The developer aligns with best practices when they: -- Start with critical paths -- Measure paths from end to end (Context is wired all the way through) -- Emphasize broadness of measurement over depth -- Use automatic instrumentation if possible \ No newline at end of file diff --git a/.github/tracing/grafana-datasources.yaml b/.github/tracing/grafana-datasources.yaml deleted file mode 100644 index 098b06ec76..0000000000 --- a/.github/tracing/grafana-datasources.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: 1 - -datasources: -- name: Tempo - type: tempo - access: proxy - orgId: 1 - url: http://tempo:3200 - basicAuth: false - isDefault: true - version: 1 - editable: false - apiVersion: 1 - uid: tempo - jsonData: - httpMethod: GET - serviceMap: - datasourceUid: prometheus \ No newline at end of file diff --git a/.github/tracing/local-smoke-docker-compose.yaml b/.github/tracing/local-smoke-docker-compose.yaml deleted file mode 100644 index 744ba88ef6..0000000000 --- a/.github/tracing/local-smoke-docker-compose.yaml +++ /dev/null @@ -1,48 +0,0 @@ -version: "3" -services: - - # ... the OpenTelemetry Collector configured to receive traces and export to Tempo ... - otel-collector: - image: otel/opentelemetry-collector:0.61.0 - command: [ "--config=/etc/otel-collector.yaml" ] - volumes: - - ./otel-collector-dev.yaml:/etc/otel-collector.yaml - - ../../integration-tests/smoke/traces/trace-data.json:/etc/trace-data.json # local trace data stored consistent with smoke/logs - ports: - - "4317:4317" # otlp grpc - - "3100:3100" - depends_on: - - tempo - networks: - - tracing-network - - # .. Which accepts requests from grafana ... 
- tempo: - image: grafana/tempo:latest - command: [ "-config.file=/etc/tempo.yaml" ] - volumes: - - ./tempo.yaml:/etc/tempo.yaml - - ./tempo-data:/tmp/tempo - ports: - - "4317" # otlp grpc - networks: - - tracing-network - - grafana: - image: grafana/grafana:9.4.3 - volumes: - - ./grafana-datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml - environment: - - GF_AUTH_ANONYMOUS_ENABLED=true - - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin - - GF_AUTH_DISABLE_LOGIN_FORM=true - - GF_FEATURE_TOGGLES_ENABLE=traceqlEditor - ports: - - "3000:3000" - networks: - - tracing-network - -networks: - tracing-network: - name: tracing - driver: bridge \ No newline at end of file diff --git a/.github/tracing/otel-collector-ci.yaml b/.github/tracing/otel-collector-ci.yaml deleted file mode 100644 index 0bf123d29b..0000000000 --- a/.github/tracing/otel-collector-ci.yaml +++ /dev/null @@ -1,22 +0,0 @@ -receivers: - otlp: - protocols: - grpc: - endpoint: "0.0.0.0:4317" - http: - endpoint: "0.0.0.0:3100" -exporters: - file: - path: /tracing/trace-data.json - otlp: - endpoint: tempo:4317 - tls: - insecure: true -service: - telemetry: - logs: - level: "debug" # Set log level to debug - pipelines: - traces: - receivers: [otlp] - exporters: [file,otlp] \ No newline at end of file diff --git a/.github/tracing/otel-collector-dev.yaml b/.github/tracing/otel-collector-dev.yaml deleted file mode 100644 index dd059127b8..0000000000 --- a/.github/tracing/otel-collector-dev.yaml +++ /dev/null @@ -1,20 +0,0 @@ -receivers: - otlp: - protocols: - grpc: - endpoint: "0.0.0.0:4317" - http: - endpoint: "0.0.0.0:3100" -exporters: - otlp: - endpoint: tempo:4317 - tls: - insecure: true -service: - telemetry: - logs: - level: "debug" # Set log level to debug - pipelines: - traces: - receivers: [otlp] - exporters: [otlp] \ No newline at end of file diff --git a/.github/tracing/replay.sh b/.github/tracing/replay.sh deleted file mode 100644 index b2e564567c..0000000000 --- a/.github/tracing/replay.sh +++ /dev/null @@ -1,6 +0,0 @@ -# Read JSON file and loop through each trace -while IFS= read -r trace; do - curl -X POST http://localhost:3100/v1/traces \ - -H "Content-Type: application/json" \ - -d "$trace" -done < "trace-data" diff --git a/.github/tracing/tempo.yaml b/.github/tracing/tempo.yaml deleted file mode 100644 index aa8c0ecbf0..0000000000 --- a/.github/tracing/tempo.yaml +++ /dev/null @@ -1,24 +0,0 @@ -server: - http_listen_port: 3200 - -distributor: - receivers: - otlp: - protocols: - http: - grpc: - -ingester: - max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally - -compactor: - compaction: - block_retention: 1h # overall Tempo trace retention. set for demo purposes - -storage: - trace: - backend: local # backend configuration to use - wal: - path: /tmp/tempo/wal # where to store the wal locally - local: - path: /tmp/tempo/blocks \ No newline at end of file diff --git a/.github/workflows/auto-update.yml b/.github/workflows/auto-update.yml deleted file mode 100644 index 963145c404..0000000000 --- a/.github/workflows/auto-update.yml +++ /dev/null @@ -1,17 +0,0 @@ -name: Auto Update -on: - push: - branches: - - develop -jobs: - autoupdate: - name: Auto Update - runs-on: ubuntu-latest - steps: - - uses: docker://chinthakagodawita/autoupdate-action:v1 - env: - GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" - PR_FILTER: "labelled" - PR_LABELS: "auto-update" - MERGE_MSG: "Branch was auto-updated." 
- MERGE_CONFLICT_ACTION: "ignore" diff --git a/.github/workflows/automation-benchmark-tests.yml b/.github/workflows/automation-benchmark-tests.yml deleted file mode 100644 index 7d46b8e0c2..0000000000 --- a/.github/workflows/automation-benchmark-tests.yml +++ /dev/null @@ -1,54 +0,0 @@ -name: Automation Benchmark Test -on: - workflow_dispatch: - inputs: - test_config_override_path: - description: Path to a test config file used to override the default test config - required: false - type: string - test_secrets_override_key: - description: Key to run tests with custom test secrets - required: false - type: string - slackMemberID: - description: Notifies test results (Not your @) - required: true - default: U02Q14G80TY - type: string - testType: - description: Type of test to run (benchmark, soak) - required: true - default: benchmark - type: string - -jobs: - run-e2e-tests-workflow: - name: Run E2E Tests - uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@aad83f232743646faa35f5ac03ee3829148d37ce - with: - test_path: .github/e2e-tests.yml - test_ids: '${{ inputs.testType }}/automation_test.go:TestAutomationBenchmark' - test_config_override_path: ${{ inputs.test_config_override_path }} - SLACK_USER: ${{ inputs.slackMemberID }} - SLACK_CHANNEL: C03KJ5S7KEK - secrets: - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - QA_PYROSCOPE_INSTANCE: ${{ secrets.QA_PYROSCOPE_INSTANCE }} - QA_PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }} - QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} - GRAFANA_INTERNAL_TENANT_ID: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - GRAFANA_INTERNAL_BASIC_AUTH: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - GRAFANA_INTERNAL_HOST: ${{ secrets.GRAFANA_INTERNAL_HOST }} - GRAFANA_INTERNAL_URL_SHORTENER_TOKEN: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN }} - LOKI_TENANT_ID: ${{ secrets.LOKI_TENANT_ID }} - LOKI_URL: ${{ secrets.LOKI_URL }} - LOKI_BASIC_AUTH: ${{ secrets.LOKI_BASIC_AUTH }} - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN: ${{ secrets.AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN }} - AWS_API_GW_HOST_GRAFANA: ${{ secrets.AWS_API_GW_HOST_GRAFANA }} - TEST_SECRETS_OVERRIDE_BASE64: ${{ secrets[inputs.test_secrets_override_key] }} - SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} - SLACK_API_KEY: ${{ secrets.QA_SLACK_API_KEY }} diff --git a/.github/workflows/automation-load-tests.yml b/.github/workflows/automation-load-tests.yml deleted file mode 100644 index 5d37e81c14..0000000000 --- a/.github/workflows/automation-load-tests.yml +++ /dev/null @@ -1,49 +0,0 @@ -name: Automation Load Test -on: - workflow_dispatch: - inputs: - test_config_override_path: - description: Path to a test config file used to override the default test config - required: false - type: string - test_secrets_override_key: - description: 'Key to run tests with custom test secrets' - required: false - type: string - slackMemberID: - description: Notifies test results (Not your @) - required: true - default: U02Q14G80TY - type: string - -jobs: - run-e2e-tests-workflow: - name: Run E2E Tests - uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@aad83f232743646faa35f5ac03ee3829148d37ce - with: - test_path: .github/e2e-tests.yml - test_ids: 'load/automationv2_1/automationv2_1_test.go:TestLogTrigger' - test_config_override_path: ${{ inputs.test_config_override_path }} - SLACK_USER: ${{ 
inputs.slackMemberID }} - SLACK_CHANNEL: C03KJ5S7KEK - secrets: - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - QA_PYROSCOPE_INSTANCE: ${{ secrets.QA_PYROSCOPE_INSTANCE }} - QA_PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }} - QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} - GRAFANA_INTERNAL_TENANT_ID: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - GRAFANA_INTERNAL_BASIC_AUTH: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - GRAFANA_INTERNAL_HOST: ${{ secrets.GRAFANA_INTERNAL_HOST }} - GRAFANA_INTERNAL_URL_SHORTENER_TOKEN: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN }} - LOKI_TENANT_ID: ${{ secrets.LOKI_TENANT_ID }} - LOKI_URL: ${{ secrets.LOKI_URL }} - LOKI_BASIC_AUTH: ${{ secrets.LOKI_BASIC_AUTH }} - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN: ${{ secrets.AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN }} - AWS_API_GW_HOST_GRAFANA: ${{ secrets.AWS_API_GW_HOST_GRAFANA }} - TEST_SECRETS_OVERRIDE_BASE64: ${{ secrets[inputs.test_secrets_override_key] }} - SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} - SLACK_API_KEY: ${{ secrets.QA_SLACK_API_KEY }} diff --git a/.github/workflows/automation-nightly-tests.yml b/.github/workflows/automation-nightly-tests.yml deleted file mode 100644 index 1ff80cff3c..0000000000 --- a/.github/workflows/automation-nightly-tests.yml +++ /dev/null @@ -1,41 +0,0 @@ -name: Automation Nightly Tests -on: - schedule: - - cron: "0 0 * * *" # Run nightly - push: - tags: - - "*" - workflow_dispatch: - -jobs: - run-e2e-tests-workflow: - name: Run E2E Tests - uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@aad83f232743646faa35f5ac03ee3829148d37ce - with: - test_path: .github/e2e-tests.yml - test_trigger: Automation Nightly Tests - chainlink_version: ${{ github.sha }} - slack_notification_after_tests: true - slack_notification_after_tests_channel_id: "#automation-test-notifications" - slack_notification_after_tests_name: Automation Nightly E2E Tests - # slack_notification_after_tests_notify_user_id_on_failure: U0XXXXXXX - secrets: - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - QA_PYROSCOPE_INSTANCE: ${{ secrets.QA_PYROSCOPE_INSTANCE }} - QA_PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }} - QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} - GRAFANA_INTERNAL_TENANT_ID: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - GRAFANA_INTERNAL_BASIC_AUTH: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - GRAFANA_INTERNAL_HOST: ${{ secrets.GRAFANA_INTERNAL_HOST }} - GRAFANA_INTERNAL_URL_SHORTENER_TOKEN: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN }} - LOKI_TENANT_ID: ${{ secrets.LOKI_TENANT_ID }} - LOKI_URL: ${{ secrets.LOKI_URL }} - LOKI_BASIC_AUTH: ${{ secrets.LOKI_BASIC_AUTH }} - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN: ${{ secrets.AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN }} - AWS_API_GW_HOST_GRAFANA: ${{ secrets.AWS_API_GW_HOST_GRAFANA }} - TEST_SECRETS_OVERRIDE_BASE64: ${{ secrets[inputs.test_secrets_override_key] }} - SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} diff --git a/.github/workflows/automation-ondemand-tests.yml b/.github/workflows/automation-ondemand-tests.yml deleted file mode 100644 index 053a0e9147..0000000000 --- a/.github/workflows/automation-ondemand-tests.yml +++ 
/dev/null @@ -1,182 +0,0 @@ -name: Run Automation On Demand Tests - -on: - workflow_dispatch: - inputs: - chainlinkVersionUpdate: - description: Chainlink image version to upgrade to (Leave empty to build from head/ref) - required: false - type: string - chainlinkImageUpdate: - description: Chainlink image repo to upgrade to - options: - - QA_ECR - - public.ecr.aws/chainlink/chainlink - type: choice - chainlinkVersion: - description: Chainlink image version to use initially for upgrade test - default: latest - required: true - type: string - chainlinkImage: - description: Chainlink image repo to use initially for upgrade test - required: true - options: - - public.ecr.aws/chainlink/chainlink - - QA_ECR - type: choice - enableChaos: - description: Check to enable chaos tests - type: boolean - default: false - required: true - enableReorg: - description: Check to enable reorg tests - type: boolean - default: false - required: true - with_existing_remote_runner_version: - description: 'Tag of the existing remote runner version to use (Leave empty to build from head/ref)' - required: false - type: string - -jobs: - # Set tests to run based on the workflow inputs - set-tests-to-run: - name: Set tests to run - runs-on: ubuntu-latest - outputs: - test_list: ${{ steps.set-tests.outputs.test_list }} - require_chainlink_image_versions_in_qa_ecr: ${{ steps.determine-chainlink-image-check.outputs.require_chainlink_image_versions_in_qa_ecr }} - steps: - - name: Determine build to use - id: determine-build - shell: bash - run: | - if [[ "${{ inputs.chainlinkImage }}" == "QA_ECR" ]]; then - echo "image='{{ env.QA_CHAINLINK_IMAGE }}'" >> $GITHUB_ENV - else - echo "image=${{ inputs.chainlinkImage }}" >> $GITHUB_ENV - fi - if [[ "${{ inputs.chainlinkImageUpdate }}" == "QA_ECR" ]]; then - echo "upgrade_image='{{ env.QA_CHAINLINK_IMAGE }}'" >> $GITHUB_ENV - else - echo "upgrade_image=${{ inputs.chainlinkImageUpdate }}" >> $GITHUB_ENV - fi - if [[ -z "${{ inputs.chainlinkVersion }}" ]] && [[ "${{ inputs.chainlinkImage }}" == "QA_ECR" ]]; then - echo "version=${{ github.sha }}" >> $GITHUB_ENV - else - echo "version=${{ inputs.chainlinkVersion }}" >> $GITHUB_ENV - fi - if [[ -z "${{ inputs.chainlinkVersionUpdate }}" ]] && [[ "${{ inputs.chainlinkImageUpdate }}" == "QA_ECR" ]]; then - echo "upgrade_version=${{ github.sha }}" >> $GITHUB_ENV - else - echo "upgrade_version=${{ inputs.chainlinkVersionUpdate }}" >> $GITHUB_ENV - fi - - name: Check if chainlink image check required - id: determine-chainlink-image-check - shell: bash - run: | - chainlink_image_versions="" - if [ "${{ github.event.inputs.chainlinkImage }}" = "QA_ECR" ]; then - chainlink_image_versions+="${{ env.version }}," - fi - if [ "${{ github.event.inputs.chainlinkImageUpdate }}" = "QA_ECR" ]; then - chainlink_image_versions+="${{ env.upgrade_version }}" - fi - echo "require_chainlink_image_versions_in_qa_ecr=$chainlink_image_versions" >> $GITHUB_OUTPUT - - name: Set tests to run - id: set-tests - run: | - - # Always run upgrade tests - cat > test_list.yaml <> test_list.yaml <> test_list.yaml <> $GITHUB_OUTPUT - - call-run-e2e-tests-workflow: - name: Run E2E Tests - needs: set-tests-to-run - uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@aad83f232743646faa35f5ac03ee3829148d37ce - with: - test_path: .github/e2e-tests.yml - test_list: ${{ needs.set-tests-to-run.outputs.test_list }} - require_chainlink_image_versions_in_qa_ecr: ${{ needs.set-tests-to-run.outputs.require_chainlink_image_versions_in_qa_ecr }} - 
with_existing_remote_runner_version: ${{ github.event.inputs.with_existing_remote_runner_version }} - test_log_upload_on_failure: true - test_log_upload_retention_days: 7 - secrets: - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - QA_PYROSCOPE_INSTANCE: ${{ secrets.QA_PYROSCOPE_INSTANCE }} - QA_PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }} - QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} - GRAFANA_INTERNAL_TENANT_ID: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - GRAFANA_INTERNAL_BASIC_AUTH: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - GRAFANA_INTERNAL_HOST: ${{ secrets.GRAFANA_INTERNAL_HOST }} - GRAFANA_INTERNAL_URL_SHORTENER_TOKEN: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN }} - LOKI_TENANT_ID: ${{ secrets.LOKI_TENANT_ID }} - LOKI_URL: ${{ secrets.LOKI_URL }} - LOKI_BASIC_AUTH: ${{ secrets.LOKI_BASIC_AUTH }} - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN: ${{ secrets.AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN }} - AWS_API_GW_HOST_GRAFANA: ${{ secrets.AWS_API_GW_HOST_GRAFANA }} - diff --git a/.github/workflows/bash-scripts.yml b/.github/workflows/bash-scripts.yml deleted file mode 100644 index b27def4b5e..0000000000 --- a/.github/workflows/bash-scripts.yml +++ /dev/null @@ -1,37 +0,0 @@ -name: Bash Scripts - -on: - push: - branches: - - this-workflow-is-disabled-for-ccip - -jobs: - changes: - name: detect changes - runs-on: ubuntu-latest - outputs: - bash-scripts-src: ${{ steps.bash-scripts.outputs.src }} - steps: - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 - id: bash-scripts - with: - filters: | - src: - - 'tools/bin/**' - - '.github/workflows/bash-scripts.yml' - shellcheck: - name: ShellCheck Lint - runs-on: ubuntu-latest - needs: [changes] - steps: - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - name: Run ShellCheck - if: needs.changes.outputs.bash-scripts-src == 'true' - uses: ludeeus/action-shellcheck@00cae500b08a931fb5698e11e79bfbd38e612a38 # v2.0.0 - with: - scandir: "./tools/bin" - # Consider changing this to check for warnings once all warnings are fixed. 
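Returning to the on-demand automation workflow above: its "Determine build to use" step resolves the image/version inputs, substituting the internal QA image when `QA_ECR` is selected and falling back to the commit SHA when no version is given. A standalone sketch of that mapping — the type and function names are hypothetical, and it assumes the same rule applies to the initial and the upgrade image, as the step does for both:

```go
package main

import "fmt"

// buildRef captures the resolved image and version for one side of the
// upgrade test (initial or upgrade). Names are illustrative only.
type buildRef struct {
	Image   string
	Version string
}

// resolveBuild mirrors the step's logic: "QA_ECR" means use the internal QA
// image, and an empty version then falls back to the commit SHA under test.
func resolveBuild(imageInput, versionInput, qaImage, commitSHA string) buildRef {
	ref := buildRef{Image: imageInput, Version: versionInput}
	if imageInput == "QA_ECR" {
		ref.Image = qaImage
		if versionInput == "" {
			ref.Version = commitSHA
		}
	}
	return ref
}

func main() {
	initial := resolveBuild("public.ecr.aws/chainlink/chainlink", "latest", "qa-ecr/chainlink", "abc1234")
	upgrade := resolveBuild("QA_ECR", "", "qa-ecr/chainlink", "abc1234")
	fmt.Printf("initial: %+v\nupgrade: %+v\n", initial, upgrade)
}
```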
- severity: error diff --git a/.github/workflows/build-publish-develop-pr.yml b/.github/workflows/build-publish-develop-pr.yml deleted file mode 100644 index 7f5a285bb4..0000000000 --- a/.github/workflows/build-publish-develop-pr.yml +++ /dev/null @@ -1,167 +0,0 @@ -name: "Build and Publish GoReleaser" - -on: - pull_request: - # The default types are opened, synchronize, and reopened - - # See https://docs.github.com/en/actions/writing-workflows/choosing-when-your-workflow-runs/events-that-trigger-workflows#pull_request - # We add a label trigger too, since when the build-publish label is added to a PR, we want to build and publish - types: - - opened - - synchronize - - reopened - - labeled - push: - branches: - - develop - workflow_dispatch: - inputs: - git_ref: - description: "The git ref to check out" - required: true - build-publish: - description: "Whether to build and publish - defaults to just build" - required: false - default: "false" - -env: - GIT_REF: ${{ github.event.inputs.git_ref || github.ref }} - -jobs: - merge: - runs-on: ubuntu-latest - needs: [split, image-tag] - if: ${{ needs.image-tag.outputs.release-type == 'nightly' }} - permissions: - id-token: write - contents: read - steps: - - name: Checkout repository - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - ref: ${{ env.GIT_REF }} - - - name: Configure aws credentials - uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 - with: - role-to-assume: ${{ secrets.AWS_OIDC_IAM_ROLE_PUBLISH_PR_ARN }} - aws-region: ${{ secrets.AWS_REGION }} - mask-aws-account-id: true - role-session-name: "merge" - - - uses: actions/cache/restore@v4 - with: - path: dist/linux_amd64_v1 - key: chainlink-amd64-${{ github.sha }} - fail-on-cache-miss: true - - - uses: actions/cache/restore@v4 - with: - path: dist/linux_arm64 - key: chainlink-arm64-${{ github.sha }} - fail-on-cache-miss: true - - - name: Merge images for both architectures - uses: ./.github/actions/goreleaser-build-sign-publish - with: - docker-registry: ${{ secrets.AWS_SDLC_ECR_HOSTNAME }} - docker-image-tag: ${{ needs.image-tag.outputs.image-tag }} - goreleaser-release-type: "merge" - goreleaser-config: .goreleaser.develop.yaml - goreleaser-key: ${{ secrets.GORELEASER_KEY }} - - split: - name: "split-${{ matrix.goarch }}" - needs: image-tag - runs-on: ${{ matrix.runner }} - permissions: - id-token: write - contents: read - strategy: - fail-fast: false - matrix: - include: - - runner: ubuntu-latest - goarch: amd64 - dist_name: linux_amd64_v1 - - - runner: ubuntu-24.04-4cores-16GB-ARM - goarch: arm64 - dist_name: linux_arm64 - steps: - - name: Checkout repository - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - ref: ${{ env.GIT_REF }} - fetch-depth: 0 - - - name: Configure aws credentials - uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 - with: - role-to-assume: ${{ secrets.AWS_OIDC_IAM_ROLE_PUBLISH_PR_ARN }} - aws-region: ${{ secrets.AWS_REGION }} - mask-aws-account-id: true - role-session-name: "split-${{ matrix.goarch }}" - - - id: cache - uses: actions/cache@v4 - with: - path: dist/${{ matrix.dist_name }} - key: chainlink-${{ matrix.goarch }}-${{ github.sha }} - - - name: Build images for ${{ matrix.goarch }} - uses: ./.github/actions/goreleaser-build-sign-publish - if: steps.cache.outputs.cache-hit != 'true' - with: - docker-registry: ${{ secrets.AWS_SDLC_ECR_HOSTNAME }} - docker-image-tag: ${{ 
needs.image-tag.outputs.image-tag }} - goreleaser-release-type: ${{ needs.image-tag.outputs.release-type }} - goreleaser-config: .goreleaser.develop.yaml - goreleaser-key: ${{ secrets.GORELEASER_KEY }} - - - name: Collect Metrics - if: always() - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: goreleaser-build-publish - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: "split-${{ matrix.goarch }}" - continue-on-error: true - - image-tag: - runs-on: ubuntu-latest - outputs: - image-tag: ${{ steps.get-image-tag.outputs.image-tag }} - release-type: ${{ steps.get-image-tag.outputs.release-type }} - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - ref: ${{ env.GIT_REF }} - - - name: Get image tag - id: get-image-tag - run: | - short_sha=$(git rev-parse --short HEAD) - echo "release-type=snapshot" | tee -a $GITHUB_OUTPUT - if [[ ${{ github.event_name }} == 'push' ]]; then - echo "image-tag=develop" | tee -a $GITHUB_OUTPUT - echo "release-type=nightly" | tee -a $GITHUB_OUTPUT - elif [[ ${{ github.event_name }} == 'workflow_dispatch' ]]; then - echo "image-tag=${short_sha}" | tee -a $GITHUB_OUTPUT - if [[ "${{ inputs.build-publish }}" == 'false' ]]; then - echo "release-type=snapshot" | tee -a $GITHUB_OUTPUT - else - echo "release-type=nightly" | tee -a $GITHUB_OUTPUT - fi - else - if [[ ${{ github.event_name }} == "pull_request" ]]; then - echo "image-tag=pr-${{ github.event.number }}-${short_sha}" | tee -a $GITHUB_OUTPUT - if [[ ${{ contains(github.event.pull_request.labels.*.name, 'build-publish') }} == "true" ]]; then - echo "release-type=nightly" | tee -a $GITHUB_OUTPUT - fi - fi - fi diff --git a/.github/workflows/build-publish-goreleaser.yml b/.github/workflows/build-publish-goreleaser.yml deleted file mode 100644 index f19df8cb0b..0000000000 --- a/.github/workflows/build-publish-goreleaser.yml +++ /dev/null @@ -1,150 +0,0 @@ -name: "Goreleaser Chainlink" - -on: - push: - tags: - - "goreleaser-v*" - -env: - ECR_HOSTNAME: public.ecr.aws - -jobs: - checks: - name: "Checks" - runs-on: ubuntu-20.04 - steps: - - name: Checkout repository - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - name: Check for VERSION file bump on tags - # Avoids checking VERSION file bump on forks. 
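The "Get image tag" step in `build-publish-develop-pr.yml` above is effectively a small decision table over the trigger event: pushes to develop publish a nightly `develop` tag, manual runs use the short SHA and publish only when the build-publish input is set, and PRs get a `pr-<number>-<sha>` tag published only when the build-publish label is present. Sketched in Go for clarity (the signature is invented; the tag and release-type values mirror the step's outputs):

```go
package main

import "fmt"

// imageTag reproduces the step's decisions; a "snapshot" release type means
// the image is built but not published, "nightly" means it is published.
func imageTag(event, shortSHA string, prNumber int, buildPublishInput, hasBuildPublishLabel bool) (tag, releaseType string) {
	releaseType = "snapshot" // default: build only
	switch event {
	case "push": // push to develop
		tag = "develop"
		releaseType = "nightly"
	case "workflow_dispatch":
		tag = shortSHA
		if buildPublishInput {
			releaseType = "nightly"
		}
	case "pull_request":
		tag = fmt.Sprintf("pr-%d-%s", prNumber, shortSHA)
		if hasBuildPublishLabel {
			releaseType = "nightly"
		}
	}
	return tag, releaseType
}

func main() {
	tag, rel := imageTag("pull_request", "abc1234", 1234, false, true)
	fmt.Println(tag, rel) // pr-1234-abc1234 nightly
}
```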
- if: ${{ github.repository == 'smartcontractkit/chainlink' }} - uses: ./.github/actions/version-file-bump - with: - github-token: ${{ secrets.GITHUB_TOKEN }} - - # The main differences between this workflow and the develop one are: - # - Goreleaser pipeline only runs on tags - # - We only build ccip OR chainlink, not both - goreleaser-merge: - needs: [goreleaser-split] - name: merge - runs-on: ubuntu-latest - environment: build-publish - permissions: - id-token: write - contents: read - attestations: write - steps: - - name: Checkout repository - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - fetch-depth: 0 - - - name: Configure aws credentials - uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 - with: - role-to-assume: ${{ secrets.AWS_OIDC_IAM_ROLE_ARN }} - role-duration-seconds: ${{ secrets.AWS_ROLE_DURATION_SECONDS }} - aws-region: ${{ secrets.AWS_REGION }} - mask-aws-account-id: true - role-session-name: goreleaser-build-sign-publish-chainlink - - - uses: actions/cache/restore@v4 - with: - path: dist/linux_amd64_v1 - # We use ref_name here and not in develop b/c develop builds both ccip and chainlink - # whereas here we only build one or the other - key: chainlink-amd64-${{ github.sha }}-${{ github.ref_name }} - fail-on-cache-miss: true - - - uses: actions/cache/restore@v4 - with: - path: dist/linux_arm64 - key: chainlink-arm64-${{ github.sha }}-${{ github.ref_name }} - fail-on-cache-miss: true - - - name: Merge images for both architectures - id: goreleaser-build-sign-publish - uses: ./.github/actions/goreleaser-build-sign-publish - with: - docker-registry: ${{ env.ECR_HOSTNAME }} - docker-image-tag: ${{ github.ref_name }} - goreleaser-config: .goreleaser.production.yaml - goreleaser-release-type: merge - goreleaser-key: ${{ secrets.GORELEASER_KEY }} - - - name: Collect Metrics - if: always() - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: goreleaser-build-chainlink-publish - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: merge - continue-on-error: true - - goreleaser-split: - name: "split-${{ matrix.goarch }}" - needs: [checks] - runs-on: ${{ matrix.runner }} - strategy: - fail-fast: false - matrix: - include: - - runner: ubuntu-latest - goarch: amd64 - dist_name: linux_amd64_v1 - - - runner: ubuntu-24.04-4cores-16GB-ARM - goarch: arm64 - dist_name: linux_arm64 - environment: build-publish - permissions: - id-token: write - contents: write - steps: - - name: Checkout repository - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - fetch-depth: 0 - - - name: Configure aws credentials - uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 - with: - role-to-assume: ${{ secrets.AWS_OIDC_IAM_ROLE_ARN }} - role-duration-seconds: ${{ secrets.AWS_ROLE_DURATION_SECONDS }} - aws-region: ${{ secrets.AWS_REGION }} - mask-aws-account-id: true - role-session-name: goreleaser-build-sign-publish-chainlink - - - id: cache - uses: actions/cache@v4 - with: - path: dist/${{ matrix.dist_name }} - # We use ref_name here and not in develop b/c develop builds both ccip and chainlink - # whereas here we only build one or the other - key: chainlink-${{ matrix.goarch }}-${{ github.sha }}-${{ github.ref_name }} - - - name: Build images for ${{ 
matrix.goarch }} - if: steps.cache.outputs.cache-hit != 'true' - uses: ./.github/actions/goreleaser-build-sign-publish - with: - docker-registry: ${{ env.ECR_HOSTNAME }} - docker-image-tag: ${{ github.ref_name }} - goreleaser-release-type: release - goreleaser-config: .goreleaser.production.yaml - goreleaser-key: ${{ secrets.GORELEASER_KEY }} - - - name: Collect Metrics - if: always() - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: goreleaser-build-chainlink-publish - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: split-${{ matrix.goarch }} - continue-on-error: true diff --git a/.github/workflows/build-publish.yml b/.github/workflows/build-publish.yml deleted file mode 100644 index 59f0ddc907..0000000000 --- a/.github/workflows/build-publish.yml +++ /dev/null @@ -1,114 +0,0 @@ -name: "Build, Sign and Publish Chainlink" - -on: - push: - tags: - - "v*" - -env: - ECR_HOSTNAME: public.ecr.aws - ECR_IMAGE_NAME: chainlink/chainlink - -jobs: - checks: - name: "Checks" - runs-on: ubuntu-20.04 - steps: - - name: Checkout repository - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - name: Check for VERSION file bump on tags - # Avoids checking VERSION file bump on forks. - if: ${{ github.repository == 'smartcontractkit/chainlink' }} - uses: ./.github/actions/version-file-bump - with: - github-token: ${{ secrets.GITHUB_TOKEN }} - - build-sign-publish-chainlink: - needs: [checks] - runs-on: ubuntu-20.04 - environment: build-publish - permissions: - id-token: write - contents: write - attestations: write - outputs: - docker-image-tag: ${{ steps.build-sign-publish.outputs.docker-image-tag }} - docker-image-digest: ${{ steps.build-sign-publish.outputs.docker-image-digest }} - steps: - - name: Checkout repository - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - - name: Build, sign and publish chainlink image - id: build-sign-publish - uses: ./.github/actions/build-sign-publish-chainlink - with: - publish: true - aws-role-to-assume: ${{ secrets.AWS_OIDC_IAM_ROLE_ARN }} - aws-role-duration-seconds: ${{ secrets.AWS_ROLE_DURATION_SECONDS }} - aws-region: ${{ secrets.AWS_REGION }} - ecr-hostname: ${{ env.ECR_HOSTNAME }} - ecr-image-name: ${{ env.ECR_IMAGE_NAME }} - dockerhub_username: ${{ secrets.DOCKERHUB_READONLY_USERNAME }} - dockerhub_password: ${{ secrets.DOCKERHUB_READONLY_PASSWORD }} - sign-images: true - verify-signature: true - - - name: Attest Docker image - uses: actions/attest-build-provenance@6149ea5740be74af77f260b9db67e633f6b0a9a1 # v1.4.2 - with: - subject-digest: ${{ steps.build-sign-publish.outputs.docker-image-digest }} - subject-name: ${{ env.ECR_HOSTNAME }}/${{ env.ECR_IMAGE_NAME }} - push-to-registry: true - - - name: Collect Metrics - if: always() - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: build-chainlink-publish - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: build-sign-publish-chainlink - continue-on-error: true - - # Notify Slack channel for new git tags. 
- slack-notify: - if: github.ref_type == 'tag' - needs: [build-sign-publish-chainlink] - runs-on: ubuntu-24.04 - environment: build-publish - steps: - - name: Checkout repository - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - name: Notify Slack - uses: smartcontractkit/.github/actions/slack-notify-git-ref@31e00facdd8f57a2bc7868b5e4c8591bf2aa3727 # slack-notify-git-ref@0.1.2 - with: - slack-channel-id: ${{ secrets.SLACK_CHANNEL_RELEASE_NOTIFICATIONS }} - slack-bot-token: ${{ secrets.SLACK_BOT_TOKEN_RELENG }} # Releng Bot - git-ref: ${{ github.ref_name }} - git-ref-type: ${{ github.ref_type }} - changelog-url: >- - ${{ - github.ref_type == 'tag' && - format( - 'https://github.com/{0}/blob/{1}/CHANGELOG.md', - github.repository, - github.ref_name - ) || '' - }} - docker-image-name: >- - ${{ - github.ref_type == 'tag' && - format( - '{0}/{1}:{2}', - env.ECR_HOSTNAME, - env.ECR_IMAGE_NAME, - needs.build-sign-publish-chainlink.outputs.docker-image-tag - ) || '' - }} - docker-image-digest: >- - ${{ - github.ref_type == 'tag' && - needs.build-sign-publish-chainlink.outputs.docker-image-digest || '' - }} diff --git a/.github/workflows/ccip-chaos-tests.yml b/.github/workflows/ccip-chaos-tests.yml deleted file mode 100644 index 6e36e14ef0..0000000000 --- a/.github/workflows/ccip-chaos-tests.yml +++ /dev/null @@ -1,45 +0,0 @@ -name: CCIP Chaos Tests -on: - workflow_run: - workflows: [ CCIP Load Test ] - types: [ completed ] - branches: [ ccip-develop ] - workflow_dispatch: - -# Only run 1 of this workflow at a time per PR -concurrency: - group: chaos-ccip-tests-chainlink-${{ github.ref }} - cancel-in-progress: true - -jobs: - run-e2e-tests-workflow: - name: Run E2E Tests - uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@aad83f232743646faa35f5ac03ee3829148d37ce - with: - test_path: .github/e2e-tests.yml - chainlink_version: ${{ github.sha }} - require_chainlink_image_versions_in_qa_ecr: ${{ github.sha }} - test_trigger: E2E CCIP Chaos Tests - test_log_level: debug - slack_notification_after_tests: on_failure - slack_notification_after_tests_channel_id: '#ccip-testing' - slack_notification_after_tests_name: CCIP Chaos E2E Tests - secrets: - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - QA_PYROSCOPE_INSTANCE: ${{ secrets.QA_PYROSCOPE_INSTANCE }} - QA_PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }} - QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} - GRAFANA_INTERNAL_TENANT_ID: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - GRAFANA_INTERNAL_BASIC_AUTH: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - GRAFANA_INTERNAL_HOST: ${{ secrets.GRAFANA_INTERNAL_HOST }} - GRAFANA_INTERNAL_URL_SHORTENER_TOKEN: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN }} - LOKI_TENANT_ID: ${{ secrets.LOKI_TENANT_ID }} - LOKI_URL: ${{ secrets.LOKI_URL }} - LOKI_BASIC_AUTH: ${{ secrets.LOKI_BASIC_AUTH }} - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN: ${{ secrets.AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN }} - AWS_API_GW_HOST_GRAFANA: ${{ secrets.AWS_API_GW_HOST_GRAFANA }} - SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} diff --git a/.github/workflows/ccip-client-compatibility-tests.yml b/.github/workflows/ccip-client-compatibility-tests.yml deleted file mode 100644 index b21ef82bab..0000000000 --- a/.github/workflows/ccip-client-compatibility-tests.yml +++ /dev/null @@ -1,739 +0,0 @@ 
-name: CCIP Client Compatibility Tests -on: - schedule: - - cron: "30 5 * * TUE,FRI" # Run every Tuesday and Friday at midnight + 30min EST - push: - tags: - - "*" - merge_group: - pull_request: - workflow_dispatch: - inputs: - chainlinkVersion: - description: commit SHA or tag of the Chainlink version to test - required: true - type: string - evmImplementations: - description: comma separated list of EVM implementations to test (ignored if base64TestList is used) - required: true - type: string - default: "geth,besu,nethermind,erigon" - latestVersionsNumber: - description: how many of latest images of EVM implementations to test with (ignored if base64TestList is used) - required: true - type: number - default: 3 - base64TestList: - description: base64 encoded list of tests to run (same as base64-ed output of testlistgenerator tool) - required: false - type: string - -env: - CHAINLINK_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink - INTERNAL_DOCKER_REPO: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com - MOD_CACHE_VERSION: 2 - -jobs: - # Build Test Dependencies - - check-dependency-bump: - name: Check for go-ethereum dependency bump - if: github.event_name == 'pull_request' || github.event_name == 'merge_queue' - runs-on: ubuntu-latest - outputs: - dependency_changed: ${{ steps.changes.outputs.dependency_changed }} - steps: - - name: Checkout code - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - repository: smartcontractkit/ccip - fetch-depth: 0 - - name: Check for go.mod changes - id: changes - run: | - git fetch origin ${{ github.base_ref }} - # if no match is found then grep exits with code 1, but if there is a match it exits with code 0 - # this will return a match if there are any changes on that corresponding line, for example if spacing was changed - DEPENDENCY_CHANGED=$(git diff -U0 origin/${{ github.base_ref }}...HEAD -- go.mod | grep -q 'github.com/ethereum/go-ethereum'; echo $?) - PR_VERSION=$(grep 'github.com/ethereum/go-ethereum' go.mod | awk '{print $2}') - - # here 0 means a match was found, 1 means no match was found - if [ "$DEPENDENCY_CHANGED" -eq 0 ]; then - # Dependency was changed in the PR, now compare with the base branch - git fetch origin ${{ github.base_ref }} - BASE_VERSION=$(git show origin/${{ github.base_ref }}:go.mod | grep 'github.com/ethereum/go-ethereum' | awk '{print $2}') - - echo "Base branch version: $BASE_VERSION" - echo "PR branch version: $PR_VERSION" - - echo "Dependency version changed in the PR compared to the base branch." - echo "dependency_changed=true" >> $GITHUB_OUTPUT - else - echo "No changes to ethereum/go-ethereum dependency in the PR." 
- echo "PR branch version: $PR_VERSION" - echo "dependency_changed=false" >> $GITHUB_OUTPUT - fi - - should-run: - if: always() - name: Check if the job should run - needs: check-dependency-bump - runs-on: ubuntu-latest - outputs: - should_run: ${{ steps.should-run.outputs.should_run }} - eth_implementations : ${{ steps.should-run.outputs.eth_implementations }} - env: - GITHUB_REF_TYPE: ${{ github.ref_type }} - steps: - - name: Check if the job should run - id: should-run - run: | - if [ "${{ needs.check-dependency-bump.outputs.dependency_changed }}" == "true" ]; then - echo "Will run tests, because go-ethereum dependency was bumped" - echo "should_run=true" >> $GITHUB_OUTPUT - elif [ "$GITHUB_EVENT_NAME" = "schedule" ]; then - echo "Will run tests, because trigger event was $GITHUB_EVENT_NAME" - echo "should_run=true" >> $GITHUB_OUTPUT - elif [ "$GITHUB_EVENT_NAME" = "workflow_dispatch" ]; then - echo "Will run tests, because trigger event was $GITHUB_EVENT_NAME" - echo "should_run=true" >> $GITHUB_OUTPUT - elif [ "$GITHUB_REF_TYPE" = "tag" ]; then - echo "Will run tests, because new tag was created" - echo "should_run=true" >> $GITHUB_OUTPUT - else - echo "Will not run tests" - echo "should_run=false" >> $GITHUB_OUTPUT - fi - - select-versions: - if: always() && needs.should-run.outputs.should_run == 'true' - name: Select Versions - needs: should-run - runs-on: ubuntu-latest - env: - RELEASED_DAYS_AGO: 4 - GITHUB_REF_TYPE: ${{ github.ref_type }} - outputs: - evm_implementations : ${{ steps.select-implementations.outputs.evm_implementations }} - chainlink_version: ${{ steps.select-chainlink-version.outputs.chainlink_version }} - latest_image_count: ${{ steps.get-image-count.outputs.image_count }} - steps: - # ghlatestreleasechecker is a tool to check if new release is available for a given repo - - name: Set Up ghlatestreleasechecker - shell: bash - run: | - go install github.com/smartcontractkit/chainlink-testing-framework/tools/ghlatestreleasechecker@v1.0.0 - - name: Select EVM implementations to test - id: select-implementations - run: | - PATH=$PATH:$(go env GOPATH)/bin - export PATH - - if [ "$GITHUB_EVENT_NAME" = "schedule" ]; then - echo "Checking for new releases" - implementations_arr=() - new_geth=$(ghlatestreleasechecker "ethereum/go-ethereum" $RELEASED_DAYS_AGO) - if [ "$new_geth" != "none" ]; then - echo "New geth release found: $new_geth" - implementations_arr+=("geth") - fi - new_besu=$(ghlatestreleasechecker "hyperledger/besu" $RELEASED_DAYS_AGO) - if [ "new_besu" != "none" ]; then - echo "New besu release found: $new_besu" - implementations_arr+=("besu") - fi - new_erigon=$(ghlatestreleasechecker "ledgerwatch/erigon" $RELEASED_DAYS_AGO) - if [ "new_erigon" != "none" ]; then - echo "New erigon release found: $new_erigon" - implementations_arr+=("erigon") - fi - new_nethermind=$(ghlatestreleasechecker "nethermindEth/nethermind" $RELEASED_DAYS_AGO) - if [ "new_nethermind" != "none" ]; then - echo "New nethermind release found: $new_nethermind" - implementations_arr+=("nethermind") - fi - IFS=',' - eth_implementations="${implementations_arr[*]}" - echo "Found new releases for: $eth_implementations" - echo "evm_implementations=$eth_implementations" >> $GITHUB_OUTPUT - elif [ "$GITHUB_EVENT_NAME" = "workflow_dispatch" ]; then - if [ -n "${{ github.event.inputs.base64TestList }}" ]; then - echo "Base64-ed Test Input provided, ignoring EVM implementations" - else - echo "Will test following EVM implementations: ${{ github.event.inputs.evmImplementations }}" - echo 
"evm_implementations=${{ github.event.inputs.evmImplementations }}" >> $GITHUB_OUTPUT - fi - else - echo "Will test all EVM implementations" - echo "evm_implementations=geth,besu,nethermind,erigon" >> $GITHUB_OUTPUT - fi - - name: Select Chainlink CCIP version - id: select-chainlink-version - run: | - PATH=$PATH:$(go env GOPATH)/bin - export PATH - - if [ "$GITHUB_EVENT_NAME" = "schedule" ]; then - echo "Fetching latest Chainlink CCIP stable version" - implementations_arr=() - # we use 100 days since we really want the latest one, and it's highly improbable there won't be a release in last 100 days - chainlink_version=$(ghlatestreleasechecker "smartcontractkit/ccip" 100) - echo "chainlink_version=$chainlink_version" >> $GITHUB_OUTPUT - elif [ "$GITHUB_EVENT_NAME" = "workflow_dispatch" ]; then - echo "Fetching Chainlink version from input" - if [ -n "${{ github.event.inputs.chainlinkVersion }}" ]; then - echo "Chainlink version provided in input" - chainlink_version="${{ github.event.inputs.chainlinkVersion }}" - else - echo "Chainlink version not provided in input. Using latest commit SHA." - chainlink_version=${{ github.sha }} - fi - echo "chainlink_version=$chainlink_version" >> $GITHUB_OUTPUT - elif [ "$GITHUB_EVENT_NAME" = "pull_request" ]; then - echo "Fetching Chainlink version from PR's head commit" - chainlink_version="${{ github.event.pull_request.head.sha }}" - echo "chainlink_version=$chainlink_version" >> $GITHUB_OUTPUT - elif [ "$GITHUB_EVENT_NAME" = "merge_queue" ]; then - echo "Fetching Chainlink version from merge queue's head commit" - chainlink_version="${{ github.event.merge_group.head_sha }}" - echo "chainlink_version=$chainlink_version" >> $GITHUB_OUTPUT - elif [ "$GITHUB_REF_TYPE" = "tag" ]; then - echo "Fetching Chainlink version from tag" - chainlink_version="${{ github.ref_name }}" - echo "chainlink_version=$chainlink_version" >> $GITHUB_OUTPUT - else - echo "Unsupported trigger event. It's probably an issue with the pipeline definition. Please reach out to the Test Tooling team." - exit 1 - fi - echo "Will use following Chainlink version: $chainlink_version" - - name: Get image count - id: get-image-count - run: | - if [ "$GITHUB_EVENT_NAME" = "workflow_dispatch" ]; then - echo "Fetching latest image count from input" - if [ -n "${{ github.event.inputs.base64TestList }}" ]; then - echo "Base64-ed Test Input provided, ignoring latest image count" - else - image_count="${{ github.event.inputs.latestVersionsNumber }}" - echo "image_count=$image_count" >> $GITHUB_OUTPUT - fi - else - echo "Fetching default latest image count" - image_count=3 - echo "image_count=$image_count" >> $GITHUB_OUTPUT - fi - echo "Will use following latest image count: $image_count" - - check-ecr-images-exist: - name: Check images used as test dependencies exist in ECR - if: always() && needs.should-run.outputs.should_run == 'true' - environment: integration - permissions: - id-token: write - contents: read - needs: [should-run] - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - mirror: - - name: ethereum/client-go - expression: '^(alltools-v|v)[0-9]\.[0-9]+\.[0-9]+$' - - name: hyperledger/besu - expression: '^[0-9]+\.[0-9]+(\.[0-9]+)?$' - page_size: 300 - - name: thorax/erigon - expression: '^v[0-9]+\.[0-9]+\.[0-9]+$' - - name: nethermind/nethermind - expression: '^[0-9]+\.[0-9]+\.[0-9]+$' - - name: tofelb/ethereum-genesis-generator - expression: '^[0-9]+\.[0-9]+\.[0-9]+(\-slots\-per\-epoch)?' 
- steps: - - name: Update internal ECR if the latest Ethereum client image does not exist - uses: smartcontractkit/chainlink-testing-framework/.github/actions/update-internal-mirrors@5eea86ee4f7742b4e944561a570a6b268e712d9e # v1.30.3 - with: - aws_region: ${{ secrets.QA_AWS_REGION }} - role_to_assume: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - aws_account_number: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - image_name: ${{matrix.mirror.name}} - expression: ${{matrix.mirror.expression}} - page_size: ${{matrix.mirror.page_size}} - - build-chainlink: - if: always() && needs.should-run.outputs.should_run == 'true' - environment: integration - permissions: - id-token: write - contents: read - name: Build Chainlink Image - needs: [should-run, select-versions] - runs-on: ubuntu-latest - steps: - - name: Collect Metrics - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 - with: - id: client-compatablility-build-chainlink - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Build Chainlink Image - continue-on-error: true - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - repository: smartcontractkit/ccip - ref: ${{ needs.select-versions.outputs.chainlink_version }} - - name: Build Chainlink Image - uses: ./.github/actions/build-chainlink-image - with: - tag_suffix: "" - dockerfile: core/chainlink.Dockerfile - git_commit_sha: ${{ needs.select-versions.outputs.chainlink_version }} - check_image_exists: 'true' - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - - get-latest-available-images: - name: Get Latest EVM Implementation's Images - if: always() && needs.should-run.outputs.should_run == 'true' - environment: integration - runs-on: ubuntu-latest - needs: [check-ecr-images-exist, should-run, select-versions] - permissions: - id-token: write - contents: read - env: - LATEST_IMAGE_COUNT: ${{ needs.select-versions.outputs.latest_image_count }} - outputs: - geth_images: ${{ env.GETH_IMAGES }} - nethermind_images: ${{ env.NETHERMIND_IMAGES }} - besu_images: ${{ env.BESU_IMAGES }} - erigon_images: ${{ env.ERIGON_IMAGES }} - steps: - # Setup AWS creds - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 - with: - aws-region: ${{ secrets.QA_AWS_REGION }} - role-to-assume: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - role-duration-seconds: 3600 - # Login to ECR - - name: Login to Amazon ECR - id: login-ecr - uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1 - with: - mask-password: "true" - env: - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - # ecrimagefetcher is a tool to get latest images from ECR - - name: Set Up ecrimagefetcher - shell: bash - run: | - go install github.com/smartcontractkit/chainlink-testing-framework/tools/ecrimagefetcher@v1.0.1 - - name: Get latest docker images from ECR - if: ${{ github.event.inputs.base64TestList == '' }} - env: - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - ETH_IMPLEMENTATIONS: ${{ needs.select-versions.outputs.evm_implementations }} - run: | - PATH=$PATH:$(go env GOPATH)/bin - export PATH - if [[ "$ETH_IMPLEMENTATIONS" == *"geth"* ]]; then - geth_images=$(ecrimagefetcher 'ethereum/client-go' '^v[0-9]+\.[0-9]+\.[0-9]+$' ${{ env.LATEST_IMAGE_COUNT }}) - echo 
"GETH_IMAGES=$geth_images" >> $GITHUB_ENV - echo "Geth latest images: $geth_images" - fi - - if [[ "$ETH_IMPLEMENTATIONS" == *"nethermind"* ]]; then - nethermind_images=$(ecrimagefetcher 'nethermind/nethermind' '^[0-9]+\.[0-9]+\.[0-9]+$' ${{ env.LATEST_IMAGE_COUNT }}) - echo "NETHERMIND_IMAGES=$nethermind_images" >> $GITHUB_ENV - echo "Nethermind latest images: $nethermind_images" - fi - - if [[ "$ETH_IMPLEMENTATIONS" == *"besu"* ]]; then - # 24.3.3 is ignored as it doesn't support data & input fields in eth_call - besu_images=$(ecrimagefetcher 'hyperledger/besu' '^[0-9]+\.[0-9]+(\.[0-9]+)?$' ${{ env.LATEST_IMAGE_COUNT }} ">=24.5.1") - echo "BESU_IMAGES=$besu_images" >> $GITHUB_ENV - echo "Besu latest images: $besu_images" - fi - - if [[ "$ETH_IMPLEMENTATIONS" == *"erigon"* ]]; then - # 2.60.0 and 2.60.1 are ignored as they stopped working with CL node - erigon_images=$(ecrimagefetcher 'thorax/erigon' '^v[0-9]+\.[0-9]+\.[0-9]+$' ${{ env.LATEST_IMAGE_COUNT }} "> $GITHUB_ENV - echo "Erigon latest images: $erigon_images" - fi - - # End Build Test Dependencies - - prepare-compatibility-matrix: - name: Prepare Compatibility Matrix - if: always() && needs.should-run.outputs.should_run == 'true' - environment: integration - permissions: - checks: write - pull-requests: write - id-token: write - contents: read - needs: [get-latest-available-images,should-run,select-versions] - runs-on: ubuntu-latest - env: - ETH_IMPLEMENTATIONS: ${{ needs.select-versions.outputs.evm_implementations }} - BASE64_TEST_LIST: ${{ github.event.inputs.base64TestList }} - outputs: - matrix: ${{ env.JOB_MATRIX_JSON }} - steps: - - name: Decode Base64 Test List Input if Set - if: env.BASE64_TEST_LIST != '' - run: | - echo "Decoding base64 tests list from the input" - DECODED_BASE64_TEST_LIST=$(echo $BASE64_TEST_LIST | base64 -d) - echo "Decoded input:" - echo "$DECODED_BASE64_TEST_LIST" - is_valid=$(echo "$DECODED_BASE64_TEST_LIST" | jq . > /dev/null 2>&1; echo $?) - if [ "$is_valid" -ne 0 ]; then - echo "Invalid base64 input. Please provide a valid base64 encoded JSON list of tests." 
- echo "Here is an example of valid JSON:" - cat <> $GITHUB_ENV - # testlistgenerator is a tool that builds a matrix of tests to run - - name: Set Up testlistgenerator - if: env.BASE64_TEST_LIST == '' - shell: bash - run: | - go install github.com/smartcontractkit/chainlink-testing-framework/tools/testlistgenerator@v1.1.0 - - name: Prepare matrix input - if: env.BASE64_TEST_LIST == '' - run: | - PATH=$PATH:$(go env GOPATH)/bin - export PATH - - if [[ "$ETH_IMPLEMENTATIONS" == *"geth"* ]]; then - echo "Will test compatibility with geth" - testlistgenerator -o compatibility_test_list.json -p ccip -r TestSmokeCCIPForBidirectionalLane -f './ccip-tests/smoke/ccip_test.go' -e geth -d "${{ needs.get-latest-available-images.outputs.geth_images }}" -t "ccip-geth-compatibility-test" -w "SIMULATED_1,SIMULATED_2" -c 1337,2337 -n ubuntu-latest - else - echo "Will not test compatibility with geth" - fi - - if [[ "$ETH_IMPLEMENTATIONS" == *"besu"* ]]; then - echo "Will test compatibility with besu" - testlistgenerator -o compatibility_test_list.json -p ccip -r TestSmokeCCIPForBidirectionalLane -f './ccip-tests/smoke/ccip_test.go' -e besu -d "${{ needs.get-latest-available-images.outputs.besu_images }}" -t "ccip-besu-compatibility-test" -w "SIMULATED_BESU_NONDEV_1,SIMULATED_BESU_NONDEV_2" -c 1337,2337 -n ubuntu-latest - else - echo "Will not test compatibility with besu" - fi - - # TODO: Waiting for CCIP-2255 to be resolved - if [[ "$ETH_IMPLEMENTATIONS" == *"erigon"* ]]; then - echo "Will test compatibility with erigon" - testlistgenerator -o compatibility_test_list.json -p ccip -r TestSmokeCCIPForBidirectionalLane -f './ccip-tests/smoke/ccip_test.go' -e erigon -d "${{ needs.get-latest-available-images.outputs.erigon_images }}" -t "ccip-erigon-compatibility-test" -w "SIMULATED_1,SIMULATED_2" -c 1337,2337 -n ubuntu-latest - else - echo "Will not test compatibility with erigon" - fi - - # TODO: uncomment when nethermind flake reason is addressed - if [[ "$ETH_IMPLEMENTATIONS" == *"nethermind"* ]]; then - echo "Will not test compatibility with nethermind due to flakiness" - # echo "Will test compatibility with nethermind" - # testlistgenerator -o compatibility_test_list.json -p ccip -r TestSmokeCCIPForBidirectionalLane -f './ccip-tests/smoke/ccip_test.go' -e nethermind -d "${{ needs.get-latest-available-images.outputs.nethermind_images }}" -t "ccip-nethermind-compatibility-test" -w "SIMULATED_1,SIMULATED_2" -c 1337,2337 -n ubuntu-latest - else - echo "Will not test compatibility with nethermind" - fi - - jq . compatibility_test_list.json - echo "Adding human-readable name" - jq 'map(. + {visible_name: (.docker_image | split(",")[0] | split("=")[1])})' compatibility_test_list.json > compatibility_test_list_modified.json - jq . compatibility_test_list_modified.json - JOB_MATRIX_JSON=$(jq -c . 
compatibility_test_list_modified.json) - echo "JOB_MATRIX_JSON=${JOB_MATRIX_JSON}" >> $GITHUB_ENV - - run-client-compatibility-matrix: - name: CCIP Compatibility with ${{ matrix.evm_node.visible_name }} - if: always() && needs.should-run.outputs.should_run == 'true' - environment: integration - permissions: - checks: write - pull-requests: write - id-token: write - contents: read - needs: [build-chainlink, prepare-compatibility-matrix, should-run, select-versions] - env: - CHAINLINK_COMMIT_SHA: ${{ needs.select-versions.outputs.chainlink_version }} - CHAINLINK_ENV_USER: ${{ github.actor }} - TEST_LOG_LEVEL: debug - strategy: - fail-fast: false - matrix: - evm_node: ${{fromJson(needs.prepare-compatibility-matrix.outputs.matrix)}} - runs-on: ubuntu-latest - steps: - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - repository: smartcontractkit/ccip - ref: ${{ needs.select-versions.outputs.chainlink_version }} - - name: Prepare Base64 TOML override - uses: ./.github/actions/setup-create-base64-config - with: - runId: ${{ github.run_id }} - testLogCollect: ${{ vars.TEST_LOG_COLLECT }} - selectedNetworks: ${{ matrix.evm_node.networks }} - chainlinkVersion: ${{ needs.select-versions.outputs.chainlink_version }} - logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} - - name: Prepare Base64 TOML override for CCIP secrets - uses: ./.github/actions/setup-create-base64-config-ccip - id: setup_create_base64_config_ccip - with: - runId: ${{ github.run_id }} - selectedNetworks: ${{ matrix.evm_node.networks }} - testLogCollect: ${{ vars.TEST_LOG_COLLECT }} - chainlinkVersion: ${{ needs.select-versions.outputs.chainlink_version }} - logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} - customEvmNodes: ${{ matrix.evm_node.docker_image }} - - name: Prepare test log name - run: | - replace_special_chars() { - if [ -z "$1" ]; then - echo "Please provide a string as an argument." - return 1 - fi - - local input_string="$1" - - # Replace '/' with '-' - local modified_string="${input_string//\//-}" - - # Replace ':' with '-' - modified_string="${modified_string//:/-}" - - # Replace '.' 
with '-' - modified_string="${modified_string//./-}" - - echo "$modified_string" - } - echo "TEST_LOG_NAME=$(replace_special_chars "ccip-${{ matrix.evm_node.name }}-test-logs")" >> $GITHUB_ENV - - name: Print Test details - ${{ matrix.evm_node.docker_image }} - run: | - echo "EVM Implementation Docker Image: ${{ matrix.evm_node.docker_image }}" - echo "EVM Implementation Networks: ${{ matrix.evm_node.networks }}" - echo "Test identifier: ${{ matrix.evm_node.name }}" - - name: Run Tests - uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@d38226be720c5ccc1ff4d3cee40608ebf264cd59 # v2.3.26 - env: - BASE64_CONFIG_OVERRIDE: ${{ steps.setup_create_base64_config_ccip.outputs.base64_config }} - TEST_BASE64_CONFIG_OVERRIDE: ${{ steps.setup_create_base64_config_ccip.outputs.base64_config }} - with: - test_command_to_run: cd ./integration-tests && go test -timeout 30m -count=1 -json -test.parallel=2 ${{ matrix.evm_node.run }} 2>&1 | tee /tmp/gotest.log | gotestloghelper -ci - test_download_vendor_packages_command: cd ./integration-tests && go mod download - aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - artifacts_name: ${{ env.TEST_LOG_NAME }} - artifacts_location: | - ./integration-tests/smoke/logs/ - ./integration-tests/ccip-tests/smoke/logs/* - /tmp/gotest.log - publish_check_name: ${{ matrix.evm_node.name }} - token: ${{ secrets.GITHUB_TOKEN }} - go_mod_path: ./integration-tests/go.mod - cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }} - cache_restore_only: "true" - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - QA_KUBECONFIG: "" - should_tidy: "false" - DEFAULT_LOKI_TENANT_ID: ${{ vars.LOKI_TENANT_ID }} - DEFAULT_LOKI_ENDPOINT: ${{ secrets.LOKI_URL_CI }} - DEFAULT_LOKI_BASIC_AUTH: ${{ secrets.LOKI_BASIC_AUTH }} - DEFAULT_CHAINLINK_IMAGE: ${{ env.CHAINLINK_IMAGE }} - DEFAULT_GRAFANA_BASE_URL: ${{ vars.GRAFANA_URL }} - DEFAULT_GRAFANA_DASHBOARD_URL: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs" - DEFAULT_PYROSCOPE_SERVER_URL: ${{ !startsWith(github.ref, 'refs/tags/') && '' || secrets.QA_PYROSCOPE_INSTANCE }} # Avoid sending blank envs https://github.com/orgs/community/discussions/25725 - DEFAULT_PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }} - DEFAULT_PYROSCOPE_ENVIRONMENT: ci-ccip-bidirectional-lane-${{ matrix.evm_node.name }} - DEFAULT_PYROSCOPE_ENABLED: 'true' - - - name: Print failed test summary - if: always() - uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/show-test-summary@1587f59bfd626b668d303abbc90fee41b12397e6 # v2.3.23 - with: - test_directories: ./integration-tests/smoke/,./integration-tests/ccip-tests/smoke/ - - start-slack-thread: - name: Start Slack Thread - if: ${{ always() && needs.*.result != 'skipped' && needs.*.result != 'cancelled' && needs.should-run.outputs.should_run == 'true' }} - environment: integration - outputs: - thread_ts: ${{ steps.slack.outputs.thread_ts }} - permissions: - checks: write - pull-requests: write - id-token: write - contents: read - runs-on: ubuntu-latest - needs: [run-client-compatibility-matrix,should-run,select-versions] - steps: - - name: Debug Result - run: echo ${{ join(needs.*.result, ',') }} - - name: Main Slack Notification - uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0 - id: slack - with: - channel-id: ${{ secrets.QA_CCIP_SLACK_CHANNEL }} - payload: | - { - "attachments": [ - { - "color": "${{ contains(join(needs.*.result, ','), 'failure') && 
'#C62828' || '#2E7D32' }}", - "blocks": [ - { - "type": "header", - "text": { - "type": "plain_text", - "text": "CCIP Compatibility Test Results ${{ contains(join(needs.*.result, ','), 'failure') && ':x:' || ':white_check_mark:'}}", - "emoji": true - } - }, - { - "type": "section", - "text": { - "type": "mrkdwn", - "text": "${{ contains(join(needs.*.result, ','), 'failure') && 'Some tests failed! Notifying ' || 'All Good!' }}" - } - }, - { - "type": "divider" - }, - { - "type": "section", - "text": { - "type": "mrkdwn", - "text": "<${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}|${{ github.ref_name }}> | <${{ github.server_url }}/${{ github.repository }}/commit/${{ needs.select-versions.outputs.chainlink_version }}|${{ needs.select-versions.outputs.chainlink_version }}> | <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|Run>" - } - } - ] - } - ] - } - env: - SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} - - parse-test-results: - name: Parse Test Results - if: always() && needs.*.result != 'skipped' && needs.*.result != 'cancelled' && needs.should-run.outputs.should_run == 'true' - environment: integration - permissions: - checks: write - pull-requests: write - id-token: write - contents: read - runs-on: ubuntu-latest - needs: [run-client-compatibility-matrix,should-run] - outputs: - base64_parsed_results: ${{ steps.get-test-results.outputs.base64_parsed_results }} - steps: - # workflowresultparser is a tool to get job results from a workflow run - - name: Set Up workflowresultparser - shell: bash - run: | - go install github.com/smartcontractkit/chainlink-testing-framework/tools/workflowresultparser@v1.0.0 - - name: Get and parse Test Results - shell: bash - id: get-test-results - run: | - PATH=$PATH:$(go env GOPATH)/bin - export PATH - - workflowresultparser -workflowRunID ${{ github.run_id }} -githubToken ${{ github.token }} -githubRepo "${{ github.repository }}" -jobNameRegex "^CCIP Compatibility with (.*)$" -namedKey="CCIP" -outputFile=output.json - - echo "base64_parsed_results=$(base64 -w 0 output.json)" >> $GITHUB_OUTPUT - - display-test-results: - name: Aggregated test results - if: always() && needs.*.result != 'skipped' && needs.*.result != 'cancelled' && needs.should-run.outputs.should_run == 'true' && needs.parse-test-results.result == 'success' - environment: integration - permissions: - checks: write - pull-requests: write - id-token: write - contents: read - runs-on: ubuntu-latest - needs: [start-slack-thread, should-run, select-versions, parse-test-results] - steps: - # asciitable is a tool that prints results in a nice ASCII table - - name: Set Up asciitable - shell: bash - run: | - go install github.com/smartcontractkit/chainlink-testing-framework/tools/asciitable@v1.0.2 - - name: Print aggregated test results - shell: bash - run: | - PATH=$PATH:$(go env GOPATH)/bin - export PATH - - raw_results="$(echo ${{ needs.parse-test-results.outputs.base64_parsed_results }} | base64 -d)" - echo $raw_results > input.json - asciitable --firstColumn "EVM Implementation" --secondColumn Result --jsonfile input.json --outputFile output.txt --section "CCIP" --namedKey "CCIP" - - echo - echo "AGGREGATED RESULTS" - cat output.txt - - echo "## Aggregated EVM Implementations compatibility results summary" >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - cat output.txt >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - - post-test-results-to-slack: - name: Post Test Results - 
if: ${{ always() && needs.*.result != 'skipped' && needs.*.result != 'cancelled' && needs.should-run.outputs.should_run == 'true' }} - environment: integration - permissions: - checks: write - pull-requests: write - id-token: write - contents: read - runs-on: ubuntu-latest - needs: [start-slack-thread,should-run,select-versions] - steps: - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - ref: ${{ needs.select-versions.outputs.chainlink_version }} - - name: Get test results for CCIP - id: get-product-results - shell: bash - run: | - raw_results="$(echo ${{ needs.parse-test-results.outputs.base64_parsed_results }} | base64 -d)" - product_result=$(echo "$raw_results" | jq -c "select(has(\"CCIP\")) | .CCIP[]") - if [ -n "$product_result" ]; then - base64_result=$(echo $product_result | base64 -w 0) - echo "base64_result=$base64_result" >> $GITHUB_OUTPUT - else - echo "No results found for CCIP" - echo "base64_result=" >> $GITHUB_OUTPUT - fi - - name: Post Test Results to Slack - uses: ./.github/actions/notify-slack-jobs-result - with: - github_token: ${{ github.token }} - github_repository: ${{ github.repository }} - workflow_run_id: ${{ github.run_id }} - github_job_name_regex: ^CCIP Compatibility with (.*?)$ - message_title: CCIP Compatibility Test Results - slack_channel_id: ${{ secrets.QA_CCIP_SLACK_CHANNEL }} - slack_bot_token: ${{ secrets.QA_SLACK_API_KEY }} - slack_thread_ts: ${{ needs.start-slack-thread.outputs.thread_ts }} - base64_parsed_results: ${{ steps.get-product-results.outputs.base64_result }} diff --git a/.github/workflows/ccip-live-network-tests.yml b/.github/workflows/ccip-live-network-tests.yml deleted file mode 100644 index f466fbc7cd..0000000000 --- a/.github/workflows/ccip-live-network-tests.yml +++ /dev/null @@ -1,476 +0,0 @@ -name: CCIP On-Demand Live Network Tests -on: - schedule: - - cron: '0 */6 * * *' - workflow_dispatch: - inputs: - base64_test_input : # base64 encoded toml for test input - description: 'Base64 encoded toml test input' - required: false - slackMemberID: - description: 'Slack member ID to notify' - required: false - test_type: - description: 'Type of test to run' - required: false - type: choice - options: - - 'smoke' - - 'load' - test_secrets_override_key: - description: 'Key to run tests with custom test secrets' - required: false - type: string - -# Only run 1 of this workflow at a time per PR -concurrency: - group: live-testnet-tests - cancel-in-progress: true - -env: - CHAINLINK_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink - CHAINLINK_VERSION: ${{ github.sha }} - INTERNAL_DOCKER_REPO: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com - AWS_ECR_REPO_PUBLIC_REGISTRY: public.ecr.aws - E2E_TEST_CHAINLINK_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink - E2E_TEST_LOKI_TENANT_ID: ${{ vars.LOKI_TENANT_ID }} - E2E_TEST_LOKI_ENDPOINT: ${{ secrets.LOKI_URL }} - E2E_TEST_LOKI_BASIC_AUTH: ${{ secrets.LOKI_BASIC_AUTH }} - E2E_TEST_GRAFANA_BASE_URL: ${{ vars.GRAFANA_URL }} - # Default private key test secret loaded from Github Secret as only security team has access to it. - # this key secrets.QA_SHARED_803C_KEY has a story behind it. To know more, see CCIP-2875 and SECHD-16575 tickets. 
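The workflow_dispatch inputs above are plain strings, so a run of this (now removed) workflow could be started from a terminal once the TOML override is base64-encoded. A minimal sketch, assuming the GitHub CLI is installed and authenticated against this repository; the override file name and Slack member ID are illustrative:

# encode the override the same way the scheduled path does
BASE64_TEST_INPUT=$(base64 -w 0 -i ./my-override.toml)
gh workflow run "CCIP On-Demand Live Network Tests" \
  -f base64_test_input="$BASE64_TEST_INPUT" \
  -f test_type=smoke \
  -f slackMemberID=U00000000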
- E2E_TEST_ARBITRUM_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_AVALANCHE_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_BASE_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_BLAST_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_BSC_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_CELO_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_ETHEREUM_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_GNOSIS_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_KROMA_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_LINEA_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_METIS_ANDROMEDA_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_MODE_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_OPTIMISM_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_POLYGON_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_SCROLL_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_WEMIX_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_ZKSYNC_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - -jobs: - build-chainlink: - environment: integration - permissions: - id-token: write - contents: read - name: Build Chainlink Image - runs-on: ubuntu20.04-16cores-64GB - steps: - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - name: Check if image exists - id: check-image - uses: smartcontractkit/chainlink-github-actions/docker/image-exists@5dd916d08c03cb5f9a97304f4f174820421bb946 # v2.3.11 - with: - repository: chainlink - tag: ${{ env.CHAINLINK_VERSION }} - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - - name: Build Image - if: steps.check-image.outputs.exists == 'false' - uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/build-image@5dd916d08c03cb5f9a97304f4f174820421bb946 # v2.3.11 - env: - GH_TOKEN: ${{ github.token }} - with: - cl_repo: smartcontractkit/chainlink-ccip - cl_ref: ${{ env.CHAINLINK_VERSION }} - push_tag: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink:${{ env.CHAINLINK_VERSION }} - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - - name: Collect Metrics - if: always() - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 - with: - id: ccip-on-demand-live-testnet-tests-build-chainlink-image - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Build Chainlink Image - continue-on-error: true - - build-test-image: - environment: integration - if: ${{ github.event_name == 'workflow_dispatch' && inputs.test_type == 'load' }} - permissions: - id-token: write - contents: read - name: Build Test Image - runs-on: ubuntu20.04-16cores-64GB - steps: - - name: Collect Metrics - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 - with: - id: ccip-on-demand-live-testnet-tests-build-test-image - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Build Test Image 
- continue-on-error: true - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - name: Build Test Image - uses: ./.github/actions/build-test-image - with: - tag: ${{ env.CHAINLINK_TEST_VERSION }} - QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - - ccip-load-test: - name: CCIP Load Test - environment: integration - runs-on: ubuntu-latest - strategy: - matrix: - config: [mainnet.toml] - needs: [ build-chainlink, build-test-image ] - # if the event is a workflow_dispatch event and the test type is load and no previous job failed - if: ${{ github.event_name == 'workflow_dispatch' && inputs.test_type == 'load' && !contains(needs.*.result, 'failure') }} - permissions: - issues: read - checks: write - pull-requests: write - id-token: write - contents: read - env: - CHAINLINK_ENV_USER: ${{ github.actor }} - TEST_LOG_LEVEL: info - REF_NAME: ${{ github.head_ref || github.ref_name }} - CHAINLINK_TEST_VERSION: ${{ github.sha }} - ENV_JOB_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink-ccip-tests:${{ github.sha }} - ENV_JOB_IMAGE_BASE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink-ccip-tests - - steps: - - name: Collect Metrics - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 - with: - id: ccip-on-demand-live-testnet-tests-load-tests - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: CCIP Load Test - continue-on-error: true - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - ref: ${{ env.REF_NAME }} - - name: Prepare Base64 TOML override - id: set_override_config - shell: bash - run: | - SLACK_USER=$(jq -r '.inputs.slackMemberID' $GITHUB_EVENT_PATH) - echo ::add-mask::$SLACK_USER - echo "SLACK_USER=$SLACK_USER" >> "$GITHUB_ENV" - if [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then - BASE64_CONFIG_OVERRIDE=$(jq -r '.inputs.base64_test_input' $GITHUB_EVENT_PATH) - echo ::add-mask::$BASE64_CONFIG_OVERRIDE - echo "base_64_override=$BASE64_CONFIG_OVERRIDE" >> $GITHUB_OUTPUT - fi - if [[ "${{ github.event_name }}" == "schedule" ]]; then - BASE64_CONFIG_OVERRIDE=$(base64 -w 0 -i ./integration-tests/ccip-tests/testconfig/override/${{ matrix.config }}) - echo ::add-mask::$BASE64_CONFIG_OVERRIDE - echo "base_64_override=$BASE64_CONFIG_OVERRIDE" >> $GITHUB_OUTPUT - echo "SLACK_USER=${{ secrets.QA_SLACK_USER }}" >> $GITHUB_ENV - fi - - name: step summary - shell: bash - run: | - echo "### chainlink image used for this test run :link:" >>$GITHUB_STEP_SUMMARY - echo "\`${{ env.CHAINLINK_VERSION }}\`" >> $GITHUB_STEP_SUMMARY - echo "### chainlink-tests image tag for this test run :ship:" >>$GITHUB_STEP_SUMMARY - echo "\`${{ env.CHAINLINK_TEST_VERSION }}\`" >> $GITHUB_STEP_SUMMARY - - name: Prepare Base64 TOML override for CCIP secrets - uses: ./.github/actions/setup-create-base64-config-ccip - id: setup_create_base64_config_ccip - with: - runId: ${{ github.run_id }} - testLogCollect: ${{ vars.TEST_LOG_COLLECT }} - chainlinkVersion: ${{ env.CHAINLINK_VERSION }} - logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} - - name: Run Tests - uses: 
smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@94cb11f4bd545607a2f221c6685052b3abee723d # v2.3.32 - env: - TEST_SUITE: load - TEST_ARGS: -test.timeout 900h - DATABASE_URL: postgresql://postgres:node@localhost:5432/chainlink_test?sslmode=disable - RR_MEM: 8Gi - RR_CPU: 4 - DETACH_RUNNER: true - TEST_TRIGGERED_BY: ccip-load-test-ci - BASE64_CONFIG_OVERRIDE: ${{ steps.setup_create_base64_config_ccip.outputs.base64_config }},${{ steps.set_override_config.outputs.base_64_override }} - TEST_BASE64_CONFIG_OVERRIDE: ${{ steps.setup_create_base64_config_ccip.outputs.base64_config }},${{ steps.set_override_config.outputs.base_64_override }} - E2E_TEST_GRAFANA_DASHBOARD_URL: "/d/6vjVx-1V8/ccip-long-running-tests" - with: - test_command_to_run: cd ./integration-tests/ccip-tests && go test -v -timeout 70m -count=1 -json -run ^TestLoadCCIPStableRPS$ ./load 2>&1 | tee /tmp/gotest.log | gotestloghelper -ci -singlepackage -hidepassingtests=false - test_download_vendor_packages_command: cd ./integration-tests && go mod download - # Other default test secrets loaded from dotenv Github Secret. - test_secrets_defaults_base64: ${{ secrets.CCIP_DEFAULT_TEST_SECRETS }} - test_secrets_override_base64: ${{ secrets[inputs.test_secrets_override_key] }} - token: ${{ secrets.GITHUB_TOKEN }} - go_mod_path: ./integration-tests/go.mod - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} - triggered_by: ${{ env.TEST_TRIGGERED_BY }} - artifacts_location: ./integration-tests/load/logs/payload_ccip.json - aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - cache_key_id: ccip-load-${{ env.MOD_CACHE_VERSION }} - cache_restore_only: "true" - should_cleanup: false - - ccip-smoke-test: - name: 'CCIP Smoke - Source: ${{ matrix.lanes.name }}' - environment: integration - runs-on: ubuntu-latest - needs: [ build-chainlink ] - # if the event is a scheduled event or the test type is smoke and no previous job failed - if: ${{ (github.event_name == 'schedule' || inputs.test_type == 'smoke') && !contains(needs.*.result, 'failure') }} - permissions: - issues: read - checks: write - pull-requests: write - id-token: write - contents: read - env: - CHAINLINK_ENV_USER: ${{ github.actor }} - TEST_LOG_LEVEL: info - REF_NAME: ${{ github.head_ref || github.ref_name }} - strategy: - fail-fast: false - matrix: - lanes: - - name: 'ARBITRUM_MAINNET' - pairs: 'ARBITRUM_MAINNET,BSC_MAINNET;ARBITRUM_MAINNET,OPTIMISM_MAINNET' - enabled: true - phaseTimeout: 20m - - name: 'AVALANCHE_MAINNET' - pairs: 'AVALANCHE_MAINNET,ARBITRUM_MAINNET;AVALANCHE_MAINNET,BASE_MAINNET;AVALANCHE_MAINNET,BSC_MAINNET;AVALANCHE_MAINNET,OPTIMISM_MAINNET;AVALANCHE_MAINNET,POLYGON_MAINNET;AVALANCHE_MAINNET,WEMIX_MAINNET' - enabled: true - phaseTimeout: 20m - - name: 'BASE_MAINNET' - pairs: 'BASE_MAINNET,ARBITRUM_MAINNET;BASE_MAINNET,BSC_MAINNET;BASE_MAINNET,OPTIMISM_MAINNET;BASE_MAINNET,POLYGON_MAINNET' - enabled: true - phaseTimeout: 20m - - name: 'BLAST_MAINNET' - pairs: 'BLAST_MAINNET,ARBITRUM_MAINNET;BLAST_MAINNET,BASE_MAINNET;BLAST_MAINNET,BSC_MAINNET' - enabled: true - phaseTimeout: 20m - - name: 'BSC_MAINNET' - pairs: 'BSC_MAINNET,OPTIMISM_MAINNET;BSC_MAINNET,POLYGON_MAINNET;BSC_MAINNET,WEMIX_MAINNET' - enabled: true - phaseTimeout: 20m - - name: 'ETHEREUM_MAINNET 1' - pairs: 
'ETHEREUM_MAINNET,ARBITRUM_MAINNET;ETHEREUM_MAINNET,AVALANCHE_MAINNET;ETHEREUM_MAINNET,BASE_MAINNET;ETHEREUM_MAINNET,BLAST_MAINNET;ETHEREUM_MAINNET,BSC_MAINNET;ETHEREUM_MAINNET,CELO_MAINNET;ETHEREUM_MAINNET,GNOSIS_MAINNET;ETHEREUM_MAINNET,OPTIMISM_MAINNET;ETHEREUM_MAINNET,POLYGON_MAINNET;ETHEREUM_MAINNET,WEMIX_MAINNET' - enabled: true - phaseTimeout: 40m - - name: 'ETHEREUM_MAINNET 2' - pairs: 'ETHEREUM_MAINNET,METIS_ANDROMEDA;ETHEREUM_MAINNET,ZKSYNC_MAINNET' - enabled: true - phaseTimeout: 90m - - name: 'GNOSIS_MAINNET' - pairs: 'GNOSIS_MAINNET,ARBITRUM_MAINNET;GNOSIS_MAINNET,AVALANCHE_MAINNET;GNOSIS_MAINNET,BASE_MAINNET;GNOSIS_MAINNET,BSC_MAINNET;GNOSIS_MAINNET,OPTIMISM_MAINNET;GNOSIS_MAINNET,POLYGON_MAINNET' - enabled: true - phaseTimeout: 20m - - name: 'METIS_ANDROMEDA' - pairs: 'METIS_ANDROMEDA,ARBITRUM_MAINNET' - enabled: true - phaseTimeout: 300m - - name: 'MODE_MAINNET' - pairs: 'MODE_MAINNET,OPTIMISM_MAINNET;MODE_MAINNET,ARBITRUM_MAINNET;MODE_MAINNET,BASE_MAINNET;MODE_MAINNET,BSC_MAINNET' - enabled: true - phaseTimeout: 20m - - name: 'OPTIMISM_MAINNET' - pairs: 'OPTIMISM_MAINNET,POLYGON_MAINNET;OPTIMISM_MAINNET,WEMIX_MAINNET' - enabled: true - phaseTimeout: 20m - - name: 'POLYGON_MAINNET' - pairs: 'POLYGON_MAINNET,ARBITRUM_MAINNET;POLYGON_MAINNET,WEMIX_MAINNET' - enabled: true - phaseTimeout: 20m - - name: 'WEMIX_MAINNET' - pairs: 'WEMIX_MAINNET,ARBITRUM_MAINNET;WEMIX_MAINNET,KROMA_MAINNET' - enabled: true - phaseTimeout: 20m - - name: 'ZKSYNC_MAINNET' - pairs: 'ZKSYNC_MAINNET,ARBITRUM_MAINNET' - enabled: true - phaseTimeout: 300m - steps: - - name: Collect Metrics - if: ${{ matrix.lanes.enabled == true }} - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 - with: - id: ccip-on-demand-live-testnet-tests-smoke-test - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: 'CCIP Smoke - Source: ${{ matrix.lanes.name }}' - continue-on-error: true - - name: Checkout the repo - if: ${{ matrix.lanes.enabled == true }} - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - ref: ${{ env.REF_NAME }} - - name: Prepare Base64 TOML override - if: ${{ matrix.lanes.enabled == true }} - id: set_override_config - shell: bash - run: | - SLACK_USER=$(jq -r '.inputs.slackMemberID' $GITHUB_EVENT_PATH) - echo ::add-mask::$SLACK_USER - echo "SLACK_USER=$SLACK_USER" >> "$GITHUB_ENV" - if [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then - BASE64_CONFIG_OVERRIDE=$(jq -r '.inputs.base64_test_input' $GITHUB_EVENT_PATH) - echo ::add-mask::$BASE64_CONFIG_OVERRIDE - echo "base_64_override=$BASE64_CONFIG_OVERRIDE" >> $GITHUB_OUTPUT - fi - if [[ "${{ github.event_name }}" == "schedule" ]]; then - BASE64_CONFIG_OVERRIDE=$(base64 -w 0 -i ./integration-tests/ccip-tests/testconfig/override/mainnet.toml) - echo ::add-mask::$BASE64_CONFIG_OVERRIDE - echo "base_64_override=$BASE64_CONFIG_OVERRIDE" >> $GITHUB_OUTPUT - echo "SLACK_USER=${{ secrets.QA_SLACK_USER }}" >> $GITHUB_ENV - fi - - name: step summary - if: ${{ matrix.lanes.enabled == true }} - shell: bash - run: | - echo "### chainlink image used for this test run :link:" >>$GITHUB_STEP_SUMMARY - echo "\`${{ env.CHAINLINK_VERSION }}\`" >> $GITHUB_STEP_SUMMARY - - name: Prepare Base64 TOML override for CCIP secrets - if: ${{ matrix.lanes.enabled == true }} - uses: ./.github/actions/setup-create-base64-config-ccip - id: 
setup_create_base64_config_ccip - with: - runId: ${{ github.run_id }} - testLogCollect: ${{ vars.TEST_LOG_COLLECT }} - chainlinkVersion: ${{ env.CHAINLINK_VERSION }} - logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} - - name: Run Tests - uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@94cb11f4bd545607a2f221c6685052b3abee723d # v2.3.32 - if: ${{ matrix.lanes.enabled == true }} - env: - TEST_SUITE: smoke - TEST_ARGS: -test.timeout 900h - DETACH_RUNNER: false - DATABASE_URL: postgresql://postgres:node@localhost:5432/chainlink_test?sslmode=disable - RR_MEM: 8Gi - RR_CPU: 4 - TEST_TRIGGERED_BY: ccip-smoke-test-ci - BASE64_CONFIG_OVERRIDE: ${{ steps.setup_create_base64_config_ccip.outputs.base64_config }},${{ steps.set_override_config.outputs.base_64_override }} - TEST_BASE64_CONFIG_OVERRIDE: ${{ steps.setup_create_base64_config_ccip.outputs.base64_config }},${{ steps.set_override_config.outputs.base_64_override }} - E2E_TEST_GRAFANA_DASHBOARD_URL: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs" - OVERRIDE_NETWORK_PAIRS: ${{ matrix.lanes.pairs }} - OVERRIDE_PHASE_TIMEOUT: ${{ matrix.lanes.phaseTimeout }} - with: - test_command_to_run: cd ./integration-tests/ccip-tests && go test -v -timeout 3h -count=1 -p 30 -json -run ^TestSmokeCCIPForGivenNetworkPairs$ ./smoke 2>&1 | tee /tmp/gotest.log | gotestloghelper -ci -singlepackage -hidepassingtests=false - test_download_vendor_packages_command: cd ./integration-tests && go mod download - # Other default test secrets loaded from dotenv Github Secret. - test_secrets_defaults_base64: ${{ secrets.CCIP_DEFAULT_TEST_SECRETS }} - test_secrets_override_base64: ${{ secrets[inputs.test_secrets_override_key] }} - token: ${{ secrets.GITHUB_TOKEN }} - go_mod_path: ./integration-tests/go.mod - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - triggered_by: ${{ env.TEST_TRIGGERED_BY }} - artifacts_location: ./integration-tests/smoke/logs/payload_ccip.json - aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - cache_key_id: ccip-smoke-${{ env.MOD_CACHE_VERSION }} - cache_restore_only: "true" - should_cleanup: false - - # Custom reporting Jobs - start-slack-thread: - name: Start Slack Thread - if: ${{ failure() && needs.ccip-smoke-test.result != 'skipped' && needs.ccip-smoke-test.result != 'cancelled' }} - environment: integration - outputs: - thread_ts: ${{ steps.slack.outputs.thread_ts }} - permissions: - checks: write - pull-requests: write - id-token: write - contents: read - runs-on: ubuntu-latest - needs: [ ccip-smoke-test ] - steps: - - name: Debug Result - run: echo ${{ join(needs.*.result, ',') }} - - name: Main Slack Notification - uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0 - id: slack - with: - channel-id: "#ccip-testing" - payload: | - { - "attachments": [ - { - "color": "${{ contains(join(needs.*.result, ','), 'failure') && '#C62828' || '#2E7D32' }}", - "blocks": [ - { - "type": "header", - "text": { - "type": "plain_text", - "text": "CCIP Mainnet Smoke tests ${{ contains(join(needs.*.result, ','), 'failure') && ':x:' || ':white_check_mark:'}}", - "emoji": true - } - }, - { - "type": "divider" - }, - { - "type": "section", - "text": { - "type": "mrkdwn", - "text": "<${{ github.server_url }}/${{ github.repository }}/${{contains(github.ref_name, 'release') && 'releases/tag' || 'tree'}}/${{ github.ref_name }}|${{ github.ref_name }}> | <${{ github.server_url }}/${{ github.repository }}/commit/${{ 
github.sha }}|${{ github.sha }}> | <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|Run>" - } - } - ] - } - ] - } - env: - SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} - - post-test-results-to-slack: - name: Post Test Results - if: ${{ failure() && needs.start-slack-thread.result != 'skipped' && needs.start-slack-thread.result != 'cancelled' }} - environment: integration - permissions: - checks: write - pull-requests: write - id-token: write - contents: read - runs-on: ubuntu-latest - needs: start-slack-thread - steps: - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - ref: ${{ github.event.pull_request.head.sha || github.event.merge_group.head_sha }} - - name: Post Test Results - uses: ./.github/actions/notify-slack-jobs-result - with: - github_token: ${{ github.token }} - github_repository: ${{ github.repository }} - workflow_run_id: ${{ github.run_id }} - github_job_name_regex: ^CCIP Smoke (.*)$ - message_title: CCIP Mainnet Smoke test - slack_channel_id: "#ccip-testing" - slack_bot_token: ${{ secrets.QA_SLACK_API_KEY }} - slack_thread_ts: ${{ needs.start-slack-thread.outputs.thread_ts }} - - # End Reporting Jobs \ No newline at end of file diff --git a/.github/workflows/ccip-load-tests.yml b/.github/workflows/ccip-load-tests.yml deleted file mode 100644 index 1b71c044cd..0000000000 --- a/.github/workflows/ccip-load-tests.yml +++ /dev/null @@ -1,62 +0,0 @@ -name: CCIP Load Test -on: - push: - branches: - - ccip-develop - tags: - - '*' - workflow_dispatch: - inputs: - test_config_override_path: - description: Path to a test config file used to override the default test config - required: false - type: string - test_secrets_override_key: - description: 'Key to run tests with custom test secrets' - required: false - type: string - chainlink_version: - description: Chainlink image version to use. 
Commit sha if not provided - required: false - type: string - -# Only run 1 of this workflow at a time per PR -concurrency: - group: load-ccip-tests-chainlink-${{ github.ref }} - cancel-in-progress: true - -jobs: - run-e2e-tests-workflow: - name: Run E2E Tests - uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@aad83f232743646faa35f5ac03ee3829148d37ce - with: - test_path: .github/e2e-tests.yml - test_trigger: E2E CCIP Load Tests - test_config_override_path: ${{ inputs.test_config_override_path }} - chainlink_version: ${{ inputs.chainlink_version || github.sha }} - slack_notification_after_tests: always - slack_notification_after_tests_channel_id: '#ccip-testing' - slack_notification_after_tests_name: CCIP E2E Load Tests - test_image_suites: ccip-load - secrets: - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - QA_PYROSCOPE_INSTANCE: ${{ secrets.QA_PYROSCOPE_INSTANCE }} - QA_PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }} - QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} - GRAFANA_INTERNAL_TENANT_ID: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - GRAFANA_INTERNAL_BASIC_AUTH: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - GRAFANA_INTERNAL_HOST: ${{ secrets.GRAFANA_INTERNAL_HOST }} - GRAFANA_INTERNAL_URL_SHORTENER_TOKEN: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN }} - LOKI_TENANT_ID: ${{ secrets.LOKI_TENANT_ID }} - LOKI_URL: ${{ secrets.LOKI_URL }} - LOKI_BASIC_AUTH: ${{ secrets.LOKI_BASIC_AUTH }} - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN: ${{ secrets.AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN }} - AWS_API_GW_HOST_GRAFANA: ${{ secrets.AWS_API_GW_HOST_GRAFANA }} - TEST_SECRETS_OVERRIDE_BASE64: ${{ secrets[inputs.test_secrets_override_key] }} - SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} - SLACK_API_KEY: ${{ secrets.QA_SLACK_API_KEY }} - diff --git a/.github/workflows/ccip-offchain-upgrade-tests.yml b/.github/workflows/ccip-offchain-upgrade-tests.yml deleted file mode 100644 index 78569fc370..0000000000 --- a/.github/workflows/ccip-offchain-upgrade-tests.yml +++ /dev/null @@ -1,416 +0,0 @@ -name: CCIP Offchain Upgrade Compatibility Tests -on: - merge_group: - pull_request: - push: - tags: - - "*" - workflow_dispatch: - -concurrency: - group: upgrade-tests-ccip-${{ github.ref }} - cancel-in-progress: true - -env: - # for run-test variables and environment - ECR_TAG: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink-ccip-tests:ccip-develop - ENV_JOB_IMAGE_BASE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink-ccip-tests - CHAINLINK_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink - INTERNAL_DOCKER_REPO: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com - MOD_CACHE_VERSION: 2 - AWS_ECR_REPO_PUBLIC_REGISTRY: public.ecr.aws - # Default private key test secret loaded from Github Secret as only security team has access to it. - # this key secrets.QA_SHARED_803C_KEY has a story behind it. To know more, see CCIP-2875 and SECHD-16575 tickets. 
- E2E_TEST_ETHEREUM_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_ARBITRUM_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_BASE_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_WEMIX_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_AVALANCHE_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_ZKSYNC_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_MODE_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_METIS_ANDROMEDA_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_OPTIMISM_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_KROMA_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_GNOSIS_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_POLYGON_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - E2E_TEST_BSC_MAINNET_WALLET_KEY: ${{ secrets.QA_SHARED_803C_KEY }} - -jobs: - # Build Test Dependencies - changes: - environment: integration - name: Check Paths That Require Tests To Run - runs-on: ubuntu-latest - # We don't directly merge dependabot PRs, so let's not waste the resources - if: github.actor != 'dependabot[bot]' - steps: - - name: Checkout the repo - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: smartcontractkit/ccip - ref: ${{ inputs.cl_ref }} - - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 - id: changes - with: - filters: | - src: - - '**/*.go' - - '**/*go.sum' - - '**/*go.mod' - - '.github/workflows/integration-tests.yml' - - '**/*Dockerfile' - - 'core/**/config/**/*.toml' - - 'integration-tests/**/*.toml' - - name: Collect Metrics - if: always() - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 - with: - id: ccip-offchain-upgrade-compatibility-tests-changes - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Check Paths That Require Tests To Run - continue-on-error: true - outputs: - src: ${{ inputs.set_changes_output || steps.changes.outputs.src }} - - build-chainlink: - if: needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch' - needs: [ changes ] - environment: integration - permissions: - id-token: write - contents: read - name: Build Chainlink Image - runs-on: ubuntu-latest - steps: - - name: Collect Metrics - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 - with: - id: ccip-offchain-upgrade-compatibility-tests-build-chainlink - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Build Chainlink Image - continue-on-error: true - - name: Checkout the repo - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: smartcontractkit/ccip - ref: ${{ github.event.pull_request.head.sha || github.event.merge_group.head_sha }} - - name: Build Chainlink Image - uses: ./.github/actions/build-chainlink-image - with: - tag_suffix: "" - dockerfile: core/chainlink.Dockerfile - git_commit_sha: ${{ github.sha }} - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - - build-test-image: - if: needs.changes.outputs.src == 'true' || github.event_name == 
'workflow_dispatch' - needs: [ changes ] - environment: integration - outputs: - tag: ${{ steps.build-test-image.outputs.test_image_tag }} - permissions: - id-token: write - contents: read - name: Build Test Image - runs-on: ubuntu-latest - steps: - - name: Collect Metrics - if: needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch' - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 - with: - id: ccip-offchain-upgrade-compatibility-tests-build-test-image - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Build Test Image with Current Sha - continue-on-error: true - - name: Checkout the repo - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: smartcontractkit/ccip - ref: ${{ github.event.pull_request.head.sha || github.sha }} - - name: Build Test Image - id: build-test-image - if: needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch' - uses: smartcontractkit/.github/actions/ctf-build-test-image@a5e4f4c8fbb8e15ab2ad131552eca6ac83c4f4b3 # ctf-build-test-image@0.1.0 - with: - # we just want to build the load tests - suites: ccip-tests/load ccip-tests/smoke - repository: chainlink-ccip-tests - tag: ${{ github.sha }} - QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - - last-release-info: - if: needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch' - needs: [ changes ] - environment: integration - permissions: - id-token: write - contents: read - name: Fetch Info for Last Release - runs-on: ubuntu-latest - outputs: - release_name: ${{ steps.fetch_last_release.outputs.release_name }} - release_sha: ${{ steps.fetch_last_release.outputs.sha_ref }} - release_tag: ${{ steps.fetch_last_release.outputs.release_tag_name }} - steps: - - name: Collect Metrics - if: needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch' - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 - with: - id: ccip-offchain-upgrade-compatibility-tests-last-release-info - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Build Test Image for Last Release - continue-on-error: true - - name: find last release - id: fetch_last_release - shell: bash - run: | - release_name=$(curl --header "Authorization: token ${{ secrets.GITHUB_TOKEN }}" --request GET https://api.github.com/repos/${{ github.repository }}/releases | jq -r --arg SUFFIX "release" '.[] | select(.tag_name | endswith("release")) | .tag_name' | sort -V | tail -n 1) - release_tag_name="${release_name:1}" - echo "release_tag_name=${release_tag_name}" >> $GITHUB_OUTPUT - echo "release_name=${release_name}" >> $GITHUB_OUTPUT - sha_ref=$(curl -s --header "Authorization: token ${{ secrets.GITHUB_TOKEN }}" --request GET https://api.github.com/repos/${{ github.repository }}/git/refs/tags/${release_name} | jq -r '.object.sha' | sort -V | tail -n 1) - echo "sha_ref=${sha_ref}" >> $GITHUB_OUTPUT - # End Build Test Dependencies - - # run test with previous image - run-test-with-last-release: - environment: integration - permissions: - 
actions: read - checks: write - pull-requests: write - id-token: write - contents: read - needs: [ changes, last-release-info, build-test-image ] - outputs: - existing_namespace: ${{ steps.fetch_namespace.outputs.existing_namespace }} - triggered_by: ${{ steps.fetch_namespace.outputs.triggered_by }} - strategy: - fail-fast: false - matrix: - product: - - name: ccip-smoke-with-last-release - os: ubuntu-latest - run: ^TestSmokeCCIPForBidirectionalLane$ - config_path: ./integration-tests/ccip-tests/testconfig/tomls/node-pre-upgrade-compatibility.toml - runs-on: ubuntu-latest - name: CCIP Deployment with ${{ needs.last-release-info.outputs.release_tag }} - ${{ matrix.product.name }} - env: - RELEASE_TAG: ${{ needs.last-release-info.outputs.release_tag }} - RELEASE_SHA: ${{ needs.last-release-info.outputs.release_sha }} - RELEASE_NAME: ${{ needs.last-release-info.outputs.release_name }} - TEST_TRIGGERED_BY: ${{ matrix.product.name }}-${{ github.run_id }} - steps: - - name: Collect Metrics - if: needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch' - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 - with: - id: ccip-offchain-upgrade-compatibility-tests-run-tests-with-last-release - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: CCIP Deployment with ${{ needs.last-release-info.outputs.release_tag }} - ${{ matrix.product.name }} - test-results-file: '{"testType":"go","filePath":"/tmp/gotest.log"}' - - name: Checkout the repo - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: smartcontractkit/ccip - ref: ${{ github.event.pull_request.head.sha || github.event.merge_group.head_sha }} - - name: Set Override Config - id: set_override_config - run: | - # if the matrix.product.config_path is set, use it as the override config - if [ "${{ matrix.product.config_path }}" != "" ]; then - echo "base_64_override=$(base64 -w 0 -i ${{ matrix.product.config_path }})" >> "$GITHUB_OUTPUT" - fi - - name: Prepare Base64 TOML override for CCIP secrets - uses: ./.github/actions/setup-create-base64-config-ccip - id: setup_create_base64_config_ccip - with: - runId: ${{ github.run_id }} - testLogCollect: ${{ vars.TEST_LOG_COLLECT }} - chainlinkVersion: ${{ env.RELEASE_TAG }} - logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} - selectedNetworks: SIMULATED_1,SIMULATED_2 - - name: Run Tests - if: needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch' - uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@94cb11f4bd545607a2f221c6685052b3abee723d # v2.3.32 - env: - BASE64_CONFIG_OVERRIDE: ${{ steps.set_override_config.outputs.base_64_override }},${{ steps.setup_create_base64_config_ccip.outputs.base64_config }} - TEST_BASE64_CONFIG_OVERRIDE: ${{ steps.set_override_config.outputs.base_64_override }},${{ steps.setup_create_base64_config_ccip.outputs.base64_config }} - ENV_JOB_IMAGE: ${{ env.ENV_JOB_IMAGE_BASE }}:${{ needs.build-test-image.outputs.tag }} - TEST_SUITE: smoke - TEST_ARGS: -test.timeout 30m - TEST_LOG_LEVEL: info - DATABASE_URL: postgresql://postgres:node@localhost:5432/chainlink_test?sslmode=disable - RR_MEM: 8Gi - RR_CPU: 4 - E2E_TEST_CHAINLINK_IMAGE: ${{ env.AWS_ECR_REPO_PUBLIC_REGISTRY }}/w0i8p0z9/chainlink-ccip - E2E_TEST_LOKI_TENANT_ID: ${{ vars.LOKI_TENANT_ID }} - 
E2E_TEST_LOKI_ENDPOINT: ${{ secrets.LOKI_URL_CI }} - E2E_TEST_LOKI_BASIC_AUTH: ${{ secrets.LOKI_BASIC_AUTH }} - E2E_TEST_GRAFANA_BASE_URL: ${{ vars.GRAFANA_URL }} - E2E_TEST_GRAFANA_DASHBOARD_URL: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs" - with: - test_command_to_run: cd ./integration-tests/ccip-tests && go test -timeout 30m -count=1 -json -run ${{ matrix.product.run }} ./smoke 2>&1 | tee /tmp/gotest.log | gotestloghelper -ci - test_download_vendor_packages_command: cd ./integration-tests && go mod download - # Load default test secrets - test_secrets_defaults_base64: ${{ secrets.CCIP_DEFAULT_TEST_SECRETS }} - cl_repo: ${{ env.AWS_ECR_REPO_PUBLIC_REGISTRY }}/w0i8p0z9/chainlink-ccip # releases are published to public registry - cl_image_tag: ${{ env.RELEASE_TAG }} - aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - artifacts_name: ${{ matrix.product.name }}${{ matrix.product.tag_suffix }}-test-logs - artifacts_location: | - ./integration-tests/ccip-tests/smoke/logs/* - token: ${{ secrets.GITHUB_TOKEN }} - go_mod_path: ./integration-tests/go.mod - cache_key_id: ccip-e2e-${{ env.MOD_CACHE_VERSION }} - cache_restore_only: "true" - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} - triggered_by: ${{ env.TEST_TRIGGERED_BY }} - should_tidy: "false" - should_cleanup: "false" - - - name: store laneconfig in artifacts - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 - with: - name: lane-config - path: ./integration-tests/ccip-tests/smoke/tmp_*.json - if-no-files-found: error - - - name: fetch namespace - id: fetch_namespace - run: | - echo "looking for namespaces" - ITEMS=$(kubectl get ns -l=triggered-by=${{ env.TEST_TRIGGERED_BY }}-${{ github.event.pull_request.number || github.run_id }} -o jsonpath='{.items}') - COUNT=$(echo "${ITEMS}" | jq '. | length') - echo "found ${COUNT} namespaces. 
will set the env var with first one" - for ((i=0;i<${COUNT};i++)); do - name=$(echo "${ITEMS}" | jq -r ".[${i}].metadata.name") - echo "setting output var with namespace: ${name}" - echo "existing_namespace=${name}" >> $GITHUB_OUTPUT - break - done - echo "triggered_by=${{ env.TEST_TRIGGERED_BY }}" >> $GITHUB_OUTPUT - echo "completed env var set up" - - # run load test with current image - run-test-with-current-sha: - environment: integration - permissions: - actions: read - checks: write - pull-requests: write - id-token: write - contents: read - needs: [ build-chainlink, changes, build-test-image, run-test-with-last-release ] - strategy: - fail-fast: false - matrix: - product: - - name: ccip-load-after-upgrade - os: ubuntu-latest - run: ^TestLoadCCIPWithUpgradeNodeVersion$ - config_path: ./integration-tests/ccip-tests/testconfig/tomls/node-post-upgrade-compatibility.toml - runs-on: ubuntu-latest - name: Upgrade Nodes with Current SHA and Run ${{ matrix.product.name }} - steps: - - name: Collect Metrics - if: needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch' - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 - with: - id: ccip-offchain-upgrade-compatibility-tests-run-test-image-current-sha - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Upgrade Nodes with Current SHA and Run ${{ matrix.product.name }} - test-results-file: '{"testType":"go","filePath":"/tmp/gotest.log"}' - - name: Checkout the repo - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - repository: smartcontractkit/ccip - ref: ${{ github.event.pull_request.head.sha || github.event.merge_group.head_sha }} - - name: Download LaneConfig From Last Release - uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4 - with: - name: lane-config - path: ./integration-tests/ccip-tests/load/lane-config - - - name: Set Override Config - id: set_override_config - run: | - # if the matrix.product.config_path is set, use it as the override config - if [ "${{ matrix.product.config_path }}" != "" ]; then - echo "base_64_override=$(base64 -w 0 -i ${{ matrix.product.config_path }})" >> "$GITHUB_OUTPUT" - fi - - name: Prepare Base64 TOML override for CCIP secrets - uses: ./.github/actions/setup-create-base64-config-ccip - id: setup_create_base64_config_ccip - with: - runId: ${{ github.run_id }} - testLogCollect: ${{ vars.TEST_LOG_COLLECT }} - chainlinkVersion: ${{ github.sha }} - upgradeVersion: ${{ github.sha }} - logstreamLogTargets: ${{ vars.LOGSTREAM_LOG_TARGETS }} - existingNamespace: ${{ needs.run-test-with-last-release.outputs.existing_namespace }} - - name: Run Tests - if: needs.changes.outputs.src == 'true' || github.event_name == 'workflow_dispatch' - uses: smartcontractkit/chainlink-github-actions/chainlink-testing-framework/run-tests@94cb11f4bd545607a2f221c6685052b3abee723d # v2.3.32 - env: - BASE64_CONFIG_OVERRIDE: ${{ steps.set_override_config.outputs.base_64_override }},${{ steps.setup_create_base64_config_ccip.outputs.base64_config }} - TEST_BASE64_CONFIG_OVERRIDE: ${{ steps.set_override_config.outputs.base_64_override }},${{ steps.setup_create_base64_config_ccip.outputs.base64_config }} - ENV_JOB_IMAGE: ${{ env.ENV_JOB_IMAGE_BASE }}:${{ github.sha }} - TEST_SUITE: load - TEST_ARGS: -test.timeout 1h - DATABASE_URL: 
postgresql://postgres:node@localhost:5432/chainlink_test?sslmode=disable - RR_MEM: 8Gi - RR_CPU: 4 - E2E_TEST_GRAFANA_DASHBOARD_URL: "/d/6vjVx-1V8/ccip-long-running-tests" - E2E_TEST_CHAINLINK_IMAGE: ${{ env.CHAINLINK_IMAGE }} - E2E_TEST_CHAINLINK_UPGRADE_IMAGE: ${{ env.CHAINLINK_IMAGE }} - E2E_TEST_LOKI_TENANT_ID: ${{ vars.LOKI_TENANT_ID }} - E2E_TEST_LOKI_ENDPOINT: ${{ secrets.LOKI_URL }} - E2E_TEST_LOKI_BASIC_AUTH: ${{ secrets.LOKI_BASIC_AUTH }} - E2E_TEST_GRAFANA_BASE_URL: ${{ vars.GRAFANA_URL }} - with: - test_command_to_run: cd ./integration-tests/ccip-tests && go test -timeout 1h -count=1 -json -run ${{ matrix.product.run }} ./load 2>&1 | tee /tmp/gotest.log | gotestloghelper -ci - test_download_vendor_packages_command: cd ./integration-tests && go mod download - # Load default test secrets - test_secrets_defaults_base64: ${{ secrets.CCIP_DEFAULT_TEST_SECRETS }} - cl_repo: ${{ env.CHAINLINK_IMAGE }} - cl_image_tag: ${{ github.sha }} - aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - artifacts_name: ${{ matrix.product.name }}${{ matrix.product.tag_suffix }}-test-logs - artifacts_location: | - ./integration-tests/ccip-tests/load/tmp_*.json - ./integration-tests/ccip-tests/load/logs/* - publish_check_name: ${{ matrix.product.name }} - token: ${{ secrets.GITHUB_TOKEN }} - go_mod_path: ./integration-tests/go.mod - cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }} - cache_restore_only: "true" - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} - should_tidy: "false" - should_cleanup: "true" - triggered_by: ${{ needs.run-test-with-last-release.outputs.triggered_by }} diff --git a/.github/workflows/certora.yml b/.github/workflows/certora.yml new file mode 100644 index 0000000000..64e2f32053 --- /dev/null +++ b/.github/workflows/certora.yml @@ -0,0 +1,43 @@ +name: certora + +on: push + +jobs: + verify: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + with: + submodules: recursive + + - name: Install python + uses: actions/setup-python@v2 + with: { python-version: 3.9 } + + - name: Install java + uses: actions/setup-java@v1 + with: { java-version: '11', java-package: jre } + + - name: Install certora cli + run: pip install certora-cli==7.20.3 + + - name: Install solc + run: | + wget https://github.com/ethereum/solidity/releases/download/v0.8.24/solc-static-linux + chmod +x solc-static-linux + sudo mv solc-static-linux /usr/local/bin/solc8.24 + + - name: Verify rule ${{ matrix.rule }} + run: | + echo "key length" ${#CERTORAKEY} + certoraRun certora/confs/${{ matrix.rule }} + env: + CERTORAKEY: ${{ secrets.CERTORAKEY }} + + strategy: + fail-fast: false + max-parallel: 16 + matrix: + rule: + - ccip.conf diff --git a/.github/workflows/chain-selectors-check.yml b/.github/workflows/chain-selectors-check.yml deleted file mode 100644 index 633388b319..0000000000 --- a/.github/workflows/chain-selectors-check.yml +++ /dev/null @@ -1,39 +0,0 @@ -name: Chain Selectors Version Check - -on: - push: - branches: - - ccip-develop - - release/* - tags: - - v* - pull_request: - branches: - - release/* - - -jobs: - verify-version: - runs-on: ubuntu-latest - steps: - - name: Checkout Repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - - name: Setup Go - uses: ./.github/actions/setup-go - with: - only-modules: true - go-version-file: "go.mod" - - - name: Get chain-selectors version - id: get-chain-selectors-version - shell: bash - env: - GH_TOKEN: 
${{ github.token }} - run: | - current_chain_selector_version=$(go list -m -f '{{.Version}}' github.com/smartcontractkit/chain-selectors) - latest_chain_selector_version=$(gh release view -R smartcontractkit/chain-selectors --json tagName --jq '.tagName') - if [[ "$current_chain_selector_version" != "$latest_chain_selector_version" ]]; then - echo "::error:: Chain Selectors version mismatch. Current version: $current_chain_selector_version, Latest version: $latest_chain_selector_version" - exit 1 - fi diff --git a/.github/workflows/changeset.yml b/.github/workflows/changeset.yml deleted file mode 100644 index 06c9ea6ab7..0000000000 --- a/.github/workflows/changeset.yml +++ /dev/null @@ -1,173 +0,0 @@ -# -# This action checks PRs to see if any changeset files were added in the PR when core files were changed. -# If none were, it will add a comment in the PR asking to run the changeset command to generate a changeset file. -# -name: Changeset - -on: - -jobs: - changeset: - env: - TAGS: | - - `#added` For any new functionality added. - - `#breaking_change` For any functionality that requires manual action for the node to boot. - - `#bugfix` For bug fixes. - - `#changed` For any change to the existing functionality. - - `#db_update` For any feature that introduces updates to database schema. - - `#deprecation_notice` For any upcoming deprecation of functionality. - - `#internal` For changesets that need to be excluded from the final changelog. - - `#nops` For any feature that is NOP-facing and needs to be in the official Release Notes for the release. - - `#removed` For any functionality/config that is removed. - - `#updated` For any functionality that is updated. - - `#wip` For any change that is not ready yet and external communication about it should be held off till it is feature complete. - - # For security reasons, GITHUB_TOKEN is read-only on forks, so we cannot leave comments on PRs. - # This check skips the job if it detects that we are running on a fork.
- if: ${{ github.event.pull_request.head.repo.full_name == 'smartcontractkit/ccip' }} - name: Changeset checker - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 - id: files-changed - with: - token: ${{ secrets.GITHUB_TOKEN }} - predicate-quantifier: every - list-files: shell - filters: | - shared: - - common/** - - '!common/**/*_test.go' - - plugins/** - - '!plugins/**/*_test.go' - core: - - core/** - - '!core/**/*_test.go' - - '!core/**/*.md' - - '!core/**/*.json' - - '!core/chainlink.goreleaser.Dockerfile' - - '!core/chainlink.Dockerfile' - core-changeset: - - added: '.changeset/**' - - - name: Check for changeset tags for core - id: changeset-tags - if: ${{ steps.files-changed.outputs.core-changeset == 'true' }} - shell: bash - run: bash ./.github/scripts/check-changeset-tags.sh ${{ steps.files-changed.outputs.core-changeset_files }} - - - name: Setup pnpm - uses: pnpm/action-setup@a3252b78c470c02df07e9d59298aecedc3ccdd6d # v3.0.0 - if: ${{ steps.files-changed.outputs.core == 'true' || steps.files-changed.outputs.shared == 'true' }} - with: - version: ^9.0.0 - - - name: Setup node - uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 - if: ${{ steps.files-changed.outputs.core == 'true' || steps.files-changed.outputs.shared == 'true' }} - with: - node-version: 20 - cache: pnpm - cache-dependency-path: ./pnpm-lock.yaml - - - name: Get next chainlink core version - id: chainlink-version - if: ${{ steps.files-changed.outputs.core == 'true' || steps.files-changed.outputs.shared == 'true' }} - run: | - pnpm install && pnpm changeset version - echo "chainlink_version=$(jq -r '.version' package.json)" >> $GITHUB_OUTPUT - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - # we need to set the top level directory for the jira-tracing action manually - # because now we are working with two repositories and automatic detection would - # select the repository with jira-tracing and not the chainlink repository - - name: Setup git top level directory - id: find-git-top-level-dir - run: echo "top_level_dir=$(pwd)" >> $GITHUB_OUTPUT - - - name: Checkout .Github repository - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - repository: smartcontractkit/.github - ref: 9aed33e5298471f20a3d630d711b96ae5538728c # jira-tracing@0.2.0 - path: ./dot_github - - - name: Update Jira ticket for core - id: jira - if: ${{ steps.files-changed.outputs.core == 'true' || steps.files-changed.outputs.shared == 'true' }} - shell: bash - working-directory: ./dot_github - run: | - echo "COMMIT_MESSAGE=$(git log -1 --pretty=format:'%s')" >> $GITHUB_ENV - pnpm install --filter jira-tracing && pnpm --filter jira-tracing issue:update - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - JIRA_HOST: ${{ vars.JIRA_HOST }} - JIRA_USERNAME: ${{ secrets.JIRA_USERNAME }} - JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }} - GIT_TOP_LEVEL_DIR: ${{ steps.find-git-top-level-dir.outputs.top_level_dir }} - CHAINLINK_VERSION: ${{ steps.chainlink-version.outputs.chainlink_version }} - PR_TITLE: ${{ github.event.pull_request.title }} - BRANCH_NAME: ${{ github.event.pull_request.head.ref }} - - - name: Make a comment - uses: thollander/actions-comment-pull-request@fabd468d3a1a0b97feee5f6b9e499eab0dd903f6 # v2.5.0 - if: ${{ steps.files-changed.outputs.core == 'true' || steps.files-changed.outputs.shared == 'true' }} - env: - 
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - JIRA_COMMENT: ${{ steps.jira.outputs.jiraComment }} - with: - message: | - I see you updated files related to `core`. Please run `pnpm changeset` in the root directory to add a changeset as well as in the text include at least one of the following tags: - ${{ env.TAGS }} - ${{ env.JIRA_COMMENT }} - reactions: eyes - comment_tag: changeset-core - mode: ${{ steps.files-changed.outputs.core-changeset == 'false' && 'upsert' || 'delete' }} - create_if_not_exists: ${{ steps.files-changed.outputs.core-changeset == 'false' && 'true' || 'false' }} - - - name: Check for new changeset for core - if: ${{ (steps.files-changed.outputs.core == 'true' || steps.files-changed.outputs.shared == 'true') && steps.files-changed.outputs.core-changeset == 'false' }} - shell: bash - run: | - echo "Please run pnpm changeset to add a changeset for core and include in the text at least one tag." - exit 1 - - - name: Make a comment - uses: thollander/actions-comment-pull-request@fabd468d3a1a0b97feee5f6b9e499eab0dd903f6 # v2.5.0 - if: ${{ steps.files-changed.outputs.core-changeset == 'true' }} - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - JIRA_COMMENT: ${{ steps.jira.outputs.jiraComment }} - with: - message: | - I see you added a changeset file but it does not contain a tag. Please edit the text include at least one of the following tags: - ${{ env.TAGS }} - ${{ env.JIRA_COMMENT }} - reactions: eyes - comment_tag: changeset-core-tags - mode: ${{ steps.changeset-tags.outputs.has_tags == 'false' && 'upsert' || 'delete' }} - create_if_not_exists: ${{ steps.changeset-tags.outputs.has_tags == 'false' && 'true' || 'false' }} - - - name: Check for new changeset tags for core - if: ${{ steps.files-changed.outputs.core-changeset == 'true' && steps.changeset-tags.outputs.has_tags == 'false' }} - shell: bash - run: | - echo "Please include at least one tag in the core changeset file" - exit 1 - - - name: Collect Metrics - if: always() - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: chainlink-changesets - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Changeset checker - continue-on-error: true diff --git a/.github/workflows/changesets-preview-pr.yml b/.github/workflows/changesets-preview-pr.yml deleted file mode 100644 index 94dc1635c4..0000000000 --- a/.github/workflows/changesets-preview-pr.yml +++ /dev/null @@ -1,67 +0,0 @@ -# -# This action creates or updates a Release Preview PR that shows which changes are going to be part of the next release. 
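Because the preview PR is essentially the output of the changesets tooling, the same diff can be previewed locally before anything is pushed. A minimal sketch, assuming a checkout with pending .changeset entries and pnpm installed; changeset status/version are standard @changesets/cli commands, while the repo's own changelog formatting script is what the job below actually invokes:

pnpm install
pnpm changeset status     # list pending changesets and the computed version bump
pnpm changeset version    # apply them to package.json and CHANGELOG.md locally
git diff package.json CHANGELOG.md   # roughly what the preview PR will contain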
-# - -name: Release Preview - Changeset - -on: - push: - branches: - - develop - -jobs: - changesets-release-preview: - runs-on: ubuntu-latest - permissions: - contents: write - pull-requests: write - steps: - - name: Checkout repository - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 - id: change - with: - token: ${{ secrets.GITHUB_TOKEN }} - filters: | - core-changeset: - - '.changeset/**' - - - name: Setup pnpm - uses: pnpm/action-setup@a3252b78c470c02df07e9d59298aecedc3ccdd6d # v3.0.0 - if: steps.change.outputs.core-changeset == 'true' - with: - version: ^9.0.0 - - - name: Setup node - uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 - if: steps.change.outputs.core-changeset == 'true' - with: - node-version: 20 - cache: pnpm - cache-dependency-path: ./pnpm-lock.yaml - - - name: Generate new changelog - if: steps.change.outputs.core-changeset == 'true' - id: changelog - run: pnpm install && ./tools/ci/format_changelog - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Create release preview PR - if: steps.change.outputs.core-changeset == 'true' - uses: peter-evans/create-pull-request@6d6857d36972b65feb161a90e484f2984215f83e # v6.0.5 - with: - git-token: ${{ secrets.GITHUB_TOKEN }} - add-paths: | - .changeset/** - CHANGELOG.md - commit-message: "changeset: release preview" - committer: app-token-issuer-releng[bot] - branch: changesets/release-preview - title: "[DO NOT MERGE] Changeset Release Preview - v${{ steps.changelog.outputs.version }}" - body: ${{ steps.changelog.outputs.pr_body }} - draft: true - labels: | - release-preview - do-not-merge diff --git a/.github/workflows/ci-core.yml b/.github/workflows/ci-core.yml deleted file mode 100644 index d7284d2f48..0000000000 --- a/.github/workflows/ci-core.yml +++ /dev/null @@ -1,441 +0,0 @@ -name: CI Core -run-name: CI Core ${{ inputs.distinct_run_name && inputs.distinct_run_name || '' }} - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }}-${{ inputs.distinct_run_name }} - cancel-in-progress: true - -# Run on key branches to make sure integration is good, otherwise run on all PR's -on: - push: - branches: - - develop - - ccip-develop - - "release/*" - - "ccip-release/*" - merge_group: - pull_request: - schedule: - - cron: "0 0 * * *" - workflow_dispatch: - inputs: - distinct_run_name: - description: "A unique identifier for this run, used when running from other repos" - required: false - type: string - evm-ref: - description: The chainlink-evm reference to use when testing against a specific version for compatibliity - required: false - default: "" - type: string - -jobs: - filter: # No need to run core tests if there are only changes to the integration-tests - name: Detect Changes - permissions: - pull-requests: read - outputs: - changes: ${{ steps.ignore-filter.outputs.changes || steps.changes.outputs.changes }} - runs-on: ubuntu-latest - steps: - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - repository: smartcontractkit/ccip - - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 - id: changes - with: - filters: | - changes: - - 'integration-tests/deployment/**' - - '!integration-tests/**' - - 'integration-tests/deployment/**' - - name: Ignore Filter On Workflow Dispatch - if: ${{ github.event_name == 'workflow_dispatch' }} - id: ignore-filter - run: echo "changes=true" >> $GITHUB_OUTPUT - - 
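The filter job above only gates the rest of the pipeline, and roughly the same question can be answered locally with git and grep. A rough sketch, assuming $BASE holds the target branch ref; dorny/paths-filter evaluates its glob patterns with more nuance than this approximation:

changed=$(git diff --name-only "$BASE"...HEAD)
# core tests are needed if anything outside integration-tests/ changed,
# or if integration-tests/deployment/ changed (the explicit exception in the filter above)
if [ -n "$changed" ] && { echo "$changed" | grep -qvE '^integration-tests/' \
  || echo "$changed" | grep -qE '^integration-tests/deployment/'; }; then
  echo "changes=true"
fi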
golangci: - # We don't directly merge dependabot PRs, so let's not waste the resources - if: ${{ (github.event_name == 'pull_request' || github.event_name == 'schedule') && github.actor != 'dependabot[bot]' }} - name: lint - permissions: - # For golangci-lint-actions to annotate code in the PR. - checks: write - contents: read - # For golangci-lint-action's `only-new-issues` option. - pull-requests: read - runs-on: ubuntu22.04-8cores-32GB - needs: [filter] - steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - name: Golang Lint - uses: ./.github/actions/golangci-lint - if: ${{ needs.filter.outputs.changes == 'true' }} - with: - id: core - name: lint - gc-basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - gc-host: ${{ secrets.GRAFANA_INTERNAL_HOST }} - gc-org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - - - name: Notify Slack - if: ${{ failure() && github.event.schedule != '' }} - uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0 - env: - SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} - with: - channel-id: "#team-core" - slack-message: "golangci-lint failed: \n${{ format('https://github.com/{0}/actions/runs/{1}', github.repository, github.run_id) }}" - - core: - env: - # We explicitly have this env var not be "CL_DATABASE_URL" to avoid having it be used by core related tests - # when they should not be using it, while still allowing us to DRY up the setup - DB_URL: postgresql://postgres:postgres@localhost:5432/chainlink_test?sslmode=disable - strategy: - fail-fast: false - matrix: - type: - - cmd: go_core_tests - id: core_unit - os: ubuntu22.04-32cores-128GB - printResults: true - - cmd: go_core_race_tests - id: core_race - # use 64cores for overnight runs only due to massive number of runs from PRs - os: ${{ github.event_name == 'schedule' && 'ubuntu-latest-64cores-256GB' || 'ubuntu-latest-32cores-128GB' }} - - cmd: go_core_fuzz - id: core_fuzz - os: ubuntu22.04-8cores-32GB - name: Core Tests (${{ matrix.type.cmd }}) - # We don't directly merge dependabot PRs, so let's not waste the resources - if: github.actor != 'dependabot[bot]' - needs: [filter] - runs-on: ${{ matrix.type.os }} - permissions: - id-token: write - contents: read - steps: - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - name: Setup node - if: ${{ needs.filter.outputs.changes == 'true' }} - uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 - - name: Setup NodeJS - if: ${{ needs.filter.outputs.changes == 'true' }} - uses: ./.github/actions/setup-nodejs - with: - prod: "true" - - name: Setup Go - if: ${{ needs.filter.outputs.changes == 'true' }} - uses: ./.github/actions/setup-go - - name: Replace chainlink-evm deps - if: ${{ needs.filter.outputs.changes == 'true' && inputs.evm-ref != ''}} - shell: bash - run: go get github.com/smartcontractkit/chainlink-integrations/evm/relayer@${{ inputs.evm-ref }} - - name: Setup Solana - if: ${{ needs.filter.outputs.changes == 'true' }} - uses: ./.github/actions/setup-solana - - name: Setup wasmd - if: ${{ needs.filter.outputs.changes == 'true' }} - uses: ./.github/actions/setup-wasmd - - name: Setup Postgres - if: ${{ needs.filter.outputs.changes == 'true' }} - uses: ./.github/actions/setup-postgres - - name: Touching core/web/assets/index.html - if: ${{ needs.filter.outputs.changes == 'true' }} - run: mkdir -p core/web/assets && touch core/web/assets/index.html - - name: Download Go vendor packages - if: ${{ 
needs.filter.outputs.changes == 'true' }} - run: go mod download - - name: Build binary - if: ${{ needs.filter.outputs.changes == 'true' }} - run: go build -o chainlink.test . - - name: Setup DB - if: ${{ needs.filter.outputs.changes == 'true' }} - run: ./chainlink.test local db preparetest - env: - CL_DATABASE_URL: ${{ env.DB_URL }} - - name: Install LOOP Plugins - if: ${{ needs.filter.outputs.changes == 'true' }} - run: | - pushd $(go list -m -f "{{.Dir}}" github.com/smartcontractkit/chainlink-feeds) - go install ./cmd/chainlink-feeds - popd - pushd $(go list -m -f "{{.Dir}}" github.com/smartcontractkit/chainlink-data-streams) - go install ./mercury/cmd/chainlink-mercury - popd - pushd $(go list -m -f "{{.Dir}}" github.com/smartcontractkit/chainlink-solana) - go install ./pkg/solana/cmd/chainlink-solana - popd - pushd $(go list -m -f "{{.Dir}}" github.com/smartcontractkit/chainlink-starknet/relayer) - go install ./pkg/chainlink/cmd/chainlink-starknet - popd - - name: Increase Race Timeout - if: ${{ github.event.schedule != '' && needs.filter.outputs.changes == 'true' }} - run: | - echo "TIMEOUT=10m" >> $GITHUB_ENV - echo "COUNT=50" >> $GITHUB_ENV - - name: Install gotestloghelper - if: ${{ needs.filter.outputs.changes == 'true' }} - run: go install github.com/smartcontractkit/chainlink-testing-framework/tools/gotestloghelper@v1.1.1 - - name: Run tests - if: ${{ needs.filter.outputs.changes == 'true' }} - id: run-tests - env: - OUTPUT_FILE: ./output.txt - USE_TEE: false - CL_DATABASE_URL: ${{ env.DB_URL }} - run: ./tools/bin/${{ matrix.type.cmd }} ./... - - name: Print Filtered Test Results - if: ${{ failure() && needs.filter.outputs.changes == 'true' && steps.run-tests.conclusion == 'failure' }} - run: | - if [[ "${{ matrix.type.printResults }}" == "true" ]]; then - cat output.txt | gotestloghelper -ci - fi - - name: Print Races - id: print-races - if: ${{ failure() && matrix.type.cmd == 'go_core_race_tests' && needs.filter.outputs.changes == 'true' }} - run: | - find race.* | xargs cat > race.txt - if [[ -s race.txt ]]; then - cat race.txt - echo "post_to_slack=true" >> $GITHUB_OUTPUT - else - echo "post_to_slack=false" >> $GITHUB_OUTPUT - fi - echo "github.event_name: ${{ github.event_name }}" - echo "github.ref: ${{ github.ref }}" - - name: Print postgres logs - if: ${{ always() && needs.filter.outputs.changes == 'true' }} - run: docker compose logs postgres | tee ../../../postgres_logs.txt - working-directory: ./.github/actions/setup-postgres - - name: Store logs artifacts - if: ${{ needs.filter.outputs.changes == 'true' && always() }} - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 - with: - name: ${{ matrix.type.cmd }}_logs - path: | - ./output.txt - ./output-short.txt - ./race.* - ./coverage.txt - ./postgres_logs.txt - - name: Notify Slack - if: ${{ failure() && steps.print-races.outputs.post_to_slack == 'true' && matrix.type.cmd == 'go_core_race_tests' && (github.event_name == 'merge_group' || github.event.branch == 'ccip-develop') && needs.filter.outputs.changes == 'true' }} - uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0 - env: - SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} - with: - channel-id: "#topic-data-races" - slack-message: "Race tests failed: \n${{ format('https://github.com/{0}/actions/runs/{1}', github.repository, github.run_id) }}" - - name: Collect Path Output - id: collect-path-output - env: - MATRIX_ID: ${{ matrix.type.id }} - run: | - # 
only push the test result file for the unit tests - if [[ "$MATRIX_ID" == "core_unit" ]]; then - resultsFile='{"testType":"go","filePath":"./output.txt"}' - echo "path_output=${resultsFile}" >> $GITHUB_OUTPUT - fi - - name: Collect Metrics - if: ${{ needs.filter.outputs.changes == 'true' && always() }} - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: ${{ matrix.type.id }} - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Core Tests (${{ matrix.type.cmd }}) - test-results-file: ${{ steps.collect-path-output.outputs.path_output }} - test-results-batch-split-size: "524288" # 512KB - continue-on-error: true - - detect-flakey-tests: - needs: [filter, core] - name: Flakey Test Detection - runs-on: ubuntu-latest - if: ${{ github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }} - env: - CL_DATABASE_URL: postgresql://postgres:postgres@localhost:5432/chainlink_test?sslmode=disable - permissions: - id-token: write - contents: read - steps: - - name: Checkout the repo - uses: actions/checkout@v4.2.1 - - name: Setup node - uses: actions/setup-node@v4.0.4 - - name: Setup NodeJS - uses: ./.github/actions/setup-nodejs - with: - prod: "true" - - name: Setup Go - uses: ./.github/actions/setup-go - - name: Setup Postgres - uses: ./.github/actions/setup-postgres - - name: Touching core/web/assets/index.html - run: mkdir -p core/web/assets && touch core/web/assets/index.html - - name: Download Go vendor packages - run: go mod download - - name: Replace chainlink-evm deps - if: ${{ github.event_name == 'workflow_dispatch' && inputs.evm-ref != ''}} - shell: bash - run: go get github.com/smartcontractkit/chainlink-integrations/evm/relayer@${{ inputs.evm-ref }} - - name: Build binary - run: go build -o chainlink.test . 
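The core matrix job's "Run tests" step above and the flakey-test runner below both drive ./tools/bin/go_core_tests against the prepared test database; a rough local-reproduction sketch under the same assumptions (default Postgres credentials and a built ./chainlink.test binary, both taken from this workflow):

    # Assumes a local Postgres matching the workflow defaults and a prior `go build -o chainlink.test .`
    export CL_DATABASE_URL="postgresql://postgres:postgres@localhost:5432/chainlink_test?sslmode=disable"
    ./chainlink.test local db preparetest           # same DB preparation the job performs
    export OUTPUT_FILE=./output.txt USE_TEE=false   # env the "Run tests" step sets
    ./tools/bin/go_core_tests ./...                 # same entrypoint the CI step invokes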
- - name: Setup DB - run: ./chainlink.test local db preparetest - - name: Load test outputs - uses: actions/download-artifact@v4.1.8 - with: - name: go_core_tests_logs - path: ./artifacts - - name: Delete go_core_tests_logs/coverage.txt - shell: bash - run: | - # Need to delete coverage.txt so the disk doesn't fill up - rm -f ./artifacts/go_core_tests_logs/coverage.txt - - name: Build flakey test runner - run: go build ./tools/flakeytests/cmd/runner - - name: Re-run tests - env: - GRAFANA_INTERNAL_BASIC_AUTH: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - GRAFANA_INTERNAL_HOST: ${{ secrets.GRAFANA_INTERNAL_HOST }} - GITHUB_EVENT_PATH: ${{ github.event_path }} - GITHUB_EVENT_NAME: ${{ github.event_name }} - GITHUB_REPO: ${{ github.repository }} - GITHUB_RUN_ID: ${{ github.run_id }} - run: | - ./runner \ - -grafana_auth=$GRAFANA_INTERNAL_BASIC_AUTH \ - -grafana_host=$GRAFANA_INTERNAL_HOST \ - -gh_sha=$GITHUB_SHA \ - -gh_event_path=$GITHUB_EVENT_PATH \ - -gh_event_name=$GITHUB_EVENT_NAME \ - -gh_run_id=$GITHUB_RUN_ID \ - -gh_repo=$GITHUB_REPO \ - -command=./tools/bin/go_core_tests \ - `ls -R ./artifacts/output.txt` - - name: Store logs artifacts - if: ${{ always() }} - uses: actions/upload-artifact@v4.4.3 - with: - name: flakey_test_runner_logs - path: | - ./output.txt - retention-days: 7 - - scan: - name: SonarQube Scan - needs: [core] - if: ${{ always() && github.actor != 'dependabot[bot]' }} - runs-on: ubuntu-latest - steps: - - name: Checkout the repo - uses: actions/checkout@v4.2.1 - with: - fetch-depth: 0 # fetches all history for all tags and branches to provide more metadata for sonar reports - - name: Download all workflow run artifacts - uses: actions/download-artifact@v4.1.8 - - - name: Check and Set SonarQube Report Paths - shell: bash - run: | - # Check and assign paths for coverage/test reports - if [ -d "go_core_tests_logs" ]; then - sonarqube_coverage_report_paths=$(find go_core_tests_logs -name coverage.txt | paste -sd "," -) - sonarqube_tests_report_paths=$(find go_core_tests_logs -name output.txt | paste -sd "," -) - else - sonarqube_coverage_report_paths="" - sonarqube_tests_report_paths="" - fi - - # Check and assign paths for lint reports - if [ -d "golangci-lint-report" ]; then - sonarqube_lint_report_paths=$(find golangci-lint-report -name golangci-lint-report.xml | paste -sd "," -) - else - sonarqube_lint_report_paths="" - fi - - ARGS="" - if [[ -z "$sonarqube_tests_report_paths" ]]; then - echo "::warning::No test report paths found, will not pass to sonarqube" - else - echo "Found test report paths: $sonarqube_tests_report_paths" - ARGS="$ARGS -Dsonar.go.tests.reportPaths=$sonarqube_tests_report_paths" - fi - - if [[ -z "$sonarqube_coverage_report_paths" ]]; then - echo "::warning::No coverage report paths found, will not pass to sonarqube" - else - echo "Found coverage report paths: $sonarqube_coverage_report_paths" - ARGS="$ARGS -Dsonar.go.coverage.reportPaths=$sonarqube_coverage_report_paths" - fi - - if [[ -z "$sonarqube_lint_report_paths" ]]; then - echo "::warning::No lint report paths found, will not pass to sonarqube" - else - echo "Found lint report paths: $sonarqube_lint_report_paths" - ARGS="$ARGS -Dsonar.go.golangci-lint.reportPaths=$sonarqube_lint_report_paths" - fi - - echo "Final SONARQUBE_ARGS: $ARGS" - echo "SONARQUBE_ARGS=$ARGS" >> $GITHUB_ENV - - - name: SonarQube Scan - if: ${{ env.SONARQUBE_ARGS != '' }} - uses: sonarsource/sonarqube-scan-action@aecaf43ae57e412bd97d70ef9ce6076e672fe0a9 # v2.3.0 - with: - args: ${{ env.SONARQUBE_ARGS }} - env: - 
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} - SONAR_HOST_URL: ${{ secrets.SONAR_HOST_URL }} - SONAR_SCANNER_OPTS: "-Xms6g -Xmx8g" - clean: - name: Clean Go Tidy & Generate - if: ${{ !contains(join(github.event.pull_request.labels.*.name, ' '), 'skip-smoke-tests') && github.actor != 'dependabot[bot]' }} - runs-on: ubuntu22.04-8cores-32GB - defaults: - run: - shell: bash - steps: - - name: Check for Skip Tests Label - if: contains(join(github.event.pull_request.labels.*.name, ' '), 'skip-smoke-tests') - run: | - echo "## \`skip-smoke-tests\` label is active, skipping E2E smoke tests" >>$GITHUB_STEP_SUMMARY - exit 0 - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - fetch-depth: 0 - - name: Setup Go - uses: ./.github/actions/setup-go - with: - only-modules: "true" - - name: Install protoc-gen-go-wsrpc - run: curl https://github.com/smartcontractkit/wsrpc/raw/main/cmd/protoc-gen-go-wsrpc/protoc-gen-go-wsrpc --output $HOME/go/bin/protoc-gen-go-wsrpc && chmod +x $HOME/go/bin/protoc-gen-go-wsrpc - - name: Setup NodeJS - uses: ./.github/actions/setup-nodejs - - name: make generate - run: | - make rm-mocked - make generate - - name: Ensure clean after generate - run: git diff --stat --exit-code - - run: make gomodtidy - - name: Ensure clean after tidy - run: git diff --minimal --exit-code - - name: Collect Metrics - if: always() - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: ci-core-generate - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Clean Go Tidy & Generate - continue-on-error: true diff --git a/.github/workflows/ci-protobuf.yml b/.github/workflows/ci-protobuf.yml deleted file mode 100644 index d832939ded..0000000000 --- a/.github/workflows/ci-protobuf.yml +++ /dev/null @@ -1,36 +0,0 @@ -name: CI ProtoBuf - -on: - pull_request: - -jobs: - buf-breaking: - runs-on: ubuntu-latest - steps: - - name: Checkout repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - - name: Setup buf - uses: bufbuild/buf-setup-action@35c243d7f2a909b1d4e40399b348a7fdab27d78d # v1.34.0 - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - - - name: Run buf breaking - uses: bufbuild/buf-breaking-action@c57b3d842a5c3f3b454756ef65305a50a587c5ba # v1.1.4 - env: - REPO_URL: https://github.com/${{ github.repository }} - BASE_BRANCH: ${{ github.base_ref }} - with: - against: "${REPO_URL}.git#branch=${BASE_BRANCH}" - - - name: Collect Metrics - if: always() - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: ci-protobuf - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: buf-breaking - continue-on-error: true diff --git a/.github/workflows/ci-scripts.yml b/.github/workflows/ci-scripts.yml deleted file mode 100644 index 8ca66332f1..0000000000 --- a/.github/workflows/ci-scripts.yml +++ /dev/null @@ -1,56 +0,0 @@ -name: CI Scripts - -on: - merge_group: - pull_request: - -jobs: - lint-scripts: - # We don't directly merge dependabot PRs, so let's not waste the resources - if: ${{ (github.event_name == 'pull_request' || github.event_name == 'schedule') && github.actor != 'dependabot[bot]' }} - runs-on: ubuntu-latest - permissions: - # 
For golangci-lint-actions to annotate code in the PR. - checks: write - contents: read - # For golangci-lint-action's `only-new-issues` option. - pull-requests: read - steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - name: Golang Lint - uses: ./.github/actions/golangci-lint - with: - id: scripts - name: lint-scripts - version: v1.56 - go-directory: core/scripts/ccip - go-version-file: core/scripts/go.mod - go-module-file: core/scripts/go.sum - gc-basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - gc-host: ${{ secrets.GRAFANA_INTERNAL_HOST }} - gc-org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - - test-scripts: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - name: Setup Go - uses: ./.github/actions/setup-go - with: - go-version-file: core/scripts/go.mod - go-module-file: core/scripts/go.sum - - name: Run Tests - shell: bash - working-directory: core/scripts/ccip - run: go test ./... - - name: Collect Metrics - if: always() - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: ci-test-scripts - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: test-scripts - continue-on-error: true diff --git a/.github/workflows/client-compatibility-tests.yml b/.github/workflows/client-compatibility-tests.yml deleted file mode 100644 index 2e27f49a39..0000000000 --- a/.github/workflows/client-compatibility-tests.yml +++ /dev/null @@ -1,892 +0,0 @@ -name: Client Compatibility Tests -on: - schedule: - - cron: "30 5 * * TUE,FRI" # Run every Tuesday and Friday at midnight + 30min EST - push: - tags: - - "*" - merge_group: - pull_request: - workflow_dispatch: - inputs: - chainlinkVersion: - description: commit SHA or tag of the Chainlink version to test - required: false - type: string - evmImplementations: - description: comma separated list of EVM implementations to test (ignored if base64TestList is used); supports geth,besu,nethermind,erigon,reth - required: true - type: string - default: "geth,besu,nethermind,erigon,reth" - latestVersionsNumber: - description: how many of latest images of EVM implementations to test with (ignored if base64TestList is used) - required: true - type: number - default: 3 - base64TestList: - description: base64 encoded list of tests to run (same as base64-ed output of testlistgenerator tool) - required: false - type: string - -env: - CHAINLINK_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink-ccip - INTERNAL_DOCKER_REPO: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com - MOD_CACHE_VERSION: 2 - -concurrency: - group: ${{ github.ref }}-${{ github.repository }}-${{ github.event_name }}--evm-compatibility-tests - cancel-in-progress: true - -jobs: - # Build Test Dependencies - - check-dependency-bump: - name: Check for go-ethereum dependency bump - if: github.event_name == 'pull_request' || github.event_name == 'merge_queue' - runs-on: ubuntu-latest - outputs: - dependency_changed: ${{ steps.changes.outputs.dependency_changed }} - steps: - - name: Checkout code - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - fetch-depth: 0 - - name: Check for go.mod changes - id: changes - run: | - if [ -z "${{ github.base_ref }}" ]; then - echo "No base 
branch found, this should not happen in a PR or MQ. Please reach out to the Test Tooling team." - echo "Github event that triggered the workflow: $GITHUB_EVENT_NAME" - echo "Github ref that triggered the workflow: $GITHUB_REF" - exit 1 - fi - git fetch origin ${{ github.base_ref }} - # if no match is found then grep exits with code 1, but if there is a match it exits with code 0 - # this will return a match if there are any changes on that corresponding line, for example if spacing was changed - DEPENDENCY_CHANGED=$(git diff -U0 origin/${{ github.base_ref }}...HEAD -- go.mod | grep -q 'github.com/ethereum/go-ethereum'; echo $?) - PR_VERSION=$(grep 'github.com/ethereum/go-ethereum' go.mod | awk '{print $2}') - - # here 0 means a match was found, 1 means no match was found - if [ "$DEPENDENCY_CHANGED" -eq 0 ]; then - # Dependency was changed in the PR, now compare with the base branch - git fetch origin ${{ github.base_ref }} - BASE_VERSION=$(git show origin/${{ github.base_ref }}:go.mod | grep 'github.com/ethereum/go-ethereum' | awk '{print $2}') - - echo "Base branch version: $BASE_VERSION" - echo "PR branch version: $PR_VERSION" - - echo "Dependency version changed in the PR compared to the base branch." - echo "dependency_changed=true" >> $GITHUB_OUTPUT - else - echo "No changes to ethereum/go-ethereum dependency in the PR." - echo "PR branch version: $PR_VERSION" - echo "dependency_changed=false" >> $GITHUB_OUTPUT - fi - - should-run: - if: always() - name: Check if the job should run - needs: check-dependency-bump - runs-on: ubuntu-latest - outputs: - should_run: ${{ steps.should-run.outputs.should_run }} - eth_implementations: ${{ steps.should-run.outputs.eth_implementations }} - env: - GITHUB_REF_TYPE: ${{ github.ref_type }} - steps: - - name: Check if the job should run - id: should-run - run: | - if [ "${{ needs.check-dependency-bump.outputs.dependency_changed }}" == "true" ]; then - echo "## Build trigger" >> $GITHUB_STEP_SUMMARY - echo "go-ethereum dependency bump" >> $GITHUB_STEP_SUMMARY - echo "Will run tests, because go-ethereum dependency was bumped" - echo "should_run=true" >> $GITHUB_OUTPUT - elif [ "$GITHUB_EVENT_NAME" = "schedule" ]; then - echo "## Build trigger" >> $GITHUB_STEP_SUMMARY - echo "schedule" >> $GITHUB_STEP_SUMMARY - echo "Will run tests, because trigger event was $GITHUB_EVENT_NAME" - echo "should_run=true" >> $GITHUB_OUTPUT - elif [ "$GITHUB_EVENT_NAME" = "workflow_dispatch" ]; then - echo "Will run tests, because trigger event was $GITHUB_EVENT_NAME" - echo "should_run=true" >> $GITHUB_OUTPUT - elif [ "$GITHUB_REF_TYPE" = "tag" ]; then - echo "## Build trigger" >> $GITHUB_STEP_SUMMARY - echo "new tag" >> $GITHUB_STEP_SUMMARY - echo "Will run tests, because new tag was created" - echo "should_run=true" >> $GITHUB_OUTPUT - else - echo "Will not run tests" - echo "should_run=false" >> $GITHUB_OUTPUT - fi - - select-versions: - if: always() && needs.should-run.outputs.should_run == 'true' - name: Select Versions - needs: should-run - runs-on: ubuntu-latest - env: - RELEASED_DAYS_AGO: 4 - GITHUB_REF_TYPE: ${{ github.ref_type }} - outputs: - evm_implementations: ${{ steps.select-implementations.outputs.evm_implementations }} - chainlink_version: ${{ steps.select-chainlink-version.outputs.chainlink_version }} - chainlink_image_version: ${{ steps.select-chainlink-version.outputs.chainlink_image_version }} - latest_image_count: ${{ steps.get-image-count.outputs.image_count }} - chainlink_ref_path: ${{ 
steps.select-chainlink-version.outputs.cl_ref_path }} - steps: - # ghlatestreleasechecker is a tool to check if new release is available for a given repo - - name: Set Up ghlatestreleasechecker - shell: bash - run: | - go install github.com/smartcontractkit/chainlink-testing-framework/tools/ghlatestreleasechecker@v1.0.0 - - name: Select EVM implementations to test - id: select-implementations - run: | - PATH=$PATH:$(go env GOPATH)/bin - export PATH - - if [ "$GITHUB_EVENT_NAME" = "schedule" ]; then - echo "Checking for new releases" - implementations_arr=() - new_geth=$(ghlatestreleasechecker "ethereum/go-ethereum" $RELEASED_DAYS_AGO) - if [ "$new_geth" != "none" ]; then - echo "New geth release found: $new_geth" - implementations_arr+=("geth") - fi - new_besu=$(ghlatestreleasechecker "hyperledger/besu" $RELEASED_DAYS_AGO) - if [ "$new_besu" != "none" ]; then - echo "New besu release found: $new_besu" - implementations_arr+=("besu") - fi - new_erigon=$(ghlatestreleasechecker "ledgerwatch/erigon" $RELEASED_DAYS_AGO) - if [ "$new_erigon" != "none" ]; then - echo "New erigon release found: $new_erigon" - implementations_arr+=("erigon") - fi - new_nethermind=$(ghlatestreleasechecker "nethermindEth/nethermind" $RELEASED_DAYS_AGO) - if [ "$new_nethermind" != "none" ]; then - echo "New nethermind release found: $new_nethermind" - implementations_arr+=("nethermind") - fi - new_reth=$(ghlatestreleasechecker "paradigmxyz/reth" $RELEASED_DAYS_AGO) - if [ "$new_reth" != "none" ]; then - echo "New reth release found: $new_reth" - implementations_arr+=("reth") - fi - - IFS=',' - eth_implementations="${implementations_arr[*]}" - if [ -n "$eth_implementations" ]; then - echo "Found new releases for: $eth_implementations" - else - echo "No new releases found" - fi - echo "evm_implementations=$eth_implementations" >> $GITHUB_OUTPUT - elif [ "$GITHUB_EVENT_NAME" = "workflow_dispatch" ]; then - if [ -n "${{ github.event.inputs.base64TestList }}" ]; then - echo "Base64-ed Test Input provided, ignoring EVM implementations" - else - echo "Will test following EVM implementations: ${{ github.event.inputs.evmImplementations }}" - echo "evm_implementations=${{ github.event.inputs.evmImplementations }}" >> $GITHUB_OUTPUT - fi - else - echo "Will test all EVM implementations" - echo "evm_implementations=geth,besu,nethermind,erigon,reth" >> $GITHUB_OUTPUT - fi - - name: Select Chainlink version - id: select-chainlink-version - run: | - PATH=$PATH:$(go env GOPATH)/bin - export PATH - - if [ "$GITHUB_EVENT_NAME" = "schedule" ]; then - echo "Fetching latest Chainlink stable version" - implementations_arr=() - # we use 100 days since we really want the latest one, and it's highly improbable there won't be a release in last 100 days - chainlink_version=$(ghlatestreleasechecker "smartcontractkit/chainlink" 100) - chainlink_image_version=$chainlink_version - cl_ref_path="releases" - elif [ "$GITHUB_EVENT_NAME" = "workflow_dispatch" ]; then - echo "Fetching Chainlink version from input" - if [ -n "${{ github.event.inputs.chainlinkVersion }}" ]; then - echo "Chainlink version provided in input" - chainlink_version="${{ github.event.inputs.chainlinkVersion }}" - if [[ "$chainlink_version" =~ ^[0-9a-f]{40}$ ]]; then - cl_ref_path="commit" - chainlink_image_version=$chainlink_version - else - cl_ref_path="releases" - # strip the 'v' from the version, because we tag our Docker images without it - chainlink_image_version="${chainlink_version#v}" - fi - else - echo "Chainlink version not provided in input. 
Using latest commit SHA." - chainlink_version=${{ github.sha }} - chainlink_image_version=$chainlink_version - cl_ref_path="commit" - fi - elif [ "$GITHUB_EVENT_NAME" = "pull_request" ]; then - echo "Fetching Chainlink version from PR's head commit" - chainlink_version="${{ github.event.pull_request.head.sha }}" - chainlink_image_version=$chainlink_version - cl_ref_path="commit" - elif [ "$GITHUB_EVENT_NAME" = "merge_queue" ]; then - echo "Fetching Chainlink version from merge queue's head commit" - chainlink_version="${{ github.event.merge_group.head_sha }}" - chainlink_image_version=$chainlink_version - cl_ref_path="commit" - elif [ "$GITHUB_REF_TYPE" = "tag" ]; then - echo "Fetching Chainlink version from tag" - chainlink_version="${{ github.ref_name }}" - # strip the 'v' from the version, because we tag our Docker images without it - chainlink_image_version="${chainlink_version#v}" - cl_ref_path="releases" - else - echo "Unsupported trigger event. It's probably an issue with the pipeline definition. Please reach out to the Test Tooling team." - exit 1 - fi - echo "Will use following Chainlink version: $chainlink_version" - echo "chainlink_version=$chainlink_version" >> $GITHUB_OUTPUT - echo "Will use following Chainlink Docker image version: $chainlink_image_version" - echo "chainlink_image_version=$chainlink_image_version" >> $GITHUB_OUTPUT - echo "cl_ref_path=$cl_ref_path" >> $GITHUB_OUTPUT - - name: Get image count - id: get-image-count - run: | - if [ "$GITHUB_EVENT_NAME" = "workflow_dispatch" ]; then - echo "Fetching latest image count from input" - if [ -n "${{ github.event.inputs.base64TestList }}" ]; then - echo "Base64-ed Test Input provided, ignoring latest image count" - else - image_count="${{ github.event.inputs.latestVersionsNumber }}" - echo "image_count=$image_count" >> $GITHUB_OUTPUT - fi - else - echo "Fetching default latest image count" - image_count=3 - echo "image_count=$image_count" >> $GITHUB_OUTPUT - fi - echo "Will use following latest image count: $image_count" - - check-ecr-images-exist: - name: Check images used as test dependencies exist in ECR - if: always() && needs.should-run.outputs.should_run == 'true' && (needs.select-versions.outputs.evm_implementations != '' || github.event.inputs.base64TestList != '') - environment: integration - permissions: - id-token: write - contents: read - needs: [should-run] - runs-on: ubuntu-latest - strategy: - fail-fast: false - matrix: - mirror: - - name: ethereum/client-go - expression: '^(alltools-v|v)[0-9]\.[0-9]+\.[0-9]+$' - - name: hyperledger/besu - expression: '^[0-9]+\.[0-9]+(\.[0-9]+)?$' - page_size: 300 - - name: thorax/erigon - expression: '^v[0-9]+\.[0-9]+\.[0-9]+$' - - name: nethermind/nethermind - expression: '^[0-9]+\.[0-9]+\.[0-9]+$' - - name: tofelb/ethereum-genesis-generator - expression: '^[0-9]+\.[0-9]+\.[0-9]+(\-slots\-per\-epoch)?' 
- - name: ghcr.io/paradigmxyz/reth - expression: '^v[0-9]+\.[0-9]+\.[0-9]+$' - steps: - - name: Update internal ECR if the latest Ethereum client image does not exist - uses: smartcontractkit/chainlink-testing-framework/.github/actions/update-internal-mirrors@352cf299b529a33208146d9f7f0e0b5534fba6e7 # v1.33.0 - with: - aws_region: ${{ secrets.QA_AWS_REGION }} - role_to_assume: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - aws_account_number: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - image_name: ${{matrix.mirror.name}} - expression: ${{matrix.mirror.expression}} - page_size: ${{matrix.mirror.page_size}} - github_token: ${{ secrets.RETH_GH_TOKEN }} # needed only for checking GHRC.io repositories - - build-chainlink: - if: | - always() && - needs.should-run.outputs.should_run == 'true' && - ( - needs.select-versions.outputs.evm_implementations != '' || - github.event.inputs.base64TestList != '' - ) - environment: integration - permissions: - id-token: write - contents: read - name: Build Chainlink Image - runs-on: ubuntu-latest - needs: [should-run, select-versions] - steps: - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - ref: ${{ needs.select-versions.outputs.chainlink_version }} - - name: Build Chainlink Image - uses: ./.github/actions/build-chainlink-image - with: - tag_suffix: "" - dockerfile: core/chainlink.Dockerfile - # for tagged releases Docker image version is different from the Chainlink version (v2.13.0 -> 2.13.0) - # for all other cases (PRs, commits, etc.) Docker image version is the same as the Chainlink version - git_commit_sha: ${{ needs.select-versions.outputs.chainlink_image_version }} - check_image_exists: "true" - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - - name: Collect Metrics - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: client-compatablility-build-chainlink - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Build Chainlink Image - continue-on-error: true - - get-latest-available-images: - name: Get Latest EVM Implementation's Images - if: always() && needs.should-run.outputs.should_run == 'true' && needs.select-versions.outputs.evm_implementations != '' && github.event.inputs.base64TestList == '' - environment: integration - runs-on: ubuntu-latest - needs: [check-ecr-images-exist, should-run, select-versions] - permissions: - id-token: write - contents: read - env: - LATEST_IMAGE_COUNT: ${{ needs.select-versions.outputs.latest_image_count }} - outputs: - geth_images: ${{ env.GETH_IMAGES }} - nethermind_images: ${{ env.NETHERMIND_IMAGES }} - besu_images: ${{ env.BESU_IMAGES }} - erigon_images: ${{ env.ERIGON_IMAGES }} - reth_images: ${{ env.RETH_IMAGES }} - steps: - # Setup AWS creds - - name: Configure AWS Credentials - uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 - with: - aws-region: ${{ secrets.QA_AWS_REGION }} - role-to-assume: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - role-duration-seconds: 3600 - # Login to ECR - - name: Login to Amazon ECR - id: login-ecr - uses: aws-actions/amazon-ecr-login@062b18b96a7aff071d4dc91bc00c4c1a7945b076 # v2.0.1 - with: - mask-password: "true" - env: - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - # ecrimagefetcher is a tool to get latest images from ECR - - name: 
Set Up ecrimagefetcher - shell: bash - run: | - go install github.com/smartcontractkit/chainlink-testing-framework/tools/ecrimagefetcher@v1.0.1 - name: Get latest docker images from ECR - if: ${{ github.event.inputs.base64TestList == '' }} - env: - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - ETH_IMPLEMENTATIONS: ${{ needs.select-versions.outputs.evm_implementations }} - run: | - PATH=$PATH:$(go env GOPATH)/bin - export PATH - if [[ "$ETH_IMPLEMENTATIONS" == *"geth"* ]]; then - geth_images=$(ecrimagefetcher 'ethereum/client-go' '^v[0-9]+\.[0-9]+\.[0-9]+$' ${{ env.LATEST_IMAGE_COUNT }}) - echo "GETH_IMAGES=$geth_images" >> $GITHUB_ENV - echo "Geth latest images: $geth_images" - fi - - if [[ "$ETH_IMPLEMENTATIONS" == *"nethermind"* ]]; then - nethermind_images=$(ecrimagefetcher 'nethermind/nethermind' '^[0-9]+\.[0-9]+\.[0-9]+$' ${{ env.LATEST_IMAGE_COUNT }}) - echo "NETHERMIND_IMAGES=$nethermind_images" >> $GITHUB_ENV - echo "Nethermind latest images: $nethermind_images" - fi - - if [[ "$ETH_IMPLEMENTATIONS" == *"besu"* ]]; then - # 24.3.3 is ignored as it doesn't support data & input fields in eth_call - besu_images=$(ecrimagefetcher 'hyperledger/besu' '^[0-9]+\.[0-9]+(\.[0-9]+)?$' ${{ env.LATEST_IMAGE_COUNT }} ">=24.5.1") - echo "BESU_IMAGES=$besu_images" >> $GITHUB_ENV - echo "Besu latest images: $besu_images" - fi - - if [[ "$ETH_IMPLEMENTATIONS" == *"erigon"* ]]; then - # 2.60.0 and 2.60.1 are ignored as they stopped working with CL node - erigon_images=$(ecrimagefetcher 'thorax/erigon' '^v[0-9]+\.[0-9]+\.[0-9]+$' ${{ env.LATEST_IMAGE_COUNT }} "<v2.60.0") - echo "ERIGON_IMAGES=$erigon_images" >> $GITHUB_ENV - echo "Erigon latest images: $erigon_images" - fi - - if [[ "$ETH_IMPLEMENTATIONS" == *"reth"* ]]; then - reth_images=$(ecrimagefetcher 'ghcr.io/paradigmxyz/reth' '^v[0-9]+\.[0-9]+\.[0-9]+$' ${{ env.LATEST_IMAGE_COUNT }}) - echo "RETH_IMAGES=$reth_images" >> $GITHUB_ENV - echo "Reth latest images: $reth_images" - fi - - # End Build Test Dependencies - - prepare-compatibility-matrix: - name: Prepare Compatibility Matrix - if: always() && needs.should-run.outputs.should_run == 'true' && (needs.select-versions.outputs.evm_implementations != '' || github.event.inputs.base64TestList != '') - environment: integration - permissions: - checks: write - pull-requests: write - id-token: write - contents: read - needs: [get-latest-available-images, should-run, select-versions] - runs-on: ubuntu-latest - env: - ETH_IMPLEMENTATIONS: ${{ needs.select-versions.outputs.evm_implementations }} - BASE64_TEST_LIST: ${{ github.event.inputs.base64TestList }} - outputs: - matrix: ${{ env.JOB_MATRIX_JSON }} - steps: - - name: Decode Base64 Test List Input if Set - if: env.BASE64_TEST_LIST != '' - run: | - echo "Decoding base64 tests list from the input" - DECODED_BASE64_TEST_LIST=$(echo $BASE64_TEST_LIST | base64 -d) - echo "Decoded input:" - echo "$DECODED_BASE64_TEST_LIST" - is_valid=$(echo "$DECODED_BASE64_TEST_LIST" | jq . > /dev/null 2>&1; echo $?) - if [ "$is_valid" -ne 0 ]; then - echo "Invalid base64 input. Please provide a valid base64 encoded JSON list of tests." 
- echo "Here is an example of valid JSON:" - cat <> $GITHUB_ENV - # testlistgenerator is a tool that builds a matrix of tests to run - - name: Set Up testlistgenerator - if: env.BASE64_TEST_LIST == '' - shell: bash - run: | - go install github.com/smartcontractkit/chainlink-testing-framework/tools/testlistgenerator@v1.1.0 - - name: Prepare matrix input - if: env.BASE64_TEST_LIST == '' - run: | - PATH=$PATH:$(go env GOPATH)/bin - export PATH - - if [[ "$ETH_IMPLEMENTATIONS" == *"geth"* ]]; then - echo "Will test compatibility with geth" - testlistgenerator -o compatibility_test_list.json -p cron -r TestCronBasic -f './smoke/cron_test.go' -e geth -d "${{ needs.get-latest-available-images.outputs.geth_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p flux -r TestFluxBasic -f './smoke/flux_test.go' -e geth -d "${{ needs.get-latest-available-images.outputs.geth_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p runlog -r TestRunLogBasic -f './smoke/runlog_test.go' -e geth -d "${{ needs.get-latest-available-images.outputs.geth_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p log_poller -r TestLogPollerFewFiltersFixedDepth -f './smoke/log_poller_test.go' -e geth -d "${{ needs.get-latest-available-images.outputs.geth_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p ocr -r TestOCRBasic -f './smoke/ocr_test.go' -e geth -d "${{ needs.get-latest-available-images.outputs.geth_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p ocr2 -r '^TestOCRv2Basic/plugins$' -f './smoke/ocr2_test.go' -e geth -d "${{ needs.get-latest-available-images.outputs.geth_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p automation -r 'TestAutomationBasic/registry_2_1_logtrigger' -f './smoke/automation_test.go' -e geth -d "${{ needs.get-latest-available-images.outputs.geth_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p keeper -r 'TestKeeperBasicSmoke/registry_1_3' -f './smoke/keeper_test.go' -e geth -d "${{ needs.get-latest-available-images.outputs.geth_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p vrf -r '^TestVRFBasic/Request_Randomness$' -f './smoke/vrf_test.go' -e geth -d "${{ needs.get-latest-available-images.outputs.geth_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p vrfv2 -r '^TestVRFv2Basic/Request_Randomness$' -f './smoke/vrfv2_test.go' -e geth -d "${{ needs.get-latest-available-images.outputs.geth_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p vrfv2plus -r '^TestVRFv2Plus$/^Link_Billing$' -f './smoke/vrfv2plus_test.go' -e geth -d "${{ needs.get-latest-available-images.outputs.geth_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - else - echo "Will not test compatibility with geth" - fi - - if [[ "$ETH_IMPLEMENTATIONS" == *"besu"* ]]; then - echo "Will test compatibility with besu" - testlistgenerator -o 
compatibility_test_list.json -p cron -r TestCronBasic -f './smoke/cron_test.go' -e besu -d "${{ needs.get-latest-available-images.outputs.besu_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p flux -r TestFluxBasic -f './smoke/flux_test.go' -e besu -d "${{ needs.get-latest-available-images.outputs.besu_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p runlog -r TestRunLogBasic -f './smoke/runlog_test.go' -e besu -d "${{ needs.get-latest-available-images.outputs.besu_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p log_poller -r TestLogPollerFewFiltersFixedDepth -f './smoke/log_poller_test.go' -e besu -d "${{ needs.get-latest-available-images.outputs.besu_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p ocr -r TestOCRBasic -f './smoke/ocr_test.go' -e besu -d "${{ needs.get-latest-available-images.outputs.besu_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p ocr2 -r '^TestOCRv2Basic/plugins$' -f './smoke/ocr2_test.go' -e besu -d "${{ needs.get-latest-available-images.outputs.besu_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p automation -r 'TestAutomationBasic/registry_2_1_logtrigger' -f './smoke/automation_test.go' -e besu -d "${{ needs.get-latest-available-images.outputs.besu_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p keeper -r 'TestKeeperBasicSmoke/registry_1_3' -f './smoke/keeper_test.go' -e besu -d "${{ needs.get-latest-available-images.outputs.besu_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p vrf -r '^TestVRFBasic/Request_Randomness$' -f './smoke/vrf_test.go' -e besu -d "${{ needs.get-latest-available-images.outputs.besu_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - # VRFv2 and VRFV2Plus tests are disabled for besu until the functionalities they rely on are supported - # testlistgenerator -o compatibility_test_list.json -p vrfv2 -r '^TestVRFv2Basic/Request_Randomness$' -f './smoke/vrfv2_test.go' -e besu -d "${{ needs.get-latest-available-images.outputs.besu_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - # testlistgenerator -o compatibility_test_list.json -p vrfv2plus -r '^TestVRFv2Plus$/^Link_Billing$' -f './smoke/vrfv2plus_test.go' -e besu -d "${{ needs.get-latest-available-images.outputs.besu_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - else - echo "Will not test compatibility with besu" - fi - - if [[ "$ETH_IMPLEMENTATIONS" == *"erigon"* ]]; then - echo "Will test compatibility with erigon" - testlistgenerator -o compatibility_test_list.json -p cron -r TestCronBasic -f './smoke/cron_test.go' -e erigon -d "${{ needs.get-latest-available-images.outputs.erigon_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p flux -r TestFluxBasic -f './smoke/flux_test.go' -e erigon -d "${{ needs.get-latest-available-images.outputs.erigon_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" 
- testlistgenerator -o compatibility_test_list.json -p runlog -r TestRunLogBasic -f './smoke/runlog_test.go' -e erigon -d "${{ needs.get-latest-available-images.outputs.erigon_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p log_poller -r TestLogPollerFewFiltersFixedDepth -f './smoke/log_poller_test.go' -e erigon -d "${{ needs.get-latest-available-images.outputs.erigon_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p ocr -r TestOCRBasic -f './smoke/ocr_test.go' -e erigon -d "${{ needs.get-latest-available-images.outputs.erigon_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p ocr2 -r '^TestOCRv2Basic/plugins$' -f './smoke/ocr2_test.go' -e erigon -d "${{ needs.get-latest-available-images.outputs.erigon_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p automation -r 'TestAutomationBasic/registry_2_1_logtrigger' -f './smoke/automation_test.go' -e erigon -d "${{ needs.get-latest-available-images.outputs.erigon_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p keeper -r 'TestKeeperBasicSmoke/registry_1_3' -f './smoke/keeper_test.go' -e erigon -d "${{ needs.get-latest-available-images.outputs.erigon_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p vrf -r '^TestVRFBasic/Request_Randomness$' -f './smoke/vrf_test.go' -e erigon -d "${{ needs.get-latest-available-images.outputs.erigon_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p vrfv2 -r '^TestVRFv2Basic/Request_Randomness$' -f './smoke/vrfv2_test.go' -e erigon -d "${{ needs.get-latest-available-images.outputs.erigon_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p vrfv2plus -r '^TestVRFv2Plus$/^Link_Billing$' -f './smoke/vrfv2plus_test.go' -e erigon -d "${{ needs.get-latest-available-images.outputs.erigon_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - else - echo "Will not test compatibility with erigon" - fi - - if [[ "$ETH_IMPLEMENTATIONS" == *"nethermind"* ]]; then - echo "Will test compatibility with nethermind" - testlistgenerator -o compatibility_test_list.json -p cron -r TestCronBasic -f './smoke/cron_test.go' -e nethermind -d "${{ needs.get-latest-available-images.outputs.nethermind_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p flux -r TestFluxBasic -f './smoke/flux_test.go' -e nethermind -d "${{ needs.get-latest-available-images.outputs.nethermind_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p runlog -r TestRunLogBasic -f './smoke/runlog_test.go' -e nethermind -d "${{ needs.get-latest-available-images.outputs.nethermind_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p log_poller -r TestLogPollerFewFiltersFixedDepth -f './smoke/log_poller_test.go' -e nethermind -d "${{ needs.get-latest-available-images.outputs.nethermind_images }}" -t 
"evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p ocr -r TestOCRBasic -f './smoke/ocr_test.go' -e nethermind -d "${{ needs.get-latest-available-images.outputs.nethermind_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p ocr2 -r '^TestOCRv2Basic/plugins$' -f './smoke/ocr2_test.go' -e nethermind -d "${{ needs.get-latest-available-images.outputs.nethermind_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p automation -r 'TestAutomationBasic/registry_2_1_logtrigger' -f './smoke/automation_test.go' -e nethermind -d "${{ needs.get-latest-available-images.outputs.nethermind_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p keeper -r 'TestKeeperBasicSmoke/registry_1_3' -f './smoke/keeper_test.go' -e nethermind -d "${{ needs.get-latest-available-images.outputs.nethermind_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p vrf -r '^TestVRFBasic/Request_Randomness$' -f './smoke/vrf_test.go' -e nethermind -d "${{ needs.get-latest-available-images.outputs.nethermind_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - # VRFv2 and VRFV2Plus tests are disabled for nethermind until the functionalities they rely on are supported - # testlistgenerator -o compatibility_test_list.json -p vrfv2 -r '^TestVRFv2Basic/Request_Randomness$' -f './smoke/vrfv2_test.go' -e nethermind -d "${{ needs.get-latest-available-images.outputs.nethermind_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - # testlistgenerator -o compatibility_test_list.json -p vrfv2plus -r '^TestVRFv2Plus$/^Link_Billing$' -f './smoke/vrfv2plus_test.go' -e nethermind -d "${{ needs.get-latest-available-images.outputs.nethermind_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - else - echo "Will not test compatibility with nethermind" - fi - - if [[ "$ETH_IMPLEMENTATIONS" == *"reth"* ]]; then - echo "Will test compatibility with reth" - testlistgenerator -o compatibility_test_list.json -p cron -r TestCronBasic -f './smoke/cron_test.go' -e reth -d "${{ needs.get-latest-available-images.outputs.reth_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p flux -r TestFluxBasic -f './smoke/flux_test.go' -e reth -d "${{ needs.get-latest-available-images.outputs.reth_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p runlog -r TestRunLogBasic -f './smoke/runlog_test.go' -e reth -d "${{ needs.get-latest-available-images.outputs.reth_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p log_poller -r TestLogPollerFewFiltersFixedDepth -f './smoke/log_poller_test.go' -e reth -d "${{ needs.get-latest-available-images.outputs.reth_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p ocr -r TestOCRBasic -f './smoke/ocr_test.go' -e reth -d "${{ needs.get-latest-available-images.outputs.reth_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p ocr2 -r 
'^TestOCRv2Basic/plugins$' -f './smoke/ocr2_test.go' -e reth -d "${{ needs.get-latest-available-images.outputs.reth_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p automation -r 'TestAutomationBasic/registry_2_1_logtrigger' -f './smoke/automation_test.go' -e reth -d "${{ needs.get-latest-available-images.outputs.reth_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p keeper -r 'TestKeeperBasicSmoke/registry_1_3' -f './smoke/keeper_test.go' -e reth -d "${{ needs.get-latest-available-images.outputs.reth_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p vrf -r '^TestVRFBasic/Request_Randomness$' -f './smoke/vrf_test.go' -e reth -d "${{ needs.get-latest-available-images.outputs.reth_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p vrfv2 -r '^TestVRFv2Basic/Request_Randomness$' -f './smoke/vrfv2_test.go' -e reth -d "${{ needs.get-latest-available-images.outputs.reth_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - testlistgenerator -o compatibility_test_list.json -p vrfv2plus -r '^TestVRFv2Plus$/^Link_Billing$' -f './smoke/vrfv2plus_test.go' -e reth -d "${{ needs.get-latest-available-images.outputs.reth_images }}" -t "evm-implementation-compatibility-test" -n "ubuntu-latest" - else - echo "Will not test compatibility with reth" - fi - - jq . compatibility_test_list.json - JOB_MATRIX_JSON=$(jq -c . compatibility_test_list.json) - echo "JOB_MATRIX_JSON=${JOB_MATRIX_JSON}" >> $GITHUB_ENV - - run-client-compatibility-matrix: - name: ${{ matrix.evm_node.product }} compatibility with ${{ matrix.evm_node.docker_image }} - if: always() && needs.should-run.outputs.should_run == 'true' && (needs.build-chainlink.result == 'success' || needs.build-chainlink.result == 'skipped') && needs.prepare-compatibility-matrix.outputs.matrix != '' - environment: integration - permissions: - checks: write - pull-requests: write - id-token: write - contents: read - needs: - - build-chainlink - - prepare-compatibility-matrix - - should-run - - select-versions - env: - SELECTED_NETWORKS: SIMULATED,SIMULATED_1,SIMULATED_2 - CHAINLINK_COMMIT_SHA: ${{ needs.select-versions.outputs.chainlink_version }} - CHAINLINK_ENV_USER: ${{ github.actor }} - TEST_LOG_LEVEL: debug - strategy: - fail-fast: false - max-parallel: 10 - matrix: - evm_node: ${{fromJson(needs.prepare-compatibility-matrix.outputs.matrix)}} - runs-on: ubuntu-latest - steps: - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - repository: smartcontractkit/chainlink - ref: ${{ needs.select-versions.outputs.chainlink_version }} - - name: Setup GAP for Grafana - uses: smartcontractkit/.github/actions/setup-gap@d316f66b2990ea4daa479daa3de6fc92b00f863e # setup-gap@0.3.2 - with: - # aws inputs - aws-region: ${{ secrets.AWS_REGION }} - aws-role-arn: ${{ secrets.AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN }} - api-gateway-host: ${{ secrets.AWS_API_GW_HOST_GRAFANA }} - # other inputs - duplicate-authorization-header: "true" - - name: Prepare test log name - run: | - replace_special_chars() { - if [ -z "$1" ]; then - echo "Please provide a string as an argument." 
- return 1 - fi - - local input_string="$1" - - # Replace '/' with '-' - local modified_string="${input_string//\//-}" - - # Replace ':' with '-' - modified_string="${modified_string//:/-}" - - # Replace '.' with '-' - modified_string="${modified_string//./-}" - - echo "$modified_string" - } - echo "TEST_LOG_NAME=$(replace_special_chars "${{ matrix.evm_node.product }}-${{ matrix.evm_node.docker_image }}-test-logs")" >> $GITHUB_ENV - # - name: Collect Workflow Telemetry - # uses: catchpoint/workflow-telemetry-action@v2 - # with: - # comment_on_pr: false - # theme: 'dark' - - name: Run Tests - uses: smartcontractkit/.github/actions/ctf-run-tests@b6e37806737eef87e8c9137ceeb23ef0bff8b1db # ctf-run-tests@0.1.0 - with: - test_command_to_run: cd ./integration-tests && touch .root_dir && go test -timeout 30m -count=1 -json ${{ matrix.evm_node.run }} 2>&1 | tee /tmp/gotest.log | gotestloghelper -ci -singlepackage -hidepassingtests=false -hidepassinglogs - test_download_vendor_packages_command: cd ./integration-tests && go mod download - aws_registries: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - artifacts_name: ${{ env.TEST_LOG_NAME }} - artifacts_location: | - ./integration-tests/smoke/logs/ - ./integration-tests/smoke/db_dumps/ - /tmp/gotest.log - publish_check_name: ${{ matrix.evm_node.product }}-${{ matrix.evm_node.eth_implementation }} - token: ${{ secrets.GITHUB_TOKEN }} - go_mod_path: ./integration-tests/go.mod - cache_key_id: core-e2e-${{ env.MOD_CACHE_VERSION }} - cache_restore_only: "true" - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - QA_KUBECONFIG: "" - should_tidy: "false" - go_coverage_src_dir: /var/tmp/go-coverage - go_coverage_dest_dir: ${{ github.workspace }}/.covdata - env: - E2E_TEST_SELECTED_NETWORK: ${{ env.SELECTED_NETWORKS}} - E2E_TEST_CHAINLINK_IMAGE: ${{ env.CHAINLINK_IMAGE }} - E2E_TEST_CHAINLINK_VERSION: ${{ needs.select-versions.outputs.chainlink_image_version }} - E2E_TEST_LOKI_TENANT_ID: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - E2E_TEST_LOKI_ENDPOINT: https://${{ secrets.GRAFANA_INTERNAL_HOST }}/loki/api/v1/push - E2E_TEST_LOKI_BASIC_AUTH: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - E2E_TEST_GRAFANA_DASHBOARD_URL: "/d/ddf75041-1e39-42af-aa46-361fe4c36e9e/ci-e2e-tests-logs" - E2E_TEST_GRAFANA_BEARER_TOKEN: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN }} - E2E_TEST_PYROSCOPE_SERVER_URL: ${{ secrets.QA_PYROSCOPE_INSTANCE }} - E2E_TEST_PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }} - E2E_TEST_PYROSCOPE_ENVIRONMENT: ci-client-compatability-${{ matrix.eth_client }}-testnet - E2E_TEST_PYROSCOPE_ENABLED: "true" - E2E_TEST_LOGGING_RUN_ID: ${{ github.run_id }} - E2E_TEST_LOG_COLLECT: ${{ vars.TEST_LOG_COLLECT }} - E2E_TEST_LOG_STREAM_LOG_TARGETS: ${{ vars.LOGSTREAM_LOG_TARGETS }} - E2E_TEST_PRIVATE_ETHEREUM_EXECUTION_LAYER: ${{ matrix.evm_node.eth_implementation || 'geth' }} - E2E_TEST_PRIVATE_ETHEREUM_ETHEREUM_VERSION: auto_fill # Auto fill the version based on the docker image - E2E_TEST_PRIVATE_ETHEREUM_CUSTOM_DOCKER_IMAGE: ${{ matrix.evm_node.docker_image }} - - - name: Show Grafana url in test summary - if: always() - uses: smartcontractkit/.github/actions/ctf-show-grafana-in-test-summary@b6e37806737eef87e8c9137ceeb23ef0bff8b1db # ctf-show-grafana-in-test-summary@0.1.0 - - start-slack-thread: - name: Start Slack Thread - if: always() && needs.*.result != 'skipped' && needs.*.result != 'cancelled' && needs.should-run.outputs.should_run == 'true' && (needs.select-versions.outputs.evm_implementations != '' || 
github.event.inputs.base64TestList != '') - environment: integration - outputs: - thread_ts: ${{ steps.slack.outputs.thread_ts }} - permissions: - checks: write - pull-requests: write - id-token: write - contents: read - runs-on: ubuntu-latest - needs: [run-client-compatibility-matrix, should-run, select-versions, build-chainlink, prepare-compatibility-matrix] - steps: - - name: Debug Result - run: echo ${{ join(needs.*.result, ',') }} - - name: Main Slack Notification - uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0 - id: slack - with: - channel-id: ${{ secrets.QA_SLACK_CHANNEL }} - payload: | - { - "attachments": [ - { - "color": "${{ (contains(join(needs.*.result, ','), 'failure') || needs.build-chainlink.result == 'failure') && '#C62828' || '#2E7D32' }}", - "blocks": [ - { - "type": "header", - "text": { - "type": "plain_text", - "text": "EVM Implementation Compatibility Test Results ${{ contains(join(needs.*.result, ','), 'failure') && ':x:' || ':white_check_mark:'}}", - "emoji": true - } - }, - { - "type": "section", - "text": { - "type": "mrkdwn", - "text": "${{ needs.prepare-compatibility-matrix.result == 'failure' && 'Failed to prepare test matrix, notifying ' || needs.build-chainlink.result == 'failure' && 'Failed to build Chainlink image, notifying ' || contains(join(needs.*.result, ','), 'failure') && format('Some tests failed, notifying ', secrets.COMPAT_SLACK_NOTIFICATION_HANDLE) || 'All Good!' }}" - } - }, - { - "type": "divider" - }, - { - "type": "section", - "text": { - "type": "mrkdwn", - "text": "<${{ github.server_url }}/${{ github.repository }}/${{ needs.select-versions.outputs.chainlink_ref_path }}/${{ needs.select-versions.outputs.chainlink_version }}|${{ needs.select-versions.outputs.chainlink_version }}> | <${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}|Run>" - } - } - ] - } - ] - } - env: - SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} - - parse-test-results: - name: Parse Test Results - if: always() && needs.*.result != 'skipped' && needs.*.result != 'cancelled' && needs.should-run.outputs.should_run == 'true' && (needs.select-versions.outputs.evm_implementations != '' || github.event.inputs.base64TestList != '') - environment: integration - permissions: - checks: write - pull-requests: write - id-token: write - contents: read - runs-on: ubuntu-latest - needs: [run-client-compatibility-matrix, should-run] - outputs: - base64_parsed_results: ${{ steps.get-test-results.outputs.base64_parsed_results }} - steps: - # workflowresultparser is a tool to get job results from a workflow run - - name: Set Up workflowresultparser - shell: bash - run: | - go install github.com/smartcontractkit/chainlink-testing-framework/tools/workflowresultparser@v1.0.0 - - name: Get and parse Test Results - shell: bash - id: get-test-results - run: | - PATH=$PATH:$(go env GOPATH)/bin - export PATH - - workflowresultparser -workflowRunID ${{ github.run_id }} -githubToken ${{ github.token }} -githubRepo "${{ github.repository }}" -jobNameRegex "^automation compatibility with (.*?)$" -namedKey="automation" -outputFile=output.json - workflowresultparser -workflowRunID ${{ github.run_id }} -githubToken ${{ github.token }} -githubRepo "${{ github.repository }}" -jobNameRegex "^keeper compatibility with (.*?)$" -namedKey="keeper" -outputFile=output.json - workflowresultparser -workflowRunID ${{ github.run_id }} -githubToken ${{ github.token }} -githubRepo "${{ github.repository }}" -jobNameRegex "^log_poller 
compatibility with (.*?)$" -namedKey="log_poller" -outputFile=output.json - workflowresultparser -workflowRunID ${{ github.run_id }} -githubToken ${{ github.token }} -githubRepo "${{ github.repository }}" -jobNameRegex "^ocr compatibility with (.*?)$" -namedKey="ocr" -outputFile=output.json - workflowresultparser -workflowRunID ${{ github.run_id }} -githubToken ${{ github.token }} -githubRepo "${{ github.repository }}" -jobNameRegex "^ocr2 compatibility with (.*?)$" -namedKey="ocr2" -outputFile=output.json - workflowresultparser -workflowRunID ${{ github.run_id }} -githubToken ${{ github.token }} -githubRepo "${{ github.repository }}" -jobNameRegex "^vrf compatibility with (.*?)$" -namedKey="vrf" -outputFile=output.json - workflowresultparser -workflowRunID ${{ github.run_id }} -githubToken ${{ github.token }} -githubRepo "${{ github.repository }}" -jobNameRegex "^vrfv2 compatibility with (.*?)$" -namedKey="vrfv2" -outputFile=output.json - workflowresultparser -workflowRunID ${{ github.run_id }} -githubToken ${{ github.token }} -githubRepo "${{ github.repository }}" -jobNameRegex "^vrfv2plus compatibility with (.*?)$" -namedKey="vrfv2plus" -outputFile=output.json - workflowresultparser -workflowRunID ${{ github.run_id }} -githubToken ${{ github.token }} -githubRepo "${{ github.repository }}" -jobNameRegex "^flux compatibility with (.*?)$" -namedKey="flux" -outputFile=output.json - workflowresultparser -workflowRunID ${{ github.run_id }} -githubToken ${{ github.token }} -githubRepo "${{ github.repository }}" -jobNameRegex "^runlog compatibility with (.*?)$" -namedKey="runlog" -outputFile=output.json - workflowresultparser -workflowRunID ${{ github.run_id }} -githubToken ${{ github.token }} -githubRepo "${{ github.repository }}" -jobNameRegex "^cron compatibility with (.*?)$" -namedKey="cron" -outputFile=output.json - - echo "base64_parsed_results=$(base64 -w 0 output.json)" >> $GITHUB_OUTPUT - - display-test-results: - name: Aggregated test results - if: always() && needs.*.result != 'skipped' && needs.*.result != 'cancelled' && needs.should-run.outputs.should_run == 'true' && needs.parse-test-results.result == 'success' - environment: integration - permissions: - checks: write - pull-requests: write - id-token: write - contents: read - runs-on: ubuntu-latest - needs: [start-slack-thread, should-run, select-versions, parse-test-results] - steps: - # asciitable is a tool that prints results in a nice ASCII table - - name: Set Up asciitable - shell: bash - run: | - go install github.com/smartcontractkit/chainlink-testing-framework/tools/asciitable@v1.0.2 - - name: Print aggregated test results - shell: bash - run: | - PATH=$PATH:$(go env GOPATH)/bin - export PATH - - raw_results="$(echo ${{ needs.parse-test-results.outputs.base64_parsed_results }} | base64 -d)" - echo $raw_results > input.json - asciitable --firstColumn "EVM Implementation" --secondColumn Result --jsonfile input.json --outputFile output.txt --section "automation" --namedKey "automation" - asciitable --firstColumn "EVM Implementation" --secondColumn Result --jsonfile input.json --outputFile output.txt --section "keeper" --namedKey "keeper" - asciitable --firstColumn "EVM Implementation" --secondColumn Result --jsonfile input.json --outputFile output.txt --section "log_poller" --namedKey "log_poller" - asciitable --firstColumn "EVM Implementation" --secondColumn Result --jsonfile input.json --outputFile output.txt --section "ocr" --namedKey "ocr" - asciitable --firstColumn "EVM Implementation" --secondColumn Result 
--jsonfile input.json --outputFile output.txt --section "ocr2" --namedKey "ocr2" - asciitable --firstColumn "EVM Implementation" --secondColumn Result --jsonfile input.json --outputFile output.txt --section "vrf" --namedKey "vrf" - asciitable --firstColumn "EVM Implementation" --secondColumn Result --jsonfile input.json --outputFile output.txt --section "vrfv2" --namedKey "vrfv2" - asciitable --firstColumn "EVM Implementation" --secondColumn Result --jsonfile input.json --outputFile output.txt --section "vrfv2plus" --namedKey "vrfv2plus" - asciitable --firstColumn "EVM Implementation" --secondColumn Result --jsonfile input.json --outputFile output.txt --section "flux" --namedKey "flux" - asciitable --firstColumn "EVM Implementation" --secondColumn Result --jsonfile input.json --outputFile output.txt --section "cron" --namedKey "cron" - asciitable --firstColumn "EVM Implementation" --secondColumn Result --jsonfile input.json --outputFile output.txt --section "runlog" --namedKey "runlog" - - echo - echo "AGGREGATED RESULTS" - cat output.txt - - echo "## Aggregated EVM Implementations compatibility results summary" >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - cat output.txt >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - - post-test-results-to-slack: - name: Post Test Results for ${{matrix.product}} - if: always() && needs.*.result != 'skipped' && needs.*.result != 'cancelled' && needs.should-run.outputs.should_run == 'true' && needs.parse-test-results.result == 'success' - environment: integration - permissions: - checks: write - pull-requests: write - id-token: write - contents: read - runs-on: ubuntu-latest - needs: [start-slack-thread, should-run, parse-test-results] - strategy: - fail-fast: false - matrix: - product: - - automation - - keeper - - log_poller - - ocr - - ocr2 - - vrf - - vrfv2 - - vrfv2plus - - cron - - flux - - runlog - steps: - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - ref: ${{ needs.select-versions.outputs.chainlink_version }} - - name: Get test results for ${{ matrix.product }} - id: get-product-results - shell: bash - run: | - raw_results="$(echo ${{ needs.parse-test-results.outputs.base64_parsed_results }} | base64 -d)" - product_result=$(echo "$raw_results" | jq -c "select(has(\"${{ matrix.product }}\")) | .${{ matrix.product }}[]") - if [ -n "$product_result" ]; then - base64_result=$(echo $product_result | base64 -w 0) - echo "base64_result=$base64_result" >> $GITHUB_OUTPUT - else - echo "No results found for ${{ matrix.product }}" - echo "base64_result=" >> $GITHUB_OUTPUT - fi - - name: Post Test Results to Slack - uses: ./.github/actions/notify-slack-jobs-result - with: - github_token: ${{ github.token }} - github_repository: ${{ github.repository }} - workflow_run_id: ${{ github.run_id }} - github_job_name_regex: ^${{ matrix.product }} compatibility with (.*?)$ - message_title: ${{ matrix.product }} - slack_channel_id: ${{ secrets.QA_SLACK_CHANNEL }} - slack_bot_token: ${{ secrets.QA_SLACK_API_KEY }} - slack_thread_ts: ${{ needs.start-slack-thread.outputs.thread_ts }} - base64_parsed_results: ${{ steps.get-product-results.outputs.base64_result }} diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml deleted file mode 100644 index 88f5de5668..0000000000 --- a/.github/workflows/codeql-analysis.yml +++ /dev/null @@ -1,55 +0,0 @@ -name: 'CodeQL' - -on: - push: - branches: - - develop - pull_request: - # The branches below must 
be a subset of the branches above - branches: [develop] - schedule: - - cron: '23 19 * * 4' - -jobs: - analyze: - name: Analyze ${{ matrix.language }} - runs-on: ubuntu-latest - - strategy: - fail-fast: false - matrix: - language: ['go', 'javascript'] - - steps: - - name: Checkout repository - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - - name: Set up Go - if: ${{ matrix.language == 'go' }} - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 - with: - go-version-file: 'go.mod' - - - name: Touching core/web/assets/index.html - if: ${{ matrix.language == 'go' }} - run: mkdir -p core/web/assets && touch core/web/assets/index.html - - - name: Initialize CodeQL - uses: github/codeql-action/init@65c74964a9ed8c44ed9f19d4bbc5757a6a8e9ab9 # codeql-bundle-v2.16.1 - with: - languages: ${{ matrix.language }} - - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@65c74964a9ed8c44ed9f19d4bbc5757a6a8e9ab9 # codeql-bundle-v2.16.1 - - - name: Collect Metrics - if: always() - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: chainlink-codeql - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Analyze ${{ matrix.language }} - continue-on-error: true diff --git a/.github/workflows/crib-integration-test.yml b/.github/workflows/crib-integration-test.yml deleted file mode 100644 index 56e025eeff..0000000000 --- a/.github/workflows/crib-integration-test.yml +++ /dev/null @@ -1,113 +0,0 @@ -name: CRIB Integration Tests -on: - schedule: - - cron: "0 1 * * *" - workflow_call: -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true -jobs: - test: - runs-on: ubuntu-latest - environment: integration - permissions: - id-token: write - contents: read - actions: read - steps: - - name: Checkout repository - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - - uses: cachix/install-nix-action@ba0dd844c9180cbf77aa72a116d6fbc515d0e87b # v27 - with: - nix_path: nixpkgs=channel:nixos-unstable - - - name: setup-gap crib - uses: smartcontractkit/.github/actions/setup-gap@00b58566e0ee2761e56d9db0ea72b783fdb89b8d # setup-gap@0.4.0 - with: - aws-role-duration-seconds: 3600 # 1 hour - aws-role-arn: ${{ secrets.AWS_OIDC_CRIB_ROLE_ARN_STAGE }} - api-gateway-host: ${{ secrets.AWS_API_GW_HOST_CRIB_STAGE }} - aws-region: ${{ secrets.AWS_REGION }} - ecr-private-registry: ${{ secrets.AWS_ACCOUNT_ID_PROD }} - k8s-cluster-name: ${{ secrets.AWS_K8S_CLUSTER_NAME_STAGE }} - gap-name: crib - use-private-ecr-registry: true - use-tls: true - proxy-port: 8080 - metrics-job-name: "test" - gc-basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - gc-host: ${{ secrets.GRAFANA_INTERNAL_HOST }} - gc-org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - - - name: setup-gap k8s - uses: smartcontractkit/.github/actions/setup-gap@00b58566e0ee2761e56d9db0ea72b783fdb89b8d # setup-gap@0.4.0 - with: - aws-role-duration-seconds: 3600 # 1 hour - aws-role-arn: ${{ secrets.AWS_OIDC_CRIB_ROLE_ARN_STAGE }} - api-gateway-host: ${{ secrets.AWS_API_GW_HOST_K8S_STAGE }} - aws-region: ${{ secrets.AWS_REGION }} - ecr-private-registry: ${{ secrets.AWS_ACCOUNT_ID_PROD }} - k8s-cluster-name: ${{ secrets.AWS_K8S_CLUSTER_NAME_STAGE }} - gap-name: k8s - use-private-ecr-registry: true - use-k8s: true - proxy-port: 8443 - metrics-job-name: 
"test" - gc-basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - gc-host: ${{ secrets.GRAFANA_INTERNAL_HOST }} - gc-org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - - - name: Setup GitHub token using GATI - id: token - uses: smartcontractkit/.github/actions/setup-github-token@c0b38e6c40d72d01b8d2f24f92623a2538b3dedb # main - with: - aws-role-arn: ${{ secrets.AWS_OIDC_GLOBAL_READ_ONLY_TOKEN_ISSUER_ROLE_ARN }} - aws-lambda-url: ${{ secrets.AWS_INFRA_RELENG_TOKEN_ISSUER_LAMBDA_URL }} - aws-region: ${{ secrets.AWS_REGION }} - aws-role-duration-seconds: "1800" - - name: Debug workspace dir - shell: bash - run: | - echo ${{ github.workspace }} - echo $GITHUB_WORKSPACE - - - name: Deploy and validate CRIB Environment for Core - uses: smartcontractkit/.github/actions/crib-deploy-environment@4dd21a9d6e3f1383ffe8b9650b55f6e6031d3d0a # crib-deploy-environment@1.0.0 - id: deploy-crib - with: - github-token: ${{ steps.token.outputs.access-token }} - api-gateway-host: ${{ secrets.AWS_API_GW_HOST_K8S_STAGE }} - aws-region: ${{ secrets.AWS_REGION }} - aws-role-arn: ${{ secrets.AWS_OIDC_CRIB_ROLE_ARN_STAGE }} - ecr-private-registry: ${{ secrets.AWS_ACCOUNT_ID_PROD }} - ingress-base-domain: ${{ secrets.INGRESS_BASE_DOMAIN_STAGE }} - k8s-cluster-name: ${{ secrets.AWS_K8S_CLUSTER_NAME_STAGE }} - devspace-profiles: "local-dev-simulated-core-ocr1" - crib-alert-slack-webhook: ${{ secrets.CRIB_ALERT_SLACK_WEBHOOK }} - product-image: ${{ secrets.AWS_SDLC_ECR_HOSTNAME }}/chainlink - product-image-tag: develop - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - name: Setup go - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 - with: - go-version-file: "go.mod" - - name: Run CRIB integration test - working-directory: integration-tests/crib - env: - K8S_STAGING_INGRESS_SUFFIX: ${{ secrets.K8S_STAGING_INGRESS_SUFFIX }} - CRIB_NAMESPACE: ${{ steps.deploy-crib.outputs.devspace-namespace }} - CRIB_NETWORK: geth - CRIB_NODES: 5 - GAP_URL: ${{ secrets.GAP_URL }} - SETH_LOG_LEVEL: info - # RESTY_DEBUG: true - TEST_PERSISTENCE: true - run: |- - go test -v -run TestCRIBChaos - - name: Destroy CRIB Environment - id: destroy - if: always() && steps.deploy-crib.outputs.devspace-namespace != '' - uses: smartcontractkit/.github/actions/crib-purge-environment@c0b38e6c40d72d01b8d2f24f92623a2538b3dedb # crib-purge-environment@0.1.0 - with: - namespace: ${{ steps.deploy-crib.outputs.devspace-namespace }} diff --git a/.github/workflows/delete-deployments.yml b/.github/workflows/delete-deployments.yml deleted file mode 100644 index 1eb839e462..0000000000 --- a/.github/workflows/delete-deployments.yml +++ /dev/null @@ -1,34 +0,0 @@ -name: Cleanup integration deployments -on: - workflow_dispatch: - schedule: - # every 10 mins - - cron: "*/10 * * * *" - -jobs: - cleanup: - name: Clean up integration environment deployments - runs-on: ubuntu-latest - steps: - - name: Checkout repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - - name: Clean up integration environment - uses: ./.github/actions/delete-deployments - with: - environment: integration - # Delete 300 deployments at a time - num-of-pages: 3 - # We start with page 2 because usually the first 200 deployments are still active, so we cannot delete them - starting-page: 2 - - - name: Collect Metrics - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: chainlink-delete-deployments - org-id: ${{ 
secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Clean up integration environment deployments - continue-on-error: true diff --git a/.github/workflows/dependency-check.yml b/.github/workflows/dependency-check.yml deleted file mode 100644 index ede188de64..0000000000 --- a/.github/workflows/dependency-check.yml +++ /dev/null @@ -1,57 +0,0 @@ -name: Dependency Vulnerability Check - -on: - push: - -jobs: - changes: - name: Detect changes - runs-on: ubuntu-latest - outputs: - changes: ${{ steps.changes.outputs.src }} - steps: - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 - id: changes - with: - filters: | - src: - - '**/*go.sum' - - '**/*go.mod' - - '.github/workflows/dependency-check.yml' - Go: - runs-on: ubuntu-latest - needs: [changes] - steps: - - name: Check out code - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - - name: Set up Go - if: needs.changes.outputs.src == 'true' - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 - with: - go-version-file: 'go.mod' - id: go - - - name: Write Go Modules list - if: needs.changes.outputs.src == 'true' - run: go list -json -m all > go.list - - - name: Check vulnerabilities - if: needs.changes.outputs.src == 'true' - uses: sonatype-nexus-community/nancy-github-action@726e338312e68ecdd4b4195765f174d3b3ce1533 # v1.0.3 - with: - nancyVersion: "v1.0.39" - - - name: Collect Metrics - if: always() - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: dependency-vulnerability-check - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Go - continue-on-error: true diff --git a/.github/workflows/integration-chaos-tests.yml b/.github/workflows/integration-chaos-tests.yml deleted file mode 100644 index 3198320f6c..0000000000 --- a/.github/workflows/integration-chaos-tests.yml +++ /dev/null @@ -1,38 +0,0 @@ -name: Integration Chaos Test -on: - schedule: - - cron: "0 0 * * *" - push: - tags: - - "*" - workflow_dispatch: - -jobs: - run-e2e-tests-workflow: - name: Run E2E Tests - uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@aad83f232743646faa35f5ac03ee3829148d37ce - with: - test_path: .github/e2e-tests.yml - chainlink_version: ${{ github.sha }} - require_chainlink_image_versions_in_qa_ecr: ${{ github.sha }} - test_trigger: E2E Chaos Tests - test_log_level: debug - secrets: - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - QA_PYROSCOPE_INSTANCE: ${{ secrets.QA_PYROSCOPE_INSTANCE }} - QA_PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }} - QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} - GRAFANA_INTERNAL_TENANT_ID: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - GRAFANA_INTERNAL_BASIC_AUTH: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - GRAFANA_INTERNAL_HOST: ${{ secrets.GRAFANA_INTERNAL_HOST }} - GRAFANA_INTERNAL_URL_SHORTENER_TOKEN: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN }} - LOKI_TENANT_ID: ${{ secrets.LOKI_TENANT_ID }} - LOKI_URL: ${{ secrets.LOKI_URL }} - LOKI_BASIC_AUTH: ${{ secrets.LOKI_BASIC_AUTH }} - GH_TOKEN: ${{ 
secrets.GITHUB_TOKEN }} - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN: ${{ secrets.AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN }} - AWS_API_GW_HOST_GRAFANA: ${{ secrets.AWS_API_GW_HOST_GRAFANA }} - SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} diff --git a/.github/workflows/integration-tests-publish.yml b/.github/workflows/integration-tests-publish.yml deleted file mode 100644 index 536d2897ee..0000000000 --- a/.github/workflows/integration-tests-publish.yml +++ /dev/null @@ -1,102 +0,0 @@ -name: Test Image Publish -# Publish the compiled integration tests -on: - push: - tags: - - "v*" - branches: - - ccip-develop - workflow_dispatch: - -env: - CHAINLINK_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink - -jobs: - publish-integration-test-image: - environment: integration - permissions: - id-token: write - contents: read - name: Publish Integration Test Image - runs-on: ubuntu22.04-16cores-64GB - steps: - - name: Collect Metrics - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: publish-e2e-test-image - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Publish Integration Test Image - continue-on-error: true - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - ref: ${{ github.event.pull_request.head.sha || github.sha }} - - name: Setup Other Tags If Not Workflow Dispatch - id: tags - if: github.event_name != 'workflow_dispatch' - run: | - echo 'release_tag="${{ format('{0}.dkr.ecr.{1}.amazonaws.com/chainlink-ccip-tests:{2}', secrets.QA_AWS_ACCOUNT_NUMBER, secrets.QA_AWS_REGION, github.ref_name) }}"' >> $GITHUB_OUTPUT - - name: Build Image - uses: smartcontractkit/.github/actions/ctf-build-test-image@a5e4f4c8fbb8e15ab2ad131552eca6ac83c4f4b3 # ctf-build-test-image@0.1.0 - with: - other_tags: ${{ steps.tags.outputs.release_tag }} - repository: 'chainlink-ccip-tests' - tag: ${{ github.sha }} - suites: 'chaos migration reorg smoke soak benchmark load ccip-tests/load ccip-tests/smoke ccip-tests/chaos' - QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - - name: Notify Slack - # Only run this notification for merge to develop failures - if: failure() && github.event_name != 'workflow_dispatch' - uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0 - env: - SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} - with: - channel-id: "#team-test-tooling-internal" - slack-message: ":x: :mild-panic-intensifies: Publish Integration Test Image failed: \n${{ format('https://github.com/{0}/actions/runs/{1}', github.repository, github.run_id) }}\nRepository: Chainlink\n${{ format('Notifying ', secrets.GUARDIAN_SLACK_NOTIFICATION_HANDLE)}}" - build-chainlink-image: - environment: integration - # Only run this build for workflow_dispatch - if: github.event_name == 'workflow_dispatch' - permissions: - id-token: write - contents: read - strategy: - matrix: - image: - - name: "" - dockerfile: core/chainlink.Dockerfile - tag-suffix: "" - # uncomment in the future if we end up needing to soak test the plugins image - # - name: (plugins) - # dockerfile: plugins/chainlink.Dockerfile - # tag-suffix: -plugins - 
name: Build Chainlink Image ${{ matrix.image.name }} - runs-on: ubuntu22.04-8cores-32GB - steps: - - name: Collect Metrics - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: build-chainlink - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Build Chainlink Image ${{ matrix.image.name }} - continue-on-error: true - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - ref: ${{ github.sha }} - - name: Build Chainlink Image - uses: ./.github/actions/build-chainlink-image - with: - tag_suffix: ${{ matrix.image.tag-suffix }} - dockerfile: ${{ matrix.image.dockerfile }} - git_commit_sha: ${{ github.sha }} - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml deleted file mode 100644 index 8b56b88b70..0000000000 --- a/.github/workflows/integration-tests.yml +++ /dev/null @@ -1,493 +0,0 @@ -name: Integration Tests -run-name: Integration Tests ${{ inputs.distinct_run_name && inputs.distinct_run_name || '' }} -on: - merge_group: - pull_request: - push: - tags: - - "*" - workflow_dispatch: - inputs: - cl_ref: - description: 'The ref to checkout, defaults to the calling branch' - required: false - type: string - evm-ref: - description: 'The sha of the chainlink-evm commit to use if wanted' - required: false - type: string - distinct_run_name: - description: 'A unique identifier for this run, only use from other repos' - required: false - type: string - -# Only run 1 of this workflow at a time per PR -concurrency: - group: ${{ github.ref }}-${{ github.repository }}-${{ github.event_name }}--e2e-tests-${{ inputs.distinct_run_name }} - cancel-in-progress: true - -env: - # for run-test variables and environment - ENV_JOB_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink-ccip-tests:${{ inputs.evm-ref || github.sha }} - CHAINLINK_IMAGE: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com/chainlink - TEST_SUITE: smoke - TEST_ARGS: -test.timeout 12m - INTERNAL_DOCKER_REPO: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }}.dkr.ecr.${{ secrets.QA_AWS_REGION }}.amazonaws.com - MOD_CACHE_VERSION: 2 - COLLECTION_ID: chainlink-e2e-tests - -jobs: - enforce-ctf-version: - name: Enforce CTF Version - runs-on: ubuntu-latest - # We don't directly merge dependabot PRs, so let's not waste the resources - if: github.actor != 'dependabot[bot]' - steps: - - run: echo "${{github.event_name}}" - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - repository: smartcontractkit/ccip - ref: ${{ inputs.cl_ref }} - - name: Check Merge Group Condition - id: condition-check - run: | - echo "Checking event condition..." 
- SHOULD_ENFORCE="false" - if [[ "$GITHUB_EVENT_NAME" == "merge_group" ]]; then - echo "We are in a merge_group event, now check if we are on the develop branch" - target_branch=$(cat $GITHUB_EVENT_PATH | jq -r .merge_group.base_ref) - if [[ "$target_branch" == "refs/heads/develop" ]]; then - echo "We are on the develop branch, we should enforce ctf version" - SHOULD_ENFORCE="true" - fi - fi - echo "should we enforce ctf version = $SHOULD_ENFORCE" - echo "should-enforce=$SHOULD_ENFORCE" >> $GITHUB_OUTPUT - - name: Enforce CTF Version - if: steps.condition-check.outputs.should-enforce == 'true' - uses: smartcontractkit/.github/actions/ctf-check-mod-version@21b0189c5fdca0318617d259634b1a91e6d80262 # ctf-check-mod-version@0.0.0 - with: - go-project-path: ./integration-tests - module-name: github.com/smartcontractkit/chainlink-testing-framework/lib - enforce-semantic-tag: "true" - - changes: - environment: integration - name: Check Paths That Require Tests To Run - runs-on: ubuntu-latest - # We don't directly merge dependabot PRs, so let's not waste the resources - if: github.actor != 'dependabot[bot]' - steps: - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - repository: smartcontractkit/ccip - ref: ${{ inputs.cl_ref }} - - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 - id: changes - with: - filters: | - github_ci_changes: - - '.github/workflows/integration-tests.yml' - - '.github/workflows/run-e2e-tests-reusable-workflow.yml' - - '.github/e2e-tests.yml' - core_changes: - - '**/*.go' - - '**/*go.sum' - - '**/*go.mod' - - '**/*Dockerfile' - - 'core/**/migrations/*.sql' - - 'core/**/config/**/*.toml' - - 'integration-tests/**/*.toml' - ccip_changes: - - '**/*ccip*' - - '**/*ccip*/**' - - name: Ignore Filter On Workflow Dispatch - if: ${{ github.event_name == 'workflow_dispatch' }} - id: ignore-filter - run: echo "changes=true" >> $GITHUB_OUTPUT - - name: Collect Metrics - if: always() - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: ${{ env.COLLECTION_ID }}-check-paths - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Check Paths That Require Tests To Run - continue-on-error: true - outputs: - github_ci_changes: ${{ steps.ignore-filter.outputs.changes || steps.changes.outputs.github_ci_changes }} - core_changes: ${{ steps.ignore-filter.outputs.changes || steps.changes.outputs.core_changes }} - ccip_changes: ${{ steps.ignore-filter.outputs.changes || steps.changes.outputs.ccip_changes }} - - lint-integration-tests: - name: Lint ${{ matrix.project.name }} - runs-on: ubuntu22.04-8cores-32GB - # We don't directly merge dependabot PRs, so let's not waste the resources - if: github.actor != 'dependabot[bot]' - strategy: - matrix: - project: - - name: integration-tests - id: e2e-tests - path: ./integration-tests - cache_id: e2e-tests - - name: load - id: load - path: ./integration-tests/load - cache_id: load - steps: - - name: Collect Metrics - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: ${{ env.COLLECTION_ID }}-build-lint-${{ matrix.project.id }} - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST 
}} - this-job-name: Lint ${{ matrix.project.name }} - continue-on-error: true - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - repository: smartcontractkit/ccip - ref: ${{ inputs.cl_ref }} - - name: Setup Go - uses: smartcontractkit/.github/actions/ctf-setup-go@b0d756c57fcdbcff187e74166562a029fdd5d1b9 # ctf-setup-go@0.0.0 - with: - test_download_vendor_packages_command: cd ${{ matrix.project.path }} && go mod download - go_mod_path: ${{ matrix.project.path }}/go.mod - cache_key_id: ${{ matrix.project.cache_id }} - cache_restore_only: "true" - - name: Lint Go - uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # v4.0.0 - with: - version: v1.59.1 - # We already cache these directories in setup-go - skip-pkg-cache: true - skip-build-cache: true - # only-new-issues is only applicable to PRs, otherwise it is always set to false - only-new-issues: false # disabled for PRs due to unreliability - args: --out-format colored-line-number,checkstyle:golangci-lint-report.xml - working-directory: ${{ matrix.project.path }} - - build-chainlink: - environment: integration - permissions: - id-token: write - contents: read - strategy: - matrix: - image: - - name: "" - dockerfile: core/chainlink.Dockerfile - tag-suffix: "" - - name: (plugins) - dockerfile: plugins/chainlink.Dockerfile - tag-suffix: -plugins - name: Build Chainlink Image ${{ matrix.image.name }} - runs-on: ubuntu22.04-8cores-32GB - needs: [changes, enforce-ctf-version] - steps: - - name: Collect Metrics - if: needs.changes.outputs.core_changes == 'true' || needs.changes.outputs.github_ci_changes == 'true' || github.event_name == 'workflow_dispatch' - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: ${{ env.COLLECTION_ID }}-build-chainlink - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Build Chainlink Image ${{ matrix.image.name }} - continue-on-error: true - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - repository: smartcontractkit/ccip - ref: ${{ inputs.cl_ref || github.event.pull_request.head.sha || github.event.merge_group.head_sha }} - - name: Setup Github Token - if: ${{ inputs.evm-ref }} - id: get-gh-token - uses: smartcontractkit/.github/actions/setup-github-token@ef78fa97bf3c77de6563db1175422703e9e6674f # setup-github-token@0.2.1 - with: - aws-role-arn: ${{ secrets.AWS_OIDC_GLOBAL_READ_ONLY_TOKEN_ISSUER_ROLE_ARN }} - aws-lambda-url: ${{ secrets.AWS_INFRA_RELENG_TOKEN_ISSUER_LAMBDA_URL }} - aws-region: ${{ secrets.AWS_REGION }} - set-git-config: "true" - - name: Build Chainlink Image - if: needs.changes.outputs.core_changes == 'true' || needs.changes.outputs.github_ci_changes == 'true' || github.event_name == 'workflow_dispatch' - uses: ./.github/actions/build-chainlink-image - with: - tag_suffix: ${{ matrix.image.tag-suffix }} - dockerfile: ${{ matrix.image.dockerfile }} - git_commit_sha: ${{ inputs.evm-ref || github.sha }} - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - dep_evm_sha: ${{ inputs.evm-ref }} - - run-core-e2e-tests-for-pr: - name: Run Core E2E Tests For PR - permissions: - actions: read - checks: write - pull-requests: write - id-token: write - contents: read - needs: [build-chainlink, changes] - if: 
github.event_name == 'pull_request' && ( needs.changes.outputs.core_changes == 'true' || needs.changes.outputs.github_ci_changes == 'true') - uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@aad83f232743646faa35f5ac03ee3829148d37ce - with: - workflow_name: Run Core E2E Tests For PR - chainlink_version: ${{ inputs.evm-ref || github.sha }} - chainlink_upgrade_version: ${{ github.sha }} - test_path: .github/e2e-tests.yml - test_trigger: PR E2E Core Tests - upload_cl_node_coverage_artifact: true - upload_cl_node_coverage_artifact_prefix: cl_node_coverage_data_ - enable_otel_traces_for_ocr2_plugins: ${{ contains(join(github.event.pull_request.labels.*.name, ' '), 'enable tracing') }} - secrets: - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - QA_PYROSCOPE_INSTANCE: ${{ secrets.QA_PYROSCOPE_INSTANCE }} - QA_PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }} - QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} - GRAFANA_INTERNAL_TENANT_ID: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - GRAFANA_INTERNAL_BASIC_AUTH: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - GRAFANA_INTERNAL_HOST: ${{ secrets.GRAFANA_INTERNAL_HOST }} - GRAFANA_INTERNAL_URL_SHORTENER_TOKEN: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN }} - LOKI_TENANT_ID: ${{ secrets.LOKI_TENANT_ID }} - LOKI_URL: ${{ secrets.LOKI_URL }} - LOKI_BASIC_AUTH: ${{ secrets.LOKI_BASIC_AUTH }} - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN: ${{ secrets.AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN }} - AWS_API_GW_HOST_GRAFANA: ${{ secrets.AWS_API_GW_HOST_GRAFANA }} - SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} - - run-core-e2e-tests-for-merge-queue: - name: Run Core E2E Tests For Merge Queue - permissions: - actions: read - checks: write - pull-requests: write - id-token: write - contents: read - needs: [build-chainlink, changes] - if: github.event_name == 'merge_group' && ( needs.changes.outputs.core_changes == 'true' || needs.changes.outputs.github_ci_changes == 'true') - uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@aad83f232743646faa35f5ac03ee3829148d37ce - with: - workflow_name: Run Core E2E Tests For Merge Queue - chainlink_version: ${{ inputs.evm-ref || github.sha }} - chainlink_upgrade_version: ${{ github.sha }} - test_path: .github/e2e-tests.yml - test_trigger: Merge Queue E2E Core Tests - upload_cl_node_coverage_artifact: true - upload_cl_node_coverage_artifact_prefix: cl_node_coverage_data_ - enable_otel_traces_for_ocr2_plugins: ${{ contains(join(github.event.pull_request.labels.*.name, ' '), 'enable tracing') }} - # Notify Test Tooling team in slack when merge queue tests fail - slack_notification_after_tests: on_failure - slack_notification_after_tests_channel_id: "#team-test-tooling-internal" - slack_notification_after_tests_name: Core E2E Tests In Merge Queue - secrets: - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - QA_PYROSCOPE_INSTANCE: ${{ secrets.QA_PYROSCOPE_INSTANCE }} - QA_PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }} - QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} - GRAFANA_INTERNAL_TENANT_ID: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - GRAFANA_INTERNAL_BASIC_AUTH: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - GRAFANA_INTERNAL_HOST: ${{ secrets.GRAFANA_INTERNAL_HOST }} - 
GRAFANA_INTERNAL_URL_SHORTENER_TOKEN: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN }} - LOKI_TENANT_ID: ${{ secrets.LOKI_TENANT_ID }} - LOKI_URL: ${{ secrets.LOKI_URL }} - LOKI_BASIC_AUTH: ${{ secrets.LOKI_BASIC_AUTH }} - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN: ${{ secrets.AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN }} - AWS_API_GW_HOST_GRAFANA: ${{ secrets.AWS_API_GW_HOST_GRAFANA }} - SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} - - run-ccip-e2e-tests-for-pr: - name: Run CCIP E2E Tests For PR - permissions: - actions: read - checks: write - pull-requests: write - id-token: write - contents: read - needs: [build-chainlink, changes] - if: github.event_name == 'pull_request' && (needs.changes.outputs.ccip_changes == 'true' || needs.changes.outputs.github_ci_changes == 'true') - uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@aad83f232743646faa35f5ac03ee3829148d37ce - with: - workflow_name: Run CCIP E2E Tests For PR - chainlink_version: ${{ inputs.evm-ref || github.sha }} - chainlink_upgrade_version: ${{ github.sha }} - test_path: .github/e2e-tests.yml - test_trigger: PR E2E CCIP Tests - upload_cl_node_coverage_artifact: true - upload_cl_node_coverage_artifact_prefix: cl_node_coverage_data_ - enable_otel_traces_for_ocr2_plugins: ${{ contains(join(github.event.pull_request.labels.*.name, ' '), 'enable tracing') }} - secrets: - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - QA_PYROSCOPE_INSTANCE: ${{ secrets.QA_PYROSCOPE_INSTANCE }} - QA_PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }} - QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} - GRAFANA_INTERNAL_TENANT_ID: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - GRAFANA_INTERNAL_BASIC_AUTH: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - GRAFANA_INTERNAL_HOST: ${{ secrets.GRAFANA_INTERNAL_HOST }} - GRAFANA_INTERNAL_URL_SHORTENER_TOKEN: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN }} - LOKI_TENANT_ID: ${{ secrets.LOKI_TENANT_ID }} - LOKI_URL: ${{ secrets.LOKI_URL }} - LOKI_BASIC_AUTH: ${{ secrets.LOKI_BASIC_AUTH }} - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN: ${{ secrets.AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN }} - AWS_API_GW_HOST_GRAFANA: ${{ secrets.AWS_API_GW_HOST_GRAFANA }} - SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} - - run-ccip-e2e-tests-for-merge-queue: - name: Run CCIP E2E Tests For Merge Queue - permissions: - actions: read - checks: write - pull-requests: write - id-token: write - contents: read - needs: [build-chainlink, changes] - if: github.event_name == 'merge_group' && (needs.changes.outputs.ccip_changes == 'true' || needs.changes.outputs.github_ci_changes == 'true') - uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@aad83f232743646faa35f5ac03ee3829148d37ce - with: - workflow_name: Run CCIP E2E Tests For Merge Queue - chainlink_version: ${{ inputs.evm-ref || github.sha }} - chainlink_upgrade_version: ${{ github.sha }} - test_path: .github/e2e-tests.yml - test_trigger: Merge Queue E2E CCIP Tests - upload_cl_node_coverage_artifact: true - upload_cl_node_coverage_artifact_prefix: cl_node_coverage_data_ - enable_otel_traces_for_ocr2_plugins: ${{ contains(join(github.event.pull_request.labels.*.name, ' '), 'enable tracing') }} - secrets: - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ROLE_TO_ASSUME: ${{ 
secrets.QA_AWS_ROLE_TO_ASSUME }} - QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - QA_PYROSCOPE_INSTANCE: ${{ secrets.QA_PYROSCOPE_INSTANCE }} - QA_PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }} - QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} - GRAFANA_INTERNAL_TENANT_ID: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - GRAFANA_INTERNAL_BASIC_AUTH: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - GRAFANA_INTERNAL_HOST: ${{ secrets.GRAFANA_INTERNAL_HOST }} - GRAFANA_INTERNAL_URL_SHORTENER_TOKEN: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN }} - LOKI_TENANT_ID: ${{ secrets.LOKI_TENANT_ID }} - LOKI_URL: ${{ secrets.LOKI_URL }} - LOKI_BASIC_AUTH: ${{ secrets.LOKI_BASIC_AUTH }} - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN: ${{ secrets.AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN }} - AWS_API_GW_HOST_GRAFANA: ${{ secrets.AWS_API_GW_HOST_GRAFANA }} - SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} - - check-e2e-test-results: - if: always() - name: ETH Smoke Tests - runs-on: ubuntu-latest - needs: [lint-integration-tests, run-core-e2e-tests-for-pr, run-ccip-e2e-tests-for-pr, run-core-e2e-tests-for-merge-queue, run-ccip-e2e-tests-for-merge-queue] - steps: - - name: Check Core test results - id: check_core_results - run: | - results='${{ needs.run-core-e2e-tests-for-pr.outputs.test_results }}' - echo "Core test results:" - echo "$results" | jq . - - node_migration_tests_failed=$(echo $results | jq '[.[] | select(.id == "integration-tests/migration/upgrade_version_test.go:*" ) | select(.result != "success")] | length > 0') - echo "node_migration_tests_failed=$node_migration_tests_failed" >> $GITHUB_OUTPUT - - - name: Check CCIP test results - id: check_ccip_results - run: | - if [[ '${{ needs.run-ccip-e2e-tests-for-pr.result }}' != 'skipped' ]]; then - results='${{ needs.run-ccip-e2e-tests-for-pr.outputs.test_results }}' - echo "CCIP test results:" - echo "$results" | jq . - else - echo "CCIP tests were skipped." 
- fi - - - name: Send slack notification for failed migration tests - if: steps.check_core_results.outputs.node_migration_tests_failed == 'true' && github.event_name != 'workflow_dispatch' - uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0 - env: - SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} - with: - channel-id: "#team-test-tooling-internal" - slack-message: ":x: :mild-panic-intensifies: Node Migration Tests Failed: \n${{ format('https://github.com/{0}/actions/runs/{1}', github.repository, github.run_id) }}\n${{ format('Notifying ', secrets.GUARDIAN_SLACK_NOTIFICATION_HANDLE) }}" - - - name: Fail the job if core tests in PR not successful - if: always() && needs.run-core-e2e-tests-for-pr.result == 'failure' - run: exit 1 - - - name: Fail the job if core tests in merge queue not successful - if: always() && needs.run-core-e2e-tests-for-merge-queue.result == 'failure' - run: exit 1 - - - name: Fail the job if lint not successful - if: always() && needs.lint-integration-tests.result == 'failure' - run: exit 1 - - cleanup: - name: Clean up integration environment deployments - if: always() - needs: [run-core-e2e-tests-for-pr, run-ccip-e2e-tests-for-pr, run-core-e2e-tests-for-merge-queue, run-ccip-e2e-tests-for-merge-queue] - runs-on: ubuntu-latest - steps: - - name: Checkout repo - if: ${{ github.event_name == 'pull_request' }} - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - repository: smartcontractkit/ccip - ref: ${{ inputs.cl_ref }} - - - name: 🧼 Clean up Environment - if: ${{ github.event_name == 'pull_request' }} - uses: ./.github/actions/delete-deployments - with: - environment: integration - ref: ${{ github.head_ref }} # See https://github.com/github/docs/issues/15319#issuecomment-1476705663 - - - name: Collect Metrics - if: ${{ github.event_name == 'pull_request' }} - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: ${{ env.COLLECTION_ID }}-env-cleanup - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Clean up integration environment deployments - continue-on-error: true - - show-chainlink-node-coverage: - name: Show Chainlink Node Go Coverage - if: always() - needs: [run-core-e2e-tests-for-pr, run-ccip-e2e-tests-for-pr, run-core-e2e-tests-for-merge-queue, run-ccip-e2e-tests-for-merge-queue] - runs-on: ubuntu-latest - steps: - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - repository: smartcontractkit/ccip - ref: ${{ inputs.cl_ref || github.event.pull_request.head.sha || github.event.merge_group.head_sha }} - - name: Download All Artifacts - uses: actions/download-artifact@9c19ed7fe5d278cd354c7dfd5d3b88589c7e2395 # v4.1.6 - with: - path: cl_node_coverage_data - pattern: cl_node_coverage_data_* - merge-multiple: true - - name: Show Coverage - run: go run ./integration-tests/scripts/show_coverage.go "${{ github.workspace }}/cl_node_coverage_data/*/merged" diff --git a/.github/workflows/lint-gh-workflows.yml b/.github/workflows/lint-gh-workflows.yml deleted file mode 100644 index c7727199e9..0000000000 --- a/.github/workflows/lint-gh-workflows.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Lint GH Workflows -on: - push: -jobs: - lint_workflows: - name: Validate Github Action Workflows - runs-on: ubuntu-latest - steps: - - name: 
Check out Code - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - name: Run actionlint - uses: reviewdog/action-actionlint@c6ee1eb0a5d47b2af53a203652b5dac0b6c4016e # v1.43.0 - - name: Collect Metrics - if: always() - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: lint-gh-workflows - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Validate Github Action Workflows - continue-on-error: true diff --git a/.github/workflows/on-demand-ocr-soak-test.yml b/.github/workflows/on-demand-ocr-soak-test.yml deleted file mode 100644 index 978c1eb67d..0000000000 --- a/.github/workflows/on-demand-ocr-soak-test.yml +++ /dev/null @@ -1,69 +0,0 @@ -name: On Demand OCR Soak Test -on: - workflow_dispatch: - inputs: - testToRun: - description: Select a test to run from .github/e2e-tests.yml - required: true - default: TestOCRSoak - type: choice - options: - - soak/ocr_test.go:TestOCRv1Soak - - soak/ocr_test.go:TestOCRv2Soak - - soak/ocr_test.go:TestForwarderOCRv1Soak - - soak/ocr_test.go:TestForwarderOCRv2Soak - - soak/ocr_test.go:TestOCRSoak_GethReorgBelowFinality_FinalityTagDisabled - - soak/ocr_test.go:TestOCRSoak_GethReorgBelowFinality_FinalityTagEnabled - - soak/ocr_test.go:TestOCRSoak_GasSpike - - soak/ocr_test.go:TestOCRSoak_ChangeBlockGasLimit - - soak/ocr_test.go:TestOCRSoak_RPCDownForAllCLNodes - - soak/ocr_test.go:TestOCRSoak_RPCDownForHalfCLNodes - test_config_override_path: - description: Path to a test config file used to override the default test config - required: false - type: string - test_secrets_override_key: - description: 'Key to run tests with custom test secrets' - required: false - type: string - chainlink_version: - description: Chainlink image version to use - default: develop - required: true - type: string - slackMemberID: - description: Slack Member ID (Not your @) - required: true - default: U01A2B2C3D4 - -jobs: - run-e2e-tests-workflow: - name: Run E2E Tests - uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@aad83f232743646faa35f5ac03ee3829148d37ce - with: - test_path: .github/e2e-tests.yml - test_ids: ${{ inputs.testToRun}} - test_config_override_path: ${{ inputs.test_config_override_path }} - chainlink_version: ${{ inputs.chainlink_version }} - SLACK_USER: ${{ inputs.slackMemberID }} - secrets: - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - QA_PYROSCOPE_INSTANCE: ${{ secrets.QA_PYROSCOPE_INSTANCE }} - QA_PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }} - QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} - GRAFANA_INTERNAL_TENANT_ID: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - GRAFANA_INTERNAL_BASIC_AUTH: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - GRAFANA_INTERNAL_HOST: ${{ secrets.GRAFANA_INTERNAL_HOST }} - GRAFANA_INTERNAL_URL_SHORTENER_TOKEN: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN }} - LOKI_TENANT_ID: ${{ secrets.LOKI_TENANT_ID }} - LOKI_URL: ${{ secrets.LOKI_URL }} - LOKI_BASIC_AUTH: ${{ secrets.LOKI_BASIC_AUTH }} - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN: ${{ secrets.AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN }} - AWS_API_GW_HOST_GRAFANA: ${{ secrets.AWS_API_GW_HOST_GRAFANA }} - TEST_SECRETS_OVERRIDE_BASE64: ${{ 
secrets[inputs.test_secrets_override_key] }} - SLACK_API_KEY: ${{ secrets.QA_SLACK_API_KEY }} - SLACK_CHANNEL: ${{ secrets.QA_SLACK_CHANNEL }} diff --git a/.github/workflows/on-demand-vrfv2-performance-test.yml b/.github/workflows/on-demand-vrfv2-performance-test.yml deleted file mode 100644 index 1f1a847d82..0000000000 --- a/.github/workflows/on-demand-vrfv2-performance-test.yml +++ /dev/null @@ -1,98 +0,0 @@ -name: On Demand VRFV2 Performance Test -on: - workflow_dispatch: - inputs: - performanceTestType: - description: Performance Test Type of test to run - type: choice - options: - - "Smoke" - - "Soak" - - "Load" - - "Stress" - - "Spike" - test_list_regex: - description: "Regex for tests to run" - required: false - default: "(TestVRFV2Performance)" - test_config_override_path: - description: Path to a test config file used to override the default test config - required: false - type: string - test_secrets_override_key: - description: 'Key to run tests with custom test secrets' - required: false - type: string - notify_user_id_on_failure: - description: 'Enter Slack user ID to notify on test failure' - required: false - type: string - -jobs: - set-tests-to-run: - name: Set tests to run - runs-on: ubuntu-latest - outputs: - test_list: ${{ steps.set-tests.outputs.test_list }} - steps: - - name: Generate Test List JSON - id: set-tests - run: | - TEST_CMD='cd integration-tests/load && go test -v -count=1 -timeout 24h -run "${{ inputs.test_list_regex }}" ./vrfv2' - TEST_CONFIG_OVERRIDE_PATH=${{ inputs.test_config_override_path }} - TEST_TYPE=${{ inputs.performanceTestType }} - - TEST_LIST=$(jq -n -c \ - --arg test_cmd "$TEST_CMD" \ - --arg test_config_override_path "$TEST_CONFIG_OVERRIDE_PATH" \ - --arg TEST_TYPE "$TEST_TYPE" \ - '{ - "tests": [ - { - "id": "TestVRFv2Plus_Performance", - "path": "integration-tests/load/vrfv2plus/vrfv2plus_test.go", - "runs_on": "ubuntu22.04-8cores-32GB", - "test_env_type": "docker", - "test_cmd": $test_cmd, - "test_config_override_path": $test_config_override_path, - "test_env_vars": { - "TEST_TYPE": $TEST_TYPE - } - } - ] - }') - - echo "test_list=$TEST_LIST" >> $GITHUB_OUTPUT - - run-e2e-tests-workflow: - name: Run E2E Tests - needs: set-tests-to-run - uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@aad83f232743646faa35f5ac03ee3829148d37ce - with: - custom_test_list_json: ${{ needs.set-tests-to-run.outputs.test_list }} - chainlink_version: ${{ inputs.chainlink_version }} - slack_notification_after_tests: always - slack_notification_after_tests_name: "VRF V2 Performance Tests with test config: ${{ inputs.test_config_override_path || 'default' }}" - slack_notification_after_tests_notify_user_id_on_failure: ${{ inputs.notify_user_id_on_failure }} - secrets: - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - QA_PYROSCOPE_INSTANCE: ${{ secrets.QA_PYROSCOPE_INSTANCE }} - QA_PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }} - QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} - GRAFANA_INTERNAL_TENANT_ID: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - GRAFANA_INTERNAL_BASIC_AUTH: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - GRAFANA_INTERNAL_HOST: ${{ secrets.GRAFANA_INTERNAL_HOST }} - GRAFANA_INTERNAL_URL_SHORTENER_TOKEN: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN }} - LOKI_TENANT_ID: ${{ secrets.LOKI_TENANT_ID }} - LOKI_URL: ${{ secrets.LOKI_URL }} - LOKI_BASIC_AUTH: ${{ secrets.LOKI_BASIC_AUTH }} - GH_TOKEN: ${{ 
secrets.GITHUB_TOKEN }} - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN: ${{ secrets.AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN }} - AWS_API_GW_HOST_GRAFANA: ${{ secrets.AWS_API_GW_HOST_GRAFANA }} - TEST_SECRETS_OVERRIDE_BASE64: ${{ secrets[inputs.test_secrets_override_key] }} - SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} - SLACK_API_KEY: ${{ secrets.QA_SLACK_API_KEY }} - SLACK_CHANNEL: ${{ secrets.QA_VRF_SLACK_CHANNEL }} diff --git a/.github/workflows/on-demand-vrfv2-smoke-tests.yml b/.github/workflows/on-demand-vrfv2-smoke-tests.yml deleted file mode 100644 index 9f77c7ab53..0000000000 --- a/.github/workflows/on-demand-vrfv2-smoke-tests.yml +++ /dev/null @@ -1,100 +0,0 @@ -name: On Demand VRFV2 Smoke Tests -on: - workflow_dispatch: - inputs: - test_suite: - description: "Test Suite to run" - required: true - type: choice - default: "All Tests" - options: - - "All Tests" - - "Selected Tests" - test_list_regex: - description: "Regex for 'Selected Tests' to run" - required: false - default: "TestVRFv2Basic/(Request_Randomness|Direct_Funding)|TestVRFV2WithBHS" - test_config_override_path: - description: Path to a test config file used to override the default test config - required: false - type: string - test_secrets_override_key: - description: 'Key to run tests with custom test secrets' - required: false - type: string - chainlink_version: - description: Chainlink image version to use - default: develop - required: false - type: string - notify_user_id_on_failure: - description: 'Enter Slack user ID to notify on test failure' - required: false - type: string - -jobs: - set-tests-to-run: - name: Set tests to run - runs-on: ubuntu-latest - outputs: - test_list: ${{ steps.set-tests.outputs.test_list }} - steps: - - name: Generate Test List JSON - id: set-tests - run: | - if [[ "${{ inputs.test_suite }}" == "All Tests" ]]; then - TEST_CMD="cd integration-tests/smoke && go test vrfv2_test.go -test.parallel=1 -timeout 3h -count=1 -json -v" - else - TEST_CMD='cd integration-tests/smoke && go test -test.run "${{ inputs.test_list_regex }}" -test.parallel=1 -timeout 2h -count=1 -json -v' - fi - TEST_CONFIG_OVERRIDE_PATH=${{ inputs.test_config_override_path }} - - TEST_LIST=$(jq -n -c \ - --arg test_cmd "$TEST_CMD" \ - --arg test_config_override_path "$TEST_CONFIG_OVERRIDE_PATH" \ - '{ - "tests": [ - { - "id": "TestVRFv2_Smoke", - "path": "integration-tests/smoke/vrfv2_test.go", - "runs_on": "ubuntu-latest", - "test_env_type": "docker", - "test_cmd": $test_cmd, - "test_config_override_path": $test_config_override_path - } - ] - }') - - echo "test_list=$TEST_LIST" >> $GITHUB_OUTPUT - - run-e2e-tests-workflow: - name: Run E2E Tests - needs: set-tests-to-run - uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@aad83f232743646faa35f5ac03ee3829148d37ce - with: - custom_test_list_json: ${{ needs.set-tests-to-run.outputs.test_list }} - chainlink_version: ${{ inputs.chainlink_version }} - slack_notification_after_tests: always - slack_notification_after_tests_name: "VRF V2 Smoke Tests with test config: ${{ inputs.test_config_override_path || 'default' }}" - slack_notification_after_tests_notify_user_id_on_failure: ${{ inputs.notify_user_id_on_failure }} - secrets: - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - QA_PYROSCOPE_INSTANCE: ${{ secrets.QA_PYROSCOPE_INSTANCE }} - QA_PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }} - 
QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} - GRAFANA_INTERNAL_TENANT_ID: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - GRAFANA_INTERNAL_BASIC_AUTH: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - GRAFANA_INTERNAL_HOST: ${{ secrets.GRAFANA_INTERNAL_HOST }} - GRAFANA_INTERNAL_URL_SHORTENER_TOKEN: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN }} - LOKI_TENANT_ID: ${{ secrets.LOKI_TENANT_ID }} - LOKI_URL: ${{ secrets.LOKI_URL }} - LOKI_BASIC_AUTH: ${{ secrets.LOKI_BASIC_AUTH }} - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN: ${{ secrets.AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN }} - AWS_API_GW_HOST_GRAFANA: ${{ secrets.AWS_API_GW_HOST_GRAFANA }} - TEST_SECRETS_OVERRIDE_BASE64: ${{ secrets[inputs.test_secrets_override_key] }} - SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} - SLACK_NOTIFICATION_AFTER_TESTS_CHANNEL_ID: ${{ secrets.QA_VRF_SLACK_CHANNEL }} diff --git a/.github/workflows/on-demand-vrfv2plus-performance-test.yml b/.github/workflows/on-demand-vrfv2plus-performance-test.yml deleted file mode 100644 index ae42c32945..0000000000 --- a/.github/workflows/on-demand-vrfv2plus-performance-test.yml +++ /dev/null @@ -1,99 +0,0 @@ -name: On Demand VRFV2 Plus Performance Test -on: - workflow_dispatch: - inputs: - performanceTestType: - description: Performance Test Type of test to run - type: string - required: true - test_list_regex: - description: "Regex for tests to run" - required: false - default: "(TestVRFV2PlusPerformance)" - test_config_override_path: - description: Path to a test config file used to override the default test config - required: false - type: string - test_secrets_override_key: - description: 'Key to run tests with custom test secrets' - required: false - type: string - chainlink_version: - description: Chainlink image version to use - default: develop - required: false - type: string - notify_user_id_on_failure: - description: 'Enter Slack user ID to notify on test failure' - required: false - type: string - -jobs: - set-tests-to-run: - name: Set tests to run - runs-on: ubuntu-latest - outputs: - test_list: ${{ steps.set-tests.outputs.test_list }} - steps: - - name: Generate Test List JSON - id: set-tests - run: | - TEST_CMD='cd integration-tests/load && go test -v -count=1 -timeout 24h -run "${{ inputs.test_list_regex }}" ./vrfv2plus' - TEST_CONFIG_OVERRIDE_PATH=${{ inputs.test_config_override_path }} - TEST_TYPE=${{ inputs.performanceTestType }} - - TEST_LIST=$(jq -n -c \ - --arg test_cmd "$TEST_CMD" \ - --arg test_config_override_path "$TEST_CONFIG_OVERRIDE_PATH" \ - --arg TEST_TYPE "$TEST_TYPE" \ - '{ - "tests": [ - { - "id": "TestVRFv2Plus_Performance", - "path": "integration-tests/load/vrfv2plus/vrfv2plus_test.go", - "runs_on": "ubuntu-latest", - "test_env_type": "docker", - "test_cmd": $test_cmd, - "test_config_override_path": $test_config_override_path, - "test_env_vars": { - "TEST_TYPE": $TEST_TYPE - } - } - ] - }') - - echo "test_list=$TEST_LIST" >> $GITHUB_OUTPUT - - run-e2e-tests-workflow: - name: Run E2E Tests - needs: set-tests-to-run - uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@aad83f232743646faa35f5ac03ee3829148d37ce - with: - custom_test_list_json: ${{ needs.set-tests-to-run.outputs.test_list }} - chainlink_version: ${{ inputs.chainlink_version }} - slack_notification_after_tests: always - slack_notification_after_tests_name: "VRF V2 Plus Performance Tests with test config: ${{ inputs.test_config_override_path || 'default' }}" - 
slack_notification_after_tests_notify_user_id_on_failure: ${{ inputs.notify_user_id_on_failure }} - secrets: - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - QA_PYROSCOPE_INSTANCE: ${{ secrets.QA_PYROSCOPE_INSTANCE }} - QA_PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }} - QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} - GRAFANA_INTERNAL_TENANT_ID: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - GRAFANA_INTERNAL_BASIC_AUTH: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - GRAFANA_INTERNAL_HOST: ${{ secrets.GRAFANA_INTERNAL_HOST }} - GRAFANA_INTERNAL_URL_SHORTENER_TOKEN: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN }} - LOKI_TENANT_ID: ${{ secrets.LOKI_TENANT_ID }} - LOKI_URL: ${{ secrets.LOKI_URL }} - LOKI_BASIC_AUTH: ${{ secrets.LOKI_BASIC_AUTH }} - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN: ${{ secrets.AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN }} - AWS_API_GW_HOST_GRAFANA: ${{ secrets.AWS_API_GW_HOST_GRAFANA }} - TEST_SECRETS_OVERRIDE_BASE64: ${{ secrets[inputs.test_secrets_override_key] }} - SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} - SLACK_NOTIFICATION_AFTER_TESTS_CHANNEL_ID: ${{ secrets.QA_VRF_SLACK_CHANNEL }} - SLACK_API_KEY: ${{ secrets.QA_SLACK_API_KEY }} - SLACK_CHANNEL: ${{ secrets.QA_VRF_SLACK_CHANNEL }} diff --git a/.github/workflows/on-demand-vrfv2plus-smoke-tests.yml b/.github/workflows/on-demand-vrfv2plus-smoke-tests.yml deleted file mode 100644 index 9cd863eb7e..0000000000 --- a/.github/workflows/on-demand-vrfv2plus-smoke-tests.yml +++ /dev/null @@ -1,100 +0,0 @@ -name: On Demand VRFV2 Plus Smoke Tests -on: - workflow_dispatch: - inputs: - test_suite: - description: "Test Suite to run" - required: true - type: choice - default: "All Tests" - options: - - "All Tests" - - "Selected Tests" - test_list_regex: - description: "Regex for 'Selected Tests' to run" - required: false - default: "TestVRFv2Plus$/(Link_Billing|Native_Billing|Direct_Funding)|TestVRFV2PlusWithBHS" - test_config_override_path: - description: Path to a test config file used to override the default test config - required: false - type: string - test_secrets_override_key: - description: 'Key to run tests with custom test secrets' - required: false - type: string - chainlink_version: - description: Chainlink image version to use - default: develop - required: false - type: string - notify_user_id_on_failure: - description: 'Enter Slack user ID to notify on test failure' - required: false - type: string - -jobs: - set-tests-to-run: - name: Set tests to run - runs-on: ubuntu-latest - outputs: - test_list: ${{ steps.set-tests.outputs.test_list }} - steps: - - name: Generate Test List JSON - id: set-tests - run: | - if [[ "${{ inputs.test_suite }}" == "All Tests" ]]; then - TEST_CMD="cd integration-tests/smoke && go test vrfv2plus_test.go -test.parallel=1 -timeout 3h -count=1 -json -v" - else - TEST_CMD='cd integration-tests/smoke && go test -test.run "${{ inputs.test_list_regex }}" -test.parallel=1 -timeout 2h -count=1 -json -v' - fi - TEST_CONFIG_OVERRIDE_PATH=${{ inputs.test_config_override_path }} - - TEST_LIST=$(jq -n -c \ - --arg test_cmd "$TEST_CMD" \ - --arg test_config_override_path "$TEST_CONFIG_OVERRIDE_PATH" \ - '{ - "tests": [ - { - "id": "TestVRFv2Plus_Smoke", - "path": "integration-tests/smoke/vrfv2plus_test.go", - "runs_on": "ubuntu-latest", - "test_env_type": "docker", - "test_cmd": $test_cmd, - 
"test_config_override_path": $test_config_override_path - } - ] - }') - - echo "test_list=$TEST_LIST" >> $GITHUB_OUTPUT - - run-e2e-tests-workflow: - name: Run E2E Tests - needs: set-tests-to-run - uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@aad83f232743646faa35f5ac03ee3829148d37ce - with: - custom_test_list_json: ${{ needs.set-tests-to-run.outputs.test_list }} - chainlink_version: ${{ inputs.chainlink_version }} - slack_notification_after_tests: always - slack_notification_after_tests_name: "VRF V2 Plus Smoke Tests with test config: ${{ inputs.test_config_override_path || 'default' }}" - slack_notification_after_tests_notify_user_id_on_failure: ${{ inputs.notify_user_id_on_failure }} - secrets: - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - QA_PYROSCOPE_INSTANCE: ${{ secrets.QA_PYROSCOPE_INSTANCE }} - QA_PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }} - QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} - GRAFANA_INTERNAL_TENANT_ID: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - GRAFANA_INTERNAL_BASIC_AUTH: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - GRAFANA_INTERNAL_HOST: ${{ secrets.GRAFANA_INTERNAL_HOST }} - GRAFANA_INTERNAL_URL_SHORTENER_TOKEN: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN }} - LOKI_TENANT_ID: ${{ secrets.LOKI_TENANT_ID }} - LOKI_URL: ${{ secrets.LOKI_URL }} - LOKI_BASIC_AUTH: ${{ secrets.LOKI_BASIC_AUTH }} - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN: ${{ secrets.AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN }} - AWS_API_GW_HOST_GRAFANA: ${{ secrets.AWS_API_GW_HOST_GRAFANA }} - TEST_SECRETS_OVERRIDE_BASE64: ${{ secrets[inputs.test_secrets_override_key] }} - SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} - SLACK_NOTIFICATION_AFTER_TESTS_CHANNEL_ID: ${{ secrets.QA_VRF_SLACK_CHANNEL }} diff --git a/.github/workflows/operator-ui-ci.yml b/.github/workflows/operator-ui-ci.yml deleted file mode 100644 index 442fc06613..0000000000 --- a/.github/workflows/operator-ui-ci.yml +++ /dev/null @@ -1,63 +0,0 @@ -#name: Operator UI CI -#on: -# pull_request: -# -#env: -# TARGET_BRANCH_NAME: ${{ github.event.pull_request.base.ref }} -# -#jobs: -# check-gql: -# permissions: -# id-token: write -# contents: read -# # To allow writing comments to the current PR -# pull-requests: write -# -# name: Breaking Changes GQL Check -# runs-on: ubuntu-latest -# steps: -# - name: Collect Metrics -# id: collect-gha-metrics -# uses: smartcontractkit/push-gha-metrics-action@0281b09807758be1dcc41651e44e62b353808c47 # v2.1.0 -# with: -# basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} -# hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} -# org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} -# this-job-name: Breaking Changes GQL Check -# continue-on-error: true -# -# - name: Assume role capable of dispatching action -# uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # v4.0.2 -# with: -# role-to-assume: ${{ secrets.AWS_OIDC_CHAINLINK_CI_OPERATOR_UI_ACCESS_TOKEN_ISSUER_ROLE_ARN }} -# role-duration-seconds: 3600 -# role-session-name: operator-ui-ci.check-gql -# aws-region: ${{ secrets.AWS_REGION }} -# -# - name: Get Github Token -# id: get-gh-token -# uses: smartcontractkit/chainlink-github-actions/github-app-token-issuer@main -# with: -# url: ${{ secrets.AWS_INFRA_RELENG_TOKEN_ISSUER_LAMBDA_URL }} -# -# - name: Checkout repository -# uses: 
actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 -# -# - name: Get operator-ui tag -# id: get-operator-ui-tag -# shell: bash -# run: | -# if [[ $TARGET_BRANCH_NAME == release/* ]]; then -# TAG=$(cat ./operator_ui/TAG) -# echo "TAG=$TAG" >> $GITHUB_OUTPUT -# else -# echo "TAG=main" >> $GITHUB_OUTPUT -# fi -# -# - uses: convictional/trigger-workflow-and-wait@f69fa9eedd3c62a599220f4d5745230e237904be #v1.6.5 -# with: -# owner: smartcontractkit -# repo: operator-ui -# github_token: ${{ steps.get-gh-token.outputs.access-token }} -# workflow_file_name: chainlink-ci.yml -# client_payload: '{"ref": "${{ github.event.pull_request.head.sha }}", "tag": "${{ steps.get-operator-ui-tag.outputs.TAG }}"}' diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml deleted file mode 100644 index 130796a3c9..0000000000 --- a/.github/workflows/publish.yml +++ /dev/null @@ -1,84 +0,0 @@ -name: publish - -on: - workflow_dispatch: - push: - tags: - - "v*" - branches: - - ccip-develop - - "release/**" - -jobs: - build-and-publish: - # Do not trigger from versioned tags. - if: ${{ ! startsWith(github.ref, 'refs/tags/v') }} - environment: publish - permissions: - id-token: write - contents: read - runs-on: ubuntu-latest - steps: - - name: Collect Metrics - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 - with: - id: ccip-build-and-publish - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: build-and-publish - continue-on-error: true - - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - name: Build and publish chainlink image - uses: ./.github/actions/build-sign-publish-chainlink - with: - publish: true - aws-role-to-assume: ${{ secrets.AWS_OIDC_IAM_ROLE_PROD_PUBLISH_ARN }} - aws-role-duration-seconds: ${{ secrets.AWS_ROLE_DURATION_SECONDS }} - aws-region: ${{ secrets.AWS_REGION }} - ecr-hostname: ${{ secrets.AWS_ECR_REPO_URL }} - ecr-image-name: chainlink-ccip - sign-images: false - dockerfile: ./core/chainlink.Dockerfile - - build-and-publish-release: - # Trigger only from versioned tags. - if: ${{ startsWith(github.ref, 'refs/tags/v') }} - environment: publish - env: - # Public ECR is only available in us-east-1; not a secret. 
- AWS_REGION: us-east-1 - AWS_ECR_REPO_PUBLIC_REGISTRY: public.ecr.aws - permissions: - id-token: write - contents: read - runs-on: ubuntu-latest - steps: - - name: Collect Metrics - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 - with: - id: ccip-build-and-publish-release - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: build-and-publish-release - continue-on-error: true - - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - name: Build and publish chainlink image - uses: ./.github/actions/build-sign-publish-chainlink - with: - publish: true - aws-role-to-assume: ${{ secrets.AWS_OIDC_IAM_ROLE_PROD_PUBLISH_ARN }} - aws-role-duration-seconds: ${{ secrets.AWS_ROLE_DURATION_SECONDS }} - aws-region: ${{ env.AWS_REGION }} - ecr-hostname: ${{ env.AWS_ECR_REPO_PUBLIC_REGISTRY }} - ecr-image-name: w0i8p0z9/chainlink-ccip - sign-images: false - dockerfile: ./core/chainlink.Dockerfile - diff --git a/.github/workflows/run-nightly-e2e-tests.yml b/.github/workflows/run-nightly-e2e-tests.yml deleted file mode 100644 index 397258c237..0000000000 --- a/.github/workflows/run-nightly-e2e-tests.yml +++ /dev/null @@ -1,39 +0,0 @@ -name: Run Nightly E2E Tests - -on: - # Disable the workflow as the notification already sent on core workflow - # schedule: - # Run every night at midnight UTC (0:00 AM) - # - cron: '0 0 * * *' - workflow_dispatch: - -jobs: - call-run-e2e-tests-workflow: - name: Run E2E Tests - uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@aad83f232743646faa35f5ac03ee3829148d37ce - with: - chainlink_version: develop - test_path: .github/e2e-tests.yml - test_trigger: Nightly E2E Tests - slack_notification_after_tests: true - slack_notification_after_tests_channel_id: "#team-test-tooling-internal" - slack_notification_after_tests_name: Nightly E2E Tests - secrets: - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - QA_PYROSCOPE_INSTANCE: ${{ secrets.QA_PYROSCOPE_INSTANCE }} - QA_PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }} - QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} - GRAFANA_INTERNAL_TENANT_ID: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - GRAFANA_INTERNAL_BASIC_AUTH: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - GRAFANA_INTERNAL_HOST: ${{ secrets.GRAFANA_INTERNAL_HOST }} - GRAFANA_INTERNAL_URL_SHORTENER_TOKEN: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN }} - LOKI_TENANT_ID: ${{ secrets.LOKI_TENANT_ID }} - LOKI_URL: ${{ secrets.LOKI_URL }} - LOKI_BASIC_AUTH: ${{ secrets.LOKI_BASIC_AUTH }} - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN: ${{ secrets.AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN }} - AWS_API_GW_HOST_GRAFANA: ${{ secrets.AWS_API_GW_HOST_GRAFANA }} - SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} diff --git a/.github/workflows/run-selected-e2e-tests.yml b/.github/workflows/run-selected-e2e-tests.yml deleted file mode 100644 index e81d4f0d6d..0000000000 --- a/.github/workflows/run-selected-e2e-tests.yml +++ /dev/null @@ -1,64 +0,0 @@ -name: Run Selected E2E Tests - -on: - workflow_dispatch: - inputs: - chainlink_version: - description: 'Enter Chainlink version to use for the tests. 
Example: "v2.10.0" or sha' - required: false - type: string - test_ids: - description: 'Run all tests "*" by default. Or, enter test IDs to run separated by commas. Example: "run_all_in_ocr_tests_go,run_TestOCRv2Request_in_ocr2_test_go". Check all test IDs in .github/e2e-tests.yml' - default: "*" - required: true - type: string - test_secrets_override_key: - description: 'Enter the secret key to override test secrets' - required: false - type: string - test_config_override_path: - description: 'Path to a test config file used to override the default test config' - required: false - type: string - with_existing_remote_runner_version: - description: 'Use the existing remote runner version for k8s tests. Example: "d3bf5044af33e08be788a2df31c4a745cf69d787"' - required: false - type: string - workflow_run_name: - description: 'Enter the name of the workflow run' - default: 'Run E2E Tests' - required: false - type: string - -run-name: ${{ inputs.workflow_run_name }} - -jobs: - call-run-e2e-tests-workflow: - name: Run E2E Tests - uses: smartcontractkit/.github/.github/workflows/run-e2e-tests.yml@aad83f232743646faa35f5ac03ee3829148d37ce - with: - chainlink_version: ${{ github.event.inputs.chainlink_version }} - test_path: .github/e2e-tests.yml - test_ids: ${{ github.event.inputs.test_ids }} - test_config_override_path: ${{ github.event.inputs.test_config_override_path }} - with_existing_remote_runner_version: ${{ github.event.inputs.with_existing_remote_runner_version }} - secrets: - QA_AWS_REGION: ${{ secrets.QA_AWS_REGION }} - QA_AWS_ROLE_TO_ASSUME: ${{ secrets.QA_AWS_ROLE_TO_ASSUME }} - QA_AWS_ACCOUNT_NUMBER: ${{ secrets.QA_AWS_ACCOUNT_NUMBER }} - QA_PYROSCOPE_INSTANCE: ${{ secrets.QA_PYROSCOPE_INSTANCE }} - QA_PYROSCOPE_KEY: ${{ secrets.QA_PYROSCOPE_KEY }} - QA_KUBECONFIG: ${{ secrets.QA_KUBECONFIG }} - GRAFANA_INTERNAL_TENANT_ID: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - GRAFANA_INTERNAL_BASIC_AUTH: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - GRAFANA_INTERNAL_HOST: ${{ secrets.GRAFANA_INTERNAL_HOST }} - GRAFANA_INTERNAL_URL_SHORTENER_TOKEN: ${{ secrets.GRAFANA_INTERNAL_URL_SHORTENER_TOKEN }} - LOKI_TENANT_ID: ${{ secrets.LOKI_TENANT_ID }} - LOKI_URL: ${{ secrets.LOKI_URL }} - LOKI_BASIC_AUTH: ${{ secrets.LOKI_BASIC_AUTH }} - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - AWS_REGION: ${{ secrets.QA_AWS_REGION }} - AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN: ${{ secrets.AWS_OIDC_IAM_ROLE_VALIDATION_PROD_ARN }} - AWS_API_GW_HOST_GRAFANA: ${{ secrets.AWS_API_GW_HOST_GRAFANA }} - TEST_SECRETS_OVERRIDE_BASE64: ${{ secrets[inputs.test_secrets_override_key] }} - SLACK_BOT_TOKEN: ${{ secrets.QA_SLACK_API_KEY }} diff --git a/.github/workflows/sigscanner.yml b/.github/workflows/sigscanner.yml deleted file mode 100644 index 5d22f79ab5..0000000000 --- a/.github/workflows/sigscanner.yml +++ /dev/null @@ -1,36 +0,0 @@ -name: 'SigScanner Check' - -on: - merge_group: - push: - -jobs: - sigscanner-check: - runs-on: ubuntu-latest - steps: - - name: "SigScanner checking ${{ github.sha }} by ${{ github.actor }}" - env: - API_TOKEN: ${{ secrets.SIGSCANNER_API_TOKEN }} - API_URL: ${{ secrets.SIGSCANNER_API_URL }} - run: | - echo "🔎 Checking commit ${{ github.sha }} by ${{ github.actor }} in ${{ github.repository }} - ${{ github.event_name }}" - CODE=`curl --write-out '%{http_code}' -X POST -H "Content-Type: application/json" -H "Authorization: $API_TOKEN" --silent --output /dev/null --url "$API_URL" --data '{"commit":"${{ github.sha }}","repository":"${{ github.repository }}","author":"${{ github.actor }}"}'` - echo 
"Received $CODE" - if [[ "$CODE" == "200" ]]; then - echo "✅ Commit is verified" - exit 0 - else - echo "❌ Commit is NOT verified" - exit 1 - fi - - name: Collect Metrics - if: always() - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: sigscanner - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: sigscanner-check - continue-on-error: true diff --git a/.github/workflows/solidity-foundry-artifacts.yml b/.github/workflows/solidity-foundry-artifacts.yml deleted file mode 100644 index 90d39f36a0..0000000000 --- a/.github/workflows/solidity-foundry-artifacts.yml +++ /dev/null @@ -1,169 +0,0 @@ -name: Solidity Foundry Artifact Generation -on: - workflow_dispatch: - inputs: - product: - type: choice - description: 'product for which to generate artifacts; should be the same as Foundry profile' - required: true - options: - - "automation" - - "ccip" - - "functions" - - "keystone" - - "l2ep" - - "liquiditymanager" - - "llo-feeds" - - "operatorforwarder" - - "shared" - - "transmission" - - "vrf" - commit_to_use: - type: string - description: 'commit SHA to use for artifact generation; if empty HEAD will be used' - required: false - base_ref: - description: 'commit or tag to use as base reference, when looking for modified Solidity files' - required: true - link_with_jira: - description: 'link generated artifacts with Jira issues?' - type: boolean - default: true - required: false - -env: - FOUNDRY_PROFILE: ci - # Unfortunately, we can't use the "default" field in the inputs section, because it does not have - # access to the workflow context - head_ref: ${{ inputs.commit_to_use || github.sha }} - -jobs: - changes: - name: Detect changes - runs-on: ubuntu-latest - outputs: - product_changes: ${{ steps.changes-transform.outputs.product_changes }} - product_files: ${{ steps.changes-transform.outputs.product_files }} - changeset_changes: ${{ steps.changes-dorny.outputs.changeset }} - changeset_files: ${{ steps.changes-dorny.outputs.changeset_files }} - steps: - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - ref: ${{ env.head_ref }} - - name: Find modified contracts - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 - id: changes-dorny - with: - list-files: 'csv' - base: ${{ inputs.base_ref }} - # This is a valid input, see https://github.com/dorny/paths-filter/pull/226 - predicate-quantifier: every - filters: | - ignored: &ignored - - '!contracts/src/v0.8/**/test/**' - - '!contracts/src/v0.8/**/tests/**' - - '!contracts/src/v0.8/**/mock/**' - - '!contracts/src/v0.8/**/mocks/**' - - '!contracts/src/v0.8/**/*.t.sol' - - '!contracts/src/v0.8/*.t.sol' - - '!contracts/src/v0.8/**/testhelpers/**' - - '!contracts/src/v0.8/testhelpers/**' - - '!contracts/src/v0.8/vendor/**' - other_shared: - - modified|added: 'contracts/src/v0.8/(interfaces/**/*.sol|*.sol)' - - *ignored - sol: - - modified|added: 'contracts/src/v0.8/**/*.sol' - - *ignored - product: &product - - modified|added: 'contracts/src/v0.8/${{ inputs.product }}/**/*.sol' - - *ignored - changeset: - - modified|added: 'contracts/.changeset/!(README)*.md' - - # Manual transformation needed, because shared contracts have a different folder structure - - name: Transform modified files - id: changes-transform - shell: bash - run: | - if [ "${{ 
inputs.product }}" = "shared" ]; then - echo "::debug:: Product is shared, transforming changes" - if [[ "${{ steps.changes-dorny.outputs.product }}" == "true" && "${{ steps.changes-dorny.outputs.other_shared }}" == "true" ]]; then - echo "::debug:: Changes were found in 'shared' folder and in 'interfaces' and root folders" - echo "product_changes=true" >> $GITHUB_OUTPUT - echo "product_files=${{ steps.changes-dorny.outputs.product_files }},${{ steps.changes-dorny.outputs.other_shared_files }}" >> $GITHUB_OUTPUT - elif [[ "${{ steps.changes-dorny.outputs.product }}" == "false" && "${{ steps.changes-dorny.outputs.other_shared }}" == "true" ]]; then - echo "::debug:: Only contracts in' interfaces' and root folders were modified" - echo "product_changes=true" >> $GITHUB_OUTPUT - echo "product_files=${{ steps.changes-dorny.outputs.other_shared_files }}" >> $GITHUB_OUTPUT - elif [[ "${{ steps.changes-dorny.outputs.product }}" == "true" && "${{ steps.changes-dorny.outputs.other_shared }}" == "false" ]]; then - echo "::debug:: Only contracts in 'shared' folder were modified" - echo "product_changes=true" >> $GITHUB_OUTPUT - echo "product_files=${{ steps.changes-dorny.outputs.product_files }}" >> $GITHUB_OUTPUT - else - echo "::debug:: No contracts were modified" - echo "product_changes=false" >> $GITHUB_OUTPUT - echo "product_files=" >> $GITHUB_OUTPUT - fi - else - echo "product_changes=${{ steps.changes-dorny.outputs.product }}" >> $GITHUB_OUTPUT - echo "product_files=${{ steps.changes-dorny.outputs.product_files }}" >> $GITHUB_OUTPUT - fi - - - name: Check for changes outside of artifact scope - uses: ./.github/actions/validate-artifact-scope - if: ${{ steps.changes-dorny.outputs.sol == 'true' }} - with: - sol_files: ${{ steps.changes-dorny.outputs.sol_files }} - product: ${{ inputs.product }} - - prepare-workflow-inputs: - name: Prepare workflow inputs - runs-on: ubuntu-22.04 - needs: [ changes ] - outputs: - foundry_version: ${{ steps.extract-foundry-version.outputs.foundry-version }} - generate_code_coverage: ${{ steps.skip-code-coverage.outputs.generate_code_coverage }} - steps: - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - - name: Extract Foundry version - id: extract-foundry-version - uses: ./.github/actions/detect-solidity-foundry-version - with: - working-directory: contracts - - - name: Should skip code coverage report - id: skip-code-coverage - run: | - if [[ "${{ inputs.product }}" = "automation" || "${{ inputs.product }}" = "vrf" || "${{ inputs.product }}" = "functions" ]]; then - echo "generate_code_coverage=false" >> $GITHUB_OUTPUT - else - echo "generate_code_coverage=true" >> $GITHUB_OUTPUT - fi - - generate-artifacts: - name: Generate Solidity Review Artifacts - needs: [changes, prepare-workflow-inputs] - uses: smartcontractkit/.github/.github/workflows/solidity-review-artifacts.yml@b6e37806737eef87e8c9137ceeb23ef0bff8b1db # validate-solidity-artifacts@0.1.0 - with: - product: ${{ inputs.product }} - commit_to_use: ${{ inputs.commit_to_use }} - base_ref: ${{ inputs.base_ref }} - product_changes: ${{ needs.changes.outputs.product_changes }} - product_files: ${{ needs.changes.outputs.product_files }} - changeset_changes: ${{ needs.changes.outputs.changeset_changes }} - changeset_files: ${{ needs.changes.outputs.changeset_files }} - foundry_version: ${{ needs.prepare-workflow-inputs.outputs.foundry_version }} - contracts_directory: './contracts' - generate_code_coverage: ${{ 
needs.prepare-workflow-inputs.outputs.generate_code_coverage == 'true' }} - link_with_jira: ${{ inputs.link_with_jira }} - jira_host: ${{ vars.JIRA_HOST }} - install_semver: false - slither_config_file_path: 'contracts/configs/slither/.slither.config-artifacts.json' - lcov_prune_script_path: 'scripts/lcov_prune' - secrets: - jira_username: ${{ secrets.JIRA_USERNAME }} - jira_api_token: ${{ secrets.JIRA_API_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/solidity-foundry.yml b/.github/workflows/solidity-foundry.yml deleted file mode 100644 index 8e517673fd..0000000000 --- a/.github/workflows/solidity-foundry.yml +++ /dev/null @@ -1,595 +0,0 @@ -name: Solidity Foundry -on: [pull_request] - -env: - FOUNDRY_PROFILE: ci - -# Making changes: -# * use the top-level matrix to decide, which checks should run for each product. -# * when enabling code coverage, remember to adjust the minimum code coverage as it's set to 98.5% by default. - -# This pipeline will run product tests only if product-specific contracts were modified or if broad-impact changes were made (e.g. changes to this pipeline, Foundry configuration, etc.) -# For modified contracts we use a LLM to extract new issues introduced by the changes. For new contracts full report is delivered. -# Slither has a default configuration, but also supports per-product configuration. If a product-specific configuration is not found, the default one is used. -# Changes to test files do not trigger static analysis or formatting checks. - -jobs: - define-matrix: - name: Define test matrix - runs-on: ubuntu-latest - outputs: - matrix: ${{ steps.define-matrix.outputs.matrix }} - foundry-version: ${{ steps.extract-foundry-version.outputs.foundry-version }} - steps: - - name: Define test matrix - id: define-matrix - shell: bash - run: | - cat < matrix.json - [ - { "name": "automation", "setup": { "run-coverage": false, "min-coverage": 98.5, "run-gas-snapshot": false, "run-forge-fmt": false }}, - { "name": "ccip", "setup": { "run-coverage": true, "min-coverage": 97.6, "run-gas-snapshot": true, "run-forge-fmt": true }}, - { "name": "functions", "setup": { "run-coverage": false, "min-coverage": 98.5, "run-gas-snapshot": true, "run-forge-fmt": false }}, - { "name": "keystone", "setup": { "run-coverage": true, "min-coverage": 72.8, "run-gas-snapshot": false, "run-forge-fmt": false }}, - { "name": "l2ep", "setup": { "run-coverage": true, "min-coverage": 61.0, "run-gas-snapshot": true, "run-forge-fmt": false }}, - { "name": "liquiditymanager", "setup": { "run-coverage": true, "min-coverage": 44.3, "run-gas-snapshot": true, "run-forge-fmt": false }}, - { "name": "llo-feeds", "setup": { "run-coverage": true, "min-coverage": 49.3, "run-gas-snapshot": true, "run-forge-fmt": false }}, - { "name": "operatorforwarder", "setup": { "run-coverage": true, "min-coverage": 55.7, "run-gas-snapshot": true, "run-forge-fmt": false }}, - { "name": "shared", "setup": { "run-coverage": true, "extra-coverage-params": "--no-match-path='*CallWithExactGas*'", "min-coverage": 32.6, "run-gas-snapshot": true, "run-forge-fmt": false }}, - { "name": "transmission", "setup": { "run-coverage": true, "min-coverage": 61.5, "run-gas-snapshot": true, "run-forge-fmt": false }}, - { "name": "vrf", "setup": { "run-coverage": false, "min-coverage": 98.5, "run-gas-snapshot": false, "run-forge-fmt": false }} - ] - EOF - - matrix=$(cat matrix.json | jq -c .) 
- echo "matrix=$matrix" >> $GITHUB_OUTPUT - - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - - name: Extract Foundry version - id: extract-foundry-version - uses: ./.github/actions/detect-solidity-foundry-version - with: - working-directory: contracts - - changes: - name: Detect changes - runs-on: ubuntu-latest - outputs: - non_src_changes: ${{ steps.changes.outputs.non_src }} - sol_modified_added: ${{ steps.changes.outputs.sol }} - sol_mod_only: ${{ steps.changes.outputs.sol_mod_only }} - sol_mod_only_files: ${{ steps.changes.outputs.sol_mod_only_files }} - not_test_sol_modified: ${{ steps.changes-non-test.outputs.not_test_sol }} - not_test_sol_modified_files: ${{ steps.changes-non-test.outputs.not_test_sol_files }} - all_changes: ${{ steps.changes.outputs.changes }} - steps: - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - name: Detect changes - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 - id: changes - with: - list-files: 'shell' - filters: | - non_src: - - '.github/workflows/solidity-foundry.yml' - - 'contracts/foundry.toml' - - 'contracts/gas-snapshots/*.gas-snapshot' - - 'contracts/package.json' - - 'contracts/GNUmakefile' - sol: - - modified|added: 'contracts/src/v0.8/**/*.sol' - sol_mod_only: - - modified: 'contracts/src/v0.8/**/!(tests|mocks)/!(*.t).sol' - not_test_sol: - - modified|added: 'contracts/src/v0.8/**/!(tests|mocks)/!(*.t).sol' - automation: - - 'contracts/src/v0.8/automation/**/*.sol' - ccip: - - 'contracts/src/v0.8/ccip/**/*.sol' - functions: - - 'contracts/src/v0.8/functions/**/*.sol' - keystone: - - 'contracts/src/v0.8/keystone/**/*.sol' - l2ep: - - 'contracts/src/v0.8/l2ep/**/*.sol' - liquiditymanager: - - 'contracts/src/v0.8/liquiditymanager/**/*.sol' - llo-feeds: - - 'contracts/src/v0.8/llo-feeds/**/*.sol' - operatorforwarder: - - 'contracts/src/v0.8/operatorforwarder/**/*.sol' - vrf: - - 'contracts/src/v0.8/vrf/**/*.sol' - shared: - - 'contracts/src/v0.8/shared/**/*.sol' - - 'contracts/src/v0.8/*.sol' - - 'contracts/src/v0.8/mocks/**/*.sol' - - 'contracts/src/v0.8/tests/**/*.sol' - - 'contracts/src/v0.8/vendor/**/*.sol' - transmission: - - 'contracts/src/v0.8/transmission/**/*.sol' - - - name: Detect non-test changes - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 - id: changes-non-test - with: - list-files: 'shell' - # This is a valid input, see https://github.com/dorny/paths-filter/pull/226 - predicate-quantifier: every - filters: | - not_test_sol: - - modified|added: 'contracts/src/v0.8/**/!(*.t).sol' - - '!contracts/src/v0.8/**/test/**' - - '!contracts/src/v0.8/**/tests/**' - - '!contracts/src/v0.8/**/mock/**' - - '!contracts/src/v0.8/**/mocks/**' - - '!contracts/src/v0.8/**/*.t.sol' - - '!contracts/src/v0.8/*.t.sol' - - '!contracts/src/v0.8/**/testhelpers/**' - - '!contracts/src/v0.8/testhelpers/**' - - '!contracts/src/v0.8/vendor/**' - - tests: - strategy: - fail-fast: false - matrix: - product: ${{fromJson(needs.define-matrix.outputs.matrix)}} - needs: [define-matrix, changes] - name: Foundry Tests ${{ matrix.product.name }} - runs-on: ubuntu-22.04 - - # The if statements for steps after checkout repo is workaround for - # passing required check for PRs that don't have filtered changes. 
- steps: - - name: Checkout the repo - if: ${{ contains(fromJson(needs.changes.outputs.all_changes), matrix.product.name) - || contains(fromJson(needs.changes.outputs.all_changes), 'shared') - || needs.changes.outputs.non_src_changes == 'true' }} - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - submodules: recursive - - # Only needed because we use the NPM versions of packages - # and not native Foundry. This is to make sure the dependencies - # stay in sync. - - name: Setup NodeJS - if: ${{ contains(fromJson(needs.changes.outputs.all_changes), matrix.product.name) - || contains(fromJson(needs.changes.outputs.all_changes), 'shared') - || needs.changes.outputs.non_src_changes == 'true' }} - uses: ./.github/actions/setup-nodejs - with: - prod: "true" - - - name: Install Foundry - if: ${{ contains(fromJson(needs.changes.outputs.all_changes), matrix.product.name) - || contains(fromJson(needs.changes.outputs.all_changes), 'shared') - || needs.changes.outputs.non_src_changes == 'true' }} - uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773 # v1.2.0 - with: - version: ${{ needs.define-matrix.outputs.foundry-version }} - - - name: Run Forge build - if: ${{ contains(fromJson(needs.changes.outputs.all_changes), matrix.product.name) - || contains(fromJson(needs.changes.outputs.all_changes), 'shared') - || needs.changes.outputs.non_src_changes == 'true' }} - run: | - forge --version - forge build - id: build - working-directory: contracts - env: - FOUNDRY_PROFILE: ${{ matrix.product.name }} - - - name: Run Forge tests - if: ${{ contains(fromJson(needs.changes.outputs.all_changes), matrix.product.name) - || contains(fromJson(needs.changes.outputs.all_changes), 'shared') - || needs.changes.outputs.non_src_changes == 'true' }} - run: | - forge test -vvv - id: test - working-directory: contracts - env: - FOUNDRY_PROFILE: ${{ matrix.product.name }} - - - name: Run Forge snapshot - if: ${{ (contains(fromJson(needs.changes.outputs.all_changes), matrix.product.name) - || contains(fromJson(needs.changes.outputs.all_changes), 'shared') - || needs.changes.outputs.non_src_changes == 'true') - && matrix.product.setup.run-gas-snapshot }} - run: | - forge snapshot --nmt "test_?Fuzz_\w{1,}?" 
--check gas-snapshots/${{ matrix.product.name }}.gas-snapshot - id: snapshot - working-directory: contracts - env: - FOUNDRY_PROFILE: ${{ matrix.product.name }} - - # required for code coverage report generation - - name: Setup LCOV - if: ${{ (contains(fromJson(needs.changes.outputs.all_changes), matrix.product.name) - || contains(fromJson(needs.changes.outputs.all_changes), 'shared') - || needs.changes.outputs.non_src_changes == 'true') - && matrix.product.setup.run-coverage }} - uses: hrishikesh-kadam/setup-lcov@f5da1b26b0dcf5d893077a3c4f29cf78079c841d # v1.0.0 - - - name: Run coverage for ${{ matrix.product.name }} - if: ${{ (contains(fromJson(needs.changes.outputs.all_changes), matrix.product.name) - || contains(fromJson(needs.changes.outputs.all_changes), 'shared') - || needs.changes.outputs.non_src_changes == 'true') - && matrix.product.setup.run-coverage }} - working-directory: contracts - shell: bash - run: | - if [[ -n "${{ matrix.product.setup.extra-coverage-params }}" ]]; then - forge coverage --report lcov ${{ matrix.product.setup.extra-coverage-params }} - else - forge coverage --report lcov - fi - env: - FOUNDRY_PROFILE: ${{ matrix.product.name }} - - - name: Prune lcov report - if: ${{ (contains(fromJson(needs.changes.outputs.all_changes), matrix.product.name) - || contains(fromJson(needs.changes.outputs.all_changes), 'shared') - || needs.changes.outputs.non_src_changes == 'true') - && matrix.product.setup.run-coverage }} - run: | - ./contracts/scripts/lcov_prune ${{ matrix.product.name }} ./contracts/lcov.info ./contracts/lcov.info.pruned - - - name: Report code coverage for ${{ matrix.product.name }} - if: ${{ (contains(fromJson(needs.changes.outputs.all_changes), matrix.product.name) - || contains(fromJson(needs.changes.outputs.all_changes), 'shared') - || needs.changes.outputs.non_src_changes == 'true') - && matrix.product.setup.run-coverage }} - uses: zgosalvez/github-actions-report-lcov@a546f89a65a0cdcd82a92ae8d65e74d450ff3fbc # v4.1.4 - with: - update-comment: false - coverage-files: ./contracts/lcov.info.pruned - minimum-coverage: ${{ matrix.product.setup.min-coverage }} - artifact-name: code-coverage-report-${{ matrix.product.name }} - working-directory: ./contracts - - - name: Collect Metrics - if: ${{ contains(fromJson(needs.changes.outputs.all_changes), matrix.product.name) - || contains(fromJson(needs.changes.outputs.all_changes), 'shared') - || needs.changes.outputs.non_src_changes == 'true' }} - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: ${{ matrix.product.name }}-solidity-foundry - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Foundry Tests ${{ matrix.product.name }} - continue-on-error: true - - # runs only if non-test contracts were modified; scoped only to modified or added contracts - analyze: - needs: [ changes, define-matrix ] - name: Run static analysis - if: needs.changes.outputs.not_test_sol_modified == 'true' - runs-on: ubuntu-22.04 - steps: - - name: Checkout this repository - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - - name: Checkout .github repository - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - repository: smartcontractkit/.github - ref: b6e37806737eef87e8c9137ceeb23ef0bff8b1db # validate-solidity-artifacts@0.1.0 - path: ./dot_github - - - name: Setup 
NodeJS - uses: ./.github/actions/setup-nodejs - - - name: Install Foundry - uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773 # v1.2.0 - with: - version: ${{ needs.define-matrix.outputs.foundry-version }} - - - name: Set up Python - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f #v5.1.1 - with: - python-version: '3.8' - - - name: Install solc-select and solc - uses: smartcontractkit/.github/actions/setup-solc-select@b6e37806737eef87e8c9137ceeb23ef0bff8b1db # validate-solidity-artifacts@0.1.0 - with: - to_install: '0.8.24' - to_use: '0.8.24' - - - name: Install Slither - uses: smartcontractkit/.github/actions/setup-slither@b6e37806737eef87e8c9137ceeb23ef0bff8b1db # validate-solidity-artifacts@0.1.0 - - - name: Run Slither - shell: bash - run: | - # modify remappings so that solc can find dependencies - ./dot_github/tools/scripts/solidity/modify_remappings.sh contracts contracts/remappings.txt - mv remappings_modified.txt remappings.txt - - # without it Slither sometimes fails to use remappings correctly - cp contracts/foundry.toml foundry.toml - - FILES="${{ needs.changes.outputs.not_test_sol_modified_files }}" - - for FILE in $FILES; do - PRODUCT=$(echo "$FILE" | awk -F'src/[^/]*/' '{print $2}' | cut -d'/' -f1) - echo "::debug::Running Slither for $FILE in $PRODUCT" - SLITHER_CONFIG="contracts/configs/slither/.slither.config-$PRODUCT-pr.json" - if [[ ! -f $SLITHER_CONFIG ]]; then - echo "::debug::No Slither config found for $PRODUCT, using default" - SLITHER_CONFIG="contracts/configs/slither/.slither.config-default-pr.json" - fi - ./dot_github/tools/scripts/solidity/generate_slither_report.sh "${{ github.server_url }}/${{ github.repository }}/blob/${{ github.sha }}/" "$SLITHER_CONFIG" "./contracts" "$FILE" "contracts/slither-reports-current" "--solc-remaps @=contracts/node_modules/@" - done - - # all the actions below, up to printing results, run only if any existing contracts were modified - # in that case we extract new issues introduced by the changes by using an LLM model - - name: Upload Slither results for current branch - if: needs.changes.outputs.sol_mod_only == 'true' - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 - timeout-minutes: 2 - continue-on-error: true - with: - name: slither-reports-current-${{ github.sha }} - path: contracts/slither-reports-current - retention-days: 7 - - # we need to upload scripts and configuration in case base_ref doesn't have the scripts, or they are in different version - - name: Upload Slither scripts - if: needs.changes.outputs.sol_mod_only == 'true' - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 - timeout-minutes: 2 - continue-on-error: true - with: - name: tmp-slither-scripts-${{ github.sha }} - path: ./dot_github/tools/scripts/solidity - retention-days: 7 - - - name: Upload configs - if: needs.changes.outputs.sol_mod_only == 'true' - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 - timeout-minutes: 2 - continue-on-error: true - with: - name: tmp-configs-${{ github.sha }} - path: contracts/configs - retention-days: 7 - - - name: Checkout earlier version of this repository - if: needs.changes.outputs.sol_mod_only == 'true' - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - ref: ${{ github.base_ref }} - - - name: Download Slither scripts - if: needs.changes.outputs.sol_mod_only == 'true' - uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # 
v4.1.7 - with: - name: tmp-slither-scripts-${{ github.sha }} - path: ./dot_github/tools/scripts/solidity - - - name: Download configs - if: needs.changes.outputs.sol_mod_only == 'true' - uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7 - with: - name: tmp-configs-${{ github.sha }} - path: contracts/configs - - # since we have just checked out the repository again, we lose NPM dependencies installs previously, we need to install them again to compile contracts - - name: Setup NodeJS - if: needs.changes.outputs.sol_mod_only == 'true' - uses: ./.github/actions/setup-nodejs - - - name: Run Slither for base reference - if: needs.changes.outputs.sol_mod_only == 'true' - shell: bash - run: | - # we need to set file permission again since they are lost during download - for file in ./dot_github/tools/scripts/solidity/*.sh; do - chmod +x "$file" - done - - # modify remappings so that solc can find dependencies - ./dot_github/tools/scripts/solidity/modify_remappings.sh contracts contracts/remappings.txt - mv remappings_modified.txt remappings.txt - - # without it Slither sometimes fails to use remappings correctly - cp contracts/foundry.toml foundry.toml - - FILES="${{ needs.changes.outputs.sol_mod_only_files }}" - - for FILE in $FILES; do - PRODUCT=$(echo "$FILE" | awk -F'src/[^/]*/' '{print $2}' | cut -d'/' -f1) - echo "::debug::Running Slither for $FILE in $PRODUCT" - SLITHER_CONFIG="contracts/configs/slither/.slither.config-$PRODUCT-pr.json" - if [[ ! -f $SLITHER_CONFIG ]]; then - echo "::debug::No Slither config found for $PRODUCT, using default" - SLITHER_CONFIG="contracts/configs/slither/.slither.config-default-pr.json" - fi - ./dot_github/tools/scripts/solidity/generate_slither_report.sh "${{ github.server_url }}/${{ github.repository }}/blob/${{ github.sha }}/" "$SLITHER_CONFIG" "./contracts" "$FILE" "contracts/slither-reports-base-ref" "--solc-remaps @=contracts/node_modules/@" - done - - - name: Upload Slither report - if: needs.changes.outputs.sol_mod_only == 'true' - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 - timeout-minutes: 10 - continue-on-error: true - with: - name: slither-reports-base-${{ github.sha }} - path: | - contracts/slither-reports-base-ref - retention-days: 7 - - - name: Download Slither results for current branch - if: needs.changes.outputs.sol_mod_only == 'true' - uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7 - with: - name: slither-reports-current-${{ github.sha }} - path: contracts/slither-reports-current - - - name: Generate diff of Slither reports for modified files - if: needs.changes.outputs.sol_mod_only == 'true' - env: - OPEN_API_KEY: ${{ secrets.OPEN_AI_SLITHER_API_KEY }} - shell: bash - run: | - set -euo pipefail - for base_report in contracts/slither-reports-base-ref/*.md; do - filename=$(basename "$base_report") - current_report="contracts/slither-reports-current/$filename" - new_issues_report="contracts/slither-reports-current/${filename%.md}_new_issues.md" - if [ -f "$current_report" ]; then - if ./contracts/scripts/ci/find_slither_report_diff.sh "$base_report" "$current_report" "$new_issues_report" "contracts/scripts/ci/prompt-difference.md" "contracts/scripts/ci/prompt-validation.md"; then - if [[ -s $new_issues_report ]]; then - awk 'NR==2{print "*This new issues report has been automatically generated by LLM model using two Slither reports. 
One based on `${{ github.base_ref}}` and another on `${{ github.sha }}` commits.*"}1' $new_issues_report > tmp.md && mv tmp.md $new_issues_report - echo "Replacing full Slither report with diff for $current_report" - rm $current_report && mv $new_issues_report $current_report - else - echo "No difference detected between $base_report and $current_report reports. Won't include any of them." - rm $current_report - fi - else - echo "::warning::Failed to generate a diff report with new issues for $base_report using an LLM model, will use full report." - fi - - else - echo "::warning::Failed to find current commit's equivalent of $base_report (file $current_report doesn't exist, but should have been generated). Please check Slither logs." - fi - done - - # actions that execute only if any existing contracts were modified end here - - name: Print Slither summary - shell: bash - run: | - echo "# Static analysis results " >> $GITHUB_STEP_SUMMARY - for file in "contracts/slither-reports-current"/*.md; do - if [ -e "$file" ]; then - cat "$file" >> $GITHUB_STEP_SUMMARY - fi - done - - - name: Validate if all Slither run for all contracts - uses: smartcontractkit/.github/actions/validate-solidity-artifacts@b6e37806737eef87e8c9137ceeb23ef0bff8b1db # validate-solidity-artifacts@0.1.0 - with: - validate_slither_reports: 'true' - slither_reports_path: 'contracts/slither-reports-current' - sol_files: ${{ needs.changes.outputs.not_test_sol_modified_files }} - - - name: Upload Slither reports - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 - timeout-minutes: 10 - continue-on-error: true - with: - name: slither-reports-${{ github.sha }} - path: | - contracts/slither-reports-current - retention-days: 7 - - - name: Find Slither comment in the PR - # We only want to create the comment if the PR is not modified by a bot - if: "(github.event_name == 'push' && github.event.pusher.username && ! contains(github.event.pusher.username, '[bot]')) || (github.event_name != 'push' && ! contains(github.actor, '[bot]'))" - uses: peter-evans/find-comment@3eae4d37986fb5a8592848f6a574fdf654e61f9e # v3.0.0 - id: find-comment - with: - issue-number: ${{ github.event.pull_request.number }} - comment-author: 'github-actions[bot]' - body-includes: 'Static analysis results' - - - name: Extract job summary URL - id: job-summary-url - uses: pl-strflt/job-summary-url-action@df2d22c5351f73e0a187d20879854b8d98e6e001 # v1.0.0 - with: - job: 'Run static analysis' - - - name: Build Slither reports artifacts URL - id: build-slither-artifact-url - env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - ARTIFACTS=$(gh api -X GET repos/${{ github.repository }}/actions/runs/${{ github.run_id }}/artifacts) - ARTIFACT_ID=$(echo "$ARTIFACTS" | jq '.artifacts[] | select(.name=="slither-reports-${{ github.sha }}") | .id') - echo "Artifact ID: $ARTIFACT_ID" - - slither_artifact_url="https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}/artifacts/$ARTIFACT_ID" - echo "slither_artifact_url=$slither_artifact_url" >> $GITHUB_OUTPUT - - - name: Create or update Slither comment in the PR - # We only want to create the comment if the PR is not modified by a bot - if: "(github.event_name == 'push' && github.event.pusher.username && ! contains(github.event.pusher.username, '[bot]')) || (github.event_name != 'push' && ! 
contains(github.actor, '[bot]'))" - uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4.0.0 - with: - comment-id: ${{ steps.find-comment.outputs.comment-id }} - issue-number: ${{ github.event.pull_request.number }} - body: | - ## Static analysis results are available - Hey @${{ github.event.push && github.event.push.pusher && github.event.push.pusher.username || github.actor }}, you can view Slither reports in the job summary [here](${{ steps.job-summary-url.outputs.job_summary_url }}) or download them as artifact [here](${{ steps.build-slither-artifact-url.outputs.slither_artifact_url }}). - - Please check them before merging and make sure you have addressed all issues. - edit-mode: replace - - - name: Remove temp artifacts - uses: geekyeggo/delete-artifact@24928e75e6e6590170563b8ddae9fac674508aa1 # v5.0 - with: - name: tmp-* - - - name: Collect Metrics - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 - with: - id: solidity-foundry-slither - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Run static analysis - continue-on-error: true - - solidity-forge-fmt: - name: Forge fmt ${{ matrix.product.name }} - if: ${{ needs.changes.outputs.non_src_changes == 'true' || needs.changes.outputs.not_test_sol_modified == 'true' }} - needs: [define-matrix, changes] - strategy: - fail-fast: false - matrix: - product: ${{fromJson(needs.define-matrix.outputs.matrix)}} - runs-on: ubuntu-22.04 - steps: - - name: Checkout the repo - if: ${{ (contains(fromJson(needs.changes.outputs.all_changes), matrix.product.name) || needs.changes.outputs.non_src_changes == 'true') && matrix.product.setup.run-forge-fmt }} - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - submodules: recursive - - - name: Setup NodeJS - if: ${{ (contains(fromJson(needs.changes.outputs.all_changes), matrix.product.name) || needs.changes.outputs.non_src_changes == 'true') && matrix.product.setup.run-forge-fmt }} - uses: ./.github/actions/setup-nodejs - - - name: Install Foundry - if: ${{ (contains(fromJson(needs.changes.outputs.all_changes), matrix.product.name) || needs.changes.outputs.non_src_changes == 'true') && matrix.product.setup.run-forge-fmt }} - uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773 # v1.2.0 - with: - version: ${{ needs.define-matrix.outputs.foundry-version }} - - - name: Run Forge fmt - if: ${{ (contains(fromJson(needs.changes.outputs.all_changes), matrix.product.name) || needs.changes.outputs.non_src_changes == 'true') && matrix.product.setup.run-forge-fmt }} - run: forge fmt --check - id: fmt - working-directory: contracts - env: - FOUNDRY_PROFILE: ${{ matrix.product.name }} - - - name: Collect Metrics - if: ${{ (contains(fromJson(needs.changes.outputs.all_changes), matrix.product.name) || needs.changes.outputs.non_src_changes == 'true') && matrix.product.setup.run-forge-fmt }} - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 - with: - id: solidity-forge-fmt-${{ matrix.product.name }} - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Forge fmt ${{ matrix.product.name }} - continue-on-error: true \ No newline at end of file diff 
--git a/.github/workflows/solidity-tracability.yml b/.github/workflows/solidity-tracability.yml deleted file mode 100644 index 75774713d9..0000000000 --- a/.github/workflows/solidity-tracability.yml +++ /dev/null @@ -1,203 +0,0 @@ -# This workflow handles the enforcement of code Traceability via changesets and jira issue linking for our Solidity codebase. -name: Solidity Tracability - -on: - merge_group: - pull_request: - -defaults: - run: - shell: bash - -jobs: - files-changed: - # The job skips on merge_group events, and any release branches, and forks - # Since we only want to enforce Jira issues on pull requests related to feature branches - if: ${{ github.event_name != 'merge_group' && !startsWith(github.head_ref, 'release/') && github.event.pull_request.head.repo.full_name == 'smartcontractkit/chainlink' }} - name: Detect Changes - runs-on: ubuntu-latest - outputs: - source: ${{ steps.files-changed.outputs.source }} - changesets: ${{ steps.files-changed.outputs.changesets }} - changesets_files: ${{ steps.files-changed.outputs.changesets_files }} - steps: - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - - name: Filter paths - uses: dorny/paths-filter@de90cc6fb38fc0963ad72b210f1f284cd68cea36 # v3.0.2 - id: files-changed - with: - list-files: "csv" - # This is a valid input, see https://github.com/dorny/paths-filter/pull/226 - predicate-quantifier: "every" - filters: | - source: - - contracts/**/*.sol - - '!contracts/**/*.t.sol' - changesets: - - added|modified: 'contracts/.changeset/**' - - enforce-traceability: - # Note: A job that is skipped will report its status as "Success". - # It will not prevent a pull request from merging, even if it is a required check. - needs: [files-changed] - # We only want to run this job if the source files have changed - if: ${{ needs.files-changed.outputs.source == 'true' }} - name: Enforce Traceability - runs-on: ubuntu-latest - permissions: - actions: read - id-token: write - contents: read - pull-requests: write - steps: - # https://github.com/planetscale/ghcommit-action/blob/c7915d6c18d5ce4eb42b0eff3f10a29fe0766e4c/README.md?plain=1#L41 - # - # Include the pull request ref in the checkout action to prevent merge commit - # https://github.com/actions/checkout?tab=readme-ov-file#checkout-pull-request-head-commit-instead-of-merge-commit - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - ref: ${{ github.event.pull_request.head.sha }} - - - name: Assume role capable of dispatching action - uses: smartcontractkit/.github/actions/setup-github-token@ef78fa97bf3c77de6563db1175422703e9e6674f # setup-github-token@0.2.1 - id: get-gh-token - with: - aws-role-arn: ${{ secrets.AWS_OIDC_CHAINLINK_CI_AUTO_PR_TOKEN_ISSUER_ROLE_ARN }} - aws-lambda-url: ${{ secrets.AWS_INFRA_RELENG_TOKEN_ISSUER_LAMBDA_URL }} - aws-region: ${{ secrets.AWS_REGION }} - - - name: Make a comment - uses: thollander/actions-comment-pull-request@fabd468d3a1a0b97feee5f6b9e499eab0dd903f6 # v2.5.0 - with: - message: | - I see you updated files related to `contracts`. Please run `pnpm changeset` in the `contracts` directory to add a changeset. - reactions: eyes - comment_tag: changeset-contracts - # If the changeset is added, then we delete the comment, otherwise we add it. 
- mode: ${{ needs.files-changed.outputs.changesets == 'true' && 'delete' || 'upsert' }} - # We only create the comment if the changeset is not added - create_if_not_exists: ${{ needs.files-changed.outputs.changesets == 'true' && 'false' || 'true' }} - - - name: Check for new changeset for contracts - if: ${{ needs.files-changed.outputs.changesets == 'false' }} - shell: bash - run: | - echo "Please run pnpm changeset to add a changeset for contracts." - exit 1 - - - name: Setup NodeJS - uses: ./.github/actions/setup-nodejs - - - name: Checkout .Github repository - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - repository: smartcontractkit/.github - ref: 9aed33e5298471f20a3d630d711b96ae5538728c # jira-tracing@0.2.0 - path: ./dot_github - - # we need to set the top level directory for the jira-tracing action manually - # because now we are working with two repositories and automatic detection would - # select the repository with jira-tracing and not the chainlink repository - - name: Setup git top level directory - id: find-git-top-level-dir - run: echo "top_level_dir=$(pwd)" >> $GITHUB_OUTPUT - - - name: Setup Jira - working-directory: ./dot_github - run: pnpm install --filter jira-tracing - - # Because of our earlier checks, we know that both the source and changeset files have changed - - name: Enforce Traceability - working-directory: ./dot_github - run: | - echo "COMMIT_MESSAGE=$(git log -1 --pretty=format:'%s')" >> $GITHUB_ENV - pnpm --filter jira-tracing issue:enforce - env: - CHANGESET_FILES: ${{ needs.files-changed.outputs.changesets_files }} - GIT_TOP_LEVEL_DIR: ${{ steps.find-git-top-level-dir.outputs.top_level_dir }} - - PR_TITLE: ${{ github.event.pull_request.title }} - BRANCH_NAME: ${{ github.event.pull_request.head.ref }} - - JIRA_HOST: ${{ vars.JIRA_HOST }} - JIRA_USERNAME: ${{ secrets.JIRA_USERNAME }} - JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }} - - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Enforce Solidity Review Jira issue - working-directory: ./dot_github - shell: bash - run: | - # we do not want to fail the workflow if there are issues with the script - if ! pnpm --filter jira-tracing issue:enforce-solidity-review; then - echo "::warning::Failed to enforce Solidity Review Jira issue, this is not a blocking issue. You can safely ignore it." 
- fi - env: - CHANGESET_FILES: ${{ needs.files-changed.outputs.changesets_files }} - GIT_TOP_LEVEL_DIR: ${{ steps.find-git-top-level-dir.outputs.top_level_dir }} - - SOLIDITY_REVIEW_TEMPLATE_KEY: 'TT-1634' - EXPORT_JIRA_ISSUE_KEYS: 'true' - - JIRA_HOST: ${{ vars.JIRA_HOST }} - JIRA_USERNAME: ${{ secrets.JIRA_USERNAME }} - JIRA_API_TOKEN: ${{ secrets.JIRA_API_TOKEN }} - - # Commit appended changeset file back to repo - - uses: planetscale/ghcommit-action@13a844326508cdefc72235201bb0446d6d10a85f # v0.1.6 - with: - commit_message: "[Bot] Update changeset file with jira issues" - repo: ${{ github.repository }} - branch: ${{ github.head_ref }} - file_pattern: "contracts/.changeset/*" - env: - GITHUB_TOKEN: ${{ steps.get-gh-token.outputs.access-token }} - - - name: Read issue keys from env vars - shell: bash - id: read-issue-keys - run: | - # issue:enforce-solidity-review should have set two env vars with the issue keys - echo "Jira issue key related to pr: ${{ env.PR_JIRA_ISSUE_KEY }}" - echo "Jira issue key related to solidity review: ${{ env.SOLIDITY_REVIEW_JIRA_ISSUE_KEY }}" - - - name: Find traceability comment in the PR - uses: peter-evans/find-comment@3eae4d37986fb5a8592848f6a574fdf654e61f9e # v3.0.0 - id: find-comment - with: - issue-number: ${{ github.event.pull_request.number }} - comment-author: 'github-actions[bot]' - body-includes: 'Solidity Review Jira issue' - - - name: Create or update traceability comment in the PR - uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4.0.0 - with: - comment-id: ${{ steps.find-comment.outputs.comment-id }} - issue-number: ${{ github.event.pull_request.number }} - body: | - ## Solidity Review Jira issue - Hey! We have taken the liberty to link this PR to a Jira issue for Solidity Review. - - This is a new feature, that's currently in the pilot phase, so please make sure that the linkage is correct. In a contrary case, please update it manually in JIRA and replace Solidity Review issue key in the changeset file with the correct one. - Please reach out to the Test Tooling team and notify them about any issues you encounter. - - Any changes to the Solidity Review Jira issue should be reflected in the changeset file. If you need to update the issue key, please do so manually in the following changeset file: `${{ needs.files-changed.outputs.changesets_files }}` - - This PR has been linked to Solidity Review Jira issue: [${{ env.SOLIDITY_REVIEW_JIRA_ISSUE_KEY }}](${{ vars.JIRA_HOST }}browse/${{ env.SOLIDITY_REVIEW_JIRA_ISSUE_KEY }}) - edit-mode: replace - - - name: Collect Metrics - id: collect-gha-metrics - if: always() - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: soldity-traceability - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Enforce Traceability - continue-on-error: true diff --git a/.github/workflows/solidity-wrappers.yml b/.github/workflows/solidity-wrappers.yml deleted file mode 100644 index 42371755c9..0000000000 --- a/.github/workflows/solidity-wrappers.yml +++ /dev/null @@ -1,84 +0,0 @@ -name: Solidity Wrappers -# This is its own workflow file rather than being merged into "solidity.yml" to avoid over complicating the conditionals -# used for job execution. The jobs in "solidity.yml" are configured around push events, whereas -# we only want to generate gethwrappers during pull requests. 
-on: - pull_request: - types: - - opened - - synchronize - - reopened - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - changes: - # We don't directly merge dependabot PRs, so let's not waste the resources - if: ${{ github.actor != 'dependabot[bot]' }} - name: Detect changes - runs-on: ubuntu-latest - outputs: - changes: ${{ steps.ch.outputs.changes }} - steps: - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - name: Detect changes - id: ch - uses: ./.github/actions/detect-solidity-file-changes - - # On a pull request event, make updates to gethwrappers if there are changes. - update-wrappers: - needs: [changes] - if: needs.changes.outputs.changes == 'true' - name: Update Wrappers - permissions: - actions: read - id-token: write - contents: read - runs-on: ubuntu22.04-8cores-32GB - steps: - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - - name: Setup Go - uses: ./.github/actions/setup-go - - - name: Setup NodeJS - uses: ./.github/actions/setup-nodejs - with: - prod: "true" - - - name: Run native compile and generate wrappers - run: make wrappers-all - working-directory: ./contracts - - - name: Assume role capable of dispatching action - uses: smartcontractkit/.github/actions/setup-github-token@ef78fa97bf3c77de6563db1175422703e9e6674f # setup-github-token@0.2.1 - id: get-gh-token - with: - aws-role-arn: ${{ secrets.AWS_OIDC_CCIP_CI_AUTO_PR_TOKEN_ISSUER_ROLE_ARN }} - aws-lambda-url: ${{ secrets.AWS_INFRA_RELENG_TOKEN_ISSUER_LAMBDA_URL }} - aws-region: ${{ secrets.AWS_REGION }} - - - name: Commit any wrapper changes - uses: planetscale/ghcommit-action@21a8cda29f55e5cc2cdae0cdbdd08e38dd148c25 # v0.1.37 - with: - commit_message: "Update gethwrappers" - repo: ${{ github.repository }} - branch: ${{ github.head_ref }} - file_pattern: "core/gethwrappers/**/generated/*.go core/gethwrappers/**/generated-wrapper-dependency-versions-do-not-edit.txt" - env: - GITHUB_TOKEN: ${{ steps.get-gh-token.outputs.access-token }} - - - name: Collect Metrics - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: solidity-update-wrappers - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Update Wrappers - continue-on-error: true diff --git a/.github/workflows/solidity.yml b/.github/workflows/solidity.yml deleted file mode 100644 index 31833ab2eb..0000000000 --- a/.github/workflows/solidity.yml +++ /dev/null @@ -1,273 +0,0 @@ -name: Solidity - -on: - merge_group: - push: - -defaults: - run: - shell: bash - -jobs: - initialize: - name: Initialize - runs-on: ubuntu-latest - outputs: - is-release: ${{ steps.release-tag-check.outputs.is-release }} - is-pre-release: ${{ steps.release-tag-check.outputs.is-pre-release }} - steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - name: Check release tag - id: release-tag-check - uses: smartcontractkit/chainlink-github-actions/release/release-tag-check@5dd916d08c03cb5f9a97304f4f174820421bb946 # v2.3.11 - env: - # Match semver git tags with a "contracts-ccip/" prefix. 
- RELEASE_REGEX: '^contracts-ccip/v[0-9]+\.[0-9]+\.[0-9]+$' - PRE_RELEASE_REGEX: '^contracts-ccip/v[0-9]+\.[0-9]+\.[0-9]+-(.+)$' - - readonly_changes: - name: Detect readonly solidity file changes - runs-on: ubuntu-latest - outputs: - changes: ${{ steps.ch.outputs.changes }} - steps: - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - name: Detect readonly solidity file changes - id: ch - uses: ./.github/actions/detect-solidity-readonly-file-changes - - changes: - name: Detect changes - runs-on: ubuntu-latest - outputs: - changes: ${{ steps.ch.outputs.changes }} - steps: - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - name: Detect changes - id: ch - uses: ./.github/actions/detect-solidity-file-changes - - tag-check: - needs: [changes] - name: Tag Check - runs-on: ubuntu-latest - outputs: - is-release: ${{ steps.release-tag-check.outputs.is-release }} - is-pre-release: ${{ steps.release-tag-check.outputs.is-pre-release }} - release-version: ${{ steps.release-tag-check.outputs.release-version }} - pre-release-version: ${{ steps.release-tag-check.outputs.pre-release-version }} - steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - name: Check release tag - id: release-tag-check - uses: smartcontractkit/chainlink-github-actions/release/release-tag-check@5dd916d08c03cb5f9a97304f4f174820421bb946 # v2.3.11 - env: - # Match semver git tags with a "contracts-ccip/" prefix. - RELEASE_REGEX: '^contracts-ccip/v[0-9]+\.[0-9]+\.[0-9]+$' - PRE_RELEASE_REGEX: '^contracts-ccip/v[0-9]+\.[0-9]+\.[0-9]+-(.+)$' - # Get the version by stripping the "contracts-ccip/v" prefix. - VERSION_PREFIX: 'contracts-ccip/v' - -# prepublish-test: -# needs: [changes, tag-check] -# if: needs.changes.outputs.changes == 'true' || needs.tag-check.outputs.is-pre-release == 'true' -# name: Prepublish Test -# runs-on: ubuntu-latest -# steps: -# - name: Checkout the repo -# uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 -# - name: Setup NodeJS -# uses: ./.github/actions/setup-nodejs -# - name: Run Prepublish test -# working-directory: contracts -# run: pnpm prepublishOnly -# - name: Collect Metrics -# id: collect-gha-metrics -# uses: smartcontractkit/push-gha-metrics-action@dea9b546553cb4ca936607c2267a09c004e4ab3f # v3.0.0 -# with: -# id: solidity-prepublish-test -# org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} -# basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} -# hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} -# this-job-name: Prepublish Test -# continue-on-error: true - - native-compile: - needs: [changes, tag-check] - if: needs.changes.outputs.changes == 'true' || needs.tag-check.outputs.is-release == 'true' || needs.tag-check.outputs.is-pre-release == 'true' - name: Native Compilation - runs-on: ubuntu-latest - steps: - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - name: Checkout diff-so-fancy - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - repository: so-fancy/diff-so-fancy - ref: a673cb4d2707f64d92b86498a2f5f71c8e2643d5 # v1.4.3 - path: diff-so-fancy - - name: Install diff-so-fancy - run: echo "$GITHUB_WORKSPACE/diff-so-fancy" >> $GITHUB_PATH - - name: Setup NodeJS - uses: ./.github/actions/setup-nodejs - with: - prod: "true" - - name: Setup Go - uses: ./.github/actions/setup-go - - name: Run native compile and generate wrappers - run: make 
wrappers-all - working-directory: ./contracts - - name: Verify local solc binaries - run: ./tools/ci/check_solc_hashes - - name: Display git diff - if: ${{ needs.changes.outputs.changes == 'true' }} - run: git diff --minimal --color --exit-code | diff-so-fancy - - name: Collect Metrics - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: solidity-native-compile - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Native Compilation - continue-on-error: true - - # The if statements for steps after checkout repo is a workaround for - # passing required check for PRs that don't have filtered changes. - lint: - defaults: - run: - working-directory: contracts - needs: [changes, tag-check] - if: needs.changes.outputs.changes == 'true' || needs.tag-check.outputs.is-release == 'true' || needs.tag-check.outputs.is-pre-release == 'true' - name: Solidity Lint - runs-on: ubuntu-latest - steps: - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - name: Setup NodeJS - uses: ./.github/actions/setup-nodejs - - name: Run pnpm lint - run: pnpm lint - - name: Run solhint - run: pnpm solhint - - name: Collect Metrics - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: solidity-lint - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Solidity Lint - continue-on-error: true - - prettier: - defaults: - run: - working-directory: contracts - needs: [changes, tag-check] - if: needs.changes.outputs.changes == 'true' || needs.tag-check.outputs.is-release == 'true' || needs.tag-check.outputs.is-pre-release == 'true' - name: Prettier Formatting - runs-on: ubuntu-latest - steps: - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - name: Setup NodeJS - uses: ./.github/actions/setup-nodejs - - name: Run prettier check - run: pnpm prettier:check - - name: Collect Metrics - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: solidity-prettier - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Prettier Formatting - continue-on-error: true - - publish-beta: - name: Publish Beta NPM - environment: publish-contracts - needs: [tag-check, changes, lint, prettier, native-compile] - if: needs.tag-check.outputs.is-pre-release == 'true' - runs-on: ubuntu-latest - steps: - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - - name: Setup NodeJS - uses: ./.github/actions/setup-nodejs - - - name: Version package.json - working-directory: contracts - run: | - echo "Bumping version to ${{ needs.tag-check.outputs.pre-release-version }}" - pnpm version ${{ needs.tag-check.outputs.pre-release-version }} --no-git-tag-version --no-commit-hooks --no-git-checks - - - name: Publish to NPM (beta) - uses: smartcontractkit/.github/actions/ci-publish-npm@4b0ab756abcb1760cb82e1e87b94ff431905bffc # ci-publish-npm@0.4.0 - with: - npm-token: ${{ secrets.NPM_TOKEN }} - 
create-github-release: false - publish-command: "pnpm publish-beta --no-git-checks" - package-json-directory: contracts - - - name: Collect Metrics - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: ccip-solidity-publish-beta - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Publish Beta NPM - continue-on-error: true - - publish-prod: - name: Publish Prod NPM - environment: publish-contracts - needs: [tag-check, changes, lint, prettier, native-compile] - if: needs.tag-check.outputs.is-release == 'true' - runs-on: ubuntu-latest - permissions: - contents: write - steps: - - name: Checkout the repo - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - - - name: Setup NodeJS - uses: ./.github/actions/setup-nodejs - - - name: Validate version - working-directory: contracts - run: | - PACKAGE_JSON_VERSION="$(cat package.json | jq -r '.version')" - if [ "$PACKAGE_JSON_VERSION" != "${{ needs.tag-check.outputs.release-version }}" ]; then - echo "::error version mismatch: package.json version ($PACKAGE_JSON_VERSION) does not match version computed from tag ${{ needs.tag-check.outputs.release-version }}" - exit 1 - fi - - - name: Publish to NPM (latest) - uses: smartcontractkit/.github/actions/ci-publish-npm@4b0ab756abcb1760cb82e1e87b94ff431905bffc # ci-publish-npm@0.4.0 - with: - npm-token: ${{ secrets.NPM_TOKEN }} - create-github-release: false - publish-command: "pnpm publish-prod --no-git-checks" - package-json-directory: contracts - - - name: Collect Metrics - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: ccip-solidity-publish-prod - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Publish Prod NPM - continue-on-error: true diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml deleted file mode 100644 index de7ef871b3..0000000000 --- a/.github/workflows/stale.yml +++ /dev/null @@ -1,25 +0,0 @@ -# Workflow to manage automatically closing stale pull requests and issues. -# See configuration for configuration. -name: Manage stale Issues and PRs - -on: - schedule: - - cron: "0 0 * * *" # Will be triggered every day at midnight UTC - -jobs: - stale: - - runs-on: ubuntu-latest - permissions: - issues: write - pull-requests: write - - steps: - - uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9.0.0 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - days-before-issue-stale: -1 # disables marking issues as stale automatically. Issues can still be marked as stale manually, in which the closure policy applies. - stale-pr-message: 'This PR is stale because it has been open 45 days with no activity. Remove stale label or comment or this will be closed in 10 days.' - close-pr-message: 'This PR was closed because it has been stalled for 10 days with no activity.' 
- days-before-pr-stale: 45 - days-before-pr-close: 10 diff --git a/.github/workflows/sync-develop-from-smartcontractkit-chainlink.yml b/.github/workflows/sync-develop-from-smartcontractkit-chainlink.yml deleted file mode 100644 index 05b7365eba..0000000000 --- a/.github/workflows/sync-develop-from-smartcontractkit-chainlink.yml +++ /dev/null @@ -1,40 +0,0 @@ -name: Sync develop from smartcontractkit/chainlink - -on: - schedule: - # * is a special character in YAML so you have to quote this string - - cron: '*/30 * * * *' - -jobs: - sync: - name: Sync - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2 - with: - ref: develop - if: env.GITHUB_REPOSITORY != 'smartcontractkit/chainlink' - - name: Sync - run: | - git remote add upstream "https://github.com/smartcontractkit/chainlink.git" - COMMIT_HASH_UPSTREAM=$(git ls-remote upstream develop | grep -P '^[0-9a-f]{40}\trefs/heads/develop$' | cut -f 1) - COMMIT_HASH_ORIGIN=$(git ls-remote origin develop | grep -P '^[0-9a-f]{40}\trefs/heads/develop$' | cut -f 1) - if [ "$COMMIT_HASH_UPSTREAM" = "$COMMIT_HASH_ORIGIN" ]; then - echo "Both remotes have develop at $COMMIT_HASH_UPSTREAM. No need to sync." - else - echo "upstream has develop at $COMMIT_HASH_UPSTREAM. origin has develop at $COMMIT_HASH_ORIGIN. Syncing..." - git fetch upstream - git push origin upstream/develop:develop - fi - if: env.GITHUB_REPOSITORY != 'smartcontractkit/chainlink' - - name: Collect Metrics - if: always() - id: collect-gha-metrics - uses: smartcontractkit/push-gha-metrics-action@d9da21a2747016b3e13de58c7d4115a3d5c97935 # v3.0.1 - with: - id: sync-develop - org-id: ${{ secrets.GRAFANA_INTERNAL_TENANT_ID }} - basic-auth: ${{ secrets.GRAFANA_INTERNAL_BASIC_AUTH }} - hostname: ${{ secrets.GRAFANA_INTERNAL_HOST }} - this-job-name: Sync - continue-on-error: true diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 0000000000..5f535d2ef2 --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,62 @@ +name: solidity + +on: push + +env: + FOUNDRY_PROFILE: ci + +jobs: + tests: + strategy: + fail-fast: false + matrix: + product: [ccip] + name: Foundry Tests ${{ matrix.product }} + # See https://github.com/foundry-rs/foundry/issues/3827 + runs-on: ubuntu-22.04 + + # The if statements for steps after checkout repo is workaround for + # passing required check for PRs that don't have filtered changes. + steps: + - name: Checkout the repo + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + submodules: recursive + + # Only needed because we use the NPM versions of packages + # and not native Foundry. This is to make sure the dependencies + # stay in sync. + - name: Setup NodeJS + uses: ./.github/actions/setup-nodejs + + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 + with: + # Has to match the `make foundry` version. + version: nightly-2cb875799419c907cc3709e586ece2559e6b340e + + - name: Run Forge build + run: | + forge --version + forge build + id: build + working-directory: contracts + env: + FOUNDRY_PROFILE: ${{ matrix.product }} + + - name: Run Forge tests + run: | + forge test -vvv + id: test + working-directory: contracts + env: + FOUNDRY_PROFILE: ${{ matrix.product }} + + - name: Run Forge snapshot + if: ${{ !contains(fromJson('["vrf"]'), matrix.product) && !contains(fromJson('["automation"]'), matrix.product) && needs.changes.outputs.changes == 'true' }} + run: | + forge snapshot --nmt "testFuzz_\w{1,}?" 
--check gas-snapshots/${{ matrix.product }}.gas-snapshot + id: snapshot + working-directory: contracts + env: + FOUNDRY_PROFILE: ${{ matrix.product }} diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000000..6081c137b0 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,6 @@ +[submodule "contracts/foundry-lib/gho-core"] + path = contracts/foundry-lib/gho-core + url = https://github.com/aave/gho-core +[submodule "contracts/foundry-lib/solidity-utils"] + path = contracts/foundry-lib/solidity-utils + url = https://github.com/bgd-labs/solidity-utils diff --git a/certora/Makefile b/certora/Makefile new file mode 100644 index 0000000000..0e33459cab --- /dev/null +++ b/certora/Makefile @@ -0,0 +1,24 @@ +default: help + +PATCH = applyHarness.patch +CONTRACTS_DIR = ../contracts +MUNGED_DIR = munged + +help: + @echo "usage:" + @echo " make clean: remove all generated files (those ignored by git)" + @echo " make $(MUNGED_DIR): create $(MUNGED_DIR) directory by applying the patch file to $(CONTRACTS_DIR)" + @echo " make record: record a new patch file capturing the differences between $(CONTRACTS_DIR) and $(MUNGED_DIR)" + +munged: $(wildcard $(CONTRACTS_DIR)/*.sol) $(PATCH) + rm -rf $@ + cp -r $(CONTRACTS_DIR) $@ + patch -p0 -d $@ < $(PATCH) + +record: + diff -ruN $(CONTRACTS_DIR) $(MUNGED_DIR) | sed 's+\.\./contracts/++g' | sed 's+munged/++g' > $(PATCH) + +clean: + git clean -fdX + touch $(PATCH) + diff --git a/certora/confs/ccip.conf b/certora/confs/ccip.conf new file mode 100644 index 0000000000..c3d10c3c5e --- /dev/null +++ b/certora/confs/ccip.conf @@ -0,0 +1,22 @@ +{ + "files": [ + "contracts/src/v0.8/ccip/pools/GHO/UpgradeableLockReleaseTokenPool.sol", + "certora/harness/SimpleERC20.sol" + ], + "packages": [ + "solidity-utils/=contracts/foundry-lib/solidity-utils/src/" + ], + "link": [ + "UpgradeableLockReleaseTokenPool:i_token=SimpleERC20" + ], + "optimistic_loop": true, + "optimistic_hashing": true, + "process": "emv", + "prover_args": ["-depth 10","-mediumTimeout 700"], + "smt_timeout": "600", + "solc": "solc8.24", + "verify": "UpgradeableLockReleaseTokenPool:certora/specs/ccip.spec", + "rule_sanity": "basic", + "msg": "CCIP" +} + diff --git a/certora/harness/SimpleERC20.sol b/certora/harness/SimpleERC20.sol new file mode 100644 index 0000000000..f9d14a7ff6 --- /dev/null +++ b/certora/harness/SimpleERC20.sol @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: agpl-3.0 +pragma solidity ^0.8.0; + +import {IERC20} from "../../contracts/src/v0.8/vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/IERC20.sol"; + +/** +A simple ERC implementation used as the underlying_asset for the verification process. 
+ */ +contract SimpleERC20 is IERC20 { + uint256 t; + mapping(address => uint256) b; + mapping(address => mapping(address => uint256)) a; + + function add(uint a, uint b) internal pure returns (uint256) { + uint c = a + b; + require(c >= a); + return c; + } + + function sub(uint a, uint b) internal pure returns (uint256) { + require(a >= b); + return a - b; + } + + function totalSupply() external view override returns (uint256) { + return t; + } + + function balanceOf(address account) external view override returns (uint256) { + return b[account]; + } + + function transfer(address recipient, uint256 amount) external override returns (bool) { + b[msg.sender] = sub(b[msg.sender], amount); + b[recipient] = add(b[recipient], amount); + return true; + } + + function allowance(address owner, address spender) external view override returns (uint256) { + return a[owner][spender]; + } + + function approve(address spender, uint256 amount) external override returns (bool) { + a[msg.sender][spender] = amount; + return true; + } + + function transferFrom( + address sender, + address recipient, + uint256 amount + ) external override returns (bool) { + b[sender] = sub(b[sender], amount); + b[recipient] = add(b[recipient], amount); + a[sender][msg.sender] = sub(a[sender][msg.sender], amount); + return true; + } +} diff --git a/certora/munged/.gitignore b/certora/munged/.gitignore new file mode 100644 index 0000000000..d6b7ef32c8 --- /dev/null +++ b/certora/munged/.gitignore @@ -0,0 +1,2 @@ +* +!.gitignore diff --git a/certora/specs/ccip.spec b/certora/specs/ccip.spec new file mode 100644 index 0000000000..0172ac0091 --- /dev/null +++ b/certora/specs/ccip.spec @@ -0,0 +1,133 @@ +/* + This is a Specification File for Smart Contract Verification with the Certora Prover. + Contract name: UpgradeableLockReleaseTokenPool +*/ + +using SimpleERC20 as erc20; + +methods { + function getCurrentBridgedAmount() external returns (uint256) envfree; + function getBridgeLimit() external returns (uint256) envfree; + function owner() external returns (address) envfree; +} + + +rule sanity { + env e; + calldataarg arg; + method f; + f(e, arg); + satisfy true; +} + + + +/* ============================================================================== + invariant: currentBridge_LEQ_bridgeLimit. + Description: The value of s_currentBridged is LEQ than the value of s_bridgeLimit. + Note: this may be violated if one calls to setBridgeLimit(newBridgeLimit) with + newBridgeLimit < s_currentBridged. + ============================================================================*/ +invariant currentBridge_LEQ_bridgeLimit() + getCurrentBridgedAmount() <= getBridgeLimit() + filtered { f -> + !f.isView && + f.selector != sig:setBridgeLimit(uint256).selector && + f.selector != sig:setCurrentBridgedAmount(uint256).selector} + { + preserved initialize(address owner, address[] allowlist, address router, uint256 bridgeLimit) with (env e2) { + require getCurrentBridgedAmount()==0; + } + } + + +/* ============================================================================== + rule: withdrawLiquidity_correctness + description: The rule checks that the balance of the contract is as expected. 
+ ============================================================================*/ +rule withdrawLiquidity_correctness(env e) { + uint256 amount; + + require e.msg.sender != currentContract; + uint256 bal_before = erc20.balanceOf(e, currentContract); + withdrawLiquidity(e, amount); + uint256 bal_after = erc20.balanceOf(e, currentContract); + + assert (to_mathint(bal_after) == bal_before - amount); +} + + +/* ============================================================================== + rule: provideLiquidity_correctness + description: The rule checks that the balance of the contract is as expected. + ============================================================================*/ +rule provideLiquidity_correctness(env e) { + uint256 amount; + + require e.msg.sender != currentContract; + uint256 bal_before = erc20.balanceOf(e, currentContract); + provideLiquidity(e, amount); + uint256 bal_after = erc20.balanceOf(e, currentContract); + + assert (to_mathint(bal_after) == bal_before + amount); +} + +definition filterSetter(method f) returns bool = f.selector != sig:setCurrentBridgedAmount(uint256).selector; + +/* ============================================================================== + rule: only_lockOrBurn_can_increase_currentBridged + ============================================================================*/ +rule only_lockOrBurn_can_increase_currentBridged(env e, method f) + filtered { f -> filterSetter(f) } +{ + calldataarg args; + + uint256 curr_bridge_before = getCurrentBridgedAmount(); + f (e,args); + uint256 curr_bridge_after = getCurrentBridgedAmount(); + + assert + curr_bridge_after > curr_bridge_before => + f.selector==sig:lockOrBurn(Pool.LockOrBurnInV1).selector; +} + + +/* ============================================================================== + rule: only_releaseOrMint_currentBridged + ============================================================================*/ +rule only_releaseOrMint_currentBridged(env e, method f) + filtered { f -> filterSetter(f) } +{ + calldataarg args; + + uint256 curr_bridge_before = getCurrentBridgedAmount(); + f (e,args); + uint256 curr_bridge_after = getCurrentBridgedAmount(); + + assert + curr_bridge_after < curr_bridge_before => + f.selector==sig:releaseOrMint(Pool.ReleaseOrMintInV1).selector; +} + + +/* ============================================================================== + rule: only_bridgeLimitAdmin_or_owner_can_call_setBridgeLimit + ============================================================================*/ +rule only_bridgeLimitAdmin_or_owner_can_call_setBridgeLimit(env e) { + uint256 newBridgeLimit; + + setBridgeLimit(e, newBridgeLimit); + + assert e.msg.sender==getBridgeLimitAdmin(e) || e.msg.sender==owner(); +} + +/* ============================================================================== + rule: only_owner_can_call_setCurrentBridgedAmount + ============================================================================*/ +rule only_owner_can_call_setCurrentBridgedAmount(env e) { + uint256 newBridgedAmount; + + setCurrentBridgedAmount(e, newBridgedAmount); + + assert e.msg.sender==owner(); +} diff --git a/contracts/foundry-lib/gho-core b/contracts/foundry-lib/gho-core new file mode 160000 index 0000000000..a8d05e6e72 --- /dev/null +++ b/contracts/foundry-lib/gho-core @@ -0,0 +1 @@ +Subproject commit a8d05e6e72409aa5ea6fd84d8a3c41e13887654d diff --git a/contracts/foundry-lib/solidity-utils b/contracts/foundry-lib/solidity-utils new file mode 160000 index 0000000000..9d4d041562 --- /dev/null +++ 
b/contracts/foundry-lib/solidity-utils @@ -0,0 +1 @@ +Subproject commit 9d4d041562f7ac2918e216e2e7c74172afe3d2af diff --git a/contracts/foundry.toml b/contracts/foundry.toml index e1399964dd..dae0325b33 100644 --- a/contracts/foundry.toml +++ b/contracts/foundry.toml @@ -6,8 +6,8 @@ optimizer_runs = 1_000_000 src = 'src/v0.8' test = 'test/v0.8' out = 'foundry-artifacts' -cache_path = 'foundry-cache' -libs = ['node_modules'] +cache_path = 'foundry-cache' +libs = ['node_modules', 'foundry-lib'] bytecode_hash = "none" ffi = false @@ -33,7 +33,7 @@ evm_version = 'paris' solc_version = '0.8.19' src = 'src/v0.8/functions/dev/v1_X' test = 'src/v0.8/functions/tests/v1_X' -gas_price = 3_000_000_000 # 3 gwei +gas_price = 3_000_000_000 # 3 gwei [profile.vrf] optimizer_runs = 1_000 diff --git a/contracts/remappings.txt b/contracts/remappings.txt index 4ed0fcfd9a..e1112d2705 100644 --- a/contracts/remappings.txt +++ b/contracts/remappings.txt @@ -6,3 +6,9 @@ hardhat/=node_modules/hardhat/ @eth-optimism/=node_modules/@eth-optimism/ @scroll-tech/=node_modules/@scroll-tech/ @zksync/=node_modules/@zksync/ + +@aave/=foundry-lib/gho-core/lib/aave-token/ +@aave-gho-core/=foundry-lib/gho-core/src/contracts/ +@aave/core-v3/=foundry-lib/gho-core/lib/aave-v3-core/ +@aave/periphery-v3/=foundry-lib/gho-core/lib/aave-v3-periphery/ +solidity-utils/=foundry-lib/solidity-utils/src/ diff --git a/contracts/src/v0.8/ccip/pools/GHO/UpgradeableBurnMintTokenPool.sol b/contracts/src/v0.8/ccip/pools/GHO/UpgradeableBurnMintTokenPool.sol new file mode 100644 index 0000000000..6c1813f51d --- /dev/null +++ b/contracts/src/v0.8/ccip/pools/GHO/UpgradeableBurnMintTokenPool.sol @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity ^0.8.0; + +import {ITypeAndVersion} from "../../../shared/interfaces/ITypeAndVersion.sol"; +import {IBurnMintERC20} from "../../../shared/token/ERC20/IBurnMintERC20.sol"; + +import {UpgradeableBurnMintTokenPoolAbstract} from "./UpgradeableBurnMintTokenPoolAbstract.sol"; +import {UpgradeableTokenPool} from "./UpgradeableTokenPool.sol"; + +import {Initializable} from "solidity-utils/contracts/transparent-proxy/Initializable.sol"; +import {IRouter} from "../../interfaces/IRouter.sol"; + +/// @title UpgradeableBurnMintTokenPool +/// @author Aave Labs +/// @notice Upgradeable version of Chainlink's CCIP BurnMintTokenPool +/// @dev Contract adaptations: +/// - Implementation of Initializable to allow upgrades +/// - Move of allowlist and router definition to initialization stage +/// - Addition of authorized functions to directly mint/burn liquidity, thereby increasing/reducing the facilitator's bucket level. +/// - Modifications from inherited contract (see contract for more details): +/// - UpgradeableTokenPool: +/// - Remove i_token decimal check in constructor +/// - Add storage `__gap` for future upgrades. + +/// @dev Pool whitelisting mode is set in the constructor and cannot be modified later. +/// It either accepts any address as originalSender, or only accepts whitelisted originalSender. +/// The only way to change whitelisting mode is to deploy a new pool. +/// If that is expected, please make sure the token's burner/minter roles are adjustable. +/// @dev This contract is a variant of BurnMintTokenPool that uses `burn(amount)`.
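For the facilitator bucket migration that the directMint/directBurn additions above enable, a minimal sketch of the intended call sequence, assuming `oldPool` and `newPool` are two UpgradeableBurnMintTokenPool instances owned by the caller and `amount` is the bucket level being moved (these names and values are illustrative assumptions, not part of the patch):

// Sketch only: mint the migrated level via the new facilitator, then burn it on the old one,
// so the old facilitator's bucket can be drawn down and offboarded.
newPool.directMint(address(oldPool), amount); // onlyOwner on the new pool
oldPool.directBurn(amount);                   // onlyOwner on the old pool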
+contract UpgradeableBurnMintTokenPool is Initializable, UpgradeableBurnMintTokenPoolAbstract, ITypeAndVersion { + string public constant override typeAndVersion = "BurnMintTokenPool 1.5.1"; + + constructor( + address token, + uint8 localTokenDecimals, + address rmnProxy, + bool allowListEnabled + ) UpgradeableTokenPool(IBurnMintERC20(token), localTokenDecimals, rmnProxy, allowListEnabled) {} + + function initialize(address owner_, address[] memory allowlist, address router) external initializer { + if (router == address(0) || owner_ == address(0)) revert ZeroAddressNotAllowed(); + + _transferOwnership(owner_); + s_router = IRouter(router); + if (i_allowlistEnabled) _applyAllowListUpdates(new address[](0), allowlist); + } + + /// @notice Mint an amount of tokens with no additional logic. + /// @dev This GHO-specific functionality is designed for migrating bucket levels between + /// facilitators. The new pool is expected to mint the amount of tokens, while the old pool + /// burns an equivalent amount. This ensures the facilitator can be offboarded, as all + /// liquidity minted by it must be fully burned. + /// @param to The address to which the minted tokens will be transferred. This needs to + /// be the old token pool, or the facilitator being offboarded. + /// @param amount The amount of tokens to mint and transfer to the old pool. + function directMint(address to, uint256 amount) external onlyOwner { + IBurnMintERC20(address(i_token)).mint(to, amount); + } + + /// @notice Burn an amount of tokens with no additional logic. + /// @dev This GHO-specific functionality is designed for migrating bucket levels between + /// facilitators. The new pool is expected to mint the amount of tokens, while the old pool + /// burns an equivalent amount. This ensures the facilitator can be offboarded, as all + /// liquidity minted by it must be fully burned. + /// @param amount The amount of tokens to burn. + function directBurn(uint256 amount) external onlyOwner { + _burn(amount); + } + + /// @inheritdoc UpgradeableBurnMintTokenPoolAbstract + function _burn(uint256 amount) internal virtual override { + IBurnMintERC20(address(i_token)).burn(amount); + } +} diff --git a/contracts/src/v0.8/ccip/pools/GHO/UpgradeableBurnMintTokenPoolAbstract.sol b/contracts/src/v0.8/ccip/pools/GHO/UpgradeableBurnMintTokenPoolAbstract.sol new file mode 100644 index 0000000000..2e90c6d4ea --- /dev/null +++ b/contracts/src/v0.8/ccip/pools/GHO/UpgradeableBurnMintTokenPoolAbstract.sol @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity ^0.8.0; + +import {IBurnMintERC20} from "../../../shared/token/ERC20/IBurnMintERC20.sol"; + +import {Pool} from "../../libraries/Pool.sol"; +import {UpgradeableTokenPool} from "./UpgradeableTokenPool.sol"; + +abstract contract UpgradeableBurnMintTokenPoolAbstract is UpgradeableTokenPool { + /// @notice Contains the specific burn call for a pool. + /// @dev overriding this method allows us to create pools with different burn signatures + /// without duplicating the underlying logic.
+ function _burn(uint256 amount) internal virtual; + + /// @notice Burn the token in the pool + /// @dev The _validateLockOrBurn check is an essential security check + function lockOrBurn( + Pool.LockOrBurnInV1 calldata lockOrBurnIn + ) external virtual override returns (Pool.LockOrBurnOutV1 memory) { + _validateLockOrBurn(lockOrBurnIn); + + _burn(lockOrBurnIn.amount); + + emit Burned(msg.sender, lockOrBurnIn.amount); + + return + Pool.LockOrBurnOutV1({ + destTokenAddress: getRemoteToken(lockOrBurnIn.remoteChainSelector), + destPoolData: _encodeLocalDecimals() + }); + } + + /// @notice Mint tokens from the pool to the recipient + /// @dev The _validateReleaseOrMint check is an essential security check + function releaseOrMint( + Pool.ReleaseOrMintInV1 calldata releaseOrMintIn + ) external virtual override returns (Pool.ReleaseOrMintOutV1 memory) { + _validateReleaseOrMint(releaseOrMintIn); + + // Calculate the local amount + uint256 localAmount = _calculateLocalAmount( + releaseOrMintIn.amount, + _parseRemoteDecimals(releaseOrMintIn.sourcePoolData) + ); + + // Mint to the receiver + IBurnMintERC20(address(i_token)).mint(releaseOrMintIn.receiver, localAmount); + + emit Minted(msg.sender, releaseOrMintIn.receiver, localAmount); + + return Pool.ReleaseOrMintOutV1({destinationAmount: localAmount}); + } +} diff --git a/contracts/src/v0.8/ccip/pools/GHO/UpgradeableLockReleaseTokenPool.sol b/contracts/src/v0.8/ccip/pools/GHO/UpgradeableLockReleaseTokenPool.sol new file mode 100644 index 0000000000..f8ed82bcc7 --- /dev/null +++ b/contracts/src/v0.8/ccip/pools/GHO/UpgradeableLockReleaseTokenPool.sol @@ -0,0 +1,252 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity ^0.8.0; + +import {Initializable} from "solidity-utils/contracts/transparent-proxy/Initializable.sol"; + +import {ILiquidityContainer} from "../../../liquiditymanager/interfaces/ILiquidityContainer.sol"; +import {ITypeAndVersion} from "../../../shared/interfaces/ITypeAndVersion.sol"; + +import {IERC20} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/IERC20.sol"; +import {SafeERC20} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/utils/SafeERC20.sol"; +import {IERC165} from "../../../vendor/openzeppelin-solidity/v5.0.2/contracts/utils/introspection/IERC165.sol"; + +import {Pool} from "../../libraries/Pool.sol"; +import {IRouter} from "../../interfaces/IRouter.sol"; +import {UpgradeableTokenPool} from "./UpgradeableTokenPool.sol"; + +/// @title UpgradeableLockReleaseTokenPool +/// @author Aave Labs +/// @notice Upgradeable version of Chainlink's CCIP LockReleaseTokenPool +/// @dev Contract adaptations: +/// - Implementation of Initializable to allow upgrades +/// - Move of allowlist and router definition to initialization stage +/// - Addition of a bridge limit to regulate the maximum amount of tokens that can be transferred out (burned/locked) +/// - Addition of authorized function to update amount of tokens that are currently bridged +/// - Modifications from inherited contract (see contract for more details): +/// - UpgradeableTokenPool +/// - Remove i_token decimal check in constructor +/// - Add storage `__gap` for future upgrades. + +/// @dev Token pool used for tokens on their native chain. This uses a lock and release mechanism. +/// Because of lock/unlock requiring liquidity, this pool contract also has function to add and remove +/// liquidity. This allows for proper bookkeeping for both user and liquidity provider balances. +/// @dev One token per LockReleaseTokenPool. 
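As a rough illustration of the bridge-limit administration described above, a hedged sketch assuming `pool` is an initialized UpgradeableLockReleaseTokenPool and `limitAdmin` is an address chosen by the owner (names and amounts are assumptions, not part of the patch):

pool.setBridgeLimitAdmin(limitAdmin); // onlyOwner
pool.setBridgeLimit(1_000e18);        // callable by the owner or the bridge limit admin
// A CCIP lockOrBurn of 400e18 raises getCurrentBridgedAmount() to 400e18; a further
// lockOrBurn of 700e18 would revert with BridgeLimitExceeded(1_000e18), since the
// running total would exceed getBridgeLimit().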
+contract UpgradeableLockReleaseTokenPool is Initializable, UpgradeableTokenPool, ILiquidityContainer, ITypeAndVersion { + using SafeERC20 for IERC20; + + error InsufficientLiquidity(); + error LiquidityNotAccepted(); + error BridgeLimitExceeded(uint256 bridgeLimit); + error NotEnoughBridgedAmount(); + + event BridgeLimitUpdated(uint256 oldBridgeLimit, uint256 newBridgeLimit); + event BridgeLimitAdminUpdated(address indexed oldAdmin, address indexed newAdmin); + + event LiquidityTransferred(address indexed from, uint256 amount); + + string public constant override typeAndVersion = "LockReleaseTokenPool 1.5.1"; + + /// @dev Whether or not the pool accepts liquidity. + /// External liquidity is not required when there is one canonical token deployed to a chain, + /// and CCIP is facilitating mint/burn on all the other chains, in which case the invariant + /// balanceOf(pool) on home chain >= sum(totalSupply(mint/burn "wrapped" token) on all remote chains) should always hold + bool internal immutable i_acceptLiquidity; + /// @notice The address of the rebalancer. + address internal s_rebalancer; + + /// @notice Maximum amount of tokens that can be bridged to other chains + uint256 private s_bridgeLimit; + /// @notice Amount of tokens bridged (transferred out) + /// @dev Must always be equal to or below the bridge limit + uint256 private s_currentBridged; + /// @notice The address of the bridge limit admin. + /// @dev Can be address(0) if none is configured. + address internal s_bridgeLimitAdmin; + + // @notice Constructor + // @param token The bridgeable token that is managed by this pool. + // @param localTokenDecimals The number of decimals of the token that is managed by this pool. + // @param rmnProxy The address of the rmn proxy + // @param allowlistEnabled True if pool is set to access-controlled mode, false otherwise + // @param acceptLiquidity True if the pool accepts liquidity, false otherwise + constructor( + address token, + uint8 localTokenDecimals, + address rmnProxy, + bool allowListEnabled, + bool acceptLiquidity + ) UpgradeableTokenPool(IERC20(token), localTokenDecimals, rmnProxy, allowListEnabled) { + i_acceptLiquidity = acceptLiquidity; + } + + /// @dev Initializer + /// @dev The address passed as `owner` must accept ownership after initialization. 
+ /// @dev The `allowlist` is only effective if pool is set to access-controlled mode + /// @param owner_ The address of the owner + /// @param allowlist A set of addresses allowed to trigger lockOrBurn as original senders + /// @param router The address of the router + /// @param bridgeLimit The maximum amount of tokens that can be bridged to other chains + function initialize( + address owner_, + address[] memory allowlist, + address router, + uint256 bridgeLimit + ) external initializer { + if (router == address(0) || owner_ == address(0)) revert ZeroAddressNotAllowed(); + + _transferOwnership(owner_); + s_router = IRouter(router); + if (i_allowlistEnabled) _applyAllowListUpdates(new address[](0), allowlist); + s_bridgeLimit = bridgeLimit; + } + + /// @notice Locks the token in the pool + /// @dev The _validateLockOrBurn check is an essential security check + function lockOrBurn( + Pool.LockOrBurnInV1 calldata lockOrBurnIn + ) external virtual override returns (Pool.LockOrBurnOutV1 memory) { + // Increase bridged amount because tokens are leaving the source chain + if ((s_currentBridged += lockOrBurnIn.amount) > s_bridgeLimit) revert BridgeLimitExceeded(s_bridgeLimit); + + _validateLockOrBurn(lockOrBurnIn); + + emit Locked(msg.sender, lockOrBurnIn.amount); + + return + Pool.LockOrBurnOutV1({ + destTokenAddress: getRemoteToken(lockOrBurnIn.remoteChainSelector), + destPoolData: _encodeLocalDecimals() + }); + } + + /// @notice Release tokens from the pool to the recipient + /// @dev The _validateReleaseOrMint check is an essential security check + function releaseOrMint( + Pool.ReleaseOrMintInV1 calldata releaseOrMintIn + ) external virtual override returns (Pool.ReleaseOrMintOutV1 memory) { + // This should never occur. Amount should never exceed the current bridged amount + if (releaseOrMintIn.amount > s_currentBridged) revert NotEnoughBridgedAmount(); + // Reduce bridged amount because tokens are back to source chain + s_currentBridged -= releaseOrMintIn.amount; + + _validateReleaseOrMint(releaseOrMintIn); + + // Calculate the local amount + uint256 localAmount = _calculateLocalAmount( + releaseOrMintIn.amount, + _parseRemoteDecimals(releaseOrMintIn.sourcePoolData) + ); + + // Release to the recipient + getToken().safeTransfer(releaseOrMintIn.receiver, localAmount); + + emit Released(msg.sender, releaseOrMintIn.receiver, localAmount); + + return Pool.ReleaseOrMintOutV1({destinationAmount: localAmount}); + } + + /// @inheritdoc IERC165 + function supportsInterface(bytes4 interfaceId) public pure virtual override returns (bool) { + return interfaceId == type(ILiquidityContainer).interfaceId || super.supportsInterface(interfaceId); + } + + /// @notice Gets LiquidityManager, can be address(0) if none is configured. + /// @return The current liquidity manager. + function getRebalancer() external view returns (address) { + return s_rebalancer; + } + + /// @notice Sets the LiquidityManager address. + /// @dev Only callable by the owner. + function setRebalancer(address rebalancer) external onlyOwner { + s_rebalancer = rebalancer; + } + + /// @notice Sets the current bridged amount to other chains + /// @dev Only callable by the owner. + /// @dev Does not emit event, it is expected to only be called during token pool migrations. 
+ /// @param newCurrentBridged The new bridged amount + function setCurrentBridgedAmount(uint256 newCurrentBridged) external onlyOwner { + s_currentBridged = newCurrentBridged; + } + + /// @notice Sets the bridge limit, the maximum amount of tokens that can be bridged out + /// @dev Only callable by the owner or the bridge limit admin. + /// @dev Bridge limit changes should be carefully managed, especially when reducing below the current bridged amount + /// @param newBridgeLimit The new bridge limit + function setBridgeLimit(uint256 newBridgeLimit) external { + if (msg.sender != s_bridgeLimitAdmin && msg.sender != owner()) revert Unauthorized(msg.sender); + uint256 oldBridgeLimit = s_bridgeLimit; + s_bridgeLimit = newBridgeLimit; + emit BridgeLimitUpdated(oldBridgeLimit, newBridgeLimit); + } + + /// @notice Sets the bridge limit admin address. + /// @dev Only callable by the owner. + /// @param bridgeLimitAdmin The new bridge limit admin address. + function setBridgeLimitAdmin(address bridgeLimitAdmin) external onlyOwner { + address oldAdmin = s_bridgeLimitAdmin; + s_bridgeLimitAdmin = bridgeLimitAdmin; + emit BridgeLimitAdminUpdated(oldAdmin, bridgeLimitAdmin); + } + + /// @notice Gets the bridge limit + /// @return The maximum amount of tokens that can be transferred out to other chains + function getBridgeLimit() external view virtual returns (uint256) { + return s_bridgeLimit; + } + + /// @notice Gets the current bridged amount to other chains + /// @return The amount of tokens transferred out to other chains + function getCurrentBridgedAmount() external view virtual returns (uint256) { + return s_currentBridged; + } + + /// @notice Gets the bridge limit admin address. + function getBridgeLimitAdmin() external view returns (address) { + return s_bridgeLimitAdmin; + } + + /// @notice Checks if the pool can accept liquidity. + /// @return true if the pool can accept liquidity, false otherwise. + function canAcceptLiquidity() external view returns (bool) { + return i_acceptLiquidity; + } + + /// @notice Adds liquidity to the pool. The tokens should be approved first. + /// @param amount The amount of liquidity to provide. + function provideLiquidity(uint256 amount) external { + if (!i_acceptLiquidity) revert LiquidityNotAccepted(); + if (s_rebalancer != msg.sender) revert Unauthorized(msg.sender); + + i_token.safeTransferFrom(msg.sender, address(this), amount); + emit LiquidityAdded(msg.sender, amount); + } + + /// @notice Removes liquidity from the pool. The tokens will be sent to msg.sender. + /// @param amount The amount of liquidity to remove. + function withdrawLiquidity(uint256 amount) external { + if (s_rebalancer != msg.sender) revert Unauthorized(msg.sender); + + if (i_token.balanceOf(address(this)) < amount) revert InsufficientLiquidity(); + i_token.safeTransfer(msg.sender, amount); + emit LiquidityRemoved(msg.sender, amount); + } + + /// @notice This function can be used to transfer liquidity from an older version of the pool to this pool. To do so + /// this pool will have to be set as the rebalancer in the older version of the pool. This allows it to transfer the + /// funds in the old pool to the new pool. + /// @dev When upgrading a LockRelease pool, this function can be called at the same time as the pool is changed in the + /// TokenAdminRegistry. This allows for a smooth transition of both liquidity and transactions to the new pool.
+ /// Alternatively, when no multicall is available, a portion of the funds can be transferred to the new pool before + /// changing which pool CCIP uses, to ensure both pools can operate. Then the pool should be changed in the + /// TokenAdminRegistry, which will activate the new pool. All new transactions will use the new pool and its + /// liquidity. Finally, the remaining liquidity can be transferred to the new pool using this function one more time. + /// @param from The address of the old pool. + /// @param amount The amount of liquidity to transfer. + function transferLiquidity(address from, uint256 amount) external onlyOwner { + UpgradeableLockReleaseTokenPool(from).withdrawLiquidity(amount); + + emit LiquidityTransferred(from, amount); + } +} diff --git a/contracts/src/v0.8/ccip/pools/GHO/UpgradeableTokenPool.sol b/contracts/src/v0.8/ccip/pools/GHO/UpgradeableTokenPool.sol new file mode 100644 index 0000000000..8c0965a67f --- /dev/null +++ b/contracts/src/v0.8/ccip/pools/GHO/UpgradeableTokenPool.sol @@ -0,0 +1,606 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity ^0.8.0; + +import {IPoolV1} from "../../interfaces/IPool.sol"; +import {IRMN} from "../../interfaces/IRMN.sol"; +import {IRouter} from "../../interfaces/IRouter.sol"; + +import {Ownable2StepMsgSender} from "../../../shared/access/Ownable2StepMsgSender.sol"; +import {Pool} from "../../libraries/Pool.sol"; +import {RateLimiter} from "../../libraries/RateLimiter.sol"; + +import {IERC20} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/IERC20.sol"; +import {IERC165} from "../../../vendor/openzeppelin-solidity/v5.0.2/contracts/utils/introspection/IERC165.sol"; +import {EnumerableSet} from "../../../vendor/openzeppelin-solidity/v5.0.2/contracts/utils/structs/EnumerableSet.sol"; + +/// @title UpgradeableTokenPool +/// @author Aave Labs +/// @notice Upgradeable version of Chainlink's CCIP TokenPool +/// @dev This pool supports different decimals on different chains but using this feature could impact the total number +/// of tokens in circulation. Since all of the tokens are locked/burned on the source, and a rounded amount is minted/released on the +/// destination, the number of tokens minted/released could be less than the number of tokens burned/locked. This is because the source +/// chain does not know about the destination token decimals. This is not a problem if the decimals are the same on both +/// chains. +/// @dev Contract adaptations: +/// - Remove i_token decimal check in constructor. +/// - Add storage `__gap` for future upgrades. +/// Example: +/// Assume there is a token with 6 decimals on chain A and 3 decimals on chain B. +/// - 1.234567 tokens are burned on chain A. +/// - 1.234 tokens are minted on chain B. +/// When sending the 1.234 tokens back to chain A, you will receive 1.234000 tokens on chain A, effectively losing +/// 0.000567 tokens. +/// In the case of a burnMint pool on chain A, these funds are burned in the pool on chain A. +/// In the case of a lockRelease pool on chain A, these funds accumulate in the pool on chain A. 
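The decimal example above follows from scaling amounts by the difference in decimals; a worked restatement of those numbers (the on-chain conversion is performed by the pool's decimal helpers such as _calculateLocalAmount, whose internals are not shown in this hunk):

// Chain A uses 6 decimals, chain B uses 3 decimals.
// burn on A:   1_234_567 units  = 1.234567 tokens
// mint on B:   1_234_567 / 10 ** (6 - 3) = 1_234 units = 1.234 tokens
// back to A:   1_234 * 10 ** (6 - 3) = 1_234_000 units = 1.234000 tokens
// net effect:  567 units (0.000567 tokens) end up burned or left in the pool on chain A.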
+abstract contract UpgradeableTokenPool is IPoolV1, Ownable2StepMsgSender { + using EnumerableSet for EnumerableSet.Bytes32Set; + using EnumerableSet for EnumerableSet.AddressSet; + using EnumerableSet for EnumerableSet.UintSet; + using RateLimiter for RateLimiter.TokenBucket; + + error CallerIsNotARampOnRouter(address caller); + error ZeroAddressNotAllowed(); + error SenderNotAllowed(address sender); + error AllowListNotEnabled(); + error NonExistentChain(uint64 remoteChainSelector); + error ChainNotAllowed(uint64 remoteChainSelector); + error CursedByRMN(); + error ChainAlreadyExists(uint64 chainSelector); + error InvalidSourcePoolAddress(bytes sourcePoolAddress); + error InvalidToken(address token); + error Unauthorized(address caller); + error PoolAlreadyAdded(uint64 remoteChainSelector, bytes remotePoolAddress); + error InvalidRemotePoolForChain(uint64 remoteChainSelector, bytes remotePoolAddress); + error InvalidRemoteChainDecimals(bytes sourcePoolData); + error OverflowDetected(uint8 remoteDecimals, uint8 localDecimals, uint256 remoteAmount); + error InvalidDecimalArgs(uint8 expected, uint8 actual); + + event Locked(address indexed sender, uint256 amount); + event Burned(address indexed sender, uint256 amount); + event Released(address indexed sender, address indexed recipient, uint256 amount); + event Minted(address indexed sender, address indexed recipient, uint256 amount); + event ChainAdded( + uint64 remoteChainSelector, + bytes remoteToken, + RateLimiter.Config outboundRateLimiterConfig, + RateLimiter.Config inboundRateLimiterConfig + ); + event ChainConfigured( + uint64 remoteChainSelector, + RateLimiter.Config outboundRateLimiterConfig, + RateLimiter.Config inboundRateLimiterConfig + ); + event ChainRemoved(uint64 remoteChainSelector); + event RemotePoolAdded(uint64 indexed remoteChainSelector, bytes remotePoolAddress); + event RemotePoolRemoved(uint64 indexed remoteChainSelector, bytes remotePoolAddress); + event AllowListAdd(address sender); + event AllowListRemove(address sender); + event RouterUpdated(address oldRouter, address newRouter); + event RateLimitAdminSet(address rateLimitAdmin); + + struct ChainUpdate { + uint64 remoteChainSelector; // Remote chain selector + bytes[] remotePoolAddresses; // Address of the remote pool, ABI encoded in the case of a remote EVM chain. + bytes remoteTokenAddress; // Address of the remote token, ABI encoded in the case of a remote EVM chain. + RateLimiter.Config outboundRateLimiterConfig; // Outbound rate limited config, meaning the rate limits for all of the onRamps for the given chain + RateLimiter.Config inboundRateLimiterConfig; // Inbound rate limited config, meaning the rate limits for all of the offRamps for the given chain + } + + struct RemoteChainConfig { + RateLimiter.TokenBucket outboundRateLimiterConfig; // Outbound rate limited config, meaning the rate limits for all of the onRamps for the given chain + RateLimiter.TokenBucket inboundRateLimiterConfig; // Inbound rate limited config, meaning the rate limits for all of the offRamps for the given chain + bytes remoteTokenAddress; // Address of the remote token, ABI encoded in the case of a remote EVM chain. + EnumerableSet.Bytes32Set remotePools; // Set of remote pool hashes, ABI encoded in the case of a remote EVM chain. + } + + /// @dev The bridgeable token that is managed by this pool. Pools could support multiple tokens at the same time if + /// required, but this implementation only supports one token. 
+ IERC20 internal immutable i_token; + /// @dev The number of decimals of the token managed by this pool. + uint8 internal immutable i_tokenDecimals; + /// @dev The address of the RMN proxy + address internal immutable i_rmnProxy; + /// @dev The immutable flag that indicates if the pool is access-controlled. + bool internal immutable i_allowlistEnabled; + /// @dev A set of addresses allowed to trigger lockOrBurn as original senders. + /// Only takes effect if i_allowlistEnabled is true. + /// This can be used to ensure only token-issuer specified addresses can move tokens. + EnumerableSet.AddressSet internal s_allowlist; + /// @dev The address of the router + IRouter internal s_router; + /// @dev A set of allowed chain selectors. We want the allowlist to be enumerable to + /// be able to quickly determine (without parsing logs) who can access the pool. + /// @dev The chain selectors are in uint256 format because of the EnumerableSet implementation. + EnumerableSet.UintSet internal s_remoteChainSelectors; + mapping(uint64 remoteChainSelector => RemoteChainConfig) internal s_remoteChainConfigs; + /// @notice A mapping of hashed pool addresses to their unhashed form. This is used to be able to find the actually + /// configured pools and not just their hashed versions. + mapping(bytes32 poolAddressHash => bytes poolAddress) internal s_remotePoolAddresses; + /// @notice The address of the rate limiter admin. + /// @dev Can be address(0) if none is configured. + address internal s_rateLimitAdmin; + + constructor(IERC20 token, uint8 localTokenDecimals, address rmnProxy, bool allowListEnabled) { + if (address(token) == address(0) || rmnProxy == address(0)) revert ZeroAddressNotAllowed(); + i_token = token; + i_rmnProxy = rmnProxy; + i_tokenDecimals = localTokenDecimals; + + // Pool can be set as permissioned or permissionless at deployment time only to save hot-path gas. + i_allowlistEnabled = allowListEnabled; + } + + /// @inheritdoc IPoolV1 + function isSupportedToken(address token) public view virtual returns (bool) { + return token == address(i_token); + } + + /// @notice Gets the IERC20 token that this pool can lock or burn. + /// @return token The IERC20 token representation. 
+ function getToken() public view returns (IERC20 token) { + return i_token; + } + + /// @notice Get RMN proxy address + /// @return rmnProxy Address of RMN proxy + function getRmnProxy() public view returns (address rmnProxy) { + return i_rmnProxy; + } + + /// @notice Gets the pool's Router + /// @return router The pool's Router + function getRouter() public view returns (address router) { + return address(s_router); + } + + /// @notice Sets the pool's Router + /// @param newRouter The new Router + function setRouter(address newRouter) public onlyOwner { + if (newRouter == address(0)) revert ZeroAddressNotAllowed(); + address oldRouter = address(s_router); + s_router = IRouter(newRouter); + + emit RouterUpdated(oldRouter, newRouter); + } + + /// @notice Signals which version of the pool interface is supported + function supportsInterface(bytes4 interfaceId) public pure virtual override returns (bool) { + return + interfaceId == Pool.CCIP_POOL_V1 || + interfaceId == type(IPoolV1).interfaceId || + interfaceId == type(IERC165).interfaceId; + } + + // ================================================================ + // │ Validation │ + // ================================================================ + + /// @notice Validates the lock or burn input for correctness on + /// - token to be locked or burned + /// - RMN curse status + /// - allowlist status + /// - if the sender is a valid onRamp + /// - rate limit status + /// @param lockOrBurnIn The input to validate. + /// @dev This function should always be called before executing a lock or burn. Not doing so would allow + /// for various exploits. + function _validateLockOrBurn(Pool.LockOrBurnInV1 calldata lockOrBurnIn) internal { + if (!isSupportedToken(lockOrBurnIn.localToken)) revert InvalidToken(lockOrBurnIn.localToken); + if (IRMN(i_rmnProxy).isCursed(bytes16(uint128(lockOrBurnIn.remoteChainSelector)))) revert CursedByRMN(); + _checkAllowList(lockOrBurnIn.originalSender); + + _onlyOnRamp(lockOrBurnIn.remoteChainSelector); + _consumeOutboundRateLimit(lockOrBurnIn.remoteChainSelector, lockOrBurnIn.amount); + } + + /// @notice Validates the release or mint input for correctness on + /// - token to be released or minted + /// - RMN curse status + /// - if the sender is a valid offRamp + /// - if the source pool is valid + /// - rate limit status + /// @param releaseOrMintIn The input to validate. + /// @dev This function should always be called before executing a release or mint. Not doing so would allow + /// for various exploits. + function _validateReleaseOrMint(Pool.ReleaseOrMintInV1 calldata releaseOrMintIn) internal { + if (!isSupportedToken(releaseOrMintIn.localToken)) revert InvalidToken(releaseOrMintIn.localToken); + if (IRMN(i_rmnProxy).isCursed(bytes16(uint128(releaseOrMintIn.remoteChainSelector)))) revert CursedByRMN(); + _onlyOffRamp(releaseOrMintIn.remoteChainSelector); + + // Validates that the source pool address is configured on this pool. + if (!isRemotePool(releaseOrMintIn.remoteChainSelector, releaseOrMintIn.sourcePoolAddress)) { + revert InvalidSourcePoolAddress(releaseOrMintIn.sourcePoolAddress); + } + + _consumeInboundRateLimit(releaseOrMintIn.remoteChainSelector, releaseOrMintIn.amount); + } + + // ================================================================ + // │ Token decimals │ + // ================================================================ + + /// @notice Gets the IERC20 token decimals on the local chain. 
+ function getTokenDecimals() public view virtual returns (uint8 decimals) { + return i_tokenDecimals; + } + + function _encodeLocalDecimals() internal view virtual returns (bytes memory) { + return abi.encode(i_tokenDecimals); + } + + function _parseRemoteDecimals(bytes memory sourcePoolData) internal view virtual returns (uint8) { + // Fallback to the local token decimals if the source pool data is empty. This allows for backwards compatibility. + if (sourcePoolData.length == 0) { + return i_tokenDecimals; + } + if (sourcePoolData.length != 32) { + revert InvalidRemoteChainDecimals(sourcePoolData); + } + uint256 remoteDecimals = abi.decode(sourcePoolData, (uint256)); + if (remoteDecimals > type(uint8).max) { + revert InvalidRemoteChainDecimals(sourcePoolData); + } + return uint8(remoteDecimals); + } + + /// @notice Calculates the local amount based on the remote amount and decimals. + /// @param remoteAmount The amount on the remote chain. + /// @param remoteDecimals The decimals of the token on the remote chain. + /// @return The local amount. + /// @dev This function protects against overflows. If there is a transaction that hits the overflow check, it is + /// probably incorrect as that means the amount cannot be represented on this chain. If the local decimals have been + /// wrongly configured, the token issuer could redeploy the pool with the correct decimals and manually re-execute the + /// CCIP tx to fix the issue. + function _calculateLocalAmount(uint256 remoteAmount, uint8 remoteDecimals) internal view virtual returns (uint256) { + if (remoteDecimals == i_tokenDecimals) { + return remoteAmount; + } + if (remoteDecimals > i_tokenDecimals) { + uint8 decimalsDiff = remoteDecimals - i_tokenDecimals; + if (decimalsDiff > 77) { + // This is a safety check to prevent overflow in the next calculation. + revert OverflowDetected(remoteDecimals, i_tokenDecimals, remoteAmount); + } + // Solidity rounds down so there is no risk of minting more tokens than the remote chain sent. + return remoteAmount / (10 ** decimalsDiff); + } + + // This is a safety check to prevent overflow in the next calculation. + // More than 77 would never fit in a uint256 and would cause an overflow. We also check if the resulting amount + // would overflow. + uint8 diffDecimals = i_tokenDecimals - remoteDecimals; + if (diffDecimals > 77 || remoteAmount > type(uint256).max / (10 ** diffDecimals)) { + revert OverflowDetected(remoteDecimals, i_tokenDecimals, remoteAmount); + } + + return remoteAmount * (10 ** diffDecimals); + } + + // ================================================================ + // │ Chain permissions │ + // ================================================================ + + /// @notice Gets the pool address on the remote chain. + /// @param remoteChainSelector Remote chain selector. + /// @dev To support non-evm chains, this value is encoded into bytes + function getRemotePools(uint64 remoteChainSelector) public view returns (bytes[] memory) { + bytes32[] memory remotePoolHashes = s_remoteChainConfigs[remoteChainSelector].remotePools.values(); + + bytes[] memory remotePools = new bytes[](remotePoolHashes.length); + for (uint256 i = 0; i < remotePoolHashes.length; ++i) { + remotePools[i] = s_remotePoolAddresses[remotePoolHashes[i]]; + } + + return remotePools; + } + + /// @notice Checks if the pool address is configured on the remote chain. + /// @param remoteChainSelector Remote chain selector. + /// @param remotePoolAddress The address of the remote pool. 
+ function isRemotePool(uint64 remoteChainSelector, bytes calldata remotePoolAddress) public view returns (bool) { + return s_remoteChainConfigs[remoteChainSelector].remotePools.contains(keccak256(remotePoolAddress)); + } + + /// @notice Gets the token address on the remote chain. + /// @param remoteChainSelector Remote chain selector. + /// @dev To support non-evm chains, this value is encoded into bytes + function getRemoteToken(uint64 remoteChainSelector) public view returns (bytes memory) { + return s_remoteChainConfigs[remoteChainSelector].remoteTokenAddress; + } + + /// @notice Adds a remote pool for a given chain selector. This could be due to a pool being upgraded on the remote + /// chain. We don't simply want to replace the old pool as there could still be valid inflight messages from the old + /// pool. This function allows for multiple pools to be added for a single chain selector. + /// @param remoteChainSelector The remote chain selector for which the remote pool address is being added. + /// @param remotePoolAddress The address of the new remote pool. + function addRemotePool(uint64 remoteChainSelector, bytes calldata remotePoolAddress) external onlyOwner { + if (!isSupportedChain(remoteChainSelector)) revert NonExistentChain(remoteChainSelector); + + _setRemotePool(remoteChainSelector, remotePoolAddress); + } + + /// @notice Removes the remote pool address for a given chain selector. + /// @dev All inflight txs from the remote pool will be rejected after it is removed. To ensure no loss of funds, there + /// should be no inflight txs from the given pool. + function removeRemotePool(uint64 remoteChainSelector, bytes calldata remotePoolAddress) external onlyOwner { + if (!isSupportedChain(remoteChainSelector)) revert NonExistentChain(remoteChainSelector); + + if (!s_remoteChainConfigs[remoteChainSelector].remotePools.remove(keccak256(remotePoolAddress))) { + revert InvalidRemotePoolForChain(remoteChainSelector, remotePoolAddress); + } + + emit RemotePoolRemoved(remoteChainSelector, remotePoolAddress); + } + + /// @inheritdoc IPoolV1 + function isSupportedChain(uint64 remoteChainSelector) public view returns (bool) { + return s_remoteChainSelectors.contains(remoteChainSelector); + } + + /// @notice Get list of allowed chains + /// @return list of chains. + function getSupportedChains() public view returns (uint64[] memory) { + uint256[] memory uint256ChainSelectors = s_remoteChainSelectors.values(); + uint64[] memory chainSelectors = new uint64[](uint256ChainSelectors.length); + for (uint256 i = 0; i < uint256ChainSelectors.length; ++i) { + chainSelectors[i] = uint64(uint256ChainSelectors[i]); + } + + return chainSelectors; + } + + /// @notice Sets the permissions for a list of chains selectors. Actual senders for these chains + /// need to be allowed on the Router to interact with this pool. + /// @param remoteChainSelectorsToRemove A list of chain selectors to remove. + /// @param chainsToAdd A list of chains and their new permission status & rate limits. 
+ /// @dev Only callable by the owner + function applyChainUpdates( + uint64[] calldata remoteChainSelectorsToRemove, + ChainUpdate[] calldata chainsToAdd + ) external virtual onlyOwner { + for (uint256 i = 0; i < remoteChainSelectorsToRemove.length; ++i) { + uint64 remoteChainSelectorToRemove = remoteChainSelectorsToRemove[i]; + // If the chain doesn't exist, revert + if (!s_remoteChainSelectors.remove(remoteChainSelectorToRemove)) { + revert NonExistentChain(remoteChainSelectorToRemove); + } + + // Remove all remote pool hashes for the chain + bytes32[] memory remotePools = s_remoteChainConfigs[remoteChainSelectorToRemove].remotePools.values(); + for (uint256 j = 0; j < remotePools.length; ++j) { + s_remoteChainConfigs[remoteChainSelectorToRemove].remotePools.remove(remotePools[j]); + } + + delete s_remoteChainConfigs[remoteChainSelectorToRemove]; + + emit ChainRemoved(remoteChainSelectorToRemove); + } + + for (uint256 i = 0; i < chainsToAdd.length; ++i) { + ChainUpdate memory newChain = chainsToAdd[i]; + RateLimiter._validateTokenBucketConfig(newChain.outboundRateLimiterConfig, false); + RateLimiter._validateTokenBucketConfig(newChain.inboundRateLimiterConfig, false); + + if (newChain.remoteTokenAddress.length == 0) { + revert ZeroAddressNotAllowed(); + } + + // If the chain already exists, revert + if (!s_remoteChainSelectors.add(newChain.remoteChainSelector)) { + revert ChainAlreadyExists(newChain.remoteChainSelector); + } + + RemoteChainConfig storage remoteChainConfig = s_remoteChainConfigs[newChain.remoteChainSelector]; + + remoteChainConfig.outboundRateLimiterConfig = RateLimiter.TokenBucket({ + rate: newChain.outboundRateLimiterConfig.rate, + capacity: newChain.outboundRateLimiterConfig.capacity, + tokens: newChain.outboundRateLimiterConfig.capacity, + lastUpdated: uint32(block.timestamp), + isEnabled: newChain.outboundRateLimiterConfig.isEnabled + }); + remoteChainConfig.inboundRateLimiterConfig = RateLimiter.TokenBucket({ + rate: newChain.inboundRateLimiterConfig.rate, + capacity: newChain.inboundRateLimiterConfig.capacity, + tokens: newChain.inboundRateLimiterConfig.capacity, + lastUpdated: uint32(block.timestamp), + isEnabled: newChain.inboundRateLimiterConfig.isEnabled + }); + remoteChainConfig.remoteTokenAddress = newChain.remoteTokenAddress; + + for (uint256 j = 0; j < newChain.remotePoolAddresses.length; ++j) { + _setRemotePool(newChain.remoteChainSelector, newChain.remotePoolAddresses[j]); + } + + emit ChainAdded( + newChain.remoteChainSelector, + newChain.remoteTokenAddress, + newChain.outboundRateLimiterConfig, + newChain.inboundRateLimiterConfig + ); + } + } + + /// @notice Adds a pool address to the allowed remote token pools for a particular chain. + /// @param remoteChainSelector The remote chain selector for which the remote pool address is being added. + /// @param remotePoolAddress The address of the new remote pool. + function _setRemotePool(uint64 remoteChainSelector, bytes memory remotePoolAddress) internal { + if (remotePoolAddress.length == 0) { + revert ZeroAddressNotAllowed(); + } + + bytes32 poolHash = keccak256(remotePoolAddress); + + // Check if the pool already exists. + if (!s_remoteChainConfigs[remoteChainSelector].remotePools.add(poolHash)) { + revert PoolAlreadyAdded(remoteChainSelector, remotePoolAddress); + } + + // Add the pool to the mapping to be able to un-hash it later. 
+ s_remotePoolAddresses[poolHash] = remotePoolAddress; + + emit RemotePoolAdded(remoteChainSelector, remotePoolAddress); + } + + // ================================================================ + // │ Rate limiting │ + // ================================================================ + + /// @dev The inbound rate limits should be slightly higher than the outbound rate limits. This is because many chains + /// finalize blocks in batches. CCIP also commits messages in batches: the commit plugin bundles multiple messages in + /// a single merkle root. + /// Imagine the following scenario. + /// - Chain A has an inbound and outbound rate limit of 100 tokens capacity and 1 token per second refill rate. + /// - Chain B has an inbound and outbound rate limit of 100 tokens capacity and 1 token per second refill rate. + /// + /// At time 0: + /// - Chain A sends 100 tokens to Chain B. + /// At time 5: + /// - Chain A sends 5 tokens to Chain B. + /// At time 6: + /// The epoch that contains blocks [0-5] is finalized. + /// Both transactions will be included in the same merkle root and become executable at the same time. This means + /// the token pool on chain B requires a capacity of 105 to successfully execute both messages at the same time. + /// The exact additional capacity required depends on the refill rate and the size of the source chain epochs and the + /// CCIP round time. For simplicity, a 5-10% buffer should be sufficient in most cases. + + /// @notice Sets the rate limiter admin address. + /// @dev Only callable by the owner. + /// @param rateLimitAdmin The new rate limiter admin address. + function setRateLimitAdmin(address rateLimitAdmin) external onlyOwner { + s_rateLimitAdmin = rateLimitAdmin; + emit RateLimitAdminSet(rateLimitAdmin); + } + + /// @notice Gets the rate limiter admin address. + function getRateLimitAdmin() external view returns (address) { + return s_rateLimitAdmin; + } + + /// @notice Consumes outbound rate limiting capacity in this pool + function _consumeOutboundRateLimit(uint64 remoteChainSelector, uint256 amount) internal { + s_remoteChainConfigs[remoteChainSelector].outboundRateLimiterConfig._consume(amount, address(i_token)); + } + + /// @notice Consumes inbound rate limiting capacity in this pool + function _consumeInboundRateLimit(uint64 remoteChainSelector, uint256 amount) internal { + s_remoteChainConfigs[remoteChainSelector].inboundRateLimiterConfig._consume(amount, address(i_token)); + } + + /// @notice Gets the token bucket with its values for the block it was requested at. + /// @return The token bucket. + function getCurrentOutboundRateLimiterState( + uint64 remoteChainSelector + ) external view returns (RateLimiter.TokenBucket memory) { + return s_remoteChainConfigs[remoteChainSelector].outboundRateLimiterConfig._currentTokenBucketState(); + } + + /// @notice Gets the token bucket with its values for the block it was requested at. + /// @return The token bucket. + function getCurrentInboundRateLimiterState( + uint64 remoteChainSelector + ) external view returns (RateLimiter.TokenBucket memory) { + return s_remoteChainConfigs[remoteChainSelector].inboundRateLimiterConfig._currentTokenBucketState(); + } + + /// @notice Sets the chain rate limiter config. + /// @param remoteChainSelector The remote chain selector for which the rate limits apply. + /// @param outboundConfig The new outbound rate limiter config, meaning the onRamp rate limits for the given chain. 
+ /// @param inboundConfig The new inbound rate limiter config, meaning the offRamp rate limits for the given chain. + function setChainRateLimiterConfig( + uint64 remoteChainSelector, + RateLimiter.Config memory outboundConfig, + RateLimiter.Config memory inboundConfig + ) external { + if (msg.sender != s_rateLimitAdmin && msg.sender != owner()) revert Unauthorized(msg.sender); + + _setRateLimitConfig(remoteChainSelector, outboundConfig, inboundConfig); + } + + function _setRateLimitConfig( + uint64 remoteChainSelector, + RateLimiter.Config memory outboundConfig, + RateLimiter.Config memory inboundConfig + ) internal { + if (!isSupportedChain(remoteChainSelector)) revert NonExistentChain(remoteChainSelector); + RateLimiter._validateTokenBucketConfig(outboundConfig, false); + s_remoteChainConfigs[remoteChainSelector].outboundRateLimiterConfig._setTokenBucketConfig(outboundConfig); + RateLimiter._validateTokenBucketConfig(inboundConfig, false); + s_remoteChainConfigs[remoteChainSelector].inboundRateLimiterConfig._setTokenBucketConfig(inboundConfig); + emit ChainConfigured(remoteChainSelector, outboundConfig, inboundConfig); + } + + // ================================================================ + // │ Access │ + // ================================================================ + + /// @notice Checks whether remote chain selector is configured on this contract, and if the msg.sender + /// is a permissioned onRamp for the given chain on the Router. + function _onlyOnRamp(uint64 remoteChainSelector) internal view { + if (!isSupportedChain(remoteChainSelector)) revert ChainNotAllowed(remoteChainSelector); + if (!(msg.sender == s_router.getOnRamp(remoteChainSelector))) revert CallerIsNotARampOnRouter(msg.sender); + } + + /// @notice Checks whether remote chain selector is configured on this contract, and if the msg.sender + /// is a permissioned offRamp for the given chain on the Router. + function _onlyOffRamp(uint64 remoteChainSelector) internal view { + if (!isSupportedChain(remoteChainSelector)) revert ChainNotAllowed(remoteChainSelector); + if (!s_router.isOffRamp(remoteChainSelector, msg.sender)) revert CallerIsNotARampOnRouter(msg.sender); + } + + // ================================================================ + // │ Allowlist │ + // ================================================================ + + function _checkAllowList(address sender) internal view { + if (i_allowlistEnabled) { + if (!s_allowlist.contains(sender)) { + revert SenderNotAllowed(sender); + } + } + } + + /// @notice Gets whether the allowlist functionality is enabled. + /// @return true is enabled, false if not. + function getAllowListEnabled() external view returns (bool) { + return i_allowlistEnabled; + } + + /// @notice Gets the allowed addresses. + /// @return The allowed addresses. + function getAllowList() external view returns (address[] memory) { + return s_allowlist.values(); + } + + /// @notice Apply updates to the allow list. + /// @param removes The addresses to be removed. + /// @param adds The addresses to be added. + function applyAllowListUpdates(address[] calldata removes, address[] calldata adds) external onlyOwner { + _applyAllowListUpdates(removes, adds); + } + + /// @notice Internal version of applyAllowListUpdates to allow for reuse in the constructor. 
+ function _applyAllowListUpdates(address[] memory removes, address[] memory adds) internal { + if (!i_allowlistEnabled) revert AllowListNotEnabled(); + + for (uint256 i = 0; i < removes.length; ++i) { + address toRemove = removes[i]; + if (s_allowlist.remove(toRemove)) { + emit AllowListRemove(toRemove); + } + } + for (uint256 i = 0; i < adds.length; ++i) { + address toAdd = adds[i]; + if (toAdd == address(0)) { + continue; + } + if (s_allowlist.add(toAdd)) { + emit AllowListAdd(toAdd); + } + } + } + + /// @dev This empty reserved space is put in place to allow future versions to add new + /// variables without shifting down storage in the inheritance chain. + uint256[42] private __gap; +} diff --git a/contracts/src/v0.8/ccip/pools/GHO/diffs/UpgradeableBurnMintTokenPoolAbstract_diff.md b/contracts/src/v0.8/ccip/pools/GHO/diffs/UpgradeableBurnMintTokenPoolAbstract_diff.md new file mode 100644 index 0000000000..2c55b34313 --- /dev/null +++ b/contracts/src/v0.8/ccip/pools/GHO/diffs/UpgradeableBurnMintTokenPoolAbstract_diff.md @@ -0,0 +1,60 @@ +```diff +diff --git a/src/v0.8/ccip/pools/BurnMintTokenPoolAbstract.sol b/src/v0.8/ccip/pools/GHO/UpgradeableBurnMintTokenPoolAbstract.sol +index b3bbf4ff5e..2e90c6d4ea 100644 +--- a/src/v0.8/ccip/pools/BurnMintTokenPoolAbstract.sol ++++ b/src/v0.8/ccip/pools/GHO/UpgradeableBurnMintTokenPoolAbstract.sol +@@ -1,18 +1,16 @@ + // SPDX-License-Identifier: BUSL-1.1 +-pragma solidity 0.8.24; ++pragma solidity ^0.8.0; + +-import {IBurnMintERC20} from "../../shared/token/ERC20/IBurnMintERC20.sol"; ++import {IBurnMintERC20} from "../../../shared/token/ERC20/IBurnMintERC20.sol"; + +-import {Pool} from "../libraries/Pool.sol"; +-import {TokenPool} from "./TokenPool.sol"; ++import {Pool} from "../../libraries/Pool.sol"; ++import {UpgradeableTokenPool} from "./UpgradeableTokenPool.sol"; + +-abstract contract BurnMintTokenPoolAbstract is TokenPool { ++abstract contract UpgradeableBurnMintTokenPoolAbstract is UpgradeableTokenPool { + /// @notice Contains the specific burn call for a pool. + /// @dev overriding this method allows us to create pools with different burn signatures + /// without duplicating the underlying logic. 
+- function _burn(
+- uint256 amount
+- ) internal virtual;
++ function _burn(uint256 amount) internal virtual;
+
+ /// @notice Burn the token in the pool
+ /// @dev The _validateLockOrBurn check is an essential security check
+@@ -25,10 +23,11 @@ abstract contract BurnMintTokenPoolAbstract is TokenPool {
+
+ emit Burned(msg.sender, lockOrBurnIn.amount);
+
+- return Pool.LockOrBurnOutV1({
+- destTokenAddress: getRemoteToken(lockOrBurnIn.remoteChainSelector),
+- destPoolData: _encodeLocalDecimals()
+- });
++ return
++ Pool.LockOrBurnOutV1({
++ destTokenAddress: getRemoteToken(lockOrBurnIn.remoteChainSelector),
++ destPoolData: _encodeLocalDecimals()
++ });
+ }
+
+ /// @notice Mint tokens from the pool to the recipient
+@@ -39,8 +38,10 @@ abstract contract BurnMintTokenPoolAbstract is TokenPool {
+ _validateReleaseOrMint(releaseOrMintIn);
+
+ // Calculate the local amount
+- uint256 localAmount =
+- _calculateLocalAmount(releaseOrMintIn.amount, _parseRemoteDecimals(releaseOrMintIn.sourcePoolData));
++ uint256 localAmount = _calculateLocalAmount(
++ releaseOrMintIn.amount,
++ _parseRemoteDecimals(releaseOrMintIn.sourcePoolData)
++ );
+
+ // Mint to the receiver
+ IBurnMintERC20(address(i_token)).mint(releaseOrMintIn.receiver, localAmount);
+```
diff --git a/contracts/src/v0.8/ccip/pools/GHO/diffs/UpgradeableBurnMintTokenPool_diff.md b/contracts/src/v0.8/ccip/pools/GHO/diffs/UpgradeableBurnMintTokenPool_diff.md
new file mode 100644
index 0000000000..f5a90d42b1
--- /dev/null
+++ b/contracts/src/v0.8/ccip/pools/GHO/diffs/UpgradeableBurnMintTokenPool_diff.md
@@ -0,0 +1,97 @@
+```diff
+diff --git a/src/v0.8/ccip/pools/BurnMintTokenPool.sol b/src/v0.8/ccip/pools/GHO/UpgradeableBurnMintTokenPool.sol
+index 30203a4ced..6c1813f51d 100644
+--- a/src/v0.8/ccip/pools/BurnMintTokenPool.sol
++++ b/src/v0.8/ccip/pools/GHO/UpgradeableBurnMintTokenPool.sol
+@@ -1,33 +1,75 @@
+ // SPDX-License-Identifier: BUSL-1.1
+-pragma solidity 0.8.24;
++pragma solidity ^0.8.0;
+
+-import {ITypeAndVersion} from "../../shared/interfaces/ITypeAndVersion.sol";
+-import {IBurnMintERC20} from "../../shared/token/ERC20/IBurnMintERC20.sol";
++import {ITypeAndVersion} from "../../../shared/interfaces/ITypeAndVersion.sol";
++import {IBurnMintERC20} from "../../../shared/token/ERC20/IBurnMintERC20.sol";
+
+-import {BurnMintTokenPoolAbstract} from "./BurnMintTokenPoolAbstract.sol";
+-import {TokenPool} from "./TokenPool.sol";
++import {UpgradeableBurnMintTokenPoolAbstract} from "./UpgradeableBurnMintTokenPoolAbstract.sol";
++import {UpgradeableTokenPool} from "./UpgradeableTokenPool.sol";
++
++import {Initializable} from "solidity-utils/contracts/transparent-proxy/Initializable.sol";
++import {IRouter} from "../../interfaces/IRouter.sol";
++
++/// @title UpgradeableBurnMintTokenPool
++/// @author Aave Labs
++/// @notice Upgradeable version of Chainlink's CCIP BurnMintTokenPool
++/// @dev Contract adaptations:
++/// - Implementation of Initializable to allow upgrades
++/// - Move of allowlist and router definition to initialization stage
++/// - Addition of authorized functions to directly mint/burn liquidity, thereby increasing/reducing the facilitator's bucket level.
++/// - Modifications from inherited contract (see contract for more details):
++/// - UpgradeableTokenPool:
++/// - Remove i_token decimal check in constructor
++/// - Add storage `__gap` for future upgrades.
+
+-/// @notice This pool mints and burns a 3rd-party token.
+ /// @dev Pool whitelisting mode is set in the constructor and cannot be modified later.
+ /// It either accepts any address as originalSender, or only accepts whitelisted originalSender.
+ /// The only way to change whitelisting mode is to deploy a new pool.
+ /// If that is expected, please make sure the token's burner/minter roles are adjustable.
+ /// @dev This contract is a variant of BurnMintTokenPool that uses `burn(amount)`.
+-contract BurnMintTokenPool is BurnMintTokenPoolAbstract, ITypeAndVersion {
++contract UpgradeableBurnMintTokenPool is Initializable, UpgradeableBurnMintTokenPoolAbstract, ITypeAndVersion {
+ string public constant override typeAndVersion = "BurnMintTokenPool 1.5.1";
+
+ constructor(
+- IBurnMintERC20 token,
++ address token,
+ uint8 localTokenDecimals,
+- address[] memory allowlist,
+ address rmnProxy,
+- address router
+- ) TokenPool(token, localTokenDecimals, allowlist, rmnProxy, router) {}
++ bool allowListEnabled
++ ) UpgradeableTokenPool(IBurnMintERC20(token), localTokenDecimals, rmnProxy, allowListEnabled) {}
+
+- /// @inheritdoc BurnMintTokenPoolAbstract
+- function _burn(
+- uint256 amount
+- ) internal virtual override {
++ function initialize(address owner_, address[] memory allowlist, address router) external initializer {
++ if (router == address(0) || owner_ == address(0)) revert ZeroAddressNotAllowed();
++
++ _transferOwnership(owner_);
++ s_router = IRouter(router);
++ if (i_allowlistEnabled) _applyAllowListUpdates(new address[](0), allowlist);
++ }
++
++ /// @notice Mint an amount of tokens with no additional logic.
++ /// @dev This GHO-specific functionality is designed for migrating bucket levels between
++ /// facilitators. The new pool is expected to mint amount of tokens, while the old pool
++ /// burns an equivalent amount. This ensures the facilitator can be offboarded, as all
++ /// liquidity minted by it must be fully burned.
++ /// @param to The address to which the minted tokens will be transferred. This needs to
++ /// be the old token pool, or the facilitator being offboarded.
++ /// @param amount The amount of tokens to mint and transfer to old pool.
++ function directMint(address to, uint256 amount) external onlyOwner {
++ IBurnMintERC20(address(i_token)).mint(to, amount);
++ }
++
++ /// @notice Burn an amount of tokens with no additional logic.
++ /// @dev This GHO-specific functionality is designed for migrating bucket levels between
++ /// facilitators. The new pool is expected to mint amount of tokens, while the old pool
++ /// burns an equivalent amount. This ensures the facilitator can be offboarded, as all
++ /// liquidity minted by it must be fully burned.
++ /// @param amount The amount of tokens to burn.
++ function directBurn(uint256 amount) external onlyOwner { ++ _burn(amount); ++ } ++ ++ /// @inheritdoc UpgradeableBurnMintTokenPoolAbstract ++ function _burn(uint256 amount) internal virtual override { + IBurnMintERC20(address(i_token)).burn(amount); + } + } +``` diff --git a/contracts/src/v0.8/ccip/pools/GHO/diffs/UpgradeableLockReleaseTokenPool_diff.md b/contracts/src/v0.8/ccip/pools/GHO/diffs/UpgradeableLockReleaseTokenPool_diff.md new file mode 100644 index 0000000000..9138a940c5 --- /dev/null +++ b/contracts/src/v0.8/ccip/pools/GHO/diffs/UpgradeableLockReleaseTokenPool_diff.md @@ -0,0 +1,264 @@ +```diff +diff --git a/src/v0.8/ccip/pools/LockReleaseTokenPool.sol b/src/v0.8/ccip/pools/GHO/UpgradeableLockReleaseTokenPool.sol +index ecc28a14dd..f8ed82bcc7 100644 +--- a/src/v0.8/ccip/pools/LockReleaseTokenPool.sol ++++ b/src/v0.8/ccip/pools/GHO/UpgradeableLockReleaseTokenPool.sol +@@ -1,25 +1,46 @@ + // SPDX-License-Identifier: BUSL-1.1 +-pragma solidity 0.8.24; ++pragma solidity ^0.8.0; + +-import {ILiquidityContainer} from "../../liquiditymanager/interfaces/ILiquidityContainer.sol"; +-import {ITypeAndVersion} from "../../shared/interfaces/ITypeAndVersion.sol"; ++import {Initializable} from "solidity-utils/contracts/transparent-proxy/Initializable.sol"; + +-import {Pool} from "../libraries/Pool.sol"; +-import {TokenPool} from "./TokenPool.sol"; ++import {ILiquidityContainer} from "../../../liquiditymanager/interfaces/ILiquidityContainer.sol"; ++import {ITypeAndVersion} from "../../../shared/interfaces/ITypeAndVersion.sol"; + +-import {IERC20} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/IERC20.sol"; +-import {SafeERC20} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/utils/SafeERC20.sol"; +-import {IERC165} from "../../vendor/openzeppelin-solidity/v5.0.2/contracts/utils/introspection/IERC165.sol"; ++import {IERC20} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/IERC20.sol"; ++import {SafeERC20} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/utils/SafeERC20.sol"; ++import {IERC165} from "../../../vendor/openzeppelin-solidity/v5.0.2/contracts/utils/introspection/IERC165.sol"; + +-/// @notice Token pool used for tokens on their native chain. This uses a lock and release mechanism. ++import {Pool} from "../../libraries/Pool.sol"; ++import {IRouter} from "../../interfaces/IRouter.sol"; ++import {UpgradeableTokenPool} from "./UpgradeableTokenPool.sol"; ++ ++/// @title UpgradeableLockReleaseTokenPool ++/// @author Aave Labs ++/// @notice Upgradeable version of Chainlink's CCIP LockReleaseTokenPool ++/// @dev Contract adaptations: ++/// - Implementation of Initializable to allow upgrades ++/// - Move of allowlist and router definition to initialization stage ++/// - Addition of a bridge limit to regulate the maximum amount of tokens that can be transferred out (burned/locked) ++/// - Addition of authorized function to update amount of tokens that are currently bridged ++/// - Modifications from inherited contract (see contract for more details): ++/// - UpgradeableTokenPool ++/// - Remove i_token decimal check in constructor ++/// - Add storage `__gap` for future upgrades. ++ ++/// @dev Token pool used for tokens on their native chain. This uses a lock and release mechanism. + /// Because of lock/unlock requiring liquidity, this pool contract also has function to add and remove + /// liquidity. This allows for proper bookkeeping for both user and liquidity provider balances. 
+ /// @dev One token per LockReleaseTokenPool. +-contract LockReleaseTokenPool is TokenPool, ILiquidityContainer, ITypeAndVersion { ++contract UpgradeableLockReleaseTokenPool is Initializable, UpgradeableTokenPool, ILiquidityContainer, ITypeAndVersion { + using SafeERC20 for IERC20; + + error InsufficientLiquidity(); + error LiquidityNotAccepted(); ++ error BridgeLimitExceeded(uint256 bridgeLimit); ++ error NotEnoughBridgedAmount(); ++ ++ event BridgeLimitUpdated(uint256 oldBridgeLimit, uint256 newBridgeLimit); ++ event BridgeLimitAdminUpdated(address indexed oldAdmin, address indexed newAdmin); + + event LiquidityTransferred(address indexed from, uint256 amount); + +@@ -33,30 +54,69 @@ contract LockReleaseTokenPool is TokenPool, ILiquidityContainer, ITypeAndVersion + /// @notice The address of the rebalancer. + address internal s_rebalancer; + ++ /// @notice Maximum amount of tokens that can be bridged to other chains ++ uint256 private s_bridgeLimit; ++ /// @notice Amount of tokens bridged (transferred out) ++ /// @dev Must always be equal to or below the bridge limit ++ uint256 private s_currentBridged; ++ /// @notice The address of the bridge limit admin. ++ /// @dev Can be address(0) if none is configured. ++ address internal s_bridgeLimitAdmin; ++ ++ // @notice Constructor ++ // @param token The bridgeable token that is managed by this pool. ++ // @param localTokenDecimals The number of decimals of the token that is managed by this pool. ++ // @param rmnProxy The address of the rmn proxy ++ // @param allowlistEnabled True if pool is set to access-controlled mode, false otherwise ++ // @param acceptLiquidity True if the pool accepts liquidity, false otherwise + constructor( +- IERC20 token, ++ address token, + uint8 localTokenDecimals, +- address[] memory allowlist, + address rmnProxy, +- bool acceptLiquidity, +- address router +- ) TokenPool(token, localTokenDecimals, allowlist, rmnProxy, router) { ++ bool allowListEnabled, ++ bool acceptLiquidity ++ ) UpgradeableTokenPool(IERC20(token), localTokenDecimals, rmnProxy, allowListEnabled) { + i_acceptLiquidity = acceptLiquidity; + } + ++ /// @dev Initializer ++ /// @dev The address passed as `owner` must accept ownership after initialization. 
++ /// @dev The `allowlist` is only effective if pool is set to access-controlled mode ++ /// @param owner_ The address of the owner ++ /// @param allowlist A set of addresses allowed to trigger lockOrBurn as original senders ++ /// @param router The address of the router ++ /// @param bridgeLimit The maximum amount of tokens that can be bridged to other chains ++ function initialize( ++ address owner_, ++ address[] memory allowlist, ++ address router, ++ uint256 bridgeLimit ++ ) external initializer { ++ if (router == address(0) || owner_ == address(0)) revert ZeroAddressNotAllowed(); ++ ++ _transferOwnership(owner_); ++ s_router = IRouter(router); ++ if (i_allowlistEnabled) _applyAllowListUpdates(new address[](0), allowlist); ++ s_bridgeLimit = bridgeLimit; ++ } ++ + /// @notice Locks the token in the pool + /// @dev The _validateLockOrBurn check is an essential security check + function lockOrBurn( + Pool.LockOrBurnInV1 calldata lockOrBurnIn + ) external virtual override returns (Pool.LockOrBurnOutV1 memory) { ++ // Increase bridged amount because tokens are leaving the source chain ++ if ((s_currentBridged += lockOrBurnIn.amount) > s_bridgeLimit) revert BridgeLimitExceeded(s_bridgeLimit); ++ + _validateLockOrBurn(lockOrBurnIn); + + emit Locked(msg.sender, lockOrBurnIn.amount); + +- return Pool.LockOrBurnOutV1({ +- destTokenAddress: getRemoteToken(lockOrBurnIn.remoteChainSelector), +- destPoolData: _encodeLocalDecimals() +- }); ++ return ++ Pool.LockOrBurnOutV1({ ++ destTokenAddress: getRemoteToken(lockOrBurnIn.remoteChainSelector), ++ destPoolData: _encodeLocalDecimals() ++ }); + } + + /// @notice Release tokens from the pool to the recipient +@@ -64,11 +124,18 @@ contract LockReleaseTokenPool is TokenPool, ILiquidityContainer, ITypeAndVersion + function releaseOrMint( + Pool.ReleaseOrMintInV1 calldata releaseOrMintIn + ) external virtual override returns (Pool.ReleaseOrMintOutV1 memory) { ++ // This should never occur. Amount should never exceed the current bridged amount ++ if (releaseOrMintIn.amount > s_currentBridged) revert NotEnoughBridgedAmount(); ++ // Reduce bridged amount because tokens are back to source chain ++ s_currentBridged -= releaseOrMintIn.amount; ++ + _validateReleaseOrMint(releaseOrMintIn); + + // Calculate the local amount +- uint256 localAmount = +- _calculateLocalAmount(releaseOrMintIn.amount, _parseRemoteDecimals(releaseOrMintIn.sourcePoolData)); ++ uint256 localAmount = _calculateLocalAmount( ++ releaseOrMintIn.amount, ++ _parseRemoteDecimals(releaseOrMintIn.sourcePoolData) ++ ); + + // Release to the recipient + getToken().safeTransfer(releaseOrMintIn.receiver, localAmount); +@@ -79,9 +146,7 @@ contract LockReleaseTokenPool is TokenPool, ILiquidityContainer, ITypeAndVersion + } + + /// @inheritdoc IERC165 +- function supportsInterface( +- bytes4 interfaceId +- ) public pure virtual override returns (bool) { ++ function supportsInterface(bytes4 interfaceId) public pure virtual override returns (bool) { + return interfaceId == type(ILiquidityContainer).interfaceId || super.supportsInterface(interfaceId); + } + +@@ -93,12 +158,55 @@ contract LockReleaseTokenPool is TokenPool, ILiquidityContainer, ITypeAndVersion + + /// @notice Sets the LiquidityManager address. + /// @dev Only callable by the owner. 
+- function setRebalancer(
+- address rebalancer
+- ) external onlyOwner {
++ function setRebalancer(address rebalancer) external onlyOwner {
+ s_rebalancer = rebalancer;
+ }
+
++ /// @notice Sets the current bridged amount to other chains
++ /// @dev Only callable by the owner.
++ /// @dev Does not emit event, it is expected to only be called during token pool migrations.
++ /// @param newCurrentBridged The new bridged amount
++ function setCurrentBridgedAmount(uint256 newCurrentBridged) external onlyOwner {
++ s_currentBridged = newCurrentBridged;
++ }
++
++ /// @notice Sets the bridge limit, the maximum amount of tokens that can be bridged out
++ /// @dev Only callable by the owner or the bridge limit admin.
++ /// @dev Bridge limit changes should be carefully managed, especially when reducing below the current bridged amount
++ /// @param newBridgeLimit The new bridge limit
++ function setBridgeLimit(uint256 newBridgeLimit) external {
++ if (msg.sender != s_bridgeLimitAdmin && msg.sender != owner()) revert Unauthorized(msg.sender);
++ uint256 oldBridgeLimit = s_bridgeLimit;
++ s_bridgeLimit = newBridgeLimit;
++ emit BridgeLimitUpdated(oldBridgeLimit, newBridgeLimit);
++ }
++
++ /// @notice Sets the bridge limit admin address.
++ /// @dev Only callable by the owner.
++ /// @param bridgeLimitAdmin The new bridge limit admin address.
++ function setBridgeLimitAdmin(address bridgeLimitAdmin) external onlyOwner {
++ address oldAdmin = s_bridgeLimitAdmin;
++ s_bridgeLimitAdmin = bridgeLimitAdmin;
++ emit BridgeLimitAdminUpdated(oldAdmin, bridgeLimitAdmin);
++ }
++
++ /// @notice Gets the bridge limit
++ /// @return The maximum amount of tokens that can be transferred out to other chains
++ function getBridgeLimit() external view virtual returns (uint256) {
++ return s_bridgeLimit;
++ }
++
++ /// @notice Gets the current bridged amount to other chains
++ /// @return The amount of tokens transferred out to other chains
++ function getCurrentBridgedAmount() external view virtual returns (uint256) {
++ return s_currentBridged;
++ }
++
++ /// @notice Gets the bridge limiter admin address.
++ function getBridgeLimitAdmin() external view returns (address) {
++ return s_bridgeLimitAdmin;
++ }
++
+ /// @notice Checks if the pool can accept liquidity.
+ /// @return true if the pool can accept liquidity, false otherwise.
+ function canAcceptLiquidity() external view returns (bool) {
+@@ -107,9 +215,7 @@ contract LockReleaseTokenPool is TokenPool, ILiquidityContainer, ITypeAndVersion
+
+ /// @notice Adds liquidity to the pool. The tokens should be approved first.
+ /// @param amount The amount of liquidity to provide.
+- function provideLiquidity(
+- uint256 amount
+- ) external {
++ function provideLiquidity(uint256 amount) external {
+ if (!i_acceptLiquidity) revert LiquidityNotAccepted();
+ if (s_rebalancer != msg.sender) revert Unauthorized(msg.sender);
+
+@@ -119,9 +225,7 @@ contract LockReleaseTokenPool is TokenPool, ILiquidityContainer, ITypeAndVersion
+
+ /// @notice Removed liquidity to the pool. The tokens will be sent to msg.sender.
+ /// @param amount The amount of liquidity to remove.
+- function withdrawLiquidity( +- uint256 amount +- ) external { ++ function withdrawLiquidity(uint256 amount) external { + if (s_rebalancer != msg.sender) revert Unauthorized(msg.sender); + + if (i_token.balanceOf(address(this)) < amount) revert InsufficientLiquidity(); +@@ -141,7 +245,7 @@ contract LockReleaseTokenPool is TokenPool, ILiquidityContainer, ITypeAndVersion + /// @param from The address of the old pool. + /// @param amount The amount of liquidity to transfer. + function transferLiquidity(address from, uint256 amount) external onlyOwner { +- LockReleaseTokenPool(from).withdrawLiquidity(amount); ++ UpgradeableLockReleaseTokenPool(from).withdrawLiquidity(amount); + + emit LiquidityTransferred(from, amount); + } +``` diff --git a/contracts/src/v0.8/ccip/pools/GHO/diffs/UpgradeableTokenPool_diff.md b/contracts/src/v0.8/ccip/pools/GHO/diffs/UpgradeableTokenPool_diff.md new file mode 100644 index 0000000000..c1f8beed95 --- /dev/null +++ b/contracts/src/v0.8/ccip/pools/GHO/diffs/UpgradeableTokenPool_diff.md @@ -0,0 +1,254 @@ +```diff +diff --git a/src/v0.8/ccip/pools/TokenPool.sol b/src/v0.8/ccip/pools/GHO/UpgradeableTokenPool.sol +index cd3096f4ef..8c0965a67f 100644 +--- a/src/v0.8/ccip/pools/TokenPool.sol ++++ b/src/v0.8/ccip/pools/GHO/UpgradeableTokenPool.sol +@@ -1,26 +1,29 @@ + // SPDX-License-Identifier: BUSL-1.1 +-pragma solidity 0.8.24; ++pragma solidity ^0.8.0; + +-import {IPoolV1} from "../interfaces/IPool.sol"; +-import {IRMN} from "../interfaces/IRMN.sol"; +-import {IRouter} from "../interfaces/IRouter.sol"; ++import {IPoolV1} from "../../interfaces/IPool.sol"; ++import {IRMN} from "../../interfaces/IRMN.sol"; ++import {IRouter} from "../../interfaces/IRouter.sol"; + +-import {Ownable2StepMsgSender} from "../../shared/access/Ownable2StepMsgSender.sol"; +-import {Pool} from "../libraries/Pool.sol"; +-import {RateLimiter} from "../libraries/RateLimiter.sol"; ++import {Ownable2StepMsgSender} from "../../../shared/access/Ownable2StepMsgSender.sol"; ++import {Pool} from "../../libraries/Pool.sol"; ++import {RateLimiter} from "../../libraries/RateLimiter.sol"; + +-import {IERC20} from "../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/IERC20.sol"; +-import {IERC20Metadata} from +- "../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/extensions/IERC20Metadata.sol"; +-import {IERC165} from "../../vendor/openzeppelin-solidity/v5.0.2/contracts/utils/introspection/IERC165.sol"; +-import {EnumerableSet} from "../../vendor/openzeppelin-solidity/v5.0.2/contracts/utils/structs/EnumerableSet.sol"; ++import {IERC20} from "../../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/IERC20.sol"; ++import {IERC165} from "../../../vendor/openzeppelin-solidity/v5.0.2/contracts/utils/introspection/IERC165.sol"; ++import {EnumerableSet} from "../../../vendor/openzeppelin-solidity/v5.0.2/contracts/utils/structs/EnumerableSet.sol"; + ++/// @title UpgradeableTokenPool ++/// @author Aave Labs ++/// @notice Upgradeable version of Chainlink's CCIP TokenPool + /// @dev This pool supports different decimals on different chains but using this feature could impact the total number + /// of tokens in circulation. Since all of the tokens are locked/burned on the source, and a rounded amount is minted/released on the + /// destination, the number of tokens minted/released could be less than the number of tokens burned/locked. This is because the source + /// chain does not know about the destination token decimals. 
This is not a problem if the decimals are the same on both + /// chains. +-/// ++/// @dev Contract adaptations: ++/// - Remove i_token decimal check in constructor. ++/// - Add storage `__gap` for future upgrades. + /// Example: + /// Assume there is a token with 6 decimals on chain A and 3 decimals on chain B. + /// - 1.234567 tokens are burned on chain A. +@@ -29,7 +32,7 @@ import {EnumerableSet} from "../../vendor/openzeppelin-solidity/v5.0.2/contracts + /// 0.000567 tokens. + /// In the case of a burnMint pool on chain A, these funds are burned in the pool on chain A. + /// In the case of a lockRelease pool on chain A, these funds accumulate in the pool on chain A. +-abstract contract TokenPool is IPoolV1, Ownable2StepMsgSender { ++abstract contract UpgradeableTokenPool is IPoolV1, Ownable2StepMsgSender { + using EnumerableSet for EnumerableSet.Bytes32Set; + using EnumerableSet for EnumerableSet.AddressSet; + using EnumerableSet for EnumerableSet.UintSet; +@@ -117,34 +120,18 @@ abstract contract TokenPool is IPoolV1, Ownable2StepMsgSender { + /// @dev Can be address(0) if none is configured. + address internal s_rateLimitAdmin; + +- constructor(IERC20 token, uint8 localTokenDecimals, address[] memory allowlist, address rmnProxy, address router) { +- if (address(token) == address(0) || router == address(0) || rmnProxy == address(0)) revert ZeroAddressNotAllowed(); ++ constructor(IERC20 token, uint8 localTokenDecimals, address rmnProxy, bool allowListEnabled) { ++ if (address(token) == address(0) || rmnProxy == address(0)) revert ZeroAddressNotAllowed(); + i_token = token; + i_rmnProxy = rmnProxy; +- +- try IERC20Metadata(address(token)).decimals() returns (uint8 actualTokenDecimals) { +- if (localTokenDecimals != actualTokenDecimals) { +- revert InvalidDecimalArgs(localTokenDecimals, actualTokenDecimals); +- } +- } catch { +- // The decimals function doesn't exist, which is possible since it's optional in the ERC20 spec. We skip the check and +- // assume the supplied token decimals are correct. +- } + i_tokenDecimals = localTokenDecimals; + +- s_router = IRouter(router); +- + // Pool can be set as permissioned or permissionless at deployment time only to save hot-path gas. 
+- i_allowlistEnabled = allowlist.length > 0; +- if (i_allowlistEnabled) { +- _applyAllowListUpdates(new address[](0), allowlist); +- } ++ i_allowlistEnabled = allowListEnabled; + } + + /// @inheritdoc IPoolV1 +- function isSupportedToken( +- address token +- ) public view virtual returns (bool) { ++ function isSupportedToken(address token) public view virtual returns (bool) { + return token == address(i_token); + } + +@@ -168,9 +155,7 @@ abstract contract TokenPool is IPoolV1, Ownable2StepMsgSender { + + /// @notice Sets the pool's Router + /// @param newRouter The new Router +- function setRouter( +- address newRouter +- ) public onlyOwner { ++ function setRouter(address newRouter) public onlyOwner { + if (newRouter == address(0)) revert ZeroAddressNotAllowed(); + address oldRouter = address(s_router); + s_router = IRouter(newRouter); +@@ -179,11 +164,11 @@ abstract contract TokenPool is IPoolV1, Ownable2StepMsgSender { + } + + /// @notice Signals which version of the pool interface is supported +- function supportsInterface( +- bytes4 interfaceId +- ) public pure virtual override returns (bool) { +- return interfaceId == Pool.CCIP_POOL_V1 || interfaceId == type(IPoolV1).interfaceId +- || interfaceId == type(IERC165).interfaceId; ++ function supportsInterface(bytes4 interfaceId) public pure virtual override returns (bool) { ++ return ++ interfaceId == Pool.CCIP_POOL_V1 || ++ interfaceId == type(IPoolV1).interfaceId || ++ interfaceId == type(IERC165).interfaceId; + } + + // ================================================================ +@@ -199,9 +184,7 @@ abstract contract TokenPool is IPoolV1, Ownable2StepMsgSender { + /// @param lockOrBurnIn The input to validate. + /// @dev This function should always be called before executing a lock or burn. Not doing so would allow + /// for various exploits. +- function _validateLockOrBurn( +- Pool.LockOrBurnInV1 calldata lockOrBurnIn +- ) internal { ++ function _validateLockOrBurn(Pool.LockOrBurnInV1 calldata lockOrBurnIn) internal { + if (!isSupportedToken(lockOrBurnIn.localToken)) revert InvalidToken(lockOrBurnIn.localToken); + if (IRMN(i_rmnProxy).isCursed(bytes16(uint128(lockOrBurnIn.remoteChainSelector)))) revert CursedByRMN(); + _checkAllowList(lockOrBurnIn.originalSender); +@@ -219,9 +202,7 @@ abstract contract TokenPool is IPoolV1, Ownable2StepMsgSender { + /// @param releaseOrMintIn The input to validate. + /// @dev This function should always be called before executing a release or mint. Not doing so would allow + /// for various exploits. +- function _validateReleaseOrMint( +- Pool.ReleaseOrMintInV1 calldata releaseOrMintIn +- ) internal { ++ function _validateReleaseOrMint(Pool.ReleaseOrMintInV1 calldata releaseOrMintIn) internal { + if (!isSupportedToken(releaseOrMintIn.localToken)) revert InvalidToken(releaseOrMintIn.localToken); + if (IRMN(i_rmnProxy).isCursed(bytes16(uint128(releaseOrMintIn.remoteChainSelector)))) revert CursedByRMN(); + _onlyOffRamp(releaseOrMintIn.remoteChainSelector); +@@ -247,9 +228,7 @@ abstract contract TokenPool is IPoolV1, Ownable2StepMsgSender { + return abi.encode(i_tokenDecimals); + } + +- function _parseRemoteDecimals( +- bytes memory sourcePoolData +- ) internal view virtual returns (uint8) { ++ function _parseRemoteDecimals(bytes memory sourcePoolData) internal view virtual returns (uint8) { + // Fallback to the local token decimals if the source pool data is empty. This allows for backwards compatibility. 
+ if (sourcePoolData.length == 0) { + return i_tokenDecimals; +@@ -304,9 +283,7 @@ abstract contract TokenPool is IPoolV1, Ownable2StepMsgSender { + /// @notice Gets the pool address on the remote chain. + /// @param remoteChainSelector Remote chain selector. + /// @dev To support non-evm chains, this value is encoded into bytes +- function getRemotePools( +- uint64 remoteChainSelector +- ) public view returns (bytes[] memory) { ++ function getRemotePools(uint64 remoteChainSelector) public view returns (bytes[] memory) { + bytes32[] memory remotePoolHashes = s_remoteChainConfigs[remoteChainSelector].remotePools.values(); + + bytes[] memory remotePools = new bytes[](remotePoolHashes.length); +@@ -327,9 +304,7 @@ abstract contract TokenPool is IPoolV1, Ownable2StepMsgSender { + /// @notice Gets the token address on the remote chain. + /// @param remoteChainSelector Remote chain selector. + /// @dev To support non-evm chains, this value is encoded into bytes +- function getRemoteToken( +- uint64 remoteChainSelector +- ) public view returns (bytes memory) { ++ function getRemoteToken(uint64 remoteChainSelector) public view returns (bytes memory) { + return s_remoteChainConfigs[remoteChainSelector].remoteTokenAddress; + } + +@@ -358,9 +333,7 @@ abstract contract TokenPool is IPoolV1, Ownable2StepMsgSender { + } + + /// @inheritdoc IPoolV1 +- function isSupportedChain( +- uint64 remoteChainSelector +- ) public view returns (bool) { ++ function isSupportedChain(uint64 remoteChainSelector) public view returns (bool) { + return s_remoteChainSelectors.contains(remoteChainSelector); + } + +@@ -379,8 +352,7 @@ abstract contract TokenPool is IPoolV1, Ownable2StepMsgSender { + /// @notice Sets the permissions for a list of chains selectors. Actual senders for these chains + /// need to be allowed on the Router to interact with this pool. + /// @param remoteChainSelectorsToRemove A list of chain selectors to remove. +- /// @param chainsToAdd A list of chains and their new permission status & rate limits. Rate limits +- /// are only used when the chain is being added through `allowed` being true. ++ /// @param chainsToAdd A list of chains and their new permission status & rate limits. + /// @dev Only callable by the owner + function applyChainUpdates( + uint64[] calldata remoteChainSelectorsToRemove, +@@ -495,9 +467,7 @@ abstract contract TokenPool is IPoolV1, Ownable2StepMsgSender { + /// @notice Sets the rate limiter admin address. + /// @dev Only callable by the owner. + /// @param rateLimitAdmin The new rate limiter admin address. +- function setRateLimitAdmin( +- address rateLimitAdmin +- ) external onlyOwner { ++ function setRateLimitAdmin(address rateLimitAdmin) external onlyOwner { + s_rateLimitAdmin = rateLimitAdmin; + emit RateLimitAdminSet(rateLimitAdmin); + } +@@ -566,18 +536,14 @@ abstract contract TokenPool is IPoolV1, Ownable2StepMsgSender { + + /// @notice Checks whether remote chain selector is configured on this contract, and if the msg.sender + /// is a permissioned onRamp for the given chain on the Router. 
+- function _onlyOnRamp( +- uint64 remoteChainSelector +- ) internal view { ++ function _onlyOnRamp(uint64 remoteChainSelector) internal view { + if (!isSupportedChain(remoteChainSelector)) revert ChainNotAllowed(remoteChainSelector); + if (!(msg.sender == s_router.getOnRamp(remoteChainSelector))) revert CallerIsNotARampOnRouter(msg.sender); + } + + /// @notice Checks whether remote chain selector is configured on this contract, and if the msg.sender + /// is a permissioned offRamp for the given chain on the Router. +- function _onlyOffRamp( +- uint64 remoteChainSelector +- ) internal view { ++ function _onlyOffRamp(uint64 remoteChainSelector) internal view { + if (!isSupportedChain(remoteChainSelector)) revert ChainNotAllowed(remoteChainSelector); + if (!s_router.isOffRamp(remoteChainSelector, msg.sender)) revert CallerIsNotARampOnRouter(msg.sender); + } +@@ -586,9 +552,7 @@ abstract contract TokenPool is IPoolV1, Ownable2StepMsgSender { + // │ Allowlist │ + // ================================================================ + +- function _checkAllowList( +- address sender +- ) internal view { ++ function _checkAllowList(address sender) internal view { + if (i_allowlistEnabled) { + if (!s_allowlist.contains(sender)) { + revert SenderNotAllowed(sender); +@@ -635,4 +599,8 @@ abstract contract TokenPool is IPoolV1, Ownable2StepMsgSender { + } + } + } ++ ++ /// @dev This empty reserved space is put in place to allow future versions to add new ++ /// variables without shifting down storage in the inheritance chain. ++ uint256[42] private __gap; + } + +``` diff --git a/contracts/src/v0.8/ccip/test/mocks/MockUpgradeable.sol b/contracts/src/v0.8/ccip/test/mocks/MockUpgradeable.sol new file mode 100644 index 0000000000..45eeb5c5d7 --- /dev/null +++ b/contracts/src/v0.8/ccip/test/mocks/MockUpgradeable.sol @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import {Initializable} from "solidity-utils/contracts/transparent-proxy/Initializable.sol"; + +/** + * @dev Mock contract to test upgrades, not to be used in production. 
+ */ +contract MockUpgradeable is Initializable { + /** + * @dev Constructor + */ + constructor() { + // Intentionally left blank + } + + /** + * @dev Initializer + */ + function initialize() public reinitializer(2) { + // Intentionally left blank + } +} diff --git a/contracts/src/v0.8/ccip/test/pools/End2End.t.sol b/contracts/src/v0.8/ccip/test/pools/End2End.t.sol new file mode 100644 index 0000000000..f4f4b00a5e --- /dev/null +++ b/contracts/src/v0.8/ccip/test/pools/End2End.t.sol @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity ^0.8.0; + +import "../commitStore/CommitStore.t.sol"; +import "../onRamp/EVM2EVMOnRampSetup.t.sol"; +import "../offRamp/EVM2EVMOffRampSetup.t.sol"; + +contract E2E is EVM2EVMOnRampSetup, CommitStoreSetup, EVM2EVMOffRampSetup { + using Internal for Internal.EVM2EVMMessage; + + function setUp() public virtual override(EVM2EVMOnRampSetup, CommitStoreSetup, EVM2EVMOffRampSetup) { + EVM2EVMOnRampSetup.setUp(); + CommitStoreSetup.setUp(); + EVM2EVMOffRampSetup.setUp(); + + deployOffRamp(s_commitStore, s_destRouter, address(0)); + } +} diff --git a/contracts/src/v0.8/ccip/test/pools/GHO/GhoBaseTest.t.sol b/contracts/src/v0.8/ccip/test/pools/GHO/GhoBaseTest.t.sol new file mode 100644 index 0000000000..5bdbe7cd52 --- /dev/null +++ b/contracts/src/v0.8/ccip/test/pools/GHO/GhoBaseTest.t.sol @@ -0,0 +1,310 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity ^0.8.0; + +import {GhoToken} from "@aave-gho-core/gho/GhoToken.sol"; +import {TransparentUpgradeableProxy} from "solidity-utils/contracts/transparent-proxy/TransparentUpgradeableProxy.sol"; + +import {IBurnMintERC20} from "../../../../shared/token/ERC20/IBurnMintERC20.sol"; +import {IPoolV1} from "../../../interfaces/IPool.sol"; +import {UpgradeableLockReleaseTokenPool} from "../../../pools/GHO/UpgradeableLockReleaseTokenPool.sol"; +import {UpgradeableBurnMintTokenPool} from "../../../pools/GHO/UpgradeableBurnMintTokenPool.sol"; +import {UpgradeableTokenPool} from "../../../pools/GHO/UpgradeableTokenPool.sol"; +import {RateLimiter} from "../../../libraries/RateLimiter.sol"; +import {Pool} from "../../../libraries/Pool.sol"; +import {BaseTest} from "../../BaseTest.t.sol"; + +abstract contract GhoBaseTest is BaseTest { + error Unauthorized(address caller); + error OnlyCallableByOwner(); + + address internal RMN_PROXY = makeAddr("RMN_PROXY"); + address internal ROUTER = makeAddr("ROUTER"); + address internal RAMP = makeAddr("RAMP"); + address internal AAVE_DAO = makeAddr("AAVE_DAO"); + address internal PROXY_ADMIN = makeAddr("PROXY_ADMIN"); + address internal USER = makeAddr("USER"); + + uint256 public immutable INITIAL_BRIDGE_LIMIT = 100e6 * 1e18; + + struct UtilsStorage { + uint256[] chainsList; + mapping(uint256 => address) pools; // chainId => bridgeTokenPool + mapping(uint256 => address) tokens; // chainId => ghoToken + mapping(uint256 => uint256) bucketCapacities; // chainId => bucketCapacities + mapping(uint256 => uint256) bucketLevels; // chainId => bucketLevels + mapping(uint256 => uint256) liquidity; // chainId => liquidity + uint256 remoteLiquidity; + uint256 bridged; + bool capacityBelowLevelUpdate; + } + + function _deployUpgradeableBurnMintTokenPool( + address ghoToken, + address rmn, + address router, + address owner, + address proxyAdmin + ) internal returns (address) { + // Deploy BurnMintTokenPool for GHO token on source chain + UpgradeableBurnMintTokenPool tokenPoolImpl = new UpgradeableBurnMintTokenPool(ghoToken, 18, rmn, false); + // proxy deploy and init + address[] memory
emptyArray = new address[](0); + bytes memory tokenPoolInitParams = abi.encodeWithSignature( + "initialize(address,address[],address)", + owner, + emptyArray, + router + ); + TransparentUpgradeableProxy tokenPoolProxy = new TransparentUpgradeableProxy( + address(tokenPoolImpl), + proxyAdmin, + tokenPoolInitParams + ); + // Manage ownership + vm.stopPrank(); + vm.prank(owner); + UpgradeableBurnMintTokenPool(address(tokenPoolProxy)).acceptOwnership(); + vm.startPrank(OWNER); + + return address(tokenPoolProxy); + } + + function _deployUpgradeableLockReleaseTokenPool( + address ghoToken, + address rmn, + address router, + address owner, + uint256 bridgeLimit, + address proxyAdmin + ) internal returns (address) { + UpgradeableLockReleaseTokenPool tokenPoolImpl = new UpgradeableLockReleaseTokenPool(ghoToken, 18, rmn, false, true); + // proxy deploy and init + address[] memory emptyArray = new address[](0); + bytes memory tokenPoolInitParams = abi.encodeWithSignature( + "initialize(address,address[],address,uint256)", + owner, + emptyArray, + router, + bridgeLimit + ); + TransparentUpgradeableProxy tokenPoolProxy = new TransparentUpgradeableProxy( + address(tokenPoolImpl), + proxyAdmin, + tokenPoolInitParams + ); + + // Manage ownership + vm.stopPrank(); + vm.prank(owner); + UpgradeableLockReleaseTokenPool(address(tokenPoolProxy)).acceptOwnership(); + vm.startPrank(OWNER); + + return address(tokenPoolProxy); + } + + function _inflateFacilitatorLevel(address tokenPool, address ghoToken, uint256 amount) internal { + vm.stopPrank(); + vm.prank(tokenPool); + IBurnMintERC20(ghoToken).mint(address(0), amount); + } + + function _getProxyAdminAddress(address proxy) internal view returns (address) { + bytes32 ERC1967_ADMIN_SLOT = 0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103; + bytes32 adminSlot = vm.load(proxy, ERC1967_ADMIN_SLOT); + return address(uint160(uint256(adminSlot))); + } + + function _getProxyImplementationAddress(address proxy) internal view returns (address) { + bytes32 ERC1967_IMPLEMENTATION_SLOT = 0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc; + bytes32 implSlot = vm.load(proxy, ERC1967_IMPLEMENTATION_SLOT); + return address(uint160(uint256(implSlot))); + } + + function _getUpgradeableVersion(address proxy) internal view returns (uint8) { + // version is 1st slot + return uint8(uint256(vm.load(proxy, bytes32(uint256(0))))); + } + + function _enableLane(UtilsStorage storage s, uint256 fromId, uint256 toId) internal { + // from + UpgradeableTokenPool.ChainUpdate[] memory chainUpdate = new UpgradeableTokenPool.ChainUpdate[](1); + RateLimiter.Config memory emptyRateConfig = RateLimiter.Config(false, 0, 0); + bytes[] memory remotePoolAddresses = new bytes[](1); + remotePoolAddresses[0] = abi.encode(s.pools[toId]); + chainUpdate[0] = UpgradeableTokenPool.ChainUpdate({ + remoteChainSelector: uint64(toId), + remotePoolAddresses: remotePoolAddresses, + remoteTokenAddress: abi.encode(s.tokens[toId]), + outboundRateLimiterConfig: emptyRateConfig, + inboundRateLimiterConfig: emptyRateConfig + }); + + vm.startPrank(AAVE_DAO); + UpgradeableTokenPool(s.pools[fromId]).applyChainUpdates(new uint64[](0), chainUpdate); + remotePoolAddresses[0] = abi.encode(s.pools[fromId]); + chainUpdate[0].remoteChainSelector = uint64(fromId); + chainUpdate[0].remotePoolAddresses = remotePoolAddresses; + chainUpdate[0].remoteTokenAddress = abi.encode(s.tokens[fromId]); + + UpgradeableTokenPool(s.pools[toId]).applyChainUpdates(new uint64[](0), chainUpdate); + vm.stopPrank(); + } + + 
function _addBridge(UtilsStorage storage s, uint256 chainId, uint256 bucketCapacity) internal { + require(s.tokens[chainId] == address(0), "BRIDGE_ALREADY_EXISTS"); + + s.chainsList.push(chainId); + + // GHO Token + GhoToken ghoToken = new GhoToken(AAVE_DAO); + s.tokens[chainId] = address(ghoToken); + + // UpgradeableTokenPool + address bridgeTokenPool = _deployUpgradeableBurnMintTokenPool( + address(ghoToken), + RMN_PROXY, + ROUTER, + AAVE_DAO, + PROXY_ADMIN + ); + s.pools[chainId] = bridgeTokenPool; + + // Facilitator + s.bucketCapacities[chainId] = bucketCapacity; + vm.stopPrank(); + vm.startPrank(AAVE_DAO); + ghoToken.grantRole(ghoToken.FACILITATOR_MANAGER_ROLE(), AAVE_DAO); + ghoToken.addFacilitator(bridgeTokenPool, "UpgradeableTokenPool", uint128(bucketCapacity)); + vm.stopPrank(); + } + + function _updateBridgeLimit(UtilsStorage storage s, uint256 newBridgeLimit) internal { + vm.stopPrank(); + vm.startPrank(AAVE_DAO); + UpgradeableLockReleaseTokenPool(s.pools[0]).setBridgeLimit(newBridgeLimit); + vm.stopPrank(); + } + + function _updateBucketCapacity(UtilsStorage storage s, uint256 chainId, uint256 newBucketCapacity) internal { + s.bucketCapacities[chainId] = newBucketCapacity; + vm.stopPrank(); + vm.startPrank(AAVE_DAO); + GhoToken(s.tokens[chainId]).grantRole(GhoToken(s.tokens[chainId]).BUCKET_MANAGER_ROLE(), AAVE_DAO); + GhoToken(s.tokens[chainId]).setFacilitatorBucketCapacity(s.pools[chainId], uint128(newBucketCapacity)); + vm.stopPrank(); + } + + function _getCapacity(UtilsStorage storage s, uint256 chain) internal view returns (uint256) { + require(!_isEthereumChain(chain), "No bucket on Ethereum"); + (uint256 capacity, ) = GhoToken(s.tokens[chain]).getFacilitatorBucket(s.pools[chain]); + return capacity; + } + + function _getLevel(UtilsStorage storage s, uint256 chain) internal view returns (uint256) { + require(!_isEthereumChain(chain), "No bucket on Ethereum"); + (, uint256 level) = GhoToken(s.tokens[chain]).getFacilitatorBucket(s.pools[chain]); + return level; + } + + function _getMaxToBridgeOut(UtilsStorage storage s, uint256 fromChain) internal view returns (uint256) { + if (_isEthereumChain(fromChain)) { + UpgradeableLockReleaseTokenPool ethTokenPool = UpgradeableLockReleaseTokenPool(s.pools[0]); + uint256 bridgeLimit = ethTokenPool.getBridgeLimit(); + uint256 currentBridged = ethTokenPool.getCurrentBridgedAmount(); + return currentBridged >= bridgeLimit ? 0 : bridgeLimit - currentBridged; + } else { + (, uint256 level) = GhoToken(s.tokens[fromChain]).getFacilitatorBucket(s.pools[fromChain]); + return level; + } + } + + function _getMaxToBridgeIn(UtilsStorage storage s, uint256 toChain) internal view returns (uint256) { + if (_isEthereumChain(toChain)) { + UpgradeableLockReleaseTokenPool ethTokenPool = UpgradeableLockReleaseTokenPool(s.pools[0]); + return ethTokenPool.getCurrentBridgedAmount(); + } else { + (uint256 capacity, uint256 level) = GhoToken(s.tokens[toChain]).getFacilitatorBucket(s.pools[toChain]); + return level >= capacity ? 
0 : capacity - level; + } + } + + function _bridgeGho( + UtilsStorage storage s, + uint256 fromChain, + uint256 toChain, + address user, + uint256 amount + ) internal { + _moveGhoOrigin(s, fromChain, toChain, user, amount); + _moveGhoDestination(s, fromChain, toChain, user, amount); + } + + function _moveGhoOrigin( + UtilsStorage storage s, + uint256 fromChain, + uint256 toChain, + address user, + uint256 amount + ) internal { + // Simulate CCIP pull of funds + vm.startPrank(user); + GhoToken(s.tokens[fromChain]).transfer(s.pools[fromChain], amount); + + vm.startPrank(RAMP); + IPoolV1(s.pools[fromChain]).lockOrBurn( + Pool.LockOrBurnInV1({ + receiver: abi.encode(user), + remoteChainSelector: uint64(toChain), + originalSender: user, + amount: amount, + localToken: s.tokens[fromChain] + }) + ); + + if (_isEthereumChain(fromChain)) { + // Lock + s.bridged += amount; + } else { + // Burn + s.bucketLevels[fromChain] -= amount; + s.liquidity[fromChain] -= amount; + s.remoteLiquidity -= amount; + } + } + + function _moveGhoDestination( + UtilsStorage storage s, + uint256 fromChain, + uint256 toChain, + address user, + uint256 amount + ) internal { + vm.startPrank(RAMP); + IPoolV1(s.pools[toChain]).releaseOrMint( + Pool.ReleaseOrMintInV1({ + originalSender: abi.encode(user), + remoteChainSelector: uint64(fromChain), + receiver: user, + amount: amount, + localToken: s.tokens[toChain], + sourcePoolAddress: abi.encode(s.pools[fromChain]), + sourcePoolData: bytes(""), + offchainTokenData: bytes("") + }) + ); + + if (_isEthereumChain(toChain)) { + // Release + s.bridged -= amount; + } else { + // Mint + s.bucketLevels[toChain] += amount; + s.liquidity[toChain] += amount; + s.remoteLiquidity += amount; + } + } + + function _isEthereumChain(uint256 chainId) internal pure returns (bool) { + return chainId == 0; + } +} diff --git a/contracts/src/v0.8/ccip/test/pools/GHO/GhoTokenPoolEthereum.t.sol b/contracts/src/v0.8/ccip/test/pools/GHO/GhoTokenPoolEthereum.t.sol new file mode 100644 index 0000000000..0b45eda495 --- /dev/null +++ b/contracts/src/v0.8/ccip/test/pools/GHO/GhoTokenPoolEthereum.t.sol @@ -0,0 +1,984 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity ^0.8.0; + +import {TransparentUpgradeableProxy} from "solidity-utils/contracts/transparent-proxy/TransparentUpgradeableProxy.sol"; +import {stdError} from "forge-std/Test.sol"; +import {IERC165} from "../../../../vendor/openzeppelin-solidity/v4.8.3/contracts/utils/introspection/IERC165.sol"; +import {ILiquidityContainer} from "../../../../liquiditymanager/interfaces/ILiquidityContainer.sol"; +import {IPoolV1} from "../../../interfaces/IPool.sol"; +import {Pool} from "../../../libraries/Pool.sol"; +import {LockReleaseTokenPool} from "../../../pools/LockReleaseTokenPool.sol"; +import {UpgradeableLockReleaseTokenPool} from "../../../pools/GHO/UpgradeableLockReleaseTokenPool.sol"; +import {UpgradeableTokenPool} from "../../../pools/GHO/UpgradeableTokenPool.sol"; +import {EVM2EVMOffRamp} from "../../../offRamp/EVM2EVMOffRamp.sol"; +import {RateLimiter} from "../../../libraries/RateLimiter.sol"; +import {MockUpgradeable} from "../../mocks/MockUpgradeable.sol"; +import {GhoTokenPoolEthereumSetup} from "./GhoTokenPoolEthereumSetup.t.sol"; + +contract GhoTokenPoolEthereum_setRebalancer is GhoTokenPoolEthereumSetup { + function testSetRebalancerSuccess() public { + assertEq(address(s_ghoTokenPool.getRebalancer()), OWNER); + changePrank(AAVE_DAO); + s_ghoTokenPool.setRebalancer(STRANGER); + assertEq(address(s_ghoTokenPool.getRebalancer()), STRANGER); + 
} + + function testSetRebalancerReverts() public { + vm.startPrank(STRANGER); + + vm.expectRevert(OnlyCallableByOwner.selector); + s_ghoTokenPool.setRebalancer(STRANGER); + } +} + +contract GhoTokenPoolEthereum_lockOrBurn is GhoTokenPoolEthereumSetup { + error SenderNotAllowed(address sender); + + event Locked(address indexed sender, uint256 amount); + event TokensConsumed(uint256 tokens); + + function testFuzz_LockOrBurnNoAllowListSuccess(uint256 amount, uint256 bridgedAmount) public { + uint256 maxAmount = _getOutboundRateLimiterConfig().capacity < INITIAL_BRIDGE_LIMIT + ? _getOutboundRateLimiterConfig().capacity + : INITIAL_BRIDGE_LIMIT; + amount = bound(amount, 1, maxAmount); + bridgedAmount = bound(bridgedAmount, 0, INITIAL_BRIDGE_LIMIT - amount); + + changePrank(s_allowedOnRamp); + if (bridgedAmount > 0) { + s_ghoTokenPool.lockOrBurn( + Pool.LockOrBurnInV1({ + receiver: bytes(""), + remoteChainSelector: DEST_CHAIN_SELECTOR, + originalSender: STRANGER, + amount: bridgedAmount, + localToken: address(s_token) + }) + ); + assertEq(s_ghoTokenPool.getCurrentBridgedAmount(), bridgedAmount); + } + + vm.expectEmit(); + emit TokensConsumed(amount); + vm.expectEmit(); + emit Locked(s_allowedOnRamp, amount); + + s_ghoTokenPool.lockOrBurn( + Pool.LockOrBurnInV1({ + receiver: bytes(""), + remoteChainSelector: DEST_CHAIN_SELECTOR, + originalSender: STRANGER, + amount: amount, + localToken: address(s_token) + }) + ); + + assertEq(s_ghoTokenPool.getCurrentBridgedAmount(), bridgedAmount + amount); + } + + function testTokenMaxCapacityExceededReverts() public { + RateLimiter.Config memory rateLimiterConfig = _getOutboundRateLimiterConfig(); + uint256 capacity = rateLimiterConfig.capacity; + uint256 amount = 10 * capacity; + + // increase bridge limit to hit the rate limit error + vm.startPrank(AAVE_DAO); + s_ghoTokenPool.setBridgeLimit(amount); + + vm.expectRevert( + abi.encodeWithSelector(RateLimiter.TokenMaxCapacityExceeded.selector, capacity, amount, address(s_token)) + ); + vm.startPrank(s_allowedOnRamp); + s_ghoTokenPool.lockOrBurn( + Pool.LockOrBurnInV1({ + receiver: bytes(""), + remoteChainSelector: DEST_CHAIN_SELECTOR, + originalSender: STRANGER, + amount: amount, + localToken: address(s_token) + }) + ); + } + + function testTokenBridgeLimitExceededReverts() public { + uint256 bridgeLimit = s_ghoTokenPool.getBridgeLimit(); + uint256 amount = bridgeLimit + 1; + + vm.expectRevert(abi.encodeWithSelector(UpgradeableLockReleaseTokenPool.BridgeLimitExceeded.selector, bridgeLimit)); + vm.startPrank(s_allowedOnRamp); + s_ghoTokenPool.lockOrBurn( + Pool.LockOrBurnInV1({ + receiver: bytes(""), + remoteChainSelector: DEST_CHAIN_SELECTOR, + originalSender: STRANGER, + amount: amount, + localToken: address(s_token) + }) + ); + } +} + +contract GhoTokenPoolEthereum_releaseOrMint is GhoTokenPoolEthereumSetup { + event TokensConsumed(uint256 tokens); + event Released(address indexed sender, address indexed recipient, uint256 amount); + event ChainRemoved(uint64 chainSelector); + + function setUp() public virtual override { + GhoTokenPoolEthereumSetup.setUp(); + + UpgradeableTokenPool.ChainUpdate[] memory chainUpdate = new UpgradeableTokenPool.ChainUpdate[](1); + bytes[] memory remotePoolAddresses = new bytes[](1); + remotePoolAddresses[0] = abi.encode(s_sourcePool); + chainUpdate[0] = UpgradeableTokenPool.ChainUpdate({ + remoteChainSelector: SOURCE_CHAIN_SELECTOR, + remotePoolAddresses: remotePoolAddresses, + remoteTokenAddress: abi.encode(s_sourceToken), + outboundRateLimiterConfig: 
_getOutboundRateLimiterConfig(), + inboundRateLimiterConfig: _getInboundRateLimiterConfig() + }); + + changePrank(AAVE_DAO); + s_ghoTokenPool.applyChainUpdates(new uint64[](0), chainUpdate); + } + + function test_ReleaseOrMintSuccess() public { + uint256 amount = 100; + deal(address(s_token), address(s_ghoTokenPool), amount); + + // Inflate current bridged amount so it can be reduced in `releaseOrMint` function + vm.startPrank(s_allowedOnRamp); + s_ghoTokenPool.lockOrBurn( + Pool.LockOrBurnInV1({ + receiver: bytes(""), + remoteChainSelector: DEST_CHAIN_SELECTOR, + originalSender: STRANGER, + amount: amount, + localToken: address(s_token) + }) + ); + + vm.expectEmit(); + emit TokensConsumed(amount); + vm.expectEmit(); + emit Released(s_allowedOffRamp, OWNER, amount); + + vm.startPrank(s_allowedOffRamp); + s_ghoTokenPool.releaseOrMint( + Pool.ReleaseOrMintInV1({ + originalSender: bytes(""), + amount: amount, + remoteChainSelector: SOURCE_CHAIN_SELECTOR, + receiver: OWNER, + localToken: address(s_token), + sourcePoolAddress: abi.encode(s_sourcePool), + sourcePoolData: bytes(""), + offchainTokenData: bytes("") + }) + ); + + assertEq(s_ghoTokenPool.getCurrentBridgedAmount(), 0); + } + + function testFuzz_ReleaseOrMintSuccess(address recipient, uint256 amount, uint256 bridgedAmount) public { + // Since the owner already has tokens this would break the checks + vm.assume(recipient != OWNER); + vm.assume(recipient != address(0)); + vm.assume(recipient != address(s_token)); + + amount = uint128(bound(amount, 2, type(uint128).max)); + bridgedAmount = uint128(bound(bridgedAmount, amount, type(uint128).max)); + + // Inflate current bridged amount so it can be reduced in `releaseOrMint` function + vm.startPrank(AAVE_DAO); + s_ghoTokenPool.setBridgeLimit(bridgedAmount); + s_ghoTokenPool.setChainRateLimiterConfig( + DEST_CHAIN_SELECTOR, + RateLimiter.Config({isEnabled: true, capacity: type(uint128).max, rate: 1e15}), + RateLimiter.Config({isEnabled: true, capacity: type(uint128).max, rate: 1e15}) + ); + vm.warp(block.timestamp + 1e50); // wait to refill capacity + vm.startPrank(s_allowedOnRamp); + s_ghoTokenPool.lockOrBurn( + Pool.LockOrBurnInV1({ + receiver: bytes(""), + remoteChainSelector: DEST_CHAIN_SELECTOR, + originalSender: STRANGER, + amount: bridgedAmount, + localToken: address(s_token) + }) + ); + + // Makes sure the pool always has enough funds + deal(address(s_token), address(s_ghoTokenPool), amount); + vm.startPrank(s_allowedOffRamp); + + uint256 capacity = _getInboundRateLimiterConfig().capacity; + uint256 bridgedAmountAfter = bridgedAmount; + // Determine if we hit the rate limit or the txs should succeed. 
+ if (amount > capacity) { + vm.expectRevert( + abi.encodeWithSelector(RateLimiter.TokenMaxCapacityExceeded.selector, capacity, amount, address(s_token)) + ); + } else { + // Only rate limit if the amount is >0 + if (amount > 0) { + vm.expectEmit(); + emit TokensConsumed(amount); + } + + vm.expectEmit(); + emit Released(s_allowedOffRamp, recipient, amount); + + bridgedAmountAfter -= amount; + } + + s_ghoTokenPool.releaseOrMint( + Pool.ReleaseOrMintInV1({ + originalSender: bytes(""), + amount: amount, + remoteChainSelector: SOURCE_CHAIN_SELECTOR, + receiver: recipient, + localToken: address(s_token), + sourcePoolAddress: abi.encode(s_sourcePool), + sourcePoolData: bytes(""), + offchainTokenData: bytes("") + }) + ); + + assertEq(s_ghoTokenPool.getCurrentBridgedAmount(), bridgedAmountAfter); + } + + function testChainNotAllowedReverts() public { + uint256 amount = 1e5; + vm.startPrank(AAVE_DAO); + // increase bridge amount which can later be offRamped + s_ghoTokenPool.setCurrentBridgedAmount(amount); + + uint64[] memory remoteChainSelectorsToRemove = new uint64[](1); + remoteChainSelectorsToRemove[0] = SOURCE_CHAIN_SELECTOR; + vm.expectEmit(address(s_ghoTokenPool)); + emit ChainRemoved(SOURCE_CHAIN_SELECTOR); + s_ghoTokenPool.applyChainUpdates(remoteChainSelectorsToRemove, new UpgradeableTokenPool.ChainUpdate[](0)); + + vm.startPrank(s_allowedOffRamp); + + vm.expectRevert(abi.encodeWithSelector(UpgradeableTokenPool.ChainNotAllowed.selector, SOURCE_CHAIN_SELECTOR)); + s_ghoTokenPool.releaseOrMint( + Pool.ReleaseOrMintInV1({ + originalSender: bytes(""), + amount: amount, + remoteChainSelector: SOURCE_CHAIN_SELECTOR, + receiver: OWNER, + localToken: address(s_token), + sourcePoolAddress: abi.encode(s_sourcePool), + sourcePoolData: bytes(""), + offchainTokenData: bytes("") + }) + ); + } + + function testPoolMintNotHealthyReverts() public { + vm.startPrank(s_allowedOnRamp); + s_ghoTokenPool.lockOrBurn( + Pool.LockOrBurnInV1({ + receiver: bytes(""), + remoteChainSelector: DEST_CHAIN_SELECTOR, + originalSender: STRANGER, + amount: 1e5, + localToken: address(s_token) + }) + ); + + // Should not mint tokens if cursed. 
+ s_mockRMN.setGlobalCursed(true); + uint256 before = s_token.balanceOf(OWNER); + vm.startPrank(s_allowedOffRamp); + vm.expectRevert(EVM2EVMOffRamp.CursedByRMN.selector); + s_ghoTokenPool.releaseOrMint( + Pool.ReleaseOrMintInV1({ + originalSender: bytes(""), + amount: 1e5, + remoteChainSelector: SOURCE_CHAIN_SELECTOR, + receiver: OWNER, + localToken: address(s_token), + sourcePoolAddress: abi.encode(s_sourcePool), + sourcePoolData: bytes(""), + offchainTokenData: bytes("") + }) + ); + assertEq(s_token.balanceOf(OWNER), before); + } + + function testReleaseNoFundsReverts() public { + uint256 amount = 1; + + // Inflate current bridged amount so it can be reduced in `releaseOrMint` function + vm.startPrank(s_allowedOnRamp); + s_ghoTokenPool.lockOrBurn( + Pool.LockOrBurnInV1({ + receiver: bytes(""), + remoteChainSelector: DEST_CHAIN_SELECTOR, + originalSender: STRANGER, + amount: amount, + localToken: address(s_token) + }) + ); + + vm.expectRevert(stdError.arithmeticError); + vm.startPrank(s_allowedOffRamp); + s_ghoTokenPool.releaseOrMint( + Pool.ReleaseOrMintInV1({ + originalSender: bytes(""), + amount: amount, + remoteChainSelector: SOURCE_CHAIN_SELECTOR, + receiver: STRANGER, + localToken: address(s_token), + sourcePoolAddress: abi.encode(s_sourcePool), + sourcePoolData: bytes(""), + offchainTokenData: bytes("") + }) + ); + } + + function testTokenMaxCapacityExceededReverts() public { + RateLimiter.Config memory rateLimiterConfig = _getInboundRateLimiterConfig(); + uint256 capacity = rateLimiterConfig.capacity; + uint256 amount = 10 * capacity; + + // Inflate current bridged amount so it can be reduced in `releaseOrMint` function + vm.startPrank(AAVE_DAO); + s_ghoTokenPool.setBridgeLimit(amount); + s_ghoTokenPool.setChainRateLimiterConfig( + DEST_CHAIN_SELECTOR, + RateLimiter.Config({isEnabled: true, capacity: type(uint128).max, rate: 1e15}), + _getInboundRateLimiterConfig() + ); + vm.warp(block.timestamp + 1e50); // wait to refill capacity + vm.startPrank(s_allowedOnRamp); + s_ghoTokenPool.lockOrBurn( + Pool.LockOrBurnInV1({ + receiver: bytes(""), + remoteChainSelector: DEST_CHAIN_SELECTOR, + originalSender: STRANGER, + amount: amount, + localToken: address(s_token) + }) + ); + + vm.expectRevert( + abi.encodeWithSelector(RateLimiter.TokenMaxCapacityExceeded.selector, capacity, amount, address(s_token)) + ); + vm.startPrank(s_allowedOffRamp); + s_ghoTokenPool.releaseOrMint( + Pool.ReleaseOrMintInV1({ + originalSender: bytes(""), + amount: amount, + remoteChainSelector: SOURCE_CHAIN_SELECTOR, + receiver: STRANGER, + localToken: address(s_token), + sourcePoolAddress: abi.encode(s_sourcePool), + sourcePoolData: bytes(""), + offchainTokenData: bytes("") + }) + ); + } + + function testBridgedAmountNotEnoughReverts() public { + uint256 amount = 10; + vm.expectRevert(abi.encodeWithSelector(UpgradeableLockReleaseTokenPool.NotEnoughBridgedAmount.selector)); + vm.startPrank(s_allowedOffRamp); + s_ghoTokenPool.releaseOrMint( + Pool.ReleaseOrMintInV1({ + originalSender: bytes(""), + amount: amount, + remoteChainSelector: SOURCE_CHAIN_SELECTOR, + receiver: STRANGER, + localToken: address(s_token), + sourcePoolAddress: abi.encode(s_sourcePool), + sourcePoolData: bytes(""), + offchainTokenData: bytes("") + }) + ); + } +} + +contract GhoTokenPoolEthereum_canAcceptLiquidity is GhoTokenPoolEthereumSetup { + function test_CanAcceptLiquiditySuccess() public { + assertEq(true, s_ghoTokenPool.canAcceptLiquidity()); + + s_ghoTokenPool = new UpgradeableLockReleaseTokenPool(address(s_token), 18, address(s_mockRMN), 
false, false); + + assertEq(false, s_ghoTokenPool.canAcceptLiquidity()); + } +} + +contract GhoTokenPoolEthereum_provideLiquidity is GhoTokenPoolEthereumSetup { + function testFuzz_ProvideLiquiditySuccess(uint256 amount) public { + vm.assume(amount < type(uint128).max); + + uint256 balancePre = s_token.balanceOf(OWNER); + s_token.approve(address(s_ghoTokenPool), amount); + + s_ghoTokenPool.provideLiquidity(amount); + + assertEq(s_token.balanceOf(OWNER), balancePre - amount); + assertEq(s_token.balanceOf(address(s_ghoTokenPool)), amount); + } + + // Reverts + + function test_UnauthorizedReverts() public { + vm.startPrank(STRANGER); + vm.expectRevert(abi.encodeWithSelector(Unauthorized.selector, STRANGER)); + + s_ghoTokenPool.provideLiquidity(1); + } + + function testFuzz_ExceedsAllowance(uint256 amount) public { + vm.assume(amount > 0); + + vm.expectRevert(stdError.arithmeticError); + s_ghoTokenPool.provideLiquidity(amount); + } + + function testLiquidityNotAcceptedReverts() public { + s_ghoTokenPool = new UpgradeableLockReleaseTokenPool(address(s_token), 18, address(s_mockRMN), false, false); + + vm.expectRevert(LockReleaseTokenPool.LiquidityNotAccepted.selector); + s_ghoTokenPool.provideLiquidity(1); + } +} + +contract GhoTokenPoolEthereum_withdrawalLiquidity is GhoTokenPoolEthereumSetup { + function testFuzz_WithdrawalLiquiditySuccess(uint256 amount) public { + vm.assume(amount < type(uint128).max); + + uint256 balancePre = s_token.balanceOf(OWNER); + s_token.approve(address(s_ghoTokenPool), amount); + s_ghoTokenPool.provideLiquidity(amount); + + s_ghoTokenPool.withdrawLiquidity(amount); + + assertEq(s_token.balanceOf(OWNER), balancePre); + } + + // Reverts + + function test_UnauthorizedReverts() public { + vm.startPrank(STRANGER); + vm.expectRevert(abi.encodeWithSelector(Unauthorized.selector, STRANGER)); + + s_ghoTokenPool.withdrawLiquidity(1); + } + + function testInsufficientLiquidityReverts() public { + uint256 maxUint128 = 2 ** 128 - 1; + + s_token.approve(address(s_ghoTokenPool), maxUint128); + s_ghoTokenPool.provideLiquidity(maxUint128); + + changePrank(address(s_ghoTokenPool)); + s_token.transfer(OWNER, maxUint128); + changePrank(OWNER); + + vm.expectRevert(LockReleaseTokenPool.InsufficientLiquidity.selector); + s_ghoTokenPool.withdrawLiquidity(1); + } +} + +contract GhoTokenPoolEthereum_transferLiquidity is GhoTokenPoolEthereumSetup { + UpgradeableLockReleaseTokenPool internal s_oldLockReleaseTokenPool; + + uint256 internal s_amount = 100_000_000e18; + + error BridgeLimitExceeded(uint256 limit); + error InsufficientLiquidity(); + + function setUp() public virtual override { + super.setUp(); + + s_oldLockReleaseTokenPool = UpgradeableLockReleaseTokenPool( + _deployUpgradeableLockReleaseTokenPool( + address(s_token), + address(s_mockRMN), + address(s_sourceRouter), + AAVE_DAO, + INITIAL_BRIDGE_LIMIT, + PROXY_ADMIN + ) + ); + deal(address(s_token), address(s_oldLockReleaseTokenPool), s_amount); + changePrank(AAVE_DAO); + s_oldLockReleaseTokenPool.setCurrentBridgedAmount(s_amount); + } + + function testFuzz_TransferLiquidity(uint256 amount) public { + amount = bound(amount, 1, s_amount); + + s_oldLockReleaseTokenPool.setRebalancer(address(s_ghoTokenPool)); + + s_ghoTokenPool.transferLiquidity(address(s_oldLockReleaseTokenPool), amount); + + assertEq(s_token.balanceOf(address(s_ghoTokenPool)), amount); + assertEq(s_token.balanceOf(address(s_oldLockReleaseTokenPool)), s_amount - amount); + } + + // Reverts + + function test_UnauthorizedReverts() public { + changePrank(STRANGER); + 
vm.expectRevert(OnlyCallableByOwner.selector); + + s_ghoTokenPool.transferLiquidity(address(1), 1); + } + + function testFuzz_RevertsTransferLiquidityExcess(uint256 amount) public { + uint256 existingLiquidity = s_token.balanceOf(address(s_oldLockReleaseTokenPool)); + amount = bound(amount, existingLiquidity + 1, type(uint256).max); + + s_oldLockReleaseTokenPool.setRebalancer(address(s_ghoTokenPool)); + + vm.expectRevert(InsufficientLiquidity.selector); + s_ghoTokenPool.transferLiquidity(address(s_oldLockReleaseTokenPool), amount); + } +} + +contract GhoTokenPoolEthereum_setCurrentBridgedAmount is GhoTokenPoolEthereumSetup { + function test_UnauthorizedReverts() public { + changePrank(STRANGER); + vm.expectRevert(OnlyCallableByOwner.selector); + + s_ghoTokenPool.setCurrentBridgedAmount(1); + } + + function test_SetCurrentBridgedAmountAdminSuccess(uint256 amount) public { + changePrank(AAVE_DAO); + s_ghoTokenPool.setCurrentBridgedAmount(amount); + + assertEq(s_ghoTokenPool.getCurrentBridgedAmount(), amount); + } +} + +contract GhoTokenPoolEthereum_supportsInterface is GhoTokenPoolEthereumSetup { + function testSupportsInterfaceSuccess() public view { + assertTrue(s_ghoTokenPool.supportsInterface(type(ILiquidityContainer).interfaceId)); + assertTrue(s_ghoTokenPool.supportsInterface(type(IPoolV1).interfaceId)); + assertTrue(s_ghoTokenPool.supportsInterface(type(IERC165).interfaceId)); + } +} + +contract GhoTokenPoolEthereum_setChainRateLimiterConfig is GhoTokenPoolEthereumSetup { + event ConfigChanged(RateLimiter.Config); + event ChainConfigured( + uint64 chainSelector, + RateLimiter.Config outboundRateLimiterConfig, + RateLimiter.Config inboundRateLimiterConfig + ); + + uint64 internal s_remoteChainSelector; + + function setUp() public virtual override { + GhoTokenPoolEthereumSetup.setUp(); + UpgradeableTokenPool.ChainUpdate[] memory chainUpdates = new UpgradeableTokenPool.ChainUpdate[](1); + s_remoteChainSelector = 123124; + bytes[] memory remotePoolAddresses = new bytes[](1); + remotePoolAddresses[0] = abi.encode(s_sourcePool); + chainUpdates[0] = UpgradeableTokenPool.ChainUpdate({ + remoteChainSelector: s_remoteChainSelector, + remotePoolAddresses: remotePoolAddresses, + remoteTokenAddress: abi.encode(s_sourceToken), + outboundRateLimiterConfig: _getOutboundRateLimiterConfig(), + inboundRateLimiterConfig: _getInboundRateLimiterConfig() + }); + changePrank(AAVE_DAO); + s_ghoTokenPool.applyChainUpdates(new uint64[](0), chainUpdates); + changePrank(OWNER); + } + + function testFuzz_SetChainRateLimiterConfigSuccess(uint128 capacity, uint128 rate, uint32 newTime) public { + // Cap the lower bound to 4 so 4/2 is still >= 2 + vm.assume(capacity >= 4); + // Cap the lower bound to 2 so 2/2 is still >= 1 + rate = uint128(bound(rate, 2, capacity - 2)); + // Bucket updates only work on increasing time + newTime = uint32(bound(newTime, block.timestamp + 1, type(uint32).max)); + vm.warp(newTime); + + uint256 oldOutboundTokens = s_ghoTokenPool.getCurrentOutboundRateLimiterState(s_remoteChainSelector).tokens; + uint256 oldInboundTokens = s_ghoTokenPool.getCurrentInboundRateLimiterState(s_remoteChainSelector).tokens; + + RateLimiter.Config memory newOutboundConfig = RateLimiter.Config({isEnabled: true, capacity: capacity, rate: rate}); + RateLimiter.Config memory newInboundConfig = RateLimiter.Config({ + isEnabled: true, + capacity: capacity / 2, + rate: rate / 2 + }); + + vm.expectEmit(); + emit ConfigChanged(newOutboundConfig); + vm.expectEmit(); + emit ConfigChanged(newInboundConfig); + 
vm.expectEmit(); + emit ChainConfigured(s_remoteChainSelector, newOutboundConfig, newInboundConfig); + + changePrank(AAVE_DAO); + s_ghoTokenPool.setChainRateLimiterConfig(s_remoteChainSelector, newOutboundConfig, newInboundConfig); + + uint256 expectedTokens = RateLimiter._min(newOutboundConfig.capacity, oldOutboundTokens); + + RateLimiter.TokenBucket memory bucket = s_ghoTokenPool.getCurrentOutboundRateLimiterState(s_remoteChainSelector); + assertEq(bucket.capacity, newOutboundConfig.capacity); + assertEq(bucket.rate, newOutboundConfig.rate); + assertEq(bucket.tokens, expectedTokens); + assertEq(bucket.lastUpdated, newTime); + + expectedTokens = RateLimiter._min(newInboundConfig.capacity, oldInboundTokens); + + bucket = s_ghoTokenPool.getCurrentInboundRateLimiterState(s_remoteChainSelector); + assertEq(bucket.capacity, newInboundConfig.capacity); + assertEq(bucket.rate, newInboundConfig.rate); + assertEq(bucket.tokens, expectedTokens); + assertEq(bucket.lastUpdated, newTime); + } + + function testOnlyOwnerOrRateLimitAdminSuccess() public { + address rateLimiterAdmin = address(28973509103597907); + + changePrank(AAVE_DAO); + s_ghoTokenPool.setRateLimitAdmin(rateLimiterAdmin); + + changePrank(rateLimiterAdmin); + + s_ghoTokenPool.setChainRateLimiterConfig( + s_remoteChainSelector, + _getOutboundRateLimiterConfig(), + _getInboundRateLimiterConfig() + ); + + changePrank(AAVE_DAO); + + s_ghoTokenPool.setChainRateLimiterConfig( + s_remoteChainSelector, + _getOutboundRateLimiterConfig(), + _getInboundRateLimiterConfig() + ); + } + + // Reverts + + function testOnlyOwnerReverts() public { + changePrank(STRANGER); + + vm.expectRevert(abi.encodeWithSelector(Unauthorized.selector, STRANGER)); + s_ghoTokenPool.setChainRateLimiterConfig( + s_remoteChainSelector, + _getOutboundRateLimiterConfig(), + _getInboundRateLimiterConfig() + ); + } + + function testNonExistentChainReverts() public { + uint64 wrongChainSelector = 9084102894; + + vm.expectRevert(abi.encodeWithSelector(UpgradeableTokenPool.NonExistentChain.selector, wrongChainSelector)); + changePrank(AAVE_DAO); + s_ghoTokenPool.setChainRateLimiterConfig( + wrongChainSelector, + _getOutboundRateLimiterConfig(), + _getInboundRateLimiterConfig() + ); + } +} + +contract GhoTokenPoolEthereum_setRateLimitAdmin is GhoTokenPoolEthereumSetup { + function testSetRateLimitAdminSuccess() public { + assertEq(address(0), s_ghoTokenPool.getRateLimitAdmin()); + changePrank(AAVE_DAO); + s_ghoTokenPool.setRateLimitAdmin(OWNER); + assertEq(OWNER, s_ghoTokenPool.getRateLimitAdmin()); + } + + // Reverts + + function testSetRateLimitAdminReverts() public { + vm.startPrank(STRANGER); + + vm.expectRevert(OnlyCallableByOwner.selector); + s_ghoTokenPool.setRateLimitAdmin(STRANGER); + } +} + +contract GhoTokenPoolEthereum_setBridgeLimit is GhoTokenPoolEthereumSetup { + event BridgeLimitUpdated(uint256 oldBridgeLimit, uint256 newBridgeLimit); + event BridgeLimitAdminUpdated(address indexed oldAdmin, address indexed newAdmin); + + function testSetBridgeLimitAdminSuccess() public { + assertEq(INITIAL_BRIDGE_LIMIT, s_ghoTokenPool.getBridgeLimit()); + + uint256 newBridgeLimit = INITIAL_BRIDGE_LIMIT * 2; + + vm.expectEmit(); + emit BridgeLimitUpdated(INITIAL_BRIDGE_LIMIT, newBridgeLimit); + + vm.startPrank(AAVE_DAO); + s_ghoTokenPool.setBridgeLimit(newBridgeLimit); + + assertEq(newBridgeLimit, s_ghoTokenPool.getBridgeLimit()); + + // Bridge Limit Admin + address bridgeLimitAdmin = address(28973509103597907); + + vm.expectEmit(); + emit BridgeLimitAdminUpdated(address(0), 
bridgeLimitAdmin); + + s_ghoTokenPool.setBridgeLimitAdmin(bridgeLimitAdmin); + + vm.startPrank(bridgeLimitAdmin); + newBridgeLimit += 1; + + s_ghoTokenPool.setBridgeLimit(newBridgeLimit); + + assertEq(newBridgeLimit, s_ghoTokenPool.getBridgeLimit()); + } + + function testZeroBridgeLimitReverts() public { + vm.stopPrank(); + vm.startPrank(AAVE_DAO); + s_ghoTokenPool.setBridgeLimit(0); + + uint256 amount = 1; + + vm.expectRevert(abi.encodeWithSelector(UpgradeableLockReleaseTokenPool.BridgeLimitExceeded.selector, 0)); + vm.startPrank(s_allowedOnRamp); + s_ghoTokenPool.lockOrBurn( + Pool.LockOrBurnInV1({ + receiver: bytes(""), + remoteChainSelector: DEST_CHAIN_SELECTOR, + originalSender: STRANGER, + amount: amount, + localToken: address(s_token) + }) + ); + } + + function testBridgeLimitBelowCurrent() public { + // Increase current bridged amount to 10 + uint256 amount = 10e18; + vm.startPrank(s_allowedOnRamp); + s_ghoTokenPool.lockOrBurn( + Pool.LockOrBurnInV1({ + receiver: bytes(""), + remoteChainSelector: DEST_CHAIN_SELECTOR, + originalSender: STRANGER, + amount: amount, + localToken: address(s_token) + }) + ); + + // Reduce bridge limit below current bridged amount + vm.startPrank(AAVE_DAO); + uint256 newBridgeLimit = amount - 1; + s_ghoTokenPool.setBridgeLimit(newBridgeLimit); + assertEq(s_ghoTokenPool.getCurrentBridgedAmount(), amount); + assertEq(s_ghoTokenPool.getBridgeLimit(), newBridgeLimit); + assertGt(s_ghoTokenPool.getCurrentBridgedAmount(), s_ghoTokenPool.getBridgeLimit()); + + // Lock reverts due to maxed out bridge limit + vm.expectRevert( + abi.encodeWithSelector(UpgradeableLockReleaseTokenPool.BridgeLimitExceeded.selector, newBridgeLimit) + ); + vm.startPrank(s_allowedOnRamp); + s_ghoTokenPool.lockOrBurn( + Pool.LockOrBurnInV1({ + receiver: bytes(""), + remoteChainSelector: DEST_CHAIN_SELECTOR, + originalSender: STRANGER, + amount: 1, + localToken: address(s_token) + }) + ); + + // Increase bridge limit by 1 + vm.startPrank(AAVE_DAO); + newBridgeLimit = amount + 1; + s_ghoTokenPool.setBridgeLimit(newBridgeLimit); + assertEq(s_ghoTokenPool.getCurrentBridgedAmount(), amount); + assertEq(s_ghoTokenPool.getBridgeLimit(), newBridgeLimit); + assertGt(s_ghoTokenPool.getBridgeLimit(), s_ghoTokenPool.getCurrentBridgedAmount()); + + // Bridge limit maxed out again + vm.startPrank(s_allowedOnRamp); + s_ghoTokenPool.lockOrBurn( + Pool.LockOrBurnInV1({ + receiver: bytes(""), + remoteChainSelector: DEST_CHAIN_SELECTOR, + originalSender: STRANGER, + amount: 1, + localToken: address(s_token) + }) + ); + assertEq(s_ghoTokenPool.getBridgeLimit(), s_ghoTokenPool.getCurrentBridgedAmount()); + } + + function testCurrentBridgedAmountRecover() public { + // Reach maximum + vm.startPrank(s_allowedOnRamp); + s_ghoTokenPool.lockOrBurn( + Pool.LockOrBurnInV1({ + receiver: bytes(""), + remoteChainSelector: DEST_CHAIN_SELECTOR, + originalSender: STRANGER, + amount: INITIAL_BRIDGE_LIMIT, + localToken: address(s_token) + }) + ); + assertEq(s_ghoTokenPool.getCurrentBridgedAmount(), INITIAL_BRIDGE_LIMIT); + assertEq(s_ghoTokenPool.getBridgeLimit(), s_ghoTokenPool.getCurrentBridgedAmount()); + + // Lock reverts due to maxed out bridge limit + vm.expectRevert( + abi.encodeWithSelector(UpgradeableLockReleaseTokenPool.BridgeLimitExceeded.selector, INITIAL_BRIDGE_LIMIT) + ); + s_ghoTokenPool.lockOrBurn( + Pool.LockOrBurnInV1({ + receiver: bytes(""), + remoteChainSelector: DEST_CHAIN_SELECTOR, + originalSender: STRANGER, + amount: 1, + localToken: address(s_token) + }) + ); + + // Amount available to bridge 
recovers thanks to liquidity coming back + UpgradeableTokenPool.ChainUpdate[] memory chainUpdate = new UpgradeableTokenPool.ChainUpdate[](1); + bytes[] memory remotePoolAddresses = new bytes[](1); + remotePoolAddresses[0] = abi.encode(s_sourcePool); + chainUpdate[0] = UpgradeableTokenPool.ChainUpdate({ + remoteChainSelector: SOURCE_CHAIN_SELECTOR, + remotePoolAddresses: remotePoolAddresses, + remoteTokenAddress: abi.encode(s_sourceToken), + outboundRateLimiterConfig: _getOutboundRateLimiterConfig(), + inboundRateLimiterConfig: _getInboundRateLimiterConfig() + }); + + changePrank(AAVE_DAO); + s_ghoTokenPool.applyChainUpdates(new uint64[](0), chainUpdate); + + uint256 amount = 10; + deal(address(s_token), address(s_ghoTokenPool), amount); + vm.startPrank(s_allowedOffRamp); + s_ghoTokenPool.releaseOrMint( + Pool.ReleaseOrMintInV1({ + originalSender: bytes(""), + amount: amount, + remoteChainSelector: SOURCE_CHAIN_SELECTOR, + receiver: OWNER, + localToken: address(s_token), + sourcePoolAddress: abi.encode(s_sourcePool), + sourcePoolData: bytes(""), + offchainTokenData: bytes("") + }) + ); + assertEq(s_ghoTokenPool.getCurrentBridgedAmount(), INITIAL_BRIDGE_LIMIT - amount); + } + + // Reverts + + function testSetBridgeLimitAdminReverts() public { + vm.startPrank(STRANGER); + + vm.expectRevert(abi.encodeWithSelector(Unauthorized.selector, STRANGER)); + s_ghoTokenPool.setBridgeLimit(0); + } +} + +contract GhoTokenPoolEthereum_setBridgeLimitAdmin is GhoTokenPoolEthereumSetup { + event BridgeLimitAdminUpdated(address indexed oldAdmin, address indexed newAdmin); + + function testSetBridgeLimitAdminSuccess() public { + assertEq(address(0), s_ghoTokenPool.getBridgeLimitAdmin()); + + address bridgeLimitAdmin = address(28973509103597907); + changePrank(AAVE_DAO); + + vm.expectEmit(); + emit BridgeLimitAdminUpdated(address(0), bridgeLimitAdmin); + + s_ghoTokenPool.setBridgeLimitAdmin(bridgeLimitAdmin); + + assertEq(bridgeLimitAdmin, s_ghoTokenPool.getBridgeLimitAdmin()); + } + + // Reverts + + function testSetBridgeLimitAdminReverts() public { + vm.startPrank(STRANGER); + + vm.expectRevert(OnlyCallableByOwner.selector); + s_ghoTokenPool.setBridgeLimitAdmin(STRANGER); + } +} + +contract GhoTokenPoolEthereum_upgradeability is GhoTokenPoolEthereumSetup { + function testInitialization() public { + // Upgradeability + assertEq(_getUpgradeableVersion(address(s_ghoTokenPool)), 1); + vm.startPrank(PROXY_ADMIN); + (bool ok, bytes memory result) = address(s_ghoTokenPool).staticcall( + abi.encodeWithSelector(TransparentUpgradeableProxy.admin.selector) + ); + assertTrue(ok, "proxy admin fetch failed"); + address decodedProxyAdmin = abi.decode(result, (address)); + assertEq(decodedProxyAdmin, PROXY_ADMIN, "proxy admin is wrong"); + assertEq(decodedProxyAdmin, _getProxyAdminAddress(address(s_ghoTokenPool)), "proxy admin is wrong"); + + // TokenPool + vm.startPrank(OWNER); + assertEq(s_ghoTokenPool.getAllowList().length, 0); + assertEq(s_ghoTokenPool.getAllowListEnabled(), false); + assertEq(s_ghoTokenPool.getRmnProxy(), address(s_mockRMN)); + assertEq(s_ghoTokenPool.getRouter(), address(s_sourceRouter)); + assertEq(address(s_ghoTokenPool.getToken()), address(s_token)); + assertEq(s_ghoTokenPool.owner(), AAVE_DAO, "owner is wrong"); + } + + function testUpgrade() public { + MockUpgradeable newImpl = new MockUpgradeable(); + bytes memory mockImpleParams = abi.encodeWithSignature("initialize()"); + vm.startPrank(PROXY_ADMIN); + 
TransparentUpgradeableProxy(payable(address(s_ghoTokenPool))).upgradeToAndCall(address(newImpl), mockImpleParams); + + vm.startPrank(OWNER); + assertEq(_getUpgradeableVersion(address(s_ghoTokenPool)), 2); + } + + function testUpgradeAdminReverts() public { + vm.expectRevert(); + TransparentUpgradeableProxy(payable(address(s_ghoTokenPool))).upgradeToAndCall(address(0), bytes("")); + assertEq(_getUpgradeableVersion(address(s_ghoTokenPool)), 1); + + vm.expectRevert(); + TransparentUpgradeableProxy(payable(address(s_ghoTokenPool))).upgradeTo(address(0)); + assertEq(_getUpgradeableVersion(address(s_ghoTokenPool)), 1); + } + + function testChangeAdmin() public { + assertEq(_getProxyAdminAddress(address(s_ghoTokenPool)), PROXY_ADMIN); + + address newAdmin = makeAddr("newAdmin"); + vm.startPrank(PROXY_ADMIN); + TransparentUpgradeableProxy(payable(address(s_ghoTokenPool))).changeAdmin(newAdmin); + + assertEq(_getProxyAdminAddress(address(s_ghoTokenPool)), newAdmin, "Admin change failed"); + } + + function testChangeAdminAdminReverts() public { + assertEq(_getProxyAdminAddress(address(s_ghoTokenPool)), PROXY_ADMIN); + + address newAdmin = makeAddr("newAdmin"); + vm.expectRevert(); + TransparentUpgradeableProxy(payable(address(s_ghoTokenPool))).changeAdmin(newAdmin); + + assertEq(_getProxyAdminAddress(address(s_ghoTokenPool)), PROXY_ADMIN, "Unauthorized admin change"); + } +} diff --git a/contracts/src/v0.8/ccip/test/pools/GHO/GhoTokenPoolEthereumBridgeLimit.t.sol b/contracts/src/v0.8/ccip/test/pools/GHO/GhoTokenPoolEthereumBridgeLimit.t.sol new file mode 100644 index 0000000000..9007b83fb5 --- /dev/null +++ b/contracts/src/v0.8/ccip/test/pools/GHO/GhoTokenPoolEthereumBridgeLimit.t.sol @@ -0,0 +1,1042 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity ^0.8.0; + +import {GhoToken} from "@aave-gho-core/gho/GhoToken.sol"; + +import {IPoolV1} from "../../../interfaces/IPool.sol"; +import {Pool} from "../../../libraries/Pool.sol"; +import {UpgradeableLockReleaseTokenPool} from "../../../pools/GHO/UpgradeableLockReleaseTokenPool.sol"; +import {GhoBaseTest} from "./GhoBaseTest.t.sol"; + +contract GhoTokenPoolEthereumBridgeLimitSetup is GhoBaseTest { + error BridgeLimitExceeded(uint256 bridgeLimit); + + UtilsStorage public s; + + function setUp() public virtual override { + // Ethereum with id 0 + s.chainsList.push(0); + s.tokens[0] = address(new GhoToken(AAVE_DAO)); + s.pools[0] = _deployUpgradeableLockReleaseTokenPool( + s.tokens[0], + RMN_PROXY, + ROUTER, + AAVE_DAO, + INITIAL_BRIDGE_LIMIT, + PROXY_ADMIN + ); + + // Mock calls for bridging + vm.mockCall(ROUTER, abi.encodeWithSelector(bytes4(keccak256("getOnRamp(uint64)"))), abi.encode(RAMP)); + vm.mockCall(ROUTER, abi.encodeWithSelector(bytes4(keccak256("isOffRamp(uint64,address)"))), abi.encode(true)); + vm.mockCall(RMN_PROXY, abi.encodeWithSelector(bytes4(keccak256("isCursed(bytes16)"))), abi.encode(false)); + } + + function _assertInvariant() internal view { + // Check bridged + assertEq(UpgradeableLockReleaseTokenPool(s.pools[0]).getCurrentBridgedAmount(), s.bridged); + + // Check levels and buckets + uint256 sumLevels; + uint256 chainId; + uint256 capacity; + uint256 level; + for (uint i = 1; i < s.chainsList.length; i++) { + // not counting Ethereum -{0} + chainId = s.chainsList[i]; + (capacity, level) = GhoToken(s.tokens[chainId]).getFacilitatorBucket(s.pools[chainId]); + + // Aggregate levels + sumLevels += level; + + assertEq(capacity, s.bucketCapacities[chainId], "wrong bucket capacity"); + assertEq(level, s.bucketLevels[chainId], "wrong 
bucket level"); + + assertEq( + capacity, + UpgradeableLockReleaseTokenPool(s.pools[0]).getBridgeLimit(), + "capacity must be equal to bridgeLimit" + ); + assertLe( + level, + UpgradeableLockReleaseTokenPool(s.pools[0]).getBridgeLimit(), + "level cannot be higher than bridgeLimit" + ); + } + // Check bridged is equal to sum of levels + assertEq(UpgradeableLockReleaseTokenPool(s.pools[0]).getCurrentBridgedAmount(), sumLevels, "wrong bridged"); + assertEq(s.remoteLiquidity, sumLevels, "wrong bridged"); + } +} + +contract GhoTokenPoolEthereumBridgeLimitSimpleScenario is GhoTokenPoolEthereumBridgeLimitSetup { + function setUp() public virtual override { + super.setUp(); + + // Arbitrum + _addBridge(s, 1, INITIAL_BRIDGE_LIMIT); + _enableLane(s, 0, 1); + } + + function testFuzz_Bridge(uint256 amount) public { + uint256 maxAmount = _getMaxToBridgeOut(s, 0); + amount = bound(amount, 1, maxAmount); + + _assertInvariant(); + + assertEq(_getMaxToBridgeOut(s, 0), maxAmount); + assertEq(_getMaxToBridgeIn(s, 0), 0); + assertEq(_getMaxToBridgeOut(s, 1), 0); + assertEq(_getMaxToBridgeIn(s, 1), s.bucketCapacities[1]); + + deal(s.tokens[0], USER, amount); + _moveGhoOrigin(s, 0, 1, USER, amount); + + assertEq(_getMaxToBridgeOut(s, 0), maxAmount - amount); + assertEq(_getMaxToBridgeIn(s, 0), amount); + assertEq(_getMaxToBridgeOut(s, 1), 0); + assertEq(_getMaxToBridgeIn(s, 1), s.bucketCapacities[1]); + + _moveGhoDestination(s, 0, 1, USER, amount); + + assertEq(_getMaxToBridgeOut(s, 0), maxAmount - amount); + assertEq(_getMaxToBridgeIn(s, 0), amount); + assertEq(_getMaxToBridgeOut(s, 1), s.bucketLevels[1]); + assertEq(_getMaxToBridgeIn(s, 1), s.bucketCapacities[1] - s.bucketLevels[1]); + + _assertInvariant(); + } + + function testBridgeAll() public { + _assertInvariant(); + + uint256 maxAmount = _getMaxToBridgeOut(s, 0); + assertEq(_getMaxToBridgeIn(s, 0), 0); + assertEq(_getMaxToBridgeOut(s, 1), 0); + assertEq(_getMaxToBridgeIn(s, 1), s.bucketCapacities[1]); + + deal(s.tokens[0], USER, maxAmount); + _moveGhoOrigin(s, 0, 1, USER, maxAmount); + + assertEq(_getMaxToBridgeOut(s, 0), 0); + assertEq(_getMaxToBridgeIn(s, 0), maxAmount); + assertEq(_getMaxToBridgeOut(s, 1), 0); + assertEq(_getMaxToBridgeIn(s, 1), s.bucketCapacities[1]); + + _moveGhoDestination(s, 0, 1, USER, maxAmount); + + assertEq(_getMaxToBridgeOut(s, 0), 0); + assertEq(_getMaxToBridgeIn(s, 0), maxAmount); + assertEq(_getMaxToBridgeOut(s, 1), s.bucketCapacities[1]); + assertEq(_getMaxToBridgeIn(s, 1), 0); + + _assertInvariant(); + } + + /// @dev Bridge out two times + function testFuzz_BridgeTwoSteps(uint256 amount1, uint256 amount2) public { + uint256 maxAmount = _getMaxToBridgeOut(s, 0); + amount1 = bound(amount1, 1, maxAmount); + amount2 = bound(amount2, 1, maxAmount); + + _assertInvariant(); + + assertEq(_getMaxToBridgeOut(s, 0), maxAmount); + assertEq(_getMaxToBridgeIn(s, 0), 0); + assertEq(_getMaxToBridgeOut(s, 1), 0); + assertEq(_getMaxToBridgeIn(s, 1), s.bucketCapacities[1]); + + deal(s.tokens[0], USER, amount1); + _moveGhoOrigin(s, 0, 1, USER, amount1); + + assertEq(_getMaxToBridgeOut(s, 0), maxAmount - amount1); + assertEq(_getMaxToBridgeIn(s, 0), amount1); + assertEq(_getMaxToBridgeOut(s, 1), 0); + assertEq(_getMaxToBridgeIn(s, 1), s.bucketCapacities[1]); + + _moveGhoDestination(s, 0, 1, USER, amount1); + + assertEq(_getMaxToBridgeOut(s, 0), maxAmount - amount1); + assertEq(_getMaxToBridgeIn(s, 0), amount1); + assertEq(_getMaxToBridgeOut(s, 1), s.bucketLevels[1]); + assertEq(_getMaxToBridgeIn(s, 1), s.bucketCapacities[1] - 
s.bucketLevels[1]); + + _assertInvariant(); + + // Bridge up to bridge limit amount + if (amount1 + amount2 > maxAmount) { + vm.expectRevert(); + vm.prank(RAMP); + IPoolV1(s.pools[0]).lockOrBurn( + Pool.LockOrBurnInV1({ + receiver: bytes(""), + remoteChainSelector: uint64(1), + originalSender: USER, + amount: amount2, + localToken: s.tokens[0] + }) + ); + + amount2 = maxAmount - amount1; + } + + if (amount2 > 0) { + _assertInvariant(); + + uint256 acc = amount1 + amount2; + deal(s.tokens[0], USER, amount2); + _moveGhoOrigin(s, 0, 1, USER, amount2); + + assertEq(_getMaxToBridgeOut(s, 0), maxAmount - acc); + assertEq(_getMaxToBridgeIn(s, 0), acc); + assertEq(_getMaxToBridgeOut(s, 1), amount1); + assertEq(_getMaxToBridgeIn(s, 1), s.bucketCapacities[1] - amount1); + + _moveGhoDestination(s, 0, 1, USER, amount2); + + assertEq(_getMaxToBridgeOut(s, 0), maxAmount - acc); + assertEq(_getMaxToBridgeIn(s, 0), acc); + assertEq(_getMaxToBridgeOut(s, 1), acc); + assertEq(_getMaxToBridgeIn(s, 1), s.bucketCapacities[1] - acc); + + _assertInvariant(); + } + } + + /// @dev Bridge some tokens out and later, bridge them back in + function testFuzz_BridgeBackAndForth(uint256 amountOut, uint256 amountIn) public { + uint256 maxAmount = _getMaxToBridgeOut(s, 0); + amountOut = bound(amountOut, 1, maxAmount); + amountIn = bound(amountIn, 1, _getCapacity(s, 1)); + + _assertInvariant(); + + assertEq(_getMaxToBridgeOut(s, 0), maxAmount); + assertEq(_getMaxToBridgeIn(s, 0), 0); + assertEq(_getMaxToBridgeOut(s, 1), 0); + assertEq(_getMaxToBridgeIn(s, 1), s.bucketCapacities[1]); + + deal(s.tokens[0], USER, amountOut); + _moveGhoOrigin(s, 0, 1, USER, amountOut); + + assertEq(_getMaxToBridgeOut(s, 0), maxAmount - amountOut); + assertEq(_getMaxToBridgeIn(s, 0), amountOut); + assertEq(_getMaxToBridgeOut(s, 1), 0); + assertEq(_getMaxToBridgeIn(s, 1), s.bucketCapacities[1]); + + _moveGhoDestination(s, 0, 1, USER, amountOut); + + assertEq(_getMaxToBridgeOut(s, 0), maxAmount - amountOut); + assertEq(_getMaxToBridgeIn(s, 0), amountOut); + assertEq(_getMaxToBridgeOut(s, 1), s.bucketLevels[1]); + assertEq(_getMaxToBridgeIn(s, 1), s.bucketCapacities[1] - s.bucketLevels[1]); + + _assertInvariant(); + + // Bridge up to current bridged amount + if (amountIn > amountOut) { + // Simulate revert on destination + vm.expectRevert(); + vm.prank(RAMP); + IPoolV1(s.pools[0]).releaseOrMint( + Pool.ReleaseOrMintInV1({ + originalSender: bytes(""), + remoteChainSelector: uint64(1), + receiver: USER, + amount: amountIn, + localToken: s.tokens[0], + sourcePoolAddress: abi.encode(address(s.pools[0])), + sourcePoolData: bytes(""), + offchainTokenData: bytes("") + }) + ); + + amountIn = amountOut; + } + + if (amountIn > 0) { + _assertInvariant(); + + uint256 acc = amountOut - amountIn; + deal(s.tokens[1], USER, amountIn); + _moveGhoOrigin(s, 1, 0, USER, amountIn); + + assertEq(_getMaxToBridgeOut(s, 0), maxAmount - amountOut); + assertEq(_getMaxToBridgeIn(s, 0), amountOut); + assertEq(_getMaxToBridgeOut(s, 1), acc); + assertEq(_getMaxToBridgeIn(s, 1), s.bucketCapacities[1] - acc); + + _moveGhoDestination(s, 1, 0, USER, amountIn); + + assertEq(_getMaxToBridgeOut(s, 0), maxAmount - acc); + assertEq(_getMaxToBridgeIn(s, 0), acc); + assertEq(_getMaxToBridgeOut(s, 1), acc); + assertEq(_getMaxToBridgeIn(s, 1), maxAmount - acc); + + _assertInvariant(); + } + } + + /// @dev Bridge from Ethereum to Arbitrum reverts if amount is higher than bridge limit + function testFuzz_BridgeBridgeLimitExceededSourceReverts(uint256 amount, uint256 bridgeAmount) public { + 
vm.assume(amount < type(uint128).max); + vm.assume(bridgeAmount < INITIAL_BRIDGE_LIMIT); + + // Inflate bridgeAmount + if (bridgeAmount > 0) { + deal(s.tokens[0], USER, bridgeAmount); + _bridgeGho(s, 0, 1, USER, bridgeAmount); + } + + deal(s.tokens[0], USER, amount); + // Simulate CCIP pull of funds + vm.startPrank(USER); + GhoToken(s.tokens[0]).transfer(s.pools[0], amount); + + if (bridgeAmount + amount > INITIAL_BRIDGE_LIMIT) { + vm.expectRevert(); + } + vm.startPrank(RAMP); + IPoolV1(s.pools[0]).lockOrBurn( + Pool.LockOrBurnInV1({ + receiver: bytes(""), + remoteChainSelector: uint64(1), + originalSender: USER, + amount: amount, + localToken: s.tokens[0] + }) + ); + } + + /// @dev Bridge from Ethereum to Arbitrum reverts if amount is higher than capacity available + function testFuzz_BridgeCapacityExceededDestinationReverts(uint256 amount, uint256 level) public { + (uint256 capacity, ) = GhoToken(s.tokens[1]).getFacilitatorBucket(s.pools[1]); + vm.assume(level < capacity); + amount = bound(amount, 1, type(uint128).max); + + // Inflate level + if (level > 0) { + _inflateFacilitatorLevel(s.pools[1], s.tokens[1], level); + } + + // Skip origin move + + // Destination execution + if (amount > capacity - level) { + vm.expectRevert(); + } + vm.prank(RAMP); + IPoolV1(s.pools[1]).releaseOrMint( + Pool.ReleaseOrMintInV1({ + originalSender: bytes(""), + remoteChainSelector: uint64(0), + receiver: USER, + amount: amount, + localToken: s.tokens[1], + sourcePoolAddress: abi.encode(address(s.pools[0])), + sourcePoolData: bytes(""), + offchainTokenData: bytes("") + }) + ); + } + + /// @dev Bridge from Arbitrum To Ethereum reverts if Arbitrum level is lower than amount + function testFuzz_BridgeBackZeroLevelSourceReverts(uint256 amount, uint256 level) public { + (uint256 capacity, ) = GhoToken(s.tokens[1]).getFacilitatorBucket(s.pools[1]); + vm.assume(level < capacity); + amount = bound(amount, 1, capacity - level); + + // Inflate level + if (level > 0) { + _inflateFacilitatorLevel(s.pools[1], s.tokens[1], level); + } + + deal(s.tokens[1], USER, amount); + // Simulate CCIP pull of funds + vm.prank(USER); + GhoToken(s.tokens[1]).transfer(s.pools[1], amount); + + if (amount > level) { + vm.expectRevert(); + } + vm.prank(RAMP); + IPoolV1(s.pools[1]).lockOrBurn( + Pool.LockOrBurnInV1({ + receiver: bytes(""), + remoteChainSelector: uint64(0), + originalSender: USER, + amount: amount, + localToken: s.tokens[1] + }) + ); + } + + /// @dev Bridge from Arbitrum To Ethereum reverts if Ethereum current bridged amount is lower than amount + function testFuzz_BridgeBackZeroBridgeLimitDestinationReverts(uint256 amount, uint256 bridgeAmount) public { + (uint256 capacity, ) = GhoToken(s.tokens[1]).getFacilitatorBucket(s.pools[1]); + amount = bound(amount, 1, capacity); + bridgeAmount = bound(bridgeAmount, 0, capacity - amount); + + // Inflate bridgeAmount + if (bridgeAmount > 0) { + deal(s.tokens[0], USER, bridgeAmount); + _bridgeGho(s, 0, 1, USER, bridgeAmount); + } + + // Inflate level on Arbitrum + _inflateFacilitatorLevel(s.pools[1], s.tokens[1], amount); + + // Skip origin move + + // Destination execution + if (amount > bridgeAmount) { + vm.expectRevert(); + } + vm.prank(RAMP); + IPoolV1(s.pools[0]).releaseOrMint( + Pool.ReleaseOrMintInV1({ + originalSender: bytes(""), + remoteChainSelector: uint64(1), + receiver: USER, + amount: amount, + localToken: s.tokens[0], + sourcePoolAddress: abi.encode(address(s.pools[1])), + sourcePoolData: bytes(""), + offchainTokenData: bytes("") + }) + ); + } + + /// @dev Bucket 
capacity reduction. Caution: bridge limit reduction must happen first + function testReduceBucketCapacity() public { + // Max out capacity + uint256 maxAmount = _getMaxToBridgeOut(s, 0); + deal(s.tokens[0], USER, maxAmount); + _bridgeGho(s, 0, 1, USER, maxAmount); + + assertEq(_getMaxToBridgeIn(s, 1), 0); + assertEq(_getCapacity(s, 1), maxAmount); + assertEq(_getLevel(s, 1), maxAmount); + + _assertInvariant(); + + uint256 newBucketCapacity = s.bucketCapacities[1] - 10; + // 1. Reduce bridge limit + _updateBridgeLimit(s, newBucketCapacity); + assertEq(_getMaxToBridgeOut(s, 0), 0); + assertEq(_getMaxToBridgeIn(s, 1), 0); + + // 2. Reduce bucket capacity + _updateBucketCapacity(s, 1, newBucketCapacity); + assertEq(_getMaxToBridgeOut(s, 0), 0); + assertEq(_getMaxToBridgeIn(s, 1), 0); + + // Maximum to bridge in is all minted on Arbitrum + assertEq(_getMaxToBridgeIn(s, 0), maxAmount); + assertEq(_getMaxToBridgeOut(s, 1), maxAmount); + + _bridgeGho(s, 1, 0, USER, maxAmount); + assertEq(_getMaxToBridgeOut(s, 0), newBucketCapacity); + assertEq(_getMaxToBridgeIn(s, 0), 0); + assertEq(_getMaxToBridgeOut(s, 1), 0); + assertEq(_getMaxToBridgeIn(s, 1), newBucketCapacity); + + _assertInvariant(); + } + + /// @dev Bucket capacity reduction, performed following wrong order procedure + function testReduceBucketCapacityIncorrectProcedure() public { + // Bridge a third of the capacity + uint256 amount = _getMaxToBridgeOut(s, 0) / 3; + uint256 availableToBridge = _getMaxToBridgeOut(s, 0) - amount; + + deal(s.tokens[0], USER, amount); + _bridgeGho(s, 0, 1, USER, amount); + + assertEq(_getMaxToBridgeIn(s, 1), s.bucketCapacities[1] - amount); + assertEq(_getLevel(s, 1), amount); + + _assertInvariant(); + + uint256 newBucketCapacity = s.bucketCapacities[1] - 10; + /// @dev INCORRECT ORDER PROCEDURE!! bridge limit reduction should happen first + // 1. Reduce bucket capacity + _updateBucketCapacity(s, 1, newBucketCapacity); + assertEq(_getMaxToBridgeOut(s, 0), availableToBridge); // this is the UX issue + assertEq(_getMaxToBridgeIn(s, 1), availableToBridge - 10); + + // User can come and try to max bridge on Arbitrum + // Transaction will succeed on Ethereum, but revert on Arbitrum + deal(s.tokens[0], USER, availableToBridge); + _moveGhoOrigin(s, 0, 1, USER, availableToBridge); + assertEq(_getMaxToBridgeOut(s, 0), 0); + + vm.expectRevert(); + vm.prank(RAMP); + IPoolV1(s.pools[1]).releaseOrMint( + Pool.ReleaseOrMintInV1({ + originalSender: bytes(""), + remoteChainSelector: uint64(0), + receiver: USER, + amount: availableToBridge, + localToken: s.tokens[0], + sourcePoolAddress: abi.encode(address(s.pools[0])), + sourcePoolData: bytes(""), + offchainTokenData: bytes("") + }) + ); + + // User can only bridge up to new bucket capacity (10 units less) + assertEq(_getMaxToBridgeIn(s, 1), availableToBridge - 10); + vm.prank(RAMP); + IPoolV1(s.pools[1]).releaseOrMint( + Pool.ReleaseOrMintInV1({ + originalSender: bytes(""), + remoteChainSelector: uint64(0), + receiver: USER, + amount: availableToBridge - 10, + localToken: s.tokens[0], + sourcePoolAddress: abi.encode(address(s.pools[0])), + sourcePoolData: bytes(""), + offchainTokenData: bytes("") + }) + ); + assertEq(_getMaxToBridgeIn(s, 1), 0); + + // 2. 
Reduce bridge limit + _updateBridgeLimit(s, newBucketCapacity); + assertEq(_getMaxToBridgeOut(s, 0), 0); + assertEq(_getMaxToBridgeIn(s, 1), 0); + } + + /// @dev Bucket capacity reduction, with a bridge out in between + function testReduceBucketCapacityWithBridgeOutInBetween() public { + // Bridge a third of the capacity + uint256 amount = _getMaxToBridgeOut(s, 0) / 3; + uint256 availableToBridge = _getMaxToBridgeOut(s, 0) - amount; + + deal(s.tokens[0], USER, amount); + _bridgeGho(s, 0, 1, USER, amount); + + assertEq(_getMaxToBridgeIn(s, 1), s.bucketCapacities[1] - amount); + assertEq(_getLevel(s, 1), amount); + + _assertInvariant(); + + uint256 newBucketCapacity = s.bucketCapacities[1] - 10; + // 1. Reduce bridge limit + _updateBridgeLimit(s, newBucketCapacity); + assertEq(_getMaxToBridgeOut(s, 0), availableToBridge - 10); + assertEq(_getMaxToBridgeIn(s, 1), availableToBridge); + + // User initiates bridge out action + uint256 amount2 = _getMaxToBridgeOut(s, 0); + deal(s.tokens[0], USER, amount2); + _moveGhoOrigin(s, 0, 1, USER, amount2); + assertEq(_getMaxToBridgeOut(s, 0), 0); + assertEq(_getMaxToBridgeIn(s, 0), newBucketCapacity); + + // 2. Reduce bucket capacity + _updateBucketCapacity(s, 1, newBucketCapacity); + // Destination execution can happen, but no more bridge out actions can be initiated + assertEq(_getMaxToBridgeOut(s, 1), amount); + assertEq(_getMaxToBridgeIn(s, 1), amount2); + + // Finalize bridge out action + _moveGhoDestination(s, 0, 1, USER, amount2); + assertEq(_getMaxToBridgeOut(s, 0), 0); + assertEq(_getMaxToBridgeIn(s, 0), newBucketCapacity); + assertEq(_getMaxToBridgeOut(s, 1), newBucketCapacity); + assertEq(_getMaxToBridgeIn(s, 1), 0); + + _assertInvariant(); + } + + /// @dev Bucket capacity reduction, with a bridge in in between + function testReduceBucketCapacityWithBridgeInInBetween() public { + // Bridge max amount + uint256 maxAmount = _getMaxToBridgeOut(s, 0); + + deal(s.tokens[0], USER, maxAmount); + _bridgeGho(s, 0, 1, USER, maxAmount); + + assertEq(_getMaxToBridgeIn(s, 1), 0); + assertEq(_getCapacity(s, 1), maxAmount); + assertEq(_getLevel(s, 1), maxAmount); + + _assertInvariant(); + + uint256 newBucketCapacity = s.bucketCapacities[1] - 10; + // 1. Reduce bridge limit + _updateBridgeLimit(s, newBucketCapacity); + assertEq(_getMaxToBridgeOut(s, 0), 0); + assertEq(_getMaxToBridgeIn(s, 1), 0); + + // User initiates bridge in action + _moveGhoOrigin(s, 1, 0, USER, maxAmount); + assertEq(_getMaxToBridgeOut(s, 1), 0); + assertEq(_getMaxToBridgeIn(s, 1), maxAmount); + + // 2. Reduce bucket capacity + _updateBucketCapacity(s, 1, newBucketCapacity); + assertEq(_getMaxToBridgeOut(s, 0), 0); + assertEq(_getMaxToBridgeIn(s, 0), maxAmount); + + // Finalize bridge in action + _moveGhoDestination(s, 1, 0, USER, maxAmount); + assertEq(_getMaxToBridgeOut(s, 0), newBucketCapacity); + assertEq(_getMaxToBridgeIn(s, 0), 0); + assertEq(_getMaxToBridgeOut(s, 1), 0); + assertEq(_getMaxToBridgeIn(s, 1), newBucketCapacity); + + _assertInvariant(); + } + + /// @dev Bucket capacity increase. Caution: bridge limit increase must happen afterwards + function testIncreaseBucketCapacity() public { + // Max out capacity + uint256 maxAmount = _getMaxToBridgeOut(s, 0); + deal(s.tokens[0], USER, maxAmount); + _bridgeGho(s, 0, 1, USER, maxAmount); + + assertEq(_getMaxToBridgeIn(s, 1), 0); + assertEq(_getCapacity(s, 1), maxAmount); + assertEq(_getLevel(s, 1), maxAmount); + + _assertInvariant(); + + uint256 newBucketCapacity = s.bucketCapacities[1] + 10; + // 1. 
Increase bucket capacity + _updateBucketCapacity(s, 1, newBucketCapacity); + assertEq(_getMaxToBridgeOut(s, 0), 0); + assertEq(_getMaxToBridgeIn(s, 1), 10); + + // Reverts if a user tries to bridge out 10 + vm.expectRevert(abi.encodeWithSelector(BridgeLimitExceeded.selector, INITIAL_BRIDGE_LIMIT)); + vm.prank(RAMP); + IPoolV1(s.pools[0]).lockOrBurn( + Pool.LockOrBurnInV1({ + receiver: bytes(""), + remoteChainSelector: uint64(1), + originalSender: USER, + amount: 10, + localToken: s.tokens[0] + }) + ); + + // 2. Increase bridge limit + _updateBridgeLimit(s, newBucketCapacity); + assertEq(_getMaxToBridgeOut(s, 0), 10); + assertEq(_getMaxToBridgeIn(s, 1), 10); + + _assertInvariant(); + + // Now it is possible to bridge some again + _bridgeGho(s, 1, 0, USER, maxAmount); + assertEq(_getMaxToBridgeOut(s, 0), newBucketCapacity); + assertEq(_getMaxToBridgeIn(s, 0), 0); + assertEq(_getMaxToBridgeOut(s, 1), 0); + assertEq(_getMaxToBridgeIn(s, 1), newBucketCapacity); + + _assertInvariant(); + } + + /// @dev Bucket capacity increase, performed following wrong order procedure + function testIncreaseBucketCapacityIncorrectProcedure() public { + // Max out capacity + uint256 maxAmount = _getMaxToBridgeOut(s, 0); + deal(s.tokens[0], USER, maxAmount); + _bridgeGho(s, 0, 1, USER, maxAmount); + + assertEq(_getMaxToBridgeIn(s, 1), 0); + assertEq(_getCapacity(s, 1), maxAmount); + assertEq(_getLevel(s, 1), maxAmount); + + _assertInvariant(); + + uint256 newBucketCapacity = s.bucketCapacities[1] + 10; + + /// @dev INCORRECT ORDER PROCEDURE!! bucket capacity increase should happen first + // 1. Increase bridge limit + _updateBridgeLimit(s, newBucketCapacity); + assertEq(_getMaxToBridgeOut(s, 0), 10); + assertEq(_getMaxToBridgeIn(s, 1), 0); // this is the UX issue + + // User can come and try to max bridge on Arbitrum + // Transaction will succeed on Ethereum, but revert on Arbitrum + deal(s.tokens[0], USER, 10); + _moveGhoOrigin(s, 0, 1, USER, 10); + assertEq(_getMaxToBridgeOut(s, 0), 0); + assertEq(_getMaxToBridgeIn(s, 0), newBucketCapacity); + + // Execution on destination will revert until bucket capacity gets increased + vm.expectRevert(); + vm.prank(RAMP); + IPoolV1(s.pools[1]).releaseOrMint( + Pool.ReleaseOrMintInV1({ + originalSender: bytes(""), + remoteChainSelector: uint64(0), + receiver: USER, + amount: 10, + localToken: s.tokens[0], + sourcePoolAddress: abi.encode(address(s.pools[0])), + sourcePoolData: bytes(""), + offchainTokenData: bytes("") + }) + ); + + // 2. Increase bucket capacity + _updateBucketCapacity(s, 1, newBucketCapacity); + assertEq(_getMaxToBridgeOut(s, 1), maxAmount); + assertEq(_getMaxToBridgeIn(s, 1), 10); + + // Now it is possible to execute on destination + _moveGhoDestination(s, 0, 1, USER, 10); + + assertEq(_getMaxToBridgeOut(s, 0), 0); + assertEq(_getMaxToBridgeIn(s, 0), newBucketCapacity); + assertEq(_getMaxToBridgeOut(s, 1), newBucketCapacity); + assertEq(_getMaxToBridgeIn(s, 1), 0); + + _assertInvariant(); + } + + /// @dev Bucket capacity increase, with a bridge out in between + function testIncreaseBucketCapacityWithBridgeOutInBetween() public { + // Bridge a third of the capacity + uint256 amount = _getMaxToBridgeOut(s, 0) / 3; + uint256 availableToBridge = _getMaxToBridgeOut(s, 0) - amount; + deal(s.tokens[0], USER, amount); + _bridgeGho(s, 0, 1, USER, amount); + + assertEq(_getMaxToBridgeIn(s, 1), s.bucketCapacities[1] - amount); + assertEq(_getLevel(s, 1), amount); + + _assertInvariant(); + + uint256 newBucketCapacity = s.bucketCapacities[1] + 10; + // 1. 
Increase bucket capacity + _updateBucketCapacity(s, 1, newBucketCapacity); + assertEq(_getMaxToBridgeOut(s, 0), availableToBridge); + assertEq(_getMaxToBridgeIn(s, 1), availableToBridge + 10); + + // Reverts if a user tries to bridge out all up to new bucket capacity + vm.expectRevert(); + vm.prank(RAMP); + IPoolV1(s.pools[0]).lockOrBurn( + Pool.LockOrBurnInV1({ + receiver: bytes(""), + remoteChainSelector: uint64(1), + originalSender: USER, + amount: availableToBridge + 10, + localToken: s.tokens[0] + }) + ); + + // User initiates bridge out action + deal(s.tokens[0], USER, availableToBridge); + _bridgeGho(s, 0, 1, USER, availableToBridge); + assertEq(_getMaxToBridgeOut(s, 0), 0); + assertEq(_getMaxToBridgeIn(s, 1), 10); + + // 2. Increase bridge limit + _updateBridgeLimit(s, newBucketCapacity); + assertEq(_getMaxToBridgeOut(s, 0), 10); + assertEq(_getMaxToBridgeIn(s, 1), 10); + + _assertInvariant(); + + // Now it is possible to bridge some again + deal(s.tokens[0], USER, 10); + _bridgeGho(s, 0, 1, USER, 10); + assertEq(_getMaxToBridgeOut(s, 0), 0); + assertEq(_getMaxToBridgeIn(s, 0), newBucketCapacity); + assertEq(_getMaxToBridgeOut(s, 1), newBucketCapacity); + assertEq(_getMaxToBridgeIn(s, 1), 0); + + _assertInvariant(); + } + + /// @dev Bucket capacity increase, with a bridge in in between + function testIncreaseBucketCapacityWithBridgeInInBetween() public { + // Max out capacity + uint256 maxAmount = _getMaxToBridgeOut(s, 0); + deal(s.tokens[0], USER, maxAmount); + _bridgeGho(s, 0, 1, USER, maxAmount); + + assertEq(_getMaxToBridgeIn(s, 1), 0); + assertEq(_getCapacity(s, 1), maxAmount); + assertEq(_getLevel(s, 1), maxAmount); + + _assertInvariant(); + + uint256 newBucketCapacity = s.bucketCapacities[1] + 10; + // 1. Increase bucket capacity + _updateBucketCapacity(s, 1, newBucketCapacity); + assertEq(_getMaxToBridgeOut(s, 0), 0); + assertEq(_getMaxToBridgeIn(s, 0), maxAmount); + assertEq(_getMaxToBridgeOut(s, 1), maxAmount); + assertEq(_getMaxToBridgeIn(s, 1), 10); + + // User initiates bridge in action + _moveGhoOrigin(s, 1, 0, USER, maxAmount); + assertEq(_getMaxToBridgeOut(s, 1), 0); + assertEq(_getMaxToBridgeIn(s, 1), newBucketCapacity); + + // 2. 
Increase bridge limit + _updateBridgeLimit(s, newBucketCapacity); + assertEq(_getMaxToBridgeOut(s, 0), 10); + assertEq(_getMaxToBridgeIn(s, 0), maxAmount); + + // User finalizes bridge in action + _moveGhoDestination(s, 1, 0, USER, maxAmount); + assertEq(_getMaxToBridgeOut(s, 0), newBucketCapacity); + assertEq(_getMaxToBridgeIn(s, 0), 0); + + _assertInvariant(); + + // Now it is possible to bridge new bucket capacity + deal(s.tokens[0], USER, newBucketCapacity); + _bridgeGho(s, 0, 1, USER, newBucketCapacity); + assertEq(_getMaxToBridgeOut(s, 0), 0); + assertEq(_getMaxToBridgeIn(s, 0), newBucketCapacity); + assertEq(_getMaxToBridgeOut(s, 1), newBucketCapacity); + assertEq(_getMaxToBridgeIn(s, 1), 0); + + _assertInvariant(); + } +} + +contract GhoTokenPoolEthereumBridgeLimitTripleScenario is GhoTokenPoolEthereumBridgeLimitSetup { + function setUp() public virtual override { + super.setUp(); + + // Arbitrum + _addBridge(s, 1, INITIAL_BRIDGE_LIMIT); + _enableLane(s, 0, 1); + + // Avalanche + _addBridge(s, 2, INITIAL_BRIDGE_LIMIT); + _enableLane(s, 1, 2); + _enableLane(s, 0, 2); + } + + /// @dev Bridge out some tokens to third chain via second chain (Ethereum to Arbitrum, Arbitrum to Avalanche) + function testFuzz_BridgeToTwoToThree(uint256 amount) public { + uint256 maxAmount = _getMaxToBridgeOut(s, 0); + amount = bound(amount, 1, maxAmount); + + _assertInvariant(); + + assertEq(_getMaxToBridgeOut(s, 0), maxAmount); + assertEq(_getMaxToBridgeIn(s, 0), 0); + assertEq(_getMaxToBridgeOut(s, 1), 0); + assertEq(_getMaxToBridgeIn(s, 1), s.bucketCapacities[1]); + assertEq(_getMaxToBridgeOut(s, 2), 0); + assertEq(_getMaxToBridgeIn(s, 2), s.bucketCapacities[2]); + + deal(s.tokens[0], USER, amount); + _moveGhoOrigin(s, 0, 1, USER, amount); + + assertEq(_getMaxToBridgeOut(s, 0), maxAmount - amount); + assertEq(_getMaxToBridgeIn(s, 0), amount); + assertEq(_getMaxToBridgeOut(s, 1), 0); + assertEq(_getMaxToBridgeIn(s, 1), s.bucketCapacities[1]); + assertEq(_getMaxToBridgeOut(s, 2), 0); + assertEq(_getMaxToBridgeIn(s, 2), s.bucketCapacities[2]); + + _moveGhoDestination(s, 0, 1, USER, amount); + + assertEq(_getMaxToBridgeOut(s, 0), maxAmount - amount); + assertEq(_getMaxToBridgeIn(s, 0), amount); + assertEq(_getMaxToBridgeOut(s, 1), amount); + assertEq(_getMaxToBridgeIn(s, 1), s.bucketCapacities[1] - s.bucketLevels[1]); + assertEq(_getMaxToBridgeOut(s, 2), 0); + assertEq(_getMaxToBridgeIn(s, 2), s.bucketCapacities[2]); + + _assertInvariant(); + + _moveGhoOrigin(s, 1, 2, USER, amount); + + assertEq(_getMaxToBridgeOut(s, 0), maxAmount - amount); + assertEq(_getMaxToBridgeIn(s, 0), amount); + assertEq(_getMaxToBridgeOut(s, 1), 0); + assertEq(_getMaxToBridgeIn(s, 1), s.bucketCapacities[1]); + assertEq(_getMaxToBridgeOut(s, 2), 0); + assertEq(_getMaxToBridgeIn(s, 2), s.bucketCapacities[2]); + + _moveGhoDestination(s, 1, 2, USER, amount); + + assertEq(_getMaxToBridgeOut(s, 0), maxAmount - amount); + assertEq(_getMaxToBridgeIn(s, 0), amount); + assertEq(_getMaxToBridgeOut(s, 1), 0); + assertEq(_getMaxToBridgeIn(s, 1), s.bucketCapacities[1]); + assertEq(_getMaxToBridgeOut(s, 2), amount); + assertEq(_getMaxToBridgeIn(s, 2), s.bucketCapacities[2] - amount); + + _assertInvariant(); + } + + /// @dev Bridge out some tokens to second and third chain randomly + function testFuzz_BridgeRandomlyToTwoAndThree(uint64[] memory amounts) public { + vm.assume(amounts.length < 30); + + uint256 maxAmount = _getMaxToBridgeOut(s, 0); + uint256 sourceAcc; + uint256 amount; + uint256 dest; + bool lastTime; + for (uint256 i = 0; i < 
amounts.length && !lastTime; i++) { + amount = amounts[i]; + + if (amount == 0) amount += 1; + if (sourceAcc + amount > maxAmount) { + amount = maxAmount - sourceAcc; + lastTime = true; + } + + dest = (amount % 2) + 1; + deal(s.tokens[0], USER, amount); + _bridgeGho(s, 0, dest, USER, amount); + + sourceAcc += amount; + } + assertEq(sourceAcc, s.bridged); + + // Bridge all to Avalanche + uint256 toBridge = _getMaxToBridgeOut(s, 1); + if (toBridge > 0) { + _bridgeGho(s, 1, 2, USER, toBridge); + assertEq(sourceAcc, s.bridged); + assertEq(_getLevel(s, 2), s.bridged); + assertEq(_getLevel(s, 1), 0); + } + } + + /// @dev All remote liquidity is on one chain or the other + function testLiquidityUnbalanced() public { + // Bridge all out to Arbitrum + uint256 amount = _getMaxToBridgeOut(s, 0); + deal(s.tokens[0], USER, amount); + _bridgeGho(s, 0, 1, USER, amount); + + // No more liquidity can go remotely + assertEq(_getMaxToBridgeOut(s, 0), 0); + vm.expectRevert(); + vm.prank(RAMP); + IPoolV1(s.pools[0]).lockOrBurn( + Pool.LockOrBurnInV1({ + receiver: bytes(""), + remoteChainSelector: uint64(1), + originalSender: USER, + amount: 1, + localToken: s.tokens[0] + }) + ); + vm.prank(RAMP); + vm.expectRevert(); + IPoolV1(s.pools[0]).lockOrBurn( + Pool.LockOrBurnInV1({ + receiver: bytes(""), + remoteChainSelector: uint64(2), + originalSender: USER, + amount: 1, + localToken: s.tokens[0] + }) + ); + + // All liquidity on Arbitrum, 0 on Avalanche + assertEq(_getLevel(s, 1), s.bridged); + assertEq(_getLevel(s, 1), _getCapacity(s, 1)); + assertEq(_getLevel(s, 2), 0); + + // Move all liquidity to Avalanche + _bridgeGho(s, 1, 2, USER, amount); + assertEq(_getLevel(s, 1), 0); + assertEq(_getLevel(s, 2), s.bridged); + assertEq(_getLevel(s, 2), _getCapacity(s, 2)); + + // Move all liquidity back to Ethereum + _bridgeGho(s, 2, 0, USER, amount); + assertEq(_getLevel(s, 1), 0); + assertEq(_getLevel(s, 2), 0); + assertEq(s.bridged, 0); + assertEq(_getMaxToBridgeOut(s, 0), amount); + } + + /// @dev Test showcasing incorrect bridge limit and bucket capacity configuration + function testIncorrectBridgeLimitBucketConfig() public { + // BridgeLimit 10, Arbitrum 9, Avalanche Bucket 10 + _updateBridgeLimit(s, 10); + _updateBucketCapacity(s, 1, 9); + _updateBucketCapacity(s, 2, 10); + + assertEq(_getMaxToBridgeOut(s, 0), 10); + assertEq(_getMaxToBridgeIn(s, 1), 9); // here the issue + assertEq(_getMaxToBridgeIn(s, 2), 10); + + // Possible to bridge 10 out to 2 + deal(s.tokens[0], USER, 10); + _bridgeGho(s, 0, 2, USER, 10); + + // Liquidity comes back + _bridgeGho(s, 2, 0, USER, 10); + + // Not possible to bridge 10 out to 1 + _moveGhoOrigin(s, 0, 1, USER, 10); + // Reverts on destination + vm.expectRevert(); + vm.prank(RAMP); + IPoolV1(s.pools[1]).releaseOrMint( + Pool.ReleaseOrMintInV1({ + originalSender: bytes(""), + amount: 10, + remoteChainSelector: uint64(0), + receiver: USER, + localToken: s.tokens[0], + sourcePoolAddress: abi.encode(address(s.pools[0])), + sourcePoolData: bytes(""), + offchainTokenData: bytes("") + }) + ); + + // Only if bucket capacity gets increased, execution can succeed + _updateBucketCapacity(s, 1, 10); + _moveGhoDestination(s, 0, 1, USER, 10); + } + + /// @dev Test showcasing a user locked due to a bridge limit reduction below current bridged amount + function testUserLockedBridgeLimitReductionBelowLevel() public { + // Bridge all out to Arbitrum + uint256 amount = _getMaxToBridgeOut(s, 0); + deal(s.tokens[0], USER, amount); + _bridgeGho(s, 0, 1, USER, amount); + + // Reduce bridge limit below 
current bridged amount + uint256 newBridgeLimit = amount / 2; + _updateBridgeLimit(s, newBridgeLimit); + _updateBucketCapacity(s, 1, newBridgeLimit); + + // Moving to Avalanche is not a problem because bucket capacity is higher than bridge limit + assertGt(_getMaxToBridgeIn(s, 2), newBridgeLimit); + _bridgeGho(s, 1, 2, USER, amount); + + // Moving back to Arbitrum reverts on destination + assertEq(_getMaxToBridgeIn(s, 1), newBridgeLimit); + _moveGhoOrigin(s, 2, 1, USER, amount); + vm.expectRevert(); + vm.prank(RAMP); + IPoolV1(s.pools[1]).releaseOrMint( + Pool.ReleaseOrMintInV1({ + originalSender: bytes(""), + amount: amount, + remoteChainSelector: uint64(2), + receiver: USER, + localToken: s.tokens[0], + sourcePoolAddress: abi.encode(address(s.pools[0])), + sourcePoolData: bytes(""), + offchainTokenData: bytes("") + }) + ); + } +} diff --git a/contracts/src/v0.8/ccip/test/pools/GHO/GhoTokenPoolEthereumE2E.t.sol b/contracts/src/v0.8/ccip/test/pools/GHO/GhoTokenPoolEthereumE2E.t.sol new file mode 100644 index 0000000000..d5905f8d56 --- /dev/null +++ b/contracts/src/v0.8/ccip/test/pools/GHO/GhoTokenPoolEthereumE2E.t.sol @@ -0,0 +1,423 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity ^0.8.0; + +import {GhoToken} from "@aave-gho-core/gho/GhoToken.sol"; + +import {IERC20} from "../../../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/IERC20.sol"; +import {CommitStore} from "../../../CommitStore.sol"; +import {EVM2EVMOnRamp} from "../../../onRamp/EVM2EVMOnRamp.sol"; +import {EVM2EVMOffRamp} from "../../../offRamp/EVM2EVMOffRamp.sol"; +import {IBurnMintERC20} from "../../../../shared/token/ERC20/IBurnMintERC20.sol"; +import {UpgradeableLockReleaseTokenPool} from "../../../pools/GHO/UpgradeableLockReleaseTokenPool.sol"; +import {UpgradeableBurnMintTokenPool} from "../../../pools/GHO/UpgradeableBurnMintTokenPool.sol"; +import {UpgradeableTokenPool} from "../../../pools/GHO/UpgradeableTokenPool.sol"; +import {IPriceRegistry} from "../../../interfaces/IPriceRegistry.sol"; +import {IRMN} from "../../../interfaces/IRMN.sol"; +import {RateLimiter} from "../../../libraries/RateLimiter.sol"; +import {Client} from "../../../libraries/Client.sol"; +import {Internal} from "../../../libraries/Internal.sol"; +import {MerkleHelper} from "../../helpers/MerkleHelper.sol"; +import {BaseTest} from "../../BaseTest.t.sol"; +import {E2E} from "../End2End.t.sol"; +import {GhoBaseTest} from "./GhoBaseTest.t.sol"; + +contract GhoTokenPoolEthereumE2E is E2E, GhoBaseTest { + using Internal for Internal.EVM2EVMMessage; + + IBurnMintERC20 internal srcGhoToken; + IBurnMintERC20 internal dstGhoToken; + UpgradeableLockReleaseTokenPool internal srcGhoTokenPool; + UpgradeableBurnMintTokenPool internal dstGhoTokenPool; + + function setUp() public virtual override(E2E, BaseTest) { + E2E.setUp(); + + // Deploy GHO Token on source chain + srcGhoToken = IBurnMintERC20(address(new GhoToken(AAVE_DAO))); + deal(address(srcGhoToken), OWNER, type(uint128).max); + // Add GHO token to source token list + s_sourceTokens.push(address(srcGhoToken)); + + // Deploy GHO Token on destination chain + dstGhoToken = IBurnMintERC20(address(new GhoToken(AAVE_DAO))); + deal(address(dstGhoToken), OWNER, type(uint128).max); + // Add GHO token to destination token list + s_destTokens.push(address(dstGhoToken)); + + // Deploy LockReleaseTokenPool for GHO token on source chain + srcGhoTokenPool = UpgradeableLockReleaseTokenPool( + _deployUpgradeableLockReleaseTokenPool( + address(srcGhoToken), + address(s_mockRMN), + 
address(s_sourceRouter), + AAVE_DAO, + INITIAL_BRIDGE_LIMIT, + PROXY_ADMIN + ) + ); + + // Add GHO UpgradeableTokenPool to source token pool list + s_sourcePoolByToken[address(srcGhoToken)] = address(srcGhoTokenPool); + s_destTokenBySourceToken[address(srcGhoToken)] = address(dstGhoToken); + + // Deploy BurnMintTokenPool for GHO token on destination chain + dstGhoTokenPool = UpgradeableBurnMintTokenPool( + _deployUpgradeableBurnMintTokenPool( + address(dstGhoToken), + address(s_mockRMN), + address(s_destRouter), + AAVE_DAO, + PROXY_ADMIN + ) + ); + + // Add GHO UpgradeableTokenPool to destination token pool list + s_sourcePoolByToken[address(dstGhoToken)] = address(dstGhoTokenPool); + s_destTokenBySourceToken[address(dstGhoToken)] = address(srcGhoToken); + + // Give mint and burn privileges to destination UpgradeableTokenPool (GHO-specific related) + vm.stopPrank(); + vm.startPrank(AAVE_DAO); + GhoToken(address(dstGhoToken)).grantRole(GhoToken(address(dstGhoToken)).FACILITATOR_MANAGER_ROLE(), AAVE_DAO); + GhoToken(address(dstGhoToken)).addFacilitator(address(dstGhoTokenPool), "UpgradeableTokenPool", type(uint128).max); + vm.stopPrank(); + vm.startPrank(OWNER); + + // Add config for source and destination chains + UpgradeableTokenPool.ChainUpdate[] memory srcChainUpdates = new UpgradeableTokenPool.ChainUpdate[](1); + bytes[] memory remotePoolAddresses = new bytes[](1); + remotePoolAddresses[0] = abi.encode(address(dstGhoTokenPool)); + srcChainUpdates[0] = UpgradeableTokenPool.ChainUpdate({ + remoteChainSelector: DEST_CHAIN_SELECTOR, + remotePoolAddresses: remotePoolAddresses, + remoteTokenAddress: abi.encode(address(dstGhoToken)), + outboundRateLimiterConfig: _getOutboundRateLimiterConfig(), + inboundRateLimiterConfig: _getInboundRateLimiterConfig() + }); + UpgradeableTokenPool.ChainUpdate[] memory dstChainUpdates = new UpgradeableTokenPool.ChainUpdate[](1); + remotePoolAddresses[0] = abi.encode(address(srcGhoTokenPool)); + dstChainUpdates[0] = UpgradeableTokenPool.ChainUpdate({ + remoteChainSelector: SOURCE_CHAIN_SELECTOR, + remotePoolAddresses: remotePoolAddresses, + remoteTokenAddress: abi.encode(address(srcGhoToken)), + outboundRateLimiterConfig: _getOutboundRateLimiterConfig(), + inboundRateLimiterConfig: _getInboundRateLimiterConfig() + }); + vm.stopPrank(); + vm.startPrank(AAVE_DAO); + srcGhoTokenPool.applyChainUpdates(new uint64[](0), srcChainUpdates); + dstGhoTokenPool.applyChainUpdates(new uint64[](0), dstChainUpdates); + vm.stopPrank(); + vm.startPrank(OWNER); + + // Update GHO Token price on source PriceRegistry + EVM2EVMOnRamp.DynamicConfig memory onRampDynamicConfig = s_onRamp.getDynamicConfig(); + IPriceRegistry onRampPriceRegistry = IPriceRegistry(onRampDynamicConfig.priceRegistry); + onRampPriceRegistry.updatePrices(_getSingleTokenPriceUpdateStruct(address(srcGhoToken), 1e18)); + + // Update GHO Token price on destination PriceRegistry + EVM2EVMOffRamp.DynamicConfig memory offRampDynamicConfig = s_offRamp.getDynamicConfig(); + IPriceRegistry offRampPriceRegistry = IPriceRegistry(offRampDynamicConfig.priceRegistry); + offRampPriceRegistry.updatePrices(_getSingleTokenPriceUpdateStruct(address(dstGhoToken), 1e18)); + + s_tokenAdminRegistry.proposeAdministrator(address(srcGhoToken), AAVE_DAO); + s_tokenAdminRegistry.proposeAdministrator(address(dstGhoToken), AAVE_DAO); + vm.stopPrank(); + vm.startPrank(AAVE_DAO); + s_tokenAdminRegistry.acceptAdminRole(address(srcGhoToken)); + s_tokenAdminRegistry.setPool(address(srcGhoToken), address(srcGhoTokenPool)); + 
s_tokenAdminRegistry.acceptAdminRole(address(dstGhoToken)); + s_tokenAdminRegistry.setPool(address(dstGhoToken), address(dstGhoTokenPool)); + vm.stopPrank(); + vm.startPrank(OWNER); + } + + function testE2E_MessagesSuccess_gas() public { + vm.pauseGasMetering(); + uint256 preGhoTokenBalanceOwner = srcGhoToken.balanceOf(OWNER); + uint256 preGhoTokenBalancePool = srcGhoToken.balanceOf(address(srcGhoTokenPool)); + uint256 preBridgedAmount = srcGhoTokenPool.getCurrentBridgedAmount(); + uint256 preBridgeLimit = srcGhoTokenPool.getBridgeLimit(); + + Internal.EVM2EVMMessage[] memory messages = new Internal.EVM2EVMMessage[](1); + messages[0] = sendRequestGho(1, 1000 * 1e18, false, false); + + uint256 expectedFee = s_sourceRouter.getFee(DEST_CHAIN_SELECTOR, _generateTokenMessage()); + // Asserts that the tokens have been sent and the fee has been paid. + assertEq(preGhoTokenBalanceOwner - 1000 * 1e18, srcGhoToken.balanceOf(OWNER)); + assertEq(preGhoTokenBalancePool + 1000 * 1e18, srcGhoToken.balanceOf(address(srcGhoTokenPool))); + assertGt(expectedFee, 0); + + assertEq(preBridgedAmount + 1000 * 1e18, srcGhoTokenPool.getCurrentBridgedAmount()); + assertEq(preBridgeLimit, srcGhoTokenPool.getBridgeLimit()); + + bytes32 metaDataHash = s_offRamp.metadataHash(); + + bytes32[] memory hashedMessages = new bytes32[](1); + hashedMessages[0] = messages[0]._hash(metaDataHash); + messages[0].messageId = hashedMessages[0]; + + bytes32[] memory merkleRoots = new bytes32[](1); + merkleRoots[0] = MerkleHelper.getMerkleRoot(hashedMessages); + + address[] memory onRamps = new address[](1); + onRamps[0] = ON_RAMP_ADDRESS; + + bytes memory commitReport = abi.encode( + CommitStore.CommitReport({ + priceUpdates: _getEmptyPriceUpdates(), + interval: CommitStore.Interval(messages[0].sequenceNumber, messages[0].sequenceNumber), + merkleRoot: merkleRoots[0] + }) + ); + + vm.resumeGasMetering(); + s_commitStore.report(commitReport, ++s_latestEpochAndRound); + vm.pauseGasMetering(); + + vm.mockCall( + s_commitStore.getStaticConfig().rmnProxy, + abi.encodeWithSelector(IRMN.isBlessed.selector, IRMN.TaggedRoot(address(s_commitStore), merkleRoots[0])), + abi.encode(true) + ); + + bytes32[] memory proofs = new bytes32[](0); + uint256 timestamp = s_commitStore.verify(merkleRoots, proofs, 2 ** 2 - 1); + assertEq(BLOCK_TIME, timestamp); + + // We change the block time so when execute would e.g. use the current + // block time instead of the committed block time the value would be + // incorrect in the checks below. 
+ vm.warp(BLOCK_TIME + 2000); + + vm.expectEmit(); + emit EVM2EVMOffRamp.ExecutionStateChanged( + messages[0].sequenceNumber, + messages[0].messageId, + Internal.MessageExecutionState.SUCCESS, + "" + ); + + Internal.ExecutionReport memory execReport = _generateReportFromMessages(messages); + + uint256 preGhoTokenBalanceUser = dstGhoToken.balanceOf(USER); + (uint256 preCapacity, uint256 preLevel) = GhoToken(address(dstGhoToken)).getFacilitatorBucket( + address(dstGhoTokenPool) + ); + + s_offRamp.execute(execReport, new EVM2EVMOffRamp.GasLimitOverride[](0)); + + assertEq(preGhoTokenBalanceUser + 1000 * 1e18, dstGhoToken.balanceOf(USER), "Wrong balance on destination"); + // Facilitator checks + (uint256 postCapacity, uint256 postLevel) = GhoToken(address(dstGhoToken)).getFacilitatorBucket( + address(dstGhoTokenPool) + ); + assertEq(postCapacity, preCapacity); + assertEq(preLevel + 1000 * 1e18, postLevel, "wrong facilitator bucket level"); + } + + function testE2E_3MessagesSuccess_gas() public { + vm.pauseGasMetering(); + uint256 preGhoTokenBalanceOwner = srcGhoToken.balanceOf(OWNER); + uint256 preGhoTokenBalancePool = srcGhoToken.balanceOf(address(srcGhoTokenPool)); + uint256 preBridgedAmount = srcGhoTokenPool.getCurrentBridgedAmount(); + uint256 preBridgeLimit = srcGhoTokenPool.getBridgeLimit(); + + Internal.EVM2EVMMessage[] memory messages = new Internal.EVM2EVMMessage[](3); + messages[0] = sendRequestGho(1, 1000 * 1e18, false, false); + messages[1] = sendRequestGho(2, 2000 * 1e18, false, false); + messages[2] = sendRequestGho(3, 3000 * 1e18, false, false); + + uint256 expectedFee = s_sourceRouter.getFee(DEST_CHAIN_SELECTOR, _generateTokenMessage()); + // Asserts that the tokens have been sent and the fee has been paid. + assertEq(preGhoTokenBalanceOwner - 6000 * 1e18, srcGhoToken.balanceOf(OWNER)); + assertEq(preGhoTokenBalancePool + 6000 * 1e18, srcGhoToken.balanceOf(address(srcGhoTokenPool))); + assertGt(expectedFee, 0); + + assertEq(preBridgedAmount + 6000 * 1e18, srcGhoTokenPool.getCurrentBridgedAmount()); + assertEq(preBridgeLimit, srcGhoTokenPool.getBridgeLimit()); + + bytes32 metaDataHash = s_offRamp.metadataHash(); + + bytes32[] memory hashedMessages = new bytes32[](3); + hashedMessages[0] = messages[0]._hash(metaDataHash); + messages[0].messageId = hashedMessages[0]; + hashedMessages[1] = messages[1]._hash(metaDataHash); + messages[1].messageId = hashedMessages[1]; + hashedMessages[2] = messages[2]._hash(metaDataHash); + messages[2].messageId = hashedMessages[2]; + + bytes32[] memory merkleRoots = new bytes32[](1); + merkleRoots[0] = MerkleHelper.getMerkleRoot(hashedMessages); + + address[] memory onRamps = new address[](1); + onRamps[0] = ON_RAMP_ADDRESS; + + bytes memory commitReport = abi.encode( + CommitStore.CommitReport({ + priceUpdates: _getEmptyPriceUpdates(), + interval: CommitStore.Interval(messages[0].sequenceNumber, messages[2].sequenceNumber), + merkleRoot: merkleRoots[0] + }) + ); + + vm.resumeGasMetering(); + s_commitStore.report(commitReport, ++s_latestEpochAndRound); + vm.pauseGasMetering(); + + vm.mockCall( + s_commitStore.getStaticConfig().rmnProxy, + abi.encodeWithSelector(IRMN.isBlessed.selector, IRMN.TaggedRoot(address(s_commitStore), merkleRoots[0])), + abi.encode(true) + ); + + bytes32[] memory proofs = new bytes32[](0); + uint256 timestamp = s_commitStore.verify(merkleRoots, proofs, 2 ** 2 - 1); + assertEq(BLOCK_TIME, timestamp); + + // We change the block time so when execute would e.g. 
use the current + // block time instead of the committed block time the value would be + // incorrect in the checks below. + vm.warp(BLOCK_TIME + 2000); + + vm.expectEmit(); + emit EVM2EVMOffRamp.ExecutionStateChanged( + messages[0].sequenceNumber, + messages[0].messageId, + Internal.MessageExecutionState.SUCCESS, + "" + ); + + vm.expectEmit(); + emit EVM2EVMOffRamp.ExecutionStateChanged( + messages[1].sequenceNumber, + messages[1].messageId, + Internal.MessageExecutionState.SUCCESS, + "" + ); + + vm.expectEmit(); + emit EVM2EVMOffRamp.ExecutionStateChanged( + messages[2].sequenceNumber, + messages[2].messageId, + Internal.MessageExecutionState.SUCCESS, + "" + ); + + Internal.ExecutionReport memory execReport = _generateReportFromMessages(messages); + + uint256 preGhoTokenBalanceUser = dstGhoToken.balanceOf(USER); + (uint256 preCapacity, uint256 preLevel) = GhoToken(address(dstGhoToken)).getFacilitatorBucket( + address(dstGhoTokenPool) + ); + + s_offRamp.execute(execReport, new EVM2EVMOffRamp.GasLimitOverride[](0)); + + assertEq(preGhoTokenBalanceUser + 6000 * 1e18, dstGhoToken.balanceOf(USER), "Wrong balance on destination"); + // Facilitator checks + (uint256 postCapacity, uint256 postLevel) = GhoToken(address(dstGhoToken)).getFacilitatorBucket( + address(dstGhoTokenPool) + ); + assertEq(postCapacity, preCapacity); + assertEq(preLevel + 6000 * 1e18, postLevel, "wrong facilitator bucket level"); + } + + function testRevertRateLimitReached() public { + // increase bridge limit to hit the rate limit error + vm.stopPrank(); + vm.startPrank(AAVE_DAO); + srcGhoTokenPool.setBridgeLimit(type(uint256).max); + vm.startPrank(OWNER); + + RateLimiter.Config memory rateLimiterConfig = _getOutboundRateLimiterConfig(); + + // will revert due to rate limit of tokenPool + sendRequestGho(1, rateLimiterConfig.capacity + 1, true, false); + + // max capacity, won't revert + sendRequestGho(1, rateLimiterConfig.capacity, false, false); + + // revert due to capacity exceeded + sendRequestGho(2, 100, true, false); + + // increase block time to refill capacity + vm.warp(BLOCK_TIME + 1); + + // won't revert due to refill + sendRequestGho(2, 100, false, false); + } + + function testRevertOnLessTokenToCoverFee() public { + sendRequestGho(1, 1000, false, true); + } + + function testRevertBridgeLimitReached() public { + // increase CCIP rate limit to hit the bridge limit error + vm.stopPrank(); + vm.startPrank(AAVE_DAO); + srcGhoTokenPool.setChainRateLimiterConfig( + DEST_CHAIN_SELECTOR, + RateLimiter.Config({isEnabled: true, capacity: uint128(INITIAL_BRIDGE_LIMIT * 2), rate: 1e15}), + _getInboundRateLimiterConfig() + ); + vm.warp(block.timestamp + 100); // wait to refill capacity + vm.startPrank(OWNER); + + // will revert due to bridge limit + sendRequestGho(1, uint128(INITIAL_BRIDGE_LIMIT + 1), true, false); + + // max bridge limit, won't revert + sendRequestGho(1, uint128(INITIAL_BRIDGE_LIMIT), false, false); + assertEq(srcGhoTokenPool.getCurrentBridgedAmount(), INITIAL_BRIDGE_LIMIT); + + // revert due to bridge limit exceeded + sendRequestGho(2, 1, true, false); + + // increase bridge limit + vm.startPrank(AAVE_DAO); + srcGhoTokenPool.setBridgeLimit(INITIAL_BRIDGE_LIMIT + 1); + vm.startPrank(OWNER); + + // won't revert due to increased bridge limit + sendRequestGho(2, 1, false, false); + assertEq(srcGhoTokenPool.getCurrentBridgedAmount(), INITIAL_BRIDGE_LIMIT + 1); + } + + function sendRequestGho( + uint64 expectedSeqNum, + uint256 amount, + bool expectRevert, + bool sendLessFee + ) public returns (Internal.EVM2EVMMessage memory) { + 
Client.EVM2AnyMessage memory message = _generateSingleTokenMessage(address(srcGhoToken), amount); + uint256 expectedFee = s_sourceRouter.getFee(DEST_CHAIN_SELECTOR, message); + + // err mgmt + uint256 feeToSend = sendLessFee ? expectedFee - 1 : expectedFee; + expectRevert = sendLessFee ? true : expectRevert; + + IERC20(s_sourceTokens[0]).approve(address(s_sourceRouter), feeToSend); // fee + IERC20(srcGhoToken).approve(address(s_sourceRouter), amount); // amount + + message.receiver = abi.encode(USER); + Internal.EVM2EVMMessage memory geEvent = _messageToEvent( + message, + expectedSeqNum, + expectedSeqNum, + expectedFee, + OWNER + ); + + if (!expectRevert) { + vm.expectEmit(); + emit EVM2EVMOnRamp.CCIPSendRequested(geEvent); + } else { + vm.expectRevert(); + } + vm.resumeGasMetering(); + s_sourceRouter.ccipSend(DEST_CHAIN_SELECTOR, message); + vm.pauseGasMetering(); + + return geEvent; + } +} diff --git a/contracts/src/v0.8/ccip/test/pools/GHO/GhoTokenPoolEthereumSetup.t.sol b/contracts/src/v0.8/ccip/test/pools/GHO/GhoTokenPoolEthereumSetup.t.sol new file mode 100644 index 0000000000..6f9787ecab --- /dev/null +++ b/contracts/src/v0.8/ccip/test/pools/GHO/GhoTokenPoolEthereumSetup.t.sol @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity ^0.8.0; + +import {GhoToken} from "@aave-gho-core/gho/GhoToken.sol"; + +import {BaseTest} from "../../BaseTest.t.sol"; +import {UpgradeableLockReleaseTokenPool} from "../../../pools/GHO/UpgradeableLockReleaseTokenPool.sol"; +import {UpgradeableTokenPool} from "../../../pools/GHO/UpgradeableTokenPool.sol"; +import {Router} from "../../../Router.sol"; +import {IERC20} from "../../../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/IERC20.sol"; +import {RouterSetup} from "../../router/RouterSetup.t.sol"; +import {BaseTest} from "../../BaseTest.t.sol"; +import {GhoBaseTest} from "./GhoBaseTest.t.sol"; + +contract GhoTokenPoolEthereumSetup is RouterSetup, GhoBaseTest { + IERC20 internal s_token; + UpgradeableLockReleaseTokenPool internal s_ghoTokenPool; + + address internal s_allowedOnRamp = address(123); + address internal s_allowedOffRamp = address(234); + + address internal s_sourcePool = makeAddr("source_pool"); + address internal s_sourceToken = makeAddr("source_token"); + + function setUp() public virtual override(RouterSetup, BaseTest) { + RouterSetup.setUp(); + + // GHO deployment + GhoToken ghoToken = new GhoToken(AAVE_DAO); + s_token = IERC20(address(ghoToken)); + deal(address(s_token), OWNER, type(uint128).max); + + // Set up UpgradeableTokenPool with permission to mint/burn + s_ghoTokenPool = UpgradeableLockReleaseTokenPool( + _deployUpgradeableLockReleaseTokenPool( + address(s_token), + address(s_mockRMN), + address(s_sourceRouter), + AAVE_DAO, + INITIAL_BRIDGE_LIMIT, + PROXY_ADMIN + ) + ); + + UpgradeableTokenPool.ChainUpdate[] memory chainUpdate = new UpgradeableTokenPool.ChainUpdate[](1); + bytes[] memory remotePoolAddresses = new bytes[](1); + remotePoolAddresses[0] = abi.encode(s_sourcePool); + + chainUpdate[0] = UpgradeableTokenPool.ChainUpdate({ + remoteChainSelector: DEST_CHAIN_SELECTOR, + remotePoolAddresses: remotePoolAddresses, + remoteTokenAddress: abi.encode(s_sourceToken), + outboundRateLimiterConfig: _getOutboundRateLimiterConfig(), + inboundRateLimiterConfig: _getInboundRateLimiterConfig() + }); + + changePrank(AAVE_DAO); + s_ghoTokenPool.applyChainUpdates(new uint64[](0), chainUpdate); + s_ghoTokenPool.setRebalancer(OWNER); + changePrank(OWNER); + + Router.OnRamp[] memory onRampUpdates = new 
Router.OnRamp[](1); + Router.OffRamp[] memory offRampUpdates = new Router.OffRamp[](1); + onRampUpdates[0] = Router.OnRamp({destChainSelector: DEST_CHAIN_SELECTOR, onRamp: s_allowedOnRamp}); + offRampUpdates[0] = Router.OffRamp({sourceChainSelector: SOURCE_CHAIN_SELECTOR, offRamp: s_allowedOffRamp}); + s_sourceRouter.applyRampUpdates(onRampUpdates, new Router.OffRamp[](0), offRampUpdates); + } +} diff --git a/contracts/src/v0.8/ccip/test/pools/GHO/GhoTokenPoolRemote.t.sol b/contracts/src/v0.8/ccip/test/pools/GHO/GhoTokenPoolRemote.t.sol new file mode 100644 index 0000000000..e38d869ac2 --- /dev/null +++ b/contracts/src/v0.8/ccip/test/pools/GHO/GhoTokenPoolRemote.t.sol @@ -0,0 +1,610 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity ^0.8.0; + +import {GhoToken} from "@aave-gho-core/gho/GhoToken.sol"; +import {TransparentUpgradeableProxy} from "solidity-utils/contracts/transparent-proxy/TransparentUpgradeableProxy.sol"; + +import {stdError} from "forge-std/Test.sol"; +import {UpgradeableTokenPool} from "../../../pools/GHO/UpgradeableTokenPool.sol"; +import {EVM2EVMOnRamp} from "../../../onRamp/EVM2EVMOnRamp.sol"; +import {EVM2EVMOffRamp} from "../../../offRamp/EVM2EVMOffRamp.sol"; +import {UpgradeableBurnMintTokenPool} from "../../../pools/GHO/UpgradeableBurnMintTokenPool.sol"; +import {RateLimiter} from "../../../libraries/RateLimiter.sol"; +import {Pool} from "../../../libraries/Pool.sol"; +import {MockUpgradeable} from "../../mocks/MockUpgradeable.sol"; + +import {GhoTokenPoolRemoteSetup} from "./GhoTokenPoolRemoteSetup.t.sol"; + +contract GhoTokenPoolRemote_lockOrBurn is GhoTokenPoolRemoteSetup { + function testSetupSuccess() public view { + assertEq(address(s_burnMintERC677), address(s_pool.getToken())); + assertEq(address(s_mockRMN), s_pool.getRmnProxy()); + assertEq(false, s_pool.getAllowListEnabled()); + assertEq("BurnMintTokenPool 1.5.1", s_pool.typeAndVersion()); + } + + function testPoolBurnSuccess() public { + uint256 burnAmount = 20_000e18; + // inflate facilitator level + _inflateFacilitatorLevel(address(s_pool), address(s_burnMintERC677), burnAmount); + + deal(address(s_burnMintERC677), address(s_pool), burnAmount); + assertEq(s_burnMintERC677.balanceOf(address(s_pool)), burnAmount); + + vm.startPrank(s_burnMintOnRamp); + + vm.expectEmit(); + emit TokensConsumed(burnAmount); + + vm.expectEmit(); + emit Transfer(address(s_pool), address(0), burnAmount); + + vm.expectEmit(); + emit Burned(address(s_burnMintOnRamp), burnAmount); + + bytes4 expectedSignature = bytes4(keccak256("burn(uint256)")); + vm.expectCall(address(s_burnMintERC677), abi.encodeWithSelector(expectedSignature, burnAmount)); + + (uint256 preCapacity, uint256 preLevel) = GhoToken(address(s_burnMintERC677)).getFacilitatorBucket(address(s_pool)); + + s_pool.lockOrBurn( + Pool.LockOrBurnInV1({ + originalSender: OWNER, + receiver: bytes(""), + amount: burnAmount, + remoteChainSelector: DEST_CHAIN_SELECTOR, + localToken: address(s_burnMintERC677) + }) + ); + + // Facilitator checks + (uint256 postCapacity, uint256 postLevel) = GhoToken(address(s_burnMintERC677)).getFacilitatorBucket( + address(s_pool) + ); + assertEq(postCapacity, preCapacity); + assertEq(preLevel - burnAmount, postLevel, "wrong facilitator bucket level"); + + assertEq(s_burnMintERC677.balanceOf(address(s_pool)), 0); + } + + // Should not burn tokens if cursed. 
+ function testPoolBurnRevertNotHealthyReverts() public { + s_mockRMN.setGlobalCursed(true); + uint256 before = s_burnMintERC677.balanceOf(address(s_pool)); + vm.startPrank(s_burnMintOnRamp); + + vm.expectRevert(EVM2EVMOnRamp.CursedByRMN.selector); + s_pool.lockOrBurn( + Pool.LockOrBurnInV1({ + originalSender: OWNER, + receiver: bytes(""), + amount: 1e5, + remoteChainSelector: DEST_CHAIN_SELECTOR, + localToken: address(s_burnMintERC677) + }) + ); + + assertEq(s_burnMintERC677.balanceOf(address(s_pool)), before); + } + + function testChainNotAllowedReverts() public { + uint64 wrongChainSelector = 8838833; + vm.expectRevert(abi.encodeWithSelector(UpgradeableTokenPool.ChainNotAllowed.selector, wrongChainSelector)); + s_pool.lockOrBurn( + Pool.LockOrBurnInV1({ + originalSender: OWNER, + receiver: bytes(""), + amount: 1, + remoteChainSelector: wrongChainSelector, + localToken: address(s_burnMintERC677) + }) + ); + } + + function testPoolBurnNoPrivilegesReverts() public { + // Remove privileges + vm.startPrank(AAVE_DAO); + GhoToken(address(s_burnMintERC677)).removeFacilitator(address(s_pool)); + vm.stopPrank(); + + uint256 amount = 1; + vm.startPrank(s_burnMintOnRamp); + vm.expectRevert(stdError.arithmeticError); + s_pool.lockOrBurn( + Pool.LockOrBurnInV1({ + originalSender: STRANGER, + receiver: bytes(""), + amount: amount, + remoteChainSelector: DEST_CHAIN_SELECTOR, + localToken: address(s_burnMintERC677) + }) + ); + } + + function testBucketLevelNotEnoughReverts() public { + (, uint256 bucketLevel) = GhoToken(address(s_burnMintERC677)).getFacilitatorBucket(address(s_pool)); + assertEq(bucketLevel, 0); + + uint256 amount = 1; + vm.expectCall(address(s_burnMintERC677), abi.encodeWithSelector(GhoToken.burn.selector, amount)); + vm.expectRevert(stdError.arithmeticError); + vm.startPrank(s_burnMintOnRamp); + s_pool.lockOrBurn( + Pool.LockOrBurnInV1({ + originalSender: STRANGER, + receiver: bytes(""), + amount: amount, + remoteChainSelector: DEST_CHAIN_SELECTOR, + localToken: address(s_burnMintERC677) + }) + ); + } + + function testTokenMaxCapacityExceededReverts() public { + RateLimiter.Config memory rateLimiterConfig = _getOutboundRateLimiterConfig(); + uint256 capacity = rateLimiterConfig.capacity; + uint256 amount = 10 * capacity; + + vm.expectRevert( + abi.encodeWithSelector(RateLimiter.TokenMaxCapacityExceeded.selector, capacity, amount, address(s_burnMintERC677)) + ); + vm.startPrank(s_burnMintOnRamp); + s_pool.lockOrBurn( + Pool.LockOrBurnInV1({ + originalSender: STRANGER, + receiver: bytes(""), + amount: amount, + remoteChainSelector: DEST_CHAIN_SELECTOR, + localToken: address(s_burnMintERC677) + }) + ); + } +} + +contract GhoTokenPoolRemote_releaseOrMint is GhoTokenPoolRemoteSetup { + function testPoolMintSuccess() public { + uint256 amount = 1e19; + vm.startPrank(s_burnMintOffRamp); + vm.expectEmit(); + emit Transfer(address(0), OWNER, amount); + s_pool.releaseOrMint( + Pool.ReleaseOrMintInV1({ + originalSender: bytes(""), + receiver: OWNER, + amount: amount, + remoteChainSelector: DEST_CHAIN_SELECTOR, + localToken: address(s_burnMintERC677), + sourcePoolAddress: abi.encode(s_sourcePool), + sourcePoolData: bytes(""), + offchainTokenData: bytes("") + }) + ); + assertEq(s_burnMintERC677.balanceOf(OWNER), amount); + } + + function testPoolMintNotHealthyReverts() public { + // Should not mint tokens if cursed. 
+ s_mockRMN.setGlobalCursed(true); + uint256 before = s_burnMintERC677.balanceOf(OWNER); + vm.startPrank(s_burnMintOffRamp); + vm.expectRevert(EVM2EVMOffRamp.CursedByRMN.selector); + s_pool.releaseOrMint( + Pool.ReleaseOrMintInV1({ + originalSender: bytes(""), + receiver: OWNER, + amount: 1e5, + remoteChainSelector: DEST_CHAIN_SELECTOR, + localToken: address(s_burnMintERC677), + sourcePoolAddress: abi.encode(s_sourcePool), + sourcePoolData: bytes(""), + offchainTokenData: bytes("") + }) + ); + assertEq(s_burnMintERC677.balanceOf(OWNER), before); + } + + function testChainNotAllowedReverts() public { + uint64 wrongChainSelector = 8838833; + vm.expectRevert(abi.encodeWithSelector(UpgradeableTokenPool.ChainNotAllowed.selector, wrongChainSelector)); + s_pool.releaseOrMint( + Pool.ReleaseOrMintInV1({ + originalSender: bytes(""), + receiver: STRANGER, + amount: 1, + remoteChainSelector: wrongChainSelector, + localToken: address(s_burnMintERC677), + sourcePoolAddress: abi.encode(s_sourcePool), + sourcePoolData: bytes(""), + offchainTokenData: bytes("") + }) + ); + } + + function testPoolMintNoPrivilegesReverts() public { + // Remove privileges + vm.startPrank(AAVE_DAO); + GhoToken(address(s_burnMintERC677)).removeFacilitator(address(s_pool)); + vm.stopPrank(); + + uint256 amount = 1; + vm.startPrank(s_burnMintOffRamp); + vm.expectRevert("FACILITATOR_BUCKET_CAPACITY_EXCEEDED"); + s_pool.releaseOrMint( + Pool.ReleaseOrMintInV1({ + originalSender: bytes(""), + receiver: STRANGER, + amount: amount, + remoteChainSelector: DEST_CHAIN_SELECTOR, + localToken: address(s_burnMintERC677), + sourcePoolAddress: abi.encode(s_sourcePool), + sourcePoolData: bytes(""), + offchainTokenData: bytes("") + }) + ); + } + + function testBucketCapacityExceededReverts() public { + // Mint all the bucket capacity + (uint256 bucketCapacity, ) = GhoToken(address(s_burnMintERC677)).getFacilitatorBucket(address(s_pool)); + _inflateFacilitatorLevel(address(s_pool), address(s_burnMintERC677), bucketCapacity); + (uint256 currCapacity, uint256 currLevel) = GhoToken(address(s_burnMintERC677)).getFacilitatorBucket( + address(s_pool) + ); + assertEq(currCapacity, currLevel); + + uint256 amount = 1; + vm.expectCall(address(s_burnMintERC677), abi.encodeWithSelector(GhoToken.mint.selector, STRANGER, amount)); + vm.expectRevert("FACILITATOR_BUCKET_CAPACITY_EXCEEDED"); + vm.startPrank(s_burnMintOffRamp); + s_pool.releaseOrMint( + Pool.ReleaseOrMintInV1({ + originalSender: bytes(""), + receiver: STRANGER, + amount: amount, + remoteChainSelector: DEST_CHAIN_SELECTOR, + localToken: address(s_burnMintERC677), + sourcePoolAddress: abi.encode(s_sourcePool), + sourcePoolData: bytes(""), + offchainTokenData: bytes("") + }) + ); + } + + function testTokenMaxCapacityExceededReverts() public { + RateLimiter.Config memory rateLimiterConfig = _getInboundRateLimiterConfig(); + uint256 capacity = rateLimiterConfig.capacity; + uint256 amount = 10 * capacity; + + vm.expectRevert( + abi.encodeWithSelector(RateLimiter.TokenMaxCapacityExceeded.selector, capacity, amount, address(s_burnMintERC677)) + ); + vm.startPrank(s_burnMintOffRamp); + s_pool.releaseOrMint( + Pool.ReleaseOrMintInV1({ + originalSender: bytes(""), + receiver: STRANGER, + amount: amount, + remoteChainSelector: DEST_CHAIN_SELECTOR, + localToken: address(s_burnMintERC677), + sourcePoolAddress: abi.encode(s_sourcePool), + sourcePoolData: bytes(""), + offchainTokenData: bytes("") + }) + ); + } +} + +contract GhoTokenPoolEthereum_upgradeability is GhoTokenPoolRemoteSetup { + function 
testInitialization() public { + // Upgradeability + assertEq(_getUpgradeableVersion(address(s_pool)), 1); + vm.startPrank(PROXY_ADMIN); + (bool ok, bytes memory result) = address(s_pool).staticcall( + abi.encodeWithSelector(TransparentUpgradeableProxy.admin.selector) + ); + assertTrue(ok, "proxy admin fetch failed"); + address decodedProxyAdmin = abi.decode(result, (address)); + assertEq(decodedProxyAdmin, PROXY_ADMIN, "proxy admin is wrong"); + assertEq(decodedProxyAdmin, _getProxyAdminAddress(address(s_pool)), "proxy admin is wrong"); + + // TokenPool + vm.startPrank(OWNER); + assertEq(s_pool.getAllowList().length, 0); + assertEq(s_pool.getAllowListEnabled(), false); + assertEq(s_pool.getRmnProxy(), address(s_mockRMN)); + assertEq(s_pool.getRouter(), address(s_sourceRouter)); + assertEq(address(s_pool.getToken()), address(s_burnMintERC677)); + assertEq(s_pool.owner(), AAVE_DAO, "owner is wrong"); + } + + function testUpgrade() public { + MockUpgradeable newImpl = new MockUpgradeable(); + bytes memory mockImpleParams = abi.encodeWithSignature("initialize()"); + vm.startPrank(PROXY_ADMIN); + TransparentUpgradeableProxy(payable(address(s_pool))).upgradeToAndCall(address(newImpl), mockImpleParams); + + vm.startPrank(OWNER); + assertEq(_getUpgradeableVersion(address(s_pool)), 2); + } + + function testUpgradeAdminReverts() public { + vm.expectRevert(); + TransparentUpgradeableProxy(payable(address(s_pool))).upgradeToAndCall(address(0), bytes("")); + assertEq(_getUpgradeableVersion(address(s_pool)), 1); + + vm.expectRevert(); + TransparentUpgradeableProxy(payable(address(s_pool))).upgradeTo(address(0)); + assertEq(_getUpgradeableVersion(address(s_pool)), 1); + } + + function testChangeAdmin() public { + assertEq(_getProxyAdminAddress(address(s_pool)), PROXY_ADMIN); + + address newAdmin = makeAddr("newAdmin"); + vm.startPrank(PROXY_ADMIN); + TransparentUpgradeableProxy(payable(address(s_pool))).changeAdmin(newAdmin); + + assertEq(_getProxyAdminAddress(address(s_pool)), newAdmin, "Admin change failed"); + } + + function testChangeAdminAdminReverts() public { + assertEq(_getProxyAdminAddress(address(s_pool)), PROXY_ADMIN); + + address newAdmin = makeAddr("newAdmin"); + vm.expectRevert(); + TransparentUpgradeableProxy(payable(address(s_pool))).changeAdmin(newAdmin); + + assertEq(_getProxyAdminAddress(address(s_pool)), PROXY_ADMIN, "Unauthorized admin change"); + } +} + +contract GhoTokenPoolRemote_setChainRateLimiterConfig is GhoTokenPoolRemoteSetup { + event ConfigChanged(RateLimiter.Config); + event ChainConfigured( + uint64 chainSelector, + RateLimiter.Config outboundRateLimiterConfig, + RateLimiter.Config inboundRateLimiterConfig + ); + + uint64 internal s_remoteChainSelector; + + function setUp() public virtual override { + GhoTokenPoolRemoteSetup.setUp(); + UpgradeableTokenPool.ChainUpdate[] memory chainUpdates = new UpgradeableTokenPool.ChainUpdate[](1); + s_remoteChainSelector = 123124; + bytes[] memory remotePoolAddresses = new bytes[](1); + remotePoolAddresses[0] = abi.encode(s_sourcePool); + chainUpdates[0] = UpgradeableTokenPool.ChainUpdate({ + remoteChainSelector: s_remoteChainSelector, + remotePoolAddresses: remotePoolAddresses, + remoteTokenAddress: abi.encode(s_sourceToken), + outboundRateLimiterConfig: _getOutboundRateLimiterConfig(), + inboundRateLimiterConfig: _getInboundRateLimiterConfig() + }); + changePrank(AAVE_DAO); + s_pool.applyChainUpdates(new uint64[](0), chainUpdates); + changePrank(OWNER); + } + + function testFuzz_SetChainRateLimiterConfigSuccess(uint128 capacity, 
uint128 rate, uint32 newTime) public { + // Cap the lower bound to 4 so 4/2 is still >= 2 + vm.assume(capacity >= 4); + // Cap the lower bound to 2 so 2/2 is still >= 1 + rate = uint128(bound(rate, 2, capacity - 2)); + // Bucket updates only work on increasing time + newTime = uint32(bound(newTime, block.timestamp + 1, type(uint32).max)); + vm.warp(newTime); + + uint256 oldOutboundTokens = s_pool.getCurrentOutboundRateLimiterState(s_remoteChainSelector).tokens; + uint256 oldInboundTokens = s_pool.getCurrentInboundRateLimiterState(s_remoteChainSelector).tokens; + + RateLimiter.Config memory newOutboundConfig = RateLimiter.Config({isEnabled: true, capacity: capacity, rate: rate}); + RateLimiter.Config memory newInboundConfig = RateLimiter.Config({ + isEnabled: true, + capacity: capacity / 2, + rate: rate / 2 + }); + + vm.expectEmit(); + emit ConfigChanged(newOutboundConfig); + vm.expectEmit(); + emit ConfigChanged(newInboundConfig); + vm.expectEmit(); + emit ChainConfigured(s_remoteChainSelector, newOutboundConfig, newInboundConfig); + + changePrank(AAVE_DAO); + s_pool.setChainRateLimiterConfig(s_remoteChainSelector, newOutboundConfig, newInboundConfig); + + uint256 expectedTokens = RateLimiter._min(newOutboundConfig.capacity, oldOutboundTokens); + + RateLimiter.TokenBucket memory bucket = s_pool.getCurrentOutboundRateLimiterState(s_remoteChainSelector); + assertEq(bucket.capacity, newOutboundConfig.capacity); + assertEq(bucket.rate, newOutboundConfig.rate); + assertEq(bucket.tokens, expectedTokens); + assertEq(bucket.lastUpdated, newTime); + + expectedTokens = RateLimiter._min(newInboundConfig.capacity, oldInboundTokens); + + bucket = s_pool.getCurrentInboundRateLimiterState(s_remoteChainSelector); + assertEq(bucket.capacity, newInboundConfig.capacity); + assertEq(bucket.rate, newInboundConfig.rate); + assertEq(bucket.tokens, expectedTokens); + assertEq(bucket.lastUpdated, newTime); + } + + function testOnlyOwnerOrRateLimitAdminSuccess() public { + address rateLimiterAdmin = address(28973509103597907); + + changePrank(AAVE_DAO); + s_pool.setRateLimitAdmin(rateLimiterAdmin); + + changePrank(rateLimiterAdmin); + + s_pool.setChainRateLimiterConfig( + s_remoteChainSelector, + _getOutboundRateLimiterConfig(), + _getInboundRateLimiterConfig() + ); + + changePrank(AAVE_DAO); + + s_pool.setChainRateLimiterConfig( + s_remoteChainSelector, + _getOutboundRateLimiterConfig(), + _getInboundRateLimiterConfig() + ); + } + + // Reverts + + function testOnlyOwnerReverts() public { + changePrank(STRANGER); + + vm.expectRevert(abi.encodeWithSelector(UpgradeableTokenPool.Unauthorized.selector, STRANGER)); + s_pool.setChainRateLimiterConfig( + s_remoteChainSelector, + _getOutboundRateLimiterConfig(), + _getInboundRateLimiterConfig() + ); + } + + function testNonExistentChainReverts() public { + uint64 wrongChainSelector = 9084102894; + + vm.expectRevert(abi.encodeWithSelector(UpgradeableTokenPool.NonExistentChain.selector, wrongChainSelector)); + changePrank(AAVE_DAO); + s_pool.setChainRateLimiterConfig( + wrongChainSelector, + _getOutboundRateLimiterConfig(), + _getInboundRateLimiterConfig() + ); + } +} + +contract GhoTokenPoolRemote_setRateLimitAdmin is GhoTokenPoolRemoteSetup { + function testSetRateLimitAdminSuccess() public { + assertEq(address(0), s_pool.getRateLimitAdmin()); + changePrank(AAVE_DAO); + s_pool.setRateLimitAdmin(OWNER); + assertEq(OWNER, s_pool.getRateLimitAdmin()); + } + + // Reverts + + function testSetRateLimitAdminReverts() public { + vm.startPrank(STRANGER); + + 
vm.expectRevert(OnlyCallableByOwner.selector); + s_pool.setRateLimitAdmin(STRANGER); + } +} + +contract GhoTokenPoolRemote_directMint is GhoTokenPoolRemoteSetup { + function testFuzzDirectMintSuccess(uint256 amount) public { + amount = bound(amount, 1, type(uint128).max); // current pool capacity + + address oldTokenPool = makeAddr("oldTokenPool"); + + changePrank(AAVE_DAO); + vm.expectEmit(address(s_burnMintERC677)); + emit Transfer(address(0), oldTokenPool, amount); + s_pool.directMint(oldTokenPool, amount); + + assertEq(s_burnMintERC677.balanceOf(oldTokenPool), amount); + assertEq(s_burnMintERC677.balanceOf(address(s_pool)), 0); + assertEq(GhoToken(address(s_burnMintERC677)).getFacilitator(address(s_pool)).bucketLevel, amount); + } + + // Reverts + + function testDirectMintReverts() public { + vm.startPrank(STRANGER); + + vm.expectRevert(OnlyCallableByOwner.selector); + s_pool.directMint(makeAddr("oldFacilitator"), 13e7); + } +} + +contract GhoTokenPoolRemote_directBurn is GhoTokenPoolRemoteSetup { + function testFuzzDirectBurnSuccess(uint256 amount) public { + amount = bound(amount, 1, type(uint128).max); // bound to bucket capacity + // prank previously bridged supply + vm.startPrank(address(s_pool)); + s_burnMintERC677.mint(address(s_pool), amount); + + vm.startPrank(AAVE_DAO); + s_pool.directBurn(amount); + + assertEq(s_burnMintERC677.balanceOf(address(s_pool)), 0); + assertEq(GhoToken(address(s_burnMintERC677)).getFacilitator(address(s_pool)).bucketLevel, 0); + } + + function testDirectBurnReverts() public { + vm.startPrank(STRANGER); + + vm.expectRevert(OnlyCallableByOwner.selector); + s_pool.directBurn(13e7); + } +} + +contract GhoTokenPoolRemote_migrateLiquidity is GhoTokenPoolRemoteSetup { + UpgradeableBurnMintTokenPool internal s_oldBurnMintTokenPool; + + function setUp() public override { + super.setUp(); + + s_oldBurnMintTokenPool = UpgradeableBurnMintTokenPool( + _deployUpgradeableBurnMintTokenPool( + address(s_burnMintERC677), + address(s_mockRMN), + address(s_sourceRouter), + AAVE_DAO, + PROXY_ADMIN + ) + ); + + changePrank(AAVE_DAO); + GhoToken(address(s_burnMintERC677)).addFacilitator( + address(s_oldBurnMintTokenPool), + "OldTokenPool", + uint128(INITIAL_BRIDGE_LIMIT) + ); + + // mock existing supply offRamped by old token pool (not using `directMint` for clarity) + // which is circulating on remote chain + changePrank(address(s_oldBurnMintTokenPool)); + s_burnMintERC677.mint(makeAddr("users"), INITIAL_BRIDGE_LIMIT); + } + + function testFuzzMigrateFacilitator(uint256 amount) public { + amount = bound(amount, 1, INITIAL_BRIDGE_LIMIT); // old pool bucket level + changePrank(AAVE_DAO); + + assertEq( + GhoToken(address(s_burnMintERC677)).getFacilitator(address(s_oldBurnMintTokenPool)).bucketLevel, + INITIAL_BRIDGE_LIMIT + ); + assertEq(GhoToken(address(s_burnMintERC677)).getFacilitator(address(s_pool)).bucketLevel, 0); + + // note: these two operations should be done atomically such that there are no unbacked tokens + // in circulation at any point + // 1. mint tokens to old pool + vm.expectEmit(address(s_burnMintERC677)); + emit Transfer(address(0), address(s_oldBurnMintTokenPool), amount); + s_pool.directMint(address(s_oldBurnMintTokenPool), amount); + + // 2.
burn tokens from old pool + vm.expectEmit(address(s_burnMintERC677)); + emit Transfer(address(s_oldBurnMintTokenPool), address(0), amount); + s_oldBurnMintTokenPool.directBurn(amount); + + assertEq(s_burnMintERC677.balanceOf(address(s_oldBurnMintTokenPool)), 0); + assertEq(s_burnMintERC677.balanceOf(address(s_pool)), 0); + + assertEq( + GhoToken(address(s_burnMintERC677)).getFacilitator(address(s_oldBurnMintTokenPool)).bucketLevel, + INITIAL_BRIDGE_LIMIT - amount + ); + assertEq(GhoToken(address(s_burnMintERC677)).getFacilitator(address(s_pool)).bucketLevel, amount); + } +} diff --git a/contracts/src/v0.8/ccip/test/pools/GHO/GhoTokenPoolRemoteE2E.t.sol b/contracts/src/v0.8/ccip/test/pools/GHO/GhoTokenPoolRemoteE2E.t.sol new file mode 100644 index 0000000000..3182ed727c --- /dev/null +++ b/contracts/src/v0.8/ccip/test/pools/GHO/GhoTokenPoolRemoteE2E.t.sol @@ -0,0 +1,445 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity ^0.8.0; + +import {GhoToken} from "@aave-gho-core/gho/GhoToken.sol"; + +import {IERC20} from "../../../../vendor/openzeppelin-solidity/v4.8.3/contracts/token/ERC20/IERC20.sol"; +import {IRMN} from "../../../interfaces/IRMN.sol"; +import {CommitStore} from "../../../CommitStore.sol"; +import {EVM2EVMOnRamp} from "../../../onRamp/EVM2EVMOnRamp.sol"; +import {EVM2EVMOffRamp} from "../../../offRamp/EVM2EVMOffRamp.sol"; +import {IBurnMintERC20} from "../../../../shared/token/ERC20/IBurnMintERC20.sol"; +import {UpgradeableLockReleaseTokenPool} from "../../../pools/GHO/UpgradeableLockReleaseTokenPool.sol"; +import {UpgradeableBurnMintTokenPool} from "../../../pools/GHO/UpgradeableBurnMintTokenPool.sol"; +import {UpgradeableTokenPool} from "../../../pools/GHO/UpgradeableTokenPool.sol"; +import {IPriceRegistry} from "../../../interfaces/IPriceRegistry.sol"; +import {RateLimiter} from "../../../libraries/RateLimiter.sol"; +import {Pool} from "../../../libraries/Pool.sol"; +import {Internal} from "../../../libraries/Internal.sol"; +import {Client} from "../../../libraries/Client.sol"; +import {MerkleHelper} from "../../helpers/MerkleHelper.sol"; +import {BaseTest} from "../../BaseTest.t.sol"; +import {E2E} from "../End2End.t.sol"; +import {GhoBaseTest} from "./GhoBaseTest.t.sol"; + +contract GhoTokenPoolRemoteE2E is E2E, GhoBaseTest { + using Internal for Internal.EVM2EVMMessage; + + IBurnMintERC20 internal srcGhoToken; + IBurnMintERC20 internal dstGhoToken; + UpgradeableBurnMintTokenPool internal srcGhoTokenPool; + UpgradeableLockReleaseTokenPool internal dstGhoTokenPool; + + function setUp() public virtual override(E2E, BaseTest) { + E2E.setUp(); + + // Deploy GHO Token on source chain + srcGhoToken = IBurnMintERC20(address(new GhoToken(AAVE_DAO))); + deal(address(srcGhoToken), OWNER, type(uint128).max); + // Add GHO token to source token list + s_sourceTokens.push(address(srcGhoToken)); + + // Deploy GHO Token on destination chain + dstGhoToken = IBurnMintERC20(address(new GhoToken(AAVE_DAO))); + deal(address(dstGhoToken), OWNER, type(uint128).max); + // Add GHO token to destination token list + s_destTokens.push(address(dstGhoToken)); + + // Deploy BurnMintTokenPool for GHO token on source chain + srcGhoTokenPool = UpgradeableBurnMintTokenPool( + _deployUpgradeableBurnMintTokenPool( + address(srcGhoToken), + address(s_mockRMN), + address(s_sourceRouter), + AAVE_DAO, + PROXY_ADMIN + ) + ); + + // Add GHO UpgradeableTokenPool to source token pool list + s_sourcePoolByToken[address(srcGhoToken)] = address(srcGhoTokenPool); + s_destTokenBySourceToken[address(srcGhoToken)] = 
address(dstGhoToken); + + // Deploy LockReleaseTokenPool for GHO token on destination chain + dstGhoTokenPool = UpgradeableLockReleaseTokenPool( + _deployUpgradeableLockReleaseTokenPool( + address(dstGhoToken), + address(s_mockRMN), + address(s_destRouter), + AAVE_DAO, + INITIAL_BRIDGE_LIMIT, + PROXY_ADMIN + ) + ); + + // Add GHO UpgradeableTokenPool to destination token pool list + s_sourcePoolByToken[address(dstGhoToken)] = address(dstGhoTokenPool); + s_destTokenBySourceToken[address(dstGhoToken)] = address(srcGhoToken); + + // Give mint and burn privileges to source UpgradeableTokenPool (GHO-specific related) + vm.stopPrank(); + vm.startPrank(AAVE_DAO); + GhoToken(address(srcGhoToken)).grantRole(GhoToken(address(srcGhoToken)).FACILITATOR_MANAGER_ROLE(), AAVE_DAO); + GhoToken(address(srcGhoToken)).addFacilitator(address(srcGhoTokenPool), "UpgradeableTokenPool", type(uint128).max); + vm.stopPrank(); + vm.startPrank(OWNER); + + // Add config for source and destination chains + UpgradeableTokenPool.ChainUpdate[] memory srcChainUpdates = new UpgradeableTokenPool.ChainUpdate[](1); + bytes[] memory remotePoolAddresses = new bytes[](1); + remotePoolAddresses[0] = abi.encode(address(dstGhoTokenPool)); + srcChainUpdates[0] = UpgradeableTokenPool.ChainUpdate({ + remoteChainSelector: DEST_CHAIN_SELECTOR, + remotePoolAddresses: remotePoolAddresses, + remoteTokenAddress: abi.encode(address(dstGhoToken)), + outboundRateLimiterConfig: _getOutboundRateLimiterConfig(), + inboundRateLimiterConfig: _getInboundRateLimiterConfig() + }); + UpgradeableTokenPool.ChainUpdate[] memory dstChainUpdates = new UpgradeableTokenPool.ChainUpdate[](1); + remotePoolAddresses[0] = abi.encode(address(srcGhoTokenPool)); + dstChainUpdates[0] = UpgradeableTokenPool.ChainUpdate({ + remoteChainSelector: SOURCE_CHAIN_SELECTOR, + remotePoolAddresses: remotePoolAddresses, + remoteTokenAddress: abi.encode(address(srcGhoToken)), + outboundRateLimiterConfig: _getOutboundRateLimiterConfig(), + inboundRateLimiterConfig: _getInboundRateLimiterConfig() + }); + vm.stopPrank(); + vm.startPrank(AAVE_DAO); + srcGhoTokenPool.applyChainUpdates(new uint64[](0), srcChainUpdates); + dstGhoTokenPool.applyChainUpdates(new uint64[](0), dstChainUpdates); + vm.stopPrank(); + vm.startPrank(OWNER); + + // Update GHO Token price on source PriceRegistry + EVM2EVMOnRamp.DynamicConfig memory onRampDynamicConfig = s_onRamp.getDynamicConfig(); + IPriceRegistry onRampPriceRegistry = IPriceRegistry(onRampDynamicConfig.priceRegistry); + onRampPriceRegistry.updatePrices(_getSingleTokenPriceUpdateStruct(address(srcGhoToken), 1e18)); + + // Update GHO Token price on destination PriceRegistry + EVM2EVMOffRamp.DynamicConfig memory offRampDynamicConfig = s_offRamp.getDynamicConfig(); + IPriceRegistry offRampPriceRegistry = IPriceRegistry(offRampDynamicConfig.priceRegistry); + offRampPriceRegistry.updatePrices(_getSingleTokenPriceUpdateStruct(address(dstGhoToken), 1e18)); + + s_tokenAdminRegistry.proposeAdministrator(address(srcGhoToken), AAVE_DAO); + s_tokenAdminRegistry.proposeAdministrator(address(dstGhoToken), AAVE_DAO); + vm.stopPrank(); + vm.startPrank(AAVE_DAO); + s_tokenAdminRegistry.acceptAdminRole(address(srcGhoToken)); + s_tokenAdminRegistry.setPool(address(srcGhoToken), address(srcGhoTokenPool)); + s_tokenAdminRegistry.acceptAdminRole(address(dstGhoToken)); + s_tokenAdminRegistry.setPool(address(dstGhoToken), address(dstGhoTokenPool)); + vm.stopPrank(); + vm.startPrank(OWNER); + } + + function testE2E_MessagesSuccess_gas() public { + vm.pauseGasMetering(); + 
+ // Mint some GHO to inflate UpgradeableBurnMintTokenPool facilitator level + _inflateFacilitatorLevel(address(srcGhoTokenPool), address(srcGhoToken), 1000 * 1e18); + vm.startPrank(OWNER); + + // Lock some GHO on destination so it can be released later on + dstGhoToken.transfer(address(dstGhoTokenPool), 1000 * 1e18); + // Inflate current bridged amount so it can be reduced in `releaseOrMint` function + vm.stopPrank(); + vm.startPrank(address(s_onRamp)); + vm.mockCall( + address(s_destRouter), + abi.encodeWithSelector(bytes4(keccak256("getOnRamp(uint64)"))), + abi.encode(s_onRamp) + ); + dstGhoTokenPool.lockOrBurn( + Pool.LockOrBurnInV1({ + originalSender: STRANGER, + receiver: bytes(""), + amount: 1000 * 1e18, + remoteChainSelector: SOURCE_CHAIN_SELECTOR, + localToken: address(dstGhoToken) + }) + ); + assertEq(dstGhoTokenPool.getCurrentBridgedAmount(), 1000 * 1e18); + vm.startPrank(address(OWNER)); + + uint256 preGhoTokenBalanceOwner = srcGhoToken.balanceOf(OWNER); + uint256 preGhoTokenBalancePool = srcGhoToken.balanceOf(address(srcGhoTokenPool)); + (uint256 preCapacity, uint256 preLevel) = GhoToken(address(srcGhoToken)).getFacilitatorBucket( + address(srcGhoTokenPool) + ); + + Internal.EVM2EVMMessage[] memory messages = new Internal.EVM2EVMMessage[](1); + messages[0] = sendRequestGho(1, 1000 * 1e18, false, false); + + uint256 expectedFee = s_sourceRouter.getFee(DEST_CHAIN_SELECTOR, _generateTokenMessage()); + // Asserts that the tokens have been sent and the fee has been paid. + assertEq(preGhoTokenBalanceOwner - 1000 * 1e18, srcGhoToken.balanceOf(OWNER)); + assertEq(preGhoTokenBalancePool, srcGhoToken.balanceOf(address(srcGhoTokenPool))); // GHO gets burned + assertGt(expectedFee, 0); + assertEq(dstGhoTokenPool.getCurrentBridgedAmount(), 1000 * 1e18); + + // Facilitator checks + (uint256 postCapacity, uint256 postLevel) = GhoToken(address(srcGhoToken)).getFacilitatorBucket( + address(srcGhoTokenPool) + ); + assertEq(postCapacity, preCapacity); + assertEq(preLevel - 1000 * 1e18, postLevel, "wrong facilitator bucket level"); + + bytes32 metaDataHash = s_offRamp.metadataHash(); + + bytes32[] memory hashedMessages = new bytes32[](1); + hashedMessages[0] = messages[0]._hash(metaDataHash); + messages[0].messageId = hashedMessages[0]; + + bytes32[] memory merkleRoots = new bytes32[](1); + merkleRoots[0] = MerkleHelper.getMerkleRoot(hashedMessages); + + address[] memory onRamps = new address[](1); + onRamps[0] = ON_RAMP_ADDRESS; + + bytes memory commitReport = abi.encode( + CommitStore.CommitReport({ + priceUpdates: _getEmptyPriceUpdates(), + interval: CommitStore.Interval(messages[0].sequenceNumber, messages[0].sequenceNumber), + merkleRoot: merkleRoots[0] + }) + ); + + vm.resumeGasMetering(); + s_commitStore.report(commitReport, ++s_latestEpochAndRound); + vm.pauseGasMetering(); + + vm.mockCall( + s_commitStore.getStaticConfig().rmnProxy, + abi.encodeWithSelector(IRMN.isBlessed.selector, IRMN.TaggedRoot(address(s_commitStore), merkleRoots[0])), + abi.encode(true) + ); + + bytes32[] memory proofs = new bytes32[](0); + uint256 timestamp = s_commitStore.verify(merkleRoots, proofs, 2 ** 2 - 1); + assertEq(BLOCK_TIME, timestamp); + + // We change the block time so when execute would e.g. use the current + // block time instead of the committed block time the value would be + // incorrect in the checks below. 
+ vm.warp(BLOCK_TIME + 2000); + + vm.expectEmit(); + emit EVM2EVMOffRamp.ExecutionStateChanged( + messages[0].sequenceNumber, + messages[0].messageId, + Internal.MessageExecutionState.SUCCESS, + "" + ); + + Internal.ExecutionReport memory execReport = _generateReportFromMessages(messages); + + uint256 preGhoTokenBalanceUser = dstGhoToken.balanceOf(USER); + + vm.resumeGasMetering(); + s_offRamp.execute(execReport, new EVM2EVMOffRamp.GasLimitOverride[](0)); + vm.pauseGasMetering(); + + assertEq(preGhoTokenBalanceUser + 1000 * 1e18, dstGhoToken.balanceOf(USER), "Wrong balance on destination"); + assertEq(dstGhoTokenPool.getCurrentBridgedAmount(), 0); + } + + function testE2E_3MessagesSuccess_gas() public { + vm.pauseGasMetering(); + + // Mint some GHO to inflate UpgradeableTokenPool facilitator level + _inflateFacilitatorLevel(address(srcGhoTokenPool), address(srcGhoToken), 6000 * 1e18); + vm.startPrank(OWNER); + + // Lock some GHO on destination so it can be released later on + dstGhoToken.transfer(address(dstGhoTokenPool), 6000 * 1e18); + // Inflate current bridged amount so it can be reduced in `releaseOrMint` function + vm.stopPrank(); + vm.startPrank(address(s_onRamp)); + vm.mockCall( + address(s_destRouter), + abi.encodeWithSelector(bytes4(keccak256("getOnRamp(uint64)"))), + abi.encode(s_onRamp) + ); + dstGhoTokenPool.lockOrBurn( + Pool.LockOrBurnInV1({ + originalSender: STRANGER, + receiver: bytes(""), + amount: 6000 * 1e18, + remoteChainSelector: SOURCE_CHAIN_SELECTOR, + localToken: address(dstGhoToken) + }) + ); + assertEq(dstGhoTokenPool.getCurrentBridgedAmount(), 6000 * 1e18); + vm.startPrank(address(OWNER)); + + uint256 preGhoTokenBalanceOwner = srcGhoToken.balanceOf(OWNER); + uint256 preGhoTokenBalancePool = srcGhoToken.balanceOf(address(srcGhoTokenPool)); + (uint256 preCapacity, uint256 preLevel) = GhoToken(address(srcGhoToken)).getFacilitatorBucket( + address(srcGhoTokenPool) + ); + + Internal.EVM2EVMMessage[] memory messages = new Internal.EVM2EVMMessage[](3); + messages[0] = sendRequestGho(1, 1000 * 1e18, false, false); + messages[1] = sendRequestGho(2, 2000 * 1e18, false, false); + messages[2] = sendRequestGho(3, 3000 * 1e18, false, false); + + uint256 expectedFee = s_sourceRouter.getFee(DEST_CHAIN_SELECTOR, _generateTokenMessage()); + // Asserts that the tokens have been sent and the fee has been paid. 
+ assertEq(preGhoTokenBalanceOwner - 6000 * 1e18, srcGhoToken.balanceOf(OWNER)); + assertEq(preGhoTokenBalancePool, srcGhoToken.balanceOf(address(srcGhoTokenPool))); // GHO gets burned + assertGt(expectedFee, 0); + assertEq(dstGhoTokenPool.getCurrentBridgedAmount(), 6000 * 1e18); + + // Facilitator checks + (uint256 postCapacity, uint256 postLevel) = GhoToken(address(srcGhoToken)).getFacilitatorBucket( + address(srcGhoTokenPool) + ); + assertEq(postCapacity, preCapacity); + assertEq(preLevel - 6000 * 1e18, postLevel, "wrong facilitator bucket level"); + + bytes32 metaDataHash = s_offRamp.metadataHash(); + + bytes32[] memory hashedMessages = new bytes32[](3); + hashedMessages[0] = messages[0]._hash(metaDataHash); + messages[0].messageId = hashedMessages[0]; + hashedMessages[1] = messages[1]._hash(metaDataHash); + messages[1].messageId = hashedMessages[1]; + hashedMessages[2] = messages[2]._hash(metaDataHash); + messages[2].messageId = hashedMessages[2]; + + bytes32[] memory merkleRoots = new bytes32[](1); + merkleRoots[0] = MerkleHelper.getMerkleRoot(hashedMessages); + + address[] memory onRamps = new address[](1); + onRamps[0] = ON_RAMP_ADDRESS; + + bytes memory commitReport = abi.encode( + CommitStore.CommitReport({ + priceUpdates: _getEmptyPriceUpdates(), + interval: CommitStore.Interval(messages[0].sequenceNumber, messages[2].sequenceNumber), + merkleRoot: merkleRoots[0] + }) + ); + + vm.resumeGasMetering(); + s_commitStore.report(commitReport, ++s_latestEpochAndRound); + vm.pauseGasMetering(); + + vm.mockCall( + s_commitStore.getStaticConfig().rmnProxy, + abi.encodeWithSelector(IRMN.isBlessed.selector, IRMN.TaggedRoot(address(s_commitStore), merkleRoots[0])), + abi.encode(true) + ); + + bytes32[] memory proofs = new bytes32[](0); + uint256 timestamp = s_commitStore.verify(merkleRoots, proofs, 2 ** 2 - 1); + assertEq(BLOCK_TIME, timestamp); + + // We change the block time so when execute would e.g. use the current + // block time instead of the committed block time the value would be + // incorrect in the checks below. 
+ vm.warp(BLOCK_TIME + 2000); + + vm.expectEmit(); + emit EVM2EVMOffRamp.ExecutionStateChanged( + messages[0].sequenceNumber, + messages[0].messageId, + Internal.MessageExecutionState.SUCCESS, + "" + ); + + vm.expectEmit(); + emit EVM2EVMOffRamp.ExecutionStateChanged( + messages[1].sequenceNumber, + messages[1].messageId, + Internal.MessageExecutionState.SUCCESS, + "" + ); + + vm.expectEmit(); + emit EVM2EVMOffRamp.ExecutionStateChanged( + messages[2].sequenceNumber, + messages[2].messageId, + Internal.MessageExecutionState.SUCCESS, + "" + ); + + Internal.ExecutionReport memory execReport = _generateReportFromMessages(messages); + + uint256 preGhoTokenBalanceUser = dstGhoToken.balanceOf(USER); + + vm.resumeGasMetering(); + s_offRamp.execute(execReport, new EVM2EVMOffRamp.GasLimitOverride[](0)); + vm.pauseGasMetering(); + + assertEq(preGhoTokenBalanceUser + 6000 * 1e18, dstGhoToken.balanceOf(USER), "Wrong balance on destination"); + assertEq(dstGhoTokenPool.getCurrentBridgedAmount(), 0); + } + + function testRevertRateLimitReached() public { + RateLimiter.Config memory rateLimiterConfig = _getOutboundRateLimiterConfig(); + + // will revert due to rate limit of tokenPool + sendRequestGho(1, rateLimiterConfig.capacity + 1, true, false); + + // max capacity, won't revert + // Mint some GHO to inflate UpgradeableTokenPool facilitator level + _inflateFacilitatorLevel(address(srcGhoTokenPool), address(srcGhoToken), rateLimiterConfig.capacity); + vm.startPrank(OWNER); + sendRequestGho(1, rateLimiterConfig.capacity, false, false); + + // revert due to capacity exceed + sendRequestGho(2, 100, true, false); + + // increase blocktime to refill capacity + vm.warp(BLOCK_TIME + 1); + + // won't revert due to refill + _inflateFacilitatorLevel(address(srcGhoTokenPool), address(srcGhoToken), 100); + vm.startPrank(OWNER); + sendRequestGho(2, 100, false, false); + } + + function testRevertOnLessTokenToCoverFee() public { + sendRequestGho(1, 1000, false, true); + } + + function sendRequestGho( + uint64 expectedSeqNum, + uint256 amount, + bool expectRevert, + bool sendLessFee + ) public returns (Internal.EVM2EVMMessage memory) { + Client.EVM2AnyMessage memory message = _generateSingleTokenMessage(address(srcGhoToken), amount); + uint256 expectedFee = s_sourceRouter.getFee(DEST_CHAIN_SELECTOR, message); + + // err mgmt + uint256 feeToSend = sendLessFee ? expectedFee - 1 : expectedFee; + expectRevert = sendLessFee ? 
true : expectRevert; + + IERC20(s_sourceTokens[0]).approve(address(s_sourceRouter), feeToSend); // fee + IERC20(srcGhoToken).approve(address(s_sourceRouter), amount); // amount + + message.receiver = abi.encode(USER); + Internal.EVM2EVMMessage memory geEvent = _messageToEvent( + message, + expectedSeqNum, + expectedSeqNum, + expectedFee, + OWNER + ); + + if (!expectRevert) { + vm.expectEmit(); + emit EVM2EVMOnRamp.CCIPSendRequested(geEvent); + } else { + vm.expectRevert(); + } + vm.resumeGasMetering(); + s_sourceRouter.ccipSend(DEST_CHAIN_SELECTOR, message); + vm.pauseGasMetering(); + + return geEvent; + } +} diff --git a/contracts/src/v0.8/ccip/test/pools/GHO/GhoTokenPoolRemoteSetup.t.sol b/contracts/src/v0.8/ccip/test/pools/GHO/GhoTokenPoolRemoteSetup.t.sol new file mode 100644 index 0000000000..6283fed245 --- /dev/null +++ b/contracts/src/v0.8/ccip/test/pools/GHO/GhoTokenPoolRemoteSetup.t.sol @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity ^0.8.0; + +import {GhoToken} from "@aave-gho-core/gho/GhoToken.sol"; +import {UpgradeableTokenPool} from "../../../pools/GHO/UpgradeableTokenPool.sol"; +import {Router} from "../../../Router.sol"; +import {BurnMintERC677} from "../../../../shared/token/ERC677/BurnMintERC677.sol"; +import {UpgradeableBurnMintTokenPool} from "../../../pools/GHO/UpgradeableBurnMintTokenPool.sol"; +import {RouterSetup} from "../../router/RouterSetup.t.sol"; +import {BaseTest} from "../../BaseTest.t.sol"; +import {GhoBaseTest} from "./GhoBaseTest.t.sol"; + +contract GhoTokenPoolRemoteSetup is RouterSetup, GhoBaseTest { + event Transfer(address indexed from, address indexed to, uint256 value); + event TokensConsumed(uint256 tokens); + event Burned(address indexed sender, uint256 amount); + + BurnMintERC677 internal s_burnMintERC677; + address internal s_burnMintOffRamp = makeAddr("burn_mint_offRamp"); + address internal s_burnMintOnRamp = makeAddr("burn_mint_onRamp"); + + address internal s_sourcePool = makeAddr("source_pool"); + address internal s_sourceToken = makeAddr("source_token"); + + UpgradeableBurnMintTokenPool internal s_pool; + + function setUp() public virtual override(RouterSetup, BaseTest) { + RouterSetup.setUp(); + + // GHO deployment + GhoToken ghoToken = new GhoToken(AAVE_DAO); + s_burnMintERC677 = BurnMintERC677(address(ghoToken)); + + s_pool = UpgradeableBurnMintTokenPool( + _deployUpgradeableBurnMintTokenPool( + address(s_burnMintERC677), + address(s_mockRMN), + address(s_sourceRouter), + AAVE_DAO, + PROXY_ADMIN + ) + ); + + // Give mint and burn privileges to source UpgradeableTokenPool (GHO-specific related) + vm.stopPrank(); + vm.startPrank(AAVE_DAO); + GhoToken(address(s_burnMintERC677)).grantRole( + GhoToken(address(s_burnMintERC677)).FACILITATOR_MANAGER_ROLE(), + AAVE_DAO + ); + GhoToken(address(s_burnMintERC677)).addFacilitator(address(s_pool), "UpgradeableTokenPool", type(uint128).max); + vm.stopPrank(); + + _applyChainUpdates(address(s_pool)); + } + + function _applyChainUpdates(address pool) internal { + UpgradeableTokenPool.ChainUpdate[] memory chains = new UpgradeableTokenPool.ChainUpdate[](1); + bytes[] memory remotePoolAddresses = new bytes[](1); + remotePoolAddresses[0] = abi.encode(s_sourcePool); + chains[0] = UpgradeableTokenPool.ChainUpdate({ + remoteChainSelector: DEST_CHAIN_SELECTOR, + remotePoolAddresses: remotePoolAddresses, + remoteTokenAddress: abi.encode(s_sourceToken), + outboundRateLimiterConfig: _getOutboundRateLimiterConfig(), + inboundRateLimiterConfig: _getInboundRateLimiterConfig() + }); + + 
vm.startPrank(AAVE_DAO); + UpgradeableBurnMintTokenPool(pool).applyChainUpdates(new uint64[](0), chains); + vm.stopPrank(); + vm.startPrank(OWNER); + + Router.OnRamp[] memory onRampUpdates = new Router.OnRamp[](1); + onRampUpdates[0] = Router.OnRamp({destChainSelector: DEST_CHAIN_SELECTOR, onRamp: s_burnMintOnRamp}); + Router.OffRamp[] memory offRampUpdates = new Router.OffRamp[](1); + offRampUpdates[0] = Router.OffRamp({sourceChainSelector: DEST_CHAIN_SELECTOR, offRamp: s_burnMintOffRamp}); + s_sourceRouter.applyRampUpdates(onRampUpdates, new Router.OffRamp[](0), offRampUpdates); + } +} diff --git a/contracts/src/v0.8/ccip/test/pools/GHO/invariant/GhoTokenPoolEthereumBridgeLimitInvariant.t.sol b/contracts/src/v0.8/ccip/test/pools/GHO/invariant/GhoTokenPoolEthereumBridgeLimitInvariant.t.sol new file mode 100644 index 0000000000..f3af59e445 --- /dev/null +++ b/contracts/src/v0.8/ccip/test/pools/GHO/invariant/GhoTokenPoolEthereumBridgeLimitInvariant.t.sol @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity ^0.8.0; + +import {GhoToken} from "@aave-gho-core/gho/GhoToken.sol"; + +import {UpgradeableLockReleaseTokenPool} from "../../../../pools/GHO/UpgradeableLockReleaseTokenPool.sol"; +import {BaseTest} from "../../../BaseTest.t.sol"; +import {GhoTokenPoolHandler} from "./GhoTokenPoolHandler.t.sol"; + +contract GhoTokenPoolEthereumBridgeLimitInvariant is BaseTest { + GhoTokenPoolHandler handler; + + function setUp() public override { + super.setUp(); + + handler = new GhoTokenPoolHandler(); + deal(handler.tokens(0), address(handler), handler.INITIAL_BRIDGE_LIMIT()); + + targetContract(address(handler)); + bytes4[] memory selectors = new bytes4[](2); + selectors[0] = GhoTokenPoolHandler.bridgeGho.selector; + selectors[1] = GhoTokenPoolHandler.updateBucketCapacity.selector; + targetSelector(FuzzSelector({addr: address(handler), selectors: selectors})); + } + + /// forge-config: ccip.invariant.fail-on-revert = true + /// forge-config: ccip.invariant.runs = 2000 + /// forge-config: ccip.invariant.depth = 50 + function invariant_bridgeLimit() public view { + // Check bridged + assertEq(UpgradeableLockReleaseTokenPool(handler.pools(0)).getCurrentBridgedAmount(), handler.bridged()); + + // Check levels and buckets + uint256 sumLevels; + uint256 chainId; + uint256 capacity; + uint256 level; + uint256[] memory chainsListLocal = handler.getChainsList(); + for (uint i = 1; i < chainsListLocal.length; i++) { + // skip Ethereum (chain id 0) + chainId = chainsListLocal[i]; + (capacity, level) = GhoToken(handler.tokens(chainId)).getFacilitatorBucket(handler.pools(chainId)); + + // Aggregate levels + sumLevels += level; + + assertEq(capacity, handler.bucketCapacities(chainId), "wrong bucket capacity"); + assertEq(level, handler.bucketLevels(chainId), "wrong bucket level"); + + assertGe( + capacity, + UpgradeableLockReleaseTokenPool(handler.pools(0)).getBridgeLimit(), + "capacity must be greater than or equal to bridgeLimit" + ); + + // This invariant only holds if there were no bridge limit reductions below the current bridged amount + if (!handler.capacityBelowLevelUpdate()) { + assertLe( + level, + UpgradeableLockReleaseTokenPool(handler.pools(0)).getBridgeLimit(), + "level cannot be higher than bridgeLimit" + ); + } + } + // Check bridged is equal to sum of levels + assertEq(UpgradeableLockReleaseTokenPool(handler.pools(0)).getCurrentBridgedAmount(), sumLevels, "wrong bridged"); + assertEq(handler.remoteLiquidity(), sumLevels, "wrong remote liquidity"); + } +} diff --git
a/contracts/src/v0.8/ccip/test/pools/GHO/invariant/GhoTokenPoolHandler.t.sol b/contracts/src/v0.8/ccip/test/pools/GHO/invariant/GhoTokenPoolHandler.t.sol new file mode 100644 index 0000000000..4012407bdc --- /dev/null +++ b/contracts/src/v0.8/ccip/test/pools/GHO/invariant/GhoTokenPoolHandler.t.sol @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: BUSL-1.1 +pragma solidity ^0.8.0; + +import {GhoToken} from "@aave-gho-core/gho/GhoToken.sol"; + +import {GhoBaseTest} from "../GhoBaseTest.t.sol"; + +contract GhoTokenPoolHandler is GhoBaseTest { + UtilsStorage public s; + + constructor() { + // Ethereum with id 0 + s.chainsList.push(0); + s.tokens[0] = address(new GhoToken(AAVE_DAO)); + s.pools[0] = _deployUpgradeableLockReleaseTokenPool( + s.tokens[0], + RMN_PROXY, + ROUTER, + AAVE_DAO, + INITIAL_BRIDGE_LIMIT, + PROXY_ADMIN + ); + + // Mock calls for bridging + vm.mockCall(ROUTER, abi.encodeWithSelector(bytes4(keccak256("getOnRamp(uint64)"))), abi.encode(RAMP)); + vm.mockCall(ROUTER, abi.encodeWithSelector(bytes4(keccak256("isOffRamp(uint64,address)"))), abi.encode(true)); + vm.mockCall(RMN_PROXY, abi.encodeWithSelector(bytes4(keccak256("isCursed(bytes16)"))), abi.encode(false)); + + // Arbitrum + _addBridge(s, 1, INITIAL_BRIDGE_LIMIT); + _enableLane(s, 0, 1); + + // Avalanche + _addBridge(s, 2, INITIAL_BRIDGE_LIMIT); + _enableLane(s, 0, 2); + _enableLane(s, 1, 2); + } + + /// forge-config: ccip.fuzz.runs = 500 + function bridgeGho(uint256 fromChain, uint256 toChain, uint256 amount) public { + fromChain = bound(fromChain, 0, 2); + toChain = bound(toChain, 0, 2); + if (fromChain != toChain) { + uint256 maxBalance = GhoToken(s.tokens[fromChain]).balanceOf(address(this)); + uint256 maxToBridge = _getMaxToBridgeOut(s, fromChain); + uint256 maxAmount = maxBalance > maxToBridge ? 
maxToBridge : maxBalance; + amount = bound(amount, 0, maxAmount); + + if (amount > 0) { + _bridgeGho(s, fromChain, toChain, address(this), amount); + } + } + } + + /// forge-config: ccip.fuzz.runs = 500 + function updateBucketCapacity(uint256 chain, uint128 newCapacity) public { + chain = bound(chain, 1, 2); + uint256 otherChain = (chain % 2) + 1; + newCapacity = uint128(bound(newCapacity, s.bridged, type(uint128).max)); + + uint256 oldCapacity = s.bucketCapacities[chain]; + + if (newCapacity < s.bucketLevels[chain]) { + s.capacityBelowLevelUpdate = true; + } else { + s.capacityBelowLevelUpdate = false; + } + + if (newCapacity > oldCapacity) { + // Increase + _updateBucketCapacity(s, chain, newCapacity); + // keep bridge limit as the minimum bucket capacity + if (newCapacity < s.bucketCapacities[otherChain]) { + _updateBridgeLimit(s, newCapacity); + } + } else { + // Reduction + // keep bridge limit as the minimum bucket capacity + if (newCapacity < s.bucketCapacities[otherChain]) { + _updateBridgeLimit(s, newCapacity); + } + _updateBucketCapacity(s, chain, newCapacity); + } + } + + function getChainsList() public view returns (uint256[] memory) { + return s.chainsList; + } + + function pools(uint256 i) public view returns (address) { + return s.pools[i]; + } + + function tokens(uint256 i) public view returns (address) { + return s.tokens[i]; + } + + function bucketCapacities(uint256 i) public view returns (uint256) { + return s.bucketCapacities[i]; + } + + function bucketLevels(uint256 i) public view returns (uint256) { + return s.bucketLevels[i]; + } + + function liquidity(uint256 i) public view returns (uint256) { + return s.liquidity[i]; + } + + function remoteLiquidity() public view returns (uint256) { + return s.remoteLiquidity; + } + + function bridged() public view returns (uint256) { + return s.bridged; + } + + function capacityBelowLevelUpdate() public view returns (bool) { + return s.capacityBelowLevelUpdate; + } +} diff --git a/contracts/src/v0.8/shared/access/Ownable2Step.sol b/contracts/src/v0.8/shared/access/Ownable2Step.sol index 5eac576072..4da7878927 100644 --- a/contracts/src/v0.8/shared/access/Ownable2Step.sol +++ b/contracts/src/v0.8/shared/access/Ownable2Step.sol @@ -46,7 +46,7 @@ contract Ownable2Step is IOwnable { /// @notice validate, transfer ownership, and emit relevant events /// @param to The address to which ownership will be transferred. - function _transferOwnership(address to) private { + function _transferOwnership(address to) internal { if (to == msg.sender) { revert CannotTransferToSelf(); }