diff --git a/.config/lychee.toml b/.config/lychee.toml index 733b77ec0cff..ad6a0ef75545 100644 --- a/.config/lychee.toml +++ b/.config/lychee.toml @@ -32,6 +32,7 @@ exclude = [ "https://github.com/paritytech/polkadot-sdk/substrate/frame/timestamp", "https://github.com/paritytech/substrate/frame/fast-unstake", "https://github.com/zkcrypto/bls12_381/blob/e224ad4ea1babfc582ccd751c2bf128611d10936/src/test-data/mod.rs", + "https://polkadot-try-runtime-node.parity-chains.parity.io/", "https://polkadot.network/the-path-of-a-parachain-block/", "https://research.web3.foundation/en/latest/polkadot/BABE/Babe/#6-practical-results", "https://research.web3.foundation/en/latest/polkadot/NPoS/3.%20Balancing.html", diff --git a/.forklift/config.toml b/.forklift/config.toml new file mode 100644 index 000000000000..403a452aa036 --- /dev/null +++ b/.forklift/config.toml @@ -0,0 +1,30 @@ +[compression] +type = "zstd" + +[compression.zstd] +compressionLevel = 3 + +[general] +jobNameVariable = "CI_JOB_NAME" +jobsBlackList = [] +logLevel = "warn" +threadsCount = 6 + +[metrics] +enabled = true +pushEndpoint = "placeholder" + +[metrics.extraLabels] +environment = "production" +job_name = "$CI_JOB_NAME" +project_name = "$CI_PROJECT_PATH" + +[storage] +type = "s3" + +[storage.s3] +accessKeyId = "placeholder" +bucketName = "placeholder" +concurrency = 10 +endpointUrl = "placeholder" +secretAccessKey = "placeholder" diff --git a/.github/workflows/check-runtime-migration.yml b/.github/workflows/check-runtime-migration.yml new file mode 100644 index 000000000000..5de576e2fe4c --- /dev/null +++ b/.github/workflows/check-runtime-migration.yml @@ -0,0 +1,119 @@ +name: check-runtime-migration + +on: + pull_request: + types: [opened, synchronize, reopened, ready_for_review] + merge_group: +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +env: + FORKLIFT_storage_s3_bucketName: ${{ secrets.FORKLIFT_storage_s3_bucketName }} + FORKLIFT_storage_s3_accessKeyId: ${{ secrets.FORKLIFT_storage_s3_accessKeyId }} + FORKLIFT_storage_s3_secretAccessKey: ${{ secrets.FORKLIFT_storage_s3_secretAccessKey }} + FORKLIFT_storage_s3_endpointUrl: ${{ secrets.FORKLIFT_storage_s3_endpointUrl }} + FORKLIFT_metrics_pushEndpoint: ${{ secrets.FORKLIFT_metrics_pushEndpoint }} + +jobs: + set-image: + # GitHub Actions allows using 'env' in a container context. + # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322 + # This workaround sets the container image for each job using 'set-image' job output. 
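The `.forklift/config.toml` added above ships placeholder values for the S3 cache and metrics endpoints, while the workflow `env:` block exports `FORKLIFT_storage_s3_*` and `FORKLIFT_metrics_pushEndpoint` from repository secrets. The variable names mirror the TOML key paths, so the secrets presumably override the placeholders when Forklift runs in CI. A minimal sketch of reproducing that setup outside CI, under that override assumption (all exported values below are hypothetical; only the config copy and the `forklift cargo` invocation are taken from this diff):

```bash
# Hypothetical local reproduction of the CI cache setup. That FORKLIFT_* env vars
# override the matching keys in .forklift/config.toml is an assumption; the
# placeholder config file and the variable names come from this diff.
export FORKLIFT_storage_s3_bucketName="example-cache-bucket"        # hypothetical
export FORKLIFT_storage_s3_accessKeyId="EXAMPLEKEYID"               # hypothetical
export FORKLIFT_storage_s3_secretAccessKey="example-secret"         # hypothetical
export FORKLIFT_storage_s3_endpointUrl="https://s3.example.com"     # hypothetical
export FORKLIFT_metrics_pushEndpoint="https://metrics.example.com"  # hypothetical

# .gitlab-ci.yml now copies the in-repo config instead of a CI-provided file:
mkdir -p ~/.forklift
cp .forklift/config.toml ~/.forklift/config.toml

# Same wrapper invocation the new workflows use for cached builds:
forklift cargo build --release --locked -p asset-hub-westend-runtime --features try-runtime
```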
+ runs-on: ubuntu-latest + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + # rococo and westend are disabled for now (no access to parity-chains.parity.io) + check-runtime-migration: + runs-on: arc-runners-polkadot-sdk-beefy + timeout-minutes: 30 + needs: [set-image] + container: + image: ${{ needs.set-image.outputs.IMAGE }} + strategy: + fail-fast: false + matrix: + network: [ + # westend, + # rococo, + asset-hub-westend, + asset-hub-rococo, + bridge-hub-westend, + bridge-hub-rococo, + contracts-rococo, + collectives-westend, + coretime-rococo, + ] + include: + # - network: westend + # package: westend-runtime + # wasm: westend_runtime.compact.compressed.wasm + # uri: "wss://westend-try-runtime-node.parity-chains.parity.io:443" + # subcommand_extra_args: "--no-weight-warnings" + # command_extra_args: "" + # - network: rococo + # package: rococo-runtime + # wasm: rococo_runtime.compact.compressed.wasm + # uri: "wss://rococo-try-runtime-node.parity-chains.parity.io:443" + # subcommand_extra_args: "--no-weight-warnings" + # command_extra_args: "" + - network: asset-hub-westend + package: asset-hub-westend-runtime + wasm: asset_hub_westend_runtime.compact.compressed.wasm + uri: "wss://westend-asset-hub-rpc.polkadot.io:443" + subcommand_extra_args: "" + command_extra_args: "" + - network: "asset-hub-rococo" + package: "asset-hub-rococo-runtime" + wasm: "asset_hub_rococo_runtime.compact.compressed.wasm" + uri: "wss://rococo-asset-hub-rpc.polkadot.io:443" + subcommand_extra_args: "" + command_extra_args: "" + - network: "bridge-hub-westend" + package: "bridge-hub-westend-runtime" + wasm: "bridge_hub_westend_runtime.compact.compressed.wasm" + uri: "wss://westend-bridge-hub-rpc.polkadot.io:443" + - network: "bridge-hub-rococo" + package: "bridge-hub-rococo-runtime" + wasm: "bridge_hub_rococo_runtime.compact.compressed.wasm" + uri: "wss://rococo-bridge-hub-rpc.polkadot.io:443" + - network: "contracts-rococo" + package: "contracts-rococo-runtime" + wasm: "contracts_rococo_runtime.compact.compressed.wasm" + uri: "wss://rococo-contracts-rpc.polkadot.io:443" + - network: "collectives-westend" + package: "collectives-westend-runtime" + wasm: "collectives_westend_runtime.compact.compressed.wasm" + uri: "wss://westend-collectives-rpc.polkadot.io:443" + command_extra_args: "--disable-spec-name-check" + - network: "coretime-rococo" + package: "coretime-rococo-runtime" + wasm: "coretime_rococo_runtime.compact.compressed.wasm" + uri: "wss://rococo-coretime-rpc.polkadot.io:443" + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: script + run: | + echo "Running ${{ matrix.network }} runtime migration check" + export RUST_LOG=remote-ext=debug,runtime=debug + + echo "---------- Downloading try-runtime CLI ----------" + curl -sL https://github.com/paritytech/try-runtime-cli/releases/download/v0.5.4/try-runtime-x86_64-unknown-linux-musl -o try-runtime + chmod +x ./try-runtime + echo "Using try-runtime-cli version:" + ./try-runtime --version + + echo "---------- Building ${{ matrix.package }} runtime ----------" + time forklift cargo build --release --locked -p ${{ matrix.package }} --features try-runtime + + echo "---------- Executing on-runtime-upgrade for ${{ matrix.network }} ----------" + time ./try-runtime ${{ matrix.command_extra_args }} \ + --runtime ./target/release/wbuild/${{ matrix.package }}/${{ matrix.wasm }} \ + on-runtime-upgrade --disable-spec-version-check 
--checks=all ${{ matrix.subcommand_extra_args }} live --uri ${{ matrix.uri }} + sleep 5 diff --git a/.github/workflows/quick-checks.yml b/.github/workflows/quick-checks.yml index 7bf1983a1f69..a56bf12bb162 100644 --- a/.github/workflows/quick-checks.yml +++ b/.github/workflows/quick-checks.yml @@ -14,7 +14,7 @@ jobs: # GitHub Actions allows using 'env' in a container context. # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322 # This workaround sets the container image for each job using 'set-image' job output. - runs-on: arc-runners-polkadot-sdk-default + runs-on: arc-runners-polkadot-sdk timeout-minutes: 10 outputs: IMAGE: ${{ steps.set_image.outputs.IMAGE }} @@ -24,7 +24,7 @@ jobs: - id: set_image run: cat .github/env >> $GITHUB_OUTPUT fmt: - runs-on: arc-runners-polkadot-sdk-default + runs-on: arc-runners-polkadot-sdk timeout-minutes: 10 needs: [set-image] container: @@ -34,7 +34,7 @@ jobs: - name: Cargo fmt run: cargo +nightly fmt --all -- --check check-dependency-rules: - runs-on: arc-runners-polkadot-sdk-default + runs-on: arc-runners-polkadot-sdk timeout-minutes: 10 needs: [set-image] container: @@ -46,8 +46,7 @@ jobs: cd substrate/ ../.gitlab/ensure-deps.sh check-rust-feature-propagation: - runs-on: arc-runners-polkadot-sdk-default - # runs-on: ubuntu-latest + runs-on: arc-runners-polkadot-sdk timeout-minutes: 10 needs: [set-image] container: @@ -57,8 +56,7 @@ jobs: - name: run zepter run: zepter run check test-rust-features: - runs-on: arc-runners-polkadot-sdk-default - # runs-on: ubuntu-latest + runs-on: arc-runners-polkadot-sdk timeout-minutes: 10 needs: [set-image] container: @@ -68,7 +66,7 @@ jobs: - name: run rust features run: bash .gitlab/rust-features.sh . check-toml-format: - runs-on: arc-runners-polkadot-sdk-default + runs-on: arc-runners-polkadot-sdk timeout-minutes: 10 needs: [set-image] container: diff --git a/.github/workflows/test-github-actions.yml b/.github/workflows/test-github-actions.yml deleted file mode 100644 index e35ee0994863..000000000000 --- a/.github/workflows/test-github-actions.yml +++ /dev/null @@ -1,57 +0,0 @@ -name: test-github-actions - -on: - pull_request: - types: [opened, synchronize, reopened, ready_for_review] - merge_group: -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} - cancel-in-progress: true - -jobs: - set-image: - # GitHub Actions allows using 'env' in a container context. - # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322 - # This workaround sets the container image for each job using 'set-image' job output. - runs-on: ubuntu-latest - outputs: - IMAGE: ${{ steps.set_image.outputs.IMAGE }} - steps: - - name: Checkout - uses: actions/checkout@v4 - - id: set_image - run: cat .github/env >> $GITHUB_OUTPUT - test-linux-stable-int: - runs-on: arc-runners-polkadot-sdk-beefy - timeout-minutes: 30 - needs: [set-image] - container: - image: ${{ needs.set-image.outputs.IMAGE }} - env: - RUSTFLAGS: "-C debug-assertions -D warnings" - RUST_BACKTRACE: 1 - WASM_BUILD_NO_COLOR: 1 - WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings" - # Ensure we run the UI tests. 
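For reference, the check-runtime-migration workflow added earlier in this diff drives every network through `${{ matrix.* }}` templating, which makes the actual command hard to read. Below is a sketch of the same check expanded by hand for the asset-hub-westend matrix entry, using only values that appear in the matrix and script above (`command_extra_args` and `subcommand_extra_args` are empty for this network); run locally, plain `cargo` can stand in for the `forklift` cache wrapper:

```bash
# check-runtime-migration, resolved for the asset-hub-westend matrix entry.
export RUST_LOG=remote-ext=debug,runtime=debug

# Fetch the pinned try-runtime CLI release used by the workflow.
curl -sL https://github.com/paritytech/try-runtime-cli/releases/download/v0.5.4/try-runtime-x86_64-unknown-linux-musl -o try-runtime
chmod +x ./try-runtime
./try-runtime --version

# Build the runtime with the try-runtime feature enabled.
forklift cargo build --release --locked -p asset-hub-westend-runtime --features try-runtime

# Execute the on-runtime-upgrade checks against the live chain state.
./try-runtime \
  --runtime ./target/release/wbuild/asset-hub-westend-runtime/asset_hub_westend_runtime.compact.compressed.wasm \
  on-runtime-upgrade --disable-spec-version-check --checks=all \
  live --uri wss://westend-asset-hub-rpc.polkadot.io:443
```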
- RUN_UI_TESTS: 1 - steps: - - name: Checkout - uses: actions/checkout@v4 - - name: script - run: WASM_BUILD_NO_COLOR=1 time cargo test -p staging-node-cli --release --locked -- --ignored - quick-benchmarks: - runs-on: arc-runners-polkadot-sdk-beefy - timeout-minutes: 30 - needs: [set-image] - container: - image: ${{ needs.set-image.outputs.IMAGE }} - env: - RUSTFLAGS: "-C debug-assertions -D warnings" - RUST_BACKTRACE: "full" - WASM_BUILD_NO_COLOR: 1 - WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings" - steps: - - name: Checkout - uses: actions/checkout@v4 - - name: script - run: time cargo run --locked --release -p staging-node-cli --bin substrate-node --features runtime-benchmarks -- benchmark pallet --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 --quiet diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 000000000000..8f46959cccc4 --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,101 @@ +name: tests + +on: + pull_request: + types: [opened, synchronize, reopened, ready_for_review] + merge_group: +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +env: + FORKLIFT_storage_s3_bucketName: ${{ secrets.FORKLIFT_storage_s3_bucketName }} + FORKLIFT_storage_s3_accessKeyId: ${{ secrets.FORKLIFT_storage_s3_accessKeyId }} + FORKLIFT_storage_s3_secretAccessKey: ${{ secrets.FORKLIFT_storage_s3_secretAccessKey }} + FORKLIFT_storage_s3_endpointUrl: ${{ secrets.FORKLIFT_storage_s3_endpointUrl }} + FORKLIFT_metrics_pushEndpoint: ${{ secrets.FORKLIFT_metrics_pushEndpoint }} + +jobs: + set-image: + # GitHub Actions allows using 'env' in a container context. + # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322 + # This workaround sets the container image for each job using 'set-image' job output. + runs-on: ubuntu-latest + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + test-linux-stable-int: + runs-on: arc-runners-polkadot-sdk-beefy + timeout-minutes: 30 + needs: [set-image] + container: + image: ${{ needs.set-image.outputs.IMAGE }} + env: + RUSTFLAGS: "-C debug-assertions -D warnings" + RUST_BACKTRACE: 1 + WASM_BUILD_NO_COLOR: 1 + WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings" + # Ensure we run the UI tests. 
+ RUN_UI_TESTS: 1 + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: script + run: WASM_BUILD_NO_COLOR=1 time forklift cargo test -p staging-node-cli --release --locked -- --ignored + quick-benchmarks: + runs-on: arc-runners-polkadot-sdk-beefy + timeout-minutes: 30 + needs: [set-image] + container: + image: ${{ needs.set-image.outputs.IMAGE }} + env: + RUSTFLAGS: "-C debug-assertions -D warnings" + RUST_BACKTRACE: "full" + WASM_BUILD_NO_COLOR: 1 + WASM_BUILD_RUSTFLAGS: "-C debug-assertions -D warnings" + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: script + run: time forklift cargo run --locked --release -p staging-node-cli --bin substrate-node --features runtime-benchmarks -- benchmark pallet --chain dev --pallet "*" --extrinsic "*" --steps 2 --repeat 1 --quiet + # cf https://github.com/paritytech/polkadot-sdk/issues/1652 + test-syscalls: + runs-on: arc-runners-polkadot-sdk-beefy + timeout-minutes: 30 + needs: [set-image] + container: + image: ${{ needs.set-image.outputs.IMAGE }} + continue-on-error: true # this rarely triggers in practice + env: + SKIP_WASM_BUILD: 1 + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: script + run: | + forklift cargo build --locked --profile production --target x86_64-unknown-linux-musl --bin polkadot-execute-worker --bin polkadot-prepare-worker + cd polkadot/scripts/list-syscalls + ./list-syscalls.rb ../../../target/x86_64-unknown-linux-musl/production/polkadot-execute-worker --only-used-syscalls | diff -u execute-worker-syscalls - + ./list-syscalls.rb ../../../target/x86_64-unknown-linux-musl/production/polkadot-prepare-worker --only-used-syscalls | diff -u prepare-worker-syscalls - + # todo: + # after_script: + # - if [[ "$CI_JOB_STATUS" == "failed" ]]; then + # printf "The x86_64 syscalls used by the worker binaries have changed. 
Please review if this is expected and update polkadot/scripts/list-syscalls/*-worker-syscalls as needed.\n"; + # fi + cargo-check-all-benches: + runs-on: arc-runners-polkadot-sdk-beefy + timeout-minutes: 30 + needs: [set-image] + container: + image: ${{ needs.set-image.outputs.IMAGE }} + env: + SKIP_WASM_BUILD: 1 + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: script + run: time forklift cargo check --all --benches diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 5e57dd86f141..73a8c52c448f 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -120,7 +120,7 @@ default: .forklift-cache: before_script: - mkdir ~/.forklift - - cp $FL_FORKLIFT_CONFIG ~/.forklift/config.toml + - cp .forklift/config.toml ~/.forklift/config.toml - > if [ "$FORKLIFT_BYPASS" != "true" ]; then echo "FORKLIFT_BYPASS not set"; diff --git a/.gitlab/pipeline/check.yml b/.gitlab/pipeline/check.yml index 6fb8a97fe958..5c1a667a313c 100644 --- a/.gitlab/pipeline/check.yml +++ b/.gitlab/pipeline/check.yml @@ -146,96 +146,6 @@ check-runtime-migration-rococo: URI: "wss://rococo-try-runtime-node.parity-chains.parity.io:443" SUBCOMMAND_EXTRA_ARGS: "--no-weight-warnings" -# Check runtime migrations for Parity managed asset hub chains -check-runtime-migration-asset-hub-westend: - stage: check - extends: - - .docker-env - - .test-pr-refs - - .check-runtime-migration - variables: - NETWORK: "asset-hub-westend" - PACKAGE: "asset-hub-westend-runtime" - WASM: "asset_hub_westend_runtime.compact.compressed.wasm" - URI: "wss://westend-asset-hub-rpc.polkadot.io:443" - -check-runtime-migration-asset-hub-rococo: - stage: check - extends: - - .docker-env - - .test-pr-refs - - .check-runtime-migration - variables: - NETWORK: "asset-hub-rococo" - PACKAGE: "asset-hub-rococo-runtime" - WASM: "asset_hub_rococo_runtime.compact.compressed.wasm" - URI: "wss://rococo-asset-hub-rpc.polkadot.io:443" - -# Check runtime migrations for Parity managed bridge hub chains -check-runtime-migration-bridge-hub-westend: - stage: check - extends: - - .docker-env - - .test-pr-refs - - .check-runtime-migration - variables: - NETWORK: "bridge-hub-westend" - PACKAGE: "bridge-hub-westend-runtime" - WASM: "bridge_hub_westend_runtime.compact.compressed.wasm" - URI: "wss://westend-bridge-hub-rpc.polkadot.io:443" - -check-runtime-migration-bridge-hub-rococo: - stage: check - extends: - - .docker-env - - .test-pr-refs - - .check-runtime-migration - variables: - NETWORK: "bridge-hub-rococo" - PACKAGE: "bridge-hub-rococo-runtime" - WASM: "bridge_hub_rococo_runtime.compact.compressed.wasm" - URI: "wss://rococo-bridge-hub-rpc.polkadot.io:443" - -# Check runtime migrations for Parity managed contract chains -check-runtime-migration-contracts-rococo: - stage: check - extends: - - .docker-env - - .test-pr-refs - - .check-runtime-migration - variables: - NETWORK: "contracts-rococo" - PACKAGE: "contracts-rococo-runtime" - WASM: "contracts_rococo_runtime.compact.compressed.wasm" - URI: "wss://rococo-contracts-rpc.polkadot.io:443" - -# Check runtime migrations for Parity managed collectives chains -check-runtime-migration-collectives-westend: - stage: check - extends: - - .docker-env - - .test-pr-refs - - .check-runtime-migration - variables: - NETWORK: "collectives-westend" - PACKAGE: "collectives-westend-runtime" - WASM: "collectives_westend_runtime.compact.compressed.wasm" - URI: "wss://westend-collectives-rpc.polkadot.io:443" - COMMAND_EXTRA_ARGS: "--disable-spec-name-check" - -# Check runtime migrations for Parity managed coretime chain 
-check-runtime-migration-coretime-rococo: - stage: check - extends: - - .docker-env - - .test-pr-refs - - .check-runtime-migration - variables: - NETWORK: "coretime-rococo" - PACKAGE: "coretime-rococo-runtime" - WASM: "coretime_rococo_runtime.compact.compressed.wasm" - URI: "wss://rococo-coretime-rpc.polkadot.io:443" - find-fail-ci-phrase: stage: check variables: diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index 796e4d653104..7976c2ad2e0f 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -24,7 +24,7 @@ # # -# +# codecov-start: stage: test when: manual @@ -53,11 +53,11 @@ codecov-finish: extends: - .kubernetes-env - .common-refs - - .pipeline-stopper-artifacts + - .pipeline-stopper-artifacts needs: - test-linux-stable-codecov script: - - !reference [.codecov-check, script] + - !reference [.codecov-check, script] - codecovcli -v create-report-results -t ${CODECOV_TOKEN} -r paritytech/polkadot-sdk --commit-sha ${CI_COMMIT_SHA} --git-service github - codecovcli -v get-report-results -t ${CODECOV_TOKEN} -r paritytech/polkadot-sdk --commit-sha ${CI_COMMIT_SHA} --git-service github - codecovcli -v send-notifications -t ${CODECOV_TOKEN} -r paritytech/polkadot-sdk --commit-sha ${CI_COMMIT_SHA} --git-service github @@ -78,14 +78,14 @@ test-linux-stable-codecov: RUST_TOOLCHAIN: stable RUSTFLAGS: "-Cdebug-assertions=y -Cinstrument-coverage" LLVM_PROFILE_FILE: "target/coverage/cargo-test-${CI_NODE_INDEX}-%p-%m.profraw" - CARGO_INCREMENTAL: 0 - FORKLIFT_BYPASS: "true" + CARGO_INCREMENTAL: 0 + FORKLIFT_BYPASS: "true" parallel: 2 script: # tools - - !reference [.codecov-check, script] + - !reference [.codecov-check, script] - rustup component add llvm-tools-preview - - mkdir -p target/coverage/result/ + - mkdir -p target/coverage/result/ # Place real test call here - > time cargo nextest run -p polkadot \ @@ -102,15 +102,15 @@ test-linux-stable-codecov: -t lcov \ --branch \ -o target/coverage/result/report-${CI_NODE_INDEX}.lcov - - ls -l target/coverage/result/ + - ls -l target/coverage/result/ - > if [ "$CI_COMMIT_REF_NAME" != "master" ]; then codecovcli -v do-upload -f target/coverage/result/report-${CI_NODE_INDEX}.lcov --disable-search -t ${CODECOV_TOKEN} -r paritytech/polkadot-sdk --commit-sha ${CI_COMMIT_SHA} --fail-on-error --pr ${CI_COMMIT_REF_NAME} --git-service github; else codecovcli -v do-upload -f target/coverage/result/report-${CI_NODE_INDEX}.lcov --disable-search -t ${CODECOV_TOKEN} -r paritytech/polkadot-sdk --commit-sha ${CI_COMMIT_SHA} --fail-on-error --git-service github; fi - - # + + # test-linux-stable: stage: test @@ -254,18 +254,6 @@ test-rustdoc: script: - time cargo doc --workspace --all-features --no-deps -cargo-check-all-benches: - stage: test - extends: - - .docker-env - - .common-refs - # DAG - needs: - - job: cargo-hfuzz - artifacts: false - script: - - time cargo check --all --benches - test-node-metrics: stage: test extends: @@ -598,26 +586,6 @@ cargo-hfuzz: - for target in $(cargo read-manifest | jq -r '.targets | .[] | .name'); do cargo hfuzz run "$target" || { printf "fuzzing failure for %s\n" "$target"; exit 1; }; done -# cf https://github.com/paritytech/polkadot-sdk/issues/1652 -test-syscalls: - stage: test - extends: - - .docker-env - - .common-refs - - .run-immediately - variables: - SKIP_WASM_BUILD: 1 - script: - - cargo build --locked --profile production --target x86_64-unknown-linux-musl --bin polkadot-execute-worker --bin polkadot-prepare-worker - - cd polkadot/scripts/list-syscalls - - ./list-syscalls.rb 
../../../target/x86_64-unknown-linux-musl/production/polkadot-execute-worker --only-used-syscalls | diff -u execute-worker-syscalls - - - ./list-syscalls.rb ../../../target/x86_64-unknown-linux-musl/production/polkadot-prepare-worker --only-used-syscalls | diff -u prepare-worker-syscalls - - after_script: - - if [[ "$CI_JOB_STATUS" == "failed" ]]; then - printf "The x86_64 syscalls used by the worker binaries have changed. Please review if this is expected and update polkadot/scripts/list-syscalls/*-worker-syscalls as needed.\n"; - fi - allow_failure: false # this rarely triggers in practice - .subsystem-benchmark-template: stage: test artifacts: diff --git a/Cargo.lock b/Cargo.lock index d43088704b5d..7711d51d0df6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -157,9 +157,9 @@ dependencies = [ "dunce", "heck 0.4.1", "proc-macro-error", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", "syn-solidity", "tiny-keccak", ] @@ -284,9 +284,9 @@ dependencies = [ "include_dir", "itertools 0.10.5", "proc-macro-error", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -515,7 +515,7 @@ checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" dependencies = [ "num-bigint", "num-traits", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", ] @@ -617,7 +617,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", ] @@ -705,7 +705,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", "synstructure", @@ -717,7 +717,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", ] @@ -830,7 +830,6 @@ dependencies = [ "pallet-nfts-runtime-api", "pallet-proxy", "pallet-session", - "pallet-state-trie-migration", "pallet-timestamp", "pallet-transaction-payment", "pallet-transaction-payment-rpc-runtime-api", @@ -1217,9 +1216,9 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -1234,9 +1233,9 @@ version = "0.1.79" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -1282,7 +1281,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fee3da8ef1276b0bee5dd1c7258010d8fffd31801447323115a25560e1327b89" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", ] @@ -1424,12 +1423,12 @@ dependencies = [ "lazycell", "peeking_take_while", "prettyplease 0.2.12", - "proc-macro2 1.0.75", + 
"proc-macro2 1.0.82", "quote 1.0.35", "regex", "rustc-hash", "shlex", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -2603,6 +2602,15 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "ckb-merkle-mountain-range" +version = "0.6.0" +source = "git+https://github.com/paritytech/merkle-mountain-range.git?branch=master#537f0e3f67c5adf7afff0800bbb81f02f17570a1" +dependencies = [ + "cfg-if", + "itertools 0.10.5", +] + [[package]] name = "clang-sys" version = "1.6.1" @@ -2695,7 +2703,7 @@ checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008" dependencies = [ "heck 0.4.1", "proc-macro-error", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", ] @@ -2707,9 +2715,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90239a040c80f5e14809ca132ddc4176ab33d5e17e49691793296e3fcb34d72f" dependencies = [ "heck 0.5.0", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -2897,7 +2905,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d51beaa537d73d2d1ff34ee70bc095f170420ab2ec5d687ecd3ec2b0d092514b" dependencies = [ "nom", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", ] @@ -3945,10 +3953,10 @@ dependencies = [ name = "cumulus-pallet-parachain-system-proc-macro" version = "0.6.0" dependencies = [ - "proc-macro-crate 3.0.0", - "proc-macro2 1.0.75", + "proc-macro-crate 3.1.0", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -4507,9 +4515,9 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -4546,10 +4554,10 @@ dependencies = [ "cc", "codespan-reporting", "once_cell", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "scratch", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -4564,9 +4572,9 @@ version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50c49547d73ba8dcfd4ad7325d64c6d5391ff4224d498fc39a6f3f49825a530d" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -4653,7 +4661,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", ] @@ -4664,7 +4672,7 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e79116f119dd1dba1abf1f3405f03b9b0e79a27a3883864bfebded8a3dc768cd" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", ] @@ -4675,9 +4683,9 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d65d7ce8132b7c0e54497a4d9a55a1c2a0912a0d786cf894472ba818fba45762" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -4687,7 +4695,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case", - "proc-macro2 
1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "rustc_version 0.4.0", "syn 1.0.109", @@ -4783,9 +4791,9 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -4843,10 +4851,10 @@ dependencies = [ "common-path", "derive-syn-parse 0.2.0", "once_cell", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "regex", - "syn 2.0.53", + "syn 2.0.61", "termcolor", "toml 0.8.8", "walkdir", @@ -4892,7 +4900,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", ] @@ -5061,7 +5069,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", ] @@ -5073,9 +5081,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -5093,9 +5101,9 @@ version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -5104,9 +5112,9 @@ version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2ad8cef1d801a4686bfd8919f0b30eac4c8e48968c437a6405ded4fb5272d2b" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -5291,18 +5299,6 @@ dependencies = [ "futures", ] -[[package]] -name = "expander" -version = "0.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a718c0675c555c5f976fff4ea9e2c150fa06cefa201cadef87cfbf9324075881" -dependencies = [ - "blake3", - "fs-err", - "proc-macro2 1.0.75", - "quote 1.0.35", -] - [[package]] name = "expander" version = "2.0.0" @@ -5311,9 +5307,9 @@ checksum = "5f86a749cf851891866c10515ef6c299b5c69661465e9c3bbe7e07a2b77fb0f7" dependencies = [ "blake2 0.10.6", "fs-err", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -5366,26 +5362,27 @@ dependencies = [ [[package]] name = "fatality" -version = "0.0.6" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ad875162843b0d046276327afe0136e9ed3a23d5a754210fb6f1f33610d39ab" +checksum = "61ecdc33d04db74fc23db9f54f6f314c61d29f810d58ba423d0c204888365458" dependencies = [ "fatality-proc-macro", + "syn 2.0.61", "thiserror", ] [[package]] name = "fatality-proc-macro" -version = "0.0.6" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5aa1e3ae159e592ad222dc90c5acbad632b527779ba88486abe92782ab268bd" +checksum = "ac67b350787fd4a934752e30ddb45569da000e14bf3e499224302778b7a918ab" dependencies = [ 
- "expander 0.0.4", - "indexmap 1.9.3", - "proc-macro-crate 1.3.1", - "proc-macro2 1.0.75", + "expander", + "indexmap 2.2.3", + "proc-macro-crate 3.1.0", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 1.0.109", + "syn 2.0.61", "thiserror", ] @@ -5708,12 +5705,12 @@ dependencies = [ "frame-election-provider-support", "frame-support", "parity-scale-codec", - "proc-macro-crate 3.0.0", - "proc-macro2 1.0.75", + "proc-macro-crate 3.1.0", + "proc-macro2 1.0.82", "quote 1.0.35", "scale-info", "sp-arithmetic", - "syn 2.0.53", + "syn 2.0.61", "trybuild", ] @@ -5875,16 +5872,16 @@ dependencies = [ "Inflector", "cfg-expr", "derive-syn-parse 0.2.0", - "expander 2.0.0", + "expander", "frame-support-procedural-tools", "itertools 0.11.0", "macro_magic", "proc-macro-warning", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "regex", "sp-crypto-hashing", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -5892,19 +5889,19 @@ name = "frame-support-procedural-tools" version = "10.0.0" dependencies = [ "frame-support-procedural-tools-derive", - "proc-macro-crate 3.0.0", - "proc-macro2 1.0.75", + "proc-macro-crate 3.1.0", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] name = "frame-support-procedural-tools-derive" version = "11.0.0" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -6135,9 +6132,9 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -6806,7 +6803,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", ] @@ -6826,7 +6823,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b139284b5cf57ecfa712bcc66950bb635b31aff41c188e8a4cfc758eca374a3f" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", ] @@ -7139,10 +7136,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d0bb047e79a143b32ea03974a6bf59b62c2a4c5f5d42a381c907a8bbb3f75c0" dependencies = [ "heck 0.4.1", - "proc-macro-crate 3.0.0", - "proc-macro2 1.0.75", + "proc-macro-crate 3.1.0", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -8206,7 +8203,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -8218,9 +8215,9 @@ dependencies = [ "const-random", "derive-syn-parse 0.1.5", "macro_magic_core_macros", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -8229,9 +8226,9 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ea73aa640dc01d62a590d48c0c3521ed739d53b27f919b25c3551e233481654" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -8242,7 +8239,7 @@ checksum = "ef9d79ae96aaba821963320eb2b6e34d17df1e5a83d8a1985c29cc5be59577b3" dependencies = [ "macro_magic_core", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -8575,7 
+8572,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" dependencies = [ "cfg-if", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", ] @@ -8682,7 +8679,7 @@ checksum = "fc076939022111618a5026d3be019fd8b366e76314538ff9a1b59ffbcbf98bcd" dependencies = [ "proc-macro-crate 1.3.1", "proc-macro-error", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", "synstructure", @@ -8707,7 +8704,7 @@ checksum = "d38685e08adb338659871ecfc6ee47ba9b22dcc8abcf6975d379cc49145c3040" dependencies = [ "proc-macro-crate 1.3.1", "proc-macro-error", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", "synstructure", @@ -8755,7 +8752,7 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91761aed67d03ad966ef783ae962ef9bbaca728d2dd7ceb7939ec110fffad998" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", ] @@ -9285,9 +9282,9 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -9326,9 +9323,9 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "orchestra" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2356622ffdfe72362a45a1e5e87bb113b8327e596e39b91f11f0ef4395c8da79" +checksum = "92829eef0328a3d1cd22a02c0e51deb92a5362df3e7d21a4e9bdc38934694e66" dependencies = [ "async-trait", "dyn-clonable", @@ -9343,16 +9340,16 @@ dependencies = [ [[package]] name = "orchestra-proc-macro" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eedb646674596266dc9bb2b5c7eea7c36b32ecc7777eba0d510196972d72c4fd" +checksum = "1344346d5af32c95bbddea91b18a88cc83eac394192d20ef2fc4c40a74332355" dependencies = [ - "expander 2.0.0", + "expander", "indexmap 2.2.3", "itertools 0.11.0", "petgraph", - "proc-macro-crate 1.3.1", - "proc-macro2 1.0.75", + "proc-macro-crate 3.1.0", + "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", ] @@ -9760,7 +9757,7 @@ dependencies = [ "bp-beefy", "bp-runtime", "bp-test-utils", - "ckb-merkle-mountain-range", + "ckb-merkle-mountain-range 0.5.2", "frame-support", "frame-system", "log", @@ -10060,9 +10057,9 @@ dependencies = [ name = "pallet-contracts-proc-macro" version = "18.0.0" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -11300,11 +11297,11 @@ dependencies = [ name = "pallet-staking-reward-curve" version = "11.0.0" dependencies = [ - "proc-macro-crate 3.0.0", - "proc-macro2 1.0.75", + "proc-macro-crate 3.1.0", + "proc-macro2 1.0.82", "quote 1.0.35", "sp-runtime", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -11907,8 +11904,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4e69bf016dc406eff7d53a7d3f7cf1c2e72c82b9088aac1118591e36dd2cd3e9" dependencies = [ "bitcoin_hashes 0.13.0", - "rand 0.7.3", - "rand_core 0.5.1", + "rand 0.8.5", + "rand_core 0.6.4", "serde", "unicode-normalization", ] @@ -11941,9 +11938,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" 
-version = "3.6.5" +version = "3.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dec8a8073036902368c2cdc0387e85ff9a37054d7e7c98e592145e0c92cd4fb" +checksum = "a1b5927e4a9ae8d6cdb6a69e4e04a0ec73381a358e21b8a576f44769f34e7c24" dependencies = [ "arrayvec 0.7.4", "bitvec", @@ -11956,12 +11953,12 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.6.5" +version = "3.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "312270ee71e1cd70289dacf597cab7b207aa107d2f28191c2ae45b2ece18a260" +checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" dependencies = [ - "proc-macro-crate 1.3.1", - "proc-macro2 1.0.75", + "proc-macro-crate 2.0.0", + "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", ] @@ -11996,7 +11993,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "syn 1.0.109", "synstructure", ] @@ -12418,9 +12415,9 @@ checksum = "68ca01446f50dbda87c1786af8770d535423fa8a53aec03b8f4e3d7eb10e0929" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -12459,9 +12456,9 @@ version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -12719,6 +12716,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "rstest", "sc-keystore", "sc-network", "sp-core", @@ -13115,7 +13113,6 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", - "rstest", "sc-keystore", "sp-application-crypto", "sp-core", @@ -14403,9 +14400,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c4fdfc49717fb9a196e74a5d28e0bc764eb394a2c803eb11133a31ac996c60c" dependencies = [ "polkavm-common", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -14415,7 +14412,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ba81f7b5faac81e528eb6158a6f3c9e0bb1008e0ffa19653bc8dea925ecb429" dependencies = [ "polkavm-derive-impl", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -14577,7 +14574,7 @@ version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "syn 1.0.109", ] @@ -14587,8 +14584,8 @@ version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" dependencies = [ - "proc-macro2 1.0.75", - "syn 2.0.53", + "proc-macro2 1.0.82", + "syn 2.0.61", ] [[package]] @@ -14634,9 +14631,18 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "3.0.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b2685dd208a3771337d8d386a89840f0f43cd68be8dae90a5f8c2384effc9cd" +checksum = 
"7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" +dependencies = [ + "toml_edit 0.20.7", +] + +[[package]] +name = "proc-macro-crate" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" dependencies = [ "toml_edit 0.21.0", ] @@ -14648,7 +14654,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", "version_check", @@ -14660,7 +14666,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "version_check", ] @@ -14677,9 +14683,9 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b698b0b09d40e9b7c1a47b132d66a8b54bcd20583d9b6d06e4535e383b4405c" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -14693,9 +14699,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.75" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "907a61bd0f64c2f29cd1cf1dc34d05176426a3f504a78010f08416ddb7b13708" +checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b" dependencies = [ "unicode-ident", ] @@ -14758,9 +14764,9 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -14854,7 +14860,7 @@ dependencies = [ "prost 0.12.4", "prost-types 0.12.4", "regex", - "syn 2.0.53", + "syn 2.0.61", "tempfile", ] @@ -14866,7 +14872,7 @@ checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools 0.10.5", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", ] @@ -14879,9 +14885,9 @@ checksum = "19de2de2a00075bf566bee3bd4db014b11587e84184d3f7a791bc17f1a8e9e48" dependencies = [ "anyhow", "itertools 0.11.0", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -15071,7 +15077,7 @@ version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", ] [[package]] @@ -15311,9 +15317,9 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f7473c2cfcf90008193dd0e3e16599455cb601a9fce322b5bb55de799664925" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -15869,12 +15875,12 @@ checksum = "d428f8247852f894ee1be110b375111b586d4fa431f6c46e64ba5a0dcccbe605" dependencies = [ "cfg-if", "glob", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "regex", "relative-path", "rustc_version 0.4.0", - "syn 2.0.53", + "syn 2.0.61", "unicode-ident", ] @@ -16328,10 +16334,10 @@ dependencies = [ 
name = "sc-chain-spec-derive" version = "11.0.0" dependencies = [ - "proc-macro-crate 3.0.0", - "proc-macro2 1.0.75", + "proc-macro-crate 3.1.0", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -17623,10 +17629,10 @@ dependencies = [ name = "sc-tracing-proc-macro" version = "11.0.0" dependencies = [ - "proc-macro-crate 3.0.0", - "proc-macro2 1.0.75", + "proc-macro-crate 3.1.0", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -17696,9 +17702,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.11.1" +version = "2.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "788745a868b0e751750388f4e6546eb921ef714a4317fa6954f7cde114eb2eb7" +checksum = "eca070c12893629e2cc820a9761bedf6ce1dcddc9852984d1dc734b8bd9bd024" dependencies = [ "bitvec", "cfg-if", @@ -17710,12 +17716,12 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.11.1" +version = "2.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dc2f4e8bc344b9fc3d5f74f72c2e55bfc38d28dc2ebc69c194a3df424e4d9ac" +checksum = "2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" dependencies = [ - "proc-macro-crate 1.3.1", - "proc-macro2 1.0.75", + "proc-macro-crate 3.1.0", + "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", ] @@ -17747,7 +17753,7 @@ version = "0.8.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0f696e21e10fa546b7ffb1c9672c6de8fbc7a81acf59524386d8639bf12737" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "serde_derive_internals", "syn 1.0.109", @@ -18041,9 +18047,9 @@ version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -18052,7 +18058,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", ] @@ -18142,9 +18148,9 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -19005,11 +19011,11 @@ dependencies = [ "Inflector", "assert_matches", "blake2 0.10.6", - "expander 2.0.0", - "proc-macro-crate 3.0.0", - "proc-macro2 1.0.75", + "expander", + "proc-macro-crate 3.1.0", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -19394,7 +19400,7 @@ version = "0.1.0" dependencies = [ "quote 1.0.35", "sp-crypto-hashing", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -19410,18 +19416,18 @@ name = "sp-debug-derive" version = "8.0.0" source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] name = "sp-debug-derive" version = "14.0.0" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] 
@@ -19546,7 +19552,7 @@ name = "sp-mmr-primitives" version = "26.0.0" dependencies = [ "array-bytes", - "ckb-merkle-mountain-range", + "ckb-merkle-mountain-range 0.6.0", "log", "parity-scale-codec", "scale-info", @@ -19690,9 +19696,9 @@ source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf5 dependencies = [ "Inflector", "proc-macro-crate 1.3.1", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -19700,11 +19706,11 @@ name = "sp-runtime-interface-proc-macro" version = "17.0.0" dependencies = [ "Inflector", - "expander 2.0.0", - "proc-macro-crate 3.0.0", - "proc-macro2 1.0.75", + "expander", + "proc-macro-crate 3.1.0", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -19963,10 +19969,10 @@ name = "sp-version-proc-macro" version = "13.0.0" dependencies = [ "parity-scale-codec", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "sp-version", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -20048,7 +20054,7 @@ checksum = "5e6915280e2d0db8911e5032a5c275571af6bdded2916abd691a659be25d3439" dependencies = [ "Inflector", "num-format", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "serde", "serde_json", @@ -20073,7 +20079,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f07d54c4d01a1713eb363b55ba51595da15f6f1211435b71466460da022aa140" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", ] @@ -20198,7 +20204,6 @@ dependencies = [ "staging-node-inspect", "substrate-build-script-utils", "substrate-cli-test-utils", - "substrate-frame-cli", "substrate-rpc-client", "tempfile", "tokio", @@ -20341,7 +20346,7 @@ checksum = "70a2595fc3aa78f2d0e45dd425b22282dd863273761cc77780914b2cf3003acf" dependencies = [ "cfg_aliases", "memchr", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", ] @@ -20416,7 +20421,7 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", ] @@ -20452,7 +20457,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "rustversion", "syn 1.0.109", @@ -20465,10 +20470,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "rustversion", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -20478,10 +20483,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "rustversion", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -20526,18 +20531,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "substrate-frame-cli" -version = "32.0.0" -dependencies = [ - "clap 4.5.3", - "frame-support", - "frame-system", - "sc-cli", - "sp-core", - "sp-runtime", -] - [[package]] name = "substrate-frame-rpc-support" version = "29.0.0" @@ -20926,18 +20919,18 @@ version = "1.0.109" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.53" +version = "2.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7383cd0e49fff4b6b90ca5670bfd3e9d6a733b3f90c686605aa7eec8c4996032" +checksum = "c993ed8ccba56ae856363b1845da7266a7cb78e1d146c8a32d54b45a8b831fc9" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "unicode-ident", ] @@ -20949,9 +20942,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86b837ef12ab88835251726eb12237655e61ec8dc8a280085d1961cdc3dfd047" dependencies = [ "paste", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -20960,7 +20953,7 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", "unicode-xid 0.2.4", @@ -21224,7 +21217,7 @@ version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "10ac1c5050e43014d16b2f94d0d2ce79e65ffdd8b38d8048f9c8f6a8a6da62ac" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "syn 1.0.109", ] @@ -21235,9 +21228,9 @@ version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -21398,9 +21391,9 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -21531,6 +21524,17 @@ dependencies = [ "winnow", ] +[[package]] +name = "toml_edit" +version = "0.20.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" +dependencies = [ + "indexmap 2.2.3", + "toml_datetime", + "winnow", +] + [[package]] name = "toml_edit" version = "0.21.0" @@ -21607,9 +21611,9 @@ version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -21647,11 +21651,11 @@ name = "tracing-gum-proc-macro" version = "5.0.0" dependencies = [ "assert_matches", - "expander 2.0.0", - "proc-macro-crate 3.0.0", - "proc-macro2 1.0.75", + "expander", + "proc-macro-crate 3.1.0", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -21929,7 +21933,7 @@ checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", "digest 0.10.7", - "rand 0.7.3", + "rand 0.8.5", "static_assertions", ] @@ -22235,9 +22239,9 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 
2.0.53", + "syn 2.0.61", "wasm-bindgen-shared", ] @@ -22269,9 +22273,9 @@ version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -22302,7 +22306,7 @@ version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ecb993dd8c836930ed130e020e77d9b2e65dd0fbab1b67c790b0f5d80b11a575" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", ] @@ -23362,10 +23366,10 @@ name = "xcm-procedural" version = "7.0.0" dependencies = [ "Inflector", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", "staging-xcm", - "syn 2.0.53", + "syn 2.0.61", "trybuild", ] @@ -23485,9 +23489,9 @@ version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] @@ -23505,9 +23509,9 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.35", - "syn 2.0.53", + "syn 2.0.61", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 1d3f3d8e9ecd..1440c2d497d3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -500,7 +500,6 @@ members = [ "substrate/utils/build-script-utils", "substrate/utils/fork-tree", "substrate/utils/frame/benchmarking-cli", - "substrate/utils/frame/frame-utilities-cli", "substrate/utils/frame/generate-bags", "substrate/utils/frame/generate-bags/node-runtime", "substrate/utils/frame/omni-bencher", diff --git a/bridges/primitives/beefy/src/lib.rs b/bridges/primitives/beefy/src/lib.rs index 0441781e79a6..2494706818ef 100644 --- a/bridges/primitives/beefy/src/lib.rs +++ b/bridges/primitives/beefy/src/lib.rs @@ -22,7 +22,7 @@ pub use binary_merkle_tree::merkle_root; pub use pallet_beefy_mmr::BeefyEcdsaToEthereum; pub use pallet_mmr::{ - primitives::{DataOrHash as MmrDataOrHash, Proof as MmrProof}, + primitives::{DataOrHash as MmrDataOrHash, LeafProof as MmrProof}, verify_leaves_proof as verify_mmr_leaves_proof, }; pub use sp_consensus_beefy::{ diff --git a/cumulus/client/parachain-inherent/src/mock.rs b/cumulus/client/parachain-inherent/src/mock.rs index 22691006f93e..896df7a72425 100644 --- a/cumulus/client/parachain-inherent/src/mock.rs +++ b/cumulus/client/parachain-inherent/src/mock.rs @@ -28,6 +28,9 @@ use std::collections::BTreeMap; use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; +/// Relay chain slot duration, in milliseconds. +pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; + /// Inherent data provider that supplies mocked validation data. /// /// This is useful when running a node that is not actually backed by any relay chain. 
@@ -45,6 +48,8 @@ use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; pub struct MockValidationDataInherentDataProvider { /// The current block number of the local block chain (the parachain) pub current_para_block: u32, + /// The current block head data of the local block chain (the parachain) + pub current_para_block_head: Option, /// The relay block in which this parachain appeared to start. This will be the relay block /// number in para block #P1 pub relay_offset: u32, @@ -159,14 +164,16 @@ impl> InherentDataProvider &self, inherent_data: &mut InherentData, ) -> Result<(), sp_inherents::Error> { - // Calculate the mocked relay block based on the current para block - let relay_parent_number = - self.relay_offset + self.relay_blocks_per_para_block * self.current_para_block; - // Use the "sproof" (spoof proof) builder to build valid mock state root and proof. let mut sproof_builder = RelayStateSproofBuilder { para_id: self.xcm_config.para_id, ..Default::default() }; + // Calculate the mocked relay block based on the current para block + let relay_parent_number = + self.relay_offset + self.relay_blocks_per_para_block * self.current_para_block; + sproof_builder.current_slot = + ((relay_parent_number / RELAY_CHAIN_SLOT_DURATION_MILLIS) as u64).into(); + // Process the downward messages and set up the correct head let mut downward_messages = Vec::new(); let mut dmq_mqc = MessageQueueChain::new(self.xcm_config.starting_dmq_mqc_head); @@ -217,6 +224,9 @@ impl> InherentDataProvider sproof_builder.additional_key_values = key_values.clone() } + // Inject current para block head, if any + sproof_builder.included_para_head = self.current_para_block_head.clone(); + let (relay_parent_storage_root, proof) = sproof_builder.into_state_root_and_proof(); inherent_data.put_data( diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml index 888193c5c6ea..c3f1c8b4f227 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/Cargo.toml @@ -37,7 +37,6 @@ pallet-nfts = { path = "../../../../../substrate/frame/nfts", default-features = pallet-nfts-runtime-api = { path = "../../../../../substrate/frame/nfts/runtime-api", default-features = false } pallet-proxy = { path = "../../../../../substrate/frame/proxy", default-features = false } pallet-session = { path = "../../../../../substrate/frame/session", default-features = false } -pallet-state-trie-migration = { path = "../../../../../substrate/frame/state-trie-migration", default-features = false } pallet-timestamp = { path = "../../../../../substrate/frame/timestamp", default-features = false } pallet-transaction-payment = { path = "../../../../../substrate/frame/transaction-payment", default-features = false } pallet-transaction-payment-rpc-runtime-api = { path = "../../../../../substrate/frame/transaction-payment/rpc/runtime-api", default-features = false } @@ -124,7 +123,6 @@ runtime-benchmarks = [ "pallet-nft-fractionalization/runtime-benchmarks", "pallet-nfts/runtime-benchmarks", "pallet-proxy/runtime-benchmarks", - "pallet-state-trie-migration/runtime-benchmarks", "pallet-timestamp/runtime-benchmarks", "pallet-uniques/runtime-benchmarks", "pallet-utility/runtime-benchmarks", @@ -163,7 +161,6 @@ try-runtime = [ "pallet-nfts/try-runtime", "pallet-proxy/try-runtime", "pallet-session/try-runtime", - "pallet-state-trie-migration/try-runtime", "pallet-timestamp/try-runtime", 
"pallet-transaction-payment/try-runtime", "pallet-uniques/try-runtime", @@ -213,7 +210,6 @@ std = [ "pallet-nfts/std", "pallet-proxy/std", "pallet-session/std", - "pallet-state-trie-migration/std", "pallet-timestamp/std", "pallet-transaction-payment-rpc-runtime-api/std", "pallet-transaction-payment/std", diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index f81a107fae05..0df9b65c714d 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -945,8 +945,6 @@ construct_runtime!( PoolAssets: pallet_assets:: = 55, AssetConversion: pallet_asset_conversion = 56, - StateTrieMigration: pallet_state_trie_migration = 70, - // TODO: the pallet instance should be removed once all pools have migrated // to the new account IDs. AssetConversionMigration: pallet_asset_conversion_ops = 200, @@ -1785,47 +1783,6 @@ cumulus_pallet_parachain_system::register_validate_block! { BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, } -parameter_types! { - // The deposit configuration for the singed migration. Specially if you want to allow any signed account to do the migration (see `SignedFilter`, these deposits should be high) - pub const MigrationSignedDepositPerItem: Balance = CENTS; - pub const MigrationSignedDepositBase: Balance = 2_000 * CENTS; - pub const MigrationMaxKeyLen: u32 = 512; -} - -impl pallet_state_trie_migration::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type RuntimeHoldReason = RuntimeHoldReason; - type SignedDepositPerItem = MigrationSignedDepositPerItem; - type SignedDepositBase = MigrationSignedDepositBase; - // An origin that can control the whole pallet: should be Root, or a part of your council. - type ControlOrigin = frame_system::EnsureSignedBy; - // specific account for the migration, can trigger the signed migrations. - type SignedFilter = frame_system::EnsureSignedBy; - - // Replace this with weight based on your runtime. - type WeightInfo = pallet_state_trie_migration::weights::SubstrateWeight; - - type MaxKeyLen = MigrationMaxKeyLen; -} - -frame_support::ord_parameter_types! 
{ - pub const MigController: AccountId = AccountId::from(hex_literal::hex!("8458ed39dc4b6f6c7255f7bc42be50c2967db126357c999d44e12ca7ac80dc52")); - pub const RootMigController: AccountId = AccountId::from(hex_literal::hex!("8458ed39dc4b6f6c7255f7bc42be50c2967db126357c999d44e12ca7ac80dc52")); -} - -#[test] -fn ensure_key_ss58() { - use frame_support::traits::SortedMembers; - use sp_core::crypto::Ss58Codec; - let acc = - AccountId::from_ss58check("5F4EbSkZz18X36xhbsjvDNs6NuZ82HyYtq5UiJ1h9SBHJXZD").unwrap(); - assert_eq!(acc, MigController::sorted_members()[0]); - let acc = - AccountId::from_ss58check("5F4EbSkZz18X36xhbsjvDNs6NuZ82HyYtq5UiJ1h9SBHJXZD").unwrap(); - assert_eq!(acc, RootMigController::sorted_members()[0]); -} - #[cfg(test)] mod tests { use super::*; diff --git a/cumulus/xcm/xcm-emulator/src/lib.rs b/cumulus/xcm/xcm-emulator/src/lib.rs index babb318a9950..a50f33951d05 100644 --- a/cumulus/xcm/xcm-emulator/src/lib.rs +++ b/cumulus/xcm/xcm-emulator/src/lib.rs @@ -34,7 +34,9 @@ pub use frame_support::{ }, weights::{Weight, WeightMeter}, }; -pub use frame_system::{Config as SystemConfig, Pallet as SystemPallet}; +pub use frame_system::{ + pallet_prelude::BlockNumberFor, Config as SystemConfig, Pallet as SystemPallet, +}; pub use pallet_balances::AccountData; pub use pallet_message_queue; pub use sp_arithmetic::traits::Bounded; @@ -54,7 +56,7 @@ pub use cumulus_primitives_core::{ pub use cumulus_primitives_parachain_inherent::ParachainInherentData; pub use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; pub use pallet_message_queue::{Config as MessageQueueConfig, Pallet as MessageQueuePallet}; -pub use parachains_common::{AccountId, Balance, BlockNumber}; +pub use parachains_common::{AccountId, Balance}; pub use polkadot_primitives; pub use polkadot_runtime_parachains::inclusion::{AggregateMessageOrigin, UmpQueueId}; @@ -657,7 +659,7 @@ macro_rules! decl_test_parachains { .clone() ); ::System::initialize(&block_number, &parent_head_data.hash(), &Default::default()); - <::ParachainSystem as Hooks<$crate::BlockNumber>>::on_initialize(block_number); + <::ParachainSystem as Hooks<$crate::BlockNumberFor>>::on_initialize(block_number); let _ = ::ParachainSystem::set_validation_data( ::RuntimeOrigin::none(), diff --git a/polkadot/node/core/backing/Cargo.toml b/polkadot/node/core/backing/Cargo.toml index 26fa54470fbd..9829f1b37cfd 100644 --- a/polkadot/node/core/backing/Cargo.toml +++ b/polkadot/node/core/backing/Cargo.toml @@ -21,7 +21,7 @@ statement-table = { package = "polkadot-statement-table", path = "../../../state bitvec = { version = "1.0.0", default-features = false, features = ["alloc"] } gum = { package = "tracing-gum", path = "../../gum" } thiserror = { workspace = true } -fatality = "0.0.6" +fatality = "0.1.0" schnellru = "0.2.1" [dev-dependencies] diff --git a/polkadot/node/core/backing/src/lib.rs b/polkadot/node/core/backing/src/lib.rs index 23acb0450944..a45edcbef52a 100644 --- a/polkadot/node/core/backing/src/lib.rs +++ b/polkadot/node/core/backing/src/lib.rs @@ -30,7 +30,7 @@ //! assigned group of validators may be backed on-chain and proceed to the availability //! stage. //! -//! Depth is a concept relating to asynchronous backing, by which validators +//! Depth is a concept relating to asynchronous backing, by which //! short sub-chains of candidates are backed and extended off-chain, and then placed //! asynchronously into blocks of the relay chain as those are authored and as the //! relay-chain state becomes ready for them. 
Asynchronous backing allows parachains to @@ -66,7 +66,7 @@ #![deny(unused_crate_dependencies)] use std::{ - collections::{BTreeMap, HashMap, HashSet}, + collections::{HashMap, HashSet}, sync::Arc, }; @@ -88,7 +88,7 @@ use polkadot_node_subsystem::{ messages::{ AvailabilityDistributionMessage, AvailabilityStoreMessage, CanSecondRequest, CandidateBackingMessage, CandidateValidationMessage, CollatorProtocolMessage, - HypotheticalCandidate, HypotheticalFrontierRequest, IntroduceCandidateRequest, + HypotheticalCandidate, HypotheticalMembershipRequest, IntroduceSecondedCandidateRequest, ProspectiveParachainsMessage, ProvisionableData, ProvisionerMessage, RuntimeApiMessage, RuntimeApiRequest, StatementDistributionMessage, StoreAvailableDataError, }, @@ -242,20 +242,44 @@ struct PerRelayParentState { struct PerCandidateState { persisted_validation_data: PersistedValidationData, seconded_locally: bool, - para_id: ParaId, relay_parent: Hash, } -struct ActiveLeafState { - prospective_parachains_mode: ProspectiveParachainsMode, - /// The candidates seconded at various depths under this active - /// leaf with respect to parachain id. A candidate can only be - /// seconded when its hypothetical frontier under every active leaf - /// has an empty entry in this map. - /// - /// When prospective parachains are disabled, the only depth - /// which is allowed is 0. - seconded_at_depth: HashMap>, +enum ActiveLeafState { + // If prospective-parachains is disabled, one validator may only back one candidate per + // paraid. + ProspectiveParachainsDisabled { seconded: HashSet }, + ProspectiveParachainsEnabled { max_candidate_depth: usize, allowed_ancestry_len: usize }, +} + +impl ActiveLeafState { + fn new(mode: ProspectiveParachainsMode) -> Self { + match mode { + ProspectiveParachainsMode::Disabled => + Self::ProspectiveParachainsDisabled { seconded: HashSet::new() }, + ProspectiveParachainsMode::Enabled { max_candidate_depth, allowed_ancestry_len } => + Self::ProspectiveParachainsEnabled { max_candidate_depth, allowed_ancestry_len }, + } + } + + fn add_seconded_candidate(&mut self, para_id: ParaId) { + if let Self::ProspectiveParachainsDisabled { seconded } = self { + seconded.insert(para_id); + } + } +} + +impl From<&ActiveLeafState> for ProspectiveParachainsMode { + fn from(state: &ActiveLeafState) -> Self { + match *state { + ActiveLeafState::ProspectiveParachainsDisabled { .. } => + ProspectiveParachainsMode::Disabled, + ActiveLeafState::ProspectiveParachainsEnabled { + max_candidate_depth, + allowed_ancestry_len, + } => ProspectiveParachainsMode::Enabled { max_candidate_depth, allowed_ancestry_len }, + } + } } /// The state of the subsystem. @@ -277,11 +301,11 @@ struct State { /// parachains. /// /// Relay-chain blocks which don't support prospective parachains are - /// never included in the fragment trees of active leaves which do. + /// never included in the fragment chains of active leaves which do. /// /// While it would be technically possible to support such leaves in - /// fragment trees, it only benefits the transition period when asynchronous - /// backing is being enabled and complicates code complexity. + /// fragment chains, it only benefits the transition period when asynchronous + /// backing is being enabled and complicates code. per_relay_parent: HashMap, /// State tracked for all candidates relevant to the implicit view. 
/// @@ -864,17 +888,9 @@ async fn handle_active_leaves_update( return Ok(()) } - state.per_leaf.insert( - leaf.hash, - ActiveLeafState { - prospective_parachains_mode: ProspectiveParachainsMode::Disabled, - // This is empty because the only allowed relay-parent and depth - // when prospective parachains are disabled is the leaf hash and 0, - // respectively. We've just learned about the leaf hash, so we cannot - // have any candidates seconded with it as a relay-parent yet. - seconded_at_depth: HashMap::new(), - }, - ); + state + .per_leaf + .insert(leaf.hash, ActiveLeafState::new(ProspectiveParachainsMode::Disabled)); (vec![leaf.hash], ProspectiveParachainsMode::Disabled) }, @@ -882,63 +898,9 @@ async fn handle_active_leaves_update( let fresh_relay_parents = state.implicit_view.known_allowed_relay_parents_under(&leaf.hash, None); - // At this point, all candidates outside of the implicit view - // have been cleaned up. For all which remain, which we've seconded, - // we ask the prospective parachains subsystem where they land in the fragment - // tree for the given active leaf. This comprises our `seconded_at_depth`. - - let remaining_seconded = state - .per_candidate - .iter() - .filter(|(_, cd)| cd.seconded_locally) - .map(|(c_hash, cd)| (*c_hash, cd.para_id)); - - // one-to-one correspondence to remaining_seconded - let mut membership_answers = FuturesOrdered::new(); - - for (candidate_hash, para_id) in remaining_seconded { - let (tx, rx) = oneshot::channel(); - membership_answers - .push_back(rx.map_ok(move |membership| (para_id, candidate_hash, membership))); - - ctx.send_message(ProspectiveParachainsMessage::GetTreeMembership( - para_id, - candidate_hash, - tx, - )) - .await; - } - - let mut seconded_at_depth = HashMap::new(); - while let Some(response) = membership_answers.next().await { - match response { - Err(oneshot::Canceled) => { - gum::warn!( - target: LOG_TARGET, - "Prospective parachains subsystem unreachable for membership request", - ); - }, - Ok((para_id, candidate_hash, membership)) => { - // This request gives membership in all fragment trees. We have some - // wasted data here, and it can be optimized if it proves - // relevant to performance. - if let Some((_, depths)) = - membership.into_iter().find(|(leaf_hash, _)| leaf_hash == &leaf.hash) - { - let para_entry: &mut BTreeMap = - seconded_at_depth.entry(para_id).or_default(); - for depth in depths { - para_entry.insert(depth, candidate_hash); - } - } - }, - } - } + let active_leaf_state = ActiveLeafState::new(prospective_parachains_mode); - state.per_leaf.insert( - leaf.hash, - ActiveLeafState { prospective_parachains_mode, seconded_at_depth }, - ); + state.per_leaf.insert(leaf.hash, active_leaf_state); let fresh_relay_parent = match fresh_relay_parents { Some(f) => f.to_vec(), @@ -981,7 +943,7 @@ async fn handle_active_leaves_update( // block itself did. leaf_mode }, - Some(l) => l.prospective_parachains_mode, + Some(l) => l.into(), }; // construct a `PerRelayParent` from the runtime API @@ -1247,20 +1209,20 @@ async fn construct_per_relay_parent_state( enum SecondingAllowed { No, - Yes(Vec<(Hash, Vec)>), + // On which leaves is seconding allowed. + Yes(Vec), } -/// Checks whether a candidate can be seconded based on its hypothetical frontiers in the fragment -/// tree and what we've already seconded in all active leaves. +/// Checks whether a candidate can be seconded based on its hypothetical membership in the fragment +/// chain. 
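A condensed, self-contained sketch of the decision this function now makes (types are simplified stand-ins, not the subsystem's actual ones): with prospective parachains enabled, seconding is allowed on every active leaf that reports the candidate as a member or potential member of its fragment chain; with them disabled, a leaf refuses outright once the para already has a seconded candidate there. Seconding proceeds only if at least one leaf remains.

use std::collections::HashSet;

// Simplified stand-ins for the real subsystem types.
type LeafHash = u64;
type ParaId = u32;

enum LeafState {
    // Prospective parachains disabled: at most one seconded candidate per para.
    Disabled { seconded: HashSet<ParaId> },
    // Prospective parachains enabled: rely on hypothetical-membership answers.
    Enabled,
}

/// Returns the leaves a candidate may be seconded on, or `None` if seconding is refused.
fn seconding_allowed(
    candidate_para: ParaId,
    // (leaf, leaf state, "is the candidate a (potential) member of this leaf's fragment chain?")
    leaves: &[(LeafHash, LeafState, bool)],
) -> Option<Vec<LeafHash>> {
    let mut allowed = Vec::new();
    for (leaf, state, is_member_or_potential) in leaves {
        match state {
            // In the real subsystem, a leaf with prospective parachains disabled only
            // participates when it equals the candidate's relay parent.
            LeafState::Disabled { seconded } if seconded.contains(&candidate_para) => {
                // The leaf is already occupied for this para: refuse outright.
                return None
            },
            LeafState::Disabled { .. } => allowed.push(*leaf),
            LeafState::Enabled if *is_member_or_potential => allowed.push(*leaf),
            // Not a member on this leaf: simply skip it.
            LeafState::Enabled => {},
        }
    }
    (!allowed.is_empty()).then_some(allowed)
}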
#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn seconding_sanity_check( ctx: &mut Context, active_leaves: &HashMap, implicit_view: &ImplicitView, hypothetical_candidate: HypotheticalCandidate, - backed_in_path_only: bool, ) -> SecondingAllowed { - let mut membership = Vec::new(); + let mut leaves_for_seconding = Vec::new(); let mut responses = FuturesOrdered::>>::new(); let candidate_para = hypothetical_candidate.candidate_para(); @@ -1268,7 +1230,7 @@ async fn seconding_sanity_check( let candidate_hash = hypothetical_candidate.candidate_hash(); for (head, leaf_state) in active_leaves { - if leaf_state.prospective_parachains_mode.is_enabled() { + if ProspectiveParachainsMode::from(leaf_state).is_enabled() { // Check that the candidate relay parent is allowed for para, skip the // leaf otherwise. let allowed_parents_for_para = @@ -1278,40 +1240,36 @@ async fn seconding_sanity_check( } let (tx, rx) = oneshot::channel(); - ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalFrontier( - HypotheticalFrontierRequest { + ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalMembership( + HypotheticalMembershipRequest { candidates: vec![hypothetical_candidate.clone()], - fragment_tree_relay_parent: Some(*head), - backed_in_path_only, + fragment_chain_relay_parent: Some(*head), }, tx, )) .await; - let response = rx.map_ok(move |frontiers| { - let depths: Vec = frontiers + let response = rx.map_ok(move |candidate_memberships| { + let is_member_or_potential = candidate_memberships .into_iter() - .flat_map(|(candidate, memberships)| { - debug_assert_eq!(candidate.candidate_hash(), candidate_hash); - memberships.into_iter().flat_map(|(relay_parent, depths)| { - debug_assert_eq!(relay_parent, *head); - depths - }) + .find_map(|(candidate, leaves)| { + (candidate.candidate_hash() == candidate_hash).then_some(leaves) }) - .collect(); - (depths, head, leaf_state) + .and_then(|leaves| leaves.into_iter().find(|leaf| leaf == head)) + .is_some(); + + (is_member_or_potential, head) }); responses.push_back(response.boxed()); } else { if *head == candidate_relay_parent { - if leaf_state - .seconded_at_depth - .get(&candidate_para) - .map_or(false, |occupied| occupied.contains_key(&0)) - { - // The leaf is already occupied. - return SecondingAllowed::No + if let ActiveLeafState::ProspectiveParachainsDisabled { seconded } = leaf_state { + if seconded.contains(&candidate_para) { + // The leaf is already occupied. For non-prospective parachains, we only + // second one candidate. + return SecondingAllowed::No + } } - responses.push_back(futures::future::ok((vec![0], head, leaf_state)).boxed()); + responses.push_back(futures::future::ok((true, head)).boxed()); } } } @@ -1325,38 +1283,32 @@ async fn seconding_sanity_check( Err(oneshot::Canceled) => { gum::warn!( target: LOG_TARGET, - "Failed to reach prospective parachains subsystem for hypothetical frontiers", + "Failed to reach prospective parachains subsystem for hypothetical membership", ); return SecondingAllowed::No }, - Ok((depths, head, leaf_state)) => { - for depth in &depths { - if leaf_state - .seconded_at_depth - .get(&candidate_para) - .map_or(false, |occupied| occupied.contains_key(&depth)) - { - gum::debug!( - target: LOG_TARGET, - ?candidate_hash, - depth, - leaf_hash = ?head, - "Refusing to second candidate at depth - already occupied." 
- ); - - return SecondingAllowed::No - } - } - - membership.push((*head, depths)); + Ok((is_member_or_potential, head)) => match is_member_or_potential { + false => { + gum::debug!( + target: LOG_TARGET, + ?candidate_hash, + leaf_hash = ?head, + "Refusing to second candidate at leaf. Is not a potential member.", + ); + }, + true => { + leaves_for_seconding.push(*head); + }, }, } } - // At this point we've checked the depths of the candidate against all active - // leaves. - SecondingAllowed::Yes(membership) + if leaves_for_seconding.is_empty() { + SecondingAllowed::No + } else { + SecondingAllowed::Yes(leaves_for_seconding) + } } /// Performs seconding sanity check for an advertisement. @@ -1385,16 +1337,12 @@ async fn handle_can_second_request( &state.per_leaf, &state.implicit_view, hypothetical_candidate, - true, ) .await; match result { SecondingAllowed::No => false, - SecondingAllowed::Yes(membership) => { - // Candidate should be recognized by at least some fragment tree. - membership.iter().any(|(_, m)| !m.is_empty()) - }, + SecondingAllowed::Yes(leaves) => !leaves.is_empty(), } } else { // Relay parent is unknown or async backing is disabled. @@ -1435,20 +1383,6 @@ async fn handle_validated_candidate_command( commitments, }; - let parent_head_data_hash = persisted_validation_data.parent_head.hash(); - // Note that `GetHypotheticalFrontier` doesn't account for recursion, - // i.e. candidates can appear at multiple depths in the tree and in fact - // at all depths, and we don't know what depths a candidate will ultimately - // occupy because that's dependent on other candidates we haven't yet - // received. - // - // The only way to effectively rule this out is to have candidate receipts - // directly commit to the parachain block number or some other incrementing - // counter. That requires a major primitives format upgrade, so for now - // we just rule out trivial cycles. - if parent_head_data_hash == receipt.commitments.head_data.hash() { - return Ok(()) - } let hypothetical_candidate = HypotheticalCandidate::Complete { candidate_hash, receipt: Arc::new(receipt.clone()), @@ -1457,12 +1391,11 @@ async fn handle_validated_candidate_command( // sanity check that we're allowed to second the candidate // and that it doesn't conflict with other candidates we've // seconded. - let fragment_tree_membership = match seconding_sanity_check( + let hypothetical_membership = match seconding_sanity_check( ctx, &state.per_leaf, &state.implicit_view, hypothetical_candidate, - false, ) .await { @@ -1517,8 +1450,8 @@ async fn handle_validated_candidate_command( Some(p) => p.seconded_locally = true, } - // update seconded depths in active leaves. - for (leaf, depths) in fragment_tree_membership { + // record seconded candidates for non-prospective-parachains mode. + for leaf in hypothetical_membership { let leaf_data = match state.per_leaf.get_mut(&leaf) { None => { gum::warn!( @@ -1532,14 +1465,7 @@ async fn handle_validated_candidate_command( Some(d) => d, }; - let seconded_at_depth = leaf_data - .seconded_at_depth - .entry(candidate.descriptor().para_id) - .or_default(); - - for depth in depths { - seconded_at_depth.insert(depth, candidate_hash); - } + leaf_data.add_seconded_candidate(candidate.descriptor().para_id); } rp_state.issued_statements.insert(candidate_hash); @@ -1650,7 +1576,7 @@ fn sign_statement( /// and any of the following are true: /// 1. There is no `PersistedValidationData` attached. /// 2. 
Prospective parachains are enabled for the relay parent and the prospective parachains -/// subsystem returned an empty `FragmentTreeMembership` i.e. did not recognize the candidate as +/// subsystem returned an empty `HypotheticalMembership` i.e. did not recognize the candidate as /// being applicable to any of the active leaves. #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn import_statement( @@ -1686,8 +1612,8 @@ async fn import_statement( if !per_candidate.contains_key(&candidate_hash) { if rp_state.prospective_parachains_mode.is_enabled() { let (tx, rx) = oneshot::channel(); - ctx.send_message(ProspectiveParachainsMessage::IntroduceCandidate( - IntroduceCandidateRequest { + ctx.send_message(ProspectiveParachainsMessage::IntroduceSecondedCandidate( + IntroduceSecondedCandidateRequest { candidate_para: candidate.descriptor().para_id, candidate_receipt: candidate.clone(), persisted_validation_data: pvd.clone(), @@ -1705,17 +1631,9 @@ async fn import_statement( return Err(Error::RejectedByProspectiveParachains) }, - Ok(membership) => - if membership.is_empty() { - return Err(Error::RejectedByProspectiveParachains) - }, + Ok(false) => return Err(Error::RejectedByProspectiveParachains), + Ok(true) => {}, } - - ctx.send_message(ProspectiveParachainsMessage::CandidateSeconded( - candidate.descriptor().para_id, - candidate_hash, - )) - .await; } // Only save the candidate if it was approved by prospective parachains. @@ -1725,7 +1643,6 @@ async fn import_statement( persisted_validation_data: pvd.clone(), // This is set after importing when seconding locally. seconded_locally: false, - para_id: candidate.descriptor().para_id, relay_parent: candidate.descriptor().relay_parent, }, ); @@ -1786,13 +1703,6 @@ async fn post_import_statement_actions( candidate_hash, )) .await; - // Backed candidate potentially unblocks new advertisements, - // notify collator protocol. - ctx.send_message(CollatorProtocolMessage::Backed { - para_id, - para_head: backed.candidate().descriptor.para_head, - }) - .await; // Notify statement distribution of backed candidate. ctx.send_message(StatementDistributionMessage::Backed(candidate_hash)).await; } else { @@ -2016,7 +1926,7 @@ async fn maybe_validate_and_import( if let Some(summary) = summary { // import_statement already takes care of communicating with the // prospective parachains subsystem. At this point, the candidate - // has already been accepted into the fragment trees. + // has already been accepted by the subsystem. let candidate_hash = summary.candidate; diff --git a/polkadot/node/core/backing/src/tests/prospective_parachains.rs b/polkadot/node/core/backing/src/tests/prospective_parachains.rs index 94310d2aa164..8a72902f0815 100644 --- a/polkadot/node/core/backing/src/tests/prospective_parachains.rs +++ b/polkadot/node/core/backing/src/tests/prospective_parachains.rs @@ -17,7 +17,7 @@ //! Tests for the backing subsystem with enabled prospective parachains. 
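The updated tests below repeatedly answer this exchange with `tx.send(true)`. As a hedged sketch mirroring the production code in this diff (surrounding variables such as `candidate`, `pvd` and the overseer context `ctx` are assumed from the enclosing function), the new flow is:

// The backing subsystem introduces a seconded candidate and the prospective parachains
// subsystem now replies with a plain `bool` instead of a membership vector.
let (tx, rx) = oneshot::channel();
ctx.send_message(ProspectiveParachainsMessage::IntroduceSecondedCandidate(
    IntroduceSecondedCandidateRequest {
        candidate_para: candidate.descriptor().para_id,
        candidate_receipt: candidate.clone(),
        persisted_validation_data: pvd.clone(),
    },
    tx,
))
.await;

match rx.await {
    // Accepted: the candidate is stored and backing proceeds.
    Ok(true) => {},
    // Rejected or subsystem unreachable: drop the candidate.
    // (The production code also logs a warning when the channel is canceled.)
    Ok(false) | Err(_) => return Err(Error::RejectedByProspectiveParachains),
}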
use polkadot_node_subsystem::{ - messages::{ChainApiMessage, FragmentTreeMembership}, + messages::{ChainApiMessage, HypotheticalMembership}, ActivatedLeaf, TimeoutExt, }; use polkadot_primitives::{AsyncBackingParams, BlockNumber, Header, OccupiedCore}; @@ -40,7 +40,6 @@ async fn activate_leaf( virtual_overseer: &mut VirtualOverseer, leaf: TestLeaf, test_state: &TestState, - seconded_in_view: usize, ) { let TestLeaf { activated, min_relay_parents } = leaf; let leaf_hash = activated.hash; @@ -122,21 +121,6 @@ async fn activate_leaf( } } - for _ in 0..seconded_in_view { - let msg = match next_overseer_message.take() { - Some(msg) => msg, - None => virtual_overseer.recv().await, - }; - assert_matches!( - msg, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::GetTreeMembership(.., tx), - ) => { - tx.send(Vec::new()).unwrap(); - } - ); - } - for (hash, number) in ancestry_iter.take(requested_len) { let msg = match next_overseer_message.take() { Some(msg) => msg, @@ -297,11 +281,11 @@ async fn assert_validate_seconded_candidate( ); } -async fn assert_hypothetical_frontier_requests( +async fn assert_hypothetical_membership_requests( virtual_overseer: &mut VirtualOverseer, mut expected_requests: Vec<( - HypotheticalFrontierRequest, - Vec<(HypotheticalCandidate, FragmentTreeMembership)>, + HypotheticalMembershipRequest, + Vec<(HypotheticalCandidate, HypotheticalMembership)>, )>, ) { // Requests come with no particular order. @@ -311,13 +295,13 @@ async fn assert_hypothetical_frontier_requests( assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::GetHypotheticalFrontier(request, tx), + ProspectiveParachainsMessage::GetHypotheticalMembership(request, tx), ) => { let idx = match expected_requests.iter().position(|r| r.0 == request) { Some(idx) => idx, None => panic!( - "unexpected hypothetical frontier request, no match found for {:?}", + "unexpected hypothetical membership request, no match found for {:?}", request ), }; @@ -330,18 +314,17 @@ async fn assert_hypothetical_frontier_requests( } } -fn make_hypothetical_frontier_response( - depths: Vec, +fn make_hypothetical_membership_response( hypothetical_candidate: HypotheticalCandidate, relay_parent_hash: Hash, -) -> Vec<(HypotheticalCandidate, FragmentTreeMembership)> { - vec![(hypothetical_candidate, vec![(relay_parent_hash, depths)])] +) -> Vec<(HypotheticalCandidate, HypotheticalMembership)> { + vec![(hypothetical_candidate, vec![relay_parent_hash])] } // Test that `seconding_sanity_check` works when a candidate is allowed // for all leaves. #[test] -fn seconding_sanity_check_allowed() { +fn seconding_sanity_check_allowed_on_all() { let test_state = TestState::default(); test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { // Candidate is seconded in a parent of the activated `leaf_a`. 
@@ -364,8 +347,8 @@ fn seconding_sanity_check_allowed() { let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)]; let test_leaf_b = TestLeaf { activated, min_relay_parents }; - activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; - activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state, 0).await; + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; + activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd = dummy_pvd(); @@ -412,24 +395,19 @@ fn seconding_sanity_check_allowed() { receipt: Arc::new(candidate.clone()), persisted_validation_data: pvd.clone(), }; - let expected_request_a = HypotheticalFrontierRequest { + let expected_request_a = HypotheticalMembershipRequest { candidates: vec![hypothetical_candidate.clone()], - fragment_tree_relay_parent: Some(leaf_a_hash), - backed_in_path_only: false, + fragment_chain_relay_parent: Some(leaf_a_hash), }; - let expected_response_a = make_hypothetical_frontier_response( - vec![0, 1, 2, 3], - hypothetical_candidate.clone(), - leaf_a_hash, - ); - let expected_request_b = HypotheticalFrontierRequest { + let expected_response_a = + make_hypothetical_membership_response(hypothetical_candidate.clone(), leaf_a_hash); + let expected_request_b = HypotheticalMembershipRequest { candidates: vec![hypothetical_candidate.clone()], - fragment_tree_relay_parent: Some(leaf_b_hash), - backed_in_path_only: false, + fragment_chain_relay_parent: Some(leaf_b_hash), }; let expected_response_b = - make_hypothetical_frontier_response(vec![3], hypothetical_candidate, leaf_b_hash); - assert_hypothetical_frontier_requests( + make_hypothetical_membership_response(hypothetical_candidate, leaf_b_hash); + assert_hypothetical_membership_requests( &mut virtual_overseer, vec![ (expected_request_a, expected_response_a), @@ -441,7 +419,7 @@ fn seconding_sanity_check_allowed() { assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceCandidate( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( req, tx, ), @@ -449,19 +427,10 @@ fn seconding_sanity_check_allowed() { req.candidate_receipt == candidate && req.candidate_para == para_id && pvd == req.persisted_validation_data => { - // Any non-empty response will do. - tx.send(vec![(leaf_a_hash, vec![0, 1, 2, 3])]).unwrap(); + tx.send(true).unwrap(); } ); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains(ProspectiveParachainsMessage::CandidateSeconded( - _, - _ - )) - ); - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -484,8 +453,8 @@ fn seconding_sanity_check_allowed() { }); } -// Test that `seconding_sanity_check` works when a candidate is disallowed -// for at least one leaf. +// Test that `seconding_sanity_check` disallows seconding when a candidate is disallowed +// for all leaves. 
#[test] fn seconding_sanity_check_disallowed() { let test_state = TestState::default(); @@ -510,7 +479,7 @@ fn seconding_sanity_check_disallowed() { let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)]; let test_leaf_b = TestLeaf { activated, min_relay_parents }; - activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd = dummy_pvd(); @@ -557,17 +526,13 @@ fn seconding_sanity_check_disallowed() { receipt: Arc::new(candidate.clone()), persisted_validation_data: pvd.clone(), }; - let expected_request_a = HypotheticalFrontierRequest { + let expected_request_a = HypotheticalMembershipRequest { candidates: vec![hypothetical_candidate.clone()], - fragment_tree_relay_parent: Some(leaf_a_hash), - backed_in_path_only: false, + fragment_chain_relay_parent: Some(leaf_a_hash), }; - let expected_response_a = make_hypothetical_frontier_response( - vec![0, 1, 2, 3], - hypothetical_candidate, - leaf_a_hash, - ); - assert_hypothetical_frontier_requests( + let expected_response_a = + make_hypothetical_membership_response(hypothetical_candidate, leaf_a_hash); + assert_hypothetical_membership_requests( &mut virtual_overseer, vec![(expected_request_a, expected_response_a)], ) @@ -576,7 +541,7 @@ fn seconding_sanity_check_disallowed() { assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceCandidate( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( req, tx, ), @@ -584,19 +549,10 @@ fn seconding_sanity_check_disallowed() { req.candidate_receipt == candidate && req.candidate_para == para_id && pvd == req.persisted_validation_data => { - // Any non-empty response will do. - tx.send(vec![(leaf_a_hash, vec![0, 2, 3])]).unwrap(); + tx.send(true).unwrap(); } ); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains(ProspectiveParachainsMessage::CandidateSeconded( - _, - _ - )) - ); - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -615,10 +571,7 @@ fn seconding_sanity_check_disallowed() { } ); - // A seconded candidate occupies a depth, try to second another one. - // It is allowed in a new leaf but not allowed in the old one. - // Expect it to be rejected. 
- activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state, 1).await; + activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state).await; let leaf_a_grandparent = get_parent_hash(leaf_a_parent); let candidate = TestCandidateBuilder { para_id, @@ -659,28 +612,20 @@ fn seconding_sanity_check_disallowed() { receipt: Arc::new(candidate), persisted_validation_data: pvd, }; - let expected_request_a = HypotheticalFrontierRequest { + let expected_request_a = HypotheticalMembershipRequest { candidates: vec![hypothetical_candidate.clone()], - fragment_tree_relay_parent: Some(leaf_a_hash), - backed_in_path_only: false, + fragment_chain_relay_parent: Some(leaf_a_hash), }; - let expected_response_a = make_hypothetical_frontier_response( - vec![3], - hypothetical_candidate.clone(), - leaf_a_hash, - ); - let expected_request_b = HypotheticalFrontierRequest { + let expected_empty_response = vec![(hypothetical_candidate.clone(), vec![])]; + let expected_request_b = HypotheticalMembershipRequest { candidates: vec![hypothetical_candidate.clone()], - fragment_tree_relay_parent: Some(leaf_b_hash), - backed_in_path_only: false, + fragment_chain_relay_parent: Some(leaf_b_hash), }; - let expected_response_b = - make_hypothetical_frontier_response(vec![1], hypothetical_candidate, leaf_b_hash); - assert_hypothetical_frontier_requests( + assert_hypothetical_membership_requests( &mut virtual_overseer, vec![ - (expected_request_a, expected_response_a), // All depths are occupied. - (expected_request_b, expected_response_b), + (expected_request_a, expected_empty_response.clone()), + (expected_request_b, expected_empty_response), ], ) .await; @@ -695,6 +640,137 @@ fn seconding_sanity_check_disallowed() { }); } +// Test that `seconding_sanity_check` allows seconding a candidate when it's allowed on at least one +// leaf. +#[test] +fn seconding_sanity_check_allowed_on_at_least_one_leaf() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate is seconded in a parent of the activated `leaf_a`. + const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + // `a` is grandparent of `b`. 
+ let leaf_a_hash = Hash::from_low_u64_be(130); + let leaf_a_parent = get_parent_hash(leaf_a_hash); + let activated = new_leaf(leaf_a_hash, LEAF_A_BLOCK_NUMBER); + let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + const LEAF_B_BLOCK_NUMBER: BlockNumber = LEAF_A_BLOCK_NUMBER + 2; + const LEAF_B_ANCESTRY_LEN: BlockNumber = 4; + + let leaf_b_hash = Hash::from_low_u64_be(128); + let activated = new_leaf(leaf_b_hash, LEAF_B_BLOCK_NUMBER); + let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)]; + let test_leaf_b = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; + activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(¶_id).unwrap(); + + let pov_hash = pov.hash(); + let candidate = TestCandidateBuilder { + para_id, + relay_parent: leaf_a_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + } + .build(); + + let second = CandidateBackingMessage::Second( + leaf_a_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + leaf_a_parent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + let hypothetical_candidate = HypotheticalCandidate::Complete { + candidate_hash: candidate.hash(), + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), + }; + let expected_request_a = HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(leaf_a_hash), + }; + let expected_response_a = + make_hypothetical_membership_response(hypothetical_candidate.clone(), leaf_a_hash); + let expected_request_b = HypotheticalMembershipRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_chain_relay_parent: Some(leaf_b_hash), + }; + let expected_response_b = vec![(hypothetical_candidate.clone(), vec![])]; + assert_hypothetical_membership_requests( + &mut virtual_overseer, + vec![ + (expected_request_a, expected_response_a), + (expected_request_b, expected_response_b), + ], + ) + .await; + // Prospective parachains are notified. 
+ assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data => { + tx.send(true).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + _signed_statement, + ) + ) if parent_hash == leaf_a_parent => {} + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { + assert_eq!(leaf_a_parent, hash); + assert_matches!(statement.payload(), Statement::Seconded(_)); + } + ); + + virtual_overseer + }); +} + // Test that a seconded candidate which is not approved by prospective parachains // subsystem doesn't change the view. #[test] @@ -712,7 +788,7 @@ fn prospective_parachains_reject_candidate() { let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; let test_leaf_a = TestLeaf { activated, min_relay_parents }; - activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd = dummy_pvd(); @@ -760,25 +836,20 @@ fn prospective_parachains_reject_candidate() { persisted_validation_data: pvd.clone(), }; let expected_request_a = vec![( - HypotheticalFrontierRequest { + HypotheticalMembershipRequest { candidates: vec![hypothetical_candidate.clone()], - fragment_tree_relay_parent: Some(leaf_a_hash), - backed_in_path_only: false, + fragment_chain_relay_parent: Some(leaf_a_hash), }, - make_hypothetical_frontier_response( - vec![0, 1, 2, 3], - hypothetical_candidate, - leaf_a_hash, - ), + make_hypothetical_membership_response(hypothetical_candidate, leaf_a_hash), )]; - assert_hypothetical_frontier_requests(&mut virtual_overseer, expected_request_a.clone()) + assert_hypothetical_membership_requests(&mut virtual_overseer, expected_request_a.clone()) .await; // Prospective parachains are notified. assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceCandidate( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( req, tx, ), @@ -787,7 +858,7 @@ fn prospective_parachains_reject_candidate() { && req.candidate_para == para_id && pvd == req.persisted_validation_data => { // Reject it. - tx.send(Vec::new()).unwrap(); + tx.send(false).unwrap(); } ); @@ -825,12 +896,12 @@ fn prospective_parachains_reject_candidate() { .await; // `seconding_sanity_check` - assert_hypothetical_frontier_requests(&mut virtual_overseer, expected_request_a).await; + assert_hypothetical_membership_requests(&mut virtual_overseer, expected_request_a).await; // Prospective parachains are notified. assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceCandidate( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( req, tx, ), @@ -838,19 +909,10 @@ fn prospective_parachains_reject_candidate() { req.candidate_receipt == candidate && req.candidate_para == para_id && pvd == req.persisted_validation_data => { - // Any non-empty response will do. 
- tx.send(vec![(leaf_a_hash, vec![0, 2, 3])]).unwrap(); + tx.send(true).unwrap(); } ); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains(ProspectiveParachainsMessage::CandidateSeconded( - _, - _ - )) - ); - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -890,7 +952,7 @@ fn second_multiple_candidates_per_relay_parent() { let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN)]; let test_leaf_a = TestLeaf { activated, min_relay_parents }; - activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd = dummy_pvd(); @@ -911,12 +973,10 @@ fn second_multiple_candidates_per_relay_parent() { let mut candidate_b = candidate_a.clone(); candidate_b.relay_parent = leaf_grandparent; - // With depths. - let candidate_a = (candidate_a.build(), 1); - let candidate_b = (candidate_b.build(), 2); + let candidate_a = candidate_a.build(); + let candidate_b = candidate_b.build(); for candidate in &[candidate_a, candidate_b] { - let (candidate, depth) = candidate; let second = CandidateBackingMessage::Second( leaf_hash, candidate.to_plain(), @@ -945,46 +1005,33 @@ fn second_multiple_candidates_per_relay_parent() { persisted_validation_data: pvd.clone(), }; let expected_request_a = vec![( - HypotheticalFrontierRequest { + HypotheticalMembershipRequest { candidates: vec![hypothetical_candidate.clone()], - fragment_tree_relay_parent: Some(leaf_hash), - backed_in_path_only: false, + fragment_chain_relay_parent: Some(leaf_hash), }, - make_hypothetical_frontier_response( - vec![*depth], - hypothetical_candidate, - leaf_hash, - ), + make_hypothetical_membership_response(hypothetical_candidate, leaf_hash), )]; - assert_hypothetical_frontier_requests( + assert_hypothetical_membership_requests( &mut virtual_overseer, expected_request_a.clone(), ) .await; // Prospective parachains are notified. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceCandidate( - req, - tx, - ), - ) if - &req.candidate_receipt == candidate - && req.candidate_para == para_id - && pvd == req.persisted_validation_data - => { - // Any non-empty response will do. 
- tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); - } - ); - assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded(_, _) - ) + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + &req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data + => { + tx.send(true).unwrap(); + } ); assert_matches!( @@ -1026,7 +1073,7 @@ fn backing_works() { let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN)]; let test_leaf_a = TestLeaf { activated, min_relay_parents }; - activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd = dummy_pvd(); @@ -1048,7 +1095,6 @@ fn backing_works() { .build(); let candidate_a_hash = candidate_a.hash(); - let candidate_a_para_head = candidate_a.descriptor().para_head; let public1 = Keystore::sr25519_generate_new( &*test_state.keystore, @@ -1096,7 +1142,7 @@ fn backing_works() { assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceCandidate( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( req, tx, ), @@ -1104,19 +1150,10 @@ fn backing_works() { req.candidate_receipt == candidate_a && req.candidate_para == para_id && pvd == req.persisted_validation_data => { - // Any non-empty response will do. - tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); + tx.send(true).unwrap(); } ); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains(ProspectiveParachainsMessage::CandidateSeconded( - _, - _ - )) - ); - assert_validate_seconded_candidate( &mut virtual_overseer, candidate_a.descriptor().relay_parent, @@ -1147,13 +1184,6 @@ fn backing_works() { ), ) if candidate_a_hash == candidate_hash && candidate_para_id == para_id ); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::CollatorProtocol(CollatorProtocolMessage::Backed { - para_id: _para_id, - para_head, - }) if para_id == _para_id && candidate_a_para_head == para_head - ); assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution(StatementDistributionMessage::Backed ( @@ -1187,7 +1217,7 @@ fn concurrent_dependent_candidates() { let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN)]; let test_leaf_a = TestLeaf { activated, min_relay_parents }; - activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; let head_data = &[ HeadData(vec![10, 20, 30]), // Before `a`. @@ -1299,13 +1329,10 @@ fn concurrent_dependent_candidates() { // Order is not guaranteed since we have 2 statements being handled concurrently. 
match msg { AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceCandidate(_, tx), + ProspectiveParachainsMessage::IntroduceSecondedCandidate(_, tx), ) => { - tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); + tx.send(true).unwrap(); }, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded(_, _), - ) => {}, AllMessages::RuntimeApi(RuntimeApiMessage::Request( _, RuntimeApiRequest::ValidationCodeByHash(_, tx), @@ -1362,7 +1389,6 @@ fn concurrent_dependent_candidates() { AllMessages::ProspectiveParachains( ProspectiveParachainsMessage::CandidateBacked(..), ) => {}, - AllMessages::CollatorProtocol(CollatorProtocolMessage::Backed { .. }) => {}, AllMessages::StatementDistribution(StatementDistributionMessage::Share( _, statement, @@ -1447,7 +1473,7 @@ fn seconding_sanity_check_occupy_same_depth() { let min_relay_parents = vec![(para_id_a, min_block_number), (para_id_b, min_block_number)]; let test_leaf_a = TestLeaf { activated, min_relay_parents }; - activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd = dummy_pvd(); @@ -1506,44 +1532,35 @@ fn seconding_sanity_check_occupy_same_depth() { persisted_validation_data: pvd.clone(), }; let expected_request_a = vec![( - HypotheticalFrontierRequest { + HypotheticalMembershipRequest { candidates: vec![hypothetical_candidate.clone()], - fragment_tree_relay_parent: Some(leaf_hash), - backed_in_path_only: false, + fragment_chain_relay_parent: Some(leaf_hash), }, // Send the same membership for both candidates. - make_hypothetical_frontier_response(vec![0, 1], hypothetical_candidate, leaf_hash), + make_hypothetical_membership_response(hypothetical_candidate, leaf_hash), )]; - assert_hypothetical_frontier_requests( + assert_hypothetical_membership_requests( &mut virtual_overseer, expected_request_a.clone(), ) .await; // Prospective parachains are notified. - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceCandidate( - req, - tx, - ), - ) if - &req.candidate_receipt == candidate - && &req.candidate_para == para_id - && pvd == req.persisted_validation_data - => { - // Any non-empty response will do. 
- tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); - } - ); - assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded(_, _) - ) + ProspectiveParachainsMessage::IntroduceSecondedCandidate( + req, + tx, + ), + ) if + &req.candidate_receipt == candidate + && &req.candidate_para == para_id + && pvd == req.persisted_validation_data + => { + tx.send(true).unwrap(); + } ); assert_matches!( @@ -1600,7 +1617,7 @@ fn occupied_core_assignment() { let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; let test_leaf_a = TestLeaf { activated, min_relay_parents }; - activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; let pvd = dummy_pvd(); @@ -1648,23 +1665,18 @@ fn occupied_core_assignment() { persisted_validation_data: pvd.clone(), }; let expected_request = vec![( - HypotheticalFrontierRequest { + HypotheticalMembershipRequest { candidates: vec![hypothetical_candidate.clone()], - fragment_tree_relay_parent: Some(leaf_a_hash), - backed_in_path_only: false, + fragment_chain_relay_parent: Some(leaf_a_hash), }, - make_hypothetical_frontier_response( - vec![0, 1, 2, 3], - hypothetical_candidate, - leaf_a_hash, - ), + make_hypothetical_membership_response(hypothetical_candidate, leaf_a_hash), )]; - assert_hypothetical_frontier_requests(&mut virtual_overseer, expected_request).await; + assert_hypothetical_membership_requests(&mut virtual_overseer, expected_request).await; // Prospective parachains are notified. assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::IntroduceCandidate( + ProspectiveParachainsMessage::IntroduceSecondedCandidate( req, tx, ), @@ -1673,19 +1685,10 @@ fn occupied_core_assignment() { && req.candidate_para == para_id && pvd == req.persisted_validation_data => { - // Any non-empty response will do. 
- tx.send(vec![(leaf_a_hash, vec![0, 1, 2, 3])]).unwrap(); + tx.send(true).unwrap(); } ); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ProspectiveParachains(ProspectiveParachainsMessage::CandidateSeconded( - _, - _ - )) - ); - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( diff --git a/polkadot/node/core/dispute-coordinator/Cargo.toml b/polkadot/node/core/dispute-coordinator/Cargo.toml index cd3238449bea..938fdce1cb82 100644 --- a/polkadot/node/core/dispute-coordinator/Cargo.toml +++ b/polkadot/node/core/dispute-coordinator/Cargo.toml @@ -16,7 +16,7 @@ parity-scale-codec = "3.6.1" kvdb = "0.13.0" thiserror = { workspace = true } schnellru = "0.2.1" -fatality = "0.0.6" +fatality = "0.1.0" polkadot-primitives = { path = "../../../primitives" } polkadot-node-primitives = { path = "../../primitives" } diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml index ab3cef99e54f..8d0aec96f010 100644 --- a/polkadot/node/core/prospective-parachains/Cargo.toml +++ b/polkadot/node/core/prospective-parachains/Cargo.toml @@ -14,7 +14,7 @@ futures = "0.3.30" gum = { package = "tracing-gum", path = "../../gum" } parity-scale-codec = "3.6.4" thiserror = { workspace = true } -fatality = "0.0.6" +fatality = "0.1.0" bitvec = "1" polkadot-primitives = { path = "../../../primitives" } @@ -23,7 +23,6 @@ polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } [dev-dependencies] -rstest = "0.18.2" assert_matches = "1" polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } polkadot-node-subsystem-types = { path = "../../subsystem-types" } diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index 86814b976d13..f87d4820ff9a 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -14,35 +14,49 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! A tree utility for managing parachain fragments not referenced by the relay-chain. +//! Utility for managing parachain fragments not referenced by the relay-chain. //! //! # Overview //! -//! This module exposes two main types: [`FragmentTree`] and [`CandidateStorage`] which are meant to -//! be used in close conjunction. Each fragment tree is associated with a particular relay-parent -//! and each node in the tree represents a candidate. Each parachain has a single candidate storage, -//! but can have multiple trees for each relay chain block in the view. +//! This module exposes two main types: [`FragmentChain`] and [`CandidateStorage`] which are meant +//! to be used in close conjunction. Each fragment chain is associated with a particular +//! relay-parent and each node in the chain represents a candidate. Each parachain has a single +//! candidate storage, but can have one chain for each relay chain block in the view. +//! Therefore, the same candidate can be present in multiple fragment chains of a parachain. One of +//! the purposes of the candidate storage is to deduplicate the large candidate data that is being +//! referenced from multiple fragment chains. //! -//! A tree has an associated [`Scope`] which defines limits on candidates within the tree. +//! 
A chain has an associated [`Scope`] which defines limits on candidates within the chain. //! Candidates themselves have their own [`Constraints`] which are either the constraints from the -//! scope, or, if there are previous nodes in the tree, a modified version of the previous +//! scope, or, if there are previous nodes in the chain, a modified version of the previous //! candidate's constraints. //! +//! Another use of the `CandidateStorage` is to keep a record of candidates which may not be yet +//! included in any chain, but which may become part of a chain in the future. This is needed for +//! elastic scaling, so that we may parallelise the backing process across different groups. As long +//! as some basic constraints are not violated by an unconnected candidate (like the relay parent +//! being in scope), we proceed with the backing process, hoping that its predecessors will be +//! backed soon enough. This is commonly called a potential candidate. Note that not all potential +//! candidates will be maintained in the CandidateStorage. The total number of connected + potential +//! candidates will be at most max_candidate_depth + 1. +//! //! This module also makes use of types provided by the Inclusion Emulator module, such as //! [`Fragment`] and [`Constraints`]. These perform the actual job of checking for validity of //! prospective fragments. //! -//! # Usage +//! # Parachain forks //! -//! It's expected that higher-level code will have a tree for each relay-chain block which might -//! reasonably have blocks built upon it. +//! Parachains are expected to not create forks, hence the use of fragment chains as opposed to +//! fragment trees. If parachains do create forks, their performance in regards to async backing and +//! elastic scaling will suffer, because different validators will have different views of the +//! future. //! -//! Because a para only has a single candidate storage, trees only store indices into the storage. -//! The storage is meant to be pruned when trees are dropped by higher-level code. +//! This is a compromise we can make - collators which want to use async backing and elastic scaling +//! need to cooperate for the highest throughput. //! -//! # Cycles +//! # Parachain cycles //! -//! Nodes do not uniquely refer to a parachain block for two reasons. +//! Parachains can create cycles, because: //! 1. There's no requirement that head-data is unique for a parachain. Furthermore, a parachain //! is under no obligation to be acyclic, and this is mostly just because it's totally //! inefficient to enforce it. Practical use-cases are acyclic, but there is still more than @@ -50,34 +64,17 @@ //! 2. and candidates only refer to their parent by its head-data. This whole issue could be //! resolved by having candidates reference their parent by candidate hash. //! -//! The implication is that when we receive a candidate receipt, there are actually multiple -//! possibilities for any candidates between the para-head recorded in the relay parent's state -//! and the candidate in question. -//! -//! This means that our candidates need to handle multiple parents and that depth is an -//! attribute of a node in a tree, not a candidate. Put another way, the same candidate might -//! have different depths in different parts of the tree. +//! However, dealing with cycles increases complexity during the backing/inclusion process for no +//! practical reason. Therefore, fragment chains will not accept such candidates. //! -//! 
-//! As an extreme example, a candidate which produces head-data which is the same as its parent
-//! can correspond to multiple nodes within the same [`FragmentTree`]. Such cycles are bounded
-//! by the maximum depth allowed by the tree. An example with `max_depth: 4`:
+//! On the other hand, enforcing that a parachain can NEVER create cycles would be very complicated
+//! (looping through the entire parachain's history on every new candidate or changing the candidate
+//! receipt to reference the parent's candidate hash).
//!
-//! ```text
-//! committed head
-//! |
-//! depth 0: head_a
-//! |
-//! depth 1: head_b
-//! |
-//! depth 2: head_a
-//! |
-//! depth 3: head_b
-//! |
-//! depth 4: head_a
-//! ```
+//! # Spam protection
//!
//! As long as the [`CandidateStorage`] has bounded input on the number of candidates supplied,
-//! [`FragmentTree`] complexity is bounded. This means that higher-level code needs to be selective
+//! [`FragmentChain`] complexity is bounded. This means that higher-level code needs to be selective
//! about limiting the amount of candidates that are considered.
//!
//! The code in this module is not designed for speed or efficiency, but conceptual simplicity.
@@ -90,16 +87,15 @@
mod tests;
use std::{
- borrow::Cow,
collections::{
hash_map::{Entry, HashMap},
BTreeMap, HashSet,
},
+ sync::Arc,
};
use super::LOG_TARGET;
-use bitvec::prelude::*;
-use polkadot_node_subsystem::messages::Ancestors;
+use polkadot_node_subsystem::messages::{Ancestors, HypotheticalCandidate};
use polkadot_node_subsystem_util::inclusion_emulator::{
ConstraintModifications, Constraints, Fragment, ProspectiveCandidate, RelayChainBlockInfo,
};
@@ -120,11 +116,19 @@ pub enum CandidateStorageInsertionError {
/// Stores candidates and information about them such as their relay-parents and their backing
/// states.
+#[derive(Clone, Default)]
pub(crate) struct CandidateStorage {
- // Index from head data hash to candidate hashes with that head data as a parent.
+ // Index from head data hash to candidate hashes with that head data as a parent. Purely for
+ // efficiency when responding to `ProspectiveValidationDataRequest`s or when trying to find a
+ // new candidate to push to a chain.
+ // Even though having multiple candidates with the same parent would be invalid for a parachain,
+ // it could happen across different relay chain forks, hence the HashSet.
by_parent_head: HashMap>,
- // Index from head data hash to candidate hashes outputting that head data.
+ // Index from head data hash to candidate hashes outputting that head data. Purely for
+ // efficiency when responding to `ProspectiveValidationDataRequest`s.
+ // Even though having multiple candidates with the same output would be invalid for a parachain,
+ // it could happen across different relay chain forks.
by_output_head: HashMap>,
// Index from candidate hash to fragment node.
@@ -132,23 +136,14 @@ pub(crate) struct CandidateStorage {
}
impl CandidateStorage {
- /// Create a new `CandidateStorage`.
- pub fn new() -> Self {
- CandidateStorage {
- by_parent_head: HashMap::new(),
- by_output_head: HashMap::new(),
- by_candidate_hash: HashMap::new(),
- }
- }
-
/// Introduce a new candidate.
pub fn add_candidate( &mut self, candidate: CommittedCandidateReceipt, persisted_validation_data: PersistedValidationData, + state: CandidateState, ) -> Result { let candidate_hash = candidate.hash(); - if self.by_candidate_hash.contains_key(&candidate_hash) { return Err(CandidateStorageInsertionError::CandidateAlreadyKnown(candidate_hash)) } @@ -157,24 +152,30 @@ impl CandidateStorage { return Err(CandidateStorageInsertionError::PersistedValidationDataMismatch) } - let parent_head_hash = persisted_validation_data.parent_head.hash(); - let output_head_hash = candidate.commitments.head_data.hash(); let entry = CandidateEntry { candidate_hash, + parent_head_data_hash: persisted_validation_data.parent_head.hash(), + output_head_data_hash: candidate.commitments.head_data.hash(), relay_parent: candidate.descriptor.relay_parent, - state: CandidateState::Introduced, - candidate: ProspectiveCandidate { - commitments: Cow::Owned(candidate.commitments), + state, + candidate: Arc::new(ProspectiveCandidate { + commitments: candidate.commitments, collator: candidate.descriptor.collator, collator_signature: candidate.descriptor.signature, persisted_validation_data, pov_hash: candidate.descriptor.pov_hash, validation_code_hash: candidate.descriptor.validation_code_hash, - }, + }), }; - self.by_parent_head.entry(parent_head_hash).or_default().insert(candidate_hash); - self.by_output_head.entry(output_head_hash).or_default().insert(candidate_hash); + self.by_parent_head + .entry(entry.parent_head_data_hash()) + .or_default() + .insert(candidate_hash); + self.by_output_head + .entry(entry.output_head_data_hash()) + .or_default() + .insert(candidate_hash); // sanity-checked already. self.by_candidate_hash.insert(candidate_hash, entry); @@ -184,21 +185,20 @@ impl CandidateStorage { /// Remove a candidate from the store. pub fn remove_candidate(&mut self, candidate_hash: &CandidateHash) { if let Some(entry) = self.by_candidate_hash.remove(candidate_hash) { - let parent_head_hash = entry.candidate.persisted_validation_data.parent_head.hash(); - if let Entry::Occupied(mut e) = self.by_parent_head.entry(parent_head_hash) { + if let Entry::Occupied(mut e) = self.by_parent_head.entry(entry.parent_head_data_hash()) + { e.get_mut().remove(&candidate_hash); if e.get().is_empty() { e.remove(); } } - } - } - /// Note that an existing candidate has been seconded. - pub fn mark_seconded(&mut self, candidate_hash: &CandidateHash) { - if let Some(entry) = self.by_candidate_hash.get_mut(candidate_hash) { - if entry.state != CandidateState::Backed { - entry.state = CandidateState::Seconded; + if let Entry::Occupied(mut e) = self.by_output_head.entry(entry.output_head_data_hash()) + { + e.get_mut().remove(&candidate_hash); + if e.get().is_empty() { + e.remove(); + } } } } @@ -225,6 +225,11 @@ impl CandidateStorage { self.by_candidate_hash.contains_key(candidate_hash) } + /// Return an iterator over the stored candidates. + pub fn candidates(&self) -> impl Iterator { + self.by_candidate_hash.values() + } + /// Retain only candidates which pass the predicate. pub(crate) fn retain(&mut self, pred: impl Fn(&CandidateHash) -> bool) { self.by_candidate_hash.retain(|h, _v| pred(h)); @@ -260,16 +265,17 @@ impl CandidateStorage { } /// Returns candidate's relay parent, if present. 
- pub(crate) fn relay_parent_by_candidate_hash( - &self, - candidate_hash: &CandidateHash, - ) -> Option { + pub(crate) fn relay_parent_of_candidate(&self, candidate_hash: &CandidateHash) -> Option { self.by_candidate_hash.get(candidate_hash).map(|entry| entry.relay_parent) } - fn iter_para_children<'a>( + /// Returns the candidates which have the given head data hash as parent. + /// We don't allow forks in a parachain, but we may have multiple candidates with same parent + /// across different relay chain forks. That's why it returns an iterator (but only one will be + /// valid and used in the end). + fn possible_para_children<'a>( &'a self, - parent_head_hash: &Hash, + parent_head_hash: &'a Hash, ) -> impl Iterator + 'a { let by_candidate_hash = &self.by_candidate_hash; self.by_parent_head @@ -279,10 +285,6 @@ impl CandidateStorage { .filter_map(move |h| by_candidate_hash.get(h)) } - fn get(&'_ self, candidate_hash: &CandidateHash) -> Option<&'_ CandidateEntry> { - self.by_candidate_hash.get(candidate_hash) - } - #[cfg(test)] pub fn len(&self) -> (usize, usize) { (self.by_parent_head.len(), self.by_candidate_hash.len()) @@ -292,25 +294,38 @@ impl CandidateStorage { /// The state of a candidate. /// /// Candidates aren't even considered until they've at least been seconded. -#[derive(Debug, PartialEq)] -enum CandidateState { - /// The candidate has been introduced in a spam-protected way but - /// is not necessarily backed. - Introduced, +#[derive(Debug, PartialEq, Clone)] +pub(crate) enum CandidateState { /// The candidate has been seconded. Seconded, /// The candidate has been completely backed by the group. Backed, } -#[derive(Debug)] -struct CandidateEntry { +#[derive(Debug, Clone)] +pub(crate) struct CandidateEntry { candidate_hash: CandidateHash, + parent_head_data_hash: Hash, + output_head_data_hash: Hash, relay_parent: Hash, - candidate: ProspectiveCandidate<'static>, + candidate: Arc, state: CandidateState, } +impl CandidateEntry { + pub fn hash(&self) -> CandidateHash { + self.candidate_hash + } + + pub fn parent_head_data_hash(&self) -> Hash { + self.parent_head_data_hash + } + + pub fn output_head_data_hash(&self) -> Hash { + self.output_head_data_hash + } +} + /// A candidate existing on-chain but pending availability, for special treatment /// in the [`Scope`]. #[derive(Debug, Clone)] @@ -321,15 +336,22 @@ pub(crate) struct PendingAvailability { pub relay_parent: RelayChainBlockInfo, } -/// The scope of a [`FragmentTree`]. -#[derive(Debug)] +/// The scope of a [`FragmentChain`]. +#[derive(Debug, Clone)] pub(crate) struct Scope { + /// The assigned para id of this `FragmentChain`. para: ParaId, + /// The relay parent we're currently building on top of. relay_parent: RelayChainBlockInfo, + /// The other relay parents candidates are allowed to build upon, mapped by the block number. ancestors: BTreeMap, + /// The other relay parents candidates are allowed to build upon, mapped by the block hash. ancestors_by_hash: HashMap, + /// The candidates pending availability at this block. pending_availability: Vec, + /// The base constraints derived from the latest included candidate. base_constraints: Constraints, + /// Equal to `max_candidate_depth`. max_depth: usize, } @@ -398,7 +420,7 @@ impl Scope { }) } - /// Get the earliest relay-parent allowed in the scope of the fragment tree. + /// Get the earliest relay-parent allowed in the scope of the fragment chain. 
pub fn earliest_relay_parent(&self) -> RelayChainBlockInfo { self.ancestors .iter() @@ -407,8 +429,8 @@ impl Scope { .unwrap_or_else(|| self.relay_parent.clone()) } - /// Get the ancestor of the fragment tree by hash. - pub fn ancestor_by_hash(&self, hash: &Hash) -> Option { + /// Get the relay ancestor of the fragment chain by hash. + pub fn ancestor(&self, hash: &Hash) -> Option { if hash == &self.relay_parent.hash { return Some(self.relay_parent.clone()) } @@ -430,67 +452,48 @@ impl Scope { } } -/// We use indices into a flat vector to refer to nodes in the tree. -/// Every tree also has an implicit root. -#[derive(Debug, Clone, Copy, PartialEq)] -enum NodePointer { - Root, - Storage(usize), -} - -/// A hypothetical candidate, which may or may not exist in -/// the fragment tree already. -pub(crate) enum HypotheticalCandidate<'a> { - Complete { - receipt: Cow<'a, CommittedCandidateReceipt>, - persisted_validation_data: Cow<'a, PersistedValidationData>, - }, - Incomplete { - relay_parent: Hash, - parent_head_data_hash: Hash, - }, +pub struct FragmentNode { + fragment: Fragment, + candidate_hash: CandidateHash, + cumulative_modifications: ConstraintModifications, } -impl<'a> HypotheticalCandidate<'a> { - fn parent_head_data_hash(&self) -> Hash { - match *self { - HypotheticalCandidate::Complete { ref persisted_validation_data, .. } => - persisted_validation_data.as_ref().parent_head.hash(), - HypotheticalCandidate::Incomplete { ref parent_head_data_hash, .. } => - *parent_head_data_hash, - } - } - +impl FragmentNode { fn relay_parent(&self) -> Hash { - match *self { - HypotheticalCandidate::Complete { ref receipt, .. } => - receipt.descriptor().relay_parent, - HypotheticalCandidate::Incomplete { ref relay_parent, .. } => *relay_parent, - } + self.fragment.relay_parent().hash } } -/// This is a tree of candidates based on some underlying storage of candidates and a scope. +/// Response given by `can_add_candidate_as_potential` +#[derive(PartialEq, Debug)] +pub enum PotentialAddition { + /// Can be added as either connected or unconnected candidate. + Anyhow, + /// Can only be added as a connected candidate to the chain. + IfConnected, + /// Cannot be added. + None, +} + +/// This is a chain of candidates based on some underlying storage of candidates and a scope. /// -/// All nodes in the tree must be either pending availability or within the scope. Within the scope +/// All nodes in the chain must be either pending availability or within the scope. Within the scope /// means it's built off of the relay-parent or an ancestor. -pub(crate) struct FragmentTree { +pub(crate) struct FragmentChain { scope: Scope, - // Invariant: a contiguous prefix of the 'nodes' storage will contain - // the top-level children. - nodes: Vec, + chain: Vec, + + candidates: HashSet, - // The candidates stored in this tree, mapped to a bitvec indicating the depths - // where the candidate is stored. - candidates: HashMap>, + // Index from head data hash to candidate hashes with that head data as a parent. + by_parent_head: HashMap, + // Index from head data hash to candidate hashes outputting that head data. + by_output_head: HashMap, } -impl FragmentTree { - /// Create a new [`FragmentTree`] with given scope and populated from the storage. - /// - /// Can be populated recursively (i.e. `populate` will pick up candidates that build on other - /// candidates). +impl FragmentChain { + /// Create a new [`FragmentChain`] with given scope and populated from the storage. 
pub fn populate(scope: Scope, storage: &CandidateStorage) -> Self { gum::trace!( target: LOG_TARGET, @@ -498,285 +501,152 @@ impl FragmentTree { relay_parent_num = scope.relay_parent.number, para_id = ?scope.para, ancestors = scope.ancestors.len(), - "Instantiating Fragment Tree", + "Instantiating Fragment Chain", ); - let mut tree = FragmentTree { scope, nodes: Vec::new(), candidates: HashMap::new() }; + let mut fragment_chain = Self { + scope, + chain: Vec::new(), + candidates: HashSet::new(), + by_parent_head: HashMap::new(), + by_output_head: HashMap::new(), + }; - tree.populate_from_bases(storage, vec![NodePointer::Root]); + fragment_chain.populate_chain(storage); - tree + fragment_chain } - /// Get the scope of the Fragment Tree. + /// Get the scope of the Fragment Chain. pub fn scope(&self) -> &Scope { &self.scope } - // Inserts a node and updates child references in a non-root parent. - fn insert_node(&mut self, node: FragmentNode) { - let pointer = NodePointer::Storage(self.nodes.len()); - let parent_pointer = node.parent; - let candidate_hash = node.candidate_hash; - - let max_depth = self.scope.max_depth; - - self.candidates - .entry(candidate_hash) - .or_insert_with(|| bitvec![u16, Msb0; 0; max_depth + 1]) - .set(node.depth, true); - - match parent_pointer { - NodePointer::Storage(ptr) => { - self.nodes.push(node); - self.nodes[ptr].children.push((pointer, candidate_hash)) - }, - NodePointer::Root => { - // Maintain the invariant of node storage beginning with depth-0. - if self.nodes.last().map_or(true, |last| last.parent == NodePointer::Root) { - self.nodes.push(node); - } else { - let pos = - self.nodes.iter().take_while(|n| n.parent == NodePointer::Root).count(); - self.nodes.insert(pos, node); - } - }, - } - } - - fn node_has_candidate_child( - &self, - pointer: NodePointer, - candidate_hash: &CandidateHash, - ) -> bool { - self.node_candidate_child(pointer, candidate_hash).is_some() - } - - fn node_candidate_child( - &self, - pointer: NodePointer, - candidate_hash: &CandidateHash, - ) -> Option { - match pointer { - NodePointer::Root => self - .nodes - .iter() - .take_while(|n| n.parent == NodePointer::Root) - .enumerate() - .find(|(_, n)| &n.candidate_hash == candidate_hash) - .map(|(i, _)| NodePointer::Storage(i)), - NodePointer::Storage(ptr) => - self.nodes.get(ptr).and_then(|n| n.candidate_child(candidate_hash)), - } + /// Returns the number of candidates in the chain + pub(crate) fn len(&self) -> usize { + self.candidates.len() } - /// Returns an O(n) iterator over the hashes of candidates contained in the - /// tree. - pub(crate) fn candidates(&self) -> impl Iterator + '_ { - self.candidates.keys().cloned() + /// Whether the candidate exists. + pub(crate) fn contains_candidate(&self, candidate: &CandidateHash) -> bool { + self.candidates.contains(candidate) } - /// Whether the candidate exists and at what depths. - pub(crate) fn candidate(&self, candidate: &CandidateHash) -> Option> { - self.candidates.get(candidate).map(|d| d.iter_ones().collect()) + /// Return a vector of the chain's candidate hashes, in-order. + pub(crate) fn to_vec(&self) -> Vec { + self.chain.iter().map(|candidate| candidate.candidate_hash).collect() } - /// Add a candidate and recursively populate from storage. + /// Try accumulating more candidates onto the chain. /// - /// Candidates can be added either as children of the root or children of other candidates. 
- pub(crate) fn add_and_populate(&mut self, hash: CandidateHash, storage: &CandidateStorage) { - let candidate_entry = match storage.get(&hash) { - None => return, - Some(e) => e, - }; - - let candidate_parent = &candidate_entry.candidate.persisted_validation_data.parent_head; - - // Select an initial set of bases, whose required relay-parent matches that of the - // candidate. - let root_base = if &self.scope.base_constraints.required_parent == candidate_parent { - Some(NodePointer::Root) - } else { - None - }; - - let non_root_bases = self - .nodes - .iter() - .enumerate() - .filter(|(_, n)| { - n.cumulative_modifications.required_parent.as_ref() == Some(candidate_parent) - }) - .map(|(i, _)| NodePointer::Storage(i)); - - let bases = root_base.into_iter().chain(non_root_bases).collect(); - - // Pass this into the population function, which will sanity-check stuff like depth, - // fragments, etc. and then recursively populate. - self.populate_from_bases(storage, bases); + /// Candidates can only be added if they build on the already existing chain. + pub(crate) fn extend_from_storage(&mut self, storage: &CandidateStorage) { + self.populate_chain(storage); } - /// Returns `true` if the path from the root to the node's parent (inclusive) - /// only contains backed candidates, `false` otherwise. - fn path_contains_backed_only_candidates( + /// Returns the hypothetical state of a candidate with the given hash and parent head data + /// in regards to the existing chain. + /// + /// Returns true if either: + /// - the candidate is already present + /// - the candidate can be added to the chain + /// - the candidate could potentially be added to the chain in the future (its ancestors are + /// still unknown but it doesn't violate other rules). + /// + /// If this returns false, the candidate could never be added to the current chain (not now, not + /// ever) + pub(crate) fn hypothetical_membership( &self, - mut parent_pointer: NodePointer, + candidate: HypotheticalCandidate, candidate_storage: &CandidateStorage, ) -> bool { - while let NodePointer::Storage(ptr) = parent_pointer { - let node = &self.nodes[ptr]; - let candidate_hash = &node.candidate_hash; - - if candidate_storage.get(candidate_hash).map_or(true, |candidate_entry| { - !matches!(candidate_entry.state, CandidateState::Backed) - }) { - return false - } - parent_pointer = node.parent; + let candidate_hash = candidate.candidate_hash(); + + // If we've already used this candidate in the chain + if self.candidates.contains(&candidate_hash) { + return true } - true - } + let can_add_as_potential = self.can_add_candidate_as_potential( + candidate_storage, + &candidate.candidate_hash(), + &candidate.relay_parent(), + candidate.parent_head_data_hash(), + candidate.output_head_data_hash(), + ); - /// Returns the hypothetical depths where a candidate with the given hash and parent head data - /// would be added to the tree, without applying other candidates recursively on top of it. - /// - /// If the candidate is already known, this returns the actual depths where this - /// candidate is part of the tree. - /// - /// Setting `backed_in_path_only` to `true` ensures this function only returns such membership - /// that every candidate in the path from the root is backed. - pub(crate) fn hypothetical_depths( - &self, - hash: CandidateHash, - candidate: HypotheticalCandidate, - candidate_storage: &CandidateStorage, - backed_in_path_only: bool, - ) -> Vec { - // if `true`, we always have to traverse the tree. 
- if !backed_in_path_only { - // if known. - if let Some(depths) = self.candidates.get(&hash) { - return depths.iter_ones().collect() - } + if can_add_as_potential == PotentialAddition::None { + return false } - // if out of scope. - let candidate_relay_parent = candidate.relay_parent(); - let candidate_relay_parent = if self.scope.relay_parent.hash == candidate_relay_parent { - self.scope.relay_parent.clone() - } else if let Some(info) = self.scope.ancestors_by_hash.get(&candidate_relay_parent) { - info.clone() + let Some(candidate_relay_parent) = self.scope.ancestor(&candidate.relay_parent()) else { + // can_add_candidate_as_potential already checked for this, but just to be safe. + return false + }; + + let identity_modifications = ConstraintModifications::identity(); + let cumulative_modifications = if let Some(last_candidate) = self.chain.last() { + &last_candidate.cumulative_modifications } else { - return Vec::new() + &identity_modifications }; - let max_depth = self.scope.max_depth; - let mut depths = bitvec![u16, Msb0; 0; max_depth + 1]; - - // iterate over all nodes where parent head-data matches, - // relay-parent number is <= candidate, and depth < max_depth. - let node_pointers = (0..self.nodes.len()).map(NodePointer::Storage); - for parent_pointer in std::iter::once(NodePointer::Root).chain(node_pointers) { - let (modifications, child_depth, earliest_rp) = match parent_pointer { - NodePointer::Root => - (ConstraintModifications::identity(), 0, self.scope.earliest_relay_parent()), - NodePointer::Storage(ptr) => { - let node = &self.nodes[ptr]; - let parent_rp = self - .scope - .ancestor_by_hash(&node.relay_parent()) - .or_else(|| { - self.scope - .get_pending_availability(&node.candidate_hash) - .map(|_| self.scope.earliest_relay_parent()) - }) - .expect("All nodes in tree are either pending availability or within scope; qed"); - - (node.cumulative_modifications.clone(), node.depth + 1, parent_rp) + let child_constraints = + match self.scope.base_constraints.apply_modifications(&cumulative_modifications) { + Err(e) => { + gum::debug!( + target: LOG_TARGET, + new_parent_head = ?cumulative_modifications.required_parent, + ?candidate_hash, + err = ?e, + "Failed to apply modifications", + ); + + return false }, + Ok(c) => c, }; - if child_depth > max_depth { - continue - } - - if earliest_rp.number > candidate_relay_parent.number { - continue - } - - let child_constraints = - match self.scope.base_constraints.apply_modifications(&modifications) { - Err(e) => { - gum::debug!( - target: LOG_TARGET, - new_parent_head = ?modifications.required_parent, - err = ?e, - "Failed to apply modifications", - ); - - continue - }, - Ok(c) => c, - }; - - let parent_head_hash = candidate.parent_head_data_hash(); - if parent_head_hash != child_constraints.required_parent.hash() { - continue - } - + let parent_head_hash = candidate.parent_head_data_hash(); + if parent_head_hash == child_constraints.required_parent.hash() { // We do additional checks for complete candidates. - if let HypotheticalCandidate::Complete { ref receipt, ref persisted_validation_data } = - candidate + if let HypotheticalCandidate::Complete { + ref receipt, + ref persisted_validation_data, + .. 
+ } = candidate { - let prospective_candidate = ProspectiveCandidate { - commitments: Cow::Borrowed(&receipt.commitments), - collator: receipt.descriptor().collator.clone(), - collator_signature: receipt.descriptor().signature.clone(), - persisted_validation_data: persisted_validation_data.as_ref().clone(), - pov_hash: receipt.descriptor().pov_hash, - validation_code_hash: receipt.descriptor().validation_code_hash, - }; - - if Fragment::new( - candidate_relay_parent.clone(), - child_constraints, - prospective_candidate, + if Fragment::check_against_constraints( + &candidate_relay_parent, + &child_constraints, + &receipt.commitments, + &receipt.descriptor().validation_code_hash, + persisted_validation_data, ) .is_err() { - continue + gum::debug!( + target: LOG_TARGET, + "Fragment::check_against_constraints() returned error", + ); + return false } } - // Check that the path only contains backed candidates, if necessary. - if !backed_in_path_only || - self.path_contains_backed_only_candidates(parent_pointer, candidate_storage) - { - depths.set(child_depth, true); - } + // If we got this far, it can be added to the chain right now. + true + } else if can_add_as_potential == PotentialAddition::Anyhow { + // Otherwise it is or can be an unconnected candidate, but only if PotentialAddition + // does not force us to only add a connected candidate. + true + } else { + false } - - depths.iter_ones().collect() } /// Select `count` candidates after the given `ancestors` which pass /// the predicate and have not already been backed on chain. /// - /// Does an exhaustive search into the tree after traversing the ancestors path. - /// If the ancestors draw out a path that can be traversed in multiple ways, no - /// candidates will be returned. - /// If the ancestors do not draw out a full path (the path contains holes), candidates will be - /// suggested that may fill these holes. - /// If the ancestors don't draw out a valid path, no candidates will be returned. If there are - /// multiple possibilities of the same size, this will select the first one. If there is no - /// chain of size `count` that matches the criteria, this will return the largest chain it could - /// find with the criteria. If there are no candidates meeting those criteria, returns an empty - /// `Vec`. - /// Cycles are accepted, but this code expects that the runtime will deduplicate - /// identical candidates when occupying the cores (when proposing to back A->B->A, only A will - /// be backed on chain). - /// /// The intention of the `ancestors` is to allow queries on the basis of /// one or more candidates which were previously pending availability becoming /// available or candidates timing out. @@ -789,362 +659,334 @@ impl FragmentTree { if count == 0 { return vec![] } - // First, we need to order the ancestors. - // The node returned is the one from which we can start finding new backable candidates. - let Some(base_node) = self.find_ancestor_path(ancestors) else { return vec![] }; - - self.find_backable_chain_inner( - base_node, - count, - count, - &pred, - &mut Vec::with_capacity(count as usize), - ) - } + let base_pos = self.find_ancestor_path(ancestors); - // Try finding a candidate chain starting from `base_node` of length `expected_count`. - // If not possible, return the longest one we could find. - // Does a depth-first search, since we're optimistic that there won't be more than one such - // chains (parachains shouldn't usually have forks). So in the usual case, this will conclude - // in `O(expected_count)`. 
- // Cycles are accepted, but this doesn't allow for infinite execution time, because the maximum
- // depth we'll reach is `expected_count`.
- //
- // Worst case performance is `O(num_forks ^ expected_count)`, the same as populating the tree.
- // Although an exponential function, this is actually a constant that can only be altered via
- // sudo/governance, because:
- // 1. `num_forks` at a given level is at most `max_candidate_depth * max_validators_per_core`
- // (because each validator in the assigned group can second `max_candidate_depth`
- // candidates). The prospective-parachains subsystem assumes that the number of para forks is
- // limited by collator-protocol and backing subsystems. In practice, this is a constant which
- // can only be altered by sudo or governance.
- // 2. `expected_count` is equal to the number of cores a para is scheduled on (in an elastic
- // scaling scenario). For non-elastic-scaling, this is just 1. In practice, this should be a
- // small number (1-3), capped by the total number of available cores (a constant alterable
- // only via governance/sudo).
- fn find_backable_chain_inner(
- &self,
- base_node: NodePointer,
- expected_count: u32,
- remaining_count: u32,
- pred: &dyn Fn(&CandidateHash) -> bool,
- accumulator: &mut Vec,
- ) -> Vec {
- if remaining_count == 0 {
- // The best option is the chain we've accumulated so far.
- return accumulator.to_vec();
+ let actual_end_index = std::cmp::min(base_pos + (count as usize), self.chain.len());
+ let mut res = Vec::with_capacity(actual_end_index - base_pos);
+
+ for elem in &self.chain[base_pos..actual_end_index] {
+ if self.scope.get_pending_availability(&elem.candidate_hash).is_none() &&
+ pred(&elem.candidate_hash)
+ {
+ res.push(elem.candidate_hash);
+ } else {
+ break
+ }
}
- let children: Vec<_> = match base_node {
- NodePointer::Root => self
- .nodes
- .iter()
- .enumerate()
- .take_while(|(_, n)| n.parent == NodePointer::Root)
- .filter(|(_, n)| self.scope.get_pending_availability(&n.candidate_hash).is_none())
- .filter(|(_, n)| pred(&n.candidate_hash))
- .map(|(ptr, n)| (NodePointer::Storage(ptr), n.candidate_hash))
- .collect(),
- NodePointer::Storage(base_node_ptr) => {
- let base_node = &self.nodes[base_node_ptr];
-
- base_node
- .children
- .iter()
- .filter(|(_, hash)| self.scope.get_pending_availability(&hash).is_none())
- .filter(|(_, hash)| pred(&hash))
- .map(|(ptr, hash)| (*ptr, *hash))
- .collect()
- },
- };
+ res
+ }
+
+ // Tries to order the ancestors into a viable path from root to the last one.
+ // Stops when the ancestors are all used or when a node in the chain is not present in the
+ // ancestor set. Returns the index in the chain where the search stopped.
+ fn find_ancestor_path(&self, mut ancestors: Ancestors) -> usize {
+ if self.chain.is_empty() {
+ return 0;
+ }

- let mut best_result = accumulator.clone();
- for (child_ptr, child_hash) in children {
- accumulator.push(child_hash);
-
- let result = self.find_backable_chain_inner(
- child_ptr,
- expected_count,
- remaining_count - 1,
- &pred,
- accumulator,
- );
-
- accumulator.pop();
-
- // Short-circuit the search if we've found the right length. Otherwise, we'll
- // search for a max.
- // Taking the first best selection doesn't introduce bias or become gameable,
- // because `find_ancestor_path` uses a `HashSet` to track the ancestors, which
- // makes the order in which ancestors are visited non-deterministic.
- if result.len() == expected_count as usize { - return result - } else if best_result.len() < result.len() { - best_result = result; + for (index, candidate) in self.chain.iter().enumerate() { + if !ancestors.remove(&candidate.candidate_hash) { + return index } } - best_result + // This means that we found the entire chain in the ancestor set. There won't be anything + // left to back. + self.chain.len() + } + + // Return the earliest relay parent a new candidate can have in order to be added to the chain. + // This is the relay parent of the last candidate in the chain. + // The value returned may not be valid if we want to add a candidate pending availability, which + // may have a relay parent which is out of scope. Special handling is needed in that case. + // `None` is returned if the candidate's relay parent info cannot be found. + fn earliest_relay_parent(&self) -> Option { + if let Some(last_candidate) = self.chain.last() { + self.scope.ancestor(&last_candidate.relay_parent()).or_else(|| { + // if the relay-parent is out of scope _and_ it is in the chain, + // it must be a candidate pending availability. + self.scope + .get_pending_availability(&last_candidate.candidate_hash) + .map(|c| c.relay_parent.clone()) + }) + } else { + Some(self.scope.earliest_relay_parent()) + } + } + + // Checks if this candidate could be added in the future to this chain. + // This assumes that the chain does not already contain this candidate. It may or may not be + // present in the `CandidateStorage`. + // Even if the candidate is a potential candidate, this function will indicate that it can be + // kept only if there's enough room for it. + pub(crate) fn can_add_candidate_as_potential( + &self, + storage: &CandidateStorage, + candidate_hash: &CandidateHash, + relay_parent: &Hash, + parent_head_hash: Hash, + output_head_hash: Option, + ) -> PotentialAddition { + // If we've got enough candidates for the configured depth, no point in adding more. + if self.chain.len() > self.scope.max_depth { + return PotentialAddition::None + } + + if !self.check_potential(relay_parent, parent_head_hash, output_head_hash) { + return PotentialAddition::None + } + + let present_in_storage = storage.contains(candidate_hash); + + let unconnected = self + .find_unconnected_potential_candidates( + storage, + present_in_storage.then_some(candidate_hash), + ) + .len(); + + if (self.chain.len() + unconnected) < self.scope.max_depth { + PotentialAddition::Anyhow + } else if (self.chain.len() + unconnected) == self.scope.max_depth { + // If we've only one slot left to fill, it must be filled with a connected candidate. + PotentialAddition::IfConnected + } else { + PotentialAddition::None + } } - // Orders the ancestors into a viable path from root to the last one. - // Returns a pointer to the last node in the path. - // We assume that the ancestors form a chain (that the - // av-cores do not back parachain forks), None is returned otherwise. - // If we cannot use all ancestors, stop at the first found hole in the chain. This usually - // translates to a timed out candidate. - fn find_ancestor_path(&self, mut ancestors: Ancestors) -> Option { - // The number of elements in the path we've processed so far. 
- let mut depth = 0; - let mut last_node = NodePointer::Root; - let mut next_node: Option = Some(NodePointer::Root); - - while let Some(node) = next_node { - if depth > self.scope.max_depth { - return None; + // The candidates which are present in `CandidateStorage`, are not part of this chain but could + // become part of this chain in the future. Capped at the max depth minus the existing chain + // length. + // If `ignore_candidate` is supplied and found in storage, it won't be counted. + pub(crate) fn find_unconnected_potential_candidates( + &self, + storage: &CandidateStorage, + ignore_candidate: Option<&CandidateHash>, + ) -> Vec { + let mut candidates = vec![]; + for candidate in storage.candidates() { + if let Some(ignore_candidate) = ignore_candidate { + if ignore_candidate == &candidate.candidate_hash { + continue + } + } + // We stop at max_depth + 1 with the search. There's no point in looping further. + if (self.chain.len() + candidates.len()) > self.scope.max_depth { + break + } + if !self.candidates.contains(&candidate.candidate_hash) && + self.check_potential( + &candidate.relay_parent, + candidate.candidate.persisted_validation_data.parent_head.hash(), + Some(candidate.candidate.commitments.head_data.hash()), + ) { + candidates.push(candidate.candidate_hash); } + } + + candidates + } - last_node = node; + // Check if adding a candidate which transitions `parent_head_hash` to `output_head_hash` would + // introduce a fork or a cycle in the parachain. + // `output_head_hash` is optional because we sometimes make this check before retrieving the + // collation. + fn is_fork_or_cycle(&self, parent_head_hash: Hash, output_head_hash: Option) -> bool { + if self.by_parent_head.contains_key(&parent_head_hash) { + // fork. our parent has another child already + return true + } - next_node = match node { - NodePointer::Root => { - let children = self - .nodes - .iter() - .enumerate() - .take_while(|n| n.1.parent == NodePointer::Root) - .map(|(index, node)| (NodePointer::Storage(index), node.candidate_hash)) - .collect::>(); + if let Some(output_head_hash) = output_head_hash { + if self.by_output_head.contains_key(&output_head_hash) { + // this is not a chain, there are multiple paths to the same state. + return true + } - self.find_valid_child(&mut ancestors, children.iter()).ok()? - }, - NodePointer::Storage(ptr) => { - let children = self.nodes.get(ptr).and_then(|n| Some(n.children.iter())); - if let Some(children) = children { - self.find_valid_child(&mut ancestors, children).ok()? - } else { - None - } - }, - }; + // trivial 0-length cycle. + if parent_head_hash == output_head_hash { + return true + } - depth += 1; + // this should catch any other cycles. our output state cannot already be the parent + // state of another candidate, unless this is a cycle, since the already added + // candidates form a chain. + if self.by_parent_head.contains_key(&output_head_hash) { + return true + } } - Some(last_node) + false } - // Find a node from the given iterator which is present in the ancestors - // collection. If there are multiple such nodes, return an error and log a warning. We don't - // accept forks in a parachain to be backed. The supplied ancestors should all form a chain. - // If there is no such node, return None. - fn find_valid_child<'a>( + // Checks the potential of a candidate to be added to the chain in the future. 
+ // Verifies that the relay parent is in scope and not moving backwards and that we're not + // introducing forks or cycles with other candidates in the chain. + // `output_head_hash` is optional because we sometimes make this check before retrieving the + // collation. + fn check_potential( &self, - ancestors: &'a mut Ancestors, - nodes: impl Iterator + 'a, - ) -> Result, ()> { - let mut possible_children = - nodes.filter_map(|(node_ptr, hash)| match ancestors.remove(&hash) { - true => Some(node_ptr), - false => None, - }); - - // We don't accept forks in a parachain to be backed. The supplied ancestors - // should all form a chain. - let next = possible_children.next(); - if let Some(second_child) = possible_children.next() { - if let (Some(NodePointer::Storage(first_child)), NodePointer::Storage(second_child)) = - (next, second_child) - { - gum::error!( - target: LOG_TARGET, - para_id = ?self.scope.para, - relay_parent = ?self.scope.relay_parent, - "Trying to find new backable candidates for a parachain for which we've backed a fork.\ - This is a bug and the runtime should not have allowed it.\n\ - Backed candidates with the same parent: {}, {}", - self.nodes[*first_child].candidate_hash, - self.nodes[*second_child].candidate_hash, - ); - } + relay_parent: &Hash, + parent_head_hash: Hash, + output_head_hash: Option, + ) -> bool { + if self.is_fork_or_cycle(parent_head_hash, output_head_hash) { + return false + } - Err(()) - } else { - Ok(next.copied()) + let Some(earliest_rp) = self.earliest_relay_parent() else { return false }; + + let Some(relay_parent) = self.scope.ancestor(relay_parent) else { return false }; + + if relay_parent.number < earliest_rp.number { + return false // relay parent moved backwards. } + + true } - fn populate_from_bases(&mut self, storage: &CandidateStorage, initial_bases: Vec) { - // Populate the tree breadth-first. - let mut last_sweep_start = None; + // Populate the fragment chain with candidates from CandidateStorage. + // Can be called by the constructor or when introducing a new candidate. + // If we're introducing a new candidate onto an existing chain, we may introduce more than one, + // since we may connect already existing candidates to the chain. + fn populate_chain(&mut self, storage: &CandidateStorage) { + let mut cumulative_modifications = if let Some(last_candidate) = self.chain.last() { + last_candidate.cumulative_modifications.clone() + } else { + ConstraintModifications::identity() + }; + let Some(mut earliest_rp) = self.earliest_relay_parent() else { return }; loop { - let sweep_start = self.nodes.len(); - - if Some(sweep_start) == last_sweep_start { - break + if self.chain.len() > self.scope.max_depth { + break; } - let parents: Vec = if let Some(last_start) = last_sweep_start { - (last_start..self.nodes.len()).map(NodePointer::Storage).collect() - } else { - initial_bases.clone() - }; + let child_constraints = + match self.scope.base_constraints.apply_modifications(&cumulative_modifications) { + Err(e) => { + gum::debug!( + target: LOG_TARGET, + new_parent_head = ?cumulative_modifications.required_parent, + err = ?e, + "Failed to apply modifications", + ); - // 1. get parent head and find constraints - // 2. iterate all candidates building on the right head and viable relay parent - // 3. 
add new node - for parent_pointer in parents { - let (modifications, child_depth, earliest_rp) = match parent_pointer { - NodePointer::Root => - (ConstraintModifications::identity(), 0, self.scope.earliest_relay_parent()), - NodePointer::Storage(ptr) => { - let node = &self.nodes[ptr]; - let parent_rp = self - .scope - .ancestor_by_hash(&node.relay_parent()) - .or_else(|| { - // if the relay-parent is out of scope _and_ it is in the tree, - // it must be a candidate pending availability. - self.scope - .get_pending_availability(&node.candidate_hash) - .map(|c| c.relay_parent.clone()) - }) - .expect("All nodes in tree are either pending availability or within scope; qed"); - - (node.cumulative_modifications.clone(), node.depth + 1, parent_rp) + break }, + Ok(c) => c, }; - if child_depth > self.scope.max_depth { + let required_head_hash = child_constraints.required_parent.hash(); + // Even though we don't allow parachain forks under the same active leaf, they may still + // appear under different relay chain forks, hence the iterator below. + let possible_children = storage.possible_para_children(&required_head_hash); + let mut added_child = false; + for candidate in possible_children { + // Add one node to chain if + // 1. it does not introduce a fork or a cycle. + // 2. parent hash is correct. + // 3. relay-parent does not move backwards. + // 4. all non-pending-availability candidates have relay-parent in scope. + // 5. candidate outputs fulfill constraints + + if self.is_fork_or_cycle( + candidate.parent_head_data_hash(), + Some(candidate.output_head_data_hash()), + ) { continue } - let child_constraints = - match self.scope.base_constraints.apply_modifications(&modifications) { + let pending = self.scope.get_pending_availability(&candidate.candidate_hash); + let Some(relay_parent) = pending + .map(|p| p.relay_parent.clone()) + .or_else(|| self.scope.ancestor(&candidate.relay_parent)) + else { + continue + }; + + // require: candidates don't move backwards + // and only pending availability candidates can be out-of-scope. + // + // earliest_rp can be before the earliest relay parent in the scope + // when the parent is a pending availability candidate as well, but + // only other pending candidates can have a relay parent out of scope. + let min_relay_parent_number = pending + .map(|p| match self.chain.len() { + 0 => p.relay_parent.number, + _ => earliest_rp.number, + }) + .unwrap_or_else(|| earliest_rp.number); + + if relay_parent.number < min_relay_parent_number { + continue // relay parent moved backwards. + } + + // don't add candidates if they're already present in the chain. + // this can never happen, as candidates can only be duplicated if there's a cycle + // and we shouldn't have allowed for a cycle to be chained. + if self.contains_candidate(&candidate.candidate_hash) { + continue + } + + let fragment = { + let mut constraints = child_constraints.clone(); + if let Some(ref p) = pending { + // overwrite for candidates pending availability as a special-case. 
+ constraints.min_relay_parent_number = p.relay_parent.number; + } + + let f = Fragment::new( + relay_parent.clone(), + constraints, + // It's cheap to clone because it's wrapped in an Arc + candidate.candidate.clone(), + ); + + match f { + Ok(f) => f, Err(e) => { gum::debug!( target: LOG_TARGET, - new_parent_head = ?modifications.required_parent, err = ?e, - "Failed to apply modifications", + ?relay_parent, + candidate_hash = ?candidate.candidate_hash, + "Failed to instantiate fragment", ); - continue + break }, - Ok(c) => c, - }; - - // Add nodes to tree wherever - // 1. parent hash is correct - // 2. relay-parent does not move backwards. - // 3. all non-pending-availability candidates have relay-parent in scope. - // 4. candidate outputs fulfill constraints - let required_head_hash = child_constraints.required_parent.hash(); - for candidate in storage.iter_para_children(&required_head_hash) { - let pending = self.scope.get_pending_availability(&candidate.candidate_hash); - let relay_parent = pending - .map(|p| p.relay_parent.clone()) - .or_else(|| self.scope.ancestor_by_hash(&candidate.relay_parent)); - - let relay_parent = match relay_parent { - Some(r) => r, - None => continue, - }; - - // require: pending availability candidates don't move backwards - // and only those can be out-of-scope. - // - // earliest_rp can be before the earliest relay parent in the scope - // when the parent is a pending availability candidate as well, but - // only other pending candidates can have a relay parent out of scope. - let min_relay_parent_number = pending - .map(|p| match parent_pointer { - NodePointer::Root => p.relay_parent.number, - NodePointer::Storage(_) => earliest_rp.number, - }) - .unwrap_or_else(|| { - std::cmp::max( - earliest_rp.number, - self.scope.earliest_relay_parent().number, - ) - }); - - if relay_parent.number < min_relay_parent_number { - continue // relay parent moved backwards. } + }; - // don't add candidates where the parent already has it as a child. - if self.node_has_candidate_child(parent_pointer, &candidate.candidate_hash) { - continue - } + // Update the cumulative constraint modifications. + cumulative_modifications.stack(fragment.constraint_modifications()); + // Update the earliest rp + earliest_rp = relay_parent; - let fragment = { - let mut constraints = child_constraints.clone(); - if let Some(ref p) = pending { - // overwrite for candidates pending availability as a special-case. - constraints.min_relay_parent_number = p.relay_parent.number; - } - - let f = Fragment::new( - relay_parent.clone(), - constraints, - candidate.candidate.partial_clone(), - ); + let node = FragmentNode { + fragment, + candidate_hash: candidate.candidate_hash, + cumulative_modifications: cumulative_modifications.clone(), + }; - match f { - Ok(f) => f.into_owned(), - Err(e) => { - gum::debug!( - target: LOG_TARGET, - err = ?e, - ?relay_parent, - candidate_hash = ?candidate.candidate_hash, - "Failed to instantiate fragment", - ); - - continue - }, - } - }; - - let mut cumulative_modifications = modifications.clone(); - cumulative_modifications.stack(fragment.constraint_modifications()); - - let node = FragmentNode { - parent: parent_pointer, - fragment, - candidate_hash: candidate.candidate_hash, - depth: child_depth, - cumulative_modifications, - children: Vec::new(), - }; - - self.insert_node(node); - } + self.chain.push(node); + self.candidates.insert(candidate.candidate_hash); + // We've already checked for forks and cycles. 
+ self.by_parent_head + .insert(candidate.parent_head_data_hash(), candidate.candidate_hash); + self.by_output_head + .insert(candidate.output_head_data_hash(), candidate.candidate_hash); + added_child = true; + // We can only add one child for a candidate. (it's a chain, not a tree) + break; } - last_sweep_start = Some(sweep_start); + if !added_child { + break + } } } } - -struct FragmentNode { - // A pointer to the parent node. - parent: NodePointer, - fragment: Fragment<'static>, - candidate_hash: CandidateHash, - depth: usize, - cumulative_modifications: ConstraintModifications, - children: Vec<(NodePointer, CandidateHash)>, -} - -impl FragmentNode { - fn relay_parent(&self) -> Hash { - self.fragment.relay_parent().hash - } - - fn candidate_child(&self, candidate_hash: &CandidateHash) -> Option { - self.children.iter().find(|(_, c)| c == candidate_hash).map(|(p, _)| *p) - } -} diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs index fd41be55f7f9..26ee94d59d8e 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs @@ -19,17 +19,6 @@ use assert_matches::assert_matches; use polkadot_node_subsystem_util::inclusion_emulator::InboundHrmpLimitations; use polkadot_primitives::{BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData}; use polkadot_primitives_test_helpers as test_helpers; -use rstest::rstest; -use std::iter; - -impl NodePointer { - fn unwrap_idx(self) -> usize { - match self { - NodePointer::Root => panic!("Unexpected root"), - NodePointer::Storage(index) => index, - } - } -} fn make_constraints( min_relay_parent_number: BlockNumber, @@ -204,8 +193,52 @@ fn scope_only_takes_ancestors_up_to_min() { } #[test] -fn storage_add_candidate() { - let mut storage = CandidateStorage::new(); +fn scope_rejects_unordered_ancestors() { + let para_id = ParaId::from(5u32); + let relay_parent = RelayChainBlockInfo { + number: 5, + hash: Hash::repeat_byte(0), + storage_root: Hash::repeat_byte(69), + }; + + let ancestors = vec![ + RelayChainBlockInfo { + number: 4, + hash: Hash::repeat_byte(4), + storage_root: Hash::repeat_byte(69), + }, + RelayChainBlockInfo { + number: 2, + hash: Hash::repeat_byte(2), + storage_root: Hash::repeat_byte(69), + }, + RelayChainBlockInfo { + number: 3, + hash: Hash::repeat_byte(3), + storage_root: Hash::repeat_byte(69), + }, + ]; + + let max_depth = 2; + let base_constraints = make_constraints(0, vec![2], vec![1, 2, 3].into()); + let pending_availability = Vec::new(); + + assert_matches!( + Scope::with_ancestors( + para_id, + relay_parent, + base_constraints, + pending_availability, + max_depth, + ancestors, + ), + Err(UnexpectedAncestor { number: 2, prev: 4 }) + ); +} + +#[test] +fn candidate_storage_methods() { + let mut storage = CandidateStorage::default(); let relay_parent = Hash::repeat_byte(69); let (pvd, candidate) = make_committed_candidate( @@ -220,50 +253,105 @@ fn storage_add_candidate() { let candidate_hash = candidate.hash(); let parent_head_hash = pvd.parent_head.hash(); - storage.add_candidate(candidate, pvd).unwrap(); + // Invalid pvd hash + let mut wrong_pvd = pvd.clone(); + wrong_pvd.max_pov_size = 0; + assert_matches!( + storage.add_candidate(candidate.clone(), wrong_pvd, CandidateState::Seconded), + Err(CandidateStorageInsertionError::PersistedValidationDataMismatch) + ); + assert!(!storage.contains(&candidate_hash)); + 
assert_eq!(storage.possible_para_children(&parent_head_hash).count(), 0); + assert_eq!(storage.relay_parent_of_candidate(&candidate_hash), None); + assert_eq!(storage.head_data_by_hash(&candidate.descriptor.para_head), None); + assert_eq!(storage.head_data_by_hash(&parent_head_hash), None); + assert_eq!(storage.is_backed(&candidate_hash), false); + + // Add a valid candidate + storage + .add_candidate(candidate.clone(), pvd.clone(), CandidateState::Seconded) + .unwrap(); assert!(storage.contains(&candidate_hash)); - assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 1); - - assert_eq!(storage.relay_parent_by_candidate_hash(&candidate_hash), Some(relay_parent)); -} + assert_eq!(storage.possible_para_children(&parent_head_hash).count(), 1); + assert_eq!(storage.possible_para_children(&candidate.descriptor.para_head).count(), 0); + assert_eq!(storage.relay_parent_of_candidate(&candidate_hash), Some(relay_parent)); + assert_eq!( + storage.head_data_by_hash(&candidate.descriptor.para_head).unwrap(), + &candidate.commitments.head_data + ); + assert_eq!(storage.head_data_by_hash(&parent_head_hash).unwrap(), &pvd.parent_head); + assert_eq!(storage.is_backed(&candidate_hash), false); -#[test] -fn storage_retain() { - let mut storage = CandidateStorage::new(); + storage.mark_backed(&candidate_hash); + assert_eq!(storage.is_backed(&candidate_hash), true); - let (pvd, candidate) = make_committed_candidate( - ParaId::from(5u32), - Hash::repeat_byte(69), - 8, - vec![4, 5, 6].into(), - vec![1, 2, 3].into(), - 7, + // Re-adding a candidate fails. + assert_matches!( + storage.add_candidate(candidate.clone(), pvd.clone(), CandidateState::Seconded), + Err(CandidateStorageInsertionError::CandidateAlreadyKnown(hash)) if candidate_hash == hash ); - let candidate_hash = candidate.hash(); - let output_head_hash = candidate.commitments.head_data.hash(); - let parent_head_hash = pvd.parent_head.hash(); - - storage.add_candidate(candidate, pvd).unwrap(); + // Remove candidate and re-add it later in backed state. + storage.remove_candidate(&candidate_hash); + assert!(!storage.contains(&candidate_hash)); + assert_eq!(storage.possible_para_children(&parent_head_hash).count(), 0); + assert_eq!(storage.relay_parent_of_candidate(&candidate_hash), None); + assert_eq!(storage.head_data_by_hash(&candidate.descriptor.para_head), None); + assert_eq!(storage.head_data_by_hash(&parent_head_hash), None); + assert_eq!(storage.is_backed(&candidate_hash), false); + + storage + .add_candidate(candidate.clone(), pvd.clone(), CandidateState::Backed) + .unwrap(); + assert_eq!(storage.is_backed(&candidate_hash), true); + + // Test retain storage.retain(|_| true); assert!(storage.contains(&candidate_hash)); - assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 1); - assert!(storage.head_data_by_hash(&output_head_hash).is_some()); - storage.retain(|_| false); assert!(!storage.contains(&candidate_hash)); - assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 0); - assert!(storage.head_data_by_hash(&output_head_hash).is_none()); + assert_eq!(storage.possible_para_children(&parent_head_hash).count(), 0); + assert_eq!(storage.relay_parent_of_candidate(&candidate_hash), None); + assert_eq!(storage.head_data_by_hash(&candidate.descriptor.para_head), None); + assert_eq!(storage.head_data_by_hash(&parent_head_hash), None); + assert_eq!(storage.is_backed(&candidate_hash), false); } -// [`FragmentTree::populate`] should pick up candidates that build on other candidates. 
#[test] -fn populate_works_recursively() { - let mut storage = CandidateStorage::new(); +fn populate_and_extend_from_storage_empty() { + // Empty chain and empty storage. + let storage = CandidateStorage::default(); + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); + + let scope = Scope::with_ancestors( + ParaId::from(2), + RelayChainBlockInfo { + number: 1, + hash: Hash::repeat_byte(1), + storage_root: Hash::repeat_byte(2), + }, + base_constraints, + pending_availability, + 4, + vec![], + ) + .unwrap(); + let mut chain = FragmentChain::populate(scope, &storage); + assert!(chain.to_vec().is_empty()); + + chain.extend_from_storage(&storage); + assert!(chain.to_vec().is_empty()); +} + +#[test] +fn populate_and_extend_from_storage_with_existing_empty_to_vec() { + let mut storage = CandidateStorage::default(); let para_id = ParaId::from(5u32); let relay_parent_a = Hash::repeat_byte(1); let relay_parent_b = Hash::repeat_byte(2); + let relay_parent_c = Hash::repeat_byte(3); let (pvd_a, candidate_a) = make_committed_candidate( para_id, @@ -285,56 +373,623 @@ fn populate_works_recursively() { ); let candidate_b_hash = candidate_b.hash(); - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); + let (pvd_c, candidate_c) = make_committed_candidate( + para_id, + relay_parent_c, + 2, + vec![0x0c].into(), + vec![0x0d].into(), + 2, + ); + let candidate_c_hash = candidate_c.hash(); - let ancestors = vec![RelayChainBlockInfo { + let relay_parent_a_info = RelayChainBlockInfo { number: pvd_a.relay_parent_number, hash: relay_parent_a, storage_root: pvd_a.relay_parent_storage_root, - }]; - + }; let relay_parent_b_info = RelayChainBlockInfo { number: pvd_b.relay_parent_number, hash: relay_parent_b, storage_root: pvd_b.relay_parent_storage_root, }; + let relay_parent_c_info = RelayChainBlockInfo { + number: pvd_c.relay_parent_number, + hash: relay_parent_c, + storage_root: pvd_c.relay_parent_storage_root, + }; - storage.add_candidate(candidate_a, pvd_a).unwrap(); - storage.add_candidate(candidate_b, pvd_b).unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_b_info, - base_constraints, - pending_availability, - 4, - ancestors, - ) - .unwrap(); - let tree = FragmentTree::populate(scope, &storage); + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); + + let ancestors = vec![ + // These need to be ordered in reverse. + relay_parent_b_info.clone(), + relay_parent_a_info.clone(), + ]; + + storage + .add_candidate(candidate_a.clone(), pvd_a.clone(), CandidateState::Seconded) + .unwrap(); + storage + .add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Backed) + .unwrap(); + storage + .add_candidate(candidate_c.clone(), pvd_c.clone(), CandidateState::Backed) + .unwrap(); + + // Candidate A doesn't adhere to the base constraints. 
+ { + for wrong_constraints in [ + // Different required parent + make_constraints(0, vec![0], vec![0x0e].into()), + // Min relay parent number is wrong + make_constraints(1, vec![0], vec![0x0a].into()), + ] { + let scope = Scope::with_ancestors( + para_id, + relay_parent_c_info.clone(), + wrong_constraints.clone(), + pending_availability.clone(), + 4, + ancestors.clone(), + ) + .unwrap(); + let mut chain = FragmentChain::populate(scope, &storage); + + assert!(chain.to_vec().is_empty()); + + chain.extend_from_storage(&storage); + assert!(chain.to_vec().is_empty()); + + // If the min relay parent number is wrong, candidate A can never become valid. + // Otherwise, if only the required parent doesn't match, candidate A is still a + // potential candidate. + if wrong_constraints.min_relay_parent_number == 1 { + assert_eq!( + chain.can_add_candidate_as_potential( + &storage, + &candidate_a.hash(), + &candidate_a.descriptor.relay_parent, + pvd_a.parent_head.hash(), + Some(candidate_a.commitments.head_data.hash()), + ), + PotentialAddition::None + ); + } else { + assert_eq!( + chain.can_add_candidate_as_potential( + &storage, + &candidate_a.hash(), + &candidate_a.descriptor.relay_parent, + pvd_a.parent_head.hash(), + Some(candidate_a.commitments.head_data.hash()), + ), + PotentialAddition::Anyhow + ); + } + + // All other candidates can always be potential candidates. + for (candidate, pvd) in + [(candidate_b.clone(), pvd_b.clone()), (candidate_c.clone(), pvd_c.clone())] + { + assert_eq!( + chain.can_add_candidate_as_potential( + &storage, + &candidate.hash(), + &candidate.descriptor.relay_parent, + pvd.parent_head.hash(), + Some(candidate.commitments.head_data.hash()), + ), + PotentialAddition::Anyhow + ); + } + } + } + + // Various max depths. + { + // depth is 0, will only allow 1 candidate + let scope = Scope::with_ancestors( + para_id, + relay_parent_c_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + 0, + ancestors.clone(), + ) + .unwrap(); + // Before populating the chain, all candidates are potential candidates. However, they can + // only be added as connected candidates, because only one candidates is allowed by max + // depth + let chain = FragmentChain::populate(scope.clone(), &CandidateStorage::default()); + for (candidate, pvd) in [ + (candidate_a.clone(), pvd_a.clone()), + (candidate_b.clone(), pvd_b.clone()), + (candidate_c.clone(), pvd_c.clone()), + ] { + assert_eq!( + chain.can_add_candidate_as_potential( + &CandidateStorage::default(), + &candidate.hash(), + &candidate.descriptor.relay_parent, + pvd.parent_head.hash(), + Some(candidate.commitments.head_data.hash()), + ), + PotentialAddition::IfConnected + ); + } + let mut chain = FragmentChain::populate(scope, &storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash]); + chain.extend_from_storage(&storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash]); + // since depth is maxed out, we can't add more potential candidates + // candidate A is no longer a potential candidate because it's already present. 
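// (Not part of the patch: a worked note on the depth arithmetic these cases rely on.
// A scope built with max_depth = n keeps at most n + 1 candidates in its chain, so
// max_depth = 0 above keeps only A, max_depth = 1 below keeps A and B, and the depths
// 2..6 tested further down keep all of A, B and C.)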
+ for (candidate, pvd) in [ + (candidate_a.clone(), pvd_a.clone()), + (candidate_b.clone(), pvd_b.clone()), + (candidate_c.clone(), pvd_c.clone()), + ] { + assert_eq!( + chain.can_add_candidate_as_potential( + &storage, + &candidate.hash(), + &candidate.descriptor.relay_parent, + pvd.parent_head.hash(), + Some(candidate.commitments.head_data.hash()), + ), + PotentialAddition::None + ); + } + + // depth is 1, allows two candidates + let scope = Scope::with_ancestors( + para_id, + relay_parent_c_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + 1, + ancestors.clone(), + ) + .unwrap(); + // Before populating the chain, all candidates can be added as potential. + let mut modified_storage = CandidateStorage::default(); + let chain = FragmentChain::populate(scope.clone(), &modified_storage); + for (candidate, pvd) in [ + (candidate_a.clone(), pvd_a.clone()), + (candidate_b.clone(), pvd_b.clone()), + (candidate_c.clone(), pvd_c.clone()), + ] { + assert_eq!( + chain.can_add_candidate_as_potential( + &modified_storage, + &candidate.hash(), + &candidate.descriptor.relay_parent, + pvd.parent_head.hash(), + Some(candidate.commitments.head_data.hash()), + ), + PotentialAddition::Anyhow + ); + } + // Add an unconnected candidate. We now should only allow a Connected candidate, because max + // depth only allows one more candidate. + modified_storage + .add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Seconded) + .unwrap(); + let chain = FragmentChain::populate(scope.clone(), &modified_storage); + for (candidate, pvd) in + [(candidate_a.clone(), pvd_a.clone()), (candidate_c.clone(), pvd_c.clone())] + { + assert_eq!( + chain.can_add_candidate_as_potential( + &modified_storage, + &candidate.hash(), + &candidate.descriptor.relay_parent, + pvd.parent_head.hash(), + Some(candidate.commitments.head_data.hash()), + ), + PotentialAddition::IfConnected + ); + } + + // Now try populating from all candidates. + let mut chain = FragmentChain::populate(scope, &storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); + chain.extend_from_storage(&storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); + // since depth is maxed out, we can't add more potential candidates + // candidate A and B are no longer a potential candidate because they're already present. + for (candidate, pvd) in [ + (candidate_a.clone(), pvd_a.clone()), + (candidate_b.clone(), pvd_b.clone()), + (candidate_c.clone(), pvd_c.clone()), + ] { + assert_eq!( + chain.can_add_candidate_as_potential( + &storage, + &candidate.hash(), + &candidate.descriptor.relay_parent, + pvd.parent_head.hash(), + Some(candidate.commitments.head_data.hash()), + ), + PotentialAddition::None + ); + } + + // depths larger than 2, allows all candidates + for depth in 2..6 { + let scope = Scope::with_ancestors( + para_id, + relay_parent_c_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + depth, + ancestors.clone(), + ) + .unwrap(); + let mut chain = FragmentChain::populate(scope, &storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); + chain.extend_from_storage(&storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); + // Candidates are no longer potential candidates because they're already part of the + // chain. 
+ for (candidate, pvd) in [ + (candidate_a.clone(), pvd_a.clone()), + (candidate_b.clone(), pvd_b.clone()), + (candidate_c.clone(), pvd_c.clone()), + ] { + assert_eq!( + chain.can_add_candidate_as_potential( + &storage, + &candidate.hash(), + &candidate.descriptor.relay_parent, + pvd.parent_head.hash(), + Some(candidate.commitments.head_data.hash()), + ), + PotentialAddition::None + ); + } + } + } + + // Wrong relay parents + { + // Candidates A has relay parent out of scope. + let ancestors_without_a = vec![relay_parent_b_info.clone()]; + let scope = Scope::with_ancestors( + para_id, + relay_parent_c_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + 4, + ancestors_without_a, + ) + .unwrap(); + + let mut chain = FragmentChain::populate(scope, &storage); + assert!(chain.to_vec().is_empty()); + + chain.extend_from_storage(&storage); + assert!(chain.to_vec().is_empty()); + + // Candidate A is not a potential candidate, but candidates B and C still are. + assert_eq!( + chain.can_add_candidate_as_potential( + &storage, + &candidate_a.hash(), + &candidate_a.descriptor.relay_parent, + pvd_a.parent_head.hash(), + Some(candidate_a.commitments.head_data.hash()), + ), + PotentialAddition::None + ); + for (candidate, pvd) in + [(candidate_b.clone(), pvd_b.clone()), (candidate_c.clone(), pvd_c.clone())] + { + assert_eq!( + chain.can_add_candidate_as_potential( + &storage, + &candidate.hash(), + &candidate.descriptor.relay_parent, + pvd.parent_head.hash(), + Some(candidate.commitments.head_data.hash()), + ), + PotentialAddition::Anyhow + ); + } + + // Candidate C has the same relay parent as candidate A's parent. Relay parent not allowed + // to move backwards + let mut modified_storage = storage.clone(); + modified_storage.remove_candidate(&candidate_c_hash); + let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( + para_id, + relay_parent_a, + 1, + vec![0x0c].into(), + vec![0x0d].into(), + 2, + ); + modified_storage + .add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded) + .unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_c_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + 4, + ancestors.clone(), + ) + .unwrap(); + let mut chain = FragmentChain::populate(scope, &modified_storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); + chain.extend_from_storage(&modified_storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); + + // Candidate C is not even a potential candidate. + assert_eq!( + chain.can_add_candidate_as_potential( + &modified_storage, + &wrong_candidate_c.hash(), + &wrong_candidate_c.descriptor.relay_parent, + wrong_pvd_c.parent_head.hash(), + Some(wrong_candidate_c.commitments.head_data.hash()), + ), + PotentialAddition::None + ); + } + + // Parachain fork and cycles are not allowed. + { + // Candidate C has the same parent as candidate B. 
+ let mut modified_storage = storage.clone(); + modified_storage.remove_candidate(&candidate_c_hash); + let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( + para_id, + relay_parent_c, + 2, + vec![0x0b].into(), + vec![0x0d].into(), + 2, + ); + modified_storage + .add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded) + .unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_c_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + 4, + ancestors.clone(), + ) + .unwrap(); + let mut chain = FragmentChain::populate(scope, &modified_storage); + // We'll either have A->B or A->C. It's not deterministic because CandidateStorage uses + // HashSets and HashMaps. + if chain.to_vec() == vec![candidate_a_hash, candidate_b_hash] { + chain.extend_from_storage(&modified_storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); + // Candidate C is not even a potential candidate. + assert_eq!( + chain.can_add_candidate_as_potential( + &modified_storage, + &wrong_candidate_c.hash(), + &wrong_candidate_c.descriptor.relay_parent, + wrong_pvd_c.parent_head.hash(), + Some(wrong_candidate_c.commitments.head_data.hash()), + ), + PotentialAddition::None + ); + } else if chain.to_vec() == vec![candidate_a_hash, wrong_candidate_c.hash()] { + chain.extend_from_storage(&modified_storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, wrong_candidate_c.hash()]); + // Candidate B is not even a potential candidate. + assert_eq!( + chain.can_add_candidate_as_potential( + &modified_storage, + &candidate_b.hash(), + &candidate_b.descriptor.relay_parent, + pvd_b.parent_head.hash(), + Some(candidate_b.commitments.head_data.hash()), + ), + PotentialAddition::None + ); + } else { + panic!("Unexpected chain: {:?}", chain.to_vec()); + } + + // Candidate C is a 0-length cycle. + // Candidate C has the same parent as candidate B. + let mut modified_storage = storage.clone(); + modified_storage.remove_candidate(&candidate_c_hash); + let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( + para_id, + relay_parent_c, + 2, + vec![0x0c].into(), + vec![0x0c].into(), + 2, + ); + modified_storage + .add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded) + .unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_c_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + 4, + ancestors.clone(), + ) + .unwrap(); + let mut chain = FragmentChain::populate(scope, &modified_storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); + chain.extend_from_storage(&modified_storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); + // Candidate C is not even a potential candidate. + assert_eq!( + chain.can_add_candidate_as_potential( + &modified_storage, + &wrong_candidate_c.hash(), + &wrong_candidate_c.descriptor.relay_parent, + wrong_pvd_c.parent_head.hash(), + Some(wrong_candidate_c.commitments.head_data.hash()), + ), + PotentialAddition::None + ); + + // Candidate C points back to the pre-state of candidate C. 
+ let mut modified_storage = storage.clone(); + modified_storage.remove_candidate(&candidate_c_hash); + let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( + para_id, + relay_parent_c, + 2, + vec![0x0c].into(), + vec![0x0b].into(), + 2, + ); + modified_storage + .add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded) + .unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_c_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + 4, + ancestors.clone(), + ) + .unwrap(); + let mut chain = FragmentChain::populate(scope, &modified_storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); + chain.extend_from_storage(&modified_storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); + // Candidate C is not even a potential candidate. + assert_eq!( + chain.can_add_candidate_as_potential( + &modified_storage, + &wrong_candidate_c.hash(), + &wrong_candidate_c.descriptor.relay_parent, + wrong_pvd_c.parent_head.hash(), + Some(wrong_candidate_c.commitments.head_data.hash()), + ), + PotentialAddition::None + ); + } - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 2); - assert!(candidates.contains(&candidate_a_hash)); - assert!(candidates.contains(&candidate_b_hash)); + // Test with candidates pending availability + { + // Valid options + for pending in [ + vec![PendingAvailability { + candidate_hash: candidate_a_hash, + relay_parent: relay_parent_a_info.clone(), + }], + vec![ + PendingAvailability { + candidate_hash: candidate_a_hash, + relay_parent: relay_parent_a_info.clone(), + }, + PendingAvailability { + candidate_hash: candidate_b_hash, + relay_parent: relay_parent_b_info.clone(), + }, + ], + vec![ + PendingAvailability { + candidate_hash: candidate_a_hash, + relay_parent: relay_parent_a_info.clone(), + }, + PendingAvailability { + candidate_hash: candidate_b_hash, + relay_parent: relay_parent_b_info.clone(), + }, + PendingAvailability { + candidate_hash: candidate_c_hash, + relay_parent: relay_parent_c_info.clone(), + }, + ], + ] { + let scope = Scope::with_ancestors( + para_id, + relay_parent_c_info.clone(), + base_constraints.clone(), + pending, + 3, + ancestors.clone(), + ) + .unwrap(); + let mut chain = FragmentChain::populate(scope, &storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); + chain.extend_from_storage(&storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); + } - assert_eq!(tree.nodes.len(), 2); - assert_eq!(tree.nodes[0].parent, NodePointer::Root); - assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash); - assert_eq!(tree.nodes[0].depth, 0); + // Relay parents of pending availability candidates can be out of scope + // Relay parent of candidate A is out of scope. 
+ let ancestors_without_a = vec![relay_parent_b_info.clone()]; + let scope = Scope::with_ancestors( + para_id, + relay_parent_c_info.clone(), + base_constraints.clone(), + vec![PendingAvailability { + candidate_hash: candidate_a_hash, + relay_parent: relay_parent_a_info.clone(), + }], + 4, + ancestors_without_a, + ) + .unwrap(); + let mut chain = FragmentChain::populate(scope, &storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); + chain.extend_from_storage(&storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); + + // Even relay parents of pending availability candidates which are out of scope cannot move + // backwards. + let scope = Scope::with_ancestors( + para_id, + relay_parent_c_info.clone(), + base_constraints.clone(), + vec![ + PendingAvailability { + candidate_hash: candidate_a_hash, + relay_parent: RelayChainBlockInfo { + hash: relay_parent_a_info.hash, + number: 1, + storage_root: relay_parent_a_info.storage_root, + }, + }, + PendingAvailability { + candidate_hash: candidate_b_hash, + relay_parent: RelayChainBlockInfo { + hash: relay_parent_b_info.hash, + number: 0, + storage_root: relay_parent_b_info.storage_root, + }, + }, + ], + 4, + vec![], + ) + .unwrap(); + let mut chain = FragmentChain::populate(scope, &storage); + assert!(chain.to_vec().is_empty()); - assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); - assert_eq!(tree.nodes[1].candidate_hash, candidate_b_hash); - assert_eq!(tree.nodes[1].depth, 1); + chain.extend_from_storage(&storage); + assert!(chain.to_vec().is_empty()); + } } #[test] -fn children_of_root_are_contiguous() { - let mut storage = CandidateStorage::new(); - +fn extend_from_storage_with_existing_to_vec() { let para_id = ParaId::from(5u32); let relay_parent_a = Hash::repeat_byte(1); let relay_parent_b = Hash::repeat_byte(2); + let relay_parent_d = Hash::repeat_byte(3); let (pvd_a, candidate_a) = make_committed_candidate( para_id, @@ -344,6 +999,7 @@ fn children_of_root_are_contiguous() { vec![0x0b].into(), 0, ); + let candidate_a_hash = candidate_a.hash(); let (pvd_b, candidate_b) = make_committed_candidate( para_id, @@ -353,182 +1009,136 @@ fn children_of_root_are_contiguous() { vec![0x0c].into(), 1, ); + let candidate_b_hash = candidate_b.hash(); - let (pvd_a2, candidate_a2) = make_committed_candidate( + let (pvd_c, candidate_c) = make_committed_candidate( para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b, 1].into(), - 0, + // Use the same relay parent number as B to test that it doesn't need to change between + // candidates. + relay_parent_b, + 1, + vec![0x0c].into(), + vec![0x0d].into(), + 1, ); - let candidate_a2_hash = candidate_a2.hash(); + let candidate_c_hash = candidate_c.hash(); - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); + // Candidate D will never be added to the chain. 
+ let (pvd_d, candidate_d) = make_committed_candidate( + para_id, + relay_parent_d, + 2, + vec![0x0e].into(), + vec![0x0f].into(), + 1, + ); - let ancestors = vec![RelayChainBlockInfo { + let relay_parent_a_info = RelayChainBlockInfo { number: pvd_a.relay_parent_number, hash: relay_parent_a, storage_root: pvd_a.relay_parent_storage_root, - }]; - + }; let relay_parent_b_info = RelayChainBlockInfo { number: pvd_b.relay_parent_number, hash: relay_parent_b, storage_root: pvd_b.relay_parent_storage_root, }; + let relay_parent_d_info = RelayChainBlockInfo { + number: pvd_d.relay_parent_number, + hash: relay_parent_d, + storage_root: pvd_d.relay_parent_storage_root, + }; - storage.add_candidate(candidate_a, pvd_a).unwrap(); - storage.add_candidate(candidate_b, pvd_b).unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_b_info, - base_constraints, - pending_availability, - 4, - ancestors, - ) - .unwrap(); - let mut tree = FragmentTree::populate(scope, &storage); + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); + + let ancestors = vec![ + // These need to be ordered in reverse. + relay_parent_b_info.clone(), + relay_parent_a_info.clone(), + ]; - storage.add_candidate(candidate_a2, pvd_a2).unwrap(); - tree.add_and_populate(candidate_a2_hash, &storage); - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 3); + // Already had A and C in the storage. Introduce B, which should add both B and C to the chain + // now. + { + let mut storage = CandidateStorage::default(); + storage + .add_candidate(candidate_a.clone(), pvd_a.clone(), CandidateState::Seconded) + .unwrap(); + storage + .add_candidate(candidate_c.clone(), pvd_c.clone(), CandidateState::Seconded) + .unwrap(); + storage + .add_candidate(candidate_d.clone(), pvd_d.clone(), CandidateState::Seconded) + .unwrap(); + + let scope = Scope::with_ancestors( + para_id, + relay_parent_d_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + 4, + ancestors.clone(), + ) + .unwrap(); + let mut chain = FragmentChain::populate(scope, &storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash]); + + storage + .add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Seconded) + .unwrap(); + chain.extend_from_storage(&storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); + } - assert_eq!(tree.nodes[0].parent, NodePointer::Root); - assert_eq!(tree.nodes[1].parent, NodePointer::Root); - assert_eq!(tree.nodes[2].parent, NodePointer::Storage(0)); + // Already had A and B in the chain. Introduce C. 
+ { + let mut storage = CandidateStorage::default(); + storage + .add_candidate(candidate_a.clone(), pvd_a.clone(), CandidateState::Seconded) + .unwrap(); + storage + .add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Seconded) + .unwrap(); + storage + .add_candidate(candidate_d.clone(), pvd_d.clone(), CandidateState::Seconded) + .unwrap(); + + let scope = Scope::with_ancestors( + para_id, + relay_parent_d_info.clone(), + base_constraints.clone(), + pending_availability.clone(), + 4, + ancestors.clone(), + ) + .unwrap(); + let mut chain = FragmentChain::populate(scope, &storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); + + storage + .add_candidate(candidate_c.clone(), pvd_c.clone(), CandidateState::Seconded) + .unwrap(); + chain.extend_from_storage(&storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); + } } #[test] -fn add_candidate_child_of_root() { - let mut storage = CandidateStorage::new(); - +fn test_find_ancestor_path_and_find_backable_chain_empty_to_vec() { let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); + let relay_parent = Hash::repeat_byte(1); + let required_parent: HeadData = vec![0xff].into(); + let max_depth = 10; - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), - 0, - ); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0c].into(), - 0, - ); - let candidate_b_hash = candidate_b.hash(); - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - - storage.add_candidate(candidate_a, pvd_a).unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info, - base_constraints, - pending_availability, - 4, - vec![], - ) - .unwrap(); - let mut tree = FragmentTree::populate(scope, &storage); - - storage.add_candidate(candidate_b, pvd_b).unwrap(); - tree.add_and_populate(candidate_b_hash, &storage); - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 2); - - assert_eq!(tree.nodes[0].parent, NodePointer::Root); - assert_eq!(tree.nodes[1].parent, NodePointer::Root); -} - -#[test] -fn add_candidate_child_of_non_root() { - let mut storage = CandidateStorage::new(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), - 0, - ); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0b].into(), - vec![0x0c].into(), - 0, - ); - let candidate_b_hash = candidate_b.hash(); - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - - storage.add_candidate(candidate_a, pvd_a).unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info, - base_constraints, - pending_availability, - 4, - vec![], - ) - .unwrap(); - let mut tree = FragmentTree::populate(scope, &storage); - - storage.add_candidate(candidate_b, 
pvd_b).unwrap(); - tree.add_and_populate(candidate_b_hash, &storage); - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 2); - - assert_eq!(tree.nodes[0].parent, NodePointer::Root); - assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); -} - -#[test] -fn test_find_ancestor_path_and_find_backable_chain_empty_tree() { - let para_id = ParaId::from(5u32); - let relay_parent = Hash::repeat_byte(1); - let required_parent: HeadData = vec![0xff].into(); - let max_depth = 10; - - // Empty tree - let storage = CandidateStorage::new(); - let base_constraints = make_constraints(0, vec![0], required_parent.clone()); - - let relay_parent_info = - RelayChainBlockInfo { number: 0, hash: relay_parent, storage_root: Hash::zero() }; - - let scope = Scope::with_ancestors( + // Empty chain + let storage = CandidateStorage::default(); + let base_constraints = make_constraints(0, vec![0], required_parent.clone()); + + let relay_parent_info = + RelayChainBlockInfo { number: 0, hash: relay_parent, storage_root: Hash::zero() }; + + let scope = Scope::with_ancestors( para_id, relay_parent_info, base_constraints, @@ -537,64 +1147,23 @@ fn test_find_ancestor_path_and_find_backable_chain_empty_tree() { vec![], ) .unwrap(); - let tree = FragmentTree::populate(scope, &storage); - assert_eq!(tree.candidates().collect::>().len(), 0); - assert_eq!(tree.nodes.len(), 0); + let chain = FragmentChain::populate(scope, &storage); + assert!(chain.to_vec().is_empty()); - assert_eq!(tree.find_ancestor_path(Ancestors::new()).unwrap(), NodePointer::Root); - assert_eq!(tree.find_backable_chain(Ancestors::new(), 2, |_| true), vec![]); + assert_eq!(chain.find_ancestor_path(Ancestors::new()), 0); + assert_eq!(chain.find_backable_chain(Ancestors::new(), 2, |_| true), vec![]); // Invalid candidate. let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect(); - assert_eq!(tree.find_ancestor_path(ancestors.clone()), Some(NodePointer::Root)); - assert_eq!(tree.find_backable_chain(ancestors, 2, |_| true), vec![]); + assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0); + assert_eq!(chain.find_backable_chain(ancestors, 2, |_| true), vec![]); } -#[rstest] -#[case(true, 13)] -#[case(false, 8)] -// The tree with no cycles looks like: -// Make a tree that looks like this (note that there's no cycle): -// +-(root)-+ -// | | -// +----0---+ 7 -// | | -// 1----+ 5 -// | | -// | | -// 2 6 -// | -// 3 -// | -// 4 -// -// The tree with cycles is the same as the first but has a cycle from 4 back to the state -// produced by 0 (It's bounded by the max_depth + 1). 
-// +-(root)-+ -// | | -// +----0---+ 7 -// | | -// 1----+ 5 -// | | -// | | -// 2 6 -// | -// 3 -// | -// 4---+ -// | | -// 1 5 -// | -// 2 -// | -// 3 -fn test_find_ancestor_path_and_find_backable_chain( - #[case] has_cycle: bool, - #[case] expected_node_count: usize, -) { +#[test] +fn test_find_ancestor_path_and_find_backable_to_vec() { let para_id = ParaId::from(5u32); let relay_parent = Hash::repeat_byte(1); let required_parent: HeadData = vec![0xff].into(); - let max_depth = 7; + let max_depth = 5; let relay_parent_number = 0; let relay_parent_storage_root = Hash::repeat_byte(69); @@ -650,42 +1219,13 @@ fn test_find_ancestor_path_and_find_backable_chain( para_id, relay_parent, 0, - vec![0].into(), + vec![4].into(), vec![5].into(), 0, )); - // Candidate 6 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![1].into(), - vec![6].into(), - 0, - )); - // Candidate 7 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - required_parent.clone(), - vec![7].into(), - 0, - )); - - if has_cycle { - candidates[4] = make_committed_candidate( - para_id, - relay_parent, - 0, - vec![3].into(), - vec![0].into(), // put the cycle here back to the output state of 0. - 0, - ); - } let base_constraints = make_constraints(0, vec![0], required_parent.clone()); - let mut storage = CandidateStorage::new(); + let mut storage = CandidateStorage::default(); let relay_parent_info = RelayChainBlockInfo { number: relay_parent_number, @@ -694,265 +1234,175 @@ fn test_find_ancestor_path_and_find_backable_chain( }; for (pvd, candidate) in candidates.iter() { - storage.add_candidate(candidate.clone(), pvd.clone()).unwrap(); + storage + .add_candidate(candidate.clone(), pvd.clone(), CandidateState::Seconded) + .unwrap(); } let candidates = candidates.into_iter().map(|(_pvd, candidate)| candidate).collect::>(); let scope = Scope::with_ancestors( para_id, - relay_parent_info, - base_constraints, + relay_parent_info.clone(), + base_constraints.clone(), vec![], max_depth, vec![], ) .unwrap(); - let tree = FragmentTree::populate(scope, &storage); - - assert_eq!(tree.candidates().collect::>().len(), candidates.len()); - assert_eq!(tree.nodes.len(), expected_node_count); - - // Do some common tests on both trees. - { - // No ancestors supplied. - assert_eq!(tree.find_ancestor_path(Ancestors::new()).unwrap(), NodePointer::Root); - assert_eq!( - tree.find_backable_chain(Ancestors::new(), 4, |_| true), - [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - // Ancestor which is not part of the tree. Will be ignored. - let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect(); - assert_eq!(tree.find_ancestor_path(ancestors.clone()).unwrap(), NodePointer::Root); - assert_eq!( - tree.find_backable_chain(ancestors, 4, |_| true), - [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - // A chain fork. - let ancestors: Ancestors = - [(candidates[0].hash()), (candidates[7].hash())].into_iter().collect(); - assert_eq!(tree.find_ancestor_path(ancestors.clone()), None); - assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); + let chain = FragmentChain::populate(scope, &storage); - // Ancestors which are part of the tree but don't form a path. Will be ignored. 
- let ancestors: Ancestors = - [candidates[1].hash(), candidates[2].hash()].into_iter().collect(); - assert_eq!(tree.find_ancestor_path(ancestors.clone()).unwrap(), NodePointer::Root); - assert_eq!( - tree.find_backable_chain(ancestors, 4, |_| true), - [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - - // Valid ancestors. - let ancestors: Ancestors = [candidates[7].hash()].into_iter().collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - let candidate = &tree.nodes[res.unwrap_idx()]; - assert_eq!(candidate.candidate_hash, candidates[7].hash()); - assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); - - let ancestors: Ancestors = - [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] - .into_iter() - .collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - let candidate = &tree.nodes[res.unwrap_idx()]; - assert_eq!(candidate.candidate_hash, candidates[2].hash()); - assert_eq!( - tree.find_backable_chain(ancestors.clone(), 2, |_| true), - [3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() - ); + assert_eq!(candidates.len(), 6); + assert_eq!(chain.to_vec().len(), 6); - // Valid ancestors with candidates which have been omitted due to timeouts - let ancestors: Ancestors = - [candidates[0].hash(), candidates[2].hash()].into_iter().collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - let candidate = &tree.nodes[res.unwrap_idx()]; - assert_eq!(candidate.candidate_hash, candidates[0].hash()); - assert_eq!( - tree.find_backable_chain(ancestors, 3, |_| true), - [1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - - let ancestors: Ancestors = - [candidates[0].hash(), candidates[1].hash(), candidates[3].hash()] - .into_iter() - .collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - let candidate = &tree.nodes[res.unwrap_idx()]; - assert_eq!(candidate.candidate_hash, candidates[1].hash()); - if has_cycle { - assert_eq!( - tree.find_backable_chain(ancestors, 2, |_| true), - [2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - } else { - assert_eq!( - tree.find_backable_chain(ancestors, 4, |_| true), - [2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - } + // No ancestors supplied. + assert_eq!(chain.find_ancestor_path(Ancestors::new()), 0); + assert_eq!(chain.find_backable_chain(Ancestors::new(), 0, |_| true), vec![]); + assert_eq!( + chain.find_backable_chain(Ancestors::new(), 1, |_| true), + [0].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + assert_eq!( + chain.find_backable_chain(Ancestors::new(), 2, |_| true), + [0, 1].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + assert_eq!( + chain.find_backable_chain(Ancestors::new(), 5, |_| true), + [0, 1, 2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() + ); - let ancestors: Ancestors = - [candidates[1].hash(), candidates[2].hash()].into_iter().collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - assert_eq!(res, NodePointer::Root); + for count in 6..10 { assert_eq!( - tree.find_backable_chain(ancestors, 4, |_| true), - [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + chain.find_backable_chain(Ancestors::new(), count, |_| true), + [0, 1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::>() ); + } - // Requested count is 0. 
- assert_eq!(tree.find_backable_chain(Ancestors::new(), 0, |_| true), vec![]); + assert_eq!( + chain.find_backable_chain(Ancestors::new(), 7, |_| true), + [0, 1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + assert_eq!( + chain.find_backable_chain(Ancestors::new(), 10, |_| true), + [0, 1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::>() + ); - let ancestors: Ancestors = - [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] - .into_iter() - .collect(); - assert_eq!(tree.find_backable_chain(ancestors, 0, |_| true), vec![]); + // Ancestor which is not part of the chain. Will be ignored. + let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect(); + assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0); + assert_eq!( + chain.find_backable_chain(ancestors, 4, |_| true), + [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + let ancestors: Ancestors = + [candidates[1].hash(), CandidateHash::default()].into_iter().collect(); + assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0); + assert_eq!( + chain.find_backable_chain(ancestors, 4, |_| true), + [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + let ancestors: Ancestors = + [candidates[0].hash(), CandidateHash::default()].into_iter().collect(); + assert_eq!(chain.find_ancestor_path(ancestors.clone()), 1); + assert_eq!( + chain.find_backable_chain(ancestors, 4, |_| true), + [1, 2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() + ); - let ancestors: Ancestors = - [candidates[2].hash(), candidates[0].hash()].into_iter().collect(); - assert_eq!(tree.find_backable_chain(ancestors, 0, |_| true), vec![]); - } + // Ancestors which are part of the chain but don't form a path from root. Will be ignored. + let ancestors: Ancestors = [candidates[1].hash(), candidates[2].hash()].into_iter().collect(); + assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0); + assert_eq!( + chain.find_backable_chain(ancestors, 4, |_| true), + [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); - // Now do some tests only on the tree with cycles - if has_cycle { - // Exceeds the maximum tree depth. 0-1-2-3-4-1-2-3-4, when the tree stops at - // 0-1-2-3-4-1-2-3. - let ancestors: Ancestors = [ - candidates[0].hash(), - candidates[1].hash(), - candidates[2].hash(), - candidates[3].hash(), - candidates[4].hash(), - ] + // Valid ancestors. + let ancestors: Ancestors = [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] .into_iter() .collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - let candidate = &tree.nodes[res.unwrap_idx()]; - assert_eq!(candidate.candidate_hash, candidates[4].hash()); - assert_eq!( - tree.find_backable_chain(ancestors, 4, |_| true), - [1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); - - // 0-1-2. 
- let ancestors: Ancestors = - [candidates[0].hash(), candidates[1].hash(), candidates[2].hash()] - .into_iter() - .collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - let candidate = &tree.nodes[res.unwrap_idx()]; - assert_eq!(candidate.candidate_hash, candidates[2].hash()); - assert_eq!( - tree.find_backable_chain(ancestors.clone(), 1, |_| true), - [3].into_iter().map(|i| candidates[i].hash()).collect::>() - ); + assert_eq!(chain.find_ancestor_path(ancestors.clone()), 3); + assert_eq!( + chain.find_backable_chain(ancestors.clone(), 2, |_| true), + [3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + for count in 3..10 { assert_eq!( - tree.find_backable_chain(ancestors, 5, |_| true), - [3, 4, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + chain.find_backable_chain(ancestors.clone(), count, |_| true), + [3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::>() ); + } - // 0-1 - let ancestors: Ancestors = - [candidates[0].hash(), candidates[1].hash()].into_iter().collect(); - let res = tree.find_ancestor_path(ancestors.clone()).unwrap(); - let candidate = &tree.nodes[res.unwrap_idx()]; - assert_eq!(candidate.candidate_hash, candidates[1].hash()); + // Valid ancestors with candidates which have been omitted due to timeouts + let ancestors: Ancestors = [candidates[0].hash(), candidates[2].hash()].into_iter().collect(); + assert_eq!(chain.find_ancestor_path(ancestors.clone()), 1); + assert_eq!( + chain.find_backable_chain(ancestors.clone(), 3, |_| true), + [1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + assert_eq!( + chain.find_backable_chain(ancestors.clone(), 4, |_| true), + [1, 2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::>() + ); + for count in 5..10 { assert_eq!( - tree.find_backable_chain(ancestors, 6, |_| true), - [2, 3, 4, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::>(), + chain.find_backable_chain(ancestors.clone(), count, |_| true), + [1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::>() ); + } - // For 0-1-2-3-4-5, there's more than 1 way of finding this path in - // the tree. `None` should be returned. The runtime should not have accepted this. 
- let ancestors: Ancestors = [ - candidates[0].hash(), - candidates[1].hash(), - candidates[2].hash(), - candidates[3].hash(), - candidates[4].hash(), - candidates[5].hash(), - ] + let ancestors: Ancestors = [candidates[0].hash(), candidates[1].hash(), candidates[3].hash()] .into_iter() .collect(); - let res = tree.find_ancestor_path(ancestors.clone()); - assert_eq!(res, None); - assert_eq!(tree.find_backable_chain(ancestors, 1, |_| true), vec![]); - } -} - -#[test] -fn graceful_cycle_of_0() { - let mut storage = CandidateStorage::new(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0a].into(), // input same as output - 0, + assert_eq!(chain.find_ancestor_path(ancestors.clone()), 2); + assert_eq!( + chain.find_backable_chain(ancestors.clone(), 4, |_| true), + [2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::>() ); - let candidate_a_hash = candidate_a.hash(); - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - - let max_depth = 4; - storage.add_candidate(candidate_a, pvd_a).unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info, - base_constraints, - pending_availability, - max_depth, - vec![], - ) - .unwrap(); - let tree = FragmentTree::populate(scope, &storage); - - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 1); - assert_eq!(tree.nodes.len(), max_depth + 1); - - assert_eq!(tree.nodes[0].parent, NodePointer::Root); - assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); - assert_eq!(tree.nodes[2].parent, NodePointer::Storage(1)); - assert_eq!(tree.nodes[3].parent, NodePointer::Storage(2)); - assert_eq!(tree.nodes[4].parent, NodePointer::Storage(3)); - - assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash); - assert_eq!(tree.nodes[1].candidate_hash, candidate_a_hash); - assert_eq!(tree.nodes[2].candidate_hash, candidate_a_hash); - assert_eq!(tree.nodes[3].candidate_hash, candidate_a_hash); - assert_eq!(tree.nodes[4].candidate_hash, candidate_a_hash); + // Requested count is 0. + assert_eq!(chain.find_backable_chain(ancestors, 0, |_| true), vec![]); + // Stop when we've found a candidate for which pred returns false. + let ancestors: Ancestors = [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] + .into_iter() + .collect(); for count in 1..10 { assert_eq!( - tree.find_backable_chain(Ancestors::new(), count, |_| true), - iter::repeat(candidate_a_hash) - .take(std::cmp::min(count as usize, max_depth + 1)) - .collect::>() + // Stop at 4. 
+ chain.find_backable_chain(ancestors.clone(), count, |hash| hash != + &candidates[4].hash()), + [3].into_iter().map(|i| candidates[i].hash()).collect::>() ); + } + + // Stop when we've found a candidate which is pending availability + { + let scope = Scope::with_ancestors( + para_id, + relay_parent_info.clone(), + base_constraints, + // Mark the third candidate as pending availability + vec![PendingAvailability { + candidate_hash: candidates[3].hash(), + relay_parent: relay_parent_info, + }], + max_depth, + vec![], + ) + .unwrap(); + let chain = FragmentChain::populate(scope, &storage); + let ancestors: Ancestors = + [candidates[0].hash(), candidates[1].hash()].into_iter().collect(); assert_eq!( - tree.find_backable_chain([candidate_a_hash].into_iter().collect(), count - 1, |_| true), - iter::repeat(candidate_a_hash) - .take(std::cmp::min(count as usize - 1, max_depth)) - .collect::>() + // Stop at 4. + chain.find_backable_chain(ancestors.clone(), 3, |_| true), + [2].into_iter().map(|i| candidates[i].hash()).collect::>() ); } } #[test] -fn graceful_cycle_of_1() { - let mut storage = CandidateStorage::new(); +fn hypothetical_membership() { + let mut storage = CandidateStorage::default(); let para_id = ParaId::from(5u32); let relay_parent_a = Hash::repeat_byte(1); @@ -962,7 +1412,7 @@ fn graceful_cycle_of_1() { relay_parent_a, 0, vec![0x0a].into(), - vec![0x0b].into(), // input same as output + vec![0x0b].into(), 0, ); let candidate_a_hash = candidate_a.hash(); @@ -972,13 +1422,12 @@ fn graceful_cycle_of_1() { relay_parent_a, 0, vec![0x0b].into(), - vec![0x0a].into(), // input same as output + vec![0x0c].into(), 0, ); let candidate_b_hash = candidate_b.hash(); let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); let relay_parent_a_info = RelayChainBlockInfo { number: pvd_a.relay_parent_number, @@ -987,182 +1436,153 @@ fn graceful_cycle_of_1() { }; let max_depth = 4; - storage.add_candidate(candidate_a, pvd_a).unwrap(); - storage.add_candidate(candidate_b, pvd_b).unwrap(); + storage.add_candidate(candidate_a, pvd_a, CandidateState::Seconded).unwrap(); + storage.add_candidate(candidate_b, pvd_b, CandidateState::Seconded).unwrap(); let scope = Scope::with_ancestors( para_id, - relay_parent_a_info, - base_constraints, - pending_availability, + relay_parent_a_info.clone(), + base_constraints.clone(), + vec![], max_depth, vec![], ) .unwrap(); - let tree = FragmentTree::populate(scope, &storage); + let chain = FragmentChain::populate(scope, &storage); - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 2); - assert_eq!(tree.nodes.len(), max_depth + 1); + assert_eq!(chain.to_vec().len(), 2); - assert_eq!(tree.nodes[0].parent, NodePointer::Root); - assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); - assert_eq!(tree.nodes[2].parent, NodePointer::Storage(1)); - assert_eq!(tree.nodes[3].parent, NodePointer::Storage(2)); - assert_eq!(tree.nodes[4].parent, NodePointer::Storage(3)); + // Check candidates which are already present + assert!(chain.hypothetical_membership( + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), + candidate_relay_parent: relay_parent_a, + candidate_para: para_id, + candidate_hash: candidate_a_hash, + }, + &storage, + )); + assert!(chain.hypothetical_membership( + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), + candidate_relay_parent: relay_parent_a, + candidate_para: para_id, + 
candidate_hash: candidate_b_hash, + }, + &storage, + )); - assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash); - assert_eq!(tree.nodes[1].candidate_hash, candidate_b_hash); - assert_eq!(tree.nodes[2].candidate_hash, candidate_a_hash); - assert_eq!(tree.nodes[3].candidate_hash, candidate_b_hash); - assert_eq!(tree.nodes[4].candidate_hash, candidate_a_hash); + // Forks not allowed. + assert!(!chain.hypothetical_membership( + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), + candidate_relay_parent: relay_parent_a, + candidate_para: para_id, + candidate_hash: CandidateHash(Hash::repeat_byte(21)), + }, + &storage, + )); + assert!(!chain.hypothetical_membership( + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), + candidate_relay_parent: relay_parent_a, + candidate_para: para_id, + candidate_hash: CandidateHash(Hash::repeat_byte(22)), + }, + &storage, + )); - assert_eq!(tree.find_backable_chain(Ancestors::new(), 1, |_| true), vec![candidate_a_hash],); - assert_eq!( - tree.find_backable_chain(Ancestors::new(), 2, |_| true), - vec![candidate_a_hash, candidate_b_hash], - ); - assert_eq!( - tree.find_backable_chain(Ancestors::new(), 3, |_| true), - vec![candidate_a_hash, candidate_b_hash, candidate_a_hash], - ); - assert_eq!( - tree.find_backable_chain([candidate_a_hash].into_iter().collect(), 2, |_| true), - vec![candidate_b_hash, candidate_a_hash], - ); + // Unknown candidate which builds on top of the current chain. + assert!(chain.hypothetical_membership( + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0c]).hash(), + candidate_relay_parent: relay_parent_a, + candidate_para: para_id, + candidate_hash: CandidateHash(Hash::repeat_byte(23)), + }, + &storage, + )); - assert_eq!( - tree.find_backable_chain(Ancestors::new(), 6, |_| true), - vec![ - candidate_a_hash, - candidate_b_hash, - candidate_a_hash, - candidate_b_hash, - candidate_a_hash - ], - ); + // Unknown unconnected candidate which may be valid. + assert!(chain.hypothetical_membership( + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0e]).hash(), + candidate_relay_parent: relay_parent_a, + candidate_para: para_id, + candidate_hash: CandidateHash(Hash::repeat_byte(23)), + }, + &storage, + )); - for count in 3..7 { - assert_eq!( - tree.find_backable_chain( - [candidate_a_hash, candidate_b_hash].into_iter().collect(), - count, - |_| true - ), - vec![candidate_a_hash, candidate_b_hash, candidate_a_hash], + // The number of unconnected candidates is limited (chain.len() + unconnected) <= max_depth + { + // C will be an unconnected candidate. 
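// (Not part of the patch: the arithmetic behind the expectations in this block. The scope
// below uses max_depth = 2 and the populated chain already contains A and B, so
// chain.len() + unconnected cannot stay within max_depth for any extra unconnected
// candidate. Both hypothetical_membership checks below are therefore expected to return
// false, even though the first inline comment still speaks of C being "accepted".)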
+ let (pvd_c, candidate_c) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0e].into(), + vec![0x0f].into(), + 0, ); - } -} + let candidate_c_hash = candidate_c.hash(); -#[test] -fn hypothetical_depths_known_and_unknown() { - let mut storage = CandidateStorage::new(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), // input same as output - 0, - ); - let candidate_a_hash = candidate_a.hash(); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0b].into(), - vec![0x0a].into(), // input same as output - 0, - ); - let candidate_b_hash = candidate_b.hash(); - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - - let max_depth = 4; - storage.add_candidate(candidate_a, pvd_a).unwrap(); - storage.add_candidate(candidate_b, pvd_b).unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info, - base_constraints, - pending_availability, - max_depth, - vec![], - ) - .unwrap(); - let tree = FragmentTree::populate(scope, &storage); + // Add an invalid candidate in the storage. This would introduce a fork. Just to test that + // it's ignored. + let (invalid_pvd, invalid_candidate) = make_committed_candidate( + para_id, + relay_parent_a, + 1, + vec![0x0a].into(), + vec![0x0b].into(), + 0, + ); - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 2); - assert_eq!(tree.nodes.len(), max_depth + 1); + let scope = Scope::with_ancestors( + para_id, + relay_parent_a_info, + base_constraints, + vec![], + 2, + vec![], + ) + .unwrap(); + let mut storage = storage.clone(); + storage.add_candidate(candidate_c, pvd_c, CandidateState::Seconded).unwrap(); - assert_eq!( - tree.hypothetical_depths( - candidate_a_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - false, - ), - vec![0, 2, 4], - ); + let chain = FragmentChain::populate(scope, &storage); + assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); - assert_eq!( - tree.hypothetical_depths( - candidate_b_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - false, - ), - vec![1, 3], - ); + storage + .add_candidate(invalid_candidate, invalid_pvd, CandidateState::Seconded) + .unwrap(); - assert_eq!( - tree.hypothetical_depths( - CandidateHash(Hash::repeat_byte(21)), + // Check that C is accepted as a potential unconnected candidate. + assert!(!chain.hypothetical_membership( HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), - relay_parent: relay_parent_a, + parent_head_data_hash: HeadData::from(vec![0x0e]).hash(), + candidate_relay_parent: relay_parent_a, + candidate_hash: candidate_c_hash, + candidate_para: para_id }, &storage, - false, - ), - vec![0, 2, 4], - ); + )); - assert_eq!( - tree.hypothetical_depths( - CandidateHash(Hash::repeat_byte(22)), + // Since C is already an unconnected candidate in the storage. 
+ assert!(!chain.hypothetical_membership( HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), - relay_parent: relay_parent_a, + parent_head_data_hash: HeadData::from(vec![0x0f]).hash(), + candidate_relay_parent: relay_parent_a, + candidate_para: para_id, + candidate_hash: CandidateHash(Hash::repeat_byte(23)), }, &storage, - false, - ), - vec![1, 3] - ); + )); + } } #[test] -fn hypothetical_depths_stricter_on_complete() { - let storage = CandidateStorage::new(); +fn hypothetical_membership_stricter_on_complete_candidates() { + let storage = CandidateStorage::default(); let para_id = ParaId::from(5u32); let relay_parent_a = Hash::repeat_byte(1); @@ -1197,161 +1617,31 @@ fn hypothetical_depths_stricter_on_complete() { vec![], ) .unwrap(); - let tree = FragmentTree::populate(scope, &storage); - - assert_eq!( - tree.hypothetical_depths( - candidate_a_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - false, - ), - vec![0], - ); - - assert!(tree - .hypothetical_depths( - candidate_a_hash, - HypotheticalCandidate::Complete { - receipt: Cow::Owned(candidate_a), - persisted_validation_data: Cow::Owned(pvd_a), - }, - &storage, - false, - ) - .is_empty()); -} - -#[test] -fn hypothetical_depths_backed_in_path() { - let mut storage = CandidateStorage::new(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), - 0, - ); - let candidate_a_hash = candidate_a.hash(); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0b].into(), - vec![0x0c].into(), - 0, - ); - let candidate_b_hash = candidate_b.hash(); - - let (pvd_c, candidate_c) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0b].into(), - vec![0x0d].into(), - 0, - ); - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - - let max_depth = 4; - storage.add_candidate(candidate_a, pvd_a).unwrap(); - storage.add_candidate(candidate_b, pvd_b).unwrap(); - storage.add_candidate(candidate_c, pvd_c).unwrap(); - - // `A` and `B` are backed, `C` is not. 
- storage.mark_backed(&candidate_a_hash); - storage.mark_backed(&candidate_b_hash); - - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info, - base_constraints, - pending_availability, - max_depth, - vec![], - ) - .unwrap(); - let tree = FragmentTree::populate(scope, &storage); - - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 3); - assert_eq!(tree.nodes.len(), 3); - - let candidate_d_hash = CandidateHash(Hash::repeat_byte(0xAA)); - - assert_eq!( - tree.hypothetical_depths( - candidate_d_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - true, - ), - vec![0], - ); - - assert_eq!( - tree.hypothetical_depths( - candidate_d_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0c]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - true, - ), - vec![2], - ); - - assert_eq!( - tree.hypothetical_depths( - candidate_d_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0d]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - true, - ), - Vec::::new(), - ); + let chain = FragmentChain::populate(scope, &storage); + + assert!(chain.hypothetical_membership( + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), + candidate_relay_parent: relay_parent_a, + candidate_para: para_id, + candidate_hash: candidate_a_hash, + }, + &storage, + )); - assert_eq!( - tree.hypothetical_depths( - candidate_d_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0d]).hash(), - relay_parent: relay_parent_a, - }, - &storage, - false, - ), - vec![2], // non-empty if `false`. 
- ); + assert!(!chain.hypothetical_membership( + HypotheticalCandidate::Complete { + receipt: Arc::new(candidate_a), + persisted_validation_data: pvd_a, + candidate_hash: candidate_a_hash, + }, + &storage, + )); } #[test] -fn pending_availability_in_scope() { - let mut storage = CandidateStorage::new(); +fn hypothetical_membership_with_pending_availability_in_scope() { + let mut storage = CandidateStorage::default(); let para_id = ParaId::from(5u32); let relay_parent_a = Hash::repeat_byte(1); @@ -1402,8 +1692,8 @@ fn pending_availability_in_scope() { }; let max_depth = 4; - storage.add_candidate(candidate_a, pvd_a).unwrap(); - storage.add_candidate(candidate_b, pvd_b).unwrap(); + storage.add_candidate(candidate_a, pvd_a, CandidateState::Seconded).unwrap(); + storage.add_candidate(candidate_b, pvd_b, CandidateState::Backed).unwrap(); storage.mark_backed(&candidate_a_hash); let scope = Scope::with_ancestors( @@ -1415,37 +1705,49 @@ fn pending_availability_in_scope() { vec![relay_parent_b_info], ) .unwrap(); - let tree = FragmentTree::populate(scope, &storage); + let chain = FragmentChain::populate(scope, &storage); - let candidates: Vec<_> = tree.candidates().collect(); - assert_eq!(candidates.len(), 2); - assert_eq!(tree.nodes.len(), 2); + assert_eq!(chain.to_vec().len(), 2); let candidate_d_hash = CandidateHash(Hash::repeat_byte(0xAA)); - assert_eq!( - tree.hypothetical_depths( - candidate_d_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), - relay_parent: relay_parent_c, - }, - &storage, - false, - ), - vec![1], - ); + assert!(chain.hypothetical_membership( + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), + candidate_relay_parent: relay_parent_a, + candidate_hash: candidate_a_hash, + candidate_para: para_id + }, + &storage, + )); - assert_eq!( - tree.hypothetical_depths( - candidate_d_hash, - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0c]).hash(), - relay_parent: relay_parent_b, - }, - &storage, - false, - ), - vec![2], - ); + assert!(!chain.hypothetical_membership( + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), + candidate_relay_parent: relay_parent_c, + candidate_para: para_id, + candidate_hash: candidate_d_hash, + }, + &storage, + )); + + assert!(!chain.hypothetical_membership( + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), + candidate_relay_parent: relay_parent_c, + candidate_para: para_id, + candidate_hash: candidate_d_hash, + }, + &storage, + )); + + assert!(chain.hypothetical_membership( + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0c]).hash(), + candidate_relay_parent: relay_parent_b, + candidate_para: para_id, + candidate_hash: candidate_d_hash, + }, + &storage, + )); } diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index 0b1a2e034a28..d5bb5ff76ba8 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -21,22 +21,20 @@ //! This is the main coordinator of work within the node for the collation and //! backing phases of parachain consensus. //! -//! This is primarily an implementation of "Fragment Trees", as described in +//! This is primarily an implementation of "Fragment Chains", as described in //! [`polkadot_node_subsystem_util::inclusion_emulator`]. 
//! //! This subsystem also handles concerns such as the relay-chain being forkful and session changes. -use std::{ - borrow::Cow, - collections::{HashMap, HashSet}, -}; +use std::collections::{HashMap, HashSet}; +use fragment_chain::{FragmentChain, PotentialAddition}; use futures::{channel::oneshot, prelude::*}; use polkadot_node_subsystem::{ messages::{ - Ancestors, ChainApiMessage, FragmentTreeMembership, HypotheticalCandidate, - HypotheticalFrontierRequest, IntroduceCandidateRequest, ParentHeadData, + Ancestors, ChainApiMessage, HypotheticalCandidate, HypotheticalMembership, + HypotheticalMembershipRequest, IntroduceSecondedCandidateRequest, ParentHeadData, ProspectiveParachainsMessage, ProspectiveValidationDataRequest, RuntimeApiMessage, RuntimeApiRequest, }, @@ -56,7 +54,8 @@ use polkadot_primitives::{ use crate::{ error::{FatalError, FatalResult, JfyiError, JfyiErrorResult, Result}, fragment_chain::{ - CandidateStorage, CandidateStorageInsertionError, FragmentTree, Scope as TreeScope, + CandidateState, CandidateStorage, CandidateStorageInsertionError, + Scope as FragmentChainScope, }, }; @@ -72,7 +71,7 @@ const LOG_TARGET: &str = "parachain::prospective-parachains"; struct RelayBlockViewData { // Scheduling info for paras and upcoming paras. - fragment_trees: HashMap<ParaId, FragmentTree>, + fragment_chains: HashMap<ParaId, FragmentChain>, pending_availability: HashSet<CandidateHash>, } @@ -141,12 +140,10 @@ async fn run_iteration( }, FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => {}, FromOrchestra::Communication { msg } => match msg { - ProspectiveParachainsMessage::IntroduceCandidate(request, tx) => - handle_candidate_introduced(&mut *ctx, view, request, tx).await?, - ProspectiveParachainsMessage::CandidateSeconded(para, candidate_hash) => - handle_candidate_seconded(view, para, candidate_hash), + ProspectiveParachainsMessage::IntroduceSecondedCandidate(request, tx) => + handle_introduce_seconded_candidate(&mut *ctx, view, request, tx, metrics).await, ProspectiveParachainsMessage::CandidateBacked(para, candidate_hash) => - handle_candidate_backed(&mut *ctx, view, para, candidate_hash).await?, + handle_candidate_backed(&mut *ctx, view, para, candidate_hash).await, ProspectiveParachainsMessage::GetBackableCandidates( relay_parent, para, @@ -154,10 +151,8 @@ async fn run_iteration( ancestors, tx, ) => answer_get_backable_candidates(&view, relay_parent, para, count, ancestors, tx), - ProspectiveParachainsMessage::GetHypotheticalFrontier(request, tx) => - answer_hypothetical_frontier_request(&view, request, tx), - ProspectiveParachainsMessage::GetTreeMembership(para, candidate, tx) => - answer_tree_membership_request(&view, para, candidate, tx), + ProspectiveParachainsMessage::GetHypotheticalMembership(request, tx) => - answer_hypothetical_membership_request(&view, request, tx, metrics), ProspectiveParachainsMessage::GetMinimumRelayParents(relay_parent, tx) => answer_minimum_relay_parents_request(&view, relay_parent, tx), ProspectiveParachainsMessage::GetProspectiveValidationData(request, tx) => @@ -175,8 +170,8 @@ async fn handle_active_leaves_update( metrics: &Metrics, ) -> JfyiErrorResult<()> { // 1. clean up inactive leaves - // 2. determine all scheduled para at new block - // 3. construct new fragment tree for each para for each new leaf + // 2. determine all scheduled paras at the new block + // 3. construct new fragment chain for each para for each new leaf // 4. prune candidate storage.
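// A minimal, self-contained sketch of the per-leaf flow in the numbered comment above,
// using hypothetical stand-in types (plain maps keyed by a u32 para id and u64 candidate
// hashes) rather than the real `FragmentChain`/`CandidateStorage`. It only illustrates
// steps 2-4 and, unlike the real pruning, drops unconnected-but-still-potential candidates.
use std::collections::{HashMap, HashSet};

type Para = u32;
type CandidateHash = u64;
// Hypothetical per-para storage: candidate hash -> the candidate it builds on (None = base).
type Storage = HashMap<CandidateHash, Option<CandidateHash>>;

// Walk storage from the base, appending whichever candidate builds on the current tip.
fn populate_chain(storage: &Storage) -> Vec<CandidateHash> {
    let mut chain = Vec::new();
    let mut parent: Option<CandidateHash> = None;
    while let Some((hash, _)) = storage.iter().find(|(_, p)| **p == parent) {
        chain.push(*hash);
        parent = Some(*hash);
    }
    chain
}

fn main() {
    // Steps 1-2 (elided here): pretend paras 1 and 2 are scheduled at the newly activated leaf.
    let mut per_para_storage: HashMap<Para, Storage> = HashMap::new();
    per_para_storage.insert(1, HashMap::from([(10, None), (11, Some(10))]));
    per_para_storage.insert(2, HashMap::from([(20, None), (99, Some(123))])); // 99 never connects

    // Step 3: build one chain per scheduled para out of its candidate storage.
    let chains: HashMap<Para, Vec<CandidateHash>> =
        per_para_storage.iter().map(|(p, s)| (*p, populate_chain(s))).collect();

    // Step 4: prune storage down to candidates that ended up in some chain.
    let live: HashSet<CandidateHash> = chains.values().flatten().copied().collect();
    for storage in per_para_storage.values_mut() {
        storage.retain(|hash, _| live.contains(hash));
    }

    assert_eq!(chains[&1], vec![10, 11]);
    assert_eq!(chains[&2], vec![20]);
    assert!(!per_para_storage[&2].contains_key(&99));
}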
for deactivated in &update.deactivated { @@ -203,9 +198,7 @@ async fn handle_active_leaves_update( return Ok(()) }; - let mut pending_availability = HashSet::new(); - let scheduled_paras = - fetch_upcoming_paras(&mut *ctx, hash, &mut pending_availability).await?; + let scheduled_paras = fetch_upcoming_paras(&mut *ctx, hash).await?; let block_info: RelayChainBlockInfo = match fetch_block_info(&mut *ctx, &mut temp_header_cache, hash).await? { @@ -227,30 +220,30 @@ async fn handle_active_leaves_update( let ancestry = fetch_ancestry(&mut *ctx, &mut temp_header_cache, hash, allowed_ancestry_len).await?; + let mut all_pending_availability = HashSet::new(); + // Find constraints. - let mut fragment_trees = HashMap::new(); + let mut fragment_chains = HashMap::new(); for para in scheduled_paras { let candidate_storage = - view.candidate_storage.entry(para).or_insert_with(CandidateStorage::new); + view.candidate_storage.entry(para).or_insert_with(CandidateStorage::default); let backing_state = fetch_backing_state(&mut *ctx, hash, para).await?; - let (constraints, pending_availability) = match backing_state { - Some(c) => c, - None => { - // This indicates a runtime conflict of some kind. - - gum::debug!( - target: LOG_TARGET, - para_id = ?para, - relay_parent = ?hash, - "Failed to get inclusion backing state." - ); + let Some((constraints, pending_availability)) = backing_state else { + // This indicates a runtime conflict of some kind. + gum::debug!( + target: LOG_TARGET, + para_id = ?para, + relay_parent = ?hash, + "Failed to get inclusion backing state." + ); - continue - }, + continue }; + all_pending_availability.extend(pending_availability.iter().map(|c| c.candidate_hash)); + let pending_availability = preprocess_candidates_pending_availability( ctx, &mut temp_header_cache, @@ -261,15 +254,15 @@ async fn handle_active_leaves_update( let mut compact_pending = Vec::with_capacity(pending_availability.len()); for c in pending_availability { - let res = candidate_storage.add_candidate(c.candidate, c.persisted_validation_data); + let res = candidate_storage.add_candidate( + c.candidate, + c.persisted_validation_data, + CandidateState::Backed, + ); let candidate_hash = c.compact.candidate_hash; - compact_pending.push(c.compact); match res { - Ok(_) | Err(CandidateStorageInsertionError::CandidateAlreadyKnown(_)) => { - // Anything on-chain is guaranteed to be backed. 
- candidate_storage.mark_backed(&candidate_hash); - }, + Ok(_) | Err(CandidateStorageInsertionError::CandidateAlreadyKnown(_)) => {}, Err(err) => { gum::warn!( target: LOG_TARGET, @@ -278,11 +271,15 @@ async fn handle_active_leaves_update( ?err, "Scraped invalid candidate pending availability", ); + + break }, } + + compact_pending.push(c.compact); } - let scope = TreeScope::with_ancestors( + let scope = FragmentChainScope::with_ancestors( para, block_info.clone(), constraints, @@ -297,16 +294,26 @@ async fn handle_active_leaves_update( relay_parent = ?hash, min_relay_parent = scope.earliest_relay_parent().number, para_id = ?para, - "Creating fragment tree" + "Creating fragment chain" ); - let tree = FragmentTree::populate(scope, &*candidate_storage); + let chain = FragmentChain::populate(scope, &*candidate_storage); + + gum::trace!( + target: LOG_TARGET, + relay_parent = ?hash, + para_id = ?para, + "Populated fragment chain with {} candidates", + chain.len() + ); - fragment_trees.insert(para, tree); + fragment_chains.insert(para, chain); } - view.active_leaves - .insert(hash, RelayBlockViewData { fragment_trees, pending_availability }); + view.active_leaves.insert( + hash, + RelayBlockViewData { fragment_chains, pending_availability: all_pending_availability }, + ); } if !update.deactivated.is_empty() { @@ -318,18 +325,39 @@ async fn handle_active_leaves_update( } fn prune_view_candidate_storage(view: &mut View, metrics: &Metrics) { - metrics.time_prune_view_candidate_storage(); + let _timer = metrics.time_prune_view_candidate_storage(); let active_leaves = &view.active_leaves; let mut live_candidates = HashSet::new(); let mut live_paras = HashSet::new(); for sub_view in active_leaves.values() { - for (para_id, fragment_tree) in &sub_view.fragment_trees { - live_candidates.extend(fragment_tree.candidates()); + live_candidates.extend(sub_view.pending_availability.iter().cloned()); + + for (para_id, fragment_chain) in &sub_view.fragment_chains { + live_candidates.extend(fragment_chain.to_vec()); live_paras.insert(*para_id); } + } - live_candidates.extend(sub_view.pending_availability.iter().cloned()); + let connected_candidates_count = live_candidates.len(); + for (leaf, sub_view) in active_leaves.iter() { + for (para_id, fragment_chain) in &sub_view.fragment_chains { + if let Some(storage) = view.candidate_storage.get(para_id) { + let unconnected_potential = + fragment_chain.find_unconnected_potential_candidates(storage, None); + if !unconnected_potential.is_empty() { + gum::trace!( + target: LOG_TARGET, + ?leaf, + "Keeping {} unconnected candidates for paraid {} in storage: {:?}", + unconnected_potential.len(), + para_id, + unconnected_potential + ); + } + live_candidates.extend(unconnected_potential); + } + } } view.candidate_storage.retain(|para_id, storage| { @@ -343,7 +371,21 @@ fn prune_view_candidate_storage(view: &mut View, metrics: &Metrics) { // This maintains a convenient invariant that para-id storage exists // as long as there's an active head which schedules the para. 
true - }) + }); + + for (para_id, storage) in view.candidate_storage.iter() { + gum::trace!( + target: LOG_TARGET, + "Keeping a total of {} connected candidates for paraid {} in storage", + storage.candidates().count(), + para_id, + ); + } + + metrics.record_candidate_storage_size( + connected_candidates_count as u64, + live_candidates.len().saturating_sub(connected_candidates_count) as u64, + ); } struct ImportablePendingAvailability { @@ -365,22 +407,20 @@ async fn preprocess_candidates_pending_availability( let expected_count = pending_availability.len(); for (i, pending) in pending_availability.into_iter().enumerate() { - let relay_parent = - match fetch_block_info(ctx, cache, pending.descriptor.relay_parent).await? { - None => { - gum::debug!( - target: LOG_TARGET, - ?pending.candidate_hash, - ?pending.descriptor.para_id, - index = ?i, - ?expected_count, - "Had to stop processing pending candidates early due to missing info.", - ); + let Some(relay_parent) = + fetch_block_info(ctx, cache, pending.descriptor.relay_parent).await? + else { + gum::debug!( + target: LOG_TARGET, + ?pending.candidate_hash, + ?pending.descriptor.para_id, + index = ?i, + ?expected_count, + "Had to stop processing pending candidates early due to missing info.", + ); - break - }, - Some(b) => b, - }; + break + }; let next_required_parent = pending.commitments.head_data.clone(); importable.push(ImportablePendingAvailability { @@ -407,104 +447,139 @@ async fn preprocess_candidates_pending_availability( } #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] -async fn handle_candidate_introduced( +async fn handle_introduce_seconded_candidate( _ctx: &mut Context, view: &mut View, - request: IntroduceCandidateRequest, - tx: oneshot::Sender, -) -> JfyiErrorResult<()> { - let IntroduceCandidateRequest { + request: IntroduceSecondedCandidateRequest, + tx: oneshot::Sender, + metrics: &Metrics, +) { + let _timer = metrics.time_introduce_seconded_candidate(); + + let IntroduceSecondedCandidateRequest { candidate_para: para, candidate_receipt: candidate, persisted_validation_data: pvd, } = request; - // Add the candidate to storage. - // Then attempt to add it to all trees. - let storage = match view.candidate_storage.get_mut(¶) { - None => { - gum::warn!( - target: LOG_TARGET, - para_id = ?para, - candidate_hash = ?candidate.hash(), - "Received seconded candidate for inactive para", - ); + let Some(storage) = view.candidate_storage.get_mut(¶) else { + gum::warn!( + target: LOG_TARGET, + para_id = ?para, + candidate_hash = ?candidate.hash(), + "Received seconded candidate for inactive para", + ); - let _ = tx.send(Vec::new()); - return Ok(()) - }, - Some(storage) => storage, + let _ = tx.send(false); + return }; - let candidate_hash = match storage.add_candidate(candidate, pvd) { - Ok(c) => c, - Err(CandidateStorageInsertionError::CandidateAlreadyKnown(c)) => { - // Candidate known - return existing fragment tree membership. - let _ = tx.send(fragment_tree_membership(&view.active_leaves, para, c)); - return Ok(()) - }, - Err(CandidateStorageInsertionError::PersistedValidationDataMismatch) => { - // We can't log the candidate hash without either doing more ~expensive - // hashing but this branch indicates something is seriously wrong elsewhere - // so it's doubtful that it would affect debugging. + let parent_head_hash = pvd.parent_head.hash(); + let output_head_hash = Some(candidate.commitments.head_data.hash()); + + // We first introduce the candidate in the storage and then try to extend the chain. 
+ // If the candidate gets included in the chain, we can keep it in storage. + // If it doesn't, check that it's still a potential candidate in at least one fragment chain. + // If it's not, we can remove it. + + let candidate_hash = + match storage.add_candidate(candidate.clone(), pvd, CandidateState::Seconded) { + Ok(c) => c, + Err(CandidateStorageInsertionError::CandidateAlreadyKnown(_)) => { + gum::debug!( + target: LOG_TARGET, + para = ?para, + "Attempting to introduce an already known candidate: {:?}", + candidate.hash() + ); + // Candidate already known. + let _ = tx.send(true); + return + }, + Err(CandidateStorageInsertionError::PersistedValidationDataMismatch) => { + // We can't log the candidate hash without either doing more ~expensive + // hashing but this branch indicates something is seriously wrong elsewhere + // so it's doubtful that it would affect debugging. - gum::warn!( + gum::warn!( + target: LOG_TARGET, + para = ?para, + "Received seconded candidate had mismatching validation data", + ); + + let _ = tx.send(false); + return + }, + }; + + let mut keep_in_storage = false; + for (relay_parent, leaf_data) in view.active_leaves.iter_mut() { + if let Some(chain) = leaf_data.fragment_chains.get_mut(¶) { + gum::trace!( target: LOG_TARGET, para = ?para, - "Received seconded candidate had mismatching validation data", + ?relay_parent, + "Candidates in chain before trying to introduce a new one: {:?}", + chain.to_vec() ); + chain.extend_from_storage(&*storage); + if chain.contains_candidate(&candidate_hash) { + keep_in_storage = true; - let _ = tx.send(Vec::new()); - return Ok(()) - }, - }; + gum::trace!( + target: LOG_TARGET, + ?relay_parent, + para = ?para, + ?candidate_hash, + "Added candidate to chain.", + ); + } else { + match chain.can_add_candidate_as_potential( + &storage, + &candidate_hash, + &candidate.descriptor.relay_parent, + parent_head_hash, + output_head_hash, + ) { + PotentialAddition::Anyhow => { + gum::trace!( + target: LOG_TARGET, + para = ?para, + ?relay_parent, + ?candidate_hash, + "Kept candidate as unconnected potential.", + ); - let mut membership = Vec::new(); - for (relay_parent, leaf_data) in &mut view.active_leaves { - if let Some(tree) = leaf_data.fragment_trees.get_mut(¶) { - tree.add_and_populate(candidate_hash, &*storage); - if let Some(depths) = tree.candidate(&candidate_hash) { - membership.push((*relay_parent, depths)); + keep_in_storage = true; + }, + _ => { + gum::trace!( + target: LOG_TARGET, + para = ?para, + ?relay_parent, + "Not introducing a new candidate: {:?}", + candidate_hash + ); + }, + } } } } - if membership.is_empty() { + // If there is at least one leaf where this candidate can be added or potentially added in the + // future, keep it in storage. 
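// An illustrative, self-contained model of the decision described above, with hypothetical
// `Chain`/`Candidate` types standing in for the real `FragmentChain` and `CandidateStorage`:
// a freshly seconded candidate stays in storage only if at least one active leaf either
// appended it to its chain or still considers it a potential (not yet connected) extension.
use std::collections::HashMap;

type CandidateHash = u64;
type HeadHash = u64;

#[derive(Clone)]
struct Candidate {
    hash: CandidateHash,
    parent_head: HeadHash,
    output_head: HeadHash,
}

struct Chain {
    // Head-data hash the next candidate must build on.
    next_required_parent: HeadHash,
    members: Vec<CandidateHash>,
    max_len: usize,
}

impl Chain {
    // Rough analogue of `extend_from_storage` followed by `contains_candidate`.
    fn try_extend(&mut self, c: &Candidate) -> bool {
        if self.members.len() < self.max_len && c.parent_head == self.next_required_parent {
            self.members.push(c.hash);
            self.next_required_parent = c.output_head;
            true
        } else {
            false
        }
    }

    // Very rough analogue of `can_add_candidate_as_potential`: here, merely "is there room".
    fn is_potential(&self, _c: &Candidate) -> bool {
        self.members.len() < self.max_len
    }
}

fn main() {
    let mut storage: HashMap<CandidateHash, Candidate> = HashMap::new();
    let candidate = Candidate { hash: 7, parent_head: 100, output_head: 101 };
    storage.insert(candidate.hash, candidate.clone());

    // Two active leaves, each with its own chain for this para.
    let mut chains = vec![
        Chain { next_required_parent: 100, members: vec![], max_len: 2 },
        Chain { next_required_parent: 555, members: vec![], max_len: 0 },
    ];

    let mut keep_in_storage = false;
    for chain in chains.iter_mut() {
        if chain.try_extend(&candidate) || chain.is_potential(&candidate) {
            keep_in_storage = true;
        }
    }

    // The first leaf chained the candidate, so it is kept; otherwise it would be removed.
    if !keep_in_storage {
        storage.remove(&candidate.hash);
    }
    assert!(keep_in_storage && storage.contains_key(&7));
}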
+ if !keep_in_storage { storage.remove_candidate(&candidate_hash); - } - - let _ = tx.send(membership); - - Ok(()) -} - -fn handle_candidate_seconded(view: &mut View, para: ParaId, candidate_hash: CandidateHash) { - let storage = match view.candidate_storage.get_mut(¶) { - None => { - gum::warn!( - target: LOG_TARGET, - para_id = ?para, - ?candidate_hash, - "Received instruction to second unknown candidate", - ); - return - }, - Some(storage) => storage, - }; - - if !storage.contains(&candidate_hash) { - gum::warn!( + gum::debug!( target: LOG_TARGET, - para_id = ?para, - ?candidate_hash, - "Received instruction to second unknown candidate", + para = ?para, + candidate = ?candidate_hash, + "Newly-seconded candidate cannot be kept under any active leaf", ); - - return } - storage.mark_seconded(&candidate_hash); + let _ = tx.send(keep_in_storage); } #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] @@ -513,19 +588,16 @@ async fn handle_candidate_backed( view: &mut View, para: ParaId, candidate_hash: CandidateHash, -) -> JfyiErrorResult<()> { - let storage = match view.candidate_storage.get_mut(¶) { - None => { - gum::warn!( - target: LOG_TARGET, - para_id = ?para, - ?candidate_hash, - "Received instruction to back unknown candidate", - ); +) { + let Some(storage) = view.candidate_storage.get_mut(¶) else { + gum::warn!( + target: LOG_TARGET, + para_id = ?para, + ?candidate_hash, + "Received instruction to back a candidate for unscheduled para", + ); - return Ok(()) - }, - Some(storage) => storage, + return }; if !storage.contains(&candidate_hash) { @@ -536,7 +608,7 @@ async fn handle_candidate_backed( "Received instruction to back unknown candidate", ); - return Ok(()) + return } if storage.is_backed(&candidate_hash) { @@ -547,11 +619,10 @@ async fn handle_candidate_backed( "Received redundant instruction to mark candidate as backed", ); - return Ok(()) + return } storage.mark_backed(&candidate_hash); - Ok(()) } fn answer_get_backable_candidates( @@ -562,62 +633,71 @@ fn answer_get_backable_candidates( ancestors: Ancestors, tx: oneshot::Sender>, ) { - let data = match view.active_leaves.get(&relay_parent) { - None => { - gum::debug!( - target: LOG_TARGET, - ?relay_parent, - para_id = ?para, - "Requested backable candidate for inactive relay-parent." - ); + let Some(data) = view.active_leaves.get(&relay_parent) else { + gum::debug!( + target: LOG_TARGET, + ?relay_parent, + para_id = ?para, + "Requested backable candidate for inactive relay-parent." + ); - let _ = tx.send(vec![]); - return - }, - Some(d) => d, + let _ = tx.send(vec![]); + return }; - let tree = match data.fragment_trees.get(¶) { - None => { - gum::debug!( - target: LOG_TARGET, - ?relay_parent, - para_id = ?para, - "Requested backable candidate for inactive para." - ); + let Some(chain) = data.fragment_chains.get(¶) else { + gum::debug!( + target: LOG_TARGET, + ?relay_parent, + para_id = ?para, + "Requested backable candidate for inactive para." 
+ ); - let _ = tx.send(vec![]); - return - }, - Some(tree) => tree, + let _ = tx.send(vec![]); + return }; - let storage = match view.candidate_storage.get(¶) { - None => { - gum::warn!( - target: LOG_TARGET, - ?relay_parent, - para_id = ?para, - "No candidate storage for active para", - ); + let Some(storage) = view.candidate_storage.get(¶) else { + gum::warn!( + target: LOG_TARGET, + ?relay_parent, + para_id = ?para, + "No candidate storage for active para", + ); - let _ = tx.send(vec![]); - return - }, - Some(s) => s, + let _ = tx.send(vec![]); + return }; - let backable_candidates: Vec<_> = tree + gum::trace!( + target: LOG_TARGET, + ?relay_parent, + para_id = ?para, + "Candidate storage for para: {:?}", + storage.candidates().map(|candidate| candidate.hash()).collect::>() + ); + + gum::trace!( + target: LOG_TARGET, + ?relay_parent, + para_id = ?para, + "Candidate chain for para: {:?}", + chain.to_vec() + ); + + let backable_candidates: Vec<_> = chain .find_backable_chain(ancestors.clone(), count, |candidate| storage.is_backed(candidate)) .into_iter() .filter_map(|child_hash| { - storage.relay_parent_by_candidate_hash(&child_hash).map_or_else( + storage.relay_parent_of_candidate(&child_hash).map_or_else( || { + // Here, we'd actually need to trim all of the candidates that follow. Or + // not, the runtime will do this. Impossible scenario anyway. gum::error!( target: LOG_TARGET, ?child_hash, para_id = ?para, - "Candidate is present in fragment tree but not in candidate's storage!", + "Candidate is present in fragment chain but not in candidate's storage!", ); None }, @@ -639,6 +719,7 @@ fn answer_get_backable_candidates( target: LOG_TARGET, ?relay_parent, ?backable_candidates, + ?ancestors, "Found backable candidates", ); } @@ -646,58 +727,32 @@ fn answer_get_backable_candidates( let _ = tx.send(backable_candidates); } -fn answer_hypothetical_frontier_request( +fn answer_hypothetical_membership_request( view: &View, - request: HypotheticalFrontierRequest, - tx: oneshot::Sender>, + request: HypotheticalMembershipRequest, + tx: oneshot::Sender>, + metrics: &Metrics, ) { + let _timer = metrics.time_hypothetical_membership_request(); + let mut response = Vec::with_capacity(request.candidates.len()); for candidate in request.candidates { - response.push((candidate, Vec::new())); + response.push((candidate, vec![])); } - let required_active_leaf = request.fragment_tree_relay_parent; + let required_active_leaf = request.fragment_chain_relay_parent; for (active_leaf, leaf_view) in view .active_leaves .iter() .filter(|(h, _)| required_active_leaf.as_ref().map_or(true, |x| h == &x)) { - for &mut (ref c, ref mut membership) in &mut response { - let fragment_tree = match leaf_view.fragment_trees.get(&c.candidate_para()) { - None => continue, - Some(f) => f, - }; - let candidate_storage = match view.candidate_storage.get(&c.candidate_para()) { - None => continue, - Some(storage) => storage, - }; - - let candidate_hash = c.candidate_hash(); - let hypothetical = match c { - HypotheticalCandidate::Complete { receipt, persisted_validation_data, .. } => - fragment_chain::HypotheticalCandidate::Complete { - receipt: Cow::Borrowed(receipt), - persisted_validation_data: Cow::Borrowed(persisted_validation_data), - }, - HypotheticalCandidate::Incomplete { - parent_head_data_hash, - candidate_relay_parent, - .. 
- } => fragment_chain::HypotheticalCandidate::Incomplete { - relay_parent: *candidate_relay_parent, - parent_head_data_hash: *parent_head_data_hash, - }, - }; - - let depths = fragment_tree.hypothetical_depths( - candidate_hash, - hypothetical, - candidate_storage, - request.backed_in_path_only, - ); + for &mut (ref candidate, ref mut membership) in &mut response { + let para_id = &candidate.candidate_para(); + let Some(fragment_chain) = leaf_view.fragment_chains.get(para_id) else { continue }; + let Some(candidate_storage) = view.candidate_storage.get(para_id) else { continue }; - if !depths.is_empty() { - membership.push((*active_leaf, depths)); + if fragment_chain.hypothetical_membership(candidate.clone(), candidate_storage) { + membership.push(*active_leaf); } } } @@ -705,31 +760,6 @@ fn answer_hypothetical_frontier_request( let _ = tx.send(response); } -fn fragment_tree_membership( - active_leaves: &HashMap, - para: ParaId, - candidate: CandidateHash, -) -> FragmentTreeMembership { - let mut membership = Vec::new(); - for (relay_parent, view_data) in active_leaves { - if let Some(tree) = view_data.fragment_trees.get(¶) { - if let Some(depths) = tree.candidate(&candidate) { - membership.push((*relay_parent, depths)); - } - } - } - membership -} - -fn answer_tree_membership_request( - view: &View, - para: ParaId, - candidate: CandidateHash, - tx: oneshot::Sender, -) { - let _ = tx.send(fragment_tree_membership(&view.active_leaves, para, candidate)); -} - fn answer_minimum_relay_parents_request( view: &View, relay_parent: Hash, @@ -737,8 +767,8 @@ fn answer_minimum_relay_parents_request( ) { let mut v = Vec::new(); if let Some(leaf_data) = view.active_leaves.get(&relay_parent) { - for (para_id, fragment_tree) in &leaf_data.fragment_trees { - v.push((*para_id, fragment_tree.scope().earliest_relay_parent().number)); + for (para_id, fragment_chain) in &leaf_data.fragment_chains { + v.push((*para_id, fragment_chain.scope().earliest_relay_parent().number)); } } @@ -752,9 +782,9 @@ fn answer_prospective_validation_data_request( ) { // 1. Try to get the head-data from the candidate store if known. // 2. Otherwise, it might exist as the base in some relay-parent and we can find it by iterating - // fragment trees. + // fragment chains. // 3. Otherwise, it is unknown. - // 4. Also try to find the relay parent block info by scanning fragment trees. + // 4. Also try to find the relay parent block info by scanning fragment chains. // 5. If head data and relay parent block info are found - success. Otherwise, failure. 
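// A compact, self-contained sketch of the lookup order in the numbered comment above,
// with hypothetical plain structs (`ChainScope`, a bare head-data map) in place of the
// real `CandidateStorage` and fragment-chain `Scope`; the numbered comments refer to
// the steps listed above.
use std::collections::HashMap;

type HeadHash = u64;
type Hash = u64;

struct ChainScope {
    // Relay parents this chain is allowed to build on (base block plus ancestors).
    allowed_relay_parents: Vec<Hash>,
    // Hash of the head data the first candidate in the chain must build on.
    required_parent_hash: HeadHash,
    max_pov_size: u32,
}

fn prospective_validation_data(
    known_head_data: &HashMap<HeadHash, Vec<u8>>, // step 1: head data known to storage
    chains: &[ChainScope],                        // steps 2 and 4: scan the fragment chains
    parent_head_hash: HeadHash,
    candidate_relay_parent: Hash,
) -> Option<(Vec<u8>, Hash, u32)> {
    // 1. Head data straight from the candidate store, if known.
    let mut head_data = known_head_data.get(&parent_head_hash).cloned();
    let mut relay_parent = None;
    let mut max_pov_size = None;

    for scope in chains {
        // 2. Otherwise the parent head may be the base "required parent" of some chain.
        if head_data.is_none() && scope.required_parent_hash == parent_head_hash {
            head_data = Some(vec![0u8]); // stand-in for the base head data itself
        }
        // 4. The candidate's relay parent must be an allowed ancestor of some chain.
        if relay_parent.is_none() &&
            scope.allowed_relay_parents.contains(&candidate_relay_parent)
        {
            relay_parent = Some(candidate_relay_parent);
            max_pov_size = Some(scope.max_pov_size);
        }
    }

    // 5. Succeed only if everything was found; otherwise the data is unknown (step 3).
    Some((head_data?, relay_parent?, max_pov_size?))
}

fn main() {
    let known = HashMap::from([(42u64, vec![1u8, 2, 3])]);
    let chains = [ChainScope {
        allowed_relay_parents: vec![7],
        required_parent_hash: 9,
        max_pov_size: 5 * 1024 * 1024,
    }];
    assert!(prospective_validation_data(&known, &chains, 42, 7).is_some());
    assert!(prospective_validation_data(&known, &chains, 1, 7).is_none());
}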
let storage = match view.candidate_storage.get(&request.para_id) { @@ -776,35 +806,32 @@ fn answer_prospective_validation_data_request( let mut relay_parent_info = None; let mut max_pov_size = None; - for fragment_tree in view + for fragment_chain in view .active_leaves .values() - .filter_map(|x| x.fragment_trees.get(&request.para_id)) + .filter_map(|x| x.fragment_chains.get(&request.para_id)) { if head_data.is_some() && relay_parent_info.is_some() && max_pov_size.is_some() { break } if relay_parent_info.is_none() { - relay_parent_info = - fragment_tree.scope().ancestor_by_hash(&request.candidate_relay_parent); + relay_parent_info = fragment_chain.scope().ancestor(&request.candidate_relay_parent); } if head_data.is_none() { - let required_parent = &fragment_tree.scope().base_constraints().required_parent; + let required_parent = &fragment_chain.scope().base_constraints().required_parent; if required_parent.hash() == parent_head_data_hash { head_data = Some(required_parent.clone()); } } if max_pov_size.is_none() { - let contains_ancestor = fragment_tree - .scope() - .ancestor_by_hash(&request.candidate_relay_parent) - .is_some(); + let contains_ancestor = + fragment_chain.scope().ancestor(&request.candidate_relay_parent).is_some(); if contains_ancestor { // We are leaning hard on two assumptions here. - // 1. That the fragment tree never contains allowed relay-parents whose session for + // 1. That the fragment chain never contains allowed relay-parents whose session for // children is different from that of the base block's. // 2. That the max_pov_size is only configurable per session. - max_pov_size = Some(fragment_tree.scope().base_constraints().max_pov_size); + max_pov_size = Some(fragment_chain.scope().base_constraints().max_pov_size); } } } @@ -843,7 +870,6 @@ async fn fetch_backing_state( async fn fetch_upcoming_paras( ctx: &mut Context, relay_parent: Hash, - pending_availability: &mut HashSet<CandidateHash>, ) -> JfyiErrorResult<Vec<ParaId>> { let (tx, rx) = oneshot::channel(); @@ -860,8 +886,6 @@ async fn fetch_upcoming_paras( for core in cores { match core { CoreState::Occupied(occupied) => { - pending_availability.insert(occupied.candidate_hash); - if let Some(next_up_on_available) = occupied.next_up_on_available { upcoming.insert(next_up_on_available.para_id); } diff --git a/polkadot/node/core/prospective-parachains/src/metrics.rs b/polkadot/node/core/prospective-parachains/src/metrics.rs index 57061497a1c0..5abd9f56f306 100644 --- a/polkadot/node/core/prospective-parachains/src/metrics.rs +++ b/polkadot/node/core/prospective-parachains/src/metrics.rs @@ -14,11 +14,18 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see <http://www.gnu.org/licenses/>. -use polkadot_node_subsystem_util::metrics::{self, prometheus}; +use polkadot_node_subsystem::prometheus::Opts; +use polkadot_node_subsystem_util::metrics::{ + self, + prometheus::{self, GaugeVec, U64}, }; #[derive(Clone)] pub(crate) struct MetricsInner { - pub(crate) prune_view_candidate_storage: prometheus::Histogram, + prune_view_candidate_storage: prometheus::Histogram, + introduce_seconded_candidate: prometheus::Histogram, + hypothetical_membership: prometheus::Histogram, + candidate_storage_count: prometheus::GaugeVec<U64>, } /// Candidate backing metrics. @@ -34,6 +41,40 @@ impl Metrics { .as_ref() .map(|metrics| metrics.prune_view_candidate_storage.start_timer()) } + + /// Provide a timer for handling `IntroduceSecondedCandidate` which observes on drop.
+ pub fn time_introduce_seconded_candidate( + &self, + ) -> Option { + self.0 + .as_ref() + .map(|metrics| metrics.introduce_seconded_candidate.start_timer()) + } + + /// Provide a timer for handling `GetHypotheticalMembership` which observes on drop. + pub fn time_hypothetical_membership_request( + &self, + ) -> Option { + self.0.as_ref().map(|metrics| metrics.hypothetical_membership.start_timer()) + } + + /// Record the size of the candidate storage. First param is the connected candidates count, + /// second param is the unconnected candidates count. + pub fn record_candidate_storage_size(&self, connected_count: u64, unconnected_count: u64) { + self.0.as_ref().map(|metrics| { + metrics + .candidate_storage_count + .with_label_values(&["connected"]) + .set(connected_count) + }); + + self.0.as_ref().map(|metrics| { + metrics + .candidate_storage_count + .with_label_values(&["unconnected"]) + .set(unconnected_count) + }); + } } impl metrics::Metrics for Metrics { @@ -46,6 +87,30 @@ impl metrics::Metrics for Metrics { ))?, registry, )?, + introduce_seconded_candidate: prometheus::register( + prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( + "polkadot_parachain_prospective_parachains_introduce_seconded_candidate", + "Time spent within `prospective_parachains::handle_introduce_seconded_candidate`", + ))?, + registry, + )?, + hypothetical_membership: prometheus::register( + prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( + "polkadot_parachain_prospective_parachains_hypothetical_membership", + "Time spent responding to `GetHypotheticalMembership`", + ))?, + registry, + )?, + candidate_storage_count: prometheus::register( + GaugeVec::new( + Opts::new( + "polkadot_parachain_prospective_parachains_candidate_storage_count", + "Number of candidates present in the candidate storage, split by connected and unconnected" + ), + &["type"], + )?, + registry, + )?, }; Ok(Metrics(Some(metrics))) } diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs index 8989911a3323..4bc473672788 100644 --- a/polkadot/node/core/prospective-parachains/src/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/tests.rs @@ -19,7 +19,7 @@ use assert_matches::assert_matches; use polkadot_node_subsystem::{ errors::RuntimeApiError, messages::{ - AllMessages, HypotheticalFrontierRequest, ParentHeadData, ProspectiveParachainsMessage, + AllMessages, HypotheticalMembershipRequest, ParentHeadData, ProspectiveParachainsMessage, ProspectiveValidationDataRequest, }, }; @@ -340,36 +340,42 @@ async fn deactivate_leaf(virtual_overseer: &mut VirtualOverseer, hash: Hash) { .await; } -async fn introduce_candidate( +async fn introduce_seconded_candidate( virtual_overseer: &mut VirtualOverseer, candidate: CommittedCandidateReceipt, pvd: PersistedValidationData, ) { - let req = IntroduceCandidateRequest { + let req = IntroduceSecondedCandidateRequest { candidate_para: candidate.descriptor().para_id, candidate_receipt: candidate, persisted_validation_data: pvd, }; - let (tx, _) = oneshot::channel(); + let (tx, rx) = oneshot::channel(); virtual_overseer .send(overseer::FromOrchestra::Communication { - msg: ProspectiveParachainsMessage::IntroduceCandidate(req, tx), + msg: ProspectiveParachainsMessage::IntroduceSecondedCandidate(req, tx), }) .await; + assert!(rx.await.unwrap()); } -async fn second_candidate( +async fn introduce_seconded_candidate_failed( virtual_overseer: &mut VirtualOverseer, candidate: CommittedCandidateReceipt, + pvd: 
PersistedValidationData, ) { + let req = IntroduceSecondedCandidateRequest { + candidate_para: candidate.descriptor().para_id, + candidate_receipt: candidate, + persisted_validation_data: pvd, + }; + let (tx, rx) = oneshot::channel(); virtual_overseer .send(overseer::FromOrchestra::Communication { - msg: ProspectiveParachainsMessage::CandidateSeconded( - candidate.descriptor.para_id, - candidate.hash(), - ), + msg: ProspectiveParachainsMessage::IntroduceSecondedCandidate(req, tx), }) .await; + assert!(!rx.await.unwrap()); } async fn back_candidate( @@ -387,22 +393,6 @@ async fn back_candidate( .await; } -async fn get_membership( - virtual_overseer: &mut VirtualOverseer, - para_id: ParaId, - candidate_hash: CandidateHash, - expected_membership_response: Vec<(Hash, Vec)>, -) { - let (tx, rx) = oneshot::channel(); - virtual_overseer - .send(overseer::FromOrchestra::Communication { - msg: ProspectiveParachainsMessage::GetTreeMembership(para_id, candidate_hash, tx), - }) - .await; - let resp = rx.await.unwrap(); - assert_eq!(resp, expected_membership_response); -} - async fn get_backable_candidates( virtual_overseer: &mut VirtualOverseer, leaf: &TestLeaf, @@ -420,42 +410,39 @@ async fn get_backable_candidates( }) .await; let resp = rx.await.unwrap(); - assert_eq!(resp.len(), expected_result.len()); assert_eq!(resp, expected_result); } -async fn get_hypothetical_frontier( +async fn get_hypothetical_membership( virtual_overseer: &mut VirtualOverseer, candidate_hash: CandidateHash, receipt: CommittedCandidateReceipt, persisted_validation_data: PersistedValidationData, - fragment_tree_relay_parent: Hash, - backed_in_path_only: bool, - expected_depths: Vec, + expected_membership: Vec, ) { let hypothetical_candidate = HypotheticalCandidate::Complete { candidate_hash, receipt: Arc::new(receipt), persisted_validation_data, }; - let request = HypotheticalFrontierRequest { + let request = HypotheticalMembershipRequest { candidates: vec![hypothetical_candidate.clone()], - fragment_tree_relay_parent: Some(fragment_tree_relay_parent), - backed_in_path_only, + fragment_chain_relay_parent: None, }; let (tx, rx) = oneshot::channel(); virtual_overseer .send(overseer::FromOrchestra::Communication { - msg: ProspectiveParachainsMessage::GetHypotheticalFrontier(request, tx), + msg: ProspectiveParachainsMessage::GetHypotheticalMembership(request, tx), }) .await; - let resp = rx.await.unwrap(); - let expected_frontier = if expected_depths.is_empty() { - vec![(hypothetical_candidate, vec![])] - } else { - vec![(hypothetical_candidate, vec![(fragment_tree_relay_parent, expected_depths)])] - }; - assert_eq!(resp, expected_frontier); + let mut resp = rx.await.unwrap(); + assert_eq!(resp.len(), 1); + let (candidate, membership) = resp.remove(0); + assert_eq!(candidate, hypothetical_candidate); + assert_eq!( + membership.into_iter().collect::>(), + expected_membership.into_iter().collect::>() + ); } async fn get_pvd( @@ -513,11 +500,11 @@ fn should_do_no_work_if_async_backing_disabled_for_leaf() { } // Send some candidates and make sure all are found: -// - Two for the same leaf A +// - Two for the same leaf A (one for parachain 1 and one for parachain 2) // - One for leaf B on parachain 1 // - One for leaf C on parachain 2 #[test] -fn send_candidates_and_check_if_found() { +fn introduce_candidates_basic() { let test_state = TestState::default(); let view = test_harness(|mut virtual_overseer| async move { // Leaf A @@ -563,7 +550,7 @@ fn send_candidates_and_check_if_found() { test_state.validation_code_hash, ); let 
candidate_hash_a1 = candidate_a1.hash(); - let response_a1 = vec![(leaf_a.hash, vec![0])]; + let response_a1 = vec![(candidate_hash_a1, leaf_a.hash)]; // Candidate A2 let (candidate_a2, pvd_a2) = make_candidate( @@ -575,7 +562,7 @@ fn send_candidates_and_check_if_found() { test_state.validation_code_hash, ); let candidate_hash_a2 = candidate_a2.hash(); - let response_a2 = vec![(leaf_a.hash, vec![0])]; + let response_a2 = vec![(candidate_hash_a2, leaf_a.hash)]; // Candidate B let (candidate_b, pvd_b) = make_candidate( @@ -587,7 +574,7 @@ fn send_candidates_and_check_if_found() { test_state.validation_code_hash, ); let candidate_hash_b = candidate_b.hash(); - let response_b = vec![(leaf_b.hash, vec![0])]; + let response_b = vec![(candidate_hash_b, leaf_b.hash)]; // Candidate C let (candidate_c, pvd_c) = make_candidate( @@ -599,25 +586,78 @@ fn send_candidates_and_check_if_found() { test_state.validation_code_hash, ); let candidate_hash_c = candidate_c.hash(); - let response_c = vec![(leaf_c.hash, vec![0])]; + let response_c = vec![(candidate_hash_c, leaf_c.hash)]; // Introduce candidates. - introduce_candidate(&mut virtual_overseer, candidate_a1, pvd_a1).await; - introduce_candidate(&mut virtual_overseer, candidate_a2, pvd_a2).await; - introduce_candidate(&mut virtual_overseer, candidate_b, pvd_b).await; - introduce_candidate(&mut virtual_overseer, candidate_c, pvd_c).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_a1.clone(), pvd_a1).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_a2.clone(), pvd_a2).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_c.clone(), pvd_c).await; + + // Back candidates. Otherwise, we cannot check membership with GetBackableCandidates. + back_candidate(&mut virtual_overseer, &candidate_a1, candidate_hash_a1).await; + back_candidate(&mut virtual_overseer, &candidate_a2, candidate_hash_a2).await; + back_candidate(&mut virtual_overseer, &candidate_b, candidate_hash_b).await; + back_candidate(&mut virtual_overseer, &candidate_c, candidate_hash_c).await; // Check candidate tree membership. - get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a1, response_a1).await; - get_membership(&mut virtual_overseer, 2.into(), candidate_hash_a2, response_a2).await; - get_membership(&mut virtual_overseer, 1.into(), candidate_hash_b, response_b).await; - get_membership(&mut virtual_overseer, 2.into(), candidate_hash_c, response_c).await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::default(), + 5, + response_a1, + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 2.into(), + Ancestors::default(), + 5, + response_a2, + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + 1.into(), + Ancestors::default(), + 5, + response_b, + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_c, + 2.into(), + Ancestors::default(), + 5, + response_c, + ) + .await; + + // Check membership on other leaves. + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + 2.into(), + Ancestors::default(), + 5, + vec![], + ) + .await; - // The candidates should not be found on other parachains. 
- get_membership(&mut virtual_overseer, 2.into(), candidate_hash_a1, vec![]).await; - get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a2, vec![]).await; - get_membership(&mut virtual_overseer, 2.into(), candidate_hash_b, vec![]).await; - get_membership(&mut virtual_overseer, 1.into(), candidate_hash_c, vec![]).await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_c, + 1.into(), + Ancestors::default(), + 5, + vec![], + ) + .await; virtual_overseer }); @@ -629,10 +669,8 @@ fn send_candidates_and_check_if_found() { assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (2, 2)); } -// Send some candidates, check if the candidate won't be found once its relay parent leaves the -// view. #[test] -fn check_candidate_parent_leaving_view() { +fn introduce_candidate_multiple_times() { let test_state = TestState::default(); let view = test_harness(|mut virtual_overseer| async move { // Leaf A @@ -644,32 +682,11 @@ fn check_candidate_parent_leaving_view() { (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), ], }; - // Leaf B - let leaf_b = TestLeaf { - number: 101, - hash: Hash::from_low_u64_be(131), - para_data: vec![ - (1.into(), PerParaData::new(99, HeadData(vec![3, 4, 5]))), - (2.into(), PerParaData::new(101, HeadData(vec![4, 5, 6]))), - ], - }; - // Leaf C - let leaf_c = TestLeaf { - number: 102, - hash: Hash::from_low_u64_be(132), - para_data: vec![ - (1.into(), PerParaData::new(102, HeadData(vec![5, 6, 7]))), - (2.into(), PerParaData::new(98, HeadData(vec![6, 7, 8]))), - ], - }; - // Activate leaves. activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; - activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await; - activate_leaf(&mut virtual_overseer, &leaf_c, &test_state).await; - // Candidate A1 - let (candidate_a1, pvd_a1) = make_candidate( + // Candidate A. + let (candidate_a, pvd_a) = make_candidate( leaf_a.hash, leaf_a.number, 1.into(), @@ -677,86 +694,45 @@ fn check_candidate_parent_leaving_view() { HeadData(vec![1]), test_state.validation_code_hash, ); - let candidate_hash_a1 = candidate_a1.hash(); - - // Candidate A2 - let (candidate_a2, pvd_a2) = make_candidate( - leaf_a.hash, - leaf_a.number, - 2.into(), - HeadData(vec![2, 3, 4]), - HeadData(vec![2]), - test_state.validation_code_hash, - ); - let candidate_hash_a2 = candidate_a2.hash(); - - // Candidate B - let (candidate_b, pvd_b) = make_candidate( - leaf_b.hash, - leaf_b.number, - 1.into(), - HeadData(vec![3, 4, 5]), - HeadData(vec![3]), - test_state.validation_code_hash, - ); - let candidate_hash_b = candidate_b.hash(); - let response_b = vec![(leaf_b.hash, vec![0])]; - - // Candidate C - let (candidate_c, pvd_c) = make_candidate( - leaf_c.hash, - leaf_c.number, - 2.into(), - HeadData(vec![6, 7, 8]), - HeadData(vec![4]), - test_state.validation_code_hash, - ); - let candidate_hash_c = candidate_c.hash(); - let response_c = vec![(leaf_c.hash, vec![0])]; + let candidate_hash_a = candidate_a.hash(); + let response_a = vec![(candidate_hash_a, leaf_a.hash)]; // Introduce candidates. - introduce_candidate(&mut virtual_overseer, candidate_a1, pvd_a1).await; - introduce_candidate(&mut virtual_overseer, candidate_a2, pvd_a2).await; - introduce_candidate(&mut virtual_overseer, candidate_b, pvd_b).await; - introduce_candidate(&mut virtual_overseer, candidate_c, pvd_c).await; - - // Deactivate leaf A. - deactivate_leaf(&mut virtual_overseer, leaf_a.hash).await; - - // Candidates A1 and A2 should be gone. Candidates B and C should remain. 
- get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a1, vec![]).await; - get_membership(&mut virtual_overseer, 2.into(), candidate_hash_a2, vec![]).await; - get_membership(&mut virtual_overseer, 1.into(), candidate_hash_b, response_b).await; - get_membership(&mut virtual_overseer, 2.into(), candidate_hash_c, response_c.clone()).await; - - // Deactivate leaf B. - deactivate_leaf(&mut virtual_overseer, leaf_b.hash).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()) + .await; - // Candidate B should be gone, C should remain. - get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a1, vec![]).await; - get_membership(&mut virtual_overseer, 2.into(), candidate_hash_a2, vec![]).await; - get_membership(&mut virtual_overseer, 1.into(), candidate_hash_b, vec![]).await; - get_membership(&mut virtual_overseer, 2.into(), candidate_hash_c, response_c).await; + // Back candidates. Otherwise, we cannot check membership with GetBackableCandidates. + back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await; - // Deactivate leaf C. - deactivate_leaf(&mut virtual_overseer, leaf_c.hash).await; + // Check candidate tree membership. + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::default(), + 5, + response_a, + ) + .await; - // Candidate C should be gone. - get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a1, vec![]).await; - get_membership(&mut virtual_overseer, 2.into(), candidate_hash_a2, vec![]).await; - get_membership(&mut virtual_overseer, 1.into(), candidate_hash_b, vec![]).await; - get_membership(&mut virtual_overseer, 2.into(), candidate_hash_c, vec![]).await; + // Introduce the same candidate multiple times. It'll return true but it won't be added. + // We'll check below that the candidate count remains 1. + for _ in 0..5 { + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()) + .await; + } virtual_overseer }); - assert_eq!(view.active_leaves.len(), 0); - assert_eq!(view.candidate_storage.len(), 0); + assert_eq!(view.active_leaves.len(), 1); + assert_eq!(view.candidate_storage.len(), 2); + assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (1, 1)); + assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); } -// Introduce a candidate to multiple forks, see how the membership is returned. #[test] -fn check_candidate_on_multiple_forks() { +fn fragment_chain_length_is_bounded() { let test_state = TestState::default(); let view = test_harness(|mut virtual_overseer| async move { // Leaf A @@ -768,31 +744,16 @@ fn check_candidate_on_multiple_forks() { (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), ], }; - // Leaf B - let leaf_b = TestLeaf { - number: 101, - hash: Hash::from_low_u64_be(131), - para_data: vec![ - (1.into(), PerParaData::new(99, HeadData(vec![3, 4, 5]))), - (2.into(), PerParaData::new(101, HeadData(vec![4, 5, 6]))), - ], - }; - // Leaf C - let leaf_c = TestLeaf { - number: 102, - hash: Hash::from_low_u64_be(132), - para_data: vec![ - (1.into(), PerParaData::new(102, HeadData(vec![5, 6, 7]))), - (2.into(), PerParaData::new(98, HeadData(vec![6, 7, 8]))), - ], - }; - // Activate leaves. 
- activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; - activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await; - activate_leaf(&mut virtual_overseer, &leaf_c, &test_state).await; + activate_leaf_with_params( + &mut virtual_overseer, + &leaf_a, + &test_state, + AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 3 }, + ) + .await; - // Candidate on leaf A. + // Candidates A, B and C form a chain. let (candidate_a, pvd_a) = make_candidate( leaf_a.hash, leaf_a.number, @@ -801,56 +762,59 @@ fn check_candidate_on_multiple_forks() { HeadData(vec![1]), test_state.validation_code_hash, ); - let candidate_hash_a = candidate_a.hash(); - let response_a = vec![(leaf_a.hash, vec![0])]; - - // Candidate on leaf B. let (candidate_b, pvd_b) = make_candidate( - leaf_b.hash, - leaf_b.number, + leaf_a.hash, + leaf_a.number, 1.into(), - HeadData(vec![3, 4, 5]), HeadData(vec![1]), + HeadData(vec![2]), test_state.validation_code_hash, ); - let candidate_hash_b = candidate_b.hash(); - let response_b = vec![(leaf_b.hash, vec![0])]; - - // Candidate on leaf C. let (candidate_c, pvd_c) = make_candidate( - leaf_c.hash, - leaf_c.number, + leaf_a.hash, + leaf_a.number, 1.into(), - HeadData(vec![5, 6, 7]), - HeadData(vec![1]), + HeadData(vec![2]), + HeadData(vec![3]), test_state.validation_code_hash, ); - let candidate_hash_c = candidate_c.hash(); - let response_c = vec![(leaf_c.hash, vec![0])]; - // Introduce candidates on all three leaves. - introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; - introduce_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; - introduce_candidate(&mut virtual_overseer, candidate_c.clone(), pvd_c).await; + // Introduce candidates A and B. Since max depth is 1, only these two will be allowed. + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()) + .await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b.clone()) + .await; + + // Back candidates. Otherwise, we cannot check membership with GetBackableCandidates. + back_candidate(&mut virtual_overseer, &candidate_a, candidate_a.hash()).await; + back_candidate(&mut virtual_overseer, &candidate_b, candidate_b.hash()).await; // Check candidate tree membership. - get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a, response_a).await; - get_membership(&mut virtual_overseer, 1.into(), candidate_hash_b, response_b).await; - get_membership(&mut virtual_overseer, 1.into(), candidate_hash_c, response_c).await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::default(), + 5, + vec![(candidate_a.hash(), leaf_a.hash), (candidate_b.hash(), leaf_a.hash)], + ) + .await; + + // Introducing C will fail. + introduce_seconded_candidate_failed(&mut virtual_overseer, candidate_c, pvd_c.clone()) + .await; virtual_overseer }); - assert_eq!(view.active_leaves.len(), 3); + assert_eq!(view.active_leaves.len(), 1); assert_eq!(view.candidate_storage.len(), 2); - // Three parents and three candidates on para 1. - assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (3, 3)); + assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (2, 2)); assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); } -// Backs some candidates and tests `GetBackableCandidates` when requesting a single candidate. 
#[test] -fn check_backable_query_single_candidate() { +fn unconnected_candidate_count_is_bounded() { let test_state = TestState::default(); let view = test_harness(|mut virtual_overseer| async move { // Leaf A @@ -862,54 +826,534 @@ fn check_backable_query_single_candidate() { (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), ], }; - // Activate leaves. - activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; + activate_leaf_with_params( + &mut virtual_overseer, + &leaf_a, + &test_state, + AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 3 }, + ) + .await; - // Candidate A + // Candidates A, B and C are all potential candidates but don't form a chain. let (candidate_a, pvd_a) = make_candidate( leaf_a.hash, leaf_a.number, 1.into(), - HeadData(vec![1, 2, 3]), HeadData(vec![1]), + HeadData(vec![2]), test_state.validation_code_hash, ); - let candidate_hash_a = candidate_a.hash(); - - // Candidate B - let (mut candidate_b, pvd_b) = make_candidate( + let (candidate_b, pvd_b) = make_candidate( leaf_a.hash, leaf_a.number, 1.into(), - HeadData(vec![1]), - HeadData(vec![2]), + HeadData(vec![3]), + HeadData(vec![4]), + test_state.validation_code_hash, + ); + let (candidate_c, pvd_c) = make_candidate( + leaf_a.hash, + leaf_a.number, + 1.into(), + HeadData(vec![4]), + HeadData(vec![5]), test_state.validation_code_hash, ); - // Set a field to make this candidate unique. - candidate_b.descriptor.para_head = Hash::from_low_u64_le(1000); - let candidate_hash_b = candidate_b.hash(); - - // Introduce candidates. - introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; - introduce_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; - // Should not get any backable candidates. - get_backable_candidates( + // Introduce candidates A and B. Although max depth is 1 (which should allow for two + // candidates), only 1 is allowed, because the last candidate must be a connected candidate. + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()) + .await; + introduce_seconded_candidate_failed( &mut virtual_overseer, - &leaf_a, + candidate_b.clone(), + pvd_b.clone(), + ) + .await; + + // Back candidates. Otherwise, we cannot check membership with GetBackableCandidates. + back_candidate(&mut virtual_overseer, &candidate_a, candidate_a.hash()).await; + + // Check candidate tree membership. Should be empty. + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::default(), + 5, + vec![], + ) + .await; + + // Introducing C will also fail. + introduce_seconded_candidate_failed(&mut virtual_overseer, candidate_c, pvd_c.clone()) + .await; + + virtual_overseer + }); + + assert_eq!(view.active_leaves.len(), 1); + assert_eq!(view.candidate_storage.len(), 2); + assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (1, 1)); + assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); +} + +// Send some candidates, check if the candidate won't be found once its relay parent leaves the +// view. 
+#[test] +fn introduce_candidate_parent_leaving_view() { + let test_state = TestState::default(); + let view = test_harness(|mut virtual_overseer| async move { + // Leaf A + let leaf_a = TestLeaf { + number: 100, + hash: Hash::from_low_u64_be(130), + para_data: vec![ + (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), + (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), + ], + }; + // Leaf B + let leaf_b = TestLeaf { + number: 101, + hash: Hash::from_low_u64_be(131), + para_data: vec![ + (1.into(), PerParaData::new(99, HeadData(vec![3, 4, 5]))), + (2.into(), PerParaData::new(101, HeadData(vec![4, 5, 6]))), + ], + }; + // Leaf C + let leaf_c = TestLeaf { + number: 102, + hash: Hash::from_low_u64_be(132), + para_data: vec![ + (1.into(), PerParaData::new(102, HeadData(vec![5, 6, 7]))), + (2.into(), PerParaData::new(98, HeadData(vec![6, 7, 8]))), + ], + }; + + // Activate leaves. + activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; + activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await; + activate_leaf(&mut virtual_overseer, &leaf_c, &test_state).await; + + // Candidate A1 + let (candidate_a1, pvd_a1) = make_candidate( + leaf_a.hash, + leaf_a.number, + 1.into(), + HeadData(vec![1, 2, 3]), + HeadData(vec![1]), + test_state.validation_code_hash, + ); + let candidate_hash_a1 = candidate_a1.hash(); + + // Candidate A2 + let (candidate_a2, pvd_a2) = make_candidate( + leaf_a.hash, + leaf_a.number, + 2.into(), + HeadData(vec![2, 3, 4]), + HeadData(vec![2]), + test_state.validation_code_hash, + ); + let candidate_hash_a2 = candidate_a2.hash(); + + // Candidate B + let (candidate_b, pvd_b) = make_candidate( + leaf_b.hash, + leaf_b.number, + 1.into(), + HeadData(vec![3, 4, 5]), + HeadData(vec![3]), + test_state.validation_code_hash, + ); + let candidate_hash_b = candidate_b.hash(); + let response_b = vec![(candidate_hash_b, leaf_b.hash)]; + + // Candidate C + let (candidate_c, pvd_c) = make_candidate( + leaf_c.hash, + leaf_c.number, + 2.into(), + HeadData(vec![6, 7, 8]), + HeadData(vec![4]), + test_state.validation_code_hash, + ); + let candidate_hash_c = candidate_c.hash(); + let response_c = vec![(candidate_hash_c, leaf_c.hash)]; + + // Introduce candidates. + introduce_seconded_candidate(&mut virtual_overseer, candidate_a1.clone(), pvd_a1).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_a2.clone(), pvd_a2).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_c.clone(), pvd_c).await; + + // Back candidates. Otherwise, we cannot check membership with GetBackableCandidates. + back_candidate(&mut virtual_overseer, &candidate_a1, candidate_hash_a1).await; + back_candidate(&mut virtual_overseer, &candidate_a2, candidate_hash_a2).await; + back_candidate(&mut virtual_overseer, &candidate_b, candidate_hash_b).await; + back_candidate(&mut virtual_overseer, &candidate_c, candidate_hash_c).await; + + // Deactivate leaf A. + deactivate_leaf(&mut virtual_overseer, leaf_a.hash).await; + + // Candidates A1 and A2 should be gone. Candidates B and C should remain. 
+ get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::default(), + 5, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 2.into(), + Ancestors::default(), + 5, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + 1.into(), + Ancestors::default(), + 5, + response_b, + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_c, + 2.into(), + Ancestors::default(), + 5, + response_c.clone(), + ) + .await; + + // Deactivate leaf B. + deactivate_leaf(&mut virtual_overseer, leaf_b.hash).await; + + // Candidate B should be gone, C should remain. + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::default(), + 5, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 2.into(), + Ancestors::default(), + 5, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + 1.into(), + Ancestors::default(), + 5, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_c, + 2.into(), + Ancestors::default(), + 5, + response_c, + ) + .await; + + // Deactivate leaf C. + deactivate_leaf(&mut virtual_overseer, leaf_c.hash).await; + + // Candidate C should be gone. + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::default(), + 5, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 2.into(), + Ancestors::default(), + 5, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + 1.into(), + Ancestors::default(), + 5, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_c, + 2.into(), + Ancestors::default(), + 5, + vec![], + ) + .await; + + virtual_overseer + }); + + assert_eq!(view.active_leaves.len(), 0); + assert_eq!(view.candidate_storage.len(), 0); +} + +// Introduce a candidate to multiple forks, see how the membership is returned. +#[test] +fn introduce_candidate_on_multiple_forks() { + let test_state = TestState::default(); + let view = test_harness(|mut virtual_overseer| async move { + // Leaf B + let leaf_b = TestLeaf { + number: 101, + hash: Hash::from_low_u64_be(131), + para_data: vec![ + (1.into(), PerParaData::new(99, HeadData(vec![1, 2, 3]))), + (2.into(), PerParaData::new(101, HeadData(vec![4, 5, 6]))), + ], + }; + // Leaf A + let leaf_a = TestLeaf { + number: 100, + hash: get_parent_hash(leaf_b.hash), + para_data: vec![ + (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), + (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), + ], + }; + + // Activate leaves. + activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; + activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await; + + // Candidate built on leaf A. + let (candidate_a, pvd_a) = make_candidate( + leaf_a.hash, + leaf_a.number, + 1.into(), + HeadData(vec![1, 2, 3]), + HeadData(vec![1]), + test_state.validation_code_hash, + ); + let candidate_hash_a = candidate_a.hash(); + let response_a = vec![(candidate_hash_a, leaf_a.hash)]; + + // Introduce candidate. Should be present on leaves B and C. + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; + back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await; + + // Check candidate tree membership. 
+ get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::default(), + 5, + response_a.clone(), + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + 1.into(), + Ancestors::default(), + 5, + response_a.clone(), + ) + .await; + + virtual_overseer + }); + + assert_eq!(view.active_leaves.len(), 2); + assert_eq!(view.candidate_storage.len(), 2); + assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (1, 1)); + assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); +} + +#[test] +fn unconnected_candidates_become_connected() { + let test_state = TestState::default(); + let view = test_harness(|mut virtual_overseer| async move { + // Leaf A + let leaf_a = TestLeaf { + number: 100, + hash: Hash::from_low_u64_be(130), + para_data: vec![ + (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), + (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), + ], + }; + // Activate leaves. + activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; + + // Candidates A, B, C and D all form a chain, but we'll first introduce A, C and D. + let (candidate_a, pvd_a) = make_candidate( + leaf_a.hash, + leaf_a.number, 1.into(), - vec![candidate_hash_a].into_iter().collect(), - 1, - vec![], + HeadData(vec![1, 2, 3]), + HeadData(vec![1]), + test_state.validation_code_hash, + ); + let (candidate_b, pvd_b) = make_candidate( + leaf_a.hash, + leaf_a.number, + 1.into(), + HeadData(vec![1]), + HeadData(vec![2]), + test_state.validation_code_hash, + ); + let (candidate_c, pvd_c) = make_candidate( + leaf_a.hash, + leaf_a.number, + 1.into(), + HeadData(vec![2]), + HeadData(vec![3]), + test_state.validation_code_hash, + ); + let (candidate_d, pvd_d) = make_candidate( + leaf_a.hash, + leaf_a.number, + 1.into(), + HeadData(vec![3]), + HeadData(vec![4]), + test_state.validation_code_hash, + ); + + // Introduce candidates A, C and D. + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()) + .await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_c.clone(), pvd_c.clone()) + .await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_d.clone(), pvd_d.clone()) + .await; + + // Back candidates. Otherwise, we cannot check membership with GetBackableCandidates. + back_candidate(&mut virtual_overseer, &candidate_a, candidate_a.hash()).await; + back_candidate(&mut virtual_overseer, &candidate_c, candidate_c.hash()).await; + back_candidate(&mut virtual_overseer, &candidate_d, candidate_d.hash()).await; + + // Check candidate tree membership. Only A should be returned. + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::default(), + 5, + vec![(candidate_a.hash(), leaf_a.hash)], + ) + .await; + + // Introduce C and check membership. Full chain should be returned. 
+ introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b.clone()) + .await; + back_candidate(&mut virtual_overseer, &candidate_b, candidate_b.hash()).await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::default(), + 5, + vec![ + (candidate_a.hash(), leaf_a.hash), + (candidate_b.hash(), leaf_a.hash), + (candidate_c.hash(), leaf_a.hash), + (candidate_d.hash(), leaf_a.hash), + ], ) .await; + + virtual_overseer + }); + + assert_eq!(view.active_leaves.len(), 1); + assert_eq!(view.candidate_storage.len(), 2); + assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (4, 4)); + assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); +} + +// Backs some candidates and tests `GetBackableCandidates` when requesting a single candidate. +#[test] +fn check_backable_query_single_candidate() { + let test_state = TestState::default(); + let view = test_harness(|mut virtual_overseer| async move { + // Leaf A + let leaf_a = TestLeaf { + number: 100, + hash: Hash::from_low_u64_be(130), + para_data: vec![ + (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), + (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), + ], + }; + + // Activate leaves. + activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; + + // Candidate A + let (candidate_a, pvd_a) = make_candidate( + leaf_a.hash, + leaf_a.number, + 1.into(), + HeadData(vec![1, 2, 3]), + HeadData(vec![1]), + test_state.validation_code_hash, + ); + let candidate_hash_a = candidate_a.hash(); + + // Candidate B + let (mut candidate_b, pvd_b) = make_candidate( + leaf_a.hash, + leaf_a.number, + 1.into(), + HeadData(vec![1]), + HeadData(vec![2]), + test_state.validation_code_hash, + ); + // Set a field to make this candidate unique. + candidate_b.descriptor.para_head = Hash::from_low_u64_le(1000); + let candidate_hash_b = candidate_b.hash(); + + // Introduce candidates. + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; + + // Should not get any backable candidates. get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), vec![candidate_hash_a].into_iter().collect(), - 0, + 1, vec![], ) .await; @@ -917,23 +1361,17 @@ fn check_backable_query_single_candidate() { &mut virtual_overseer, &leaf_a, 1.into(), - Ancestors::new(), + vec![candidate_hash_a].into_iter().collect(), 0, vec![], ) .await; - - // Second candidates. - second_candidate(&mut virtual_overseer, candidate_a.clone()).await; - second_candidate(&mut virtual_overseer, candidate_b.clone()).await; - - // Should not get any backable candidates. get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a].into_iter().collect(), - 1, + Ancestors::new(), + 0, vec![], ) .await; @@ -1019,392 +1457,327 @@ fn check_backable_query_multiple_candidates() { // Set a field to make this candidate unique. 
candidate.descriptor.para_head = Hash::from_low_u64_le($index); let candidate_hash = candidate.hash(); - introduce_candidate(&mut $virtual_overseer, candidate.clone(), pvd).await; - second_candidate(&mut $virtual_overseer, candidate.clone()).await; + introduce_seconded_candidate(&mut $virtual_overseer, candidate.clone(), pvd).await; back_candidate(&mut $virtual_overseer, &candidate, candidate_hash).await; (candidate, candidate_hash) }}; } - // Parachain 1 looks like this: - // +---A----+ - // | | - // +----B---+ C - // | | | | - // D E F H - // | | - // G I - // | - // J - { - let test_state = TestState::default(); - let view = test_harness(|mut virtual_overseer| async move { - // Leaf A - let leaf_a = TestLeaf { - number: 100, - hash: Hash::from_low_u64_be(130), - para_data: vec![ - (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), - (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), - ], - }; - - // Activate leaves. - activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; - - // Candidate A - let (candidate_a, pvd_a) = make_candidate( - leaf_a.hash, - leaf_a.number, - 1.into(), - HeadData(vec![1, 2, 3]), - HeadData(vec![1]), - test_state.validation_code_hash, - ); - let candidate_hash_a = candidate_a.hash(); - introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; - second_candidate(&mut virtual_overseer, candidate_a.clone()).await; - back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await; - - let (candidate_b, candidate_hash_b) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_a, 2); - let (candidate_c, candidate_hash_c) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_a, 3); - let (_candidate_d, candidate_hash_d) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_b, 4); - let (_candidate_e, candidate_hash_e) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_b, 5); - let (candidate_f, candidate_hash_f) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_b, 6); - let (_candidate_g, candidate_hash_g) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_f, 7); - let (candidate_h, candidate_hash_h) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_c, 8); - let (candidate_i, candidate_hash_i) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_h, 9); - let (_candidate_j, candidate_hash_j) = - make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_i, 10); - - // Should not get any backable candidates for the other para. - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 2.into(), - Ancestors::new(), - 1, - vec![], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 2.into(), - Ancestors::new(), - 5, - vec![], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 2.into(), - vec![candidate_hash_a].into_iter().collect(), - 1, - vec![], - ) - .await; - - // Test various scenarios with various counts. 
- - // empty required_path - { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - Ancestors::new(), - 1, - vec![(candidate_hash_a, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - Ancestors::new(), - 4, - vec![ - (candidate_hash_a, leaf_a.hash), - (candidate_hash_b, leaf_a.hash), - (candidate_hash_f, leaf_a.hash), - (candidate_hash_g, leaf_a.hash), - ], - ) - .await; - } - - // required path of 1 - { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a].into_iter().collect(), - 1, - vec![(candidate_hash_b, leaf_a.hash)], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a].into_iter().collect(), - 3, - vec![ - (candidate_hash_b, leaf_a.hash), - (candidate_hash_f, leaf_a.hash), - (candidate_hash_g, leaf_a.hash), - ], - ) - .await; + let test_state = TestState::default(); + let view = test_harness(|mut virtual_overseer| async move { + // Leaf A + let leaf_a = TestLeaf { + number: 100, + hash: Hash::from_low_u64_be(130), + para_data: vec![ + (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), + (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), + ], + }; - // If the requested count exceeds the largest chain, return the longest - // chain we can get. - for count in 5..10 { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a].into_iter().collect(), - count, - vec![ - (candidate_hash_c, leaf_a.hash), - (candidate_hash_h, leaf_a.hash), - (candidate_hash_i, leaf_a.hash), - (candidate_hash_j, leaf_a.hash), - ], - ) - .await; - } - } + // Activate leaves. + activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; - // required path of 2 and higher - { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a, candidate_hash_i, candidate_hash_h, candidate_hash_c] - .into_iter() - .collect(), - 1, - vec![(candidate_hash_j, leaf_a.hash)], - ) - .await; + // Candidate A + let (candidate_a, pvd_a) = make_candidate( + leaf_a.hash, + leaf_a.number, + 1.into(), + HeadData(vec![1, 2, 3]), + HeadData(vec![1]), + test_state.validation_code_hash, + ); + let candidate_hash_a = candidate_a.hash(); + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; + back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a, candidate_hash_b].into_iter().collect(), - 1, - vec![(candidate_hash_d, leaf_a.hash)], - ) - .await; + let (candidate_b, candidate_hash_b) = + make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_a, 2); + let (candidate_c, candidate_hash_c) = + make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_b, 3); + let (_candidate_d, candidate_hash_d) = + make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_c, 4); - // If the requested count exceeds the largest chain, return the longest - // chain we can get. - for count in 4..10 { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a, candidate_hash_c].into_iter().collect(), - count, - vec![ - (candidate_hash_h, leaf_a.hash), - (candidate_hash_i, leaf_a.hash), - (candidate_hash_j, leaf_a.hash), - ], - ) - .await; - } - } + // Should not get any backable candidates for the other para. 
+ get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 2.into(), + Ancestors::new(), + 1, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 2.into(), + Ancestors::new(), + 5, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 2.into(), + vec![candidate_hash_a].into_iter().collect(), + 1, + vec![], + ) + .await; - // No more candidates in any chain. - { - for count in 1..4 { - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a, candidate_hash_b, candidate_hash_e] - .into_iter() - .collect(), - count, - vec![], - ) - .await; - - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![ - candidate_hash_a, - candidate_hash_c, - candidate_hash_h, - candidate_hash_i, - candidate_hash_j, - ] - .into_iter() - .collect(), - count, - vec![], - ) - .await; - } - } + // Test various scenarios with various counts. - // Wrong paths. + // empty ancestors + { get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_b].into_iter().collect(), + Ancestors::new(), 1, vec![(candidate_hash_a, leaf_a.hash)], ) .await; + for count in 4..10 { + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::new(), + count, + vec![ + (candidate_hash_a, leaf_a.hash), + (candidate_hash_b, leaf_a.hash), + (candidate_hash_c, leaf_a.hash), + (candidate_hash_d, leaf_a.hash), + ], + ) + .await; + } + } + + // ancestors of size 1 + { get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_b, candidate_hash_f].into_iter().collect(), - 3, - vec![ - (candidate_hash_a, leaf_a.hash), - (candidate_hash_b, leaf_a.hash), - (candidate_hash_d, leaf_a.hash), - ], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a, candidate_hash_h].into_iter().collect(), - 4, - vec![ - (candidate_hash_c, leaf_a.hash), - (candidate_hash_h, leaf_a.hash), - (candidate_hash_i, leaf_a.hash), - (candidate_hash_j, leaf_a.hash), - ], + vec![candidate_hash_a].into_iter().collect(), + 1, + vec![(candidate_hash_b, leaf_a.hash)], ) .await; get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_e, candidate_hash_h].into_iter().collect(), + vec![candidate_hash_a].into_iter().collect(), 2, - vec![(candidate_hash_a, leaf_a.hash), (candidate_hash_b, leaf_a.hash)], + vec![(candidate_hash_b, leaf_a.hash), (candidate_hash_c, leaf_a.hash)], ) .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a, candidate_hash_c, candidate_hash_d].into_iter().collect(), - 2, - vec![(candidate_hash_h, leaf_a.hash), (candidate_hash_i, leaf_a.hash)], - ) - .await; + // If the requested count exceeds the largest chain, return the longest + // chain we can get. + for count in 3..10 { + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_a].into_iter().collect(), + count, + vec![ + (candidate_hash_b, leaf_a.hash), + (candidate_hash_c, leaf_a.hash), + (candidate_hash_d, leaf_a.hash), + ], + ) + .await; + } + } - // Parachain fork. + // ancestor count 2 and higher + { get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), vec![candidate_hash_a, candidate_hash_b, candidate_hash_c].into_iter().collect(), 1, - vec![], + vec![(candidate_hash_d, leaf_a.hash)], ) .await; - // Non-existent candidate. 
get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, CandidateHash(Hash::from_low_u64_be(100))] - .into_iter() - .collect(), - 2, - vec![(candidate_hash_b, leaf_a.hash), (candidate_hash_d, leaf_a.hash)], + vec![candidate_hash_a, candidate_hash_b].into_iter().collect(), + 1, + vec![(candidate_hash_c, leaf_a.hash)], ) .await; - // Requested count is zero. - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - Ancestors::new(), - 0, - vec![], - ) - .await; - get_backable_candidates( - &mut virtual_overseer, - &leaf_a, - 1.into(), - vec![candidate_hash_a].into_iter().collect(), - 0, - vec![], - ) - .await; + // If the requested count exceeds the largest chain, return the longest + // chain we can get. + for count in 3..10 { + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_a, candidate_hash_b].into_iter().collect(), + count, + vec![(candidate_hash_c, leaf_a.hash), (candidate_hash_d, leaf_a.hash)], + ) + .await; + } + } + + // No more candidates in the chain. + for count in 1..4 { get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), - vec![candidate_hash_a, candidate_hash_b].into_iter().collect(), - 0, + vec![candidate_hash_a, candidate_hash_b, candidate_hash_c, candidate_hash_d] + .into_iter() + .collect(), + count, vec![], ) .await; + } + + // Wrong paths. + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_b].into_iter().collect(), + 1, + vec![(candidate_hash_a, leaf_a.hash)], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_b, candidate_hash_c].into_iter().collect(), + 3, + vec![ + (candidate_hash_a, leaf_a.hash), + (candidate_hash_b, leaf_a.hash), + (candidate_hash_c, leaf_a.hash), + ], + ) + .await; + + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_a, candidate_hash_c, candidate_hash_d].into_iter().collect(), + 2, + vec![(candidate_hash_b, leaf_a.hash), (candidate_hash_c, leaf_a.hash)], + ) + .await; - virtual_overseer - }); + // Non-existent candidate. + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_a, CandidateHash(Hash::from_low_u64_be(100))] + .into_iter() + .collect(), + 2, + vec![(candidate_hash_b, leaf_a.hash), (candidate_hash_c, leaf_a.hash)], + ) + .await; - assert_eq!(view.active_leaves.len(), 1); - assert_eq!(view.candidate_storage.len(), 2); - // 10 candidates and 7 parents on para 1. - assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (7, 10)); - assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); - } + // Requested count is zero. + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::new(), + 0, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_a].into_iter().collect(), + 0, + vec![], + ) + .await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_a, candidate_hash_b].into_iter().collect(), + 0, + vec![], + ) + .await; + + virtual_overseer + }); + + assert_eq!(view.active_leaves.len(), 1); + assert_eq!(view.candidate_storage.len(), 2); + // 4 candidates on para 1. + assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (4, 4)); + assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); } -// Test depth query. 
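Editorial note (not part of the diff): the rewritten tests above query backable candidates against a per-para fragment chain rather than a tree. `GetBackableCandidates` takes a set of ancestor hashes plus a count and returns the candidates that follow the matched prefix of the chain, falling back to the start of the chain when the ancestors are not a prefix, and returning the longest remaining suffix when the count exceeds what is left. Below is a minimal, self-contained sketch of that lookup; the types and function are hypothetical stand-ins, not the subsystem's real API.

use std::collections::HashSet;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct CandidateHash(u64);

/// Candidates of one para ordered as a single chain (index 0 builds on the
/// para's current head). Returns up to `count` candidates that come after the
/// prefix of the chain covered by `ancestors`.
fn backable_after(
    chain: &[CandidateHash],
    ancestors: &HashSet<CandidateHash>,
    count: usize,
) -> Vec<CandidateHash> {
    // Walk the chain while it is covered by the supplied ancestors.
    let mut start = 0;
    while start < chain.len() && ancestors.contains(&chain[start]) {
        start += 1;
    }
    // Take at most `count` of the remaining candidates; if the requested count
    // exceeds what is left, this yields the longest suffix available.
    chain[start..].iter().copied().take(count).collect()
}

fn main() {
    let chain = [CandidateHash(1), CandidateHash(2), CandidateHash(3), CandidateHash(4)];
    let ancestors: HashSet<_> = [CandidateHash(1)].into_iter().collect();
    // With ancestor {1} and count 2, the next backable candidates are 2 and 3.
    assert_eq!(backable_after(&chain, &ancestors, 2), vec![CandidateHash(2), CandidateHash(3)]);
    // Ancestors that are not a prefix ({3}) fall back to the start of the chain.
    let wrong: HashSet<_> = [CandidateHash(3)].into_iter().collect();
    assert_eq!(backable_after(&chain, &wrong, 1), vec![CandidateHash(1)]);
    // A requested count of zero returns nothing.
    assert!(backable_after(&chain, &ancestors, 0).is_empty());
}

This mirrors the assertions in the test above (wrong paths, non-existent ancestors, exhausted chains, zero counts) under those simplifying assumptions.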
+// Test hypothetical membership query. #[test] -fn check_hypothetical_frontier_query() { +fn check_hypothetical_membership_query() { let test_state = TestState::default(); let view = test_harness(|mut virtual_overseer| async move { + // Leaf B + let leaf_b = TestLeaf { + number: 101, + hash: Hash::from_low_u64_be(131), + para_data: vec![ + (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), + (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), + ], + }; // Leaf A let leaf_a = TestLeaf { number: 100, - hash: Hash::from_low_u64_be(130), + hash: get_parent_hash(leaf_b.hash), para_data: vec![ - (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), + (1.into(), PerParaData::new(98, HeadData(vec![1, 2, 3]))), (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), ], }; // Activate leaves. - activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; + activate_leaf_with_params( + &mut virtual_overseer, + &leaf_a, + &test_state, + AsyncBackingParams { allowed_ancestry_len: 3, max_candidate_depth: 1 }, + ) + .await; + activate_leaf_with_params( + &mut virtual_overseer, + &leaf_b, + &test_state, + AsyncBackingParams { allowed_ancestry_len: 3, max_candidate_depth: 1 }, + ) + .await; + + // Candidates will be valid on both leaves. // Candidate A. let (candidate_a, pvd_a) = make_candidate( @@ -1415,7 +1788,6 @@ fn check_hypothetical_frontier_query() { HeadData(vec![1]), test_state.validation_code_hash, ); - let candidate_hash_a = candidate_a.hash(); // Candidate B. let (candidate_b, pvd_b) = make_candidate( @@ -1426,7 +1798,6 @@ fn check_hypothetical_frontier_query() { HeadData(vec![2]), test_state.validation_code_hash, ); - let candidate_hash_b = candidate_b.hash(); // Candidate C. let (candidate_c, pvd_c) = make_candidate( @@ -1437,127 +1808,99 @@ fn check_hypothetical_frontier_query() { HeadData(vec![3]), test_state.validation_code_hash, ); - let candidate_hash_c = candidate_c.hash(); - // Get hypothetical frontier of candidate A before adding it. - get_hypothetical_frontier( - &mut virtual_overseer, - candidate_hash_a, - candidate_a.clone(), - pvd_a.clone(), - leaf_a.hash, - false, - vec![0], - ) - .await; - // Should work with `backed_in_path_only: true`, too. - get_hypothetical_frontier( - &mut virtual_overseer, - candidate_hash_a, - candidate_a.clone(), - pvd_a.clone(), - leaf_a.hash, - true, - vec![0], - ) - .await; + // Get hypothetical membership of candidates before adding candidate A. + // Candidate A can be added directly, candidates B and C are potential candidates. + for (candidate, pvd) in [ + (candidate_a.clone(), pvd_a.clone()), + (candidate_b.clone(), pvd_b.clone()), + (candidate_c.clone(), pvd_c.clone()), + ] { + get_hypothetical_membership( + &mut virtual_overseer, + candidate.hash(), + candidate, + pvd, + vec![leaf_a.hash, leaf_b.hash], + ) + .await; + } // Add candidate A. - introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()).await; - - // Get frontier of candidate A after adding it. - get_hypothetical_frontier( - &mut virtual_overseer, - candidate_hash_a, - candidate_a.clone(), - pvd_a.clone(), - leaf_a.hash, - false, - vec![0], - ) - .await; - - // Get hypothetical frontier of candidate B before adding it. - get_hypothetical_frontier( - &mut virtual_overseer, - candidate_hash_b, - candidate_b.clone(), - pvd_b.clone(), - leaf_a.hash, - false, - vec![1], - ) - .await; - - // Add candidate B. 
- introduce_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b.clone()).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()) + .await; - // Get frontier of candidate B after adding it. - get_hypothetical_frontier( - &mut virtual_overseer, - candidate_hash_b, - candidate_b, - pvd_b.clone(), - leaf_a.hash, - false, - vec![1], - ) - .await; + // Get membership of candidates after adding A. C is not a potential candidate because we + // may only add one more candidate, which must be a connected candidate. + for (candidate, pvd) in + [(candidate_a.clone(), pvd_a.clone()), (candidate_b.clone(), pvd_b.clone())] + { + get_hypothetical_membership( + &mut virtual_overseer, + candidate.hash(), + candidate, + pvd, + vec![leaf_a.hash, leaf_b.hash], + ) + .await; + } - // Get hypothetical frontier of candidate C before adding it. - get_hypothetical_frontier( - &mut virtual_overseer, - candidate_hash_c, - candidate_c.clone(), - pvd_c.clone(), - leaf_a.hash, - false, - vec![2], - ) - .await; - // Should be empty with `backed_in_path_only` because we haven't backed anything. - get_hypothetical_frontier( + get_hypothetical_membership( &mut virtual_overseer, - candidate_hash_c, + candidate_c.hash(), candidate_c.clone(), pvd_c.clone(), - leaf_a.hash, - true, vec![], ) .await; - // Add candidate C. - introduce_candidate(&mut virtual_overseer, candidate_c.clone(), pvd_c.clone()).await; + // Candidate D has invalid relay parent. + let (candidate_d, pvd_d) = make_candidate( + Hash::from_low_u64_be(200), + leaf_a.number, + 1.into(), + HeadData(vec![1]), + HeadData(vec![2]), + test_state.validation_code_hash, + ); + introduce_seconded_candidate_failed(&mut virtual_overseer, candidate_d, pvd_d).await; - // Get frontier of candidate C after adding it. - get_hypothetical_frontier( - &mut virtual_overseer, - candidate_hash_c, - candidate_c.clone(), - pvd_c.clone(), - leaf_a.hash, - false, - vec![2], - ) - .await; - // Should be empty with `backed_in_path_only` because we haven't backed anything. - get_hypothetical_frontier( + // Add candidate B. + introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b.clone()) + .await; + + // Get membership of candidates after adding B. + for (candidate, pvd) in + [(candidate_a.clone(), pvd_a.clone()), (candidate_b.clone(), pvd_b.clone())] + { + get_hypothetical_membership( + &mut virtual_overseer, + candidate.hash(), + candidate, + pvd, + vec![leaf_a.hash, leaf_b.hash], + ) + .await; + } + + get_hypothetical_membership( &mut virtual_overseer, - candidate_hash_c, + candidate_c.hash(), candidate_c.clone(), pvd_c.clone(), - leaf_a.hash, - true, vec![], ) .await; + // Add candidate C. It will fail because we have enough candidates for the configured depth. + introduce_seconded_candidate_failed(&mut virtual_overseer, candidate_c, pvd_c).await; + virtual_overseer }); - assert_eq!(view.active_leaves.len(), 1); + assert_eq!(view.active_leaves.len(), 2); assert_eq!(view.candidate_storage.len(), 2); + assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (2, 2)); } #[test] @@ -1618,7 +1961,8 @@ fn check_pvd_query() { .await; // Add candidate A. - introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()) + .await; back_candidate(&mut virtual_overseer, &candidate_a, candidate_a.hash()).await; // Get pvd of candidate A after adding it. 
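Editorial note (not part of the diff): `check_hypothetical_membership_query` above activates its leaves with `max_candidate_depth: 1`, so a para's chain may hold at most `max_candidate_depth + 1` candidates; once A and B are in, C is rejected. A tiny sketch of that rule, with the function name invented for illustration:

/// Whether a candidate may be appended at `depth` (its 0-based position in the
/// chain) under the configured `max_candidate_depth`.
fn can_add_at_depth(depth: usize, max_candidate_depth: usize) -> bool {
    depth <= max_candidate_depth
}

fn main() {
    let max_candidate_depth = 1;
    assert!(can_add_at_depth(0, max_candidate_depth));  // candidate A
    assert!(can_add_at_depth(1, max_candidate_depth));  // candidate B
    assert!(!can_add_at_depth(2, max_candidate_depth)); // candidate C is rejected
}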
@@ -1642,7 +1986,7 @@ fn check_pvd_query() { .await; // Add candidate B. - introduce_candidate(&mut virtual_overseer, candidate_b, pvd_b.clone()).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_b, pvd_b.clone()).await; // Get pvd of candidate B after adding it. get_pvd( @@ -1665,7 +2009,7 @@ fn check_pvd_query() { .await; // Add candidate C. - introduce_candidate(&mut virtual_overseer, candidate_c, pvd_c.clone()).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_c, pvd_c.clone()).await; // Get pvd of candidate C after adding it. get_pvd( @@ -1849,8 +2193,7 @@ fn persists_pending_availability_candidate() { ); let candidate_hash_b = candidate_b.hash(); - introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; - second_candidate(&mut virtual_overseer, candidate_a.clone()).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await; let candidate_a_pending_av = CandidatePendingAvailability { @@ -1874,8 +2217,7 @@ fn persists_pending_availability_candidate() { }; activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await; - introduce_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; - second_candidate(&mut virtual_overseer, candidate_b.clone()).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; back_candidate(&mut virtual_overseer, &candidate_b, candidate_hash_b).await; get_backable_candidates( @@ -1942,8 +2284,7 @@ fn backwards_compatible() { ); let candidate_hash_a = candidate_a.hash(); - introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; - second_candidate(&mut virtual_overseer, candidate_a.clone()).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await; get_backable_candidates( diff --git a/polkadot/node/core/provisioner/Cargo.toml b/polkadot/node/core/provisioner/Cargo.toml index ec1a4abb3ece..1cd16e6599a7 100644 --- a/polkadot/node/core/provisioner/Cargo.toml +++ b/polkadot/node/core/provisioner/Cargo.toml @@ -19,7 +19,7 @@ polkadot-node-primitives = { path = "../../primitives" } polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } futures-timer = "3.0.2" -fatality = "0.0.6" +fatality = "0.1.0" schnellru = "0.2.1" [dev-dependencies] diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index 5cfcb96dc2bc..fa16b38d28bd 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -877,7 +877,7 @@ async fn get_block_number_under_construction( } /// Requests backable candidates from Prospective Parachains based on -/// the given ancestors in the fragment tree. The ancestors may not be ordered. +/// the given ancestors in the fragment chain. The ancestors may not be ordered. 
async fn get_backable_candidates( relay_parent: Hash, para_id: ParaId, diff --git a/polkadot/node/network/availability-distribution/Cargo.toml b/polkadot/node/network/availability-distribution/Cargo.toml index b5636203f166..344389d224f5 100644 --- a/polkadot/node/network/availability-distribution/Cargo.toml +++ b/polkadot/node/network/availability-distribution/Cargo.toml @@ -25,7 +25,7 @@ thiserror = { workspace = true } rand = "0.8.5" derive_more = "0.99.17" schnellru = "0.2.1" -fatality = "0.0.6" +fatality = "0.1.0" [dev-dependencies] polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } diff --git a/polkadot/node/network/availability-recovery/Cargo.toml b/polkadot/node/network/availability-recovery/Cargo.toml index dd0e0c432345..b7f817d85235 100644 --- a/polkadot/node/network/availability-recovery/Cargo.toml +++ b/polkadot/node/network/availability-recovery/Cargo.toml @@ -14,7 +14,7 @@ futures = "0.3.30" tokio = "1.37" schnellru = "0.2.1" rand = "0.8.5" -fatality = "0.0.6" +fatality = "0.1.0" thiserror = { workspace = true } async-trait = "0.1.79" gum = { package = "tracing-gum", path = "../../gum" } diff --git a/polkadot/node/network/bridge/Cargo.toml b/polkadot/node/network/bridge/Cargo.toml index 9c2423e7e581..8ab571fbe7c3 100644 --- a/polkadot/node/network/bridge/Cargo.toml +++ b/polkadot/node/network/bridge/Cargo.toml @@ -24,7 +24,7 @@ polkadot-node-subsystem = { path = "../../subsystem" } polkadot-overseer = { path = "../../overseer" } parking_lot = "0.12.1" bytes = "1" -fatality = "0.0.6" +fatality = "0.1.0" thiserror = { workspace = true } [dev-dependencies] diff --git a/polkadot/node/network/collator-protocol/Cargo.toml b/polkadot/node/network/collator-protocol/Cargo.toml index 2c7135742f56..201615ac9ffb 100644 --- a/polkadot/node/network/collator-protocol/Cargo.toml +++ b/polkadot/node/network/collator-protocol/Cargo.toml @@ -24,7 +24,7 @@ polkadot-node-network-protocol = { path = "../protocol" } polkadot-node-primitives = { path = "../../primitives" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } polkadot-node-subsystem = { path = "../../subsystem" } -fatality = "0.0.6" +fatality = "0.1.0" thiserror = { workspace = true } tokio-util = "0.7.1" @@ -32,6 +32,7 @@ tokio-util = "0.7.1" log = { workspace = true, default-features = true } env_logger = "0.11" assert_matches = "1.4.0" +rstest = "0.18.2" sp-core = { path = "../../../../substrate/primitives/core", features = ["std"] } sp-keyring = { path = "../../../../substrate/primitives/keyring" } diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs index 879caf923285..f227e3855fa0 100644 --- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs @@ -261,7 +261,7 @@ struct State { /// `active_leaves`, the opposite doesn't hold true. /// /// Relay-chain blocks which don't support prospective parachains are - /// never included in the fragment trees of active leaves which do. In + /// never included in the fragment chains of active leaves which do. In /// particular, this means that if a given relay parent belongs to implicit /// ancestry of some active leaf, then it does support prospective parachains. implicit_view: ImplicitView, @@ -531,7 +531,7 @@ async fn distribute_collation( // Otherwise, it should be present in allowed ancestry of some leaf. 
// // It's collation-producer responsibility to verify that there exists - // a hypothetical membership in a fragment tree for candidate. + // a hypothetical membership in a fragment chain for the candidate. let interested = state .peer_data @@ -894,7 +894,7 @@ async fn process_msg( ); } }, - msg @ (ReportCollator(..) | Invalid(..) | Seconded(..) | Backed { .. }) => { + msg @ (ReportCollator(..) | Invalid(..) | Seconded(..)) => { gum::warn!( target: LOG_TARGET, "{:?} message is not expected on the collator side of the protocol", diff --git a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs index 8c3889a35548..001df1fb3da9 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/collation.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/collation.rs @@ -121,19 +121,15 @@ impl PendingCollation { } } -/// v2 or v3 advertisement that was rejected by the backing -/// subsystem. Validator may fetch it later if its fragment -/// membership gets recognized before relay parent goes out of view. -#[derive(Debug, Clone)] -pub struct BlockedAdvertisement { - /// Peer that advertised the collation. - pub peer_id: PeerId, - /// Collator id. - pub collator_id: CollatorId, - /// The relay-parent of the candidate. - pub candidate_relay_parent: Hash, - /// Hash of the candidate. - pub candidate_hash: CandidateHash, +/// An identifier for a fetched collation that was blocked from being seconded because we don't have +/// access to the parent's HeadData. Can be retried once the candidate outputting this head data is +/// seconded. +#[derive(Debug, Clone, Eq, PartialEq, Hash)] +pub struct BlockedCollationId { + /// Para id. + pub para_id: ParaId, + /// Hash of the parent head data. + pub parent_head_data_hash: Hash, } /// Performs a sanity check between advertised and fetched collations. diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs index ac8c060827f5..9f037a983e51 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs @@ -59,15 +59,17 @@ use polkadot_primitives::{ use crate::error::{Error, FetchError, Result, SecondingError}; +use self::collation::BlockedCollationId; + use super::{modify_reputation, tick_stream, LOG_TARGET}; mod collation; mod metrics; use collation::{ - fetched_collation_sanity_check, BlockedAdvertisement, CollationEvent, CollationFetchError, - CollationFetchRequest, CollationStatus, Collations, FetchedCollation, PendingCollation, - PendingCollationFetch, ProspectiveCandidate, + fetched_collation_sanity_check, CollationEvent, CollationFetchError, CollationFetchRequest, + CollationStatus, Collations, FetchedCollation, PendingCollation, PendingCollationFetch, + ProspectiveCandidate, }; #[cfg(test)] @@ -388,7 +390,7 @@ struct State { /// `active_leaves`, the opposite doesn't hold true. /// /// Relay-chain blocks which don't support prospective parachains are - /// never included in the fragment trees of active leaves which do. In + /// never included in the fragment chains of active leaves which do. In /// particular, this means that if a given relay parent belongs to implicit /// ancestry of some active leaf, then it does support prospective parachains. implicit_view: ImplicitView, @@ -421,14 +423,6 @@ struct State { /// Span per relay parent. 
span_per_relay_parent: HashMap, - /// Advertisements that were accepted as valid by collator protocol but rejected by backing. - /// - /// It's only legal to fetch collations that are either built on top of the root - /// of some fragment tree or have a parent node which represents backed candidate. - /// Otherwise, a validator will keep such advertisement in the memory and re-trigger - /// requests to backing on new backed candidates and activations. - blocked_advertisements: HashMap<(ParaId, Hash), Vec>, - /// When a timer in this `FuturesUnordered` triggers, we should dequeue the next request /// attempt in the corresponding `collations_per_relay_parent`. /// @@ -441,6 +435,12 @@ struct State { /// on validation. fetched_candidates: HashMap, + /// Collations which we haven't been able to second due to their parent not being known by + /// prospective-parachains. Mapped from the paraid and parent_head_hash to the fetched + /// collation data. Only needed for async backing. For elastic scaling, the fetched collation + /// must contain the full parent head data. + blocked_from_seconding: HashMap>, + /// Aggregated reputation change reputation: ReputationAggregator, } @@ -953,6 +953,8 @@ enum AdvertisementError { /// Advertisement is invalid. #[allow(dead_code)] Invalid(InsertAdvertisementError), + /// Seconding not allowed by backing subsystem + BlockedByBacking, } impl AdvertisementError { @@ -962,7 +964,7 @@ impl AdvertisementError { InvalidAssignment => Some(COST_WRONG_PARA), ProtocolMisuse => Some(COST_PROTOCOL_MISUSE), RelayParentUnknown | UndeclaredCollator | Invalid(_) => Some(COST_UNEXPECTED_MESSAGE), - UnknownPeer | SecondedLimitReached => None, + UnknownPeer | SecondedLimitReached | BlockedByBacking => None, } } } @@ -1001,57 +1003,55 @@ where }) } -/// Checks whether any of the advertisements are unblocked and attempts to fetch them. 
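Editorial note (not part of the diff): the validator side drops the blocked-advertisement bookkeeping removed below in favour of the `blocked_from_seconding` map, keyed by `BlockedCollationId { para_id, parent_head_data_hash }`. A fetched collation whose parent head data is unknown is parked under that key and retried once a candidate producing the matching head data is seconded. A rough sketch of that bookkeeping follows, using simplified stand-in types rather than the real subsystem state.

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct ParaId(u32);
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct Hash32(u64);

/// Key under which collations waiting on their parent's head data are parked.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct BlockedCollationId {
    para_id: ParaId,
    parent_head_data_hash: Hash32,
}

/// Simplified placeholder for a fetched-but-not-yet-seconded collation.
#[derive(Debug)]
struct PendingFetch {
    candidate: &'static str,
}

#[derive(Default)]
struct State {
    blocked_from_seconding: HashMap<BlockedCollationId, Vec<PendingFetch>>,
}

impl State {
    /// Park a fetched collation whose parent head data is not yet known.
    fn block(&mut self, id: BlockedCollationId, fetch: PendingFetch) {
        self.blocked_from_seconding.entry(id).or_default().push(fetch);
    }

    /// Once a candidate producing `head_data_hash` for `para_id` is seconded,
    /// take every collation that was waiting on it so seconding can be retried.
    fn unblock(&mut self, para_id: ParaId, head_data_hash: Hash32) -> Vec<PendingFetch> {
        self.blocked_from_seconding
            .remove(&BlockedCollationId { para_id, parent_head_data_hash: head_data_hash })
            .unwrap_or_default()
    }
}

fn main() {
    let mut state = State::default();
    let id = BlockedCollationId { para_id: ParaId(1), parent_head_data_hash: Hash32(0xb) };
    state.block(id, PendingFetch { candidate: "B" });
    // Seconding the parent, whose output head data hashes to 0xb, releases B.
    let unblocked = state.unblock(ParaId(1), Hash32(0xb));
    assert_eq!(unblocked.len(), 1);
}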
-async fn request_unblocked_collations(sender: &mut Sender, state: &mut State, blocked: I) -where - Sender: CollatorProtocolSenderTrait, - I: IntoIterator)>, -{ - let _timer = state.metrics.time_request_unblocked_collations(); +// Try seconding any collations which were waiting on the validation of their parent +#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)] +async fn second_unblocked_collations( + ctx: &mut Context, + state: &mut State, + para_id: ParaId, + head_data: HeadData, + head_data_hash: Hash, +) { + if let Some(unblocked_collations) = state + .blocked_from_seconding + .remove(&BlockedCollationId { para_id, parent_head_data_hash: head_data_hash }) + { + if !unblocked_collations.is_empty() { + gum::debug!( + target: LOG_TARGET, + "Candidate outputting head data with hash {} unblocked {} collations for seconding.", + head_data_hash, + unblocked_collations.len() + ); + } - for (key, mut value) in blocked { - let (para_id, para_head) = key; - let blocked = std::mem::take(&mut value); - for blocked in blocked { - let is_seconding_allowed = can_second( - sender, - para_id, - blocked.candidate_relay_parent, - blocked.candidate_hash, - para_head, - ) - .await; + for mut unblocked_collation in unblocked_collations { + unblocked_collation.maybe_parent_head_data = Some(head_data.clone()); + let peer_id = unblocked_collation.collation_event.pending_collation.peer_id; + let relay_parent = unblocked_collation.candidate_receipt.descriptor.relay_parent; - if is_seconding_allowed { - let result = enqueue_collation( - sender, - state, - blocked.candidate_relay_parent, - para_id, - blocked.peer_id, - blocked.collator_id, - Some((blocked.candidate_hash, para_head)), - ) - .await; - if let Err(fetch_error) = result { - gum::debug!( - target: LOG_TARGET, - relay_parent = ?blocked.candidate_relay_parent, - para_id = ?para_id, - peer_id = ?blocked.peer_id, - error = %fetch_error, - "Failed to request unblocked collation", - ); + if let Err(err) = kick_off_seconding(ctx, state, unblocked_collation).await { + gum::warn!( + target: LOG_TARGET, + ?relay_parent, + ?para_id, + ?peer_id, + error = %err, + "Seconding aborted due to an error", + ); + + if err.is_malicious() { + // Report malicious peer. + modify_reputation( + &mut state.reputation, + ctx.sender(), + peer_id, + COST_REPORT_BAD, + ) + .await; } - } else { - // Keep the advertisement. - value.push(blocked); } } - - if !value.is_empty() { - state.blocked_advertisements.insert(key, value); - } } } @@ -1110,10 +1110,10 @@ where } if let Some((candidate_hash, parent_head_data_hash)) = prospective_candidate { - // We need to queue the advertisement if we are not allowed to second it. + // Check if backing subsystem allows to second this candidate. // - // This is also only important when async backing is enabled. - let queue_advertisement = relay_parent_mode.is_enabled() && + // This is also only important when async backing or elastic scaling is enabled. 
+ let seconding_not_allowed = relay_parent_mode.is_enabled() && !can_second( sender, collator_para_id, @@ -1123,26 +1123,8 @@ where ) .await; - if queue_advertisement { - gum::debug!( - target: LOG_TARGET, - relay_parent = ?relay_parent, - para_id = ?para_id, - ?candidate_hash, - "Seconding is not allowed by backing, queueing advertisement", - ); - state - .blocked_advertisements - .entry((collator_para_id, parent_head_data_hash)) - .or_default() - .push(BlockedAdvertisement { - peer_id, - collator_id: collator_id.clone(), - candidate_relay_parent: relay_parent, - candidate_hash, - }); - - return Ok(()) + if seconding_not_allowed { + return Err(AdvertisementError::BlockedByBacking) } } @@ -1358,20 +1340,17 @@ where state.span_per_relay_parent.remove(&removed); } } - // Remove blocked advertisements that left the view. - state.blocked_advertisements.retain(|_, ads| { - ads.retain(|ad| state.per_relay_parent.contains_key(&ad.candidate_relay_parent)); - !ads.is_empty() + // Remove blocked seconding requests that left the view. + state.blocked_from_seconding.retain(|_, collations| { + collations.retain(|collation| { + state + .per_relay_parent + .contains_key(&collation.candidate_receipt.descriptor.relay_parent) + }); + + !collations.is_empty() }); - // Re-trigger previously failed requests again. - // - // This makes sense for several reasons, one simple example: if a hypothetical depth - // for an advertisement initially exceeded the limit and the candidate was included - // in a new leaf. - let maybe_unblocked = std::mem::take(&mut state.blocked_advertisements); - // Could be optimized to only sanity check new leaves. - request_unblocked_collations(sender, state, maybe_unblocked).await; for (peer_id, peer_data) in state.peer_data.iter_mut() { peer_data.prune_old_advertisements( @@ -1508,6 +1487,8 @@ async fn process_msg( return }, }; + let output_head_data = receipt.commitments.head_data.clone(); + let output_head_data_hash = receipt.descriptor.para_head; let fetched_collation = FetchedCollation::from(&receipt.to_plain()); if let Some(CollationEvent { collator_id, pending_collation, .. }) = state.fetched_candidates.remove(&fetched_collation) @@ -1536,6 +1517,17 @@ async fn process_msg( rp_state.collations.status = CollationStatus::Seconded; rp_state.collations.note_seconded(); } + + // See if we've unblocked other collations for seconding. + second_unblocked_collations( + ctx, + state, + fetched_collation.para_id, + output_head_data, + output_head_data_hash, + ) + .await; + // If async backing is enabled, make an attempt to fetch next collation. let maybe_candidate_hash = prospective_candidate.as_ref().map(ProspectiveCandidate::candidate_hash); @@ -1554,11 +1546,13 @@ async fn process_msg( ); } }, - Backed { para_id, para_head } => { - let maybe_unblocked = state.blocked_advertisements.remove_entry(&(para_id, para_head)); - request_unblocked_collations(ctx.sender(), state, maybe_unblocked).await; - }, Invalid(parent, candidate_receipt) => { + // Remove collations which were blocked from seconding and had this candidate as parent. 
+ state.blocked_from_seconding.remove(&BlockedCollationId { + para_id: candidate_receipt.descriptor.para_id, + parent_head_data_hash: candidate_receipt.descriptor.para_head, + }); + let fetched_collation = FetchedCollation::from(&candidate_receipt); let candidate_hash = fetched_collation.candidate_hash; let id = match state.fetched_candidates.entry(fetched_collation) { @@ -1668,29 +1662,45 @@ async fn run_inner( }; let CollationEvent {collator_id, pending_collation, .. } = res.collation_event.clone(); - if let Err(err) = kick_off_seconding(&mut ctx, &mut state, res).await { - gum::warn!( - target: LOG_TARGET, - relay_parent = ?pending_collation.relay_parent, - para_id = ?pending_collation.para_id, - peer_id = ?pending_collation.peer_id, - error = %err, - "Seconding aborted due to an error", - ); - if err.is_malicious() { - // Report malicious peer. - modify_reputation(&mut state.reputation, ctx.sender(), pending_collation.peer_id, COST_REPORT_BAD).await; + match kick_off_seconding(&mut ctx, &mut state, res).await { + Err(err) => { + gum::warn!( + target: LOG_TARGET, + relay_parent = ?pending_collation.relay_parent, + para_id = ?pending_collation.para_id, + peer_id = ?pending_collation.peer_id, + error = %err, + "Seconding aborted due to an error", + ); + + if err.is_malicious() { + // Report malicious peer. + modify_reputation(&mut state.reputation, ctx.sender(), pending_collation.peer_id, COST_REPORT_BAD).await; + } + let maybe_candidate_hash = + pending_collation.prospective_candidate.as_ref().map(ProspectiveCandidate::candidate_hash); + dequeue_next_collation_and_fetch( + &mut ctx, + &mut state, + pending_collation.relay_parent, + (collator_id, maybe_candidate_hash), + ) + .await; + }, + Ok(false) => { + // No hard error occurred, but we can try fetching another collation. + let maybe_candidate_hash = + pending_collation.prospective_candidate.as_ref().map(ProspectiveCandidate::candidate_hash); + dequeue_next_collation_and_fetch( + &mut ctx, + &mut state, + pending_collation.relay_parent, + (collator_id, maybe_candidate_hash), + ) + .await; } - let maybe_candidate_hash = - pending_collation.prospective_candidate.as_ref().map(ProspectiveCandidate::candidate_hash); - dequeue_next_collation_and_fetch( - &mut ctx, - &mut state, - pending_collation.relay_parent, - (collator_id, maybe_candidate_hash), - ) - .await; + Ok(true) => {} } } res = state.collation_fetch_timeouts.select_next_some() => { @@ -1800,12 +1810,13 @@ where } /// Handle a fetched collation result. +/// Returns whether or not seconding has begun. 
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)] async fn kick_off_seconding( ctx: &mut Context, state: &mut State, PendingCollationFetch { mut collation_event, candidate_receipt, pov, maybe_parent_head_data }: PendingCollationFetch, -) -> std::result::Result<(), SecondingError> { +) -> std::result::Result { let pending_collation = collation_event.pending_collation; let relay_parent = pending_collation.relay_parent; @@ -1818,7 +1829,7 @@ async fn kick_off_seconding( relay_parent = ?relay_parent, "Fetched collation for a parent out of view", ); - return Ok(()) + return Ok(false) }, }; let collations = &mut per_relay_parent.collations; @@ -1828,7 +1839,7 @@ async fn kick_off_seconding( collation_event.pending_collation.commitments_hash = Some(candidate_receipt.commitments_hash); - let (maybe_pvd, maybe_parent_head_and_hash) = match ( + let (maybe_pvd, maybe_parent_head, maybe_parent_head_hash) = match ( collation_event.collator_protocol_version, collation_event.pending_collation.prospective_candidate, ) { @@ -1844,7 +1855,7 @@ async fn kick_off_seconding( ) .await?; - (pvd, maybe_parent_head_data.map(|head_data| (head_data, parent_head_data_hash))) + (pvd, maybe_parent_head_data, Some(parent_head_data_hash)) }, // Support V2 collators without async backing enabled. (CollationVersion::V2, Some(_)) | (CollationVersion::V1, _) => { @@ -1854,20 +1865,60 @@ async fn kick_off_seconding( candidate_receipt.descriptor().para_id, ) .await?; - (pvd, None) + ( + Some(pvd.ok_or(SecondingError::PersistedValidationDataNotFound)?), + maybe_parent_head_data, + None, + ) }, _ => { // `handle_advertisement` checks for protocol mismatch. - return Ok(()) + return Ok(false) + }, + }; + + let pvd = match (maybe_pvd, maybe_parent_head.clone(), maybe_parent_head_hash) { + (Some(pvd), _, _) => pvd, + (None, None, Some(parent_head_data_hash)) => { + // In this case, the collator did not supply the head data and neither could + // prospective-parachains. We add this to the blocked_from_seconding collection + // until we second its parent. + let blocked_collation = PendingCollationFetch { + collation_event, + candidate_receipt, + pov, + maybe_parent_head_data: None, + }; + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?blocked_collation.candidate_receipt.hash(), + relay_parent = ?blocked_collation.candidate_receipt.descriptor.relay_parent, + "Collation having parent head data hash {} is blocked from seconding. Waiting on its parent to be validated.", + parent_head_data_hash + ); + state + .blocked_from_seconding + .entry(BlockedCollationId { + para_id: blocked_collation.candidate_receipt.descriptor.para_id, + parent_head_data_hash, + }) + .or_insert_with(Vec::new) + .push(blocked_collation); + + return Ok(false) + }, + (None, _, _) => { + // Even though we already have the parent head data, the pvd fetching failed. We + // don't need to wait for seconding another collation outputting this head data. 
+ return Err(SecondingError::PersistedValidationDataNotFound) }, }; - let pvd = maybe_pvd.ok_or(SecondingError::PersistedValidationDataNotFound)?; fetched_collation_sanity_check( &collation_event.pending_collation, &candidate_receipt, &pvd, - maybe_parent_head_and_hash, + maybe_parent_head.and_then(|head| maybe_parent_head_hash.map(|hash| (head, hash))), )?; ctx.send_message(CandidateBackingMessage::Second( @@ -1882,7 +1933,7 @@ async fn kick_off_seconding( collations.status = CollationStatus::WaitingOnValidation; entry.insert(collation_event); - Ok(()) + Ok(true) } else { Err(SecondingError::Duplicate) } diff --git a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs index eaa725f2642e..785690121dad 100644 --- a/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs +++ b/polkadot/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs @@ -23,6 +23,7 @@ use polkadot_primitives::{ AsyncBackingParams, BlockNumber, CandidateCommitments, CommittedCandidateReceipt, Header, SigningContext, ValidatorId, }; +use rstest::rstest; const ASYNC_BACKING_PARAMETERS: AsyncBackingParams = AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; @@ -262,6 +263,48 @@ async fn assert_collation_seconded( } } +/// Assert that the next message is a persisted validation data request and respond with the +/// supplied PVD. +async fn assert_persisted_validation_data( + virtual_overseer: &mut VirtualOverseer, + version: CollationVersion, + expected_relay_parent: Hash, + expected_para_id: ParaId, + expected_parent_head_data_hash: Option, + pvd: Option, +) { + // Depending on relay parent mode pvd will be either requested + // from the Runtime API or Prospective Parachains. + let msg = overseer_recv(virtual_overseer).await; + match version { + CollationVersion::V1 => assert_matches!( + msg, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + hash, + RuntimeApiRequest::PersistedValidationData(para_id, assumption, tx), + )) => { + assert_eq!(expected_relay_parent, hash); + assert_eq!(expected_para_id, para_id); + assert_eq!(OccupiedCoreAssumption::Free, assumption); + tx.send(Ok(pvd)).unwrap(); + } + ), + CollationVersion::V2 => assert_matches!( + msg, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::GetProspectiveValidationData(request, tx), + ) => { + assert_eq!(expected_relay_parent, request.candidate_relay_parent); + assert_eq!(expected_para_id, request.para_id); + if let Some(expected_parent_head_data_hash) = expected_parent_head_data_hash { + assert_eq!(expected_parent_head_data_hash, request.parent_head_data.hash()); + } + tx.send(pvd).unwrap(); + } + ), + } +} + #[test] fn v1_advertisement_accepted_and_seconded() { let test_state = TestState::default(); @@ -946,56 +989,73 @@ fn advertisement_spam_protection() { }); } -#[test] -fn backed_candidate_unblocks_advertisements() { +#[rstest] +#[case(true)] +#[case(false)] +fn child_blocked_from_seconding_by_parent(#[case] valid_parent: bool) { let test_state = TestState::default(); test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { - let TestHarness { mut virtual_overseer, .. 
} = test_harness; + let TestHarness { mut virtual_overseer, keystore } = test_harness; - let pair_a = CollatorPair::generate().0; - let pair_b = CollatorPair::generate().0; + let pair = CollatorPair::generate().0; + // Grandparent of head `a`. let head_b = Hash::from_low_u64_be(128); let head_b_num: u32 = 2; - let head_c = get_parent_hash(head_b); // Grandparent of head `b`. - // Group rotation frequency is 1 by default, at `d` we're assigned + // Group rotation frequency is 1 by default, at `c` we're assigned // to the first para. - let head_d = get_parent_hash(head_c); + let head_c = Hash::from_low_u64_be(130); // Activated leaf is `b`, but the collation will be based on `c`. update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; let peer_a = PeerId::random(); - let peer_b = PeerId::random(); - // Accept both collators from the implicit view. connect_and_declare_collator( &mut virtual_overseer, peer_a, - pair_a.clone(), + pair.clone(), test_state.chain_ids[0], CollationVersion::V2, ) .await; - connect_and_declare_collator( - &mut virtual_overseer, - peer_b, - pair_b.clone(), - test_state.chain_ids[1], - CollationVersion::V2, - ) - .await; - let candidate_hash = CandidateHash::default(); - let parent_head_data_hash = Hash::zero(); + // Candidate A transitions from head data 0 to 1. + // Candidate B transitions from head data 1 to 2. + + // Candidate B is advertised and fetched before candidate A. + + let mut candidate_b = dummy_candidate_receipt_bad_sig(head_c, Some(Default::default())); + candidate_b.descriptor.para_id = test_state.chain_ids[0]; + candidate_b.descriptor.para_head = HeadData(vec![2]).hash(); + candidate_b.descriptor.persisted_validation_data_hash = + PersistedValidationData:: { + parent_head: HeadData(vec![1]), + relay_parent_number: 5, + max_pov_size: 1024, + relay_parent_storage_root: Default::default(), + } + .hash(); + let candidate_b_commitments = CandidateCommitments { + head_data: HeadData(vec![2]), + horizontal_messages: Default::default(), + upward_messages: Default::default(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }; + candidate_b.commitments_hash = candidate_b_commitments.hash(); + + let candidate_b_hash = candidate_b.hash(); + advertise_collation( &mut virtual_overseer, - peer_b, + peer_a, head_c, - Some((candidate_hash, parent_head_data_hash)), + Some((candidate_b_hash, HeadData(vec![1]).hash())), ) .await; assert_matches!( @@ -1003,40 +1063,73 @@ fn backed_candidate_unblocks_advertisements() { AllMessages::CandidateBacking( CandidateBackingMessage::CanSecond(request, tx), ) => { - assert_eq!(request.candidate_hash, candidate_hash); - assert_eq!(request.candidate_para_id, test_state.chain_ids[1]); - assert_eq!(request.parent_head_data_hash, parent_head_data_hash); - // Reject it. - tx.send(false).expect("receiving side should be alive"); + assert_eq!(request.candidate_hash, candidate_b_hash); + assert_eq!(request.candidate_para_id, test_state.chain_ids[0]); + assert_eq!(request.parent_head_data_hash, HeadData(vec![1]).hash()); + tx.send(true).expect("receiving side should be alive"); } ); - // Advertise with different para. - advertise_collation( + let response_channel = assert_fetch_collation_request( &mut virtual_overseer, - peer_a, - head_d, // Note different relay parent. 
- Some((candidate_hash, parent_head_data_hash)), + head_c, + test_state.chain_ids[0], + Some(candidate_b_hash), ) .await; - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::CandidateBacking( - CandidateBackingMessage::CanSecond(request, tx), - ) => { - assert_eq!(request.candidate_hash, candidate_hash); - assert_eq!(request.candidate_para_id, test_state.chain_ids[0]); - assert_eq!(request.parent_head_data_hash, parent_head_data_hash); - tx.send(false).expect("receiving side should be alive"); + + response_channel + .send(Ok(( + request_v2::CollationFetchingResponse::Collation( + candidate_b.clone(), + PoV { block_data: BlockData(vec![1]) }, + ) + .encode(), + ProtocolName::from(""), + ))) + .expect("Sending response should succeed"); + + // Persisted validation data of candidate B is not found. + assert_persisted_validation_data( + &mut virtual_overseer, + CollationVersion::V2, + head_c, + test_state.chain_ids[0], + Some(HeadData(vec![1]).hash()), + None, + ) + .await; + + // Now advertise, fetch and validate candidate A, which is the parent of B. + + let mut candidate_a = dummy_candidate_receipt_bad_sig(head_c, Some(Default::default())); + candidate_a.descriptor.para_id = test_state.chain_ids[0]; + candidate_a.descriptor.para_head = HeadData(vec![1]).hash(); + candidate_a.descriptor.persisted_validation_data_hash = + PersistedValidationData:: { + parent_head: HeadData(vec![0]), + relay_parent_number: 5, + max_pov_size: 1024, + relay_parent_storage_root: Default::default(), } - ); + .hash(); + let candidate_a_commitments = CandidateCommitments { + head_data: HeadData(vec![1]), + horizontal_messages: Default::default(), + upward_messages: Default::default(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }; + candidate_a.commitments_hash = candidate_a_commitments.hash(); + + let candidate_a_hash = candidate_a.hash(); - overseer_send( + advertise_collation( &mut virtual_overseer, - CollatorProtocolMessage::Backed { - para_id: test_state.chain_ids[0], - para_head: parent_head_data_hash, - }, + peer_a, + head_c, + Some((candidate_a_hash, HeadData(vec![0]).hash())), ) .await; assert_matches!( @@ -1044,174 +1137,155 @@ fn backed_candidate_unblocks_advertisements() { AllMessages::CandidateBacking( CandidateBackingMessage::CanSecond(request, tx), ) => { - assert_eq!(request.candidate_hash, candidate_hash); + assert_eq!(request.candidate_hash, candidate_a_hash); assert_eq!(request.candidate_para_id, test_state.chain_ids[0]); - assert_eq!(request.parent_head_data_hash, parent_head_data_hash); + assert_eq!(request.parent_head_data_hash, HeadData(vec![0]).hash()); tx.send(true).expect("receiving side should be alive"); } ); - assert_fetch_collation_request( + + let response_channel = assert_fetch_collation_request( &mut virtual_overseer, - head_d, + head_c, test_state.chain_ids[0], - Some(candidate_hash), + Some(candidate_a_hash), ) .await; - virtual_overseer - }); -} -#[test] -fn active_leave_unblocks_advertisements() { - let mut test_state = TestState::default(); - test_state.group_rotation_info.group_rotation_frequency = 100; + response_channel + .send(Ok(( + request_v2::CollationFetchingResponse::Collation( + candidate_a.clone(), + PoV { block_data: BlockData(vec![2]) }, + ) + .encode(), + ProtocolName::from(""), + ))) + .expect("Sending response should succeed"); - test_harness(ReputationAggregator::new(|_| true), |test_harness| async move { - let TestHarness { mut virtual_overseer, .. 
} = test_harness; + assert_persisted_validation_data( + &mut virtual_overseer, + CollationVersion::V2, + head_c, + test_state.chain_ids[0], + Some(HeadData(vec![0]).hash()), + Some(PersistedValidationData:: { + parent_head: HeadData(vec![0]), + relay_parent_number: 5, + max_pov_size: 1024, + relay_parent_storage_root: Default::default(), + }), + ) + .await; - let head_b = Hash::from_low_u64_be(128); - let head_b_num: u32 = 0; + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::CandidateBacking(CandidateBackingMessage::Second( + relay_parent, + candidate_receipt, + received_pvd, + incoming_pov, + )) => { + assert_eq!(head_c, relay_parent); + assert_eq!(test_state.chain_ids[0], candidate_receipt.descriptor.para_id); + assert_eq!(PoV { block_data: BlockData(vec![2]) }, incoming_pov); + assert_eq!(PersistedValidationData:: { + parent_head: HeadData(vec![0]), + relay_parent_number: 5, + max_pov_size: 1024, + relay_parent_storage_root: Default::default(), + }, received_pvd); + candidate_receipt + } + ); - update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; + // If candidate A is valid, proceed with seconding B. + if valid_parent { + send_seconded_statement( + &mut virtual_overseer, + keystore.clone(), + &CommittedCandidateReceipt { + descriptor: candidate_a.descriptor, + commitments: candidate_a_commitments, + }, + ) + .await; - let peers: Vec = (0..3).map(|_| CollatorPair::generate().0).collect(); - let peer_ids: Vec = (0..3).map(|_| PeerId::random()).collect(); - let candidates: Vec = - (0u8..3).map(|i| CandidateHash(Hash::repeat_byte(i))).collect(); + assert_collation_seconded(&mut virtual_overseer, head_c, peer_a, CollationVersion::V2) + .await; - for (peer, peer_id) in peers.iter().zip(&peer_ids) { - connect_and_declare_collator( + // Now that candidate A has been seconded, candidate B can be seconded as well. + + assert_persisted_validation_data( &mut virtual_overseer, - *peer_id, - peer.clone(), - test_state.chain_ids[0], CollationVersion::V2, + head_c, + test_state.chain_ids[0], + Some(HeadData(vec![1]).hash()), + Some(PersistedValidationData:: { + parent_head: HeadData(vec![1]), + relay_parent_number: 5, + max_pov_size: 1024, + relay_parent_storage_root: Default::default(), + }), ) .await; - } - let parent_head_data_hash = Hash::zero(); - for (peer, candidate) in peer_ids.iter().zip(&candidates).take(2) { - advertise_collation( + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::CandidateBacking(CandidateBackingMessage::Second( + relay_parent, + candidate_receipt, + received_pvd, + incoming_pov, + )) => { + assert_eq!(head_c, relay_parent); + assert_eq!(test_state.chain_ids[0], candidate_receipt.descriptor.para_id); + assert_eq!(PoV { block_data: BlockData(vec![1]) }, incoming_pov); + assert_eq!(PersistedValidationData:: { + parent_head: HeadData(vec![1]), + relay_parent_number: 5, + max_pov_size: 1024, + relay_parent_storage_root: Default::default(), + }, received_pvd); + candidate_receipt + } + ); + + send_seconded_statement( &mut virtual_overseer, - *peer, - head_b, - Some((*candidate, parent_head_data_hash)), + keystore.clone(), + &CommittedCandidateReceipt { + descriptor: candidate_b.descriptor, + commitments: candidate_b_commitments, + }, + ) + .await; + + assert_collation_seconded(&mut virtual_overseer, head_c, peer_a, CollationVersion::V2) + .await; + } else { + // If candidate A is invalid, B won't be seconded. 
+ overseer_send( + &mut virtual_overseer, + CollatorProtocolMessage::Invalid(head_c, candidate_a), ) .await; assert_matches!( overseer_recv(&mut virtual_overseer).await, - AllMessages::CandidateBacking( - CandidateBackingMessage::CanSecond(request, tx), + AllMessages::NetworkBridgeTx( + NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(peer, rep)), ) => { - assert_eq!(request.candidate_hash, *candidate); - assert_eq!(request.candidate_para_id, test_state.chain_ids[0]); - assert_eq!(request.parent_head_data_hash, parent_head_data_hash); - // Send false. - tx.send(false).expect("receiving side should be alive"); + assert_eq!(peer, peer_a); + assert_eq!(rep.value, COST_REPORT_BAD.cost_or_benefit()); } ); } - let head_c = Hash::from_low_u64_be(127); - let head_c_num: u32 = 1; - - let next_overseer_message = - update_view(&mut virtual_overseer, &test_state, vec![(head_c, head_c_num)], 1) - .await - .expect("should've sent request to backing"); - - // Unblock first request. - assert_matches!( - next_overseer_message, - AllMessages::CandidateBacking( - CandidateBackingMessage::CanSecond(request, tx), - ) => { - assert_eq!(request.candidate_hash, candidates[0]); - assert_eq!(request.candidate_para_id, test_state.chain_ids[0]); - assert_eq!(request.parent_head_data_hash, parent_head_data_hash); - tx.send(true).expect("receiving side should be alive"); - } - ); - - assert_fetch_collation_request( - &mut virtual_overseer, - head_b, - test_state.chain_ids[0], - Some(candidates[0]), - ) - .await; - - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::CandidateBacking( - CandidateBackingMessage::CanSecond(request, tx), - ) => { - assert_eq!(request.candidate_hash, candidates[1]); - assert_eq!(request.candidate_para_id, test_state.chain_ids[0]); - assert_eq!(request.parent_head_data_hash, parent_head_data_hash); - tx.send(false).expect("receiving side should be alive"); - } - ); - - // Collation request was discarded. test_helpers::Yield::new().await; assert_matches!(virtual_overseer.recv().now_or_never(), None); - advertise_collation( - &mut virtual_overseer, - peer_ids[2], - head_c, - Some((candidates[2], parent_head_data_hash)), - ) - .await; - - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::CandidateBacking( - CandidateBackingMessage::CanSecond(request, tx), - ) => { - assert_eq!(request.candidate_hash, candidates[2]); - tx.send(false).expect("receiving side should be alive"); - } - ); - - let head_d = Hash::from_low_u64_be(126); - let head_d_num: u32 = 2; - - let next_overseer_message = - update_view(&mut virtual_overseer, &test_state, vec![(head_d, head_d_num)], 1) - .await - .expect("should've sent request to backing"); - - // Reject 2, accept 3. 
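The rewritten test above exercises collations arriving out of order: candidate B is advertised and fetched before its parent A, may only be seconded once A has been seconded, and is dropped (with the advertising peer reported via `COST_REPORT_BAD`) if A turns out to be invalid. A minimal, self-contained sketch of that bookkeeping is below; all type and function names are illustrative stand-ins rather than the collator-protocol subsystem's actual API.

```rust
use std::collections::HashMap;

// Illustrative stand-ins; the real subsystem uses polkadot primitive types.
type ParaId = u32;
type HeadDataHash = [u8; 32];
type PeerId = u64;
type CandidateHash = [u8; 32];

#[derive(Debug)]
struct BlockedCollation {
    peer: PeerId,
    candidate_hash: CandidateHash,
}

/// Collations advertised on top of a parent head we have not seconded yet,
/// keyed by (para id, parent head-data hash).
#[derive(Default)]
struct BlockedCollations {
    by_parent: HashMap<(ParaId, HeadDataHash), Vec<BlockedCollation>>,
}

impl BlockedCollations {
    /// Park a collation until a seconded candidate produces its parent head data.
    fn block(&mut self, para: ParaId, parent_head: HeadDataHash, collation: BlockedCollation) {
        self.by_parent.entry((para, parent_head)).or_default().push(collation);
    }

    /// A candidate producing `output_head` was seconded: its children can now be
    /// fetched and seconded in turn.
    fn on_parent_seconded(&mut self, para: ParaId, output_head: HeadDataHash) -> Vec<BlockedCollation> {
        self.by_parent.remove(&(para, output_head)).unwrap_or_default()
    }

    /// The prospective parent turned out invalid: drop its children and return the
    /// peers that advertised them so they can be reported.
    fn on_parent_invalidated(&mut self, para: ParaId, output_head: HeadDataHash) -> Vec<PeerId> {
        self.by_parent
            .remove(&(para, output_head))
            .unwrap_or_default()
            .into_iter()
            .map(|c| c.peer)
            .collect()
    }
}

fn main() {
    let mut blocked = BlockedCollations::default();
    let parent_head = [1u8; 32];

    // Candidate B (built on head data [1]) arrives before its parent A and is parked.
    blocked.block(0, parent_head, BlockedCollation { peer: 7, candidate_hash: [2u8; 32] });

    // Once A is seconded and produces that head data, B becomes eligible for seconding.
    assert_eq!(blocked.on_parent_seconded(0, parent_head).len(), 1);

    // Had A been found invalid instead, B would be dropped and peer 7 reported.
    assert!(blocked.on_parent_invalidated(0, parent_head).is_empty());
}
```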
- assert_matches!( - next_overseer_message, - AllMessages::CandidateBacking( - CandidateBackingMessage::CanSecond(request, tx), - ) => { - assert_eq!(request.candidate_hash, candidates[1]); - tx.send(false).expect("receiving side should be alive"); - } - ); - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::CandidateBacking( - CandidateBackingMessage::CanSecond(request, tx), - ) => { - assert_eq!(request.candidate_hash, candidates[2]); - tx.send(true).expect("receiving side should be alive"); - } - ); - assert_fetch_collation_request( - &mut virtual_overseer, - head_c, - test_state.chain_ids[0], - Some(candidates[2]), - ) - .await; - virtual_overseer }); } diff --git a/polkadot/node/network/dispute-distribution/Cargo.toml b/polkadot/node/network/dispute-distribution/Cargo.toml index ff9c302c7314..7d9a719d8525 100644 --- a/polkadot/node/network/dispute-distribution/Cargo.toml +++ b/polkadot/node/network/dispute-distribution/Cargo.toml @@ -25,7 +25,7 @@ sc-network = { path = "../../../../substrate/client/network" } sp-application-crypto = { path = "../../../../substrate/primitives/application-crypto" } sp-keystore = { path = "../../../../substrate/primitives/keystore" } thiserror = { workspace = true } -fatality = "0.0.6" +fatality = "0.1.0" schnellru = "0.2.1" indexmap = "2.0.0" diff --git a/polkadot/node/network/protocol/Cargo.toml b/polkadot/node/network/protocol/Cargo.toml index 0408e6737911..29975fc735f2 100644 --- a/polkadot/node/network/protocol/Cargo.toml +++ b/polkadot/node/network/protocol/Cargo.toml @@ -24,7 +24,7 @@ sp-runtime = { path = "../../../../substrate/primitives/runtime" } strum = { version = "0.26.2", features = ["derive"] } futures = "0.3.30" thiserror = { workspace = true } -fatality = "0.0.6" +fatality = "0.1.0" rand = "0.8" derive_more = "0.99" gum = { package = "tracing-gum", path = "../../gum" } diff --git a/polkadot/node/network/statement-distribution/Cargo.toml b/polkadot/node/network/statement-distribution/Cargo.toml index d8ae031cbf36..b5cfeaa0c1ed 100644 --- a/polkadot/node/network/statement-distribution/Cargo.toml +++ b/polkadot/node/network/statement-distribution/Cargo.toml @@ -24,7 +24,7 @@ arrayvec = "0.7.4" indexmap = "2.0.0" parity-scale-codec = { version = "3.6.1", default-features = false, features = ["derive"] } thiserror = { workspace = true } -fatality = "0.0.6" +fatality = "0.1.0" bitvec = "1" [dev-dependencies] diff --git a/polkadot/node/network/statement-distribution/src/v2/candidates.rs b/polkadot/node/network/statement-distribution/src/v2/candidates.rs index ad56ad4a2365..a4f2455c2840 100644 --- a/polkadot/node/network/statement-distribution/src/v2/candidates.rs +++ b/polkadot/node/network/statement-distribution/src/v2/candidates.rs @@ -243,12 +243,12 @@ impl Candidates { /// Whether statements from a candidate are importable. /// /// This is only true when the candidate is known, confirmed, - /// and is importable in a fragment tree. + /// and is importable in a fragment chain. pub fn is_importable(&self, candidate_hash: &CandidateHash) -> bool { self.get_confirmed(candidate_hash).map_or(false, |c| c.is_importable(None)) } - /// Note that a candidate is importable in a fragment tree indicated by the given + /// Note that a candidate is importable in a fragment chain indicated by the given /// leaf hash. 
pub fn note_importable_under(&mut self, candidate: &HypotheticalCandidate, leaf_hash: Hash) { match candidate { diff --git a/polkadot/node/network/statement-distribution/src/v2/grid.rs b/polkadot/node/network/statement-distribution/src/v2/grid.rs index 24d846c840e0..b6e4163090c4 100644 --- a/polkadot/node/network/statement-distribution/src/v2/grid.rs +++ b/polkadot/node/network/statement-distribution/src/v2/grid.rs @@ -46,10 +46,8 @@ //! - Request/response for the candidate + votes. //! - Ignore if they are inconsistent with the manifest. //! - A malicious backing group is capable of producing an unbounded number of backed candidates. -//! - We request the candidate only if the candidate has a hypothetical depth in any of our -//! fragment trees, and: -//! - the seconding validators have not seconded any other candidates at that depth in any of -//! those fragment trees +//! - We request the candidate only if the candidate is a hypothetical member in any of our +//! fragment chains, and: //! - All members of the group attempt to circulate all statements (in compact form) from the rest //! of the group on candidates that have already been backed. //! - They do this via the grid topology. diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index 58e1f350fdc3..292d7e02f4b2 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -37,7 +37,7 @@ use polkadot_node_primitives::{ use polkadot_node_subsystem::{ messages::{ network_bridge_event::NewGossipTopology, CandidateBackingMessage, HypotheticalCandidate, - HypotheticalFrontierRequest, NetworkBridgeEvent, NetworkBridgeTxMessage, + HypotheticalMembershipRequest, NetworkBridgeEvent, NetworkBridgeTxMessage, ProspectiveParachainsMessage, }, overseer, ActivatedLeaf, @@ -719,7 +719,7 @@ pub(crate) async fn handle_active_leaves_update( } } - new_leaf_fragment_tree_updates(ctx, state, activated.hash).await; + new_leaf_fragment_chain_updates(ctx, state, activated.hash).await; Ok(()) } @@ -2182,7 +2182,7 @@ async fn determine_groups_per_para( } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn fragment_tree_update_inner( +async fn fragment_chain_update_inner( ctx: &mut Context, state: &mut State, active_leaf_hash: Option, @@ -2196,31 +2196,34 @@ async fn fragment_tree_update_inner( }; // 2. find out which are in the frontier - let frontier = { + gum::debug!( + target: LOG_TARGET, + "Calling getHypotheticalMembership from statement distribution" + ); + let candidate_memberships = { let (tx, rx) = oneshot::channel(); - ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalFrontier( - HypotheticalFrontierRequest { + ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalMembership( + HypotheticalMembershipRequest { candidates: hypotheticals, - fragment_tree_relay_parent: active_leaf_hash, - backed_in_path_only: false, + fragment_chain_relay_parent: active_leaf_hash, }, tx, )) .await; match rx.await { - Ok(frontier) => frontier, + Ok(candidate_memberships) => candidate_memberships, Err(oneshot::Canceled) => return, } }; // 3. note that they are importable under a given leaf hash. 
- for (hypo, membership) in frontier { - // skip parablocks outside of the frontier + for (hypo, membership) in candidate_memberships { + // skip parablocks which aren't potential candidates if membership.is_empty() { continue } - for (leaf_hash, _) in membership { + for leaf_hash in membership { state.candidates.note_importable_under(&hypo, leaf_hash); } @@ -2264,31 +2267,31 @@ async fn fragment_tree_update_inner( } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn new_leaf_fragment_tree_updates( +async fn new_leaf_fragment_chain_updates( ctx: &mut Context, state: &mut State, leaf_hash: Hash, ) { - fragment_tree_update_inner(ctx, state, Some(leaf_hash), None, None).await + fragment_chain_update_inner(ctx, state, Some(leaf_hash), None, None).await } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn prospective_backed_notification_fragment_tree_updates( +async fn prospective_backed_notification_fragment_chain_updates( ctx: &mut Context, state: &mut State, para_id: ParaId, para_head: Hash, ) { - fragment_tree_update_inner(ctx, state, None, Some((para_head, para_id)), None).await + fragment_chain_update_inner(ctx, state, None, Some((para_head, para_id)), None).await } #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn new_confirmed_candidate_fragment_tree_updates( +async fn new_confirmed_candidate_fragment_chain_updates( ctx: &mut Context, state: &mut State, candidate: HypotheticalCandidate, ) { - fragment_tree_update_inner(ctx, state, None, None, Some(vec![candidate])).await + fragment_chain_update_inner(ctx, state, None, None, Some(vec![candidate])).await } struct ManifestImportSuccess<'a> { @@ -2831,7 +2834,7 @@ pub(crate) async fn handle_backed_candidate_message( .await; // Search for children of the backed candidate to request. - prospective_backed_notification_fragment_tree_updates( + prospective_backed_notification_fragment_chain_updates( ctx, state, confirmed.para_id(), @@ -2922,7 +2925,8 @@ async fn apply_post_confirmation( post_confirmation.hypothetical.relay_parent(), ) .await; - new_confirmed_candidate_fragment_tree_updates(ctx, state, post_confirmation.hypothetical).await; + new_confirmed_candidate_fragment_chain_updates(ctx, state, post_confirmation.hypothetical) + .await; } /// Dispatch pending requests for candidate data & statements. @@ -3147,8 +3151,8 @@ pub(crate) async fn handle_response( let confirmed = state.candidates.get_confirmed(&candidate_hash).expect("just confirmed; qed"); - // Although the candidate is confirmed, it isn't yet on the - // hypothetical frontier of the fragment tree. Later, when it is, + // Although the candidate is confirmed, it isn't yet a + // hypothetical member of the fragment chain. Later, when it is, // we will import statements. if !confirmed.is_importable(None) { return diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs index 4fb033e08ce3..fe51f953e244 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/cluster.rs @@ -111,8 +111,8 @@ fn share_seconded_circulated_to_cluster() { ); // sharing a `Seconded` message confirms a candidate, which leads to new - // fragment tree updates. - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + // fragment chain updates. 
+ answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; overseer }); @@ -509,7 +509,7 @@ fn seconded_statement_leads_to_request() { if p == peer_a && r == BENEFIT_VALID_RESPONSE.into() => { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; overseer }); @@ -583,7 +583,7 @@ fn cluster_statements_shared_seconded_first() { .await; // result of new confirmed candidate. - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; overseer .send(FromOrchestra::Communication { @@ -717,8 +717,8 @@ fn cluster_accounts_for_implicit_view() { ); // sharing a `Seconded` message confirms a candidate, which leads to new - // fragment tree updates. - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + // fragment chain updates. + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; // activate new leaf, which has relay-parent in implicit view. let next_relay_parent = Hash::repeat_byte(2); @@ -855,7 +855,7 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() { ); } - answer_expected_hypothetical_depth_request( + answer_expected_hypothetical_membership_request( &mut overseer, vec![( HypotheticalCandidate::Complete { @@ -863,7 +863,7 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() { receipt: Arc::new(candidate.clone()), persisted_validation_data: pvd.clone(), }, - vec![(relay_parent, vec![0])], + vec![relay_parent], )], ) .await; @@ -978,7 +978,7 @@ fn cluster_messages_imported_after_new_leaf_importable_check() { ); } - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; let next_relay_parent = Hash::repeat_byte(2); let mut next_test_leaf = state.make_dummy_leaf(next_relay_parent); @@ -996,7 +996,7 @@ fn cluster_messages_imported_after_new_leaf_importable_check() { receipt: Arc::new(candidate.clone()), persisted_validation_data: pvd.clone(), }, - vec![(relay_parent, vec![0])], + vec![relay_parent], )], ) .await; @@ -1113,7 +1113,7 @@ fn ensure_seconding_limit_is_respected() { AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Candidate 2. @@ -1139,7 +1139,7 @@ fn ensure_seconding_limit_is_respected() { AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Send first statement from peer A. 
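These cluster-test changes all route through a helper that answers the subsystem's hypothetical-membership query. A condensed, self-contained sketch of how such a test-side responder can work, using a plain std channel in place of the overseer plumbing and placeholder types throughout:

```rust
use std::sync::mpsc;

// Illustrative stand-ins; the real tests use overseer messages and oneshot channels.
type CandidateHash = u64;
type LeafHash = u64;
type HypotheticalMembership = Vec<LeafHash>;

struct HypotheticalMembershipRequest {
    candidates: Vec<CandidateHash>,
    fragment_chain_relay_parent: Option<LeafHash>,
    response_sender: mpsc::Sender<Vec<(CandidateHash, HypotheticalMembership)>>,
}

/// Test-side responder: receive the subsystem's next membership query and answer it
/// with the caller-provided memberships, in the spirit of
/// `answer_expected_hypothetical_membership_request`.
fn answer_membership_request(
    requests: &mpsc::Receiver<HypotheticalMembershipRequest>,
    responses: Vec<(CandidateHash, HypotheticalMembership)>,
) {
    let req = requests.recv().expect("subsystem should have sent a request");
    // The standalone helper answers requests that are not tied to a single leaf.
    assert_eq!(req.fragment_chain_relay_parent, None);
    // Every candidate we answer for must actually have been asked about.
    for (candidate, _) in &responses {
        assert!(req.candidates.contains(candidate), "unexpected candidate in response");
    }
    req.response_sender.send(responses).expect("requesting side should be alive");
}

fn main() {
    let (req_tx, req_rx) = mpsc::channel();
    let (resp_tx, resp_rx) = mpsc::channel();

    // "Subsystem" side: ask about candidate 1 under any active leaf.
    req_tx
        .send(HypotheticalMembershipRequest {
            candidates: vec![1],
            fragment_chain_relay_parent: None,
            response_sender: resp_tx,
        })
        .unwrap();

    // Test side: answer that candidate 1 could be added under leaf 10.
    answer_membership_request(&req_rx, vec![(1, vec![10])]);

    let expected: Vec<(CandidateHash, HypotheticalMembership)> = vec![(1, vec![10])];
    assert_eq!(resp_rx.recv().unwrap(), expected);
}
```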
diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs index 9d00a92e742b..d2bf031368c1 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/grid.rs @@ -129,7 +129,7 @@ fn backed_candidate_leads_to_advertisement() { AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Send enough statements to make candidate backable, make sure announcements are sent. @@ -224,7 +224,7 @@ fn backed_candidate_leads_to_advertisement() { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } overseer @@ -384,7 +384,7 @@ fn received_advertisement_before_confirmation_leads_to_request() { if p == peer_c && r == BENEFIT_VALID_RESPONSE.into() => { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } overseer @@ -515,7 +515,7 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { assert_peer_reported!(&mut overseer, peer_c, BENEFIT_VALID_STATEMENT); assert_peer_reported!(&mut overseer, peer_c, BENEFIT_VALID_RESPONSE); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Receive Backed message. @@ -546,7 +546,7 @@ fn received_advertisement_after_backing_leads_to_acknowledgement() { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Receive a manifest about the same candidate from peer D. @@ -720,7 +720,7 @@ fn received_acknowledgements_for_locally_confirmed() { AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Receive an unexpected acknowledgement from peer D. @@ -785,7 +785,7 @@ fn received_acknowledgements_for_locally_confirmed() { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Receive an unexpected acknowledgement from peer D. @@ -918,7 +918,7 @@ fn received_acknowledgements_for_externally_confirmed() { assert_peer_reported!(&mut overseer, peer_c, BENEFIT_VALID_STATEMENT); assert_peer_reported!(&mut overseer, peer_c, BENEFIT_VALID_RESPONSE); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } let ack = BackedCandidateAcknowledgement { @@ -1101,7 +1101,7 @@ fn received_advertisement_after_confirmation_before_backing() { if p == peer_c && r == BENEFIT_VALID_RESPONSE.into() ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Receive advertisement from peer D (after confirmation but before backing). 
@@ -1272,9 +1272,12 @@ fn additional_statements_are_shared_after_manifest_exchange() { receipt: Arc::new(candidate.clone()), persisted_validation_data: pvd.clone(), }; - let membership = vec![(relay_parent, vec![0])]; - answer_expected_hypothetical_depth_request(&mut overseer, vec![(hypothetical, membership)]) - .await; + let membership = vec![relay_parent]; + answer_expected_hypothetical_membership_request( + &mut overseer, + vec![(hypothetical, membership)], + ) + .await; // Statements are sent to the Backing subsystem. { @@ -1338,7 +1341,7 @@ fn additional_statements_are_shared_after_manifest_exchange() { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Receive a manifest about the same candidate from peer D. Contains different statements. @@ -1507,7 +1510,7 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Send enough statements to make candidate backable, make sure announcements are sent. @@ -1574,7 +1577,7 @@ fn advertisement_sent_when_peer_enters_relay_parent_view() { }) .await; - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; // Relay parent enters view of peer C. { @@ -1721,7 +1724,7 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Send enough statements to make candidate backable, make sure announcements are sent. @@ -1816,7 +1819,7 @@ fn advertisement_not_re_sent_when_peer_re_enters_view() { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Peer leaves view. @@ -1982,9 +1985,12 @@ fn inner_grid_statements_imported_to_backing(groups_for_first_para: usize) { receipt: Arc::new(candidate.clone()), persisted_validation_data: pvd.clone(), }; - let membership = vec![(relay_parent, vec![0])]; - answer_expected_hypothetical_depth_request(&mut overseer, vec![(hypothetical, membership)]) - .await; + let membership = vec![relay_parent]; + answer_expected_hypothetical_membership_request( + &mut overseer, + vec![(hypothetical, membership)], + ) + .await; // Receive messages from Backing subsystem. { @@ -2616,7 +2622,7 @@ fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() { if p == peer_c && r == BENEFIT_VALID_RESPONSE.into() ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Receive conflicting advertisement from peer C after confirmation. 
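The change from `vec![(relay_parent, vec![0])]` to `vec![relay_parent]` in these tests reflects the new membership type: per-leaf depths are no longer reported, only the leaves themselves. A tiny sketch of the two shapes, where the depth element type in the old alias is assumed to be `usize`:

```rust
type Hash = [u8; 32];

// Old shape (fragment trees): for each relay parent, the depths the candidate occupied.
type FragmentTreeMembership = Vec<(Hash, Vec<usize>)>;

// New shape (fragment chains): just the active leaves the candidate is, or may become,
// part of; a chain has no notion of depth to report.
type HypotheticalMembership = Vec<Hash>;

fn main() {
    let relay_parent = [0u8; 32];
    let old: FragmentTreeMembership = vec![(relay_parent, vec![0])];
    let new: HypotheticalMembership = vec![relay_parent];
    assert_eq!(old[0].0, new[0]);
}
```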
@@ -2763,7 +2769,7 @@ fn inactive_local_participates_in_grid() { AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(p, r))) if p == peer_a && r == BENEFIT_VALID_RESPONSE.into() => { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; overseer }); diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs index 959ccca9b73c..750d459ceaca 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/mod.rs @@ -26,8 +26,8 @@ use polkadot_node_network_protocol::{ }; use polkadot_node_primitives::Statement; use polkadot_node_subsystem::messages::{ - network_bridge_event::NewGossipTopology, AllMessages, ChainApiMessage, FragmentTreeMembership, - HypotheticalCandidate, NetworkBridgeEvent, ProspectiveParachainsMessage, ReportPeerMessage, + network_bridge_event::NewGossipTopology, AllMessages, ChainApiMessage, HypotheticalCandidate, + HypotheticalMembership, NetworkBridgeEvent, ProspectiveParachainsMessage, ReportPeerMessage, RuntimeApiMessage, RuntimeApiRequest, }; use polkadot_node_subsystem_test_helpers as test_helpers; @@ -539,7 +539,7 @@ async fn activate_leaf( leaf: &TestLeaf, test_state: &TestState, is_new_session: bool, - hypothetical_frontier: Vec<(HypotheticalCandidate, FragmentTreeMembership)>, + hypothetical_memberships: Vec<(HypotheticalCandidate, HypotheticalMembership)>, ) { let activated = new_leaf(leaf.hash, leaf.number); @@ -554,7 +554,7 @@ async fn activate_leaf( leaf, test_state, is_new_session, - hypothetical_frontier, + hypothetical_memberships, ) .await; } @@ -564,7 +564,7 @@ async fn handle_leaf_activation( leaf: &TestLeaf, test_state: &TestState, is_new_session: bool, - hypothetical_frontier: Vec<(HypotheticalCandidate, FragmentTreeMembership)>, + hypothetical_memberships: Vec<(HypotheticalCandidate, HypotheticalMembership)>, ) { let TestLeaf { number, @@ -674,18 +674,17 @@ async fn handle_leaf_activation( tx.send(Ok((validator_groups, group_rotation_info))).unwrap(); }, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::GetHypotheticalFrontier(req, tx), + ProspectiveParachainsMessage::GetHypotheticalMembership(req, tx), ) => { - assert_eq!(req.fragment_tree_relay_parent, Some(*hash)); - assert!(!req.backed_in_path_only); - for (i, (candidate, _)) in hypothetical_frontier.iter().enumerate() { + assert_eq!(req.fragment_chain_relay_parent, Some(*hash)); + for (i, (candidate, _)) in hypothetical_memberships.iter().enumerate() { assert!( req.candidates.iter().any(|c| &c == &candidate), "did not receive request for hypothetical candidate {}", i, ); } - tx.send(hypothetical_frontier).unwrap(); + tx.send(hypothetical_memberships).unwrap(); // this is the last expected runtime api call break }, @@ -727,17 +726,16 @@ async fn handle_sent_request( ); } -async fn answer_expected_hypothetical_depth_request( +async fn answer_expected_hypothetical_membership_request( virtual_overseer: &mut VirtualOverseer, - responses: Vec<(HypotheticalCandidate, FragmentTreeMembership)>, + responses: Vec<(HypotheticalCandidate, HypotheticalMembership)>, ) { assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::GetHypotheticalFrontier(req, tx) + ProspectiveParachainsMessage::GetHypotheticalMembership(req, tx) ) => { - 
assert_eq!(req.fragment_tree_relay_parent, None); - assert!(!req.backed_in_path_only); + assert_eq!(req.fragment_chain_relay_parent, None); for (i, (candidate, _)) in responses.iter().enumerate() { assert!( req.candidates.iter().any(|c| &c == &candidate), diff --git a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs index 34d4100a499e..4fdfda0dba24 100644 --- a/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs +++ b/polkadot/node/network/statement-distribution/src/v2/tests/requests.rs @@ -168,7 +168,7 @@ fn cluster_peer_allowed_to_send_incomplete_statements() { ); } - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; overseer }); @@ -338,7 +338,7 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() { if p == peer_c && r == BENEFIT_VALID_RESPONSE.into() ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Peer C advertises candidate 2. @@ -410,7 +410,7 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() { if p == peer_c && r == BENEFIT_VALID_RESPONSE.into() ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Peer C sends an announcement for candidate 3. Should hit seconding limit for validator 1. @@ -633,7 +633,7 @@ fn peer_reported_for_not_enough_statements() { if p == peer_c && r == BENEFIT_VALID_RESPONSE.into() ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } overseer @@ -788,7 +788,7 @@ fn peer_reported_for_duplicate_statements() { ); } - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; overseer }); @@ -918,7 +918,7 @@ fn peer_reported_for_providing_statements_with_invalid_signatures() { if p == peer_a && r == BENEFIT_VALID_RESPONSE.into() => { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } overseer @@ -1048,7 +1048,7 @@ fn peer_reported_for_providing_statements_with_wrong_validator_id() { if p == peer_a && r == BENEFIT_VALID_RESPONSE.into() => { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } overseer @@ -1214,7 +1214,7 @@ fn disabled_validators_added_to_unwanted_mask() { assert_eq!(statement, seconded_b); } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } overseer @@ -1355,7 +1355,7 @@ fn disabling_works_from_relay_parent_not_the_latest_state() { if p == peer_disabled && r == BENEFIT_VALID_RESPONSE.into() => { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } activate_leaf(&mut overseer, &leaf_2, &state, false, vec![]).await; @@ -1399,7 +1399,7 @@ fn disabling_works_from_relay_parent_not_the_latest_state() { if p == peer_disabled && r == BENEFIT_VALID_RESPONSE.into() 
=> { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } { @@ -1539,7 +1539,7 @@ fn local_node_sanity_checks_incoming_requests() { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Should drop requests from unknown peers. @@ -1713,7 +1713,7 @@ fn local_node_checks_that_peer_can_request_before_responding() { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; // Local node should respond to requests from peers in the same group // which appear to not have already seen the candidate @@ -1925,7 +1925,7 @@ fn local_node_respects_statement_mask() { AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Send enough statements to make candidate backable, make sure announcements are sent. @@ -2024,7 +2024,7 @@ fn local_node_respects_statement_mask() { } ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // `1` indicates statements NOT to request. @@ -2277,7 +2277,7 @@ fn should_delay_before_retrying_dropped_requests() { if p == peer_c && r == BENEFIT_VALID_RESPONSE.into() ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } // Sleep for the given amount of time. This should reset the delay for the first candidate. @@ -2368,7 +2368,7 @@ fn should_delay_before_retrying_dropped_requests() { if p == peer_c && r == BENEFIT_VALID_RESPONSE.into() ); - answer_expected_hypothetical_depth_request(&mut overseer, vec![]).await; + answer_expected_hypothetical_membership_request(&mut overseer, vec![]).await; } overseer diff --git a/polkadot/node/service/src/fake_runtime_api.rs b/polkadot/node/service/src/fake_runtime_api.rs index 5c889552a6ae..03c4836020d9 100644 --- a/polkadot/node/service/src/fake_runtime_api.rs +++ b/polkadot/node/service/src/fake_runtime_api.rs @@ -272,11 +272,11 @@ sp_api::impl_runtime_apis! { fn generate_proof( _: Vec, _: Option, - ) -> Result<(Vec, sp_mmr_primitives::Proof), sp_mmr_primitives::Error> { + ) -> Result<(Vec, sp_mmr_primitives::LeafProof), sp_mmr_primitives::Error> { unimplemented!() } - fn verify_proof(_: Vec, _: sp_mmr_primitives::Proof) + fn verify_proof(_: Vec, _: sp_mmr_primitives::LeafProof) -> Result<(), sp_mmr_primitives::Error> { unimplemented!() @@ -285,7 +285,7 @@ sp_api::impl_runtime_apis! 
{ fn verify_proof_stateless( _: Hash, _: Vec, - _: sp_mmr_primitives::Proof + _: sp_mmr_primitives::LeafProof ) -> Result<(), sp_mmr_primitives::Error> { unimplemented!() } diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index e75d80395c4b..2a54b3aed301 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -62,7 +62,7 @@ pub mod network_bridge_event; pub use network_bridge_event::NetworkBridgeEvent; /// A request to the candidate backing subsystem to check whether -/// there exists vacant membership in some fragment tree. +/// we can second this candidate. #[derive(Debug, Copy, Clone)] pub struct CanSecondRequest { /// Para id of the candidate. @@ -90,10 +90,12 @@ pub enum CandidateBackingMessage { oneshot::Sender>>, ), /// Request the subsystem to check whether it's allowed to second given candidate. - /// The rule is to only fetch collations that are either built on top of the root - /// of some fragment tree or have a parent node which represents backed candidate. + /// The rule is to only fetch collations that can either be directly chained to any + /// FragmentChain in the view or there is at least one FragmentChain where this candidate is a + /// potentially unconnected candidate (we predict that it may become connected to a + /// FragmentChain in the future). /// - /// Always responses with `false` if async backing is disabled for candidate's relay + /// Always responds with `false` if async backing is disabled for candidate's relay /// parent. CanSecond(CanSecondRequest, oneshot::Sender), /// Note that the Candidate Backing subsystem should second the given candidate in the context @@ -244,13 +246,6 @@ pub enum CollatorProtocolMessage { /// /// The hash is the relay parent. Seconded(Hash, SignedFullStatement), - /// The candidate received enough validity votes from the backing group. - Backed { - /// Candidate's para id. - para_id: ParaId, - /// Hash of the para head generated by candidate. - para_head: Hash, - }, } impl Default for CollatorProtocolMessage { @@ -1023,9 +1018,9 @@ pub enum GossipSupportMessage { NetworkBridgeUpdate(NetworkBridgeEvent), } -/// Request introduction of a candidate into the prospective parachains subsystem. +/// Request introduction of a seconded candidate into the prospective parachains subsystem. #[derive(Debug, PartialEq, Eq, Clone)] -pub struct IntroduceCandidateRequest { +pub struct IntroduceSecondedCandidateRequest { /// The para-id of the candidate. pub candidate_para: ParaId, /// The candidate receipt itself. @@ -1034,7 +1029,7 @@ pub struct IntroduceCandidateRequest { pub persisted_validation_data: PersistedValidationData, } -/// A hypothetical candidate to be evaluated for frontier membership +/// A hypothetical candidate to be evaluated for potential/actual membership /// in the prospective parachains subsystem. /// /// Hypothetical candidates are either complete or incomplete. @@ -1103,21 +1098,27 @@ impl HypotheticalCandidate { candidate_relay_parent, } } + + /// Get the output head data hash, if the candidate is complete. + pub fn output_head_data_hash(&self) -> Option { + match *self { + HypotheticalCandidate::Complete { ref receipt, .. } => + Some(receipt.descriptor.para_head), + HypotheticalCandidate::Incomplete { .. } => None, + } + } } /// Request specifying which candidates are either already included -/// or might be included in the hypothetical frontier of fragment trees -/// under a given active leaf. 
+/// or might become included in fragment chain under a given active leaf (or any active leaf if +/// `fragment_chain_relay_parent` is `None`). #[derive(Debug, PartialEq, Eq, Clone)] -pub struct HypotheticalFrontierRequest { +pub struct HypotheticalMembershipRequest { /// Candidates, in arbitrary order, which should be checked for - /// possible membership in fragment trees. + /// hypothetical/actual membership in fragment chains. pub candidates: Vec, - /// Either a specific fragment tree to check, otherwise all. - pub fragment_tree_relay_parent: Option, - /// Only return membership if all candidates in the path from the - /// root are backed. - pub backed_in_path_only: bool, + /// Either a specific fragment chain to check, otherwise all. + pub fragment_chain_relay_parent: Option, } /// A request for the persisted validation data stored in the prospective @@ -1156,9 +1157,9 @@ impl ParentHeadData { } } -/// Indicates the relay-parents whose fragment tree a candidate -/// is present in and the depths of that tree the candidate is present in. -pub type FragmentTreeMembership = Vec<(Hash, Vec)>; +/// Indicates the relay-parents whose fragment chain a candidate +/// is present in or can be added in (right now or in the future). +pub type HypotheticalMembership = Vec; /// A collection of ancestor candidates of a parachain. pub type Ancestors = HashSet; @@ -1166,15 +1167,11 @@ pub type Ancestors = HashSet; /// Messages sent to the Prospective Parachains subsystem. #[derive(Debug)] pub enum ProspectiveParachainsMessage { - /// Inform the Prospective Parachains Subsystem of a new candidate. + /// Inform the Prospective Parachains Subsystem of a new seconded candidate. /// - /// The response sender accepts the candidate membership, which is the existing - /// membership of the candidate if it was already known. - IntroduceCandidate(IntroduceCandidateRequest, oneshot::Sender), - /// Inform the Prospective Parachains Subsystem that a previously introduced candidate - /// has been seconded. This requires that the candidate was successfully introduced in - /// the past. - CandidateSeconded(ParaId, CandidateHash), + /// The response sender returns false if the candidate was rejected by prospective parachains, + /// true otherwise (if it was accepted or already present) + IntroduceSecondedCandidate(IntroduceSecondedCandidateRequest, oneshot::Sender), /// Inform the Prospective Parachains Subsystem that a previously introduced candidate /// has been backed. This requires that the candidate was successfully introduced in /// the past. @@ -1193,23 +1190,29 @@ pub enum ProspectiveParachainsMessage { Ancestors, oneshot::Sender>, ), - /// Get the hypothetical frontier membership of candidates with the given properties - /// under the specified active leaves' fragment trees. + /// Get the hypothetical or actual membership of candidates with the given properties + /// under the specified active leave's fragment chain. + /// + /// For each candidate, we return a vector of leaves where the candidate is present or could be + /// added. "Could be added" either means that the candidate can be added to the chain right now + /// or could be added in the future (we may not have its ancestors yet). + /// Note that even if we think it could be added in the future, we may find out that it was + /// invalid, as time passes. + /// If an active leaf is not in the vector, it means that there's no + /// chance this candidate will become valid under that leaf in the future. 
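In other words, the response maps each queried candidate to the set of active leaves under which it is, or may still become, part of a fragment chain; an empty set means the candidate can be dropped, and a request restricted to one relay parent can only ever report that relay parent. A small illustrative interpretation of such a response, with placeholder types rather than the real subsystem-types definitions:

```rust
use std::collections::HashSet;

// Placeholder types for illustration only.
type Hash = u64;
type CandidateHash = u64;
type HypotheticalMembership = Vec<Hash>;

struct HypotheticalMembershipRequest {
    candidates: Vec<CandidateHash>,
    fragment_chain_relay_parent: Option<Hash>,
}

/// Keep only the candidates that still have a chance of becoming valid under some
/// active leaf, checking the documented invariants along the way.
fn retain_potential_members(
    request: &HypotheticalMembershipRequest,
    response: Vec<(CandidateHash, HypotheticalMembership)>,
) -> HashSet<CandidateHash> {
    let mut keep = HashSet::new();
    for (candidate, membership) in response {
        // Only queried candidates may appear in the response.
        assert!(request.candidates.contains(&candidate));
        if let Some(restricted) = request.fragment_chain_relay_parent {
            // A leaf-restricted request can only report that one relay parent.
            assert!(membership.iter().all(|leaf| *leaf == restricted));
        }
        // Empty membership: no chance of becoming valid under any active leaf.
        if !membership.is_empty() {
            keep.insert(candidate);
        }
    }
    keep
}

fn main() {
    let request = HypotheticalMembershipRequest {
        candidates: vec![1, 2],
        fragment_chain_relay_parent: None,
    };
    let response = vec![(1, vec![100, 101]), (2, vec![])];
    let keep = retain_potential_members(&request, response);
    assert!(keep.contains(&1) && !keep.contains(&2));
}
```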
/// - /// For any candidate which is already known, this returns the depths the candidate - /// occupies. - GetHypotheticalFrontier( - HypotheticalFrontierRequest, - oneshot::Sender>, + /// If `fragment_chain_relay_parent` in the request is `Some()`, the return vector can only + /// contain this relay parent (or none). + GetHypotheticalMembership( + HypotheticalMembershipRequest, + oneshot::Sender>, ), - /// Get the membership of the candidate in all fragment trees. - GetTreeMembership(ParaId, CandidateHash, oneshot::Sender), - /// Get the minimum accepted relay-parent number for each para in the fragment tree + /// Get the minimum accepted relay-parent number for each para in the fragment chain /// for the given relay-chain block hash. /// /// That is, if the block hash is known and is an active leaf, this returns the /// minimum relay-parent block number in the same branch of the relay chain which - /// is accepted in the fragment tree for each para-id. + /// is accepted in the fragment chain for each para-id. /// /// If the block hash is not an active leaf, this will return an empty vector. /// @@ -1219,8 +1222,10 @@ pub enum ProspectiveParachainsMessage { /// Para-IDs are returned in no particular order. GetMinimumRelayParents(Hash, oneshot::Sender>), /// Get the validation data of some prospective candidate. The candidate doesn't need - /// to be part of any fragment tree, but this only succeeds if the parent head-data and - /// relay-parent are part of some fragment tree. + /// to be part of any fragment chain, but this only succeeds if the parent head-data and + /// relay-parent are part of the `CandidateStorage` (meaning that it's a candidate which is + /// part of some fragment chain or which prospective-parachains predicted will become part of + /// some fragment chain). GetProspectiveValidationData( ProspectiveValidationDataRequest, oneshot::Sender>, diff --git a/polkadot/node/subsystem-util/Cargo.toml b/polkadot/node/subsystem-util/Cargo.toml index cb93ad75d20b..b6cc368b9245 100644 --- a/polkadot/node/subsystem-util/Cargo.toml +++ b/polkadot/node/subsystem-util/Cargo.toml @@ -19,7 +19,7 @@ parking_lot = "0.12.1" pin-project = "1.0.9" rand = "0.8.5" thiserror = { workspace = true } -fatality = "0.0.6" +fatality = "0.1.0" gum = { package = "tracing-gum", path = "../gum" } derive_more = "0.99.17" schnellru = "0.2.1" diff --git a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs index d38d838fedef..b5aef325c8b4 100644 --- a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs +++ b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs @@ -39,8 +39,8 @@ /// /// # Usage /// -/// It's expected that the users of this module will be building up trees of -/// [`Fragment`]s and consistently pruning and adding to the tree. +/// It's expected that the users of this module will be building up chains of +/// [`Fragment`]s and consistently pruning and adding to the chains. /// /// ## Operating Constraints /// @@ -54,60 +54,65 @@ /// make an intelligent prediction about what might be accepted in the future based on /// prior fragments that also exist off-chain. /// -/// ## Fragment Trees +/// ## Fragment Chains +/// +/// For simplicity and practicality, we expect that collators of the same parachain are +/// cooperating and don't create parachain forks or cycles on the same relay chain active leaf. +/// Therefore, higher-level code should maintain one fragment chain for each active leaf (not a +/// fragment tree). 
If parachains do create forks, their performance in regards to async +/// backing and elastic scaling will suffer, because different validators will have different +/// predictions of the future. /// /// As the relay-chain grows, some predictions come true and others come false. /// And new predictions get made. These three changes correspond distinctly to the -/// 3 primary operations on fragment trees. -/// -/// A fragment tree is a mental model for thinking about a forking series of predictions -/// about a single parachain. There may be one or more fragment trees per parachain. -/// -/// In expectation, most parachains will have a plausibly-unique authorship method which means -/// that they should really be much closer to fragment-chains, maybe with an occasional fork. +/// 3 primary operations on fragment chains. /// -/// Avoiding fragment-tree blowup is beyond the scope of this module. +/// Avoiding fragment-chain blowup is beyond the scope of this module. Higher-level must ensure +/// proper spam protection. /// -/// ### Pruning Fragment Trees +/// ### Pruning Fragment Chains /// /// When the relay-chain advances, we want to compare the new constraints of that relay-parent -/// to the roots of the fragment trees we have. There are 3 cases: +/// to the root of the fragment chain we have. There are 3 cases: /// /// 1. The root fragment is still valid under the new constraints. In this case, we do nothing. -/// This is the "prediction still uncertain" case. +/// This is the "prediction still uncertain" case. (Corresponds to some candidates still +/// being pending availability). /// -/// 2. The root fragment is invalid under the new constraints because it has been subsumed by -/// the relay-chain. In this case, we can discard the root and split & re-root the fragment -/// tree under its descendants and compare to the new constraints again. This is the -/// "prediction came true" case. +/// 2. The root fragment (potentially along with a number of descendants) is invalid under the +/// new constraints because it has been included by the relay-chain. In this case, we can +/// discard the included chain and split & re-root the chain under its descendants and +/// compare to the new constraints again. This is the "prediction came true" case. /// -/// 3. The root fragment is invalid under the new constraints because a competing parachain -/// block has been included or it would never be accepted for some other reason. In this -/// case we can discard the entire fragment tree. This is the "prediction came false" case. +/// 3. The root fragment becomes invalid under the new constraints for any reason (if for +/// example the parachain produced a fork and the block producer picked a different +/// candidate to back). In this case we can discard the entire fragment chain. This is the +/// "prediction came false" case. /// /// This is all a bit of a simplification because it assumes that the relay-chain advances -/// without forks and is finalized instantly. In practice, the set of fragment-trees needs to +/// without forks and is finalized instantly. In practice, the set of fragment-chains needs to /// be observable from the perspective of a few different possible forks of the relay-chain and /// not pruned too eagerly. /// /// Note that the fragments themselves don't need to change and the only thing we care about /// is whether the predictions they represent are still valid. 
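A compact sketch of the three pruning cases above, with the chain model deliberately reduced to the head data each fragment builds on and produces; this is illustrative only, not the prospective-parachains implementation:

```rust
// Simplified model: a fragment is identified by the parent head it builds on
// and the head it produces; the chain is an ordered Vec of such fragments.
#[derive(Debug, Clone, PartialEq)]
struct Fragment {
    parent_head: u8,
    output_head: u8,
}

/// New operating constraints after a relay-chain advance, reduced here to the
/// parent head data the next fragment must build on.
struct Constraints {
    required_parent: u8,
}

/// Prune the chain against new constraints:
/// 1. root still valid       -> keep everything ("prediction still uncertain"),
/// 2. a prefix was included  -> drop that prefix and re-root ("prediction came true"),
/// 3. root no longer valid   -> drop the whole chain ("prediction came false").
fn prune(chain: Vec<Fragment>, constraints: &Constraints) -> Vec<Fragment> {
    if let Some(pos) = chain.iter().position(|f| f.parent_head == constraints.required_parent) {
        // Cases 1 (pos == 0) and 2 (pos > 0): everything before `pos` was included on-chain.
        chain.into_iter().skip(pos).collect()
    } else {
        // Case 3: no remaining fragment builds on the required parent.
        Vec::new()
    }
}

fn main() {
    let chain = vec![
        Fragment { parent_head: 0, output_head: 1 },
        Fragment { parent_head: 1, output_head: 2 },
    ];

    // The first prediction came true: the fragment building on head 0 was included.
    let pruned = prune(chain.clone(), &Constraints { required_parent: 1 });
    assert_eq!(pruned, vec![Fragment { parent_head: 1, output_head: 2 }]);

    // A competing fork was included instead: the whole chain is discarded.
    assert!(prune(chain, &Constraints { required_parent: 9 }).is_empty());
}
```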
/// -/// ### Extending Fragment Trees +/// ### Extending Fragment Chains /// /// As predictions fade into the past, new ones should be stacked on top. /// /// Every new relay-chain block is an opportunity to make a new prediction about the future. -/// Higher-level logic should select the leaves of the fragment-trees to build upon or whether -/// to create a new fragment-tree. +/// Higher-level logic should decide whether to build upon an existing chain or whether +/// to create a new fragment-chain. /// /// ### Code Upgrades /// /// Code upgrades are the main place where this emulation fails. The on-chain PVF upgrade /// scheduling logic is very path-dependent and intricate so we just assume that code upgrades -/// can't be initiated and applied within a single fragment-tree. Fragment-trees aren't deep, -/// in practice and code upgrades are fairly rare. So what's likely to happen around code -/// upgrades is that the entire fragment-tree has to get discarded at some point. +/// can't be initiated and applied within a single fragment-chain. Fragment-chains aren't deep, +/// in practice (bounded by a linear function of the the number of cores assigned to a +/// parachain) and code upgrades are fairly rare. So what's likely to happen around code +/// upgrades is that the entire fragment-chain has to get discarded at some point. /// /// That means a few blocks of execution time lost, which is not a big deal for code upgrades /// in practice at most once every few weeks. @@ -116,10 +121,7 @@ use polkadot_primitives::{ CollatorId, CollatorSignature, Hash, HeadData, Id as ParaId, PersistedValidationData, UpgradeRestriction, ValidationCodeHash, }; -use std::{ - borrow::{Borrow, Cow}, - collections::HashMap, -}; +use std::{collections::HashMap, sync::Arc}; /// Constraints on inbound HRMP channels. #[derive(Debug, Clone, PartialEq)] @@ -524,9 +526,9 @@ impl ConstraintModifications { /// here. But the erasure-root is not. This means that prospective candidates /// are not correlated to any session in particular. #[derive(Debug, Clone, PartialEq)] -pub struct ProspectiveCandidate<'a> { +pub struct ProspectiveCandidate { /// The commitments to the output of the execution. - pub commitments: Cow<'a, CandidateCommitments>, + pub commitments: CandidateCommitments, /// The collator that created the candidate. pub collator: CollatorId, /// The signature of the collator on the payload. @@ -539,32 +541,6 @@ pub struct ProspectiveCandidate<'a> { pub validation_code_hash: ValidationCodeHash, } -impl<'a> ProspectiveCandidate<'a> { - fn into_owned(self) -> ProspectiveCandidate<'static> { - ProspectiveCandidate { commitments: Cow::Owned(self.commitments.into_owned()), ..self } - } - - /// Partially clone the prospective candidate, but borrow the - /// parts which are potentially heavy. - pub fn partial_clone(&self) -> ProspectiveCandidate { - ProspectiveCandidate { - commitments: Cow::Borrowed(self.commitments.borrow()), - collator: self.collator.clone(), - collator_signature: self.collator_signature.clone(), - persisted_validation_data: self.persisted_validation_data.clone(), - pov_hash: self.pov_hash, - validation_code_hash: self.validation_code_hash, - } - } -} - -#[cfg(test)] -impl ProspectiveCandidate<'static> { - fn commitments_mut(&mut self) -> &mut CandidateCommitments { - self.commitments.to_mut() - } -} - /// Kinds of errors with the validity of a fragment. 
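With the `Cow`-based `ProspectiveCandidate<'a>` replaced by an owned struct held behind `Arc`, the same candidate data can be referenced by fragments under several active leaves without cloning the commitments. A stripped-down illustration of that sharing pattern, using stand-in types and omitting the operating-constraints check:

```rust
use std::sync::Arc;

// Stand-ins for the real types: the actual ProspectiveCandidate also carries the
// collator id and signature, the persisted validation data and the code hash.
#[derive(Debug)]
struct ProspectiveCandidate {
    commitments: Vec<u8>, // placeholder for CandidateCommitments
}

#[derive(Debug)]
struct Fragment {
    relay_parent: u64,
    candidate: Arc<ProspectiveCandidate>,
}

impl Fragment {
    // Mirrors the shape of the new `Fragment::new(relay_parent, constraints, candidate)`,
    // minus the constraint checking.
    fn new(relay_parent: u64, candidate: Arc<ProspectiveCandidate>) -> Self {
        Fragment { relay_parent, candidate }
    }
}

fn main() {
    let candidate = Arc::new(ProspectiveCandidate { commitments: vec![0; 1024] });

    // The same candidate shows up in the fragment chains of two active leaves;
    // only the Arc pointer is cloned, never the commitments.
    let under_leaf_a = Fragment::new(1, candidate.clone());
    let under_leaf_b = Fragment::new(2, candidate.clone());

    assert!(Arc::ptr_eq(&under_leaf_a.candidate, &under_leaf_b.candidate));
    assert_eq!(Arc::strong_count(&candidate), 3);
    assert_eq!(under_leaf_a.relay_parent + under_leaf_b.relay_parent, 3);
    assert_eq!(candidate.commitments.len(), 1024);
}
```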
#[derive(Debug, Clone, PartialEq)] pub enum FragmentValidityError { @@ -618,19 +594,19 @@ pub enum FragmentValidityError { /// This is a type which guarantees that the candidate is valid under the /// operating constraints. #[derive(Debug, Clone, PartialEq)] -pub struct Fragment<'a> { +pub struct Fragment { /// The new relay-parent. relay_parent: RelayChainBlockInfo, /// The constraints this fragment is operating under. operating_constraints: Constraints, /// The core information about the prospective candidate. - candidate: ProspectiveCandidate<'a>, + candidate: Arc, /// Modifications to the constraints based on the outputs of /// the candidate. modifications: ConstraintModifications, } -impl<'a> Fragment<'a> { +impl Fragment { /// Create a new fragment. /// /// This fails if the fragment isn't in line with the operating @@ -642,10 +618,29 @@ impl<'a> Fragment<'a> { pub fn new( relay_parent: RelayChainBlockInfo, operating_constraints: Constraints, - candidate: ProspectiveCandidate<'a>, + candidate: Arc, ) -> Result { + let modifications = Self::check_against_constraints( + &relay_parent, + &operating_constraints, + &candidate.commitments, + &candidate.validation_code_hash, + &candidate.persisted_validation_data, + )?; + + Ok(Fragment { relay_parent, operating_constraints, candidate, modifications }) + } + + /// Check the candidate against the operating constrains and return the constraint modifications + /// made by this candidate. + pub fn check_against_constraints( + relay_parent: &RelayChainBlockInfo, + operating_constraints: &Constraints, + commitments: &CandidateCommitments, + validation_code_hash: &ValidationCodeHash, + persisted_validation_data: &PersistedValidationData, + ) -> Result { let modifications = { - let commitments = &candidate.commitments; ConstraintModifications { required_parent: Some(commitments.head_data.clone()), hrmp_watermark: Some({ @@ -689,11 +684,13 @@ impl<'a> Fragment<'a> { validate_against_constraints( &operating_constraints, &relay_parent, - &candidate, + commitments, + persisted_validation_data, + validation_code_hash, &modifications, )?; - Ok(Fragment { relay_parent, operating_constraints, candidate, modifications }) + Ok(modifications) } /// Access the relay parent information. @@ -707,7 +704,7 @@ impl<'a> Fragment<'a> { } /// Access the underlying prospective candidate. - pub fn candidate(&self) -> &ProspectiveCandidate<'a> { + pub fn candidate(&self) -> &ProspectiveCandidate { &self.candidate } @@ -715,31 +712,14 @@ impl<'a> Fragment<'a> { pub fn constraint_modifications(&self) -> &ConstraintModifications { &self.modifications } - - /// Convert the fragment into an owned variant. - pub fn into_owned(self) -> Fragment<'static> { - Fragment { candidate: self.candidate.into_owned(), ..self } - } - - /// Validate this fragment against some set of constraints - /// instead of the operating constraints. 
- pub fn validate_against_constraints( - &self, - constraints: &Constraints, - ) -> Result<(), FragmentValidityError> { - validate_against_constraints( - constraints, - &self.relay_parent, - &self.candidate, - &self.modifications, - ) - } } fn validate_against_constraints( constraints: &Constraints, relay_parent: &RelayChainBlockInfo, - candidate: &ProspectiveCandidate, + commitments: &CandidateCommitments, + persisted_validation_data: &PersistedValidationData, + validation_code_hash: &ValidationCodeHash, modifications: &ConstraintModifications, ) -> Result<(), FragmentValidityError> { let expected_pvd = PersistedValidationData { @@ -749,17 +729,17 @@ fn validate_against_constraints( max_pov_size: constraints.max_pov_size as u32, }; - if expected_pvd != candidate.persisted_validation_data { + if expected_pvd != *persisted_validation_data { return Err(FragmentValidityError::PersistedValidationDataMismatch( expected_pvd, - candidate.persisted_validation_data.clone(), + persisted_validation_data.clone(), )) } - if constraints.validation_code_hash != candidate.validation_code_hash { + if constraints.validation_code_hash != *validation_code_hash { return Err(FragmentValidityError::ValidationCodeMismatch( constraints.validation_code_hash, - candidate.validation_code_hash, + *validation_code_hash, )) } @@ -770,7 +750,7 @@ fn validate_against_constraints( )) } - if candidate.commitments.new_validation_code.is_some() { + if commitments.new_validation_code.is_some() { match constraints.upgrade_restriction { None => {}, Some(UpgradeRestriction::Present) => @@ -778,11 +758,8 @@ fn validate_against_constraints( } } - let announced_code_size = candidate - .commitments - .new_validation_code - .as_ref() - .map_or(0, |code| code.0.len()); + let announced_code_size = + commitments.new_validation_code.as_ref().map_or(0, |code| code.0.len()); if announced_code_size > constraints.max_code_size { return Err(FragmentValidityError::CodeSizeTooLarge( @@ -801,17 +778,17 @@ fn validate_against_constraints( } } - if candidate.commitments.horizontal_messages.len() > constraints.max_hrmp_num_per_candidate { + if commitments.horizontal_messages.len() > constraints.max_hrmp_num_per_candidate { return Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow { messages_allowed: constraints.max_hrmp_num_per_candidate, - messages_submitted: candidate.commitments.horizontal_messages.len(), + messages_submitted: commitments.horizontal_messages.len(), }) } - if candidate.commitments.upward_messages.len() > constraints.max_ump_num_per_candidate { + if commitments.upward_messages.len() > constraints.max_ump_num_per_candidate { return Err(FragmentValidityError::UmpMessagesPerCandidateOverflow { messages_allowed: constraints.max_ump_num_per_candidate, - messages_submitted: candidate.commitments.upward_messages.len(), + messages_submitted: commitments.upward_messages.len(), }) } @@ -1184,21 +1161,21 @@ mod tests { fn make_candidate( constraints: &Constraints, relay_parent: &RelayChainBlockInfo, - ) -> ProspectiveCandidate<'static> { + ) -> ProspectiveCandidate { let collator_pair = CollatorPair::generate().0; let collator = collator_pair.public(); let sig = collator_pair.sign(b"blabla".as_slice()); ProspectiveCandidate { - commitments: Cow::Owned(CandidateCommitments { + commitments: CandidateCommitments { upward_messages: Default::default(), horizontal_messages: Default::default(), new_validation_code: None, head_data: HeadData::from(vec![1, 2, 3, 4, 5]), processed_downward_messages: 0, hrmp_watermark: relay_parent.number, - 
}), + }, collator, collator_signature: sig, persisted_validation_data: PersistedValidationData { @@ -1229,7 +1206,7 @@ mod tests { candidate.validation_code_hash = got_code; assert_eq!( - Fragment::new(relay_parent, constraints, candidate), + Fragment::new(relay_parent, constraints, Arc::new(candidate.clone())), Err(FragmentValidityError::ValidationCodeMismatch(expected_code, got_code,)), ) } @@ -1261,7 +1238,7 @@ mod tests { let got_pvd = candidate.persisted_validation_data.clone(); assert_eq!( - Fragment::new(relay_parent_b, constraints, candidate), + Fragment::new(relay_parent_b, constraints, Arc::new(candidate.clone())), Err(FragmentValidityError::PersistedValidationDataMismatch(expected_pvd, got_pvd,)), ); } @@ -1278,10 +1255,10 @@ mod tests { let mut candidate = make_candidate(&constraints, &relay_parent); let max_code_size = constraints.max_code_size; - candidate.commitments_mut().new_validation_code = Some(vec![0; max_code_size + 1].into()); + candidate.commitments.new_validation_code = Some(vec![0; max_code_size + 1].into()); assert_eq!( - Fragment::new(relay_parent, constraints, candidate), + Fragment::new(relay_parent, constraints, Arc::new(candidate.clone())), Err(FragmentValidityError::CodeSizeTooLarge(max_code_size, max_code_size + 1,)), ); } @@ -1298,7 +1275,7 @@ mod tests { let candidate = make_candidate(&constraints, &relay_parent); assert_eq!( - Fragment::new(relay_parent, constraints, candidate), + Fragment::new(relay_parent, constraints, Arc::new(candidate.clone())), Err(FragmentValidityError::RelayParentTooOld(5, 3,)), ); } @@ -1317,7 +1294,7 @@ mod tests { let max_hrmp = constraints.max_hrmp_num_per_candidate; candidate - .commitments_mut() + .commitments .horizontal_messages .try_extend((0..max_hrmp + 1).map(|i| OutboundHrmpMessage { recipient: ParaId::from(i as u32), @@ -1326,7 +1303,7 @@ mod tests { .unwrap(); assert_eq!( - Fragment::new(relay_parent, constraints, candidate), + Fragment::new(relay_parent, constraints, Arc::new(candidate.clone())), Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow { messages_allowed: max_hrmp, messages_submitted: max_hrmp + 1, @@ -1346,22 +1323,36 @@ mod tests { let mut candidate = make_candidate(&constraints, &relay_parent); // Empty dmp queue is ok. - assert!(Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()).is_ok()); + assert!(Fragment::new( + relay_parent.clone(), + constraints.clone(), + Arc::new(candidate.clone()) + ) + .is_ok()); // Unprocessed message that was sent later is ok. 
constraints.dmp_remaining_messages = vec![relay_parent.number + 1]; - assert!(Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()).is_ok()); + assert!(Fragment::new( + relay_parent.clone(), + constraints.clone(), + Arc::new(candidate.clone()) + ) + .is_ok()); for block_number in 0..=relay_parent.number { constraints.dmp_remaining_messages = vec![block_number]; assert_eq!( - Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()), + Fragment::new( + relay_parent.clone(), + constraints.clone(), + Arc::new(candidate.clone()) + ), Err(FragmentValidityError::DmpAdvancementRule), ); } - candidate.commitments.to_mut().processed_downward_messages = 1; - assert!(Fragment::new(relay_parent, constraints, candidate).is_ok()); + candidate.commitments.processed_downward_messages = 1; + assert!(Fragment::new(relay_parent, constraints, Arc::new(candidate.clone())).is_ok()); } #[test] @@ -1379,13 +1370,12 @@ mod tests { candidate .commitments - .to_mut() .upward_messages .try_extend((0..max_ump + 1).map(|i| vec![i as u8])) .unwrap(); assert_eq!( - Fragment::new(relay_parent, constraints, candidate), + Fragment::new(relay_parent, constraints, Arc::new(candidate.clone())), Err(FragmentValidityError::UmpMessagesPerCandidateOverflow { messages_allowed: max_ump, messages_submitted: max_ump + 1, @@ -1405,10 +1395,10 @@ mod tests { let mut candidate = make_candidate(&constraints, &relay_parent); constraints.upgrade_restriction = Some(UpgradeRestriction::Present); - candidate.commitments_mut().new_validation_code = Some(ValidationCode(vec![1, 2, 3])); + candidate.commitments.new_validation_code = Some(ValidationCode(vec![1, 2, 3])); assert_eq!( - Fragment::new(relay_parent, constraints, candidate), + Fragment::new(relay_parent, constraints, Arc::new(candidate.clone())), Err(FragmentValidityError::CodeUpgradeRestricted), ); } @@ -1424,23 +1414,23 @@ mod tests { let constraints = make_constraints(); let mut candidate = make_candidate(&constraints, &relay_parent); - candidate.commitments_mut().horizontal_messages = HorizontalMessages::truncate_from(vec![ + candidate.commitments.horizontal_messages = HorizontalMessages::truncate_from(vec![ OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![1, 2, 3] }, OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] }, ]); assert_eq!( - Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()), + Fragment::new(relay_parent.clone(), constraints.clone(), Arc::new(candidate.clone())), Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)), ); - candidate.commitments_mut().horizontal_messages = HorizontalMessages::truncate_from(vec![ + candidate.commitments.horizontal_messages = HorizontalMessages::truncate_from(vec![ OutboundHrmpMessage { recipient: ParaId::from(1 as u32), data: vec![1, 2, 3] }, OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] }, ]); assert_eq!( - Fragment::new(relay_parent, constraints, candidate), + Fragment::new(relay_parent, constraints, Arc::new(candidate.clone())), Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)), ); } diff --git a/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md b/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md index 8f00ff084941..701f6c87caff 100644 --- a/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md +++ b/polkadot/roadmap/implementers-guide/src/node/backing/prospective-parachains.md @@ -98,15 
+98,11 @@ prospective validation data. This is unlikely to change. hashes. - Sent by the Provisioner when requesting backable candidates, when selecting candidates for a given relay-parent. -- `ProspectiveParachainsMessage::GetHypotheticalFrontier` +- `ProspectiveParachainsMessage::GetHypotheticalMembership` - Gets the hypothetical frontier membership of candidates with the given properties under the specified active leaves' fragment trees. - Sent by the Backing Subsystem when sanity-checking whether a candidate can be seconded based on its hypothetical frontiers. -- `ProspectiveParachainsMessage::GetTreeMembership` - - Gets the membership of the candidate in all fragment trees. - - Sent by the Backing Subsystem when it needs to update the candidates - seconded at various depths under new active leaves. - `ProspectiveParachainsMessage::GetMinimumRelayParents` - Gets the minimum accepted relay-parent number for each para in the fragment tree for the given relay-chain block hash. diff --git a/polkadot/roadmap/implementers-guide/src/node/backing/statement-distribution.md b/polkadot/roadmap/implementers-guide/src/node/backing/statement-distribution.md index a00705804bdc..ce2ff3ca9139 100644 --- a/polkadot/roadmap/implementers-guide/src/node/backing/statement-distribution.md +++ b/polkadot/roadmap/implementers-guide/src/node/backing/statement-distribution.md @@ -180,7 +180,7 @@ of re-enabling. - Reports a peer (either good or bad). - `CandidateBackingMessage::Statement` - Note a validator's statement about a particular candidate. -- `ProspectiveParachainsMessage::GetHypotheticalFrontier` +- `ProspectiveParachainsMessage::GetHypotheticalMembership` - Gets the hypothetical frontier membership of candidates under active leaves' fragment trees. - `NetworkBridgeTxMessage::SendRequests` - Sends requests, initiating the request/response protocol. diff --git a/polkadot/runtime/parachains/src/coretime/mod.rs b/polkadot/runtime/parachains/src/coretime/mod.rs index 94bce4c83e6f..33cbcb98fb29 100644 --- a/polkadot/runtime/parachains/src/coretime/mod.rs +++ b/polkadot/runtime/parachains/src/coretime/mod.rs @@ -106,7 +106,7 @@ pub mod pallet { type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// The runtime's definition of a Currency. type Currency: Currency; - /// The ParaId of the broker system parachain. + /// The ParaId of the coretime chain. #[pallet::constant] type BrokerId: Get; /// Something that provides the weight of this pallet. @@ -139,10 +139,16 @@ pub mod pallet { #[pallet::call] impl Pallet { + /// Request the configuration to be updated with the specified number of cores. Warning: + /// Since this only schedules a configuration update, it takes two sessions to come into + /// effect. + /// + /// - `origin`: Root or the Coretime Chain + /// - `count`: total number of cores #[pallet::weight(::WeightInfo::request_core_count())] #[pallet::call_index(1)] pub fn request_core_count(origin: OriginFor, count: u16) -> DispatchResult { - // Ignore requests not coming from the broker parachain or root. + // Ignore requests not coming from the coretime chain or root. Self::ensure_root_or_para(origin, ::BrokerId::get().into())?; configuration::Pallet::::set_coretime_cores_unchecked(u32::from(count)) @@ -155,7 +161,7 @@ pub mod pallet { // origin: OriginFor, // _when: BlockNumberFor, //) -> DispatchResult { - // // Ignore requests not coming from the broker parachain or root. + // // Ignore requests not coming from the coretime chain or root. 
// Self::ensure_root_or_para(origin, ::BrokerId::get().into())?; // Ok(()) //} @@ -168,7 +174,7 @@ pub mod pallet { // _who: T::AccountId, // _amount: BalanceOf, //) -> DispatchResult { - // // Ignore requests not coming from the broker parachain or root. + // // Ignore requests not coming from the coretime chain or root. // Self::ensure_root_or_para(origin, ::BrokerId::get().into())?; // Ok(()) //} @@ -177,7 +183,7 @@ pub mod pallet { /// to be used. /// /// Parameters: - /// -`origin`: The `ExternalBrokerOrigin`, assumed to be the Broker system parachain. + /// -`origin`: The `ExternalBrokerOrigin`, assumed to be the coretime chain. /// -`core`: The core that should be scheduled. /// -`begin`: The starting blockheight of the instruction. /// -`assignment`: How the blockspace should be utilised. @@ -193,7 +199,7 @@ pub mod pallet { assignment: Vec<(CoreAssignment, PartsOf57600)>, end_hint: Option>, ) -> DispatchResult { - // Ignore requests not coming from the broker parachain or root. + // Ignore requests not coming from the coretime chain or root. Self::ensure_root_or_para(origin, T::BrokerId::get().into())?; let core = u32::from(core).into(); @@ -243,7 +249,7 @@ impl Pallet { } } - // Handle legacy swaps in coretime. Notifies broker parachain that a lease swap has occurred via + // Handle legacy swaps in coretime. Notifies coretime chain that a lease swap has occurred via // XCM message. This function is meant to be used in an implementation of `OnSwap` trait. pub fn on_legacy_lease_swap(one: ParaId, other: ParaId) { let message = Xcm(vec![ diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 22e6183e5946..c22d5c39b233 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -2134,7 +2134,7 @@ sp_api::impl_runtime_apis! { fn generate_proof( block_numbers: Vec, best_known_block_number: Option, - ) -> Result<(Vec, mmr::Proof), mmr::Error> { + ) -> Result<(Vec, mmr::LeafProof), mmr::Error> { Mmr::generate_proof(block_numbers, best_known_block_number).map( |(leaves, proof)| { ( @@ -2148,7 +2148,7 @@ sp_api::impl_runtime_apis! { ) } - fn verify_proof(leaves: Vec, proof: mmr::Proof) + fn verify_proof(leaves: Vec, proof: mmr::LeafProof) -> Result<(), mmr::Error> { let leaves = leaves.into_iter().map(|leaf| @@ -2161,7 +2161,7 @@ sp_api::impl_runtime_apis! { fn verify_proof_stateless( root: mmr::Hash, leaves: Vec, - proof: mmr::Proof + proof: mmr::LeafProof ) -> Result<(), mmr::Error> { let nodes = leaves.into_iter().map(|leaf|mmr::DataOrHash::Data(leaf.into_opaque_leaf())).collect(); pallet_mmr::verify_leaves_proof::(root, nodes, proof) diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 0509ba382b2e..9eb0fcca6678 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -1044,11 +1044,11 @@ sp_api::impl_runtime_apis! { fn generate_proof( _block_numbers: Vec, _best_known_block_number: Option, - ) -> Result<(Vec, mmr::Proof), mmr::Error> { + ) -> Result<(Vec, mmr::LeafProof), mmr::Error> { Err(mmr::Error::PalletNotIncluded) } - fn verify_proof(_leaves: Vec, _proof: mmr::Proof) + fn verify_proof(_leaves: Vec, _proof: mmr::LeafProof) -> Result<(), mmr::Error> { Err(mmr::Error::PalletNotIncluded) @@ -1057,7 +1057,7 @@ sp_api::impl_runtime_apis! 
{ fn verify_proof_stateless( _root: Hash, _leaves: Vec, - _proof: mmr::Proof + _proof: mmr::LeafProof ) -> Result<(), mmr::Error> { Err(mmr::Error::PalletNotIncluded) } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index cae12ab49c02..b62c6d08201c 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -2013,7 +2013,7 @@ sp_api::impl_runtime_apis! { fn generate_proof( block_numbers: Vec, best_known_block_number: Option, - ) -> Result<(Vec, mmr::Proof), mmr::Error> { + ) -> Result<(Vec, mmr::LeafProof), mmr::Error> { Mmr::generate_proof(block_numbers, best_known_block_number).map( |(leaves, proof)| { ( @@ -2027,7 +2027,7 @@ sp_api::impl_runtime_apis! { ) } - fn verify_proof(leaves: Vec, proof: mmr::Proof) + fn verify_proof(leaves: Vec, proof: mmr::LeafProof) -> Result<(), mmr::Error> { let leaves = leaves.into_iter().map(|leaf| @@ -2040,7 +2040,7 @@ sp_api::impl_runtime_apis! { fn verify_proof_stateless( root: mmr::Hash, leaves: Vec, - proof: mmr::Proof + proof: mmr::LeafProof ) -> Result<(), mmr::Error> { let nodes = leaves.into_iter().map(|leaf|mmr::DataOrHash::Data(leaf.into_opaque_leaf())).collect(); pallet_mmr::verify_leaves_proof::(root, nodes, proof) diff --git a/prdoc/pr_4035.prdoc b/prdoc/pr_4035.prdoc new file mode 100644 index 000000000000..0617a6a26189 --- /dev/null +++ b/prdoc/pr_4035.prdoc @@ -0,0 +1,24 @@ +title: "Prospective parachains rework" + +doc: + - audience: Node Dev + description: | + Changes prospective-parachains from dealing with trees of unincluded candidates to maintaining only candidate chains + and a number of unconnected candidates (for which we don't yet know the parent candidate but which otherwise seem potentially viable). + This is needed for elastic scaling, in order to have full throughput even if a candidate is validated by a backing group before the parent candidate + is fetched from the other backing group. + Also simplifies the subsystem by no longer allowing parachain cycles. + +crates: + - name: polkadot-node-core-prospective-parachains + bump: major + - name: polkadot-node-core-backing + bump: minor + - name: polkadot-collator-protocol + bump: minor + - name: polkadot-statement-distribution + bump: minor + - name: polkadot-node-subsystem-types + bump: major + - name: polkadot-node-subsystem-util + bump: major diff --git a/prdoc/pr_4091.prdoc b/prdoc/pr_4091.prdoc new file mode 100644 index 000000000000..5c38a344bd8a --- /dev/null +++ b/prdoc/pr_4091.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removed `pallet::getter` usage from the authority-discovery pallet + +doc: + - audience: Runtime Dev + description: | + This PR removed `pallet::getter`s from `pallet-authority-discovery`s storage items. + When accessed inside the pallet, use the syntax `StorageItem::::get()`. + When accessed outside the pallet, use the getters current_authorities() and next_authorities() instead. 
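(Illustrative sketch only, not part of this patch: how callers can adapt to the getter removal described above, using the `Keys`/`NextKeys` storage items declared later in this diff; the helper names outside the pallet are taken from the description and are assumptions, not verified API.)

    // Inside the pallet module, read the storage items directly instead of the
    // removed `keys()` / `next_keys()` getters generated by `#[pallet::getter]`.
    fn current_and_next_keys<T: Config>() {
        let _current = Keys::<T>::get();
        let _next = NextKeys::<T>::get();
    }
    // Outside the pallet, use the public getters named in the description above:
    // `current_authorities()` and `next_authorities()`.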
+ +crates: + - name: pallet-authority-discovery + bump: major diff --git a/prdoc/pr_4302.prdoc b/prdoc/pr_4302.prdoc new file mode 100644 index 000000000000..bb4331f28023 --- /dev/null +++ b/prdoc/pr_4302.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "migrations: take() should consume read and write operation weight" + +doc: + - audience: Runtime Dev + description: | + `take()` consumes only 1 read worth of weight in `single-block-migrations` example, while `take()` is `get() + kill()`, + i.e should be 1 read + 1 write. Since this could mislead developers writing migrations following the example, + this PR fixes the weight calculation. + +crates: + - name: pallet-example-single-block-migrations + bump: minor diff --git a/prdoc/pr_4326.prdoc b/prdoc/pr_4326.prdoc new file mode 100644 index 000000000000..b448bd7e52e7 --- /dev/null +++ b/prdoc/pr_4326.prdoc @@ -0,0 +1,16 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: CheckWeight checks for combined extrinsic length and proof size + +doc: + - audience: Runtime Dev + description: | + The `CheckWeight` `SignedExtension` will now perform an additional check. The extension was verifying the extrinsic length and + weight limits individually. However, the proof size dimension of the weight and extrinsic length together are bound by the PoV size limit. + The `CheckWeight` extension will now check that the combined size of the proof and the extrinsic lengths will not + exceed the PoV size limit. + +crates: + - name: frame-system + bump: minor diff --git a/prdoc/pr_4349.prdoc b/prdoc/pr_4349.prdoc new file mode 100644 index 000000000000..fdc9e816e1b9 --- /dev/null +++ b/prdoc/pr_4349.prdoc @@ -0,0 +1,9 @@ +title: "Store Header in RemoteExt Snapshot" + +doc: + - audience: Runtime Dev + description: Replaces the block hash in the RemoteExt snapshot with the block header. + +crates: + - name: frame-remote-externalities + bump: major diff --git a/prdoc/pr_4414.prdoc b/prdoc/pr_4414.prdoc new file mode 100644 index 000000000000..864e816be91a --- /dev/null +++ b/prdoc/pr_4414.prdoc @@ -0,0 +1,10 @@ +title: "Rococo Asset Hub: undeploy state-trie migration" + +doc: + - audience: Runtime Dev + description: | + The state-trie migration on the Rococo Asset Hub is completed and is now removed. + +crates: + - name: asset-hub-rococo-runtime + bump: major diff --git a/prdoc/pr_4417.prdoc b/prdoc/pr_4417.prdoc new file mode 100644 index 000000000000..5aa72edd066a --- /dev/null +++ b/prdoc/pr_4417.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Removed `pallet::getter` usage from pallet-contracts-mock-network + +doc: + - audience: Runtime Dev + description: | + This PR removed the `pallet::getter`s from `pallet-contracts-mock-network`s storage items. 
+ +crates: + - name: pallet-contracts-mock-network + bump: minor diff --git a/prdoc/pr_4442.prdoc new file mode 100644 index 000000000000..ee6ac29e1a10 --- /dev/null +++ b/prdoc/pr_4442.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Improve mock relay in --dev mode to support async backing + +doc: + - audience: Node Dev + description: | + Support async backing in --dev mode. Improve the relay mock MockValidationDataInherentDataProvider to match the expectations of async backing runtimes. + +crates: + - name: cumulus-client-parachain-inherent + bump: patch diff --git a/substrate/bin/node/cli/Cargo.toml b/substrate/bin/node/cli/Cargo.toml index 050004acc78f..323afa56696a 100644 --- a/substrate/bin/node/cli/Cargo.toml +++ b/substrate/bin/node/cli/Cargo.toml @@ -170,7 +170,6 @@ clap_complete = { version = "4.0.2", optional = true } node-inspect = { package = "staging-node-inspect", path = "../inspect", optional = true } frame-benchmarking-cli = { path = "../../../utils/frame/benchmarking-cli", optional = true } substrate-build-script-utils = { path = "../../../utils/build-script-utils", optional = true } -substrate-frame-cli = { path = "../../../utils/frame/frame-utilities-cli", optional = true } sc-cli = { path = "../../../client/cli", optional = true } pallet-balances = { path = "../../../frame/balances" } sc-storage-monitor = { path = "../../../client/storage-monitor" } @@ -185,7 +184,6 @@ cli = [ "sc-cli", "sc-service/rocksdb", "substrate-build-script-utils", - "substrate-frame-cli", ] runtime-benchmarks = [ "frame-benchmarking-cli/runtime-benchmarks", diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index 18b0d0c31a4d..017ee9100f9e 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -3094,7 +3094,7 @@ impl_runtime_apis! { fn generate_proof( block_numbers: Vec, best_known_block_number: Option, - ) -> Result<(Vec, mmr::Proof), mmr::Error> { + ) -> Result<(Vec, mmr::LeafProof), mmr::Error> { Mmr::generate_proof(block_numbers, best_known_block_number).map( |(leaves, proof)| { ( @@ -3108,7 +3108,7 @@ impl_runtime_apis! { ) } - fn verify_proof(leaves: Vec, proof: mmr::Proof) + fn verify_proof(leaves: Vec, proof: mmr::LeafProof) -> Result<(), mmr::Error> { let leaves = leaves.into_iter().map(|leaf| @@ -3121,7 +3121,7 @@ impl_runtime_apis! { fn verify_proof_stateless( root: mmr::Hash, leaves: Vec, - proof: mmr::Proof + proof: mmr::LeafProof ) -> Result<(), mmr::Error> { let nodes = leaves.into_iter().map(|leaf|mmr::DataOrHash::Data(leaf.into_opaque_leaf())).collect(); pallet_mmr::verify_leaves_proof::(root, nodes, proof) diff --git a/substrate/client/merkle-mountain-range/rpc/src/lib.rs b/substrate/client/merkle-mountain-range/rpc/src/lib.rs index b4da9848de54..41e73a5b8d75 100644 --- a/substrate/client/merkle-mountain-range/rpc/src/lib.rs +++ b/substrate/client/merkle-mountain-range/rpc/src/lib.rs @@ -36,7 +36,7 @@ use sp_core::{ offchain::{storage::OffchainDb, OffchainDbExt, OffchainStorage}, Bytes, }; -use sp_mmr_primitives::{Error as MmrError, Proof}; +use sp_mmr_primitives::{Error as MmrError, LeafProof}; use sp_runtime::traits::{Block as BlockT, NumberFor}; pub use sp_mmr_primitives::MmrApi as MmrRuntimeApi; @@ -52,17 +52,17 @@ pub struct LeavesProof { pub block_hash: BlockHash, /// SCALE-encoded vector of `LeafData`.
pub leaves: Bytes, - /// SCALE-encoded proof data. See [sp_mmr_primitives::Proof]. + /// SCALE-encoded proof data. See [sp_mmr_primitives::LeafProof]. pub proof: Bytes, } impl LeavesProof { /// Create new `LeavesProof` from a given vector of `Leaf` and a - /// [sp_mmr_primitives::Proof]. + /// [sp_mmr_primitives::LeafProof]. pub fn new( block_hash: BlockHash, leaves: Vec, - proof: Proof, + proof: LeafProof, ) -> Self where Leaf: Encode, @@ -258,7 +258,7 @@ mod tests { fn should_serialize_leaf_proof() { // given let leaf = vec![1_u8, 2, 3, 4]; - let proof = Proof { + let proof = LeafProof { leaf_indices: vec![1], leaf_count: 9, items: vec![H256::repeat_byte(1), H256::repeat_byte(2)], @@ -281,7 +281,7 @@ mod tests { // given let leaf_a = vec![1_u8, 2, 3, 4]; let leaf_b = vec![2_u8, 2, 3, 4]; - let proof = Proof { + let proof = LeafProof { leaf_indices: vec![1, 2], leaf_count: 9, items: vec![H256::repeat_byte(1), H256::repeat_byte(2)], @@ -306,7 +306,7 @@ mod tests { block_hash: H256::repeat_byte(0), leaves: Bytes(vec![vec![1_u8, 2, 3, 4]].encode()), proof: Bytes( - Proof { + LeafProof { leaf_indices: vec![1], leaf_count: 9, items: vec![H256::repeat_byte(1), H256::repeat_byte(2)], @@ -333,7 +333,7 @@ mod tests { block_hash: H256::repeat_byte(0), leaves: Bytes(vec![vec![1_u8, 2, 3, 4], vec![2_u8, 2, 3, 4]].encode()), proof: Bytes( - Proof { + LeafProof { leaf_indices: vec![1, 2], leaf_count: 9, items: vec![H256::repeat_byte(1), H256::repeat_byte(2)], diff --git a/substrate/client/merkle-mountain-range/src/test_utils.rs b/substrate/client/merkle-mountain-range/src/test_utils.rs index 5775b4cfe67c..fcf9fa25b593 100644 --- a/substrate/client/merkle-mountain-range/src/test_utils.rs +++ b/substrate/client/merkle-mountain-range/src/test_utils.rs @@ -309,11 +309,11 @@ sp_api::mock_impl_runtime_apis! { &self, _block_numbers: Vec, _best_known_block_number: Option, - ) -> Result<(Vec, mmr::Proof), mmr::Error> { + ) -> Result<(Vec, mmr::LeafProof), mmr::Error> { Err(mmr::Error::PalletNotIncluded) } - fn verify_proof(_leaves: Vec, _proof: mmr::Proof) + fn verify_proof(_leaves: Vec, _proof: mmr::LeafProof) -> Result<(), mmr::Error> { Err(mmr::Error::PalletNotIncluded) @@ -322,7 +322,7 @@ sp_api::mock_impl_runtime_apis! { fn verify_proof_stateless( _root: MmrHash, _leaves: Vec, - _proof: mmr::Proof + _proof: mmr::LeafProof ) -> Result<(), mmr::Error> { Err(mmr::Error::PalletNotIncluded) } diff --git a/substrate/frame/authority-discovery/src/lib.rs b/substrate/frame/authority-discovery/src/lib.rs index ed9240d99e8d..16f71960d693 100644 --- a/substrate/frame/authority-discovery/src/lib.rs +++ b/substrate/frame/authority-discovery/src/lib.rs @@ -48,13 +48,11 @@ pub mod pallet { } #[pallet::storage] - #[pallet::getter(fn keys)] /// Keys of the current authority set. pub(super) type Keys = StorageValue<_, WeakBoundedVec, ValueQuery>; #[pallet::storage] - #[pallet::getter(fn next_keys)] /// Keys of the next authority set. 
pub(super) type NextKeys = StorageValue<_, WeakBoundedVec, ValueQuery>; diff --git a/substrate/frame/balances/src/migration.rs b/substrate/frame/balances/src/migration.rs index 38d9c07ff7e0..568c3fbb7cf0 100644 --- a/substrate/frame/balances/src/migration.rs +++ b/substrate/frame/balances/src/migration.rs @@ -91,7 +91,7 @@ impl, I: 'static> OnRuntimeUpgrade for ResetInactive { StorageVersion::new(0).put::>(); log::info!(target: LOG_TARGET, "Storage to version 0"); - T::DbWeight::get().reads_writes(1, 2) + T::DbWeight::get().reads_writes(1, 3) } else { log::info!( target: LOG_TARGET, diff --git a/substrate/frame/contracts/mock-network/src/mocks/msg_queue.rs b/substrate/frame/contracts/mock-network/src/mocks/msg_queue.rs index cc81b6bd636e..bfdf6dd97eaf 100644 --- a/substrate/frame/contracts/mock-network/src/mocks/msg_queue.rs +++ b/substrate/frame/contracts/mock-network/src/mocks/msg_queue.rs @@ -47,17 +47,15 @@ pub mod pallet { pub struct Pallet(_); #[pallet::storage] - #[pallet::getter(fn parachain_id)] pub(super) type ParachainId = StorageValue<_, ParaId, ValueQuery>; #[pallet::storage] - #[pallet::getter(fn received_dmp)] /// A queue of received DMP messages pub(super) type ReceivedDmp = StorageValue<_, Vec>, ValueQuery>; impl Get for Pallet { fn get() -> ParaId { - Self::parachain_id() + ParachainId::::get() } } @@ -89,6 +87,14 @@ pub mod pallet { ParachainId::::put(para_id); } + pub fn parachain_id() -> ParaId { + ParachainId::::get() + } + + pub fn received_dmp() -> Vec> { + ReceivedDmp::::get() + } + fn handle_xcmp_message( sender: ParaId, _sent_at: RelayBlockNumber, @@ -169,7 +175,7 @@ pub mod pallet { limit, Weight::zero(), ); - >::append(x); + ReceivedDmp::::append(x); Self::deposit_event(Event::ExecutedDownward(id, outcome)); }, }, diff --git a/substrate/frame/democracy/src/migrations/v1.rs b/substrate/frame/democracy/src/migrations/v1.rs index 5e423b9ab6ef..47f8df017f1e 100644 --- a/substrate/frame/democracy/src/migrations/v1.rs +++ b/substrate/frame/democracy/src/migrations/v1.rs @@ -108,7 +108,7 @@ pub mod v1 { .collect::>(); let bounded = BoundedVec::<_, T::MaxProposals>::truncate_from(props.clone()); PublicProps::::put(bounded); - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1)); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); if props.len() as u32 > T::MaxProposals::get() { log::error!( @@ -126,7 +126,7 @@ pub mod v1 { StorageVersion::new(1).put::>(); - weight.saturating_add(T::DbWeight::get().reads_writes(1, 2)) + weight.saturating_add(T::DbWeight::get().reads_writes(1, 3)) } #[cfg(feature = "try-runtime")] diff --git a/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs b/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs index 18ef4e72cc4f..7b543d72c984 100644 --- a/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs +++ b/substrate/frame/examples/single-block-migrations/src/migrations/v1.rs @@ -67,10 +67,10 @@ impl UncheckedOnRuntimeUpgrade for InnerMigrateV0ToV1 { // Write the new value to storage let new = crate::CurrentAndPreviousValue { current: old_value, previous: None }; crate::Value::::put(new); - // One read for the old value, one write for the new value - T::DbWeight::get().reads_writes(1, 1) + // One read + write for taking the old value, and one write for setting the new value + T::DbWeight::get().reads_writes(1, 2) } else { - // One read for trying to access the old value + // No writes since there was no old value, just one read for checking 
T::DbWeight::get().reads(1) } } @@ -184,7 +184,7 @@ mod test { // value. assert_eq!( weight, - ::DbWeight::get().reads_writes(1, 1) + ::DbWeight::get().reads_writes(1, 2) ); // After the migration, the new value should be set as the `current` value. diff --git a/substrate/frame/merkle-mountain-range/src/lib.rs b/substrate/frame/merkle-mountain-range/src/lib.rs index e2b40974579e..a86443f2e011 100644 --- a/substrate/frame/merkle-mountain-range/src/lib.rs +++ b/substrate/frame/merkle-mountain-range/src/lib.rs @@ -260,15 +260,15 @@ pub mod pallet { /// Stateless MMR proof verification for batch of leaves. /// -/// This function can be used to verify received MMR [primitives::Proof] (`proof`) +/// This function can be used to verify received MMR [primitives::LeafProof] (`proof`) /// for given leaves set (`leaves`) against a known MMR root hash (`root`). /// Note, the leaves should be sorted such that corresponding leaves and leaf indices have the /// same position in both the `leaves` vector and the `leaf_indices` vector contained in the -/// [primitives::Proof]. +/// [primitives::LeafProof]. pub fn verify_leaves_proof( root: H::Output, leaves: Vec>, - proof: primitives::Proof, + proof: primitives::LeafProof, ) -> Result<(), primitives::Error> where H: traits::Hash, @@ -342,7 +342,7 @@ impl, I: 'static> Pallet { pub fn generate_proof( block_numbers: Vec>, best_known_block_number: Option>, - ) -> Result<(Vec>, primitives::Proof>), primitives::Error> { + ) -> Result<(Vec>, primitives::LeafProof>), primitives::Error> { // check whether best_known_block_number provided, else use current best block let best_known_block_number = best_known_block_number.unwrap_or_else(|| >::block_number()); @@ -362,11 +362,6 @@ impl, I: 'static> Pallet { mmr.generate_proof(leaf_indices) } - /// Return the on-chain MMR root hash. - pub fn mmr_root() -> HashOf { - RootHash::::get() - } - /// Verify MMR proof for given `leaves`. /// /// This method is safe to use within the runtime code. @@ -375,7 +370,7 @@ impl, I: 'static> Pallet { /// or the proof is invalid. pub fn verify_leaves( leaves: Vec>, - proof: primitives::Proof>, + proof: primitives::LeafProof>, ) -> Result<(), primitives::Error> { if proof.leaf_count > NumberOfLeaves::::get() || proof.leaf_count == 0 || @@ -393,4 +388,37 @@ impl, I: 'static> Pallet { Err(primitives::Error::Verify.log_debug("The proof is incorrect.")) } } + + pub fn generate_ancestry_proof( + prev_block_number: BlockNumberFor, + best_known_block_number: Option>, + ) -> Result>, Error> { + // check whether best_known_block_number provided, else use current best block + let best_known_block_number = + best_known_block_number.unwrap_or_else(|| >::block_number()); + + let leaf_count = Self::block_num_to_leaf_index(best_known_block_number)?.saturating_add(1); + let prev_leaf_count = Self::block_num_to_leaf_index(prev_block_number)?.saturating_add(1); + + let mmr: ModuleMmr = mmr::Mmr::new(leaf_count); + mmr.generate_ancestry_proof(prev_leaf_count) + } + + pub fn verify_ancestry_proof( + ancestry_proof: primitives::AncestryProof>, + ) -> Result<(), Error> { + let mmr: ModuleMmr = + mmr::Mmr::new(ancestry_proof.leaf_count); + let is_valid = mmr.verify_ancestry_proof(ancestry_proof)?; + if is_valid { + Ok(()) + } else { + Err(Error::Verify.log_debug("The ancestry proof is incorrect.")) + } + } + + /// Return the on-chain MMR root hash. 
+ pub fn mmr_root() -> HashOf { + RootHash::::get() + } } diff --git a/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs b/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs index aeb3e7ea6641..5efc172d1e93 100644 --- a/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs +++ b/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs @@ -23,17 +23,17 @@ use crate::{ primitives::{self, Error, NodeIndex}, Config, HashOf, HashingOf, }; -use sp_mmr_primitives::{mmr_lib, utils::NodesUtils}; +use sp_mmr_primitives::{mmr_lib, mmr_lib::MMRStoreReadOps, utils::NodesUtils, LeafIndex}; use sp_std::prelude::*; /// Stateless verification of the proof for a batch of leaves. /// Note, the leaves should be sorted such that corresponding leaves and leaf indices have the /// same position in both the `leaves` vector and the `leaf_indices` vector contained in the -/// [primitives::Proof] +/// [primitives::LeafProof] pub fn verify_leaves_proof( root: H::Output, leaves: Vec>, - proof: primitives::Proof, + proof: primitives::LeafProof, ) -> Result where H: sp_runtime::traits::Hash, @@ -69,7 +69,8 @@ where T: Config, I: 'static, L: primitives::FullLeaf, - Storage: mmr_lib::MMRStore>, + Storage: + MMRStoreReadOps> + mmr_lib::MMRStoreWriteOps>, { mmr: mmr_lib::MMR, Hasher, L>, Storage>, leaves: NodeIndex, @@ -80,7 +81,8 @@ where T: Config, I: 'static, L: primitives::FullLeaf, - Storage: mmr_lib::MMRStore>, + Storage: + MMRStoreReadOps> + mmr_lib::MMRStoreWriteOps>, { /// Create a pointer to an existing MMR with given number of leaves. pub fn new(leaves: NodeIndex) -> Self { @@ -91,11 +93,11 @@ where /// Verify proof for a set of leaves. /// Note, the leaves should be sorted such that corresponding leaves and leaf indices have /// the same position in both the `leaves` vector and the `leaf_indices` vector contained in the - /// [primitives::Proof] + /// [primitives::LeafProof] pub fn verify_leaves_proof( &self, leaves: Vec, - proof: primitives::Proof>, + proof: primitives::LeafProof>, ) -> Result { let p = mmr_lib::MerkleProof::, Hasher, L>>::new( self.mmr.mmr_size(), @@ -117,6 +119,44 @@ where .map_err(|e| Error::Verify.log_debug(e)) } + pub fn verify_ancestry_proof( + &self, + ancestry_proof: primitives::AncestryProof>, + ) -> Result { + let prev_peaks_proof = + mmr_lib::NodeMerkleProof::, Hasher, L>>::new( + self.mmr.mmr_size(), + ancestry_proof + .items + .into_iter() + .map(|(index, hash)| (index, Node::Hash(hash))) + .collect(), + ); + + let raw_ancestry_proof = mmr_lib::AncestryProof::< + NodeOf, + Hasher, L>, + > { + prev_peaks: ancestry_proof + .prev_peaks + .into_iter() + .map(|hash| Node::Hash(hash)) + .collect(), + prev_size: mmr_lib::helper::leaf_index_to_mmr_size(ancestry_proof.prev_leaf_count - 1), + proof: prev_peaks_proof, + }; + + let prev_root = mmr_lib::ancestry_proof::bagging_peaks_hashes::< + NodeOf, + Hasher, L>, + >(raw_ancestry_proof.prev_peaks.clone()) + .map_err(|e| Error::Verify.log_debug(e))?; + let root = self.mmr.get_root().map_err(|e| Error::GetRoot.log_error(e))?; + raw_ancestry_proof + .verify_ancestor(root, prev_root) + .map_err(|e| Error::Verify.log_debug(e)) + } + /// Return the internal size of the MMR (number of nodes). #[cfg(test)] pub fn size(&self) -> NodeIndex { @@ -145,7 +185,7 @@ where /// Commit the changes to underlying storage, return current number of leaves and /// calculate the new MMR's root hash. 
- pub fn finalize(self) -> Result<(NodeIndex, HashOf), Error> { + pub fn finalize(mut self) -> Result<(NodeIndex, HashOf), Error> { let root = self.mmr.get_root().map_err(|e| Error::GetRoot.log_error(e))?; self.mmr.commit().map_err(|e| Error::Commit.log_error(e))?; Ok((self.leaves, root.hash())) @@ -166,7 +206,7 @@ where pub fn generate_proof( &self, leaf_indices: Vec, - ) -> Result<(Vec, primitives::Proof>), Error> { + ) -> Result<(Vec, primitives::LeafProof>), Error> { let positions = leaf_indices .iter() .map(|index| mmr_lib::leaf_index_to_pos(*index)) @@ -174,7 +214,7 @@ where let store = >::default(); let leaves = positions .iter() - .map(|pos| match mmr_lib::MMRStore::get_elem(&store, *pos) { + .map(|pos| match store.get_elem(*pos) { Ok(Some(Node::Data(leaf))) => Ok(leaf), e => Err(Error::LeafNotFound.log_debug(e)), }) @@ -184,11 +224,34 @@ where self.mmr .gen_proof(positions) .map_err(|e| Error::GenerateProof.log_error(e)) - .map(|p| primitives::Proof { + .map(|p| primitives::LeafProof { leaf_indices, leaf_count, items: p.proof_items().iter().map(|x| x.hash()).collect(), }) .map(|p| (leaves, p)) } + + pub fn generate_ancestry_proof( + &self, + prev_leaf_count: LeafIndex, + ) -> Result>, Error> { + let prev_mmr_size = NodesUtils::new(prev_leaf_count).size(); + let raw_ancestry_proof = self + .mmr + .gen_ancestry_proof(prev_mmr_size) + .map_err(|e| Error::GenerateProof.log_error(e))?; + + Ok(primitives::AncestryProof { + prev_peaks: raw_ancestry_proof.prev_peaks.into_iter().map(|p| p.hash()).collect(), + prev_leaf_count, + leaf_count: self.leaves, + items: raw_ancestry_proof + .proof + .proof_items() + .iter() + .map(|(index, item)| (*index, item.hash())) + .collect(), + }) + } } diff --git a/substrate/frame/merkle-mountain-range/src/mmr/storage.rs b/substrate/frame/merkle-mountain-range/src/mmr/storage.rs index f2acc35a137f..6848b8f1b990 100644 --- a/substrate/frame/merkle-mountain-range/src/mmr/storage.rs +++ b/substrate/frame/merkle-mountain-range/src/mmr/storage.rs @@ -60,7 +60,7 @@ impl Default for Storage { } } -impl mmr_lib::MMRStore> for Storage +impl mmr_lib::MMRStoreReadOps> for Storage where T: Config, I: 'static, @@ -98,13 +98,20 @@ where Ok(sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, &temp_key) .and_then(|v| codec::Decode::decode(&mut &*v).ok())) } +} +impl mmr_lib::MMRStoreWriteOps> for Storage +where + T: Config, + I: 'static, + L: primitives::FullLeaf + codec::Decode, +{ fn append(&mut self, _: NodeIndex, _: Vec>) -> mmr_lib::Result<()> { panic!("MMR must not be altered in the off-chain context.") } } -impl mmr_lib::MMRStore> for Storage +impl mmr_lib::MMRStoreReadOps> for Storage where T: Config, I: 'static, @@ -113,7 +120,14 @@ where fn get_elem(&self, pos: NodeIndex) -> mmr_lib::Result>> { Ok(Nodes::::get(pos).map(Node::Hash)) } +} +impl mmr_lib::MMRStoreWriteOps> for Storage +where + T: Config, + I: 'static, + L: primitives::FullLeaf, +{ fn append(&mut self, pos: NodeIndex, elems: Vec>) -> mmr_lib::Result<()> { if elems.is_empty() { return Ok(()) diff --git a/substrate/frame/merkle-mountain-range/src/tests.rs b/substrate/frame/merkle-mountain-range/src/tests.rs index 88de7511c9f2..f8cfcb4e2c28 100644 --- a/substrate/frame/merkle-mountain-range/src/tests.rs +++ b/substrate/frame/merkle-mountain-range/src/tests.rs @@ -22,7 +22,7 @@ use sp_core::{ offchain::{testing::TestOffchainExt, OffchainDbExt, OffchainWorkerExt}, H256, }; -use sp_mmr_primitives::{mmr_lib::helper, utils, Compact, Proof}; +use sp_mmr_primitives::{mmr_lib::helper, utils, Compact, 
LeafProof}; use sp_runtime::BuildStorage; pub(crate) fn new_test_ext() -> sp_io::TestExternalities { @@ -283,7 +283,7 @@ fn should_generate_proofs_correctly() { proofs[0], ( vec![Compact::new(((0, H256::repeat_byte(1)).into(), LeafData::new(1).into(),))], - Proof { + LeafProof { leaf_indices: vec![0], leaf_count: 7, items: vec![ @@ -298,7 +298,7 @@ fn should_generate_proofs_correctly() { historical_proofs[0][0], ( vec![Compact::new(((0, H256::repeat_byte(1)).into(), LeafData::new(1).into(),))], - Proof { leaf_indices: vec![0], leaf_count: 1, items: vec![] } + LeafProof { leaf_indices: vec![0], leaf_count: 1, items: vec![] } ) ); @@ -314,7 +314,7 @@ fn should_generate_proofs_correctly() { proofs[2], ( vec![Compact::new(((2, H256::repeat_byte(3)).into(), LeafData::new(3).into(),))], - Proof { + LeafProof { leaf_indices: vec![2], leaf_count: 7, items: vec![ @@ -334,7 +334,7 @@ fn should_generate_proofs_correctly() { historical_proofs[2][0], ( vec![Compact::new(((2, H256::repeat_byte(3)).into(), LeafData::new(3).into(),))], - Proof { + LeafProof { leaf_indices: vec![2], leaf_count: 3, items: vec![hex( @@ -354,7 +354,7 @@ fn should_generate_proofs_correctly() { historical_proofs[2][2], ( vec![Compact::new(((2, H256::repeat_byte(3)).into(), LeafData::new(3).into(),))], - Proof { + LeafProof { leaf_indices: vec![2], leaf_count: 5, items: vec![ @@ -372,7 +372,7 @@ fn should_generate_proofs_correctly() { ( // NOTE: the leaf index is equivalent to the block number(in this case 5) - 1 vec![Compact::new(((4, H256::repeat_byte(5)).into(), LeafData::new(5).into(),))], - Proof { + LeafProof { leaf_indices: vec![4], leaf_count: 7, items: vec![ @@ -387,7 +387,7 @@ fn should_generate_proofs_correctly() { historical_proofs[4][0], ( vec![Compact::new(((4, H256::repeat_byte(5)).into(), LeafData::new(5).into(),))], - Proof { + LeafProof { leaf_indices: vec![4], leaf_count: 5, items: vec![hex( @@ -402,7 +402,7 @@ fn should_generate_proofs_correctly() { proofs[6], ( vec![Compact::new(((6, H256::repeat_byte(7)).into(), LeafData::new(7).into(),))], - Proof { + LeafProof { leaf_indices: vec![6], leaf_count: 7, items: vec![ @@ -433,7 +433,7 @@ fn should_generate_batch_proof_correctly() { // then assert_eq!( proof, - Proof { + LeafProof { // the leaf indices are equivalent to the above specified block numbers - 1. 
leaf_indices: vec![0, 4, 5], leaf_count: 7, @@ -451,7 +451,7 @@ fn should_generate_batch_proof_correctly() { // then assert_eq!( historical_proof, - Proof { + LeafProof { leaf_indices: vec![0, 4, 5], leaf_count: 6, items: vec![ @@ -516,43 +516,40 @@ fn should_verify() { }); } -#[test] -fn should_verify_batch_proofs() { - fn generate_and_verify_batch_proof( - ext: &mut sp_io::TestExternalities, - block_numbers: &Vec, - blocks_to_add: usize, - ) { - let (leaves, proof) = ext.execute_with(|| { - crate::Pallet::::generate_proof(block_numbers.to_vec(), None).unwrap() - }); +fn generate_and_verify_batch_proof( + ext: &mut sp_io::TestExternalities, + block_numbers: &Vec, + blocks_to_add: usize, +) { + let (leaves, proof) = ext.execute_with(|| { + crate::Pallet::::generate_proof(block_numbers.to_vec(), None).unwrap() + }); - let max_block_number = ext.execute_with(|| frame_system::Pallet::::block_number()); - let min_block_number = block_numbers.iter().max().unwrap(); + let max_block_number = ext.execute_with(|| frame_system::Pallet::::block_number()); + let min_block_number = block_numbers.iter().max().unwrap(); - // generate all possible historical proofs for the given blocks - let historical_proofs = (*min_block_number..=max_block_number) - .map(|best_block| { - ext.execute_with(|| { - crate::Pallet::::generate_proof(block_numbers.to_vec(), Some(best_block)) - .unwrap() - }) + // generate all possible historical proofs for the given blocks + let historical_proofs = (*min_block_number..=max_block_number) + .map(|best_block| { + ext.execute_with(|| { + crate::Pallet::::generate_proof(block_numbers.to_vec(), Some(best_block)) + .unwrap() }) - .collect::>(); - - ext.execute_with(|| { - add_blocks(blocks_to_add); - // then - assert_eq!(crate::Pallet::::verify_leaves(leaves, proof), Ok(())); - historical_proofs.iter().for_each(|(leaves, proof)| { - assert_eq!( - crate::Pallet::::verify_leaves(leaves.clone(), proof.clone()), - Ok(()) - ); - }); }) - } + .collect::>(); + + ext.execute_with(|| { + add_blocks(blocks_to_add); + // then + assert_eq!(crate::Pallet::::verify_leaves(leaves, proof), Ok(())); + historical_proofs.iter().for_each(|(leaves, proof)| { + assert_eq!(crate::Pallet::::verify_leaves(leaves.clone(), proof.clone()), Ok(())); + }); + }) +} +#[test] +fn should_verify_batch_proofs() { let _ = env_logger::try_init(); use itertools::Itertools; @@ -790,3 +787,24 @@ fn does_not_panic_when_generating_historical_proofs() { ); }); } + +#[test] +fn generating_and_verifying_ancestry_proofs_works_correctly() { + let _ = env_logger::try_init(); + let mut ext = new_test_ext(); + ext.execute_with(|| add_blocks(500)); + ext.persist_offchain_overlay(); + register_offchain_ext(&mut ext); + + ext.execute_with(|| { + // Check that generating and verifying ancestry proofs works correctly + // for each previous block + for prev_block_number in 1..501 { + let proof = Pallet::::generate_ancestry_proof(prev_block_number, None).unwrap(); + Pallet::::verify_ancestry_proof(proof).unwrap(); + } + + // Check that we can't generate ancestry proofs for a future block. 
+ assert_eq!(Pallet::::generate_ancestry_proof(501, None), Err(Error::GenerateProof)); + }); +} diff --git a/substrate/frame/staking/src/migrations.rs b/substrate/frame/staking/src/migrations.rs index 510252be26c9..b2ddf77004f9 100644 --- a/substrate/frame/staking/src/migrations.rs +++ b/substrate/frame/staking/src/migrations.rs @@ -370,7 +370,7 @@ pub mod v10 { StorageVersion::::put(ObsoleteReleases::V10_0_0); log!(info, "MigrateToV10 executed successfully"); - T::DbWeight::get().reads_writes(1, 1) + T::DbWeight::get().reads_writes(1, 2) } else { log!(warn, "MigrateToV10 should be removed."); T::DbWeight::get().reads(1) diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr index 96504b7ce775..10418b915e36 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/deprecated_where_block.stderr @@ -107,7 +107,7 @@ note: required because it appears within the type `RuntimeEvent` 28 | | } | |_^ note: required by a bound in `EncodeLike` - --> $CARGO/parity-scale-codec-3.6.5/src/encode_like.rs + --> $CARGO/parity-scale-codec-3.6.11/src/encode_like.rs | | pub trait EncodeLike: Sized + Encode {} | ^^^^^ required by this bound in `EncodeLike` @@ -137,7 +137,7 @@ note: required because it appears within the type `RuntimeEvent` 28 | | } | |_^ note: required by a bound in `Decode` - --> $CARGO/parity-scale-codec-3.6.5/src/codec.rs + --> $CARGO/parity-scale-codec-3.6.11/src/codec.rs | | pub trait Decode: Sized { | ^^^^^ required by this bound in `Decode` @@ -286,7 +286,7 @@ note: required because it appears within the type `RuntimeCall` 28 | | } | |_^ note: required by a bound in `EncodeLike` - --> $CARGO/parity-scale-codec-3.6.5/src/encode_like.rs + --> $CARGO/parity-scale-codec-3.6.11/src/encode_like.rs | | pub trait EncodeLike: Sized + Encode {} | ^^^^^ required by this bound in `EncodeLike` @@ -317,7 +317,7 @@ note: required because it appears within the type `RuntimeCall` 28 | | } | |_^ note: required by a bound in `Decode` - --> $CARGO/parity-scale-codec-3.6.5/src/codec.rs + --> $CARGO/parity-scale-codec-3.6.11/src/codec.rs | | pub trait Decode: Sized { | ^^^^^ required by this bound in `Decode` diff --git a/substrate/frame/system/src/extensions/check_weight.rs b/substrate/frame/system/src/extensions/check_weight.rs index 70d1e7563327..061d543f8c31 100644 --- a/substrate/frame/system/src/extensions/check_weight.rs +++ b/substrate/frame/system/src/extensions/check_weight.rs @@ -64,17 +64,6 @@ where } } - /// Checks if the current extrinsic can fit into the block with respect to block weight limits. - /// - /// Upon successes, it returns the new block weight as a `Result`. - fn check_block_weight( - info: &DispatchInfoOf, - ) -> Result { - let maximum_weight = T::BlockWeights::get(); - let all_weight = Pallet::::block_weight(); - calculate_consumed_weight::(maximum_weight, all_weight, info) - } - /// Checks if the current extrinsic can fit into the block with respect to block length limits. /// /// Upon successes, it returns the new block length as a `Result`. 
@@ -113,7 +102,12 @@ where len: usize, ) -> Result<(), TransactionValidityError> { let next_len = Self::check_block_length(info, len)?; - let next_weight = Self::check_block_weight(info)?; + + let all_weight = Pallet::::block_weight(); + let maximum_weight = T::BlockWeights::get(); + let next_weight = + calculate_consumed_weight::(&maximum_weight, all_weight, info)?; + check_combined_proof_size(&maximum_weight, next_len, &next_weight)?; Self::check_extrinsic_weight(info)?; crate::AllExtrinsicsLen::::put(next_len); @@ -136,8 +130,32 @@ where } } +/// Check that the combined extrinsic length and proof size together do not exceed the PoV limit. +pub fn check_combined_proof_size( + maximum_weight: &BlockWeights, + next_len: u32, + next_weight: &crate::ConsumedWeight, +) -> Result<(), TransactionValidityError> { + // This extra check ensures that the extrinsic length does not push the + // PoV over the limit. + let total_pov_size = next_weight.total().proof_size().saturating_add(next_len as u64); + if total_pov_size > maximum_weight.max_block.proof_size() { + log::debug!( + target: LOG_TARGET, + "Extrinsic exceeds total pov size: {}kb, limit: {}kb", + total_pov_size as f64/1024.0, + maximum_weight.max_block.proof_size() as f64/1024.0 + ); + return Err(InvalidTransaction::ExhaustsResources.into()) + } + Ok(()) +} + +/// Checks if the current extrinsic can fit into the block with respect to block weight limits. +/// +/// Upon successes, it returns the new block weight as a `Result`. pub fn calculate_consumed_weight( - maximum_weight: BlockWeights, + maximum_weight: &BlockWeights, mut all_weight: crate::ConsumedWeight, info: &DispatchInfoOf, ) -> Result @@ -742,17 +760,90 @@ mod tests { // when assert_ok!(calculate_consumed_weight::<::RuntimeCall>( - maximum_weight.clone(), + &maximum_weight, all_weight.clone(), - &mandatory1 + &mandatory1, )); assert_err!( calculate_consumed_weight::<::RuntimeCall>( - maximum_weight, + &maximum_weight, all_weight, - &mandatory2 + &mandatory2, ), InvalidTransaction::ExhaustsResources ); } + + #[test] + fn maximum_proof_size_includes_length() { + let maximum_weight = BlockWeights::builder() + .base_block(Weight::zero()) + .for_class(DispatchClass::non_mandatory(), |w| { + w.base_extrinsic = Weight::zero(); + w.max_total = Some(Weight::from_parts(20, 10)); + }) + .for_class(DispatchClass::Mandatory, |w| { + w.base_extrinsic = Weight::zero(); + w.reserved = Some(Weight::from_parts(5, 10)); + w.max_total = None; + }) + .build_or_panic(); + + assert_eq!(maximum_weight.max_block, Weight::from_parts(20, 10)); + + // We have 10 reftime and 5 proof size left over. + let next_weight = crate::ConsumedWeight::new(|class| match class { + DispatchClass::Normal => Weight::from_parts(10, 5), + DispatchClass::Operational => Weight::from_parts(0, 0), + DispatchClass::Mandatory => Weight::zero(), + }); + + // Simple checks for the length + assert_ok!(check_combined_proof_size(&maximum_weight, 0, &next_weight)); + assert_ok!(check_combined_proof_size(&maximum_weight, 5, &next_weight)); + assert_err!( + check_combined_proof_size(&maximum_weight, 6, &next_weight), + InvalidTransaction::ExhaustsResources + ); + + // We have 10 reftime and 0 proof size left over. 
+ let next_weight = crate::ConsumedWeight::new(|class| match class { + DispatchClass::Normal => Weight::from_parts(10, 10), + DispatchClass::Operational => Weight::from_parts(0, 0), + DispatchClass::Mandatory => Weight::zero(), + }); + assert_ok!(check_combined_proof_size(&maximum_weight, 0, &next_weight)); + assert_err!( + check_combined_proof_size(&maximum_weight, 1, &next_weight), + InvalidTransaction::ExhaustsResources + ); + + // We have 10 reftime and 2 proof size left over. + // Used weight is spread across dispatch classes this time. + let next_weight = crate::ConsumedWeight::new(|class| match class { + DispatchClass::Normal => Weight::from_parts(10, 5), + DispatchClass::Operational => Weight::from_parts(0, 3), + DispatchClass::Mandatory => Weight::zero(), + }); + assert_ok!(check_combined_proof_size(&maximum_weight, 0, &next_weight)); + assert_ok!(check_combined_proof_size(&maximum_weight, 2, &next_weight)); + assert_err!( + check_combined_proof_size(&maximum_weight, 3, &next_weight), + InvalidTransaction::ExhaustsResources + ); + + // Ref time is over the limit. Should not happen, but we should make sure that it is + // ignored. + let next_weight = crate::ConsumedWeight::new(|class| match class { + DispatchClass::Normal => Weight::from_parts(30, 5), + DispatchClass::Operational => Weight::from_parts(0, 0), + DispatchClass::Mandatory => Weight::zero(), + }); + assert_ok!(check_combined_proof_size(&maximum_weight, 0, &next_weight)); + assert_ok!(check_combined_proof_size(&maximum_weight, 5, &next_weight)); + assert_err!( + check_combined_proof_size(&maximum_weight, 6, &next_weight), + InvalidTransaction::ExhaustsResources + ); + } } diff --git a/substrate/primitives/merkle-mountain-range/Cargo.toml b/substrate/primitives/merkle-mountain-range/Cargo.toml index b97cef138ed4..65d8bd79e5af 100644 --- a/substrate/primitives/merkle-mountain-range/Cargo.toml +++ b/substrate/primitives/merkle-mountain-range/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false } scale-info = { version = "2.11.1", default-features = false, features = ["derive"] } log = { workspace = true } -mmr-lib = { package = "ckb-merkle-mountain-range", version = "0.5.2", default-features = false } +mmr-lib = { package = "ckb-merkle-mountain-range", git = "https://github.com/paritytech/merkle-mountain-range.git", branch = "master", default-features = false } serde = { features = ["alloc", "derive"], optional = true, workspace = true } sp-api = { path = "../api", default-features = false } sp-core = { path = "../core", default-features = false } diff --git a/substrate/primitives/merkle-mountain-range/src/lib.rs b/substrate/primitives/merkle-mountain-range/src/lib.rs index c76d66bb08ea..3740047e0278 100644 --- a/substrate/primitives/merkle-mountain-range/src/lib.rs +++ b/substrate/primitives/merkle-mountain-range/src/lib.rs @@ -352,15 +352,29 @@ impl_leaf_data_for_tuple!(A:0, B:1, C:2, D:3, E:4); /// An MMR proof data for a group of leaves. #[derive(codec::Encode, codec::Decode, RuntimeDebug, Clone, PartialEq, Eq, TypeInfo)] -pub struct Proof { +pub struct LeafProof { /// The indices of the leaves the proof is for. pub leaf_indices: Vec, /// Number of leaves in MMR, when the proof was generated. pub leaf_count: NodeIndex, - /// Proof elements (hashes of siblings of inner nodes on the path to the leaf). + /// Proof elements (hashes of siblings of inner nodes on the path to the leafs). 
pub items: Vec, } +/// An MMR ancestry proof for a prior mmr root. +#[derive(codec::Encode, codec::Decode, RuntimeDebug, Clone, PartialEq, Eq, TypeInfo)] +pub struct AncestryProof { + /// Peaks of the ancestor's mmr + pub prev_peaks: Vec, + /// Number of leaves in the ancestor's MMR. + pub prev_leaf_count: u64, + /// Number of leaves in MMR, when the proof was generated. + pub leaf_count: NodeIndex, + /// Proof elements + /// (positions and hashes of siblings of inner nodes on the path to the previous peaks). + pub items: Vec<(u64, Hash)>, +} + /// Merkle Mountain Range operation error. #[cfg_attr(feature = "std", derive(thiserror::Error))] #[derive(RuntimeDebug, codec::Encode, codec::Decode, PartialEq, Eq, TypeInfo)] @@ -437,14 +451,14 @@ sp_api::decl_runtime_apis! { fn generate_proof( block_numbers: Vec, best_known_block_number: Option - ) -> Result<(Vec, Proof), Error>; + ) -> Result<(Vec, LeafProof), Error>; /// Verify MMR proof against on-chain MMR for a batch of leaves. /// /// Note this function will use on-chain MMR root hash and check if the proof matches the hash. /// Note, the leaves should be sorted such that corresponding leaves and leaf indices have the - /// same position in both the `leaves` vector and the `leaf_indices` vector contained in the [Proof] - fn verify_proof(leaves: Vec, proof: Proof) -> Result<(), Error>; + /// same position in both the `leaves` vector and the `leaf_indices` vector contained in the [LeafProof] + fn verify_proof(leaves: Vec, proof: LeafProof) -> Result<(), Error>; /// Verify MMR proof against given root hash for a batch of leaves. /// @@ -452,8 +466,8 @@ sp_api::decl_runtime_apis! { /// proof is verified against given MMR root hash. /// /// Note, the leaves should be sorted such that corresponding leaves and leaf indices have the - /// same position in both the `leaves` vector and the `leaf_indices` vector contained in the [Proof] - fn verify_proof_stateless(root: Hash, leaves: Vec, proof: Proof) + /// same position in both the `leaves` vector and the `leaf_indices` vector contained in the [LeafProof] + fn verify_proof_stateless(root: Hash, leaves: Vec, proof: LeafProof) -> Result<(), Error>; } } @@ -472,12 +486,12 @@ mod tests { type Test = DataOrHash; type TestCompact = Compact; - type TestProof = Proof<::Output>; + type TestProof = LeafProof<::Output>; #[test] fn should_encode_decode_proof() { // given - let proof: TestProof = Proof { + let proof: TestProof = LeafProof { leaf_indices: vec![5], leaf_count: 10, items: vec![ diff --git a/substrate/utils/frame/frame-utilities-cli/Cargo.toml b/substrate/utils/frame/frame-utilities-cli/Cargo.toml deleted file mode 100644 index 3952c9fd219f..000000000000 --- a/substrate/utils/frame/frame-utilities-cli/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "substrate-frame-cli" -version = "32.0.0" -authors.workspace = true -edition.workspace = true -license = "Apache-2.0" -homepage = "https://substrate.io" -repository.workspace = true -description = "cli interface for FRAME" -documentation = "https://docs.rs/substrate-frame-cli" -readme = "README.md" - -[lints] -workspace = true - -[dependencies] -clap = { version = "4.5.3", features = ["derive"] } -frame-support = { path = "../../../frame/support" } -frame-system = { path = "../../../frame/system" } -sc-cli = { path = "../../../client/cli" } -sp-core = { path = "../../../primitives/core" } -sp-runtime = { path = "../../../primitives/runtime" } - -[features] -default = [] diff --git a/substrate/utils/frame/frame-utilities-cli/README.md 
b/substrate/utils/frame/frame-utilities-cli/README.md deleted file mode 100644 index 54467a3ad770..000000000000 --- a/substrate/utils/frame/frame-utilities-cli/README.md +++ /dev/null @@ -1,3 +0,0 @@ -frame-system CLI utilities - -License: Apache-2.0 diff --git a/substrate/utils/frame/frame-utilities-cli/src/lib.rs b/substrate/utils/frame/frame-utilities-cli/src/lib.rs deleted file mode 100644 index 97129e36f7e9..000000000000 --- a/substrate/utils/frame/frame-utilities-cli/src/lib.rs +++ /dev/null @@ -1,22 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! frame-system CLI utilities - -mod pallet_id; - -pub use pallet_id::PalletIdCmd; diff --git a/substrate/utils/frame/frame-utilities-cli/src/pallet_id.rs b/substrate/utils/frame/frame-utilities-cli/src/pallet_id.rs deleted file mode 100644 index abc0cdb3ff52..000000000000 --- a/substrate/utils/frame/frame-utilities-cli/src/pallet_id.rs +++ /dev/null @@ -1,88 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! 
Implementation of the `palletid` subcommand - -use clap::Parser; -use frame_support::PalletId; -use sc_cli::{ - utils::print_from_uri, with_crypto_scheme, CryptoSchemeFlag, Error, KeystoreParams, - OutputTypeFlag, -}; -use sp_core::crypto::{unwrap_or_default_ss58_version, Ss58AddressFormat, Ss58Codec}; -use sp_runtime::traits::AccountIdConversion; - -/// The `palletid` command -#[derive(Debug, Parser)] -#[command(name = "palletid", about = "Inspect a module ID address")] -pub struct PalletIdCmd { - /// The module ID used to derive the account - id: String, - - /// network address format - #[arg( - long, - value_name = "NETWORK", - value_parser = sc_cli::parse_ss58_address_format, - ignore_case = true, - )] - pub network: Option, - - #[allow(missing_docs)] - #[command(flatten)] - pub output_scheme: OutputTypeFlag, - - #[allow(missing_docs)] - #[command(flatten)] - pub crypto_scheme: CryptoSchemeFlag, - - #[allow(missing_docs)] - #[command(flatten)] - pub keystore_params: KeystoreParams, -} - -impl PalletIdCmd { - /// runs the command - pub fn run(&self) -> Result<(), Error> - where - R: frame_system::Config, - R::AccountId: Ss58Codec, - { - if self.id.len() != 8 { - return Err("a module id must be a string of 8 characters".into()) - } - let password = self.keystore_params.read_password()?; - - let id_fixed_array: [u8; 8] = self.id.as_bytes().try_into().map_err(|_| { - "Cannot convert argument to palletid: argument should be 8-character string" - })?; - - let account_id: R::AccountId = PalletId(id_fixed_array).into_account_truncating(); - - with_crypto_scheme!( - self.crypto_scheme.scheme, - print_from_uri( - &account_id.to_ss58check_with_version(unwrap_or_default_ss58_version(self.network)), - password, - self.network, - self.output_scheme.output_type - ) - ); - - Ok(()) - } -} diff --git a/substrate/utils/frame/remote-externalities/src/lib.rs b/substrate/utils/frame/remote-externalities/src/lib.rs index 58cb901470c1..201b5e176f34 100644 --- a/substrate/utils/frame/remote-externalities/src/lib.rs +++ b/substrate/utils/frame/remote-externalities/src/lib.rs @@ -36,7 +36,7 @@ use sp_core::{ }, }; use sp_runtime::{ - traits::{Block as BlockT, Hash, HashingFor}, + traits::{Block as BlockT, HashingFor}, StateVersion, }; use sp_state_machine::TestExternalities; @@ -58,37 +58,39 @@ type ChildKeyValues = Vec<(ChildInfo, Vec)>; type SnapshotVersion = Compact; const LOG_TARGET: &str = "remote-ext"; -const DEFAULT_HTTP_ENDPOINT: &str = "https://rpc.polkadot.io:443"; -const SNAPSHOT_VERSION: SnapshotVersion = Compact(3); +const DEFAULT_HTTP_ENDPOINT: &str = "https://polkadot-try-runtime-node.parity-chains.parity.io:443"; +const SNAPSHOT_VERSION: SnapshotVersion = Compact(4); /// The snapshot that we store on disk. #[derive(Decode, Encode)] -struct Snapshot { +struct Snapshot { snapshot_version: SnapshotVersion, state_version: StateVersion, - block_hash: H, // > raw_storage: Vec<(Vec, (Vec, i32))>, - storage_root: H, + // The storage root of the state. This may vary from the storage root in the header, if not the + // entire state was fetched. 
+ storage_root: B::Hash, + header: B::Header, } -impl Snapshot { +impl Snapshot { pub fn new( state_version: StateVersion, - block_hash: H, raw_storage: Vec<(Vec, (Vec, i32))>, - storage_root: H, + storage_root: B::Hash, + header: B::Header, ) -> Self { Self { snapshot_version: SNAPSHOT_VERSION, state_version, - block_hash, raw_storage, storage_root, + header, } } - fn load(path: &PathBuf) -> Result, &'static str> { + fn load(path: &PathBuf) -> Result, &'static str> { let bytes = fs::read(path).map_err(|_| "fs::read failed.")?; // The first item in the SCALE encoded struct bytes is the snapshot version. We decode and // check that first, before proceeding to decode the rest of the snapshot. @@ -105,21 +107,21 @@ impl Snapshot { /// An externalities that acts exactly the same as [`sp_io::TestExternalities`] but has a few extra /// bits and pieces to it, and can be loaded remotely. -pub struct RemoteExternalities { +pub struct RemoteExternalities { /// The inner externalities. - pub inner_ext: TestExternalities, - /// The block hash with which we created this externality env. - pub block_hash: H::Out, + pub inner_ext: TestExternalities>, + /// The block header which we created this externality env. + pub header: B::Header, } -impl Deref for RemoteExternalities { - type Target = TestExternalities; +impl Deref for RemoteExternalities { + type Target = TestExternalities>; fn deref(&self) -> &Self::Target { &self.inner_ext } } -impl DerefMut for RemoteExternalities { +impl DerefMut for RemoteExternalities { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner_ext } @@ -859,7 +861,7 @@ where } } -impl Builder +impl Builder where B::Hash: DeserializeOwned, B::Header: DeserializeOwned, @@ -1030,6 +1032,21 @@ where Ok(()) } + async fn load_header(&self) -> Result { + let retry_strategy = + FixedInterval::new(Self::KEYS_PAGE_RETRY_INTERVAL).take(Self::MAX_RETRIES); + let get_header_closure = || { + ChainApi::<(), _, B::Header, ()>::header( + self.as_online().rpc_client(), + Some(self.as_online().at_expected()), + ) + }; + Retry::spawn(retry_strategy, get_header_closure) + .await + .map_err(|_| "Failed to fetch header for block from network")? + .ok_or("Network returned None block header") + } + /// Load the data from a remote server. The main code path is calling into `load_top_remote` and /// `load_child_remote`. /// @@ -1058,13 +1075,11 @@ where // If we need to save a snapshot, save the raw storage and root hash to the snapshot. 
if let Some(path) = self.as_online().state_snapshot.clone().map(|c| c.path) { let (raw_storage, storage_root) = pending_ext.into_raw_snapshot(); - let snapshot = Snapshot::::new( + let snapshot = Snapshot::::new( state_version, - self.as_online() - .at - .expect("set to `Some` in `init_remote_client`; must be called before; qed"), raw_storage.clone(), storage_root, + self.load_header().await?, ); let encoded = snapshot.encode(); log::info!( @@ -1086,22 +1101,21 @@ where Ok(pending_ext) } - async fn do_load_remote(&mut self) -> Result>, &'static str> { + async fn do_load_remote(&mut self) -> Result, &'static str> { self.init_remote_client().await?; - let block_hash = self.as_online().at_expected(); let inner_ext = self.load_remote_and_maybe_save().await?; - Ok(RemoteExternalities { block_hash, inner_ext }) + Ok(RemoteExternalities { header: self.load_header().await?, inner_ext }) } fn do_load_offline( &mut self, config: OfflineConfig, - ) -> Result>, &'static str> { + ) -> Result, &'static str> { let mut sp = Spinner::with_timer(Spinners::Dots, "Loading snapshot...".into()); let start = Instant::now(); info!(target: LOG_TARGET, "Loading snapshot from {:?}", &config.state_snapshot.path); - let Snapshot { snapshot_version: _, block_hash, state_version, raw_storage, storage_root } = - Snapshot::::load(&config.state_snapshot.path)?; + let Snapshot { snapshot_version: _, header, state_version, raw_storage, storage_root } = + Snapshot::::load(&config.state_snapshot.path)?; let inner_ext = TestExternalities::from_raw_snapshot( raw_storage, @@ -1110,12 +1124,10 @@ where ); sp.stop_with_message(format!("✅ Loaded snapshot ({:.2}s)", start.elapsed().as_secs_f32())); - Ok(RemoteExternalities { inner_ext, block_hash }) + Ok(RemoteExternalities { inner_ext, header }) } - pub(crate) async fn pre_build( - mut self, - ) -> Result>, &'static str> { + pub(crate) async fn pre_build(mut self) -> Result, &'static str> { let mut ext = match self.mode.clone() { Mode::Offline(config) => self.do_load_offline(config)?, Mode::Online(_) => self.do_load_remote().await?, @@ -1154,7 +1166,7 @@ where } // Public methods -impl Builder +impl Builder where B::Hash: DeserializeOwned, B::Header: DeserializeOwned, @@ -1191,7 +1203,7 @@ where self } - pub async fn build(self) -> Result>, &'static str> { + pub async fn build(self) -> Result, &'static str> { let mut ext = self.pre_build().await?; ext.commit_all().unwrap(); @@ -1226,7 +1238,7 @@ mod tests { init_logger(); Builder::::new() .mode(Mode::Offline(OfflineConfig { - state_snapshot: SnapshotConfig::new("test_data/proxy_test"), + state_snapshot: SnapshotConfig::new("test_data/test.snap"), })) .build() .await @@ -1241,7 +1253,7 @@ mod tests { // get the first key from the snapshot file. 
let some_key = Builder::<Block>::new()
 .mode(Mode::Offline(OfflineConfig {
- state_snapshot: SnapshotConfig::new("test_data/proxy_test"),
+ state_snapshot: SnapshotConfig::new("test_data/test.snap"),
 }))
 .build()
 .await
@@ -1255,7 +1267,7 @@ mod tests {
 Builder::<Block>::new()
 .mode(Mode::Offline(OfflineConfig {
- state_snapshot: SnapshotConfig::new("test_data/proxy_test"),
+ state_snapshot: SnapshotConfig::new("test_data/test.snap"),
 }))
 .blacklist_hashed_key(&some_key)
 .build()
@@ -1341,7 +1353,7 @@ mod remote_tests {
 .await
 .unwrap();
- assert_eq!(ext.block_hash, cached_ext.block_hash);
+ assert_eq!(ext.header.hash(), cached_ext.header.hash());
 }
 #[tokio::test]
diff --git a/substrate/utils/frame/remote-externalities/test_data/proxy_test b/substrate/utils/frame/remote-externalities/test_data/proxy_test
deleted file mode 100644
index f0b1b4f5af40..000000000000
Binary files a/substrate/utils/frame/remote-externalities/test_data/proxy_test and /dev/null differ
diff --git a/substrate/utils/frame/remote-externalities/test_data/test.snap b/substrate/utils/frame/remote-externalities/test_data/test.snap
new file mode 100644
index 000000000000..28f2012d0f2a
Binary files /dev/null and b/substrate/utils/frame/remote-externalities/test_data/test.snap differ
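
The `check_combined_proof_size` cases added earlier in this patch only constrain the proof-size (PoV) dimension of block weight; ref time is deliberately ignored even when it exceeds the limit. The following is a minimal, self-contained sketch of that rule. The `Weight` struct and the error string are simplified stand-ins (the real check works with `frame_support` weight and per-dispatch-class types); the numbers mirror the second test case above.

// Simplified stand-in for the two-dimensional weight type.
#[allow(dead_code)]
#[derive(Clone, Copy)]
struct Weight {
    ref_time: u64,
    proof_size: u64,
}

/// Reject the next extrinsic if the proof size already consumed across all dispatch
/// classes plus the extrinsic's own proof size would exceed the block's proof-size
/// budget. Ref time is intentionally not checked here.
fn check_combined_proof_size(
    maximum_weight: &Weight,
    next_extrinsic_proof_size: u64,
    consumed_per_class: &[Weight],
) -> Result<(), &'static str> {
    let consumed: u64 = consumed_per_class.iter().map(|w| w.proof_size).sum();
    if consumed.saturating_add(next_extrinsic_proof_size) > maximum_weight.proof_size {
        Err("ExhaustsResources")
    } else {
        Ok(())
    }
}

fn main() {
    let maximum_weight = Weight { ref_time: 20, proof_size: 10 };
    // 8 of the 10 proof-size units are already consumed across dispatch classes,
    // so 2 units remain for the next extrinsic.
    let consumed = [
        Weight { ref_time: 10, proof_size: 5 }, // Normal
        Weight { ref_time: 0, proof_size: 3 },  // Operational
        Weight { ref_time: 0, proof_size: 0 },  // Mandatory
    ];
    assert!(check_combined_proof_size(&maximum_weight, 0, &consumed).is_ok());
    assert!(check_combined_proof_size(&maximum_weight, 2, &consumed).is_ok());
    assert!(check_combined_proof_size(&maximum_weight, 3, &consumed).is_err());
}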
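
The `sp-mmr-primitives` change renames `Proof` to `LeafProof` and introduces `AncestryProof`. Below is a sketch of the two shapes with their type parameters written out, plus a SCALE encode/decode round-trip. Assumptions: `codec` is `parity-scale-codec` with the `derive` feature, the hash type is `[u8; 32]`, and the index aliases are local `u64` stand-ins; the real definitions also derive `RuntimeDebug` and `TypeInfo`.

use codec::{Decode, Encode};

type LeafIndex = u64;
type NodeIndex = u64;

/// Proof for a group of leaves (previously named `Proof`).
#[derive(Encode, Decode, Debug, Clone, PartialEq, Eq)]
pub struct LeafProof<Hash> {
    /// The indices of the leaves the proof is for.
    pub leaf_indices: Vec<LeafIndex>,
    /// Number of leaves in the MMR when the proof was generated.
    pub leaf_count: NodeIndex,
    /// Proof elements (hashes of siblings of inner nodes on the path to the leaves).
    pub items: Vec<Hash>,
}

/// Proof that an earlier MMR root, described by its peaks and leaf count, is an
/// ancestor of the MMR the proof was generated against.
#[derive(Encode, Decode, Debug, Clone, PartialEq, Eq)]
pub struct AncestryProof<Hash> {
    /// Peaks of the ancestor's MMR.
    pub prev_peaks: Vec<Hash>,
    /// Number of leaves in the ancestor's MMR.
    pub prev_leaf_count: u64,
    /// Number of leaves in the MMR when the proof was generated.
    pub leaf_count: NodeIndex,
    /// Positions and hashes of siblings of inner nodes on the path to the previous peaks.
    pub items: Vec<(u64, Hash)>,
}

fn main() {
    // Round-trip a leaf proof through SCALE encoding.
    let proof = LeafProof::<[u8; 32]> {
        leaf_indices: vec![5],
        leaf_count: 10,
        items: vec![[1u8; 32], [2u8; 32]],
    };
    let encoded = proof.encode();
    assert_eq!(LeafProof::<[u8; 32]>::decode(&mut &encoded[..]).unwrap(), proof);

    // Same for an ancestry proof.
    let ancestry = AncestryProof::<[u8; 32]> {
        prev_peaks: vec![[0u8; 32]],
        prev_leaf_count: 4,
        leaf_count: 10,
        items: vec![(7, [3u8; 32])],
    };
    let encoded = ancestry.encode();
    assert_eq!(AncestryProof::<[u8; 32]>::decode(&mut &encoded[..]).unwrap(), ancestry);
}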
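
The deleted `substrate-frame-cli` crate's `palletid` subcommand derived the account owned by an 8-byte module id and printed its SS58 form. The same derivation takes a few lines against `frame-support` and `sp-runtime`; this sketch uses the common treasury pallet id purely as an example value.

use frame_support::PalletId;
use sp_core::crypto::Ss58Codec;
use sp_runtime::{traits::AccountIdConversion, AccountId32};

fn main() {
    // The module id must be exactly 8 bytes, as the removed command enforced.
    let pallet_id = PalletId(*b"py/trsry");
    // Derive the account controlled by the pallet and print its SS58 address.
    let account: AccountId32 = pallet_id.into_account_truncating();
    println!("{}", account.to_ss58check());
}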
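
The `remote-externalities` snapshot now stores the full block header rather than just the block hash, and `SNAPSHOT_VERSION` is bumped to 4. Because the version is the first field of the SCALE-encoded struct, `Snapshot::load` can decode and check it on its own before attempting to decode the rest, so an old snapshot fails with a clear message instead of an opaque decode error. A sketch of that versioned-decode pattern follows; the fields are simplified stand-ins (the real struct also carries the state version, raw storage and storage root).

use codec::{Compact, Decode, Encode};

const SNAPSHOT_VERSION: Compact<u16> = Compact(4);

#[derive(Encode, Decode)]
struct Snapshot {
    // Encoded first, so it can be checked in isolation.
    snapshot_version: Compact<u16>,
    // Stand-in for the remaining fields, including the full block header.
    header: Vec<u8>,
}

fn load(bytes: &[u8]) -> Result<Snapshot, &'static str> {
    // Decode only the leading version and bail out early on a mismatch.
    let version =
        Compact::<u16>::decode(&mut &bytes[..]).map_err(|_| "Failed to decode snapshot version")?;
    if version != SNAPSHOT_VERSION {
        return Err("Unsupported snapshot version; regenerate the snapshot");
    }
    Snapshot::decode(&mut &bytes[..]).map_err(|_| "Decoding the snapshot failed")
}

fn main() {
    let snap = Snapshot { snapshot_version: SNAPSHOT_VERSION, header: vec![0u8; 32] };
    assert!(load(&snap.encode()).is_ok());

    let old = Snapshot { snapshot_version: Compact(3), header: vec![] };
    assert!(load(&old.encode()).is_err());
}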
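
The new `load_header` helper wraps the header RPC call in a bounded, fixed-interval retry before giving up, using the `FixedInterval`/`Retry::spawn` combination from the `tokio-retry` crate. The sketch below shows the same pattern in isolation; the interval, attempt count and the faked RPC call are assumptions for illustration.

use std::time::Duration;
use tokio_retry::{strategy::FixedInterval, Retry};

// Stand-in for the real header RPC call (`ChainApi::header(rpc_client, Some(at))`).
async fn fetch_header_over_rpc() -> Result<String, &'static str> {
    Ok("0xheader".to_string())
}

#[tokio::main]
async fn main() -> Result<(), &'static str> {
    // Retry every 5 seconds, at most 12 times, before surfacing the error.
    let retry_strategy = FixedInterval::new(Duration::from_secs(5)).take(12);
    let header = Retry::spawn(retry_strategy, fetch_header_over_rpc)
        .await
        .map_err(|_| "Failed to fetch header for block from network")?;
    println!("fetched {header}");
    Ok(())
}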
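
With `RemoteExternalities` now carrying a full header instead of a bare block hash, loading the checked-in snapshot offline and inspecting the block it was taken at looks roughly like the sketch below. The concrete `Block` type mirrors the crate's own test setup and, together with the snapshot path, is an assumption for the example; the crate is `frame-remote-externalities`.

use frame_remote_externalities::{Builder, Mode, OfflineConfig, SnapshotConfig};
use sp_runtime::traits::Header as _;

type Block = sp_runtime::testing::Block<sp_runtime::testing::ExtrinsicWrapper<sp_core::H256>>;

#[tokio::main]
async fn main() {
    // Build externalities from the on-disk snapshot rather than a live node.
    let mut ext = Builder::<Block>::new()
        .mode(Mode::Offline(OfflineConfig {
            state_snapshot: SnapshotConfig::new("test_data/test.snap"),
        }))
        .build()
        .await
        .unwrap();

    // The externalities expose the header of the block the state was captured at.
    println!("state taken at block #{} ({})", ext.header.number(), ext.header.hash());

    ext.execute_with(|| {
        // Run state queries or migration checks against the captured state here.
    });
}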