From 50291a720f713eb78c3654e88d292ba82f24c60a Mon Sep 17 00:00:00 2001 From: Lev Gorodetskii Date: Mon, 23 Dec 2024 08:28:13 -0300 Subject: [PATCH] Indexing Substrate networks (#1168) --- .gitignore | 1 + .vscode/launch.json | 36 + CHANGELOG.md | 29 +- docs/0.quickstart-substrate.md | 181 + docs/1.getting-started/7.datasources.md | 27 +- docs/1.getting-started/8.indexes.md | 15 +- docs/10.supported-networks/0.overview.md | 4 +- docs/10.supported-networks/1.arbitrum.md | 8 +- docs/10.supported-networks/10.canto.md | 2 +- docs/10.supported-networks/11.core.md | 2 +- docs/10.supported-networks/12.cyber.md | 2 +- docs/10.supported-networks/13.cyberconnect.md | 2 +- docs/10.supported-networks/14.dfk-chain.md | 2 +- docs/10.supported-networks/15.dogechain.md | 4 +- docs/10.supported-networks/16.ethereum.md | 8 +- docs/10.supported-networks/17.etherlink.md | 4 +- docs/10.supported-networks/18.exosama.md | 2 +- docs/10.supported-networks/19.fantom.md | 4 +- docs/10.supported-networks/2.astar.md | 6 +- docs/10.supported-networks/20.flare.md | 2 +- docs/10.supported-networks/21.gnosis.md | 2 +- .../22.immutable-zkevm.md | 4 +- docs/10.supported-networks/23.hokum.md | 2 +- docs/10.supported-networks/24.kakarot.md | 2 +- docs/10.supported-networks/25.karak.md | 2 +- docs/10.supported-networks/26.linea.md | 2 +- docs/10.supported-networks/27.mantle.md | 4 +- docs/10.supported-networks/28.merlin.md | 4 +- docs/10.supported-networks/29.metis.md | 2 +- docs/10.supported-networks/3.avalanche.md | 4 +- docs/10.supported-networks/30.mineplex.md | 2 +- docs/10.supported-networks/31.mode.md | 2 +- docs/10.supported-networks/32.moonbeam.md | 8 +- docs/10.supported-networks/33.neon.md | 4 +- docs/10.supported-networks/34.opbnb.md | 4 +- docs/10.supported-networks/35.optimism.md | 6 +- docs/10.supported-networks/36.peaq.md | 2 +- docs/10.supported-networks/37.polygon.md | 12 +- docs/10.supported-networks/38.prom.md | 4 +- docs/10.supported-networks/39.scroll.md | 4 +- 
docs/10.supported-networks/4.base.md | 4 +- docs/10.supported-networks/40.shibarium.md | 4 +- docs/10.supported-networks/41.shibuya.md | 2 +- docs/10.supported-networks/42.shiden.md | 2 +- docs/10.supported-networks/43.scale.md | 2 +- docs/10.supported-networks/44.sonic.md | 2 +- docs/10.supported-networks/45.taiko.md | 4 +- docs/10.supported-networks/46.tanssi.md | 2 +- docs/10.supported-networks/47.x1.md | 2 +- docs/10.supported-networks/48.x-layer.md | 4 +- docs/10.supported-networks/49.zksync.md | 4 +- docs/10.supported-networks/5.berachain.md | 4 +- docs/10.supported-networks/50.zora.md | 2 +- .../6.binance-smart-chain.md | 4 +- docs/10.supported-networks/7.bitgert.md | 4 +- docs/10.supported-networks/8.blast.md | 4 +- docs/10.supported-networks/9.bob.md | 2 +- docs/15.glossary.md | 2 +- ...balances.md => 10.tezos_token_balances.md} | 0 ...ansfers.md => 11.tezos_token_transfers.md} | 0 docs/2.indexes/4.substrate_events.md | 19 + ....tezos_big_maps.md => 5.tezos_big_maps.md} | 0 .../{5.tezos_events.md => 6.tezos_events.md} | 0 .../{6.tezos_head.md => 7.tezos_head.md} | 0 ...os_operations.md => 8.tezos_operations.md} | 0 ...ed.md => 9.tezos_operations_unfiltered.md} | 0 docs/2.indexes/_substrate.md | 6 + ...7.tzip_metadata.md => 10.tzip_metadata.md} | 0 .../{8.coinbase.md => 11.coinbase.md} | 0 docs/3.datasources/{9.ipfs.md => 12.ipfs.md} | 0 docs/3.datasources/{10.http.md => 13.http.md} | 0 ...{3.abi_etherscan.md => 3.evm_etherscan.md} | 6 +- docs/3.datasources/6.substrate_node.md | 20 + docs/3.datasources/7.substrate_subscan.md | 30 + docs/3.datasources/8.substrate_subsquid.md | 23 + .../{6.tezos_tzkt.md => 9.tezos_tzkt.md} | 0 docs/7.references/2.config.md | 265 +- docs/7.references/3.context.md | 4 +- docs/7.references/4.models.md | 48 + docs/8.examples/_demos_table.md | 1 + docs/9.release-notes/1.v8.2.md | 11 + docs/9.release-notes/{1.v8.1.md => 2.v8.1.md} | 0 docs/9.release-notes/{2.v8.0.md => 3.v8.0.md} | 0 docs/9.release-notes/{3.v7.5.md => 4.v7.5.md} | 
2 +- docs/9.release-notes/{4.v7.4.md => 5.v7.4.md} | 0 docs/9.release-notes/{5.v7.3.md => 6.v7.3.md} | 2 +- docs/9.release-notes/{6.v7.2.md => 7.v7.2.md} | 2 +- docs/9.release-notes/{7.v7.1.md => 8.v7.1.md} | 0 docs/9.release-notes/{8.v7.0.md => 9.v7.0.md} | 0 docs/9.release-notes/_7.0_changelog.md | 2 +- docs/9.release-notes/_7.3_changelog.md | 2 +- docs/9.release-notes/_7.5_changelog.md | 2 +- docs/9.release-notes/_8.0_changelog.md | 2 +- docs/9.release-notes/_8.1_changelog.md | 2 +- docs/9.release-notes/_8.2_changelog.md | 17 + docs/config.rst | 13 +- docs/models.rst | 8 + pdm.lock | 815 +- pyproject.toml | 98 +- requirements.txt | 82 +- schemas/dipdup-3.0.json | 392 +- scripts/docs.py | 8 +- src/demo_evm_events/dipdup.yaml | 2 +- src/demo_evm_transactions/dipdup.yaml | 2 +- src/demo_evm_uniswap/dipdup.yaml | 2 +- src/demo_substrate_events/.dockerignore | 22 + src/demo_substrate_events/.gitignore | 29 + src/demo_substrate_events/Makefile | 54 + src/demo_substrate_events/README.md | 49 + .../__init__.py | 0 .../abi/.keep} | 0 .../abi/assethub/v601.json | 8569 ++++++++++++++++ .../abi/assethub/v700.json | 8956 +++++++++++++++++ src/demo_substrate_events/configs/.keep | 0 .../configs/dipdup.compose.yaml | 24 + .../configs/dipdup.sqlite.yaml | 3 + .../configs/dipdup.swarm.yaml | 24 + src/demo_substrate_events/configs/replay.yaml | 18 + src/demo_substrate_events/deploy/.env.default | 15 + src/demo_substrate_events/deploy/.keep | 0 src/demo_substrate_events/deploy/Dockerfile | 9 + .../deploy/compose.sqlite.yaml | 18 + .../deploy/compose.swarm.yaml | 91 + src/demo_substrate_events/deploy/compose.yaml | 54 + .../deploy/sqlite.env.default | 5 + .../deploy/swarm.env.default | 15 + src/demo_substrate_events/dipdup.yaml | 31 + src/demo_substrate_events/graphql/.keep | 0 src/demo_substrate_events/handlers/.keep | 0 src/demo_substrate_events/handlers/batch.py | 12 + .../handlers/on_transfer.py | 52 + src/demo_substrate_events/hasura/.keep | 0 
src/demo_substrate_events/hooks/.keep | 0 .../hooks/on_index_rollback.py | 16 + src/demo_substrate_events/hooks/on_reindex.py | 7 + src/demo_substrate_events/hooks/on_restart.py | 7 + .../hooks/on_synchronized.py | 7 + src/demo_substrate_events/models/.keep | 0 src/demo_substrate_events/models/__init__.py | 13 + src/demo_substrate_events/py.typed | 0 src/demo_substrate_events/pyproject.toml | 50 + src/demo_substrate_events/sql/.keep | 0 .../sql/on_index_rollback/.keep | 0 .../sql/on_reindex/.keep | 0 .../sql/on_restart/.keep | 0 .../sql/on_synchronized/.keep | 0 .../sql/update_balance.sql | 22 + src/demo_substrate_events/types/.keep | 0 .../assets_transferred/__init__.py | 4 + .../assets_transferred/v601.py | 18 + .../assets_transferred/v700.py | 18 + src/dipdup/_survey.py | 6 +- src/dipdup/codegen/evm.py | 4 +- src/dipdup/codegen/substrate.py | 259 + src/dipdup/config/__init__.py | 53 +- src/dipdup/config/abi_etherscan.py | 28 +- src/dipdup/config/evm.py | 4 +- src/dipdup/config/evm_etherscan.py | 27 + src/dipdup/config/starknet_subsquid.py | 1 + src/dipdup/config/substrate.py | 44 + src/dipdup/config/substrate_events.py | 74 + src/dipdup/config/substrate_node.py | 36 + src/dipdup/config/substrate_subscan.py | 26 + src/dipdup/config/substrate_subsquid.py | 33 + src/dipdup/context.py | 46 +- src/dipdup/datasources/__init__.py | 18 +- src/dipdup/datasources/abi_etherscan.py | 84 +- src/dipdup/datasources/evm_etherscan.py | 82 + src/dipdup/datasources/evm_node.py | 7 +- src/dipdup/datasources/starknet_node.py | 6 +- src/dipdup/datasources/substrate_node.py | 399 + src/dipdup/datasources/substrate_subscan.py | 29 + src/dipdup/datasources/substrate_subsquid.py | 56 + src/dipdup/dipdup.py | 50 +- src/dipdup/env.py | 2 + src/dipdup/fields.py | 12 +- src/dipdup/index.py | 8 +- src/dipdup/indexes/substrate.py | 41 + .../indexes/substrate_events/fetcher.py | 64 + src/dipdup/indexes/substrate_events/index.py | 96 + src/dipdup/indexes/substrate_node.py | 36 + 
src/dipdup/indexes/substrate_subsquid.py | 25 + src/dipdup/models/__init__.py | 12 +- src/dipdup/models/_subsquid.py | 1 + src/dipdup/models/substrate.py | 136 + src/dipdup/models/substrate_node.py | 17 + src/dipdup/models/substrate_subsquid.py | 0 src/dipdup/project.py | 23 +- .../projects/demo_evm_events/dipdup.yaml.j2 | 2 +- .../demo_evm_transactions/dipdup.yaml.j2 | 2 +- .../projects/demo_evm_uniswap/dipdup.yaml.j2 | 2 +- .../demo_substrate_events/dipdup.yaml.j2 | 31 + .../handlers/on_transfer.py.j2 | 52 + .../models/__init__.py.j2 | 13 + .../demo_substrate_events/replay.yaml | 5 + .../sql/update_balance.sql | 22 + src/dipdup/runtimes.py | 165 + src/dipdup/type_registries/hydradx.json | 57 + tests/configs/test_evm.yml | 2 +- 199 files changed, 21865 insertions(+), 904 deletions(-) create mode 100644 docs/0.quickstart-substrate.md rename docs/2.indexes/{9.tezos_token_balances.md => 10.tezos_token_balances.md} (100%) rename docs/2.indexes/{10.tezos_token_transfers.md => 11.tezos_token_transfers.md} (100%) create mode 100644 docs/2.indexes/4.substrate_events.md rename docs/2.indexes/{4.tezos_big_maps.md => 5.tezos_big_maps.md} (100%) rename docs/2.indexes/{5.tezos_events.md => 6.tezos_events.md} (100%) rename docs/2.indexes/{6.tezos_head.md => 7.tezos_head.md} (100%) rename docs/2.indexes/{7.tezos_operations.md => 8.tezos_operations.md} (100%) rename docs/2.indexes/{8.tezos_operations_unfiltered.md => 9.tezos_operations_unfiltered.md} (100%) create mode 100644 docs/2.indexes/_substrate.md rename docs/3.datasources/{7.tzip_metadata.md => 10.tzip_metadata.md} (100%) rename docs/3.datasources/{8.coinbase.md => 11.coinbase.md} (100%) rename docs/3.datasources/{9.ipfs.md => 12.ipfs.md} (100%) rename docs/3.datasources/{10.http.md => 13.http.md} (100%) rename docs/3.datasources/{3.abi_etherscan.md => 3.evm_etherscan.md} (95%) create mode 100644 docs/3.datasources/6.substrate_node.md create mode 100644 docs/3.datasources/7.substrate_subscan.md create mode 100644 
docs/3.datasources/8.substrate_subsquid.md rename docs/3.datasources/{6.tezos_tzkt.md => 9.tezos_tzkt.md} (100%) create mode 100644 docs/9.release-notes/1.v8.2.md rename docs/9.release-notes/{1.v8.1.md => 2.v8.1.md} (100%) rename docs/9.release-notes/{2.v8.0.md => 3.v8.0.md} (100%) rename docs/9.release-notes/{3.v7.5.md => 4.v7.5.md} (95%) rename docs/9.release-notes/{4.v7.4.md => 5.v7.4.md} (100%) rename docs/9.release-notes/{5.v7.3.md => 6.v7.3.md} (97%) rename docs/9.release-notes/{6.v7.2.md => 7.v7.2.md} (98%) rename docs/9.release-notes/{7.v7.1.md => 8.v7.1.md} (100%) rename docs/9.release-notes/{8.v7.0.md => 9.v7.0.md} (100%) create mode 100644 docs/9.release-notes/_8.2_changelog.md create mode 100644 src/demo_substrate_events/.dockerignore create mode 100644 src/demo_substrate_events/.gitignore create mode 100644 src/demo_substrate_events/Makefile create mode 100644 src/demo_substrate_events/README.md rename src/{dipdup/indexes => demo_substrate_events}/__init__.py (100%) rename src/{dipdup/indexes/starknet_events/__init__.py => demo_substrate_events/abi/.keep} (100%) create mode 100644 src/demo_substrate_events/abi/assethub/v601.json create mode 100644 src/demo_substrate_events/abi/assethub/v700.json create mode 100644 src/demo_substrate_events/configs/.keep create mode 100644 src/demo_substrate_events/configs/dipdup.compose.yaml create mode 100644 src/demo_substrate_events/configs/dipdup.sqlite.yaml create mode 100644 src/demo_substrate_events/configs/dipdup.swarm.yaml create mode 100644 src/demo_substrate_events/configs/replay.yaml create mode 100644 src/demo_substrate_events/deploy/.env.default create mode 100644 src/demo_substrate_events/deploy/.keep create mode 100644 src/demo_substrate_events/deploy/Dockerfile create mode 100644 src/demo_substrate_events/deploy/compose.sqlite.yaml create mode 100644 src/demo_substrate_events/deploy/compose.swarm.yaml create mode 100644 src/demo_substrate_events/deploy/compose.yaml create mode 100644 
src/demo_substrate_events/deploy/sqlite.env.default create mode 100644 src/demo_substrate_events/deploy/swarm.env.default create mode 100644 src/demo_substrate_events/dipdup.yaml create mode 100644 src/demo_substrate_events/graphql/.keep create mode 100644 src/demo_substrate_events/handlers/.keep create mode 100644 src/demo_substrate_events/handlers/batch.py create mode 100644 src/demo_substrate_events/handlers/on_transfer.py create mode 100644 src/demo_substrate_events/hasura/.keep create mode 100644 src/demo_substrate_events/hooks/.keep create mode 100644 src/demo_substrate_events/hooks/on_index_rollback.py create mode 100644 src/demo_substrate_events/hooks/on_reindex.py create mode 100644 src/demo_substrate_events/hooks/on_restart.py create mode 100644 src/demo_substrate_events/hooks/on_synchronized.py create mode 100644 src/demo_substrate_events/models/.keep create mode 100644 src/demo_substrate_events/models/__init__.py create mode 100644 src/demo_substrate_events/py.typed create mode 100644 src/demo_substrate_events/pyproject.toml create mode 100644 src/demo_substrate_events/sql/.keep create mode 100644 src/demo_substrate_events/sql/on_index_rollback/.keep create mode 100644 src/demo_substrate_events/sql/on_reindex/.keep create mode 100644 src/demo_substrate_events/sql/on_restart/.keep create mode 100644 src/demo_substrate_events/sql/on_synchronized/.keep create mode 100644 src/demo_substrate_events/sql/update_balance.sql create mode 100644 src/demo_substrate_events/types/.keep create mode 100644 src/demo_substrate_events/types/assethub/substrate_events/assets_transferred/__init__.py create mode 100644 src/demo_substrate_events/types/assethub/substrate_events/assets_transferred/v601.py create mode 100644 src/demo_substrate_events/types/assethub/substrate_events/assets_transferred/v700.py create mode 100644 src/dipdup/codegen/substrate.py create mode 100644 src/dipdup/config/evm_etherscan.py create mode 100644 src/dipdup/config/substrate.py create mode 100644 
src/dipdup/config/substrate_events.py create mode 100644 src/dipdup/config/substrate_node.py create mode 100644 src/dipdup/config/substrate_subscan.py create mode 100644 src/dipdup/config/substrate_subsquid.py create mode 100644 src/dipdup/datasources/evm_etherscan.py create mode 100644 src/dipdup/datasources/substrate_node.py create mode 100644 src/dipdup/datasources/substrate_subscan.py create mode 100644 src/dipdup/datasources/substrate_subsquid.py create mode 100644 src/dipdup/indexes/substrate.py create mode 100644 src/dipdup/indexes/substrate_events/fetcher.py create mode 100644 src/dipdup/indexes/substrate_events/index.py create mode 100644 src/dipdup/indexes/substrate_node.py create mode 100644 src/dipdup/indexes/substrate_subsquid.py create mode 100644 src/dipdup/models/substrate.py create mode 100644 src/dipdup/models/substrate_node.py create mode 100644 src/dipdup/models/substrate_subsquid.py create mode 100644 src/dipdup/projects/demo_substrate_events/dipdup.yaml.j2 create mode 100644 src/dipdup/projects/demo_substrate_events/handlers/on_transfer.py.j2 create mode 100644 src/dipdup/projects/demo_substrate_events/models/__init__.py.j2 create mode 100644 src/dipdup/projects/demo_substrate_events/replay.yaml create mode 100644 src/dipdup/projects/demo_substrate_events/sql/update_balance.sql create mode 100644 src/dipdup/runtimes.py create mode 100644 src/dipdup/type_registries/hydradx.json diff --git a/.gitignore b/.gitignore index dee9f09d7..173806972 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,7 @@ !**/pdm.lock !**/README.md !**/.keep +!**/py.typed # Add Python code !**/*.py diff --git a/.vscode/launch.json b/.vscode/launch.json index a92be6516..1c528c436 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -37,6 +37,42 @@ "DIPDUP_NO_SYMLINK": "1" } }, + { + "name": "demo_substrate_events: run", + "type": "debugpy", + "request": "launch", + "module": "dipdup", + "args": [ + "-e", + ".env", + "run" + ], + "console": 
"integratedTerminal", + "cwd": "${workspaceFolder}/src/demo_substrate_events", + "justMyCode": false, + "env": { + "DIPDUP_DEBUG": "1", + "DIPDUP_NO_SYMLINK": "1" + } + }, + { + "name": "demo_substrate_events: init", + "type": "debugpy", + "request": "launch", + "module": "dipdup", + "args": [ + "-e", + ".env", + "init" + ], + "console": "integratedTerminal", + "cwd": "${workspaceFolder}/src/demo_substrate_events", + "justMyCode": false, + "env": { + "DIPDUP_DEBUG": "1", + "DIPDUP_NO_SYMLINK": "1" + } + }, { "name": "demo_evm_events: run", "type": "debugpy", diff --git a/CHANGELOG.md b/CHANGELOG.md index 6b16bdb13..90fe92a00 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,12 +6,22 @@ The format is based on [Keep a Changelog], and this project adheres to [Semantic Releases prior to 7.0 has been removed from this file to declutter search results; see the [archived copy](https://github.com/dipdup-io/dipdup/blob/8.0.0b5/CHANGELOG.md) for the full list. -## [Unreleased] +## [8.2.0rc1] - ????-??-?? + +### Added + +- substrate.events: Added `substrate.events` index kind to process Substrate events. +- substrate.node: Added `substrate.node` datasource to receive data from Substrate node. +- substrate.subscan: Added `substrate.subscan` datasource to fetch ABIs from Subscan. +- substrate.subsquid: Added `substrate.subsquid` datasource to fetch historical data from Squid Network. ### Fixed -- subsquid: Fixed float type for `timestamp` field on event / transaction deserialization. -- subsquid: Fixed empty field base conversion on event deserialization. +- evm.subsquid: Fixed event/transaction model deserialization. + +### Changed + +- evm.etherscan: Datasource has been renamed from `abi.etherscan` to `evm.etherscan` for consistency. ## [8.1.3] - 2024-12-20 ### Added -- abi.etherscan: Try to extract ABI from webpage when API call fails. 
+- evm.etherscan: Try to extract ABI from webpage when API call fails. - cli: Added `schema` subcommands to manage database migrations: `migrate`, `upgrade`, `downgrade`, `heads` and `history`. - cli: Added interactive mode for `new` command. - database: Support database migrations using [`aerich`](https://github.com/tortoise/aerich). @@ -225,7 +235,7 @@ Releases prior to 7.0 has been removed from this file to declutter search result ### Removed - config: `node_only` index config flag has been removed; add `evm.node` datasource(s) to the `datasources` list instead. -- config: `abi` index config field has been removed; add `abi.etherscan` datasource(s) to the `datasources` list instead. +- config: `abi` index config field has been removed; add `evm.etherscan` datasource(s) to the `datasources` list instead. ### Other @@ -285,7 +295,7 @@ Releases prior to 7.0 has been removed from this file to declutter search result ### Fixed -- abi.etherscan: Raise `AbiNotAvailableError` when contract is not verified. +- evm.etherscan: Raise `AbiNotAvailableError` when contract is not verified. - cli: Fixed incorrect indexer status logging. - evm.node: Fixed memory leak when using realtime subscriptions. - evm.node: Fixed processing chain reorgs. @@ -343,7 +353,7 @@ Releases prior to 7.0 has been removed from this file to declutter search result ### Fixed -- abi.etherscan: Fixed handling "rate limit reached" errors. +- evm.etherscan: Fixed handling "rate limit reached" errors. - cli: Fixed setting logger levels based on config and env variables. - http: Fixed incorrect number of retries performed on failed requests. @@ -511,7 +521,7 @@ Releases prior to 7.0 has been removed from this file to declutter search result ### Added -- abi.etherscan: Added `abi.etherscan` datasource to fetch ABIs from Etherscan. +- evm.etherscan: Added `evm.etherscan` datasource to fetch ABIs from Etherscan. - api: Added `/performance` endpoint to request indexing stats. 
- cli: Added `report` command group to manage performance and crash reports created by DipDup. - config: Added `advanced.decimal_precision` field to overwrite precision if it's not guessed correctly based on project models. @@ -567,7 +577,8 @@ Releases prior to 7.0 has been removed from this file to declutter search result [semantic versioning]: https://semver.org/spec/v2.0.0.html -[Unreleased]: https://github.com/dipdup-io/dipdup/compare/8.1.3...HEAD +[Unreleased]: https://github.com/dipdup-io/dipdup/compare/8.2.0rc1...HEAD +[8.2.0rc1]: https://github.com/dipdup-io/dipdup/compare/8.1.3...8.2.0rc1 [8.1.3]: https://github.com/dipdup-io/dipdup/compare/8.1.2...8.1.3 [8.1.2]: https://github.com/dipdup-io/dipdup/compare/8.1.1...8.1.2 [8.1.1]: https://github.com/dipdup-io/dipdup/compare/8.1.0...8.1.1 diff --git a/docs/0.quickstart-substrate.md b/docs/0.quickstart-substrate.md new file mode 100644 index 000000000..6cdcf9e7e --- /dev/null +++ b/docs/0.quickstart-substrate.md @@ -0,0 +1,181 @@ +--- +title: "Quickstart" +description: "This page will guide you through the steps to get your first selective indexer up and running in a few minutes without getting too deep into the details." +navigation.icon: "stars" +--- + +# Quickstart + +::banner{type="warning"} +Substrate support is in early preview stage. API and features may change in the future. +:: + +This page will guide you through the steps to get your first selective indexer up and running in a few minutes without getting too deep into the details. + +A selective blockchain indexer is an application that extracts and organizes specific blockchain data from multiple data sources, rather than processing all blockchain data. It allows users to index only relevant entities, reducing storage and computational requirements compared to full node indexing, and query data more efficiently for specific use cases. 
Think of it as a customizable filter that captures and stores only the blockchain data you need, making data retrieval faster and more resource-efficient. DipDup is a framework that helps you implement such an indexer. + +Let's create an indexer for the balance transfers in AssetHub network. Our goal is to save all transfers to the database and then calculate some statistics of its holders' activity. + +## Install DipDup + +A modern Linux/macOS distribution with Python 3.12 installed is required to run DipDup. + +The recommended way to install DipDup CLI is [pipx](https://pipx.pypa.io/stable/). We also provide a convenient helper script that installs all necessary tools. Run the following command in your terminal: + +{{ #include _curl-spell.md }} + +See the [Installation](../docs/1.getting-started/1.installation.md) page for all options. + +After installation, run the following command to switch to the preview branch: + +```shell [Terminal] +dipdup self install -f -r feat/substrate +``` + +## Create a project + +DipDup CLI has a built-in project generator. Run the following command in your terminal: + +```shell [Terminal] +dipdup new +``` + +Choose `From template`, then `Substrate` network and `demo_substrate_events` template. + +::banner{type="note"} +Want to skip a tutorial and start from scratch? Choose `Blank` at the first step instead and proceed to the [Config](../docs/1.getting-started/3.config.md) section. +:: + +Follow the instructions; the project will be created in the new directory. + +## Write a configuration file + +In the project root, you'll find a file named `dipdup.yaml`. It's the main configuration file of your indexer. 
We will discuss it in detail in the [Config](../docs/1.getting-started/3.config.md) section; now it has the following content: + +```yaml [dipdup.yaml] +{{ #include ../src/demo_substrate_events/dipdup.yaml }} +``` + +## Generate types and stubs + +Now it's time to generate typeclasses and callback stubs based on definitions from config. Examples below use `demo_substrate_events` as a package name; yours may differ. + +Run the following command: + +```shell [Terminal] +dipdup init +``` + +DipDup will create a Python package `demo_substrate_events` with everything you need to start writing your indexer. Use `package tree` command to see the generated structure: + +```shell [Terminal] +$ dipdup package tree +demo_substrate_events [/home/droserasprout/git/dipdup/src/demo_substrate_events] +├── abi +│ ├── assethub/v1000000.json +│ ├── assethub/v1001002.json +│ ├── ... +│ └── assethub/v9430.json +├── configs +│ ├── dipdup.compose.yaml +│ ├── dipdup.sqlite.yaml +│ ├── dipdup.swarm.yaml +│ └── replay.yaml +├── deploy +│ ├── .env.default +│ ├── Dockerfile +│ ├── compose.sqlite.yaml +│ ├── compose.swarm.yaml +│ ├── compose.yaml +│ ├── sqlite.env.default +│ └── swarm.env.default +├── graphql +├── handlers +│ ├── batch.py +│ └── on_transfer.py +├── hasura +├── hooks +│ ├── on_index_rollback.py +│ ├── on_reindex.py +│ ├── on_restart.py +│ └── on_synchronized.py +├── models +│ └── __init__.py +├── sql +├── types +│ ├── assethub/substrate_events/assets_transferred/__init__.py +│ ├── assethub/substrate_events/assets_transferred/v601.py +│ └── assethub/substrate_events/assets_transferred/v700.py +└── py.typed +``` + +That's a lot of files and directories! But don't worry, we will need only `models` and `handlers` sections in this guide. + +## Define data models + +DipDup supports storing data in SQLite, PostgreSQL and TimescaleDB databases. We use modified [Tortoise ORM](https://tortoise.github.io/) library as an abstraction layer. + +First, you need to define a model class. 
DipDup uses model definitions both for database schema and autogenerated GraphQL API. Our schema will consist of a single model `Holder` with the following fields: + +| | | +| ----------- | ----------------------------------- | +| `address` | account address | +| `balance` | token amount held by the account | +| `turnover` | total amount of transfer/mint calls | +| `tx_count` | number of transfers/mints | +| `last_seen` | time of the last transfer/mint | + +Here's how to define this model in DipDup: + +```python [models/__init__.py] +{{ #include ../src/demo_substrate_events/models/__init__.py }} +``` + +Using ORM is not a requirement; DipDup provides helpers to run SQL queries/scripts directly, see [Database](1.getting-started/5.database.md) page. + +## Implement handlers + +Everything's ready to implement an actual indexer logic. + +Our task is to index all the balance updates. Put some code to the `on_transfer` handler callback to process matched logs: + +```python [handlers/on_transfer.py] +{{ #include ../src/demo_substrate_events/handlers/on_transfer.py }} +``` + +And that's all! We can run the indexer now. + +## Next steps + +Run the indexer in memory: + +```shell +dipdup run +``` + +Store data in SQLite database (defaults to /tmp, set `SQLITE_PATH` env variable): + +```shell +dipdup -c . -c configs/dipdup.sqlite.yaml run +``` + +Or spawn a Compose stack with PostgreSQL and Hasura: + +```shell +cd deploy +cp .env.default .env +# Edit .env file before running +docker-compose up +``` + +DipDup will fetch all the historical data and then switch to realtime updates. You can check the progress in the logs. + +If you use SQLite, run this query to check the data: + +```bash +sqlite3 /tmp/demo_substrate_events.sqlite 'SELECT * FROM holder LIMIT 10' +``` + +If you run a Compose stack, open `http://127.0.0.1:8080` in your browser to see the Hasura console (an exposed port may differ). You can use it to explore the database and build GraphQL queries. + +Congratulations! 
You've just created your first DipDup indexer. Proceed to the Getting Started section to learn more about DipDup configuration and features. diff --git a/docs/1.getting-started/7.datasources.md b/docs/1.getting-started/7.datasources.md index db00d97d5..4b08b3c87 100644 --- a/docs/1.getting-started/7.datasources.md +++ b/docs/1.getting-started/7.datasources.md @@ -9,18 +9,21 @@ Datasources are DipDup connectors to various APIs. They are defined in config an Index datasources, ones that can be attached to a specific index, are prefixed with blockchain name, e.g. `tezos.tzkt` or `evm.subsquid`. -| kind | blockchain | description | -| ------------------------------------------------------------ | ---------------- | ------------------------------- | -| [evm.subsquid](../3.datasources/1.evm_subsquid.md) | ⟠ EVM-compatible | Subsquid Network API | -| [evm.node](../3.datasources/2.evm_node.md) | ⟠ EVM-compatible | Ethereum node | -| [abi.etherscan](../3.datasources/3.abi_etherscan.md) | ⟠ EVM-compatible | Provides ABIs for EVM contracts | -| [starknet.subsquid](../3.datasources/4.starknet_subsquid.md) | 🐺 Starknet | Subsquid Network API | -| [starknet.node](../3.datasources/5.starknet_node.md) | 🐺 Starknet | Starknet node | -| [tezos.tzkt](../3.datasources/6.tezos_tzkt.md) | ꜩ Tezos | TzKT API | -| [tzip_metadata](../3.datasources/7.tzip_metadata.md) | ꜩ Tezos | TZIP-16 metadata | -| [coinbase](../3.datasources/8.coinbase.md) | any | Coinbase price feed | -| [ipfs](../3.datasources/9.ipfs.md) | any | IPFS gateway | -| [http](../3.datasources/10.http.md) | any | Generic HTTP API | +| kind | blockchain | description | +| -------------------------------------------------------------- | ---------------- | ----------------------------------------------- | +| [evm.subsquid](../3.datasources/1.evm_subsquid.md) | ⟠ EVM-compatible | Subsquid Network API | +| [evm.node](../3.datasources/2.evm_node.md) | ⟠ EVM-compatible | Ethereum node | +| 
[evm.etherscan](../3.datasources/3.evm_etherscan.md) | ⟠ EVM-compatible | Provides ABIs for EVM contracts | +| [starknet.subsquid](../3.datasources/4.starknet_subsquid.md) | 🐺 Starknet | Subsquid Network API | +| [starknet.node](../3.datasources/5.starknet_node.md) | 🐺 Starknet | Starknet node | +| [substrate.node](../3.datasources/6.substrate_node.md) | 🔮 Substrate | Substrate node | +| [substrate.subscan](../3.datasources/7.substrate_subscan.md) | 🔮 Substrate | Provides pallet metadata for Substrate networks | +| [substrate.subsquid](../3.datasources/8.substrate_subsquid.md) | 🔮 Substrate | Subsquid Network API | +| [tezos.tzkt](../3.datasources/9.tezos_tzkt.md) | ꜩ Tezos | TzKT API | +| [tzip_metadata](../3.datasources/10.tzip_metadata.md) | ꜩ Tezos | TZIP-16 metadata | +| [coinbase](../3.datasources/11.coinbase.md) | any | Coinbase price feed | +| [ipfs](../3.datasources/12.ipfs.md) | any | IPFS gateway | +| [http](../3.datasources/13.http.md) | any | Generic HTTP API | ## Connection settings diff --git a/docs/1.getting-started/8.indexes.md b/docs/1.getting-started/8.indexes.md index 20db53b18..f65a5f2a8 100644 --- a/docs/1.getting-started/8.indexes.md +++ b/docs/1.getting-started/8.indexes.md @@ -14,13 +14,14 @@ Multiple indexes are available for different workloads. 
Every index is linked to | [evm.events](../2.indexes/1.evm_events.md) | ⟠ EVM-compatible | `evm` | event logs | | [evm.transactions](../2.indexes/2.evm_transactions.md) | ⟠ EVM-compatible | `evm` | transactions | | [starknet.events](../2.indexes/3.starknet_events.md) | 🐺 Starknet | `starknet` | event logs | -| [tezos.big_maps](../2.indexes/4.tezos_big_maps.md) | ꜩ Tezos | `tezos` | big map diffs | -| [tezos.events](../2.indexes/5.tezos_events.md) | ꜩ Tezos | `tezos` | events | -| [tezos.head](../2.indexes/6.tezos_head.md) | ꜩ Tezos | `tezos` | head blocks (realtime only) | -| [tezos.operations](../2.indexes/7.tezos_operations.md) | ꜩ Tezos | `tezos` | typed operations | -| [tezos.operations_unfiltered](../2.indexes/8.tezos_operations_unfiltered.md) | ꜩ Tezos | `tezos` | untyped operations | -| [tezos.token_balances](../2.indexes/9.tezos_token_balances.md) | ꜩ Tezos | `tezos` | TZIP-12/16 token balances | -| [tezos.token_transfers](../2.indexes/10.tezos_token_transfers.md) | ꜩ Tezos | `tezos` | TZIP-12/16 token transfers | +| [substrate.events](../2.indexes/4.substrate_events.md) | 🔮 Substrate | `substrate` | pallet events | +| [tezos.big_maps](../2.indexes/5.tezos_big_maps.md) | ꜩ Tezos | `tezos` | big map diffs | +| [tezos.events](../2.indexes/6.tezos_events.md) | ꜩ Tezos | `tezos` | events | +| [tezos.head](../2.indexes/7.tezos_head.md) | ꜩ Tezos | `tezos` | head blocks (realtime only) | +| [tezos.operations](../2.indexes/8.tezos_operations.md) | ꜩ Tezos | `tezos` | typed operations | +| [tezos.operations_unfiltered](../2.indexes/9.tezos_operations_unfiltered.md) | ꜩ Tezos | `tezos` | untyped operations | +| [tezos.token_balances](../2.indexes/10.tezos_token_balances.md) | ꜩ Tezos | `tezos` | TZIP-12/16 token balances | +| [tezos.token_transfers](../2.indexes/11.tezos_token_transfers.md) | ꜩ Tezos | `tezos` | TZIP-12/16 token transfers | Indexes can join multiple contracts considered as a single application. 
Also, contracts can be used by multiple indexes of any kind, but make sure that they are independent of each other and that indexed data don't overlap. diff --git a/docs/10.supported-networks/0.overview.md b/docs/10.supported-networks/0.overview.md index 07e0cfcf9..922bc495c 100644 --- a/docs/10.supported-networks/0.overview.md +++ b/docs/10.supported-networks/0.overview.md @@ -28,7 +28,7 @@ datasources: kind: evm.subsquid url: ${SUBSQUID_URL:-https://v2.archive.subsquid.io/network/ethereum-mainnet} etherscan: - kind: abi.etherscan + kind: evm.etherscan url: ${ETHERSCAN_URL:-https://api.etherscan.io/api} api_key: ${ETHERSCAN_API_KEY:-''} evm_node: @@ -41,6 +41,6 @@ To configure datasources for other networks, you need to change URLs and API key [evm.subsquid](../3.datasources/1.evm_subsquid.md) - Subsquid Network is the main source of historical data for EVM-compatible networks. It's free and available for many networks. -[abi.etherscan](../3.datasources/3.abi_etherscan.md) - Etherscan is a source of contract ABIs, which are used to generate types for the indexer. Many explorers have Etherscan-like API which could be used to retrieve ABIs. Some of them require an API key, which you can get on their website. If there's no Etherscan-like API available, you need to obtain contract ABI JSON somewhere and put it to the `abi//abi.json` path. Don't forget to run `dipdup init` after that to generate all necessary types. +[evm.etherscan](../3.datasources/3.evm_etherscan.md) - Etherscan is a source of contract ABIs, which are used to generate types for the indexer. Many explorers have Etherscan-like API which could be used to retrieve ABIs. Some of them require an API key, which you can get on their website. If there's no Etherscan-like API available, you need to obtain contract ABI JSON somewhere and put it to the `abi//abi.json` path. Don't forget to run `dipdup init` after that to generate all necessary types. 
[evm.node](../3.datasources/2.evm_node.md) - EVM node datasource can be used to fetch recent data not yet in Subsquid Network. API methods could vary a lot across different networks, but DipDup only uses a few of them, so most of the nodes will work. WebSocket URL can be specified to get real-time updates. This option can save you some requests to the node, but otherwise, it's not required. If Subsquid for your network is not available yet, you can use this datasource to fetch historical data, but it's significantly slower. diff --git a/docs/10.supported-networks/1.arbitrum.md b/docs/10.supported-networks/1.arbitrum.md index a7bee090b..77a80ffa5 100644 --- a/docs/10.supported-networks/1.arbitrum.md +++ b/docs/10.supported-networks/1.arbitrum.md @@ -18,7 +18,7 @@ Explorer: [Arbiscan](https://arbiscan.io/) | datasource | status | URLs | | -----------------:|:-------- | -------------------------------------------------------------------------------- | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/arbitrum-one` | -| **abi.etherscan** | 🟢 works | `https://api.arbiscan.io/api` | +| **evm.etherscan** | 🟢 works | `https://api.arbiscan.io/api` | | **evm.node** | 🟢 works | `https://arb-mainnet.g.alchemy.com/v2`
`wss://arb-mainnet.g.alchemy.com/v2` | ### Arbitrum Goerli @@ -26,7 +26,7 @@ Explorer: [Arbiscan](https://arbiscan.io/) | datasource | status | URLs | | -----------------:|:------------- | -------------------------------------------------------- | | **evm.subsquid** | ⚰️ deprecated | `https://v2.archive.subsquid.io/network/arbitrum-goerli` | -| **abi.etherscan** | ⚰️ deprecated | | +| **evm.etherscan** | ⚰️ deprecated | | | **evm.node** | ⚰️ deprecated | | ### Arbitrum Nova @@ -36,7 +36,7 @@ Explorer: [Arbiscan](https://nova.arbiscan.io/) | datasource | status | URLs | | -----------------:|:---------------- | ------------------------------------------------------ | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/arbitrum-nova` | -| **abi.etherscan** | 🟢 works | `https://api-nova.arbiscan.io/api` | +| **evm.etherscan** | 🟢 works | `https://api-nova.arbiscan.io/api` | | **evm.node** | 🤔 WS not tested | `https://nova.arbitrum.io/rpc` | ### Arbitrum Sepolia @@ -46,5 +46,5 @@ Explorer: [Arbiscan](https://sepolia.arbiscan.io/) | datasource | status | URLs | | -----------------:|:-------- | -------------------------------------------------------------------------------- | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/arbitrum-sepolia` | -| **abi.etherscan** | 🟢 works | `https://api-sepolia.arbiscan.io/api` | +| **evm.etherscan** | 🟢 works | `https://api-sepolia.arbiscan.io/api` | | **evm.node** | 🟢 works | `https://arb-sepolia.g.alchemy.com/v2`
`wss://arb-sepolia.g.alchemy.com/v2` | diff --git a/docs/10.supported-networks/10.canto.md b/docs/10.supported-networks/10.canto.md index d3c4171b6..29f8d76b4 100644 --- a/docs/10.supported-networks/10.canto.md +++ b/docs/10.supported-networks/10.canto.md @@ -12,5 +12,5 @@ description: "Canto network support" | datasource | status | URLs | | -----------------:|:------------- | ---------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/canto` | -| **abi.etherscan** | 🤔 not tested | | +| **evm.etherscan** | 🤔 not tested | | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/11.core.md b/docs/10.supported-networks/11.core.md index 6bdea9129..1ff762516 100644 --- a/docs/10.supported-networks/11.core.md +++ b/docs/10.supported-networks/11.core.md @@ -12,5 +12,5 @@ description: "Core network support" | datasource | status | URLs | | -----------------:|:------------- | ----------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/core-mainnet` | -| **abi.etherscan** | 🤔 not tested | | +| **evm.etherscan** | 🤔 not tested | | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/12.cyber.md b/docs/10.supported-networks/12.cyber.md index ea22f104e..245268785 100644 --- a/docs/10.supported-networks/12.cyber.md +++ b/docs/10.supported-networks/12.cyber.md @@ -12,5 +12,5 @@ description: "Cyber network support" | datasource | status | URLs | | -----------------:|:------------- | ------------------------------------------------------ | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/cyber-mainnet` | -| **abi.etherscan** | 🤔 not tested | | +| **evm.etherscan** | 🤔 not tested | | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/13.cyberconnect.md b/docs/10.supported-networks/13.cyberconnect.md index 8381b284c..07977b60b 100644 --- 
a/docs/10.supported-networks/13.cyberconnect.md +++ b/docs/10.supported-networks/13.cyberconnect.md @@ -12,5 +12,5 @@ description: "Cyberconnect network support" | datasource | status | URLs | | -----------------:|:------------- | ---------------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/cyberconnect-l2-testnet` | -| **abi.etherscan** | 🤔 not tested | | +| **evm.etherscan** | 🤔 not tested | | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/14.dfk-chain.md b/docs/10.supported-networks/14.dfk-chain.md index 9d4b65294..bed04e1a3 100644 --- a/docs/10.supported-networks/14.dfk-chain.md +++ b/docs/10.supported-networks/14.dfk-chain.md @@ -14,5 +14,5 @@ Explorer: [Avascan](https://avascan.info/blockchain/dfk/home) | datasource | status | URLs | | -----------------:|:------------- | -------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/dfk-chain` | -| **abi.etherscan** | 🤔 not tested | `https://api.avascan.info/v2` | +| **evm.etherscan** | 🤔 not tested | `https://api.avascan.info/v2` | | **evm.node** | 🤔 not tested | `https://subnets.avax.network/defi-kingdoms/dfk-chain/` | diff --git a/docs/10.supported-networks/15.dogechain.md b/docs/10.supported-networks/15.dogechain.md index ce20f95ec..2d13ee77f 100644 --- a/docs/10.supported-networks/15.dogechain.md +++ b/docs/10.supported-networks/15.dogechain.md @@ -16,7 +16,7 @@ Explorers: [Dogechain](https://dogechain.info/), [Blockscout](https://explorer.d | datasource | status | URLs | | -----------------:|:------------- | ---------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/dogechain-mainnet` | -| **abi.etherscan** | 🤔 not tested | `https://explorer.dogechain.dog/api` | +| **evm.etherscan** | 🤔 not tested | `https://explorer.dogechain.dog/api` | | 
**evm.node** | 🤔 not tested | `https://rpc.dogechain.dog` | ### Dogechain Testnet @@ -24,5 +24,5 @@ Explorers: [Dogechain](https://dogechain.info/), [Blockscout](https://explorer.d | datasource | status | URLs | | -----------------:|:------------- | ---------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/dogechain-testnet` | -| **abi.etherscan** | 🤔 not tested | `http://explorer-testnet.dogechain.dog/api` | +| **evm.etherscan** | 🤔 not tested | `http://explorer-testnet.dogechain.dog/api` | | **evm.node** | 🤔 not tested | `https://rpc-testnet.dogechain.dog` | diff --git a/docs/10.supported-networks/16.ethereum.md b/docs/10.supported-networks/16.ethereum.md index fd06fc293..e5dcb668c 100644 --- a/docs/10.supported-networks/16.ethereum.md +++ b/docs/10.supported-networks/16.ethereum.md @@ -16,7 +16,7 @@ Explorer: [Etherscan](https://etherscan.io/) | datasource | status | URLs | | -----------------:|:-------- | -------------------------------------------------------------------------------- | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/ethereum-mainnet` | -| **abi.etherscan** | 🟢 works | `https://api.etherscan.io/api` | +| **evm.etherscan** | 🟢 works | `https://api.etherscan.io/api` | | **evm.node** | 🟢 works | `https://eth-mainnet.g.alchemy.com/v2`
`wss://eth-mainnet.g.alchemy.com/v2` | ### Ethereum Goerli @@ -26,7 +26,7 @@ Explorer: [Etherscan](https://goerli.etherscan.io/) | datasource | status | URLs | | -----------------:|:------------- | -------------------------------------------------------- | | **evm.subsquid** | ⚰️ deprecated | `https://v2.archive.subsquid.io/network/ethereum-goerli` | -| **abi.etherscan** | ⚰️ deprecated | `https://api-goerli.etherscan.io/api` | +| **evm.etherscan** | ⚰️ deprecated | `https://api-goerli.etherscan.io/api` | | **evm.node** | ⚰️ deprecated | | ### Ethereum Holesky @@ -36,7 +36,7 @@ Explorer: [Etherscan](https://holesky.etherscan.io/) | datasource | status | URLs | | -----------------:|:------------- | --------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/ethereum-holesky` | -| **abi.etherscan** | 🤔 not tested | `https://api-holesky.etherscan.io/api` | +| **evm.etherscan** | 🤔 not tested | `https://api-holesky.etherscan.io/api` | | **evm.node** | 🤔 not tested | | ### Ethereum Sepolia @@ -46,5 +46,5 @@ Explorer: [Etherscan](https://sepolia.etherscan.io/) | datasource | status | URLs | | -----------------:|:------------- | --------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/ethereum-sepolia` | -| **abi.etherscan** | 🤔 not tested | `https://api-sepolia.etherscan.io/api` | +| **evm.etherscan** | 🤔 not tested | `https://api-sepolia.etherscan.io/api` | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/17.etherlink.md b/docs/10.supported-networks/17.etherlink.md index 2aeccf747..0fafe71dd 100644 --- a/docs/10.supported-networks/17.etherlink.md +++ b/docs/10.supported-networks/17.etherlink.md @@ -16,7 +16,7 @@ Explorer: [Blockscout](https://explorer.etherlink.com/) | datasource | status | URLs | | -----------------:|:------------- | ---------------------------------------------------------- 
| | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/etherlink-mainnet` | -| **abi.etherscan** | 🤔 not tested | `https://testnet-explorer.etherlink.com/api` | +| **evm.etherscan** | 🤔 not tested | `https://explorer.etherlink.com/api` | | **evm.node** | 🤔 not tested | `https://node.mainnet.etherlink.com` | ### Etherlink Testnet @@ -26,5 +26,5 @@ Explorer: [Blockscout](https://testnet.explorer.etherlink.com/) | datasource | status | URLs | | -----------------:|:------------- | ---------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/etherlink-testnet` | -| **abi.etherscan** | 🤔 not tested | `https://testnet-explorer.etherlink.com/api` | +| **evm.etherscan** | 🤔 not tested | `https://testnet-explorer.etherlink.com/api` | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/18.exosama.md b/docs/10.supported-networks/18.exosama.md index 3739b8e78..5ac547da5 100644 --- a/docs/10.supported-networks/18.exosama.md +++ b/docs/10.supported-networks/18.exosama.md @@ -14,5 +14,5 @@ Explorer: [Blockscout](https://explorer.exosama.com/) | datasource | status | URLs | | -----------------:|:------------- | ------------------------------------------------ | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/exosama` | -| **abi.etherscan** | 🤔 not tested | `https://explorer.exosama.com/api` | +| **evm.etherscan** | 🤔 not tested | `https://explorer.exosama.com/api` | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/19.fantom.md b/docs/10.supported-networks/19.fantom.md index 14f5fd840..073209656 100644 --- a/docs/10.supported-networks/19.fantom.md +++ b/docs/10.supported-networks/19.fantom.md @@ -16,7 +16,7 @@ Explorer: [Ftmscan](https://ftmscan.com/) | datasource | status | URLs | | -----------------:|:------------- | ------------------------------------------------------- | | **evm.subsquid** | 🟢 works |
`https://v2.archive.subsquid.io/network/fantom-mainnet` | -| **abi.etherscan** | 🟢 works | `https://api.ftmscan.com/api` | +| **evm.etherscan** | 🟢 works | `https://api.ftmscan.com/api` | | **evm.node** | 🤔 not tested | | ### Fantom Testnet @@ -26,5 +26,5 @@ Explorer: [Ftmscan](https://testnet.ftmscan.com/) | datasource | status | URLs | | -----------------:|:------------- | ------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/fantom-testnet` | -| **abi.etherscan** | 🤔 not tested | `https://api-testnet.ftmscan.com/api` | +| **evm.etherscan** | 🤔 not tested | `https://api-testnet.ftmscan.com/api` | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/2.astar.md b/docs/10.supported-networks/2.astar.md index 347ea20b8..320ff2d84 100644 --- a/docs/10.supported-networks/2.astar.md +++ b/docs/10.supported-networks/2.astar.md @@ -16,7 +16,7 @@ Explorers: [Blockscout](https://astar.blockscout.com/), [Subscan](https://astar. 
| datasource | status | URLs | | -----------------:|:------------ | ------------------------------------------------------ | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/astar-mainnet` | -| **abi.etherscan** | 🟢 works | `https://astar.blockscout.com/api` | +| **evm.etherscan** | 🟢 works | `https://astar.blockscout.com/api` | | **evm.node** | 🟡 HTTP only | `https://astar-mainnet.g.alchemy.com/v2` | ### Astar zkEVM Mainnet @@ -26,7 +26,7 @@ Explorer: [Blockscout](https://astar-zkevm.explorer.startale.com/) | datasource | status | URLs | | -----------------:|:------------ | ------------------------------------------------------------ | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/astar-zkevm-mainnet` | -| **abi.etherscan** | 🟢 works | `https://astar-zkevm.explorer.startale.com/` | +| **evm.etherscan** | 🟢 works | `https://astar-zkevm.explorer.startale.com/` | | **evm.node** | 🟡 HTTP only | `https://rpc.startale.com/astar-zkevm` | ### Astar zKyoto @@ -36,5 +36,5 @@ Explorer: [Blockscout](https://zkyoto.explorer.startale.com/) | datasource | status | URLs | | -----------------:|:------------ | ----------------------------------------------------- | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/astar-zkyoto` | -| **abi.etherscan** | 🟢 works | `https://zkyoto.explorer.startale.com/api` | +| **evm.etherscan** | 🟢 works | `https://zkyoto.explorer.startale.com/api` | | **evm.node** | 🟡 HTTP only | `https://rpc.startale.com/zkyoto` | diff --git a/docs/10.supported-networks/20.flare.md b/docs/10.supported-networks/20.flare.md index e806b6547..948d3c5f6 100644 --- a/docs/10.supported-networks/20.flare.md +++ b/docs/10.supported-networks/20.flare.md @@ -14,5 +14,5 @@ Explorer: [Flarescan](https://flarescan.com/) | datasource | status | URLs | | -----------------:|:------------- | ------------------------------------------------------------------ | | **evm.subsquid** | 🤔 not tested | 
`https://v2.archive.subsquid.io/network/flare-mainnet` | -| **abi.etherscan** | 🤔 not tested | `https://api.routescan.io/v2/network/mainnet/evm/14/etherscan/api` | +| **evm.etherscan** | 🤔 not tested | `https://api.routescan.io/v2/network/mainnet/evm/14/etherscan/api` | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/21.gnosis.md b/docs/10.supported-networks/21.gnosis.md index c99f44eba..ee5ad1dee 100644 --- a/docs/10.supported-networks/21.gnosis.md +++ b/docs/10.supported-networks/21.gnosis.md @@ -14,5 +14,5 @@ Explorer: [Gnosisscan](https://gnosisscan.io/) | datasource | status | URLs | | -----------------:|:------------- | ------------------------------------------------------- | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/gnosis-mainnet` | -| **abi.etherscan** | 🟢 works | `https://api.gnosisscan.io/api` | +| **evm.etherscan** | 🟢 works | `https://api.gnosisscan.io/api` | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/22.immutable-zkevm.md b/docs/10.supported-networks/22.immutable-zkevm.md index a8e5f9eb6..0473112e9 100644 --- a/docs/10.supported-networks/22.immutable-zkevm.md +++ b/docs/10.supported-networks/22.immutable-zkevm.md @@ -16,7 +16,7 @@ Explorer: [Blockscout](https://explorer.immutable.com/) | datasource | status | URLs | | -----------------:|:------------- | ---------------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/immutable-zkevm-mainnet` | -| **abi.etherscan** | 🤔 not tested | `https://explorer.immutable.com/` | +| **evm.etherscan** | 🤔 not tested | `https://explorer.immutable.com/` | | **evm.node** | 🤔 not tested | `https://rpc.immutable.com` | ### Immutable zkEVM Testnet @@ -26,5 +26,5 @@ Explorer: [Blockscout](https://explorer.testnet.immutable.com/) | datasource | status | URLs | | -----------------:|:------------- | ---------------------------------------------------------------- 
| | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/immutable-zkevm-testnet` | -| **abi.etherscan** | 🤔 not tested | `https://explorer.testnet.immutable.com/api` | +| **evm.etherscan** | 🤔 not tested | `https://explorer.testnet.immutable.com/api` | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/23.hokum.md b/docs/10.supported-networks/23.hokum.md index aa713d7ea..05b4dfafd 100644 --- a/docs/10.supported-networks/23.hokum.md +++ b/docs/10.supported-networks/23.hokum.md @@ -14,5 +14,5 @@ Explorer: [Blockscout](https://explorer.hokum.gg/) | datasource | status | URLs | | -----------------:|:------------- | --------------------------- | | **evm.subsquid** | 🔴 no API | N/A | -| **abi.etherscan** | 🤔 not tested | | +| **evm.etherscan** | 🤔 not tested | | | **evm.node** | 🤔 not tested | `https://mainnet.hokum.gg` | diff --git a/docs/10.supported-networks/24.kakarot.md b/docs/10.supported-networks/24.kakarot.md index 667a7e8ab..17e18252c 100644 --- a/docs/10.supported-networks/24.kakarot.md +++ b/docs/10.supported-networks/24.kakarot.md @@ -16,5 +16,5 @@ See step-by-step instructions on how to get started in [this guide](https://docs | datasource | status | URLs | | -----------------:|:------------- | --------------------------------- | | **evm.subsquid** | 🔴 no API | N/A | -| **abi.etherscan** | 🤔 not tested | | +| **evm.etherscan** | 🤔 not tested | | | **evm.node** | 🟢 works | `https://sepolia-rpc.kakarot.org` | diff --git a/docs/10.supported-networks/25.karak.md b/docs/10.supported-networks/25.karak.md index 0a7672803..54d0887ad 100644 --- a/docs/10.supported-networks/25.karak.md +++ b/docs/10.supported-networks/25.karak.md @@ -14,5 +14,5 @@ Explorer: [Blockscout](https://explorer.karak.network/) | datasource | status | URLs | | -----------------:|:------------- | --------------------------- | | **evm.subsquid** | 🔴 no API | N/A | -| **abi.etherscan** | 🤔 not tested | | +| **evm.etherscan** | 🤔 not tested | | | 
**evm.node** | 🤔 not tested | `https://rpc.karak.network` | diff --git a/docs/10.supported-networks/26.linea.md b/docs/10.supported-networks/26.linea.md index fbe52b76d..964ca513b 100644 --- a/docs/10.supported-networks/26.linea.md +++ b/docs/10.supported-networks/26.linea.md @@ -14,5 +14,5 @@ Explorer: [Lineascan](https://lineascan.build/) | datasource | status | URLs | | -----------------:|:-------- | ------------------------------------------------------------------------------- | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/linea-mainnet` | -| **abi.etherscan** | 🟢 works | `https://api.lineascan.build/api` | +| **evm.etherscan** | 🟢 works | `https://api.lineascan.build/api` | | **evm.node** | 🟢 works | `https://linea-mainnet.infura.io/v3`
`wss://linea-mainnet.infura.io/ws/v3` | diff --git a/docs/10.supported-networks/27.mantle.md b/docs/10.supported-networks/27.mantle.md index b7607e2fa..cd22e11f7 100644 --- a/docs/10.supported-networks/27.mantle.md +++ b/docs/10.supported-networks/27.mantle.md @@ -14,7 +14,7 @@ description: "Mantle network support" | datasource | status | URLs | | -----------------:|:------------- | ------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/mantle-mainnet` | -| **abi.etherscan** | 🤔 not tested | | +| **evm.etherscan** | 🤔 not tested | | | **evm.node** | 🤔 not tested | `https://rpc.mantle.xyz` | ## Mantle Sepolia @@ -22,5 +22,5 @@ description: "Mantle network support" | datasource | status | URLs | | -----------------:|:------------- | ------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/mantle-sepolia` | -| **abi.etherscan** | 🤔 not tested | | +| **evm.etherscan** | 🤔 not tested | | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/28.merlin.md b/docs/10.supported-networks/28.merlin.md index 552781498..31fc2ce56 100644 --- a/docs/10.supported-networks/28.merlin.md +++ b/docs/10.supported-networks/28.merlin.md @@ -16,7 +16,7 @@ Explorer: [Merlinscan](https://scan.merlinchain.io/) | datasource | status | URLs | | -----------------:|:------------- | ------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/merlin-mainnet` | -| **abi.etherscan** | 🤔 not tested | `https://scan.merlinchain.io/api/` | +| **evm.etherscan** | 🤔 not tested | `https://scan.merlinchain.io/api/` | | **evm.node** | 🤔 not tested | `https://rpc.merlinchain.io` | ### Merlin Testnet @@ -26,5 +26,5 @@ Explorer: [Merlinscan](https://testnet-scan.merlinchain.io/) | datasource | status | URLs | | -----------------:|:------------- | 
------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/merlin-testnet` | -| **abi.etherscan** | 🤔 not tested | `https://testnet-scan.merlinchain.io/api/` | +| **evm.etherscan** | 🤔 not tested | `https://testnet-scan.merlinchain.io/api/` | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/29.metis.md b/docs/10.supported-networks/29.metis.md index 4ec4c18c8..c3531eef0 100644 --- a/docs/10.supported-networks/29.metis.md +++ b/docs/10.supported-networks/29.metis.md @@ -14,5 +14,5 @@ Explorer: [Blockscout](https://andromeda-explorer.metis.io/) | datasource | status | URLs | | -----------------:|:------------- | -------------------------------------------------------| | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/metis-mainnet` | -| **abi.etherscan** | 🤔 not tested | `https://andromeda-explorer.metis.io/api` | +| **evm.etherscan** | 🤔 not tested | `https://andromeda-explorer.metis.io/api` | | **evm.node** | 🤔 not tested | `https://metis.drpc.org`
`wss://metis.drpc.org` | diff --git a/docs/10.supported-networks/3.avalanche.md b/docs/10.supported-networks/3.avalanche.md index 6ac38c339..a07b60448 100644 --- a/docs/10.supported-networks/3.avalanche.md +++ b/docs/10.supported-networks/3.avalanche.md @@ -16,7 +16,7 @@ Explorer: [Snowtrace](https://snowtrace.dev/) | datasource | status | URLs | | -----------------:|:------------ | --------------------------------------------------------------------- | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/avalanche-mainnet` | -| **abi.etherscan** | 🟢 works | `https://api.routescan.io/v2/network/mainnet/evm/43114/etherscan/api` | +| **evm.etherscan** | 🟢 works | `https://api.routescan.io/v2/network/mainnet/evm/43114/etherscan/api` | | **evm.node** | 🟡 HTTP only | `https://avalanche-mainnet.infura.io/v3` | ### Avalanche Testnet @@ -26,5 +26,5 @@ Explorer: [Snowtrace](https://testnet.snowtrace.dev/) | datasource | status | URLs | | -----------------:|:------------- | --------------------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/avalanche-testnet` | -| **abi.etherscan** | 🤔 not tested | `https://api.routescan.io/v2/network/testnet/evm/43113/etherscan/api` | +| **evm.etherscan** | 🤔 not tested | `https://api.routescan.io/v2/network/testnet/evm/43113/etherscan/api` | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/30.mineplex.md b/docs/10.supported-networks/30.mineplex.md index 52e2b4b5c..8f2294611 100644 --- a/docs/10.supported-networks/30.mineplex.md +++ b/docs/10.supported-networks/30.mineplex.md @@ -16,5 +16,5 @@ Explorer: [Mineplex](https://explorer.mineplex.io/) | datasource | status | URLs | | -----------------:|:------------- | --------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/mineplex-testnet` | -| **abi.etherscan** | 🤔 not tested | | +| 
**evm.etherscan** | 🤔 not tested | | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/31.mode.md b/docs/10.supported-networks/31.mode.md index c300587ff..90b4c0f64 100644 --- a/docs/10.supported-networks/31.mode.md +++ b/docs/10.supported-networks/31.mode.md @@ -14,5 +14,5 @@ Explorer: [Blockscout](https://explorer.mode.network/) | datasource | status | URLs | | -----------------:|:------------- | ------------------------------ | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/mode-mainnet` | -| **abi.etherscan** | 🤔 not tested | | +| **evm.etherscan** | 🤔 not tested | | | **evm.node** | 🤔 not tested | `https://mode.drpc.org`
`wss://mode.drpc.org` | diff --git a/docs/10.supported-networks/32.moonbeam.md b/docs/10.supported-networks/32.moonbeam.md index 76accd89d..15a58069a 100644 --- a/docs/10.supported-networks/32.moonbeam.md +++ b/docs/10.supported-networks/32.moonbeam.md @@ -16,7 +16,7 @@ Explorer: [Moonscan](https://moonscan.io/) | datasource | status | URLs | | -----------------:|:------------- | --------------------------------------------------------- | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/moonbeam-mainnet` | -| **abi.etherscan** | 🟢 works | `https://api-moonbeam.moonscan.io/api` | +| **evm.etherscan** | 🟢 works | `https://api-moonbeam.moonscan.io/api` | | **evm.node** | 🤔 not tested | | ### Moonbeam Moonbase @@ -26,7 +26,7 @@ Explorer: [Moonscan](https://moonbase.moonscan.io/) | datasource | status | URLs | | -----------------:|:------------- | --------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/moonbase-testnet` | -| **abi.etherscan** | 🤔 not tested | `https://api-moonbase.moonscan.io/api` | +| **evm.etherscan** | 🤔 not tested | `https://api-moonbase.moonscan.io/api` | | **evm.node** | 🤔 not tested | | ### Moonriver @@ -36,7 +36,7 @@ Explorer: [Moonscan](https://moonriver.moonscan.io/) | datasource | status | URLs | | -----------------:|:------------- | ---------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/moonriver-mainnet` | -| **abi.etherscan** | 🤔 not tested | `https://api-moonriver.moonscan.io/api` | +| **evm.etherscan** | 🤔 not tested | `https://api-moonriver.moonscan.io/api` | | **evm.node** | 🤔 not tested | | ### Moonsama @@ -44,5 +44,5 @@ Explorer: [Moonscan](https://moonriver.moonscan.io/) | datasource | status | URLs | | -----------------:|:------------- | ------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | 
`https://v2.archive.subsquid.io/network/moonsama` | -| **abi.etherscan** | 🤔 not tested | | +| **evm.etherscan** | 🤔 not tested | | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/33.neon.md b/docs/10.supported-networks/33.neon.md index 5e1980242..51f981929 100644 --- a/docs/10.supported-networks/33.neon.md +++ b/docs/10.supported-networks/33.neon.md @@ -16,7 +16,7 @@ Explorer: [Blockscout](https://neon.blockscout.com/) | datasource | status | URLs | | -----------------:|:------------- | ---------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/neon-mainnet` | -| **abi.etherscan** | 🤔 not tested | `https://neon.blockscout.com/api` | +| **evm.etherscan** | 🤔 not tested | `https://neon.blockscout.com/api` | | **evm.node** | 🤔 not tested | `https://neon-evm.drpc.org`
`wss://neon-evm.drpc.org` | ### Neon EVM Devnet @@ -26,5 +26,5 @@ Explorer: [Blockscout](https://neon-devnet.blockscout.com/) | datasource | status | URLs | | -----------------:|:------------- | ---------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/neon-devnet` | -| **abi.etherscan** | 🤔 not tested | `https://neon-devnet.blockscout.com/api` | +| **evm.etherscan** | 🤔 not tested | `https://neon-devnet.blockscout.com/api` | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/34.opbnb.md b/docs/10.supported-networks/34.opbnb.md index 88029eb12..801c26887 100644 --- a/docs/10.supported-networks/34.opbnb.md +++ b/docs/10.supported-networks/34.opbnb.md @@ -16,7 +16,7 @@ Explorers: [Bscscan](https://opbnb.bscscan.com/), [Opbnbscan](https://opbnbscan. | datasource | status | URLs | | -----------------:|:------------- | ------------------------------------------------------ | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/opbnb-mainnet` | -| **abi.etherscan** | 🤔 not tested | `https://api-opbnb.bscscan.com/api` | +| **evm.etherscan** | 🤔 not tested | `https://api-opbnb.bscscan.com/api` | | **evm.node** | 🤔 not tested | `https://opbnb.drpc.org`
`wss://opbnb.drpc.org` | ### opBNB Testnet @@ -26,5 +26,5 @@ Explorers: [Bscscan](https://opbnb-testnet.bscscan.com/), [Opbnbscan](https://te | datasource | status | URLs | | -----------------:|:------------- | ------------------------------------------------------ | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/opbnb-testnet` | -| **abi.etherscan** | 🤔 not tested | `https://api-opbnb-testnet.bscscan.com/api` | +| **evm.etherscan** | 🤔 not tested | `https://api-opbnb-testnet.bscscan.com/api` | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/35.optimism.md b/docs/10.supported-networks/35.optimism.md index 13e6fb7be..a5d929531 100644 --- a/docs/10.supported-networks/35.optimism.md +++ b/docs/10.supported-networks/35.optimism.md @@ -16,7 +16,7 @@ Explorer: [Etherscan](https://optimistic.etherscan.io/) | datasource | status | URLs | | -----------------:|:-------- | -------------------------------------------------------------------------------- | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/optimism-mainnet` | -| **abi.etherscan** | 🟢 works | `https://api-optimistic.etherscan.io/api` | +| **evm.etherscan** | 🟢 works | `https://api-optimistic.etherscan.io/api` | | **evm.node** | 🟢 works | `https://opt-mainnet.g.alchemy.com/v2`
`wss://opt-mainnet.g.alchemy.com/v2` | ### Optimism Goerli @@ -24,7 +24,7 @@ Explorer: [Etherscan](https://optimistic.etherscan.io/) | datasource | status | URLs | | -----------------:|:------------- | -------------------------------------------------------- | | **evm.subsquid** | ⚰️ deprecated | `https://v2.archive.subsquid.io/network/optimism-goerli` | -| **abi.etherscan** | ⚰️ deprecated | `https://api-goerli-optimistic.etherscan.io/api` | +| **evm.etherscan** | ⚰️ deprecated | `https://api-goerli-optimistic.etherscan.io/api` | | **evm.node** | ⚰️ deprecated | | ### Optimism Sepolia @@ -34,5 +34,5 @@ Explorer: [Etherscan](https://sepolia-optimism.etherscan.io/) | datasource | status | URLs | | -----------------:|:------------- | --------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/optimism-sepolia` | -| **abi.etherscan** | 🤔 not tested | `https://api-sepolia-optimistic.etherscan.io/api` | +| **evm.etherscan** | 🤔 not tested | `https://api-sepolia-optimistic.etherscan.io/api` | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/36.peaq.md b/docs/10.supported-networks/36.peaq.md index 7e2a1aa8c..c3111d3e9 100644 --- a/docs/10.supported-networks/36.peaq.md +++ b/docs/10.supported-networks/36.peaq.md @@ -12,5 +12,5 @@ description: "Peaq network support" | datasource | status | URLs | | -----------------:|:------------- | ----------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/peaq-mainnet` | -| **abi.etherscan** | 🤔 not tested | | +| **evm.etherscan** | 🤔 not tested | | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/37.polygon.md b/docs/10.supported-networks/37.polygon.md index b140c4545..b036db9ed 100644 --- a/docs/10.supported-networks/37.polygon.md +++ b/docs/10.supported-networks/37.polygon.md @@ -16,7 +16,7 @@ Explorer: 
[Polygonscan](https://polygonscan.com) | datasource | status | URLs | | -----------------:|:-------- | ---------------------------------------------------------------------------------------- | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/polygon-mainnet` | -| **abi.etherscan** | 🟢 works | `https://api.polygonscan.com/api` | +| **evm.etherscan** | 🟢 works | `https://api.polygonscan.com/api` | | **evm.node** | 🟢 works | `https://polygon-mainnet.g.alchemy.com/v2`
`wss://polygon-mainnet.g.alchemy.com/v2` | ### Polygon Mumbai @@ -26,7 +26,7 @@ Explorer: [Polygonscan](https://mumbai.polygonscan.com/) | datasource | status | URLs | | -----------------:|:------------- | ------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/polygon-mumbai` | -| **abi.etherscan** | 🤔 not tested | `https://api-testnet.polygonscan.com/api` | +| **evm.etherscan** | 🤔 not tested | `https://api-testnet.polygonscan.com/api` | | **evm.node** | 🤔 not tested | | ### Polygon Amoy Testnet @@ -34,7 +34,7 @@ Explorer: [Polygonscan](https://mumbai.polygonscan.com/) | datasource | status | URLs | | -----------------:|:------------- | ------------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/polygon-amoy-testnet` | -| **abi.etherscan** | 🤔 not tested | | +| **evm.etherscan** | 🤔 not tested | | | **evm.node** | 🤔 not tested | | ### Polygon zkEVM Mainnet @@ -44,7 +44,7 @@ Explorer: [Polygonscan](https://zkevm.polygonscan.com/) | datasource | status | URLs | | -----------------:|:------------ | -------------------------------------------------------------- | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/polygon-zkevm-mainnet` | -| **abi.etherscan** | 🟢 works | `https://api-zkevm.polygonscan.com/api` | +| **evm.etherscan** | 🟢 works | `https://api-zkevm.polygonscan.com/api` | | **evm.node** | 🟡 HTTP only | `https://polygonzkevm-mainnet.g.alchemy.com/v2` | ### Polygon zkEVM Testnet @@ -54,7 +54,7 @@ Explorer: [Polygonscan](https://testnet-zkevm.polygonscan.com/) | datasource | status | URLs | | -----------------:|:------------- | -------------------------------------------------------------- | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/polygon-zkevm-testnet` | -| **abi.etherscan** | 🟢 works | `https://api-testnet-zkevm.polygonscan.com/api` (🔴 404) | +| 
**evm.etherscan** | 🟢 works | `https://api-testnet-zkevm.polygonscan.com/api` (🔴 404) | | **evm.node** | 🤔 not tested | | ### Polygon zkEVM Cardona Testnet @@ -62,5 +62,5 @@ Explorer: [Polygonscan](https://testnet-zkevm.polygonscan.com/) | datasource | status | URLs | | -----------------:|:------------- | ---------------------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/polygon-zkevm-cardona-testnet` | -| **abi.etherscan** | 🤔 not tested | | +| **evm.etherscan** | 🤔 not tested | | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/38.prom.md b/docs/10.supported-networks/38.prom.md index a050540ea..ffe3aa3bf 100644 --- a/docs/10.supported-networks/38.prom.md +++ b/docs/10.supported-networks/38.prom.md @@ -16,7 +16,7 @@ Explorer: [Promscan](https://promscan.io/) | datasource | status | URLs | | -----------------:|:------------- | ------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/prom-mainnet` | -| **abi.etherscan** | 🤔 not tested | `https://promscan.io/api/v2/search?q=USDT` | +| **evm.etherscan** | 🤔 not tested | `https://promscan.io/api/v2/search?q=USDT` | | **evm.node** | 🤔 not tested | `https://prom-rpc.eu-north-2.gateway.fm` | ### Prom Testnet @@ -24,5 +24,5 @@ Explorer: [Promscan](https://promscan.io/) | datasource | status | URLs | | -----------------:|:------------- | ------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/prom-testnet` | -| **abi.etherscan** | 🤔 not tested | | +| **evm.etherscan** | 🤔 not tested | | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/39.scroll.md b/docs/10.supported-networks/39.scroll.md index a152ba84d..26874be1f 100644 --- a/docs/10.supported-networks/39.scroll.md +++ b/docs/10.supported-networks/39.scroll.md @@ -16,7 +16,7 @@ Explorer: 
[Scrollscan](https://scrollscan.com/) | datasource | status | URLs | | -----------------:|:------------- | ------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/scroll-mainnet` | -| **abi.etherscan** | 🟢 works | `https://api.scrollscan.com/api` | +| **evm.etherscan** | 🟢 works | `https://api.scrollscan.com/api` | | **evm.node** | 🟡 HTTP only | `https://rpc.scroll.io` | ## Scroll Sepolia @@ -24,5 +24,5 @@ Explorer: [Scrollscan](https://scrollscan.com/) | datasource | status | URLs | | -----------------:|:------------- | ------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/scroll-sepolia` | -| **abi.etherscan** | 🤔 not tested | | +| **evm.etherscan** | 🤔 not tested | | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/4.base.md b/docs/10.supported-networks/4.base.md index af3db3de4..8118a37ed 100644 --- a/docs/10.supported-networks/4.base.md +++ b/docs/10.supported-networks/4.base.md @@ -16,7 +16,7 @@ Explorer: [Basescan](https://basescan.org/) | datasource | status | URLs | | -----------------:|:------------ | ----------------------------------------------------- | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/base-mainnet` | -| **abi.etherscan** | 🟢 works | `https://api.basescan.org/api` | +| **evm.etherscan** | 🟢 works | `https://api.basescan.org/api` | | **evm.node** | 🟡 HTTP only | `https://base-mainnet.g.alchemy.com/v2` | ### Base Sepolia @@ -26,5 +26,5 @@ Explorer: [Basescan](https://sepolia.basescan.org/) | datasource | status | URLs | | -----------------:|:------------- | ----------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/base-sepolia` | -| **abi.etherscan** | 🤔 not tested | `https://api-sepolia.basescan.org/api` | +| **evm.etherscan** | 🤔 not tested | 
`https://api-sepolia.basescan.org/api` | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/40.shibarium.md b/docs/10.supported-networks/40.shibarium.md index abf9ff2b7..cddcab99b 100644 --- a/docs/10.supported-networks/40.shibarium.md +++ b/docs/10.supported-networks/40.shibarium.md @@ -14,7 +14,7 @@ description: "Shubarium network support" | datasource | status | URLs | | -----------------:|:------------- | -------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/shibarium` | -| **abi.etherscan** | 🤔 not tested | | +| **evm.etherscan** | 🤔 not tested | | | **evm.node** | 🤔 not tested | | ## Puppynet Testnet @@ -22,5 +22,5 @@ description: "Shubarium network support" | datasource | status | URLs | | -----------------:|:------------- | ------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/puppynet` | -| **abi.etherscan** | 🤔 not tested | | +| **evm.etherscan** | 🤔 not tested | | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/41.shibuya.md b/docs/10.supported-networks/41.shibuya.md index e0e7a391a..9d1617736 100644 --- a/docs/10.supported-networks/41.shibuya.md +++ b/docs/10.supported-networks/41.shibuya.md @@ -16,5 +16,5 @@ Explorer: [Subscan](https://shibuya.subscan.io/) | datasource | status | URLs | | -----------------:|:------------- | -------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/shibuya-testnet` | -| **abi.etherscan** | 🔴 no API | N/A | +| **evm.etherscan** | 🔴 no API | N/A | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/42.shiden.md b/docs/10.supported-networks/42.shiden.md index b53c2e38c..3ddec98bc 100644 --- a/docs/10.supported-networks/42.shiden.md +++ b/docs/10.supported-networks/42.shiden.md @@ -14,5 +14,5 @@ Explorer: 
[Subscan](https://shiden.subscan.io/) | datasource | status | URLs | | -----------------:|:------------- | -------------------------------------------------------------------- | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/shiden-mainnet` | -| **abi.etherscan** | 🤔 not tested | `https://support.subscan.io/api` | +| **evm.etherscan** | 🤔 not tested | `https://support.subscan.io/api` | | **evm.node** | 🤔 not tested | `https://shiden-rpc.dwellir.com`
`wss://shiden-rpc.dwellir.com` | diff --git a/docs/10.supported-networks/43.scale.md b/docs/10.supported-networks/43.scale.md index 51d5b1aa8..cb194b0e7 100644 --- a/docs/10.supported-networks/43.scale.md +++ b/docs/10.supported-networks/43.scale.md @@ -16,5 +16,5 @@ Explorers: [Blockscout](https://green-giddy-denebola.explorer.mainnet.skalenodes | datasource | status | URLs | | -----------------:|:-------- | ------------------------------------------------------------------------------------------------------------------------ | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/skale-nebula` | -| **abi.etherscan** | 🟢 works | `https://green-giddy-denebola.explorer.mainnet.skalenodes.com/api` | +| **evm.etherscan** | 🟢 works | `https://green-giddy-denebola.explorer.mainnet.skalenodes.com/api` | | **evm.node** | 🟢 works | `https://mainnet.skalenodes.com/v1/green-giddy-denebola`
`wss://mainnet.skalenodes.com/v1/ws/green-giddy-denebola` | diff --git a/docs/10.supported-networks/44.sonic.md b/docs/10.supported-networks/44.sonic.md index 6120da641..1beca983c 100644 --- a/docs/10.supported-networks/44.sonic.md +++ b/docs/10.supported-networks/44.sonic.md @@ -16,5 +16,5 @@ Explorer: [Sonic Explorer](https://explorer.soniclabs.com/) | datasource | status | URLs | | ----------------: | :------------ | ------------------------------------------------------ | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/sonic-testnet` | -| **abi.etherscan** | 🔴 no API | N/A | +| **evm.etherscan** | 🔴 no API | N/A | | **evm.node** | 🤔 not tested | `https://rpc.testnet.soniclabs.com` | diff --git a/docs/10.supported-networks/45.taiko.md b/docs/10.supported-networks/45.taiko.md index 2c927df2b..456b326d7 100644 --- a/docs/10.supported-networks/45.taiko.md +++ b/docs/10.supported-networks/45.taiko.md @@ -16,7 +16,7 @@ Explorer: [Taikoscan](https://taikoscan.io/) | datasource | status | URLs | | -----------------:|:------------ | --------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/taiko-mainnet` | -| **abi.etherscan** | 🤔 not tested | `https://api.taikoscan.io/api` | +| **evm.etherscan** | 🤔 not tested | `https://api.taikoscan.io/api` | | **evm.node** | 🤔 not tested | `https://rpc.taiko.xyz` | ### Taiko Hekla @@ -26,5 +26,5 @@ Explorer: [Taikoscan](https://hekla.taikoscan.io/) | datasource | status | URLs | | -----------------:|:------------ | --------------------------------------------------| | **evm.subsquid** | 🔴 no API | N/A | -| **abi.etherscan** | 🤔 not tested | `https://api-hekla.taikoscan.io/api` | +| **evm.etherscan** | 🤔 not tested | `https://api-hekla.taikoscan.io/api` | | **evm.node** | 🤔 not tested | `https://rpc.hekla.taiko.xyz` | diff --git a/docs/10.supported-networks/46.tanssi.md b/docs/10.supported-networks/46.tanssi.md index 
05539533c..e74b102e8 100644 --- a/docs/10.supported-networks/46.tanssi.md +++ b/docs/10.supported-networks/46.tanssi.md @@ -14,5 +14,5 @@ Explorer: [Blockscout](https://3001-blockscout.a.dancebox.tanssi.network) (🔴 | datasource | status | URLs | | -----------------:|:------------- | ---------------------------------------------------------------- | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/tanssi` | -| **abi.etherscan** | 🟢 works | `https://3001-blockscout.a.dancebox.tanssi.network/api` (🔴 404) | +| **evm.etherscan** | 🟢 works | `https://3001-blockscout.a.dancebox.tanssi.network/api` (🔴 404) | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/47.x1.md b/docs/10.supported-networks/47.x1.md index 0e02a8c55..5a66a50d7 100644 --- a/docs/10.supported-networks/47.x1.md +++ b/docs/10.supported-networks/47.x1.md @@ -12,5 +12,5 @@ description: "X1 network support" | datasource | status | URLs | | -----------------:|:------------- | --------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/x1-testnet` | -| **abi.etherscan** | 🤔 not tested | | +| **evm.etherscan** | 🤔 not tested | | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/48.x-layer.md b/docs/10.supported-networks/48.x-layer.md index 7827f62c2..1aaba360d 100644 --- a/docs/10.supported-networks/48.x-layer.md +++ b/docs/10.supported-networks/48.x-layer.md @@ -14,7 +14,7 @@ description: "X Layer network support" | datasource | status | URLs | | -----------------:|:------------- | ------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/xlayer-mainnet` | -| **abi.etherscan** | 🤔 not tested | | +| **evm.etherscan** | 🤔 not tested | | | **evm.node** | 🤔 not tested | `https://xlayer.drpc.org`
`wss://xlayer.drpc.org` | ### X Layer Testnet @@ -22,5 +22,5 @@ description: "X Layer network support" | datasource | status | URLs | | -----------------:|:------------- | ------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/xlayer-testnet` | -| **abi.etherscan** | 🤔 not tested | | +| **evm.etherscan** | 🤔 not tested | | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/49.zksync.md b/docs/10.supported-networks/49.zksync.md index 354eabcf8..6325a06de 100644 --- a/docs/10.supported-networks/49.zksync.md +++ b/docs/10.supported-networks/49.zksync.md @@ -18,7 +18,7 @@ Explorer: [Zkscan](https://zkscan.io/) | datasource | status | URLs | | -----------------:|:------------- | ------------------------------------------------------- | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/zksync-mainnet` | -| **abi.etherscan** | 🤔 not tested | `https://block-explorer-api.mainnet.zksync.io/api` | +| **evm.etherscan** | 🤔 not tested | `https://block-explorer-api.mainnet.zksync.io/api` | | **evm.node** | 🤔 not tested | `https://zksync.drpc.org`
`wss://zksync.drpc.org` | ### zkSync Sepolia @@ -28,5 +28,5 @@ Explorer: [Zkscan](https://zkscan.io/) | datasource | status | URLs | | -----------------:|:------------- | ------------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/zksync-sepolia` | -| **abi.etherscan** | 🤔 not tested | `https://block-explorer-api.testnet.zksync.io/api` | +| **evm.etherscan** | 🤔 not tested | `https://block-explorer-api.testnet.zksync.io/api` | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/5.berachain.md b/docs/10.supported-networks/5.berachain.md index a4a29c868..da5ec0748 100644 --- a/docs/10.supported-networks/5.berachain.md +++ b/docs/10.supported-networks/5.berachain.md @@ -16,7 +16,7 @@ Explorer: [Beratrail](https://artio.beratrail.io/) | datasource | status | URLs | | -----------------:|:------------- | --------------------------------------------------------------------- | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/berachain-artio` | -| **abi.etherscan** | 🟢 works | `https://api.routescan.io/v2/network/testnet/evm/80085/etherscan/api` | +| **evm.etherscan** | 🟢 works | `https://api.routescan.io/v2/network/testnet/evm/80085/etherscan/api` | | **evm.node** | 🤔 not tested | | ### Berachain bArtio @@ -26,5 +26,5 @@ Explorer: [Beratrail]) | datasource | status | URLs | | -----------------:|:------------- | --------------------------------------------------------------------- | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/berachain-bartio` | -| **abi.etherscan** | 🟢 works | `https://api.routescan.io/v2/network/testnet/evm/80084/etherscan/api` | +| **evm.etherscan** | 🟢 works | `https://api.routescan.io/v2/network/testnet/evm/80084/etherscan/api` | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/50.zora.md b/docs/10.supported-networks/50.zora.md index ab2328d33..47fb1863b 100644 --- 
a/docs/10.supported-networks/50.zora.md +++ b/docs/10.supported-networks/50.zora.md @@ -16,5 +16,5 @@ Explorer: [Zorascan](https://zora.thesuperscan.io/) | datasource | status | URLs | | -----------------:|:------------- | ----------------------------------------------------------------------- | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/zora-mainnet` | -| **abi.etherscan** | 🟢 works | `https://api.routescan.io/v2/network/mainnet/evm/7777777/etherscan/api` | +| **evm.etherscan** | 🟢 works | `https://api.routescan.io/v2/network/mainnet/evm/7777777/etherscan/api` | | **evm.node** | 🤔 not tested | `https://zora.drpc.org`
`wss://zora.drpc.org` | diff --git a/docs/10.supported-networks/6.binance-smart-chain.md b/docs/10.supported-networks/6.binance-smart-chain.md index c2799f1c8..175537b43 100644 --- a/docs/10.supported-networks/6.binance-smart-chain.md +++ b/docs/10.supported-networks/6.binance-smart-chain.md @@ -16,7 +16,7 @@ Explorer: [Bscscan](https://bscscan.com/) | datasource | status | URLs | | -----------------:|:-------- | -------------------------------------------------------- | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/binance-mainnet` | -| **abi.etherscan** | 🟢 works | `https://api.bscscan.com/api` | +| **evm.etherscan** | 🟢 works | `https://api.bscscan.com/api` | | **evm.node** | 🟢 works | `https://go.getblock.io/`
`wss://go.getblock.io/` | ### Binance Smart Chain Testnet @@ -26,5 +26,5 @@ Explorer: [Bscscan](https://testnet.bscscan.com/) | datasource | status | URLs | | -----------------:|:-------- | -------------------------------------------------------- | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/binance-testnet` | -| **abi.etherscan** | 🟢 works | `https://api-testnet.bscscan.com/api` | +| **evm.etherscan** | 🟢 works | `https://api-testnet.bscscan.com/api` | | **evm.node** | 🟢 works | `https://go.getblock.io/`
`wss://go.getblock.io/` | diff --git a/docs/10.supported-networks/7.bitgert.md b/docs/10.supported-networks/7.bitgert.md index 33122bdf0..632c5e432 100644 --- a/docs/10.supported-networks/7.bitgert.md +++ b/docs/10.supported-networks/7.bitgert.md @@ -14,7 +14,7 @@ Explorer: [Brisescan](https://brisescan.com/) | datasource | status | URLs | | -----------------:|:------------ | -------------------------------------------------------- | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/bitgert-mainnet` | -| **abi.etherscan** | 🟢 works | `https://scan.brisescan.com/api` | +| **evm.etherscan** | 🟢 works | `https://scan.brisescan.com/api` | | **evm.node** | 🟡 HTTP only | `https://mainnet-rpc.brisescan.com/` | ### Bitgert Testnet @@ -24,5 +24,5 @@ Explorer: [Brisescan](https://testnet-explorer.brisescan.com/) (🔴 502) | datasource | status | URLs | | -----------------:|:------------ | -------------------------------------------------------- | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/bitgert-testnet` | -| **abi.etherscan** | 🟢 works | `http://testnet-explorer.brisescan.com/api` | +| **evm.etherscan** | 🟢 works | `http://testnet-explorer.brisescan.com/api` | | **evm.node** | 🟡 HTTP only | `http://testnet-explorer.brisescan.com/api/eth-rpc` | diff --git a/docs/10.supported-networks/8.blast.md b/docs/10.supported-networks/8.blast.md index b9a06da78..a64036d66 100644 --- a/docs/10.supported-networks/8.blast.md +++ b/docs/10.supported-networks/8.blast.md @@ -16,7 +16,7 @@ Explorer: [Blast Explorer](https://blastexplorer.io/) | datasource | status | URLs | | -----------------:|:-------- | --------------------------------------------------------------------- | | **evm.subsquid** | 🟢 works | `https://v2.archive.subsquid.io/network/blast-l2-mainnet` | -| **abi.etherscan** | 🟢 works | `https://api.routescan.io/v2/network/mainnet/evm/81457/etherscan/api` | +| **evm.etherscan** | 🟢 works | 
`https://api.routescan.io/v2/network/mainnet/evm/81457/etherscan/api` | | **evm.node** | 🟢 works | `https://rpc.blast.io`¹
`wss://blast.drpc.org` | ¹ ratelimited to 10 RPS @@ -26,5 +26,5 @@ Explorer: [Blast Explorer](https://blastexplorer.io/) | datasource | status | URLs | | -----------------:|:-------- | ------------------------------------------------------------------------------ | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/blast-sepolia` | -| **abi.etherscan** | 🤔 not tested | `https://api.routescan.io/v2/network/testnet/evm/168587773/etherscan/api` | +| **evm.etherscan** | 🤔 not tested | `https://api.routescan.io/v2/network/testnet/evm/168587773/etherscan/api` | | **evm.node** | 🤔 not tested | | diff --git a/docs/10.supported-networks/9.bob.md b/docs/10.supported-networks/9.bob.md index 1e791e3d9..d951c5375 100644 --- a/docs/10.supported-networks/9.bob.md +++ b/docs/10.supported-networks/9.bob.md @@ -12,5 +12,5 @@ description: "BOB network" | datasource | status | URLs | | -----------------:|:------------- | ---------------------------------------------------- | | **evm.subsquid** | 🤔 not tested | `https://v2.archive.subsquid.io/network/bob-mainnet` | -| **abi.etherscan** | 🤔 not tested | | +| **evm.etherscan** | 🤔 not tested | | | **evm.node** | 🤔 not tested | `https://bob.drpc.org`
`wss://rpc.gobob.xyz` | diff --git a/docs/15.glossary.md b/docs/15.glossary.md index 720599db0..950b8750f 100644 --- a/docs/15.glossary.md +++ b/docs/15.glossary.md @@ -122,7 +122,7 @@ A Python asyncio library for defining models and relationships between tables, s ### big map -big_map object covered in [big map index page](2.indexes/4.tezos_big_maps.md). +big_map object covered in [big map index page](2.indexes/5.tezos_big_maps.md). ### contract storage diff --git a/docs/2.indexes/9.tezos_token_balances.md b/docs/2.indexes/10.tezos_token_balances.md similarity index 100% rename from docs/2.indexes/9.tezos_token_balances.md rename to docs/2.indexes/10.tezos_token_balances.md diff --git a/docs/2.indexes/10.tezos_token_transfers.md b/docs/2.indexes/11.tezos_token_transfers.md similarity index 100% rename from docs/2.indexes/10.tezos_token_transfers.md rename to docs/2.indexes/11.tezos_token_transfers.md diff --git a/docs/2.indexes/4.substrate_events.md b/docs/2.indexes/4.substrate_events.md new file mode 100644 index 000000000..8fdcc4849 --- /dev/null +++ b/docs/2.indexes/4.substrate_events.md @@ -0,0 +1,19 @@ +--- +title: "Events" +description: "This index allows processing events emitted by Substrate pallets. You can define a handler for each module/name pair. Only necessary events are processed." +network: "substrate" +--- + +# `substrate.events` index + +This index allows processing events emitted by Substrate pallets. You can define a handler for each module/name pair. Only necessary events are processed. + +Below is a basic indexer for AssetHub transfers. + +To create a project based on this template, run `dipdup new -t demo_substrate_events`. 
+ +```yaml [dipdup.yaml] +{{ #include ../src/demo_substrate_events/dipdup.yaml }} +``` + +{{ #include 2.indexes/_substrate.md }} diff --git a/docs/2.indexes/4.tezos_big_maps.md b/docs/2.indexes/5.tezos_big_maps.md similarity index 100% rename from docs/2.indexes/4.tezos_big_maps.md rename to docs/2.indexes/5.tezos_big_maps.md diff --git a/docs/2.indexes/5.tezos_events.md b/docs/2.indexes/6.tezos_events.md similarity index 100% rename from docs/2.indexes/5.tezos_events.md rename to docs/2.indexes/6.tezos_events.md diff --git a/docs/2.indexes/6.tezos_head.md b/docs/2.indexes/7.tezos_head.md similarity index 100% rename from docs/2.indexes/6.tezos_head.md rename to docs/2.indexes/7.tezos_head.md diff --git a/docs/2.indexes/7.tezos_operations.md b/docs/2.indexes/8.tezos_operations.md similarity index 100% rename from docs/2.indexes/7.tezos_operations.md rename to docs/2.indexes/8.tezos_operations.md diff --git a/docs/2.indexes/8.tezos_operations_unfiltered.md b/docs/2.indexes/9.tezos_operations_unfiltered.md similarity index 100% rename from docs/2.indexes/8.tezos_operations_unfiltered.md rename to docs/2.indexes/9.tezos_operations_unfiltered.md diff --git a/docs/2.indexes/_substrate.md b/docs/2.indexes/_substrate.md new file mode 100644 index 000000000..3cc3f580d --- /dev/null +++ b/docs/2.indexes/_substrate.md @@ -0,0 +1,6 @@ + +## Datasources + +DipDup indexes for Substrate networks use [Subsquid Network](https://docs.subsquid.io/subsquid-network/overview/) as a main source of historical data. Substrate nodes are not required for DipDup to operate, but they can be used to get the latest data (not yet in Subsquid Network) and realtime updates. See [substrate.subsquid](../3.datasources/8.substrate_subsquid.md) and [substrate.node](../3.datasources/6.substrate_node.md) pages for more info on how to configure both datasources. + +For testing purposes, you can use EVM node as a single datasource, but indexing will be significantly slower. 
For production, it's recommended to use Subsquid Network as the main datasource and Substrate node(s) as a secondary one. If there are multiple `substrate.node` datasources attached to an index, DipDup will use a random one for each request. diff --git a/docs/3.datasources/7.tzip_metadata.md b/docs/3.datasources/10.tzip_metadata.md similarity index 100% rename from docs/3.datasources/7.tzip_metadata.md rename to docs/3.datasources/10.tzip_metadata.md diff --git a/docs/3.datasources/8.coinbase.md b/docs/3.datasources/11.coinbase.md similarity index 100% rename from docs/3.datasources/8.coinbase.md rename to docs/3.datasources/11.coinbase.md diff --git a/docs/3.datasources/9.ipfs.md b/docs/3.datasources/12.ipfs.md similarity index 100% rename from docs/3.datasources/9.ipfs.md rename to docs/3.datasources/12.ipfs.md diff --git a/docs/3.datasources/10.http.md b/docs/3.datasources/13.http.md similarity index 100% rename from docs/3.datasources/10.http.md rename to docs/3.datasources/13.http.md diff --git a/docs/3.datasources/3.abi_etherscan.md b/docs/3.datasources/3.evm_etherscan.md similarity index 95% rename from docs/3.datasources/3.abi_etherscan.md rename to docs/3.datasources/3.evm_etherscan.md index d494d809a..b23725564 100644 --- a/docs/3.datasources/3.abi_etherscan.md +++ b/docs/3.datasources/3.evm_etherscan.md @@ -17,7 +17,7 @@ To use this datasource, add the following section in config: ```yaml [dipdup.yaml] datasources: etherscan: - kind: abi.etherscan + kind: evm.etherscan url: ${ETHERSCAN_URL:-https://api.etherscan.io/api} api_key: ${ETHERSCAN_API_KEY:-''} ``` @@ -28,7 +28,7 @@ During initialization, DipDup will use this datasource to fetch contract ABIs. I indexes: evm_events: kind: evm.events - abi: + datasources: - etherscan ... ``` @@ -39,7 +39,7 @@ If you have an Etherscan API key, you can set it in config. 
You may also want to ```yaml [dipdup.yaml] etherscan: - kind: abi.etherscan + kind: evm.etherscan url: ${ETHERSCAN_URL:-https://api.etherscan.io/api} api_key: ${ETHERSCAN_API_KEY:-''} http: diff --git a/docs/3.datasources/6.substrate_node.md b/docs/3.datasources/6.substrate_node.md new file mode 100644 index 000000000..f64c60f17 --- /dev/null +++ b/docs/3.datasources/6.substrate_node.md @@ -0,0 +1,20 @@ +--- +title: "Substrate Node" +description: "DipDup can connect to any Substrate node via JSON-RPC. It can be used as a last mile datasource for Substrate indexes (data that is not in Subsquid Network yet) or as a standalone datasource for handlers and hooks." +network: "substrate" +--- + +# Substrate node + +DipDup can connect to any Substrate node via JSON-RPC. It can be used as a "last mile" datasource for Substrate indexes (data that is not in Subsquid Network yet) or as a standalone datasource for handlers and hooks. + +```yaml [dipdup.yaml] +datasources: +{{ #include ../src/demo_substrate_events/dipdup.yaml:11:15 }} +``` + +Then, add it to Substrate index definitions: + +```yaml [dipdup.yaml] +{{ #include ../src/demo_substrate_events/dipdup.yaml:22: }} +``` diff --git a/docs/3.datasources/7.substrate_subscan.md b/docs/3.datasources/7.substrate_subscan.md new file mode 100644 index 000000000..b1a9f509f --- /dev/null +++ b/docs/3.datasources/7.substrate_subscan.md @@ -0,0 +1,30 @@ +--- +title: "Subscan" +description: "Subscan is a popular Substrate blockchain explorer. It provides a public API to fetch Substrate metadata. DipDup can use its API to fetch ABIs for pallets being indexed." +network: "substrate" +--- + +# Subscan + +[Subscan](https://subscan.io/) is a popular Substrate blockchain explorer. It provides a public API to fetch Substrate metadata. DipDup can use its API to fetch ABIs for pallets being indexed. 
+ +To use this datasource, add the following section in config: + +```yaml [dipdup.yaml] +datasources: + subscan: + kind: substrate.subscan + url: ${SUBSCAN_URL:-https://api.subscan.io/api} + api_key: ${SUBSCAN_API_KEY:-''} +``` + +During initialization, DipDup will use this datasource to fetch pallet ABIs. If your config contains definitions for multiple networks, you can assign the datasource explicitly in `substrate.events` index definitions: + +```yaml [dipdup.yaml] +indexes: + substrate_events: + kind: substrate.events + datasources: + - subscan + ... +``` diff --git a/docs/3.datasources/8.substrate_subsquid.md b/docs/3.datasources/8.substrate_subsquid.md new file mode 100644 index 000000000..f23275e4c --- /dev/null +++ b/docs/3.datasources/8.substrate_subsquid.md @@ -0,0 +1,23 @@ +--- +title: "Subsquid Network" +description: "DipDup uses Subsquid Network as a source of historical data for Substrate blockchains." +network: "substrate" +--- + +# Subsquid Network + +DipDup uses [Subsquid Network](https://docs.subsquid.io/subsquid-network/reference/substrate-api/) as a source of historical data for Substrate blockchains. + +Configure the datasource in your project config: + +```yaml [dipdup.yaml] +{{ #include ../src/demo_substrate_events/dipdup.yaml:3:7 }} +``` + +Then, add it to Substrate index definitions: + +```yaml [dipdup.yaml] +{{ #include ../src/demo_substrate_events/dipdup.yaml:22: }} +``` + +DipDup will use Subsquid Network when possible and fall back to Substrate nodes for the latest data and realtime updates. 
diff --git a/docs/3.datasources/6.tezos_tzkt.md b/docs/3.datasources/9.tezos_tzkt.md similarity index 100% rename from docs/3.datasources/6.tezos_tzkt.md rename to docs/3.datasources/9.tezos_tzkt.md diff --git a/docs/7.references/2.config.md b/docs/7.references/2.config.md index 0d7ba7876..33e5ca222 100644 --- a/docs/7.references/2.config.md +++ b/docs/7.references/2.config.md @@ -10,18 +10,19 @@ description: "Config file reference" ## dipdup.config.DipDupConfig -class dipdup.config.DipDupConfig(*args, spec_version, package, datasources=<factory>, database=<factory>, contracts=<factory>, indexes=<factory>, templates=<factory>, jobs=<factory>, hooks=<factory>, hasura=None, sentry=None, prometheus=None, api=None, advanced=<factory>, custom=<factory>, logging='INFO') +class dipdup.config.DipDupConfig(*args, spec_version, package, datasources=<factory>, database=<factory>, runtimes=<factory>, contracts=<factory>, indexes=<factory>, templates=<factory>, jobs=<factory>, hooks=<factory>, hasura=None, sentry=None, prometheus=None, api=None, advanced=<factory>, custom=<factory>, logging='INFO')

DipDup project configuration file

Parameters:
@@ -238,7 +206,7 @@ description: "Config file reference" ## dipdup.config.evm_subsquid.EvmSubsquidDatasourceConfig -class dipdup.config.evm_subsquid.EvmSubsquidDatasourceConfig(*args) +class dipdup.config.evm_subsquid.EvmSubsquidDatasourceConfig(**kwargs)

Subsquid datasource config

Parameters:
@@ -247,23 +215,7 @@ description: "Config file reference"
  • url – URL of Subsquid Network API

  • http – HTTP client configuration

  • - -
    - - - -
    - -## dipdup.config.evm.EvmIndexConfig - -class dipdup.config.evm.EvmIndexConfig(*args) -

    EVM index that use Subsquid Network as a datasource

    -
    -
    Parameters:
    -
      -
    • kind – starts with ‘evm’

    • -
    • datasourcesevm datasources to use

    • - +
    • kwargs (Any)

    @@ -294,17 +246,18 @@ description: "Config file reference" ## dipdup.config.evm_transactions.EvmTransactionsIndexConfig -class dipdup.config.evm_transactions.EvmTransactionsIndexConfig(kind, datasources) +class dipdup.config.evm_transactions.EvmTransactionsIndexConfig(**kwargs)

    Index that uses Subsquid Network as a datasource for transactions

    Parameters:
      -
    • kind (Literal['evm.transactions']) – always ‘evm.transactions’

    • -
    • datasources (tuple[str | EvmSubsquidDatasourceConfig | EvmNodeDatasourceConfig | AbiEtherscanDatasourceConfig, ...]) – evm datasources to use

    • +
    • kind – always ‘evm.transactions’

    • +
    • datasourcesevm datasources to use

    • handlers – Transaction handlers

    • first_level – Level to start indexing from

    • last_level – Level to stop indexing at

    • +
    • kwargs (Any)

    @@ -406,7 +359,7 @@ description: "Config file reference" ## dipdup.config.http.HttpDatasourceConfig -class dipdup.config.http.HttpDatasourceConfig(*args) +class dipdup.config.http.HttpDatasourceConfig(**kwargs)

    Generic HTTP datasource config

    Parameters:
    @@ -415,6 +368,7 @@ description: "Config file reference"
  • url – URL to fetch data from

  • http – HTTP client configuration

  • +
  • kwargs (Any)

  • @@ -424,7 +378,7 @@ description: "Config file reference" ## dipdup.config.IndexConfig -class dipdup.config.IndexConfig(*args) +class dipdup.config.IndexConfig(**kwargs)

    Index config

    Parameters:
    @@ -432,6 +386,7 @@ description: "Config file reference"
  • kind – Defined by child class

  • datasources – Aliases of index datasources in datasources section

  • +
  • kwargs (Any)

  • @@ -439,9 +394,9 @@ description: "Config file reference"
    -## id0 +## dipdup.config.DatasourceConfig -class dipdup.config.DatasourceConfig(*args) +class dipdup.config.DatasourceConfig(**kwargs)

    Base class for datasource configs

    Parameters:
    @@ -450,6 +405,7 @@ description: "Config file reference"
  • url – URL of the API

  • http – HTTP connection tunables

  • +
  • kwargs (Any)

  • @@ -588,6 +544,23 @@ description: "Config file reference"
    +## dipdup.config.RuntimeConfig + +class dipdup.config.RuntimeConfig(*args) +

    Runtime config

    +
    +
    Parameters:
    +
      +
    • kind – Defined by child class

    • +
    • typename – Alias for the typeclass directory

    • + +
    +
    +
    +
    + +
    + ## dipdup.config.SentryConfig class dipdup.config.SentryConfig(**kwargs) @@ -1107,25 +1080,6 @@ description: "Config file reference"
    -## dipdup.config.starknet.StarknetIndexConfig - -class dipdup.config.starknet.StarknetIndexConfig(kind, datasources) -

    Starknet index that use Subsquid Network as a datasource

    -
    -
    Parameters:
    -
    -
    -
    -
    - -
    - ## dipdup.config.starknet_events.StarknetEventsHandlerConfig class dipdup.config.starknet_events.StarknetEventsHandlerConfig(callback) @@ -1146,7 +1100,7 @@ description: "Config file reference" ## dipdup.config.starknet_events.StarknetEventsIndexConfig -class dipdup.config.starknet_events.StarknetEventsIndexConfig(kind, datasources, first_level=0, last_level=0) +class dipdup.config.starknet_events.StarknetEventsIndexConfig(kind, datasources)

    Starknet events index config

    Parameters:
    @@ -1154,8 +1108,8 @@ description: "Config file reference"
  • kind (Literal['starknet.events']) – Always ‘starknet.events’

  • datasources (tuple[str | StarknetSubsquidDatasourceConfig | StarknetNodeDatasourceConfig, ...]) – Aliases of index datasources in datasources section

  • handlers – Event handlers

  • -
  • first_level (int) – Level to start indexing from

  • -
  • last_level (int) – Level to stop indexing at

  • +
  • first_level – Level to start indexing from

  • +
  • last_level – Level to stop indexing at

  • @@ -1202,6 +1156,99 @@ description: "Config file reference"
    +## dipdup.config.substrate.SubstrateRuntimeConfig + +class dipdup.config.substrate.SubstrateRuntimeConfig(*args) +

    Substrate runtime config

    +
    +
    Parameters:
    +
      +
    • kind – Always ‘substrate’

    • +
    • type_registry – Path to type registry or its alias

    • + +
    +
    +
    +
    + +
    + +## dipdup.config.substrate_events.SubstrateEventsHandlerConfig + +class dipdup.config.substrate_events.SubstrateEventsHandlerConfig(callback) +

    Substrate event handler

    +
    +
    Parameters:
    +
      +
    • callback (str) – Callback name

    • +
    • name – Event name (pallet.event)

    • + +
    +
    +
    +
    + +
    + +## dipdup.config.substrate_events.SubstrateEventsIndexConfig + +class dipdup.config.substrate_events.SubstrateEventsIndexConfig(kind, datasources) +

    Substrate events index config

    +
    +
    Parameters:
    +
      +
    • kind (Literal['substrate.events']) – Always ‘substrate.events’

    • +
    • datasources (tuple[str | SubstrateSubsquidDatasourceConfig | SubstrateSubscanDatasourceConfig | SubstrateNodeDatasourceConfig, ...]) – substrate datasources to use

    • +
    • handlers – Event handlers

    • +
    • first_level – Level to start indexing from

    • +
    • last_level – Level to stop indexing and disable this index

    • +
    • typename – Alias for pallet interface

    • +
    • runtime – Substrate runtime

    • + +
    +
    +
    +
    + +
    + +## dipdup.config.substrate_subsquid.SubstrateSubsquidDatasourceConfig + +class dipdup.config.substrate_subsquid.SubstrateSubsquidDatasourceConfig(*args) +

    Subsquid datasource config

    +
    +
    Parameters:
    +
      +
    • kind – always ‘substrate.subsquid’

    • +
    • url – URL of Subsquid Network API

    • +
    • http – HTTP client configuration

    • + +
    +
    +
    +
    + +
    + +## dipdup.config.substrate_subscan.SubstrateSubscanDatasourceConfig + +class dipdup.config.substrate_subscan.SubstrateSubscanDatasourceConfig(*args) +

    Subscan datasource config

    +
    +
    Parameters:
    +
      +
    • kind – always ‘substrate.subscan’

    • +
    • url – API URL

    • +
    • api_key – API key

    • +
    • http – HTTP client configuration

    • + +
    +
    +
    +
    + +
    + ## dipdup.config.tzip_metadata.TzipMetadataDatasourceConfig class dipdup.config.tzip_metadata.TzipMetadataDatasourceConfig(*args) diff --git a/docs/7.references/3.context.md b/docs/7.references/3.context.md index fcfe3494d..eba8986bb 100644 --- a/docs/7.references/3.context.md +++ b/docs/7.references/3.context.md @@ -176,13 +176,13 @@ description: "Context reference" ## dipdup.context.DipDupContext.get_abi_etherscan_datasource DipDupContext.get_abi_etherscan_datasource(name) -

    Get abi.etherscan datasource by name

    +

    Get evm.etherscan datasource by name

    Parameters:

    name (str) – Name of the datasource

    Return type:
    -

    AbiEtherscanDatasource

    +

    EvmEtherscanDatasource

    diff --git a/docs/7.references/4.models.md b/docs/7.references/4.models.md index 8cdb719cf..8727f255f 100644 --- a/docs/7.references/4.models.md +++ b/docs/7.references/4.models.md @@ -397,6 +397,54 @@ description: "Models reference" +## Substrate + +
    + +## dipdup.models.substrate.SubstrateEvent + +class dipdup.models.substrate.SubstrateEvent(data: dipdup.models.substrate.SubstrateEventData, runtime: dipdup.runtimes.SubstrateRuntime) +
    +
    Parameters:
    +
    +
    +
    +
    + +
    + +## dipdup.models.substrate.SubstrateEventData + +class dipdup.models.substrate.SubstrateEventData(*, name: str, index: int, extrinsic_index: int, call_address: list[str] | None, args: list[dipdup.fields.Any] | None = None, decoded_args: dict[str, dipdup.fields.Any] | None = None, header: dipdup.models.substrate._BlockHeader, header_extra: dipdup.models.substrate._BlockHeaderExtra | None) +
    +
    Parameters:
    +
      +
    • name (str)

    • +
    • index (int)

    • +
    • extrinsic_index (int)

    • +
    • call_address (list[str] | None)

    • +
    • args (list[Any] | None)

    • +
    • decoded_args (dict[str, Any] | None)

    • +
    • header (_BlockHeader)

    • +
    • header_extra (_BlockHeaderExtra | None)

    • +
    +
    +
    +
    + +
    + +## dipdup.models.substrate.SubstrateHeadBlockData + +class dipdup.models.substrate.SubstrateHeadBlockData +
    + + + + ## Tezos
    diff --git a/docs/8.examples/_demos_table.md b/docs/8.examples/_demos_table.md index 4652a653f..69907f164 100644 --- a/docs/8.examples/_demos_table.md +++ b/docs/8.examples/_demos_table.md @@ -2,6 +2,7 @@ | name | network | description | source | |-|-|-|-| | demo_blank | | Empty config for a fresh start | [link](https://github.com/dipdup-io/dipdup/tree/8.1.3/src/demo_blank) | +| demo_substrate_events | | Substrate balance transfers | [link](https://github.com/dipdup-io/dipdup/tree/8.1.3/src/demo_substrate_events) | | demo_evm_events | EVM | ERC-20 token transfers (from event logs) | [link](https://github.com/dipdup-io/dipdup/tree/8.1.3/src/demo_evm_events) | | demo_evm_transactions | EVM | ERC-20 token transfers (from transactions) | [link](https://github.com/dipdup-io/dipdup/tree/8.1.3/src/demo_evm_transactions) | | demo_evm_uniswap | EVM | Uniswap V3 pools, positions, etc. (advanced, uses TimescaleDB) | [link](https://github.com/dipdup-io/dipdup/tree/8.1.3/src/demo_evm_uniswap) | diff --git a/docs/9.release-notes/1.v8.2.md b/docs/9.release-notes/1.v8.2.md new file mode 100644 index 000000000..0019e60ee --- /dev/null +++ b/docs/9.release-notes/1.v8.2.md @@ -0,0 +1,11 @@ +--- +title: "8.2" +description: DipDup 8.2 release notes +--- + + + +# Release Notes: 8.2 + +{{ #include 9.release-notes/_8.2_changelog.md }} +{{ #include 9.release-notes/_footer.md }} diff --git a/docs/9.release-notes/1.v8.1.md b/docs/9.release-notes/2.v8.1.md similarity index 100% rename from docs/9.release-notes/1.v8.1.md rename to docs/9.release-notes/2.v8.1.md diff --git a/docs/9.release-notes/2.v8.0.md b/docs/9.release-notes/3.v8.0.md similarity index 100% rename from docs/9.release-notes/2.v8.0.md rename to docs/9.release-notes/3.v8.0.md diff --git a/docs/9.release-notes/3.v7.5.md b/docs/9.release-notes/4.v7.5.md similarity index 95% rename from docs/9.release-notes/3.v7.5.md rename to docs/9.release-notes/4.v7.5.md index 87df3b311..77666a548 
100644 --- a/docs/9.release-notes/3.v7.5.md +++ b/docs/9.release-notes/4.v7.5.md @@ -15,7 +15,7 @@ A bunch of performance improvements have been made in this release. DipDup now i The Hasura adapter now supports the `bulk` request type to apply table customizations faster and organize custom metadata files more conveniently. -Finally, DipDup 6.5, the stable release branch, has reached end-of-life. 6.5.16 is the last release in this branch. Please, follow the [7.0 Migration Guide](../9.release-notes/8.v7.0.md#migration-guide) to upgrade to the latest version. +Finally, DipDup 6.5, the stable release branch, has reached end-of-life. 6.5.16 is the last release in this branch. Please, follow the [7.0 Migration Guide](../9.release-notes/9.v7.0.md#migration-guide) to upgrade to the latest version. {{ #include 9.release-notes/_7.5_changelog.md }} {{ #include 9.release-notes/_footer.md }} diff --git a/docs/9.release-notes/4.v7.4.md b/docs/9.release-notes/5.v7.4.md similarity index 100% rename from docs/9.release-notes/4.v7.4.md rename to docs/9.release-notes/5.v7.4.md diff --git a/docs/9.release-notes/5.v7.3.md b/docs/9.release-notes/6.v7.3.md similarity index 97% rename from docs/9.release-notes/5.v7.3.md rename to docs/9.release-notes/6.v7.3.md index 1b456066d..4c2555247 100644 --- a/docs/9.release-notes/5.v7.3.md +++ b/docs/9.release-notes/6.v7.3.md @@ -76,7 +76,7 @@ To use this demo as a template for your own Etherlink project, run `dipdup new` Read more about Etherlink and DipDup support for it: -- [`tezos.tzkt.operations` index](../2.indexes/7.tezos_operations.md) +- [`tezos.tzkt.operations` index](../2.indexes/8.tezos_operations.md) - [Etherlink docs](https://docs.etherlink.com/) - [Etherlink: Building The Most Decentralized EVM Layer 2 (On Tezos)](https://news.tezoscommons.org/etherlink-building-the-most-decentralized-evm-layer-2-on-tezos-1c749fb78d34). 
diff --git a/docs/9.release-notes/6.v7.2.md b/docs/9.release-notes/7.v7.2.md similarity index 98% rename from docs/9.release-notes/6.v7.2.md rename to docs/9.release-notes/7.v7.2.md index f70093018..03076834e 100644 --- a/docs/9.release-notes/6.v7.2.md +++ b/docs/9.release-notes/7.v7.2.md @@ -56,7 +56,7 @@ indexes: Etherlink rollups' addresses start with the `sr1` prefix instead of `KT1`, and an entrypoint is always `default`. If you omit an entrypoint in the operation pattern, the transaction will be treated as untyped. -See the [tezos.tzkt.operations](../2.indexes/7.tezos_operations.md) page for more details. +See the [tezos.tzkt.operations](../2.indexes/8.tezos_operations.md) page for more details. ## API to add indexes in runtime diff --git a/docs/9.release-notes/7.v7.1.md b/docs/9.release-notes/8.v7.1.md similarity index 100% rename from docs/9.release-notes/7.v7.1.md rename to docs/9.release-notes/8.v7.1.md diff --git a/docs/9.release-notes/8.v7.0.md b/docs/9.release-notes/9.v7.0.md similarity index 100% rename from docs/9.release-notes/8.v7.0.md rename to docs/9.release-notes/9.v7.0.md diff --git a/docs/9.release-notes/_7.0_changelog.md b/docs/9.release-notes/_7.0_changelog.md index 1ee4ada58..a4614b830 100644 --- a/docs/9.release-notes/_7.0_changelog.md +++ b/docs/9.release-notes/_7.0_changelog.md @@ -3,7 +3,6 @@ ### Added -- abi.etherscan: Added `abi.etherscan` datasource to fetch ABIs from Etherscan. - api: Added `/performance` endpoint to request indexing stats. - cli: Added `report` command group to manage performance and crash reports created by DipDup. - config: Added `advanced.api` section to configure monitoring API exposed by DipDup. @@ -15,6 +14,7 @@ - database: Added `dipdup_wipe` and `dipdup_approve` SQL functions to the schema. - database: Added experimental support for immune tables in SQLite. - env: Added `DIPDUP_DEBUG` environment variable to enable debug logging. 
+- evm.etherscan: Added `evm.etherscan` datasource to fetch ABIs from Etherscan. - evm.node: Added `evm.node` datasource to receive events from Ethereum node and use web3 API. - evm.subsquid.events: Added `evm.subsquid.events` index to process event logs from Subsquid Archives. - evm.subsquid: Added `evm.subsquid` datasource to fetch historical data from Subsquid Archives. diff --git a/docs/9.release-notes/_7.3_changelog.md b/docs/9.release-notes/_7.3_changelog.md index 55331d9a3..235359b4e 100644 --- a/docs/9.release-notes/_7.3_changelog.md +++ b/docs/9.release-notes/_7.3_changelog.md @@ -8,12 +8,12 @@ ### Fixed -- abi.etherscan: Fixed handling "rate limit reached" errors. - cli: Do not consider config as oneshot if `tezos.tzkt.head` index is present. - cli: Fixed setting logger levels based on config and env variables. - codegen: Allow dots to be used in typenames indicating nested packages. - codegen: Always cleanup jsonschemas before generating types. - config: Make `ws_url` field optional for `evm.node` datasource. +- evm.etherscan: Fixed handling "rate limit reached" errors. - evm.node: Make `withdrawals_root` field optional in `EvmNodeHeadData` model. - http: Fixed crash on some datasource URLs. - http: Fixed incorrect number of retries performed on failed requests. diff --git a/docs/9.release-notes/_7.5_changelog.md b/docs/9.release-notes/_7.5_changelog.md index ffba53858..c28f21798 100644 --- a/docs/9.release-notes/_7.5_changelog.md +++ b/docs/9.release-notes/_7.5_changelog.md @@ -9,7 +9,6 @@ ### Fixed -- abi.etherscan: Raise `AbiNotAvailableError` when contract is not verified. - cli: Fixed incorrect indexer status logging. - cli: Improved logging of indexer status. - config: Don't raise `ConfigurationError` from some model validators. @@ -18,6 +17,7 @@ - config: Fixed setting logging levels according to the config. - config: Forbid extra arguments in config mappings. - deps: Removed `pyarrow` from dependencies, bumped `web3`. 
+- evm.etherscan: Raise `AbiNotAvailableError` when contract is not verified. - evm.events: Improve fetching event batches from node. - evm.node: Fixed default ratelimit sleep time being too high. - evm.node: Fixed memory leak when using realtime subscriptions. diff --git a/docs/9.release-notes/_8.0_changelog.md b/docs/9.release-notes/_8.0_changelog.md index f20cb17da..e0555feed 100644 --- a/docs/9.release-notes/_8.0_changelog.md +++ b/docs/9.release-notes/_8.0_changelog.md @@ -67,7 +67,7 @@ ### Removed - config: Removed `advanced.skip_version_check` flag; use `DIPDUP_NO_VERSION_CHECK` environment variable. -- config: `abi` index config field has been removed; add `abi.etherscan` datasource(s) to the `datasources` list instead. +- config: `abi` index config field has been removed; add `evm.etherscan` datasource(s) to the `datasources` list instead. - config: `node_only` index config flag has been removed; add `evm.node` datasource(s) to the `datasources` list instead. - database: Removed `dipdup_head_status` view; use `dipdup_status` view instead. diff --git a/docs/9.release-notes/_8.1_changelog.md b/docs/9.release-notes/_8.1_changelog.md index 93c1d4431..15338c29e 100644 --- a/docs/9.release-notes/_8.1_changelog.md +++ b/docs/9.release-notes/_8.1_changelog.md @@ -3,10 +3,10 @@ ### Added -- abi.etherscan: Try to extract ABI from webpage when API call fails. - cli: Added `schema` subcommands to manage database migrations: `migrate`, `upgrade`, `downgrade`, `heads` and `history`. - cli: Added interactive mode for `new` command. - database: Support database migrations using [`aerich`](https://github.com/tortoise/aerich). +- evm.etherscan: Try to extract ABI from webpage when API call fails. - hasura: Added `hide` and `hide_internal` config options to make specified tables/views private. 
### Fixed diff --git a/docs/9.release-notes/_8.2_changelog.md b/docs/9.release-notes/_8.2_changelog.md new file mode 100644 index 000000000..97bdc63ed --- /dev/null +++ b/docs/9.release-notes/_8.2_changelog.md @@ -0,0 +1,17 @@ + +## Changes since 8.1 + +### Added + +- substrate.events: Added `substrate.events` index kind to process Substrate events. +- substrate.node: Added `substrate.node` datasource to receive data from Substrate node. +- substrate.subscan: Added `substrate.subscan` datasource to fetch ABIs from Subscan. +- substrate.subsquid: Added `substrate.subsquid` datasource to fetch historical data from Subsquid Network. + +### Fixed + +- evm.subsquid: Fixed event/transaction model deserialization. + +### Changed + +- evm.etherscan: Datasource has been renamed from `abi.etherscan` to `evm.etherscan` for consistency. diff --git a/docs/config.rst b/docs/config.rst index 552bde5ef..1be7c4fcb 100644 --- a/docs/config.rst +++ b/docs/config.rst @@ -2,19 +2,15 @@ .. autoclass:: dipdup.config.DipDupConfig -.. autoclass:: dipdup.config.AbiDatasourceConfig -.. autoclass:: dipdup.config.abi_etherscan.AbiEtherscanDatasourceConfig .. autoclass:: dipdup.config.AdvancedConfig .. autoclass:: dipdup.config.ApiConfig .. autoclass:: dipdup.config.coinbase.CoinbaseDatasourceConfig -.. autoclass:: dipdup.config.ContractConfig -.. autoclass:: dipdup.config.DatasourceConfig .. autoclass:: dipdup.config.evm.EvmContractConfig .. autoclass:: dipdup.config.evm_node.EvmNodeDatasourceConfig +.. autoclass:: dipdup.config.evm_etherscan.EvmEtherscanDatasourceConfig .. autoclass:: dipdup.config.evm_events.EvmEventsHandlerConfig .. autoclass:: dipdup.config.evm_events.EvmEventsIndexConfig .. autoclass:: dipdup.config.evm_subsquid.EvmSubsquidDatasourceConfig -.. autoclass:: dipdup.config.evm.EvmIndexConfig .. autoclass:: dipdup.config.evm_transactions.EvmTransactionsHandlerConfig .. autoclass:: dipdup.config.evm_transactions.EvmTransactionsIndexConfig .. 
autoclass:: dipdup.config.HandlerConfig @@ -30,6 +26,7 @@ .. autoclass:: dipdup.config.PostgresDatabaseConfig .. autoclass:: dipdup.config.PrometheusConfig .. autoclass:: dipdup.config.ResolvedHttpConfig +.. autoclass:: dipdup.config.RuntimeConfig .. autoclass:: dipdup.config.SentryConfig .. autoclass:: dipdup.config.SqliteDatabaseConfig .. autoclass:: dipdup.config.SystemHookConfig @@ -57,9 +54,13 @@ .. autoclass:: dipdup.config.tezos_token_transfers.TezosTokenTransfersHandlerConfig .. autoclass:: dipdup.config.tezos_token_transfers.TezosTokenTransfersIndexConfig .. autoclass:: dipdup.config.starknet.StarknetContractConfig -.. autoclass:: dipdup.config.starknet.StarknetIndexConfig .. autoclass:: dipdup.config.starknet_events.StarknetEventsHandlerConfig .. autoclass:: dipdup.config.starknet_events.StarknetEventsIndexConfig .. autoclass:: dipdup.config.starknet_node.StarknetNodeDatasourceConfig .. autoclass:: dipdup.config.starknet_subsquid.StarknetSubsquidDatasourceConfig +.. autoclass:: dipdup.config.substrate.SubstrateRuntimeConfig +.. autoclass:: dipdup.config.substrate_events.SubstrateEventsHandlerConfig +.. autoclass:: dipdup.config.substrate_events.SubstrateEventsIndexConfig +.. autoclass:: dipdup.config.substrate_subsquid.SubstrateSubsquidDatasourceConfig +.. autoclass:: dipdup.config.substrate_subscan.SubstrateSubscanDatasourceConfig .. autoclass:: dipdup.config.tzip_metadata.TzipMetadataDatasourceConfig diff --git a/docs/models.rst b/docs/models.rst index 37b807780..cc87b305b 100644 --- a/docs/models.rst +++ b/docs/models.rst @@ -49,6 +49,14 @@ Starknet .. autoclass:: dipdup.models.starknet.StarknetEventData .. autoclass:: dipdup.models.starknet.StarknetTransactionData +------------------------------------------------------------------------------- +Substrate +------------------------------------------------------------------------------- + +.. autoclass:: dipdup.models.substrate.SubstrateEvent +.. autoclass:: dipdup.models.substrate.SubstrateEventData +.. 
autoclass:: dipdup.models.substrate.SubstrateHeadBlockData + ------------------------------------------------------------------------------- Tezos ------------------------------------------------------------------------------- diff --git a/pdm.lock b/pdm.lock index 66be548b3..756396142 100644 --- a/pdm.lock +++ b/pdm.lock @@ -5,7 +5,7 @@ groups = ["default", "docs", "lint", "migrations", "perf", "test"] strategy = ["inherit_metadata"] lock_version = "4.5.0" -content_hash = "sha256:f6d3398f8c6129e88d381b14fbfae9486c7ef293d3ca7aad3c84b4c7550e3e69" +content_hash = "sha256:84744143ffe92b9fdf65f189fa6f3903b7cfdf2022acae2b98933fbe8df64a55" [[metadata.targets]] requires_python = ">=3.12,<3.13" @@ -30,13 +30,13 @@ files = [ [[package]] name = "aiohappyeyeballs" -version = "2.4.0" +version = "2.4.4" requires_python = ">=3.8" summary = "Happy Eyeballs for asyncio" groups = ["default", "test"] files = [ - {file = "aiohappyeyeballs-2.4.0-py3-none-any.whl", hash = "sha256:7ce92076e249169a13c2f49320d1967425eaf1f407522d707d59cac7628d62bd"}, - {file = "aiohappyeyeballs-2.4.0.tar.gz", hash = "sha256:55a1714f084e63d49639800f95716da97a1f173d46a16dfcfda0016abb93b6b2"}, + {file = "aiohappyeyeballs-2.4.4-py3-none-any.whl", hash = "sha256:a980909d50efcd44795c4afeca523296716d50cd756ddca6af8c65b996e27de8"}, + {file = "aiohappyeyeballs-2.4.4.tar.gz", hash = "sha256:5fdd7d87889c63183afc18ce9271f9b0a7d32c2303e394468dd45d514a757745"}, ] [[package]] @@ -87,16 +87,16 @@ files = [ [[package]] name = "aiosignal" -version = "1.3.1" -requires_python = ">=3.7" +version = "1.3.2" +requires_python = ">=3.9" summary = "aiosignal: a list of registered asynchronous callbacks" groups = ["default", "test"] dependencies = [ "frozenlist>=1.1.0", ] files = [ - {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, - {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, + {file 
= "aiosignal-1.3.2-py2.py3-none-any.whl", hash = "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5"}, + {file = "aiosignal-1.3.2.tar.gz", hash = "sha256:a8c255c66fafb1e499c9351d0bf32ff2d8a0321595ebac3b93713656d2436f54"}, ] [[package]] @@ -113,6 +113,24 @@ files = [ {file = "aiosqlite-0.20.0.tar.gz", hash = "sha256:6d35c8c256637f4672f843c31021464090805bf925385ac39473fb16eaaca3d7"}, ] +[[package]] +name = "aiosubstrate" +version = "0.1.0" +requires_python = "<3.13,>=3.10" +summary = "A library for interacting with Substrate node (py-substrate-interface fork)" +groups = ["default"] +dependencies = [ + "aiohttp>=3.11.6", + "orjson>=3.10.12", + "scalecodec>=1.2.11", + "websocket-client>=1.8.0", + "xxhash>=3.5.0", +] +files = [ + {file = "aiosubstrate-0.1.0-py3-none-any.whl", hash = "sha256:ae9b80b0ae49684a39f60ea82488d691c2f4be219ae4306e67f4d18a1be62797"}, + {file = "aiosubstrate-0.1.0.tar.gz", hash = "sha256:364ecbad23bd71c9cb1ef6a82cfd700c0a98ab141eeb328cab5052d5f032875e"}, +] + [[package]] name = "alabaster" version = "1.0.0" @@ -182,13 +200,13 @@ files = [ [[package]] name = "argcomplete" -version = "3.5.0" +version = "3.5.2" requires_python = ">=3.8" summary = "Bash tab completion for argparse" groups = ["default"] files = [ - {file = "argcomplete-3.5.0-py3-none-any.whl", hash = "sha256:d4bcf3ff544f51e16e54228a7ac7f486ed70ebf2ecfe49a63a91171c76bf029b"}, - {file = "argcomplete-3.5.0.tar.gz", hash = "sha256:4349400469dccfb7950bb60334a680c58d88699bff6159df61251878dc6bf74b"}, + {file = "argcomplete-3.5.2-py3-none-any.whl", hash = "sha256:036d020d79048a5d525bc63880d7a4b8d1668566b8a76daf1144c0bbe0f63472"}, + {file = "argcomplete-3.5.2.tar.gz", hash = "sha256:23146ed7ac4403b70bd6026402468942ceba34a6732255b9edf5b7354f68a6bb"}, ] [[package]] @@ -242,16 +260,13 @@ files = [ [[package]] name = "attrs" -version = "24.2.0" -requires_python = ">=3.7" +version = "24.3.0" +requires_python = ">=3.8" summary = "Classes Without Boilerplate" groups = ["default", 
"test"] -dependencies = [ - "importlib-metadata; python_version < \"3.8\"", -] files = [ - {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"}, - {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"}, + {file = "attrs-24.3.0-py3-none-any.whl", hash = "sha256:ac96cd038792094f438ad1f6ff80837353805ac950cd2aa0e0625ef19850c308"}, + {file = "attrs-24.3.0.tar.gz", hash = "sha256:8f5c07333d543103541ba7be0e2ce16eeee8130cb0b3f9238ab904ce1e85baff"}, ] [[package]] @@ -268,28 +283,39 @@ files = [ {file = "babel-2.16.0.tar.gz", hash = "sha256:d1f3554ca26605fe173f3de0c65f750f5a42f924499bf134de6423582298e316"}, ] +[[package]] +name = "base58" +version = "2.1.1" +requires_python = ">=3.5" +summary = "Base58 and Base58Check implementation." +groups = ["default"] +files = [ + {file = "base58-2.1.1-py3-none-any.whl", hash = "sha256:11a36f4d3ce51dfc1043f3218591ac4eb1ceb172919cebe05b52a5bcc8d245c2"}, + {file = "base58-2.1.1.tar.gz", hash = "sha256:c5d0cb3f5b6e81e8e35da5754388ddcc6d0d14b6c6a132cb93d69ed580a7278c"}, +] + [[package]] name = "bitarray" -version = "2.9.2" +version = "3.0.0" summary = "efficient arrays of booleans -- C extension" groups = ["default"] files = [ - {file = "bitarray-2.9.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:90e3a281ffe3897991091b7c46fca38c2675bfd4399ffe79dfeded6c52715436"}, - {file = "bitarray-2.9.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:bed637b674db5e6c8a97a4a321e3e4d73e72d50b5c6b29950008a93069cc64cd"}, - {file = "bitarray-2.9.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e49066d251dbbe4e6e3a5c3937d85b589e40e2669ad0eef41a00f82ec17d844b"}, - {file = "bitarray-2.9.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c4344e96642e2211fb3a50558feff682c31563a4c64529a931769d40832ca79"}, - {file = "bitarray-2.9.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", 
hash = "sha256:aeb60962ec4813c539a59fbd4f383509c7222b62c3fb1faa76b54943a613e33a"}, - {file = "bitarray-2.9.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ed0f7982f10581bb16553719e5e8f933e003f5b22f7d25a68bdb30fac630a6ff"}, - {file = "bitarray-2.9.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c71d1cabdeee0cdda4669168618f0e46b7dace207b29da7b63aaa1adc2b54081"}, - {file = "bitarray-2.9.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0ef2d0a6f1502d38d911d25609b44c6cc27bee0a4363dd295df78b075041b60"}, - {file = "bitarray-2.9.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:6f71d92f533770fb027388b35b6e11988ab89242b883f48a6fe7202d238c61f8"}, - {file = "bitarray-2.9.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:ba0734aa300757c924f3faf8148e1b8c247176a0ac8e16aefdf9c1eb19e868f7"}, - {file = "bitarray-2.9.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:d91406f413ccbf4af6ab5ae7bc78f772a95609f9ddd14123db36ef8c37116d95"}, - {file = "bitarray-2.9.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:87abb7f80c0a042f3fe8e5264da1a2756267450bb602110d5327b8eaff7682e7"}, - {file = "bitarray-2.9.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4b558ce85579b51a2e38703877d1e93b7728a7af664dd45a34e833534f0b755d"}, - {file = "bitarray-2.9.2-cp312-cp312-win32.whl", hash = "sha256:dac2399ee2889fbdd3472bfc2ede74c34cceb1ccf29a339964281a16eb1d3188"}, - {file = "bitarray-2.9.2-cp312-cp312-win_amd64.whl", hash = "sha256:48a30d718d1a6dfc22a49547450107abe8f4afdf2abdcbe76eb9ed88edc49498"}, - {file = "bitarray-2.9.2.tar.gz", hash = "sha256:a8f286a51a32323715d77755ed959f94bef13972e9a2fe71b609e40e6d27957e"}, + {file = "bitarray-3.0.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:184972c96e1c7e691be60c3792ca1a51dd22b7f25d96ebea502fe3c9b554f25d"}, + {file = "bitarray-3.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = 
"sha256:787db8da5e9e29be712f7a6bce153c7bc8697ccc2c38633e347bb9c82475d5c9"}, + {file = "bitarray-3.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2da91ab3633c66999c2a352f0ca9ae064f553e5fc0eca231d28e7e305b83e942"}, + {file = "bitarray-3.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7edb83089acbf2c86c8002b96599071931dc4ea5e1513e08306f6f7df879a48b"}, + {file = "bitarray-3.0.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:996d1b83eb904589f40974538223eaed1ab0f62be8a5105c280b9bd849e685c4"}, + {file = "bitarray-3.0.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4817d73d995bd2b977d9cde6050be8d407791cf1f84c8047fa0bea88c1b815bc"}, + {file = "bitarray-3.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d47bc4ff9b0e1624d613563c6fa7b80aebe7863c56c3df5ab238bb7134e8755"}, + {file = "bitarray-3.0.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aca0a9cd376beaccd9f504961de83e776dd209c2de5a4c78dc87a78edf61839b"}, + {file = "bitarray-3.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:572a61fba7e3a710a8324771322fba8488d134034d349dcd036a7aef74723a80"}, + {file = "bitarray-3.0.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a817ad70c1aff217530576b4f037dd9b539eb2926603354fcac605d824082ad1"}, + {file = "bitarray-3.0.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:2ac67b658fa5426503e9581a3fb44a26a3b346c1abd17105735f07db572195b3"}, + {file = "bitarray-3.0.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:12f19ede03e685c5c588ab5ed63167999295ffab5e1126c5fe97d12c0718c18f"}, + {file = "bitarray-3.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fcef31b062f756ba7eebcd7890c5d5de84b9d64ee877325257bcc9782288564a"}, + {file = "bitarray-3.0.0-cp312-cp312-win32.whl", hash = "sha256:656db7bdf1d81ec3b57b3cad7ec7276765964bcfd0eb81c5d1331f385298169c"}, + {file = 
"bitarray-3.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:f785af6b7cb07a9b1e5db0dea9ef9e3e8bb3d74874a0a61303eab9c16acc1999"}, + {file = "bitarray-3.0.0.tar.gz", hash = "sha256:a2083dc20f0d828a7cdf7a16b20dae56aab0f43dc4f347a3b3039f6577992b03"}, ] [[package]] @@ -318,62 +344,62 @@ files = [ [[package]] name = "certifi" -version = "2024.8.30" +version = "2024.12.14" requires_python = ">=3.6" summary = "Python package for providing Mozilla's CA Bundle." groups = ["default", "docs", "test"] files = [ - {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, - {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, + {file = "certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56"}, + {file = "certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db"}, ] [[package]] name = "charset-normalizer" -version = "3.3.2" +version = "3.4.0" requires_python = ">=3.7.0" summary = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
groups = ["default", "docs", "test"] files = [ - {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = 
"sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, - {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"}, + {file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"}, + {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"}, ] [[package]] name = "ckzg" -version = "2.0.0" +version = "2.0.1" summary = "Python bindings for C-KZG-4844" groups = ["default"] files = [ - {file = "ckzg-2.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5fb8a7ed9f430e1102f7d25df015e555c255c512c372373bd1b52fa65b2c32b2"}, - {file = "ckzg-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:a93ef601f87960f881b6a2519d6689ee829cc35e0847ed3dff38c6afff383b41"}, - {file = "ckzg-2.0.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d0ca9e939b7b0dfd5a91cd981a595512000f42739b6262824c886b3a06960fe"}, - {file = "ckzg-2.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:187a0fc230f3993fa8cb2c17d589f8b3ea6b74e1f5ac9927d4f37c19e153afd1"}, - {file = "ckzg-2.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68a3c4aec3ffef2a20f67f6d4a13e9980560aa25d89bbc553aff1e4144f3239a"}, - {file = "ckzg-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:cb48fd7d110fda65a5b9f34f921d15d468354662752d252a0de02797e9510c50"}, - {file = "ckzg-2.0.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:de94dd1615e6aa003a6c864d5c8e8771d98ef912e32f12c555e7703134e77717"}, - {file = "ckzg-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:805d3a11bf6c50badaf02464340dcfb52363b1889b7f75b04a7179959285bac7"}, - {file = "ckzg-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:ea02a706d42e9c273554192949439742267b0031054d859c5c63db064b768a79"}, - {file = "ckzg-2.0.0.tar.gz", hash = "sha256:cd115a39cbc301b8465f6e19191cbb375b3589f3458cc995122595649a6f193f"}, + {file = "ckzg-2.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:285cf3121b8a8c5609c5b706314f68d2ba2784ab02c5bb7487c6ae1714ecb27f"}, + {file = "ckzg-2.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f927bc41c2551b0ef0056a649a7ebed29d9665680a10795f4cee5002c69ddb7"}, + {file = "ckzg-2.0.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fd9fb690c88919f30c9f3ab7cc46a7ecd734d5ff4c9ccea383c119b9b7cc4da"}, + {file = "ckzg-2.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fabc3bd41b306d1c7025d561c3281a007c2aca8ceaf998582dc3894904d9c73e"}, + {file = 
"ckzg-2.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2eb50c53efdb9c34f762bd0c8006cf79bc92a9daf47aa6b541e496988484124f"}, + {file = "ckzg-2.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7960cc62f959403293fb53a3c2404778369ae7cefc6d7f202e5e00567cf98c4b"}, + {file = "ckzg-2.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d721bcd492294c70eca39da0b0a433c29b6a571dbac2f7084bab06334904af06"}, + {file = "ckzg-2.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dde2391d025b5033ef0eeacf62b11ecfe446aea25682b5f547a907766ad0a8cb"}, + {file = "ckzg-2.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:fab8859d9420f6f7df4e094ee3639bc49d18c8dab0df81bee825e2363dd67a09"}, + {file = "ckzg-2.0.1.tar.gz", hash = "sha256:62c5adc381637affa7e1df465c57750b356a761b8a3164c3106589b02532b9c9"}, ] [[package]] name = "click" -version = "8.1.7" +version = "8.1.8" requires_python = ">=3.7" summary = "Composable command line interface toolkit" groups = ["default", "docs", "lint", "migrations"] @@ -382,19 +408,19 @@ dependencies = [ "importlib-metadata; python_version < \"3.8\"", ] files = [ - {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, - {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, + {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"}, + {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"}, ] [[package]] name = "cloudpickle" -version = "3.0.0" +version = "3.1.0" requires_python = ">=3.8" summary = "Pickler class to extend the standard pickle.Pickler functionality" groups = ["perf"] files = [ - {file = "cloudpickle-3.0.0-py3-none-any.whl", hash = "sha256:246ee7d0c295602a036e86369c77fecda4ab17b506496730f2f576d9016fd9c7"}, - {file = "cloudpickle-3.0.0.tar.gz", hash = 
"sha256:996d9a482c6fb4f33c1a35335cf8afd065d2a56e973270364840712d9131a882"}, + {file = "cloudpickle-3.1.0-py3-none-any.whl", hash = "sha256:fe11acda67f61aaaec473e3afe030feb131d78a43461b718185363384f1ba12e"}, + {file = "cloudpickle-3.1.0.tar.gz", hash = "sha256:81a929b6e3c7335c863c771d673d105f02efdb89dfaba0c90495d1c64796601b"}, ] [[package]] @@ -411,47 +437,47 @@ files = [ [[package]] name = "coverage" -version = "7.6.1" -requires_python = ">=3.8" +version = "7.6.9" +requires_python = ">=3.9" summary = "Code coverage measurement for Python" groups = ["test"] files = [ - {file = "coverage-7.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:95cae0efeb032af8458fc27d191f85d1717b1d4e49f7cb226cf526ff28179778"}, - {file = "coverage-7.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5621a9175cf9d0b0c84c2ef2b12e9f5f5071357c4d2ea6ca1cf01814f45d2391"}, - {file = "coverage-7.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:260933720fdcd75340e7dbe9060655aff3af1f0c5d20f46b57f262ab6c86a5e8"}, - {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07e2ca0ad381b91350c0ed49d52699b625aab2b44b65e1b4e02fa9df0e92ad2d"}, - {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c44fee9975f04b33331cb8eb272827111efc8930cfd582e0320613263ca849ca"}, - {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:877abb17e6339d96bf08e7a622d05095e72b71f8afd8a9fefc82cf30ed944163"}, - {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e0cadcf6733c09154b461f1ca72d5416635e5e4ec4e536192180d34ec160f8a"}, - {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c3c02d12f837d9683e5ab2f3d9844dc57655b92c74e286c262e0fc54213c216d"}, - {file = "coverage-7.6.1-cp312-cp312-win32.whl", hash = 
"sha256:e05882b70b87a18d937ca6768ff33cc3f72847cbc4de4491c8e73880766718e5"}, - {file = "coverage-7.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:b5d7b556859dd85f3a541db6a4e0167b86e7273e1cdc973e5b175166bb634fdb"}, - {file = "coverage-7.6.1.tar.gz", hash = "sha256:953510dfb7b12ab69d20135a0662397f077c59b1e6379a768e97c59d852ee51d"}, + {file = "coverage-7.6.9-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:99e266ae0b5d15f1ca8d278a668df6f51cc4b854513daab5cae695ed7b721cf8"}, + {file = "coverage-7.6.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9901d36492009a0a9b94b20e52ebfc8453bf49bb2b27bca2c9706f8b4f5a554a"}, + {file = "coverage-7.6.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abd3e72dd5b97e3af4246cdada7738ef0e608168de952b837b8dd7e90341f015"}, + {file = "coverage-7.6.9-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff74026a461eb0660366fb01c650c1d00f833a086b336bdad7ab00cc952072b3"}, + {file = "coverage-7.6.9-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65dad5a248823a4996724a88eb51d4b31587aa7aa428562dbe459c684e5787ae"}, + {file = "coverage-7.6.9-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:22be16571504c9ccea919fcedb459d5ab20d41172056206eb2994e2ff06118a4"}, + {file = "coverage-7.6.9-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f957943bc718b87144ecaee70762bc2bc3f1a7a53c7b861103546d3a403f0a6"}, + {file = "coverage-7.6.9-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0ae1387db4aecb1f485fb70a6c0148c6cdaebb6038f1d40089b1fc84a5db556f"}, + {file = "coverage-7.6.9-cp312-cp312-win32.whl", hash = "sha256:1a330812d9cc7ac2182586f6d41b4d0fadf9be9049f350e0efb275c8ee8eb692"}, + {file = "coverage-7.6.9-cp312-cp312-win_amd64.whl", hash = "sha256:b12c6b18269ca471eedd41c1b6a1065b2f7827508edb9a7ed5555e9a56dcfc97"}, + {file = "coverage-7.6.9.tar.gz", hash = 
"sha256:4a8d8977b0c6ef5aeadcb644da9e69ae0dcfe66ec7f368c89c72e058bd71164d"}, ] [[package]] name = "coverage" -version = "7.6.1" +version = "7.6.9" extras = ["toml"] -requires_python = ">=3.8" +requires_python = ">=3.9" summary = "Code coverage measurement for Python" groups = ["test"] dependencies = [ - "coverage==7.6.1", + "coverage==7.6.9", "tomli; python_full_version <= \"3.11.0a6\"", ] files = [ - {file = "coverage-7.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:95cae0efeb032af8458fc27d191f85d1717b1d4e49f7cb226cf526ff28179778"}, - {file = "coverage-7.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5621a9175cf9d0b0c84c2ef2b12e9f5f5071357c4d2ea6ca1cf01814f45d2391"}, - {file = "coverage-7.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:260933720fdcd75340e7dbe9060655aff3af1f0c5d20f46b57f262ab6c86a5e8"}, - {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07e2ca0ad381b91350c0ed49d52699b625aab2b44b65e1b4e02fa9df0e92ad2d"}, - {file = "coverage-7.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c44fee9975f04b33331cb8eb272827111efc8930cfd582e0320613263ca849ca"}, - {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:877abb17e6339d96bf08e7a622d05095e72b71f8afd8a9fefc82cf30ed944163"}, - {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e0cadcf6733c09154b461f1ca72d5416635e5e4ec4e536192180d34ec160f8a"}, - {file = "coverage-7.6.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c3c02d12f837d9683e5ab2f3d9844dc57655b92c74e286c262e0fc54213c216d"}, - {file = "coverage-7.6.1-cp312-cp312-win32.whl", hash = "sha256:e05882b70b87a18d937ca6768ff33cc3f72847cbc4de4491c8e73880766718e5"}, - {file = "coverage-7.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:b5d7b556859dd85f3a541db6a4e0167b86e7273e1cdc973e5b175166bb634fdb"}, - {file = 
"coverage-7.6.1.tar.gz", hash = "sha256:953510dfb7b12ab69d20135a0662397f077c59b1e6379a768e97c59d852ee51d"}, + {file = "coverage-7.6.9-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:99e266ae0b5d15f1ca8d278a668df6f51cc4b854513daab5cae695ed7b721cf8"}, + {file = "coverage-7.6.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9901d36492009a0a9b94b20e52ebfc8453bf49bb2b27bca2c9706f8b4f5a554a"}, + {file = "coverage-7.6.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abd3e72dd5b97e3af4246cdada7738ef0e608168de952b837b8dd7e90341f015"}, + {file = "coverage-7.6.9-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff74026a461eb0660366fb01c650c1d00f833a086b336bdad7ab00cc952072b3"}, + {file = "coverage-7.6.9-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65dad5a248823a4996724a88eb51d4b31587aa7aa428562dbe459c684e5787ae"}, + {file = "coverage-7.6.9-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:22be16571504c9ccea919fcedb459d5ab20d41172056206eb2994e2ff06118a4"}, + {file = "coverage-7.6.9-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f957943bc718b87144ecaee70762bc2bc3f1a7a53c7b861103546d3a403f0a6"}, + {file = "coverage-7.6.9-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0ae1387db4aecb1f485fb70a6c0148c6cdaebb6038f1d40089b1fc84a5db556f"}, + {file = "coverage-7.6.9-cp312-cp312-win32.whl", hash = "sha256:1a330812d9cc7ac2182586f6d41b4d0fadf9be9049f350e0efb275c8ee8eb692"}, + {file = "coverage-7.6.9-cp312-cp312-win_amd64.whl", hash = "sha256:b12c6b18269ca471eedd41c1b6a1065b2f7827508edb9a7ed5555e9a56dcfc97"}, + {file = "coverage-7.6.9.tar.gz", hash = "sha256:4a8d8977b0c6ef5aeadcb644da9e69ae0dcfe66ec7f368c89c72e058bd71164d"}, ] [[package]] @@ -484,8 +510,8 @@ files = [ [[package]] name = "cytoolz" -version = "0.12.3" -requires_python = ">=3.7" +version = "1.0.1" +requires_python = ">=3.8" summary = "Cython 
implementation of Toolz: High performance functional utilities" groups = ["default"] marker = "implementation_name == \"cpython\"" @@ -493,21 +519,21 @@ dependencies = [ "toolz>=0.8.0", ] files = [ - {file = "cytoolz-0.12.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:86923d823bd19ce35805953b018d436f6b862edd6a7c8b747a13d52b39ed5716"}, - {file = "cytoolz-0.12.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a3e61acfd029bfb81c2c596249b508dfd2b4f72e31b7b53b62e5fb0507dd7293"}, - {file = "cytoolz-0.12.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd728f4e6051af6af234651df49319da1d813f47894d4c3c8ab7455e01703a37"}, - {file = "cytoolz-0.12.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fe8c6267caa7ec67bcc37e360f0d8a26bc3bdce510b15b97f2f2e0143bdd3673"}, - {file = "cytoolz-0.12.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:99462abd8323c52204a2a0ce62454ce8fa0f4e94b9af397945c12830de73f27e"}, - {file = "cytoolz-0.12.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da125221b1fa25c690fcd030a54344cecec80074df018d906fc6a99f46c1e3a6"}, - {file = "cytoolz-0.12.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c18e351956f70db9e2d04ff02f28e9a41839250d3f936a4c8a1eabd1c3094d2"}, - {file = "cytoolz-0.12.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:921e6d2440ac758c4945c587b1d1d9b781b72737ac0c0ca5d5e02ca1db8bded2"}, - {file = "cytoolz-0.12.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:1651a9bd591a8326329ce1d6336f3129161a36d7061a4d5ea9e5377e033364cf"}, - {file = "cytoolz-0.12.3-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:8893223b87c2782bd59f9c4bd5c7bf733edd8728b523c93efb91d7468b486528"}, - {file = "cytoolz-0.12.3-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:e4d2961644153c5ae186db964aa9f6109da81b12df0f1d3494b4e5cf2c332ee2"}, - {file = 
"cytoolz-0.12.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:71b6eb97f6695f7ba8ce69c49b707a351c5f46fd97f5aeb5f6f2fb0d6e72b887"}, - {file = "cytoolz-0.12.3-cp312-cp312-win32.whl", hash = "sha256:cee3de65584e915053412cd178729ff510ad5f8f585c21c5890e91028283518f"}, - {file = "cytoolz-0.12.3-cp312-cp312-win_amd64.whl", hash = "sha256:9eef0d23035fa4dcfa21e570961e86c375153a7ee605cdd11a8b088c24f707f6"}, - {file = "cytoolz-0.12.3.tar.gz", hash = "sha256:4503dc59f4ced53a54643272c61dc305d1dbbfbd7d6bdf296948de9f34c3a282"}, + {file = "cytoolz-1.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fcb8f7d0d65db1269022e7e0428471edee8c937bc288ebdcb72f13eaa67c2fe4"}, + {file = "cytoolz-1.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:207d4e4b445e087e65556196ff472ff134370d9a275d591724142e255f384662"}, + {file = "cytoolz-1.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21cdf6bac6fd843f3b20280a66fd8df20dea4c58eb7214a2cd8957ec176f0bb3"}, + {file = "cytoolz-1.0.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a55ec098036c0dea9f3bdc021f8acd9d105a945227d0811589f0573f21c9ce1"}, + {file = "cytoolz-1.0.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a13ab79ff4ce202e03ab646a2134696988b554b6dc4b71451e948403db1331d8"}, + {file = "cytoolz-1.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e2d944799026e1ff08a83241f1027a2d9276c41f7a74224cd98b7df6e03957d"}, + {file = "cytoolz-1.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88ba85834cd523b91fdf10325e1e6d71c798de36ea9bdc187ca7bd146420de6f"}, + {file = "cytoolz-1.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5a750b1af7e8bf6727f588940b690d69e25dc47cce5ce467925a76561317eaf7"}, + {file = "cytoolz-1.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:44a71870f7eae31d263d08b87da7c2bf1176f78892ed8bdade2c2850478cb126"}, + {file = 
"cytoolz-1.0.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c8231b9abbd8e368e036f4cc2e16902c9482d4cf9e02a6147ed0e9a3cd4a9ab0"}, + {file = "cytoolz-1.0.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:aa87599ccc755de5a096a4d6c34984de6cd9dc928a0c5eaa7607457317aeaf9b"}, + {file = "cytoolz-1.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:67cd16537df51baabde3baa770ab7b8d16839c4d21219d5b96ac59fb012ebd2d"}, + {file = "cytoolz-1.0.1-cp312-cp312-win32.whl", hash = "sha256:fb988c333f05ee30ad4693fe4da55d95ec0bb05775d2b60191236493ea2e01f9"}, + {file = "cytoolz-1.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:8f89c48d8e5aec55ffd566a8ec858706d70ed0c6a50228eca30986bfa5b4da8b"}, + {file = "cytoolz-1.0.1.tar.gz", hash = "sha256:89cc3161b89e1bb3ed7636f74ed2e55984fd35516904fc878cae216e42b2c7d6"}, ] [[package]] @@ -548,14 +574,14 @@ files = [ [[package]] name = "dnspython" -version = "2.6.1" -requires_python = ">=3.8" +version = "2.7.0" +requires_python = ">=3.9" summary = "DNS toolkit" groups = ["default"] marker = "python_version ~= \"3.11\"" files = [ - {file = "dnspython-2.6.1-py3-none-any.whl", hash = "sha256:5ef3b9680161f6fa89daf8ad451b5f1a33b18ae8a1c6778cdf4b43f08c0a6e50"}, - {file = "dnspython-2.6.1.tar.gz", hash = "sha256:e8f0f9c23a7b7cb99ded64e6c3a6f3e701d78f50c55e002b839dea7225cff7cc"}, + {file = "dnspython-2.7.0-py3-none-any.whl", hash = "sha256:b4c34b7d10b51bcc3a5071e7b8dee77939f1e878477eeecc965e9835f63c6c86"}, + {file = "dnspython-2.7.0.tar.gz", hash = "sha256:ce9c432eda0dc91cf618a5cedf1a4e142651196bbcd2c80e89ed5a907e5cfaf1"}, ] [[package]] @@ -633,7 +659,7 @@ files = [ [[package]] name = "eth-account" -version = "0.13.3" +version = "0.13.4" requires_python = "<4,>=3.8" summary = "eth-account: Sign Ethereum transactions and messages with local private keys" groups = ["default"] @@ -641,7 +667,7 @@ dependencies = [ "bitarray>=2.4.0", "ckzg>=2.0.0", "eth-abi>=4.0.0-b.2", - "eth-keyfile>=0.7.0", + "eth-keyfile<0.9.0,>=0.7.0", "eth-keys>=0.4.0", 
"eth-rlp>=2.1.0", "eth-utils>=2.0.0", @@ -650,8 +676,8 @@ dependencies = [ "rlp>=1.0.0", ] files = [ - {file = "eth_account-0.13.3-py3-none-any.whl", hash = "sha256:c8f3dae3403b8647f386fcc081fb8c2a0970991cf3e00af7e7ebd73f95d6a319"}, - {file = "eth_account-0.13.3.tar.gz", hash = "sha256:03d6af5d314e64b3dd53283e15b24736c5caa24542e5edac0455d6ff87d8b1e0"}, + {file = "eth_account-0.13.4-py3-none-any.whl", hash = "sha256:a4c109e9bad3a278243fcc028b755fb72b43e25b1e6256b3f309a44f5f7d87c3"}, + {file = "eth_account-0.13.4.tar.gz", hash = "sha256:2e1f2de240bef3d9f3d8013656135d2a79b6be6d4e7885bce9cace4334a4a376"}, ] [[package]] @@ -699,7 +725,7 @@ files = [ [[package]] name = "eth-keys" -version = "0.5.1" +version = "0.6.0" requires_python = "<4,>=3.8" summary = "eth-keys: Common API for Ethereum key operations" groups = ["default"] @@ -708,8 +734,8 @@ dependencies = [ "eth-utils>=2", ] files = [ - {file = "eth_keys-0.5.1-py3-none-any.whl", hash = "sha256:ad13d920a2217a49bed3a1a7f54fb0980f53caf86d3bbab2139fd3330a17b97e"}, - {file = "eth_keys-0.5.1.tar.gz", hash = "sha256:2b587e4bbb9ac2195215a7ab0c0fb16042b17d4ec50240ed670bbb8f53da7a48"}, + {file = "eth_keys-0.6.0-py3-none-any.whl", hash = "sha256:b396fdfe048a5bba3ef3990739aec64901eb99901c03921caa774be668b1db6e"}, + {file = "eth_keys-0.6.0.tar.gz", hash = "sha256:ba33230f851d02c894e83989185b21d76152c49b37e35b61b1d8a6d9f1d20430"}, ] [[package]] @@ -731,7 +757,7 @@ files = [ [[package]] name = "eth-typing" -version = "5.0.0" +version = "5.0.1" requires_python = "<4,>=3.8" summary = "eth-typing: Common type annotations for ethereum python packages" groups = ["default"] @@ -739,13 +765,13 @@ dependencies = [ "typing-extensions>=4.5.0", ] files = [ - {file = "eth_typing-5.0.0-py3-none-any.whl", hash = "sha256:c7ebc8595e7b65175bb4b4176c2b548ab21b13329f2058e84d4f8c289ba9f577"}, - {file = "eth_typing-5.0.0.tar.gz", hash = "sha256:87ce7cee75665c09d2dcff8de1b496609d5e32fcd2e2b1d8fc0370c29eedcdc0"}, + {file = 
"eth_typing-5.0.1-py3-none-any.whl", hash = "sha256:f30d1af16aac598f216748a952eeb64fbcb6e73efa691d2de31148138afe96de"}, + {file = "eth_typing-5.0.1.tar.gz", hash = "sha256:83debf88c9df286db43bb7374974681ebcc9f048fac81be2548dbc549a3203c0"}, ] [[package]] name = "eth-utils" -version = "5.0.0" +version = "5.1.0" requires_python = "<4,>=3.8" summary = "eth-utils: Common utility functions for python code that interacts with Ethereum" groups = ["default"] @@ -753,12 +779,11 @@ dependencies = [ "cytoolz>=0.10.1; implementation_name == \"cpython\"", "eth-hash>=0.3.1", "eth-typing>=5.0.0", - "hexbytes>=1.0.0", "toolz>0.8.2; implementation_name == \"pypy\"", ] files = [ - {file = "eth_utils-5.0.0-py3-none-any.whl", hash = "sha256:99c44eca11db74dbb881a1d70b24cd80436fc62fe527d2f5c3e3cf7932aba7b2"}, - {file = "eth_utils-5.0.0.tar.gz", hash = "sha256:a5eb9555f43f4579eb83cb84f9dda9f3d6663bbd4a5a6b693f8d35045f305a1f"}, + {file = "eth_utils-5.1.0-py3-none-any.whl", hash = "sha256:a99f1f01b51206620904c5af47fac65abc143aebd0a76bdec860381c5a3230f8"}, + {file = "eth_utils-5.1.0.tar.gz", hash = "sha256:84c6314b9cf1fcd526107464bbf487e3f87097a2e753360d5ed319f7d42e3f20"}, ] [[package]] @@ -774,28 +799,28 @@ files = [ [[package]] name = "frozenlist" -version = "1.4.1" +version = "1.5.0" requires_python = ">=3.8" summary = "A list-like structure which implements collections.abc.MutableSequence" groups = ["default", "test"] files = [ - {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1979bc0aeb89b33b588c51c54ab0161791149f2461ea7c7c946d95d5f93b56ae"}, - {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cc7b01b3754ea68a62bd77ce6020afaffb44a590c2289089289363472d13aedb"}, - {file = "frozenlist-1.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9c92be9fd329ac801cc420e08452b70e7aeab94ea4233a4804f0915c14eba9b"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:5c3894db91f5a489fc8fa6a9991820f368f0b3cbdb9cd8849547ccfab3392d86"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba60bb19387e13597fb059f32cd4d59445d7b18b69a745b8f8e5db0346f33480"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8aefbba5f69d42246543407ed2461db31006b0f76c4e32dfd6f42215a2c41d09"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780d3a35680ced9ce682fbcf4cb9c2bad3136eeff760ab33707b71db84664e3a"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9acbb16f06fe7f52f441bb6f413ebae6c37baa6ef9edd49cdd567216da8600cd"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23b701e65c7b36e4bf15546a89279bd4d8675faabc287d06bbcfac7d3c33e1e6"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3e0153a805a98f5ada7e09826255ba99fb4f7524bb81bf6b47fb702666484ae1"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:dd9b1baec094d91bf36ec729445f7769d0d0cf6b64d04d86e45baf89e2b9059b"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:1a4471094e146b6790f61b98616ab8e44f72661879cc63fa1049d13ef711e71e"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5667ed53d68d91920defdf4035d1cdaa3c3121dc0b113255124bcfada1cfa1b8"}, - {file = "frozenlist-1.4.1-cp312-cp312-win32.whl", hash = "sha256:beee944ae828747fd7cb216a70f120767fc9f4f00bacae8543c14a6831673f89"}, - {file = "frozenlist-1.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:64536573d0a2cb6e625cf309984e2d873979709f2cf22839bf2d61790b448ad5"}, - {file = "frozenlist-1.4.1-py3-none-any.whl", hash = "sha256:04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7"}, - {file = 
"frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:31115ba75889723431aa9a4e77d5f398f5cf976eea3bdf61749731f62d4a4a21"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7437601c4d89d070eac8323f121fcf25f88674627505334654fd027b091db09d"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7948140d9f8ece1745be806f2bfdf390127cf1a763b925c4a805c603df5e697e"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feeb64bc9bcc6b45c6311c9e9b99406660a9c05ca8a5b30d14a78555088b0b3a"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:683173d371daad49cffb8309779e886e59c2f369430ad28fe715f66d08d4ab1a"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7d57d8f702221405a9d9b40f9da8ac2e4a1a8b5285aac6100f3393675f0a85ee"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30c72000fbcc35b129cb09956836c7d7abf78ab5416595e4857d1cae8d6251a6"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:000a77d6034fbad9b6bb880f7ec073027908f1b40254b5d6f26210d2dab1240e"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5d7f5a50342475962eb18b740f3beecc685a15b52c91f7d975257e13e029eca9"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:87f724d055eb4785d9be84e9ebf0f24e392ddfad00b3fe036e43f489fafc9039"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:6e9080bb2fb195a046e5177f10d9d82b8a204c0736a97a153c2466127de87784"}, + {file = 
"frozenlist-1.5.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9b93d7aaa36c966fa42efcaf716e6b3900438632a626fb09c049f6a2f09fc631"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:52ef692a4bc60a6dd57f507429636c2af8b6046db8b31b18dac02cbc8f507f7f"}, + {file = "frozenlist-1.5.0-cp312-cp312-win32.whl", hash = "sha256:29d94c256679247b33a3dc96cce0f93cbc69c23bf75ff715919332fdbb6a32b8"}, + {file = "frozenlist-1.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:8969190d709e7c48ea386db202d708eb94bdb29207a1f269bab1196ce0dcca1f"}, + {file = "frozenlist-1.5.0-py3-none-any.whl", hash = "sha256:d994863bba198a4a518b467bb971c56e1db3f180a25c6cf7bb1949c267f748c3"}, + {file = "frozenlist-1.5.0.tar.gz", hash = "sha256:81d5af29e61b9c8348e876d442253723928dce6433e0e76cd925cd83f1b4b817"}, ] [[package]] @@ -821,13 +846,13 @@ files = [ [[package]] name = "idna" -version = "3.8" +version = "3.10" requires_python = ">=3.6" summary = "Internationalized Domain Names in Applications (IDNA)" groups = ["default", "docs", "test"] files = [ - {file = "idna-3.8-py3-none-any.whl", hash = "sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac"}, - {file = "idna-3.8.tar.gz", hash = "sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603"}, + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, ] [[package]] @@ -949,53 +974,53 @@ files = [ [[package]] name = "markupsafe" -version = "2.1.5" -requires_python = ">=3.7" +version = "3.0.2" +requires_python = ">=3.9" summary = "Safely add untrusted strings to HTML/XML markup." 
groups = ["default", "docs", "perf"] files = [ - {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, - {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87"}, + {file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"}, ] [[package]] name = "marshmallow" -version = "3.22.0" -requires_python = ">=3.8" +version = "3.23.2" +requires_python = ">=3.9" summary = "A lightweight library for converting complex datatypes to and from native Python datatypes." 
groups = ["default"] dependencies = [ "packaging>=17.0", ] files = [ - {file = "marshmallow-3.22.0-py3-none-any.whl", hash = "sha256:71a2dce49ef901c3f97ed296ae5051135fd3febd2bf43afe0ae9a82143a494d9"}, - {file = "marshmallow-3.22.0.tar.gz", hash = "sha256:4972f529104a220bb8637d595aa4c9762afbe7f7a77d82dc58c1615d70c5823e"}, + {file = "marshmallow-3.23.2-py3-none-any.whl", hash = "sha256:bcaf2d6fd74fb1459f8450e85d994997ad3e70036452cbfa4ab685acb19479b3"}, + {file = "marshmallow-3.23.2.tar.gz", hash = "sha256:c448ac6455ca4d794773f00bae22c2f351d62d739929f761dce5eacb5c468d7f"}, ] [[package]] name = "marshmallow-dataclass" -version = "8.7.0" +version = "8.7.1" requires_python = ">=3.8" summary = "Python library to convert dataclasses into marshmallow schemas." groups = ["default"] dependencies = [ "marshmallow>=3.18.0", - "typeguard~=4.0.0", + "typeguard<5,>=4.0", "typing-extensions>=4.2.0; python_version < \"3.11\"", - "typing-inspect~=0.9.0", + "typing-inspect>=0.9.0", ] files = [ - {file = "marshmallow_dataclass-8.7.0-py3-none-any.whl", hash = "sha256:9e528d72b83f2b6b0f60cb29fd38781a6f7ce2155295adb1ed33289826a93c4b"}, - {file = "marshmallow_dataclass-8.7.0.tar.gz", hash = "sha256:0218008fec3fd4b5f739b2a0c6d7593bcc403308f6da953e341e4e359e268849"}, + {file = "marshmallow_dataclass-8.7.1-py3-none-any.whl", hash = "sha256:405cbaaad9cea56b3de2f85eff32a9880e3bf849f652e7f6de7395e4b1ddc072"}, + {file = "marshmallow_dataclass-8.7.1.tar.gz", hash = "sha256:4fb80e1bf7b31ce1b192aa87ffadee2cedb3f6f37bb0042f8500b07e6fad59c4"}, ] [[package]] @@ -1023,6 +1048,17 @@ files = [ {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, ] +[[package]] +name = "more-itertools" +version = "10.5.0" +requires_python = ">=3.8" +summary = "More routines for operating on iterables, beyond itertools" +groups = ["default"] +files = [ + {file = "more-itertools-10.5.0.tar.gz", hash = 
"sha256:5482bfef7849c25dc3c6dd53a6173ae4795da2a41a80faea6700d9f5846c5da6"}, + {file = "more_itertools-10.5.0-py3-none-any.whl", hash = "sha256:037b0d3203ce90cca8ab1defbbdac29d5f993fc20131f3664dc8d6acfa872aef"}, +] + [[package]] name = "mpmath" version = "1.3.0" @@ -1035,49 +1071,52 @@ files = [ [[package]] name = "msgpack" -version = "1.0.8" +version = "1.1.0" requires_python = ">=3.8" summary = "MessagePack serializer" groups = ["default"] files = [ - {file = "msgpack-1.0.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:114be227f5213ef8b215c22dde19532f5da9652e56e8ce969bf0a26d7c419fee"}, - {file = "msgpack-1.0.8-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d661dc4785affa9d0edfdd1e59ec056a58b3dbb9f196fa43587f3ddac654ac7b"}, - {file = "msgpack-1.0.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d56fd9f1f1cdc8227d7b7918f55091349741904d9520c65f0139a9755952c9e8"}, - {file = "msgpack-1.0.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0726c282d188e204281ebd8de31724b7d749adebc086873a59efb8cf7ae27df3"}, - {file = "msgpack-1.0.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8db8e423192303ed77cff4dce3a4b88dbfaf43979d280181558af5e2c3c71afc"}, - {file = "msgpack-1.0.8-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99881222f4a8c2f641f25703963a5cefb076adffd959e0558dc9f803a52d6a58"}, - {file = "msgpack-1.0.8-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b5505774ea2a73a86ea176e8a9a4a7c8bf5d521050f0f6f8426afe798689243f"}, - {file = "msgpack-1.0.8-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:ef254a06bcea461e65ff0373d8a0dd1ed3aa004af48839f002a0c994a6f72d04"}, - {file = "msgpack-1.0.8-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e1dd7839443592d00e96db831eddb4111a2a81a46b028f0facd60a09ebbdd543"}, - {file = "msgpack-1.0.8-cp312-cp312-win32.whl", hash = 
"sha256:64d0fcd436c5683fdd7c907eeae5e2cbb5eb872fafbc03a43609d7941840995c"}, - {file = "msgpack-1.0.8-cp312-cp312-win_amd64.whl", hash = "sha256:74398a4cf19de42e1498368c36eed45d9528f5fd0155241e82c4082b7e16cffd"}, - {file = "msgpack-1.0.8.tar.gz", hash = "sha256:95c02b0e27e706e48d0e5426d1710ca78e0f0628d6e89d5b5a5b91a5f12274f3"}, + {file = "msgpack-1.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d46cf9e3705ea9485687aa4001a76e44748b609d260af21c4ceea7f2212a501d"}, + {file = "msgpack-1.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5dbad74103df937e1325cc4bfeaf57713be0b4f15e1c2da43ccdd836393e2ea2"}, + {file = "msgpack-1.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:58dfc47f8b102da61e8949708b3eafc3504509a5728f8b4ddef84bd9e16ad420"}, + {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676e5be1b472909b2ee6356ff425ebedf5142427842aa06b4dfd5117d1ca8a2"}, + {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17fb65dd0bec285907f68b15734a993ad3fc94332b5bb21b0435846228de1f39"}, + {file = "msgpack-1.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a51abd48c6d8ac89e0cfd4fe177c61481aca2d5e7ba42044fd218cfd8ea9899f"}, + {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2137773500afa5494a61b1208619e3871f75f27b03bcfca7b3a7023284140247"}, + {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:398b713459fea610861c8a7b62a6fec1882759f308ae0795b5413ff6a160cf3c"}, + {file = "msgpack-1.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:06f5fd2f6bb2a7914922d935d3b8bb4a7fff3a9a91cfce6d06c13bc42bec975b"}, + {file = "msgpack-1.1.0-cp312-cp312-win32.whl", hash = "sha256:ad33e8400e4ec17ba782f7b9cf868977d867ed784a1f5f2ab46e7ba53b6e1e1b"}, + {file = "msgpack-1.1.0-cp312-cp312-win_amd64.whl", hash = 
"sha256:115a7af8ee9e8cddc10f87636767857e7e3717b7a2e97379dc2054712693e90f"}, + {file = "msgpack-1.1.0.tar.gz", hash = "sha256:dd432ccc2c72b914e4cb77afce64aab761c1137cc698be3984eee260bcb2896e"}, ] [[package]] name = "multidict" -version = "6.0.5" -requires_python = ">=3.7" +version = "6.1.0" +requires_python = ">=3.8" summary = "multidict implementation" groups = ["default", "test"] +dependencies = [ + "typing-extensions>=4.1.0; python_version < \"3.11\"", +] files = [ - {file = "multidict-6.0.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:51d035609b86722963404f711db441cf7134f1889107fb171a970c9701f92e1e"}, - {file = "multidict-6.0.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cbebcd5bcaf1eaf302617c114aa67569dd3f090dd0ce8ba9e35e9985b41ac35b"}, - {file = "multidict-6.0.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2ffc42c922dbfddb4a4c3b438eb056828719f07608af27d163191cb3e3aa6cc5"}, - {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ceb3b7e6a0135e092de86110c5a74e46bda4bd4fbfeeb3a3bcec79c0f861e450"}, - {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:79660376075cfd4b2c80f295528aa6beb2058fd289f4c9252f986751a4cd0496"}, - {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e4428b29611e989719874670fd152b6625500ad6c686d464e99f5aaeeaca175a"}, - {file = "multidict-6.0.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d84a5c3a5f7ce6db1f999fb9438f686bc2e09d38143f2d93d8406ed2dd6b9226"}, - {file = "multidict-6.0.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:76c0de87358b192de7ea9649beb392f107dcad9ad27276324c24c91774ca5271"}, - {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:79a6d2ba910adb2cbafc95dad936f8b9386e77c84c35bc0add315b856d7c3abb"}, - {file = 
"multidict-6.0.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:92d16a3e275e38293623ebf639c471d3e03bb20b8ebb845237e0d3664914caef"}, - {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:fb616be3538599e797a2017cccca78e354c767165e8858ab5116813146041a24"}, - {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:14c2976aa9038c2629efa2c148022ed5eb4cb939e15ec7aace7ca932f48f9ba6"}, - {file = "multidict-6.0.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:435a0984199d81ca178b9ae2c26ec3d49692d20ee29bc4c11a2a8d4514c67eda"}, - {file = "multidict-6.0.5-cp312-cp312-win32.whl", hash = "sha256:9fe7b0653ba3d9d65cbe7698cca585bf0f8c83dbbcc710db9c90f478e175f2d5"}, - {file = "multidict-6.0.5-cp312-cp312-win_amd64.whl", hash = "sha256:01265f5e40f5a17f8241d52656ed27192be03bfa8764d88e8220141d1e4b3556"}, - {file = "multidict-6.0.5-py3-none-any.whl", hash = "sha256:0d63c74e3d7ab26de115c49bffc92cc77ed23395303d496eae515d4204a625e7"}, - {file = "multidict-6.0.5.tar.gz", hash = "sha256:f7e301075edaf50500f0b341543c41194d8df3ae5caf4702f2095f3ca73dd8da"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b04772ed465fa3cc947db808fa306d79b43e896beb677a56fb2347ca1a49c1fa"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6180c0ae073bddeb5a97a38c03f30c233e0a4d39cd86166251617d1bbd0af436"}, + {file = "multidict-6.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:071120490b47aa997cca00666923a83f02c7fbb44f71cf7f136df753f7fa8761"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50b3a2710631848991d0bf7de077502e8994c804bb805aeb2925a981de58ec2e"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b58c621844d55e71c1b7f7c498ce5aa6985d743a1a59034c57a905b3f153c1ef"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:55b6d90641869892caa9ca42ff913f7ff1c5ece06474fbd32fb2cf6834726c95"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b820514bfc0b98a30e3d85462084779900347e4d49267f747ff54060cc33925"}, + {file = "multidict-6.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10a9b09aba0c5b48c53761b7c720aaaf7cf236d5fe394cd399c7ba662d5f9966"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1e16bf3e5fc9f44632affb159d30a437bfe286ce9e02754759be5536b169b305"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76f364861c3bfc98cbbcbd402d83454ed9e01a5224bb3a28bf70002a230f73e2"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:820c661588bd01a0aa62a1283f20d2be4281b086f80dad9e955e690c75fb54a2"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:0e5f362e895bc5b9e67fe6e4ded2492d8124bdf817827f33c5b46c2fe3ffaca6"}, + {file = "multidict-6.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3ec660d19bbc671e3a6443325f07263be452c453ac9e512f5eb935e7d4ac28b3"}, + {file = "multidict-6.1.0-cp312-cp312-win32.whl", hash = "sha256:58130ecf8f7b8112cdb841486404f1282b9c86ccb30d3519faf301b2e5659133"}, + {file = "multidict-6.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:188215fc0aafb8e03341995e7c4797860181562380f81ed0a87ff455b70bf1f1"}, + {file = "multidict-6.1.0-py3-none-any.whl", hash = "sha256:48e171e52d1c4d33888e529b999e5900356b9ae588c2f09a52dcefb158b27506"}, + {file = "multidict-6.1.0.tar.gz", hash = "sha256:22ae2ebf9b0c69d206c003e2f6a914ea33f0a932d4aa16f236afc049d9958f4a"}, ] [[package]] @@ -1114,20 +1153,22 @@ files = [ [[package]] name = "numpy" -version = "1.26.4" -requires_python = ">=3.9" +version = "2.2.0" +requires_python = ">=3.10" summary = "Fundamental package for array computing in Python" groups = ["perf"] files = [ - {file = 
"numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"}, - {file = "numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"}, - {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"}, - {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"}, - {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"}, - {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"}, - {file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"}, - {file = "numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"}, - {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, + {file = "numpy-2.2.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:cff210198bb4cae3f3c100444c5eaa573a823f05c253e7188e1362a5555235b3"}, + {file = "numpy-2.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:58b92a5828bd4d9aa0952492b7de803135038de47343b2aa3cc23f3b71a3dc4e"}, + {file = "numpy-2.2.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:ebe5e59545401fbb1b24da76f006ab19734ae71e703cdb4a8b347e84a0cece67"}, + {file = "numpy-2.2.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:e2b8cd48a9942ed3f85b95ca4105c45758438c7ed28fff1e4ce3e57c3b589d8e"}, + {file = "numpy-2.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:57fcc997ffc0bef234b8875a54d4058afa92b0b0c4223fc1f62f24b3b5e86038"}, + {file = "numpy-2.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85ad7d11b309bd132d74397fcf2920933c9d1dc865487128f5c03d580f2c3d03"}, + {file = "numpy-2.2.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:cb24cca1968b21355cc6f3da1a20cd1cebd8a023e3c5b09b432444617949085a"}, + {file = "numpy-2.2.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0798b138c291d792f8ea40fe3768610f3c7dd2574389e37c3f26573757c8f7ef"}, + {file = "numpy-2.2.0-cp312-cp312-win32.whl", hash = "sha256:afe8fb968743d40435c3827632fd36c5fbde633b0423da7692e426529b1759b1"}, + {file = "numpy-2.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:3a4199f519e57d517ebd48cb76b36c82da0360781c6a0353e64c0cac30ecaad3"}, + {file = "numpy-2.2.0.tar.gz", hash = "sha256:140dd80ff8981a583a60980be1a655068f8adebf7a45a06a6858c873fcdcd4a0"}, ] [[package]] @@ -1166,13 +1207,13 @@ files = [ [[package]] name = "packaging" -version = "24.1" +version = "24.2" requires_python = ">=3.8" summary = "Core utilities for Python packages" groups = ["default", "docs", "lint", "test"] files = [ - {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, - {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, + {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, + {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, ] [[package]] @@ -1201,13 +1242,13 @@ files = [ [[package]] name = "platformdirs" -version = "4.2.2" +version = "4.3.6" requires_python = ">=3.8" summary = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
groups = ["default", "lint"] files = [ - {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, - {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, + {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, + {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, ] [[package]] @@ -1257,46 +1298,46 @@ files = [ [[package]] name = "propcache" -version = "0.2.0" -requires_python = ">=3.8" +version = "0.2.1" +requires_python = ">=3.9" summary = "Accelerated property cache" groups = ["default", "test"] files = [ - {file = "propcache-0.2.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:2ee7606193fb267be4b2e3b32714f2d58cad27217638db98a60f9efb5efeccc2"}, - {file = "propcache-0.2.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:91ee8fc02ca52e24bcb77b234f22afc03288e1dafbb1f88fe24db308910c4ac7"}, - {file = "propcache-0.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2e900bad2a8456d00a113cad8c13343f3b1f327534e3589acc2219729237a2e8"}, - {file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f52a68c21363c45297aca15561812d542f8fc683c85201df0bebe209e349f793"}, - {file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e41d67757ff4fbc8ef2af99b338bfb955010444b92929e9e55a6d4dcc3c4f09"}, - {file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a64e32f8bd94c105cc27f42d3b658902b5bcc947ece3c8fe7bc1b05982f60e89"}, - {file = "propcache-0.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:55346705687dbd7ef0d77883ab4f6fabc48232f587925bdaf95219bae072491e"}, - {file = 
"propcache-0.2.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:00181262b17e517df2cd85656fcd6b4e70946fe62cd625b9d74ac9977b64d8d9"}, - {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6994984550eaf25dd7fc7bd1b700ff45c894149341725bb4edc67f0ffa94efa4"}, - {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:56295eb1e5f3aecd516d91b00cfd8bf3a13991de5a479df9e27dd569ea23959c"}, - {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:439e76255daa0f8151d3cb325f6dd4a3e93043e6403e6491813bcaaaa8733887"}, - {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:f6475a1b2ecb310c98c28d271a30df74f9dd436ee46d09236a6b750a7599ce57"}, - {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:3444cdba6628accf384e349014084b1cacd866fbb88433cd9d279d90a54e0b23"}, - {file = "propcache-0.2.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4a9d9b4d0a9b38d1c391bb4ad24aa65f306c6f01b512e10a8a34a2dc5675d348"}, - {file = "propcache-0.2.0-cp312-cp312-win32.whl", hash = "sha256:69d3a98eebae99a420d4b28756c8ce6ea5a29291baf2dc9ff9414b42676f61d5"}, - {file = "propcache-0.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:ad9c9b99b05f163109466638bd30ada1722abb01bbb85c739c50b6dc11f92dc3"}, - {file = "propcache-0.2.0-py3-none-any.whl", hash = "sha256:2ccc28197af5313706511fab3a8b66dcd6da067a1331372c82ea1cb74285e036"}, - {file = "propcache-0.2.0.tar.gz", hash = "sha256:df81779732feb9d01e5d513fad0122efb3d53bbc75f61b2a4f29a020bc985e70"}, + {file = "propcache-0.2.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:081a430aa8d5e8876c6909b67bd2d937bfd531b0382d3fdedb82612c618bc41a"}, + {file = "propcache-0.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d2ccec9ac47cf4e04897619c0e0c1a48c54a71bdf045117d3a26f80d38ab1fb0"}, + {file = "propcache-0.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:14d86fe14b7e04fa306e0c43cdbeebe6b2c2156a0c9ce56b815faacc193e320d"}, + {file = "propcache-0.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:049324ee97bb67285b49632132db351b41e77833678432be52bdd0289c0e05e4"}, + {file = "propcache-0.2.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1cd9a1d071158de1cc1c71a26014dcdfa7dd3d5f4f88c298c7f90ad6f27bb46d"}, + {file = "propcache-0.2.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98110aa363f1bb4c073e8dcfaefd3a5cea0f0834c2aab23dda657e4dab2f53b5"}, + {file = "propcache-0.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:647894f5ae99c4cf6bb82a1bb3a796f6e06af3caa3d32e26d2350d0e3e3faf24"}, + {file = "propcache-0.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfd3223c15bebe26518d58ccf9a39b93948d3dcb3e57a20480dfdd315356baff"}, + {file = "propcache-0.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d71264a80f3fcf512eb4f18f59423fe82d6e346ee97b90625f283df56aee103f"}, + {file = "propcache-0.2.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:e73091191e4280403bde6c9a52a6999d69cdfde498f1fdf629105247599b57ec"}, + {file = "propcache-0.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3935bfa5fede35fb202c4b569bb9c042f337ca4ff7bd540a0aa5e37131659348"}, + {file = "propcache-0.2.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:f508b0491767bb1f2b87fdfacaba5f7eddc2f867740ec69ece6d1946d29029a6"}, + {file = "propcache-0.2.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:1672137af7c46662a1c2be1e8dc78cb6d224319aaa40271c9257d886be4363a6"}, + {file = "propcache-0.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b74c261802d3d2b85c9df2dfb2fa81b6f90deeef63c2db9f0e029a3cac50b518"}, + {file = "propcache-0.2.1-cp312-cp312-win32.whl", hash = "sha256:d09c333d36c1409d56a9d29b3a1b800a42c76a57a5a8907eacdbce3f18768246"}, + {file 
= "propcache-0.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:c214999039d4f2a5b2073ac506bba279945233da8c786e490d411dfc30f855c1"}, + {file = "propcache-0.2.1-py3-none-any.whl", hash = "sha256:52277518d6aae65536e9cea52d4e7fd2f7a66f4aa2d30ed3f2fcea620ace3c54"}, + {file = "propcache-0.2.1.tar.gz", hash = "sha256:3f77ce728b19cb537714499928fe800c3dda29e8d9428778fc7c186da4c09a64"}, ] [[package]] name = "psutil" -version = "6.0.0" +version = "6.1.1" requires_python = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" summary = "Cross-platform lib for process and system monitoring in Python." groups = ["perf"] files = [ - {file = "psutil-6.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c588a7e9b1173b6e866756dde596fd4cad94f9399daf99ad8c3258b3cb2b47a0"}, - {file = "psutil-6.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ed2440ada7ef7d0d608f20ad89a04ec47d2d3ab7190896cd62ca5fc4fe08bf0"}, - {file = "psutil-6.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fd9a97c8e94059b0ef54a7d4baf13b405011176c3b6ff257c247cae0d560ecd"}, - {file = "psutil-6.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e8d0054fc88153ca0544f5c4d554d42e33df2e009c4ff42284ac9ebdef4132"}, - {file = "psutil-6.0.0-cp37-abi3-win32.whl", hash = "sha256:a495580d6bae27291324fe60cea0b5a7c23fa36a7cd35035a16d93bdcf076b9d"}, - {file = "psutil-6.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:33ea5e1c975250a720b3a6609c490db40dae5d83a4eb315170c4fe0d8b1f34b3"}, - {file = "psutil-6.0.0-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:ffe7fc9b6b36beadc8c322f84e1caff51e8703b88eee1da46d1e3a6ae11b4fd0"}, - {file = "psutil-6.0.0.tar.gz", hash = "sha256:8faae4f310b6d969fa26ca0545338b21f73c6b15db7c4a8d934a5482faa818f2"}, + {file = "psutil-6.1.1-cp36-abi3-macosx_10_9_x86_64.whl", hash = 
"sha256:fc0ed7fe2231a444fc219b9c42d0376e0a9a1a72f16c5cfa0f68d19f1a0663e8"}, + {file = "psutil-6.1.1-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:0bdd4eab935276290ad3cb718e9809412895ca6b5b334f5a9111ee6d9aff9377"}, + {file = "psutil-6.1.1-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6e06c20c05fe95a3d7302d74e7097756d4ba1247975ad6905441ae1b5b66003"}, + {file = "psutil-6.1.1-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97f7cb9921fbec4904f522d972f0c0e1f4fabbdd4e0287813b21215074a0f160"}, + {file = "psutil-6.1.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33431e84fee02bc84ea36d9e2c4a6d395d479c9dd9bba2376c1f6ee8f3a4e0b3"}, + {file = "psutil-6.1.1-cp37-abi3-win32.whl", hash = "sha256:eaa912e0b11848c4d9279a93d7e2783df352b082f40111e078388701fd479e53"}, + {file = "psutil-6.1.1-cp37-abi3-win_amd64.whl", hash = "sha256:f35cfccb065fff93529d2afb4a2e89e363fe63ca1e4a5da22b603a85833c2649"}, + {file = "psutil-6.1.1.tar.gz", hash = "sha256:cf8496728c18f2d0b45198f06895be52f36611711746b7f30c464b422b50e2f5"}, ] [[package]] @@ -1402,13 +1443,13 @@ files = [ [[package]] name = "pypika-tortoise" -version = "0.2.1" -requires_python = "<4.0,>=3.7" +version = "0.2.2" +requires_python = "<4.0,>=3.8" summary = "Forked from pypika and streamline just for tortoise-orm" groups = ["default", "migrations"] files = [ - {file = "pypika_tortoise-0.2.1-py3-none-any.whl", hash = "sha256:e91a1c5a78c6753ead1a9ba1aa169a1f1282c5035170e5462f0073564bc18886"}, - {file = "pypika_tortoise-0.2.1.tar.gz", hash = "sha256:979bbb9d60fe9f6e4129a25c44ee008aab4a4e97b296350be9983dcaa2766354"}, + {file = "pypika_tortoise-0.2.2-py3-none-any.whl", hash = "sha256:e93190aedd95acb08b69636bc2328cc053b2c9971307b6d44405bc6d9f9b71a5"}, + {file = "pypika_tortoise-0.2.2.tar.gz", hash = "sha256:f0fbc9e0c3ddc33118a5be69907428863849df60788e125edef1f46a6261d63b"}, ] 
[[package]] @@ -1531,22 +1572,23 @@ files = [ [[package]] name = "pytz" -version = "2024.1" +version = "2024.2" summary = "World timezone definitions, modern and historical" groups = ["default", "migrations"] files = [ - {file = "pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"}, - {file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"}, + {file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"}, + {file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"}, ] [[package]] name = "pyunormalize" -version = "15.1.0" +version = "16.0.0" requires_python = ">=3.6" -summary = "Unicode normalization forms (NFC, NFKC, NFD, NFKD). A library independent from the Python core Unicode database." +summary = "Unicode normalization forms (NFC, NFKC, NFD, NFKD). A library independent of the Python core Unicode database." groups = ["default"] files = [ - {file = "pyunormalize-15.1.0.tar.gz", hash = "sha256:cf4a87451a0f1cb76911aa97f432f4579e1f564a2f0c84ce488c73a73901b6c1"}, + {file = "pyunormalize-16.0.0-py3-none-any.whl", hash = "sha256:c647d95e5d1e2ea9a2f448d1d95d8518348df24eab5c3fd32d2b5c3300a49152"}, + {file = "pyunormalize-16.0.0.tar.gz", hash = "sha256:2e1dfbb4a118154ae26f70710426a52a364b926c9191f764601f5a8cb12761f7"}, ] [[package]] @@ -1582,27 +1624,27 @@ files = [ [[package]] name = "regex" -version = "2024.7.24" +version = "2024.11.6" requires_python = ">=3.8" summary = "Alternative regular expression module, to replace re." 
groups = ["default"] files = [ - {file = "regex-2024.7.24-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:fe4ebef608553aff8deb845c7f4f1d0740ff76fa672c011cc0bacb2a00fbde86"}, - {file = "regex-2024.7.24-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:74007a5b25b7a678459f06559504f1eec2f0f17bca218c9d56f6a0a12bfffdad"}, - {file = "regex-2024.7.24-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7df9ea48641da022c2a3c9c641650cd09f0cd15e8908bf931ad538f5ca7919c9"}, - {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a1141a1dcc32904c47f6846b040275c6e5de0bf73f17d7a409035d55b76f289"}, - {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80c811cfcb5c331237d9bad3bea2c391114588cf4131707e84d9493064d267f9"}, - {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7214477bf9bd195894cf24005b1e7b496f46833337b5dedb7b2a6e33f66d962c"}, - {file = "regex-2024.7.24-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d55588cba7553f0b6ec33130bc3e114b355570b45785cebdc9daed8c637dd440"}, - {file = "regex-2024.7.24-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:558a57cfc32adcf19d3f791f62b5ff564922942e389e3cfdb538a23d65a6b610"}, - {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a512eed9dfd4117110b1881ba9a59b31433caed0c4101b361f768e7bcbaf93c5"}, - {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:86b17ba823ea76256b1885652e3a141a99a5c4422f4a869189db328321b73799"}, - {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5eefee9bfe23f6df09ffb6dfb23809f4d74a78acef004aa904dc7c88b9944b05"}, - {file = "regex-2024.7.24-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:731fcd76bbdbf225e2eb85b7c38da9633ad3073822f5ab32379381e8c3c12e94"}, - {file = 
"regex-2024.7.24-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:eaef80eac3b4cfbdd6de53c6e108b4c534c21ae055d1dbea2de6b3b8ff3def38"}, - {file = "regex-2024.7.24-cp312-cp312-win32.whl", hash = "sha256:185e029368d6f89f36e526764cf12bf8d6f0e3a2a7737da625a76f594bdfcbfc"}, - {file = "regex-2024.7.24-cp312-cp312-win_amd64.whl", hash = "sha256:2f1baff13cc2521bea83ab2528e7a80cbe0ebb2c6f0bfad15be7da3aed443908"}, - {file = "regex-2024.7.24.tar.gz", hash = "sha256:9cfd009eed1a46b27c14039ad5bbc5e71b6367c5b2e6d5f5da0ea91600817506"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:52fb28f528778f184f870b7cf8f225f5eef0a8f6e3778529bdd40c7b3920796a"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdd6028445d2460f33136c55eeb1f601ab06d74cb3347132e1c24250187500d9"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:805e6b60c54bf766b251e94526ebad60b7de0c70f70a4e6210ee2891acb70bf2"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b85c2530be953a890eaffde05485238f07029600e8f098cdf1848d414a8b45e4"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb26437975da7dc36b7efad18aa9dd4ea569d2357ae6b783bf1118dabd9ea577"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:abfa5080c374a76a251ba60683242bc17eeb2c9818d0d30117b4486be10c59d3"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b7fa6606c2881c1db9479b0eaa11ed5dfa11c8d60a474ff0e095099f39d98e"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c32f75920cf99fe6b6c539c399a4a128452eaf1af27f39bce8909c9a3fd8cbe"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:982e6d21414e78e1f51cf595d7f321dcd14de1f2881c5dc6a6e23bbbbd68435e"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a7c2155f790e2fb448faed6dd241386719802296ec588a8b9051c1f5c481bc29"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:149f5008d286636e48cd0b1dd65018548944e495b0265b45e1bffecce1ef7f39"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:e5364a4502efca094731680e80009632ad6624084aff9a23ce8c8c6820de3e51"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0a86e7eeca091c09e021db8eb72d54751e527fa47b8d5787caf96d9831bd02ad"}, + {file = "regex-2024.11.6-cp312-cp312-win32.whl", hash = "sha256:32f9a4c643baad4efa81d549c2aadefaeba12249b2adc5af541759237eee1c54"}, + {file = "regex-2024.11.6-cp312-cp312-win_amd64.whl", hash = "sha256:a93c194e2df18f7d264092dc8539b8ffb86b45b899ab976aa15d48214138e81b"}, + {file = "regex-2024.11.6.tar.gz", hash = "sha256:7ab159b063c52a0333c884e4679f8d7a85112ee3078fe3d9004b2dd875585519"}, ] [[package]] @@ -1624,18 +1666,18 @@ files = [ [[package]] name = "rich" -version = "13.8.0" -requires_python = ">=3.7.0" +version = "13.9.4" +requires_python = ">=3.8.0" summary = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" groups = ["perf"] dependencies = [ "markdown-it-py>=2.2.0", "pygments<3.0.0,>=2.13.0", - "typing-extensions<5.0,>=4.0.0; python_version < \"3.9\"", + "typing-extensions<5.0,>=4.0.0; python_version < \"3.11\"", ] files = [ - {file = "rich-13.8.0-py3-none-any.whl", hash = "sha256:2e85306a063b9492dffc86278197a60cbece75bcb766022f3436f567cae11bdc"}, - {file = "rich-13.8.0.tar.gz", hash = "sha256:a5ac1f1cd448ade0d59cc3356f7db7a7ccda2c8cbae9c7a90c28ff463d3e91f4"}, + {file = "rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90"}, + {file = "rich-13.9.4.tar.gz", hash = 
"sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098"}, ] [[package]] @@ -1668,21 +1710,22 @@ files = [ [[package]] name = "ruamel-yaml-clib" -version = "0.2.8" -requires_python = ">=3.6" +version = "0.2.12" +requires_python = ">=3.9" summary = "C version of reader, parser and emitter for ruamel.yaml derived from libyaml" groups = ["default"] marker = "platform_python_implementation == \"CPython\" and python_version < \"3.13\"" files = [ - {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:ebc06178e8821efc9692ea7544aa5644217358490145629914d8020042c24aa1"}, - {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:edaef1c1200c4b4cb914583150dcaa3bc30e592e907c01117c08b13a07255ec2"}, - {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d176b57452ab5b7028ac47e7b3cf644bcfdc8cacfecf7e71759f7f51a59e5c92"}, - {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-manylinux_2_24_aarch64.whl", hash = "sha256:1dc67314e7e1086c9fdf2680b7b6c2be1c0d8e3a8279f2e993ca2a7545fecf62"}, - {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3213ece08ea033eb159ac52ae052a4899b56ecc124bb80020d9bbceeb50258e9"}, - {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:aab7fd643f71d7946f2ee58cc88c9b7bfc97debd71dcc93e03e2d174628e7e2d"}, - {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-win32.whl", hash = "sha256:5c365d91c88390c8d0a8545df0b5857172824b1c604e867161e6b3d59a827eaa"}, - {file = "ruamel.yaml.clib-0.2.8-cp312-cp312-win_amd64.whl", hash = "sha256:1758ce7d8e1a29d23de54a16ae867abd370f01b5a69e1a3ba75223eaa3ca1a1b"}, - {file = "ruamel.yaml.clib-0.2.8.tar.gz", hash = "sha256:beb2e0404003de9a4cab9753a8805a8fe9320ee6673136ed7f04255fe60bb512"}, + {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-macosx_14_0_arm64.whl", hash = 
"sha256:20b0f8dc160ba83b6dcc0e256846e1a02d044e13f7ea74a3d1d56ede4e48c632"}, + {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux2014_aarch64.whl", hash = "sha256:943f32bc9dedb3abff9879edc134901df92cfce2c3d5c9348f172f62eb2d771d"}, + {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95c3829bb364fdb8e0332c9931ecf57d9be3519241323c5274bd82f709cebc0c"}, + {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:749c16fcc4a2b09f28843cda5a193e0283e47454b63ec4b81eaa2242f50e4ccd"}, + {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bf165fef1f223beae7333275156ab2022cffe255dcc51c27f066b4370da81e31"}, + {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:32621c177bbf782ca5a18ba4d7af0f1082a3f6e517ac2a18b3974d4edf349680"}, + {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b82a7c94a498853aa0b272fd5bc67f29008da798d4f93a2f9f289feb8426a58d"}, + {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-win32.whl", hash = "sha256:e8c4ebfcfd57177b572e2040777b8abc537cdef58a2120e830124946aa9b42c5"}, + {file = "ruamel.yaml.clib-0.2.12-cp312-cp312-win_amd64.whl", hash = "sha256:0467c5965282c62203273b838ae77c0d29d7638c8a4e3a1c8bdd3602c10904e4"}, + {file = "ruamel.yaml.clib-0.2.12.tar.gz", hash = "sha256:6c8fbb13ec503f99a91901ab46e0b07ae7941cd527393187039aec586fdfd36f"}, ] [[package]] @@ -1712,6 +1755,22 @@ files = [ {file = "ruff-0.8.4.tar.gz", hash = "sha256:0d5f89f254836799af1615798caa5f80b7f935d7a670fad66c5007928e57ace8"}, ] +[[package]] +name = "scalecodec" +version = "1.2.11" +requires_python = "<4,>=3.6" +summary = "Python SCALE Codec Library" +groups = ["default"] +dependencies = [ + "base58>=2.0.1", + "more-itertools", + "requests>=2.24.0", +] +files = [ + {file = "scalecodec-1.2.11-py3-none-any.whl", hash = 
"sha256:d15c94965f617caa25096f83a45f5f73031d05e6ee08d6039969f0a64fc35de1"}, + {file = "scalecodec-1.2.11.tar.gz", hash = "sha256:99a2cdbfccdcaf22bd86b86da55a730a2855514ad2309faef4a4a93ac6cbeb8d"}, +] + [[package]] name = "scalene" version = "1.5.49" @@ -1754,13 +1813,13 @@ files = [ [[package]] name = "six" -version = "1.16.0" -requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +version = "1.17.0" +requires_python = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" summary = "Python 2 and 3 compatibility utilities" groups = ["default"] files = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, + {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, + {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, ] [[package]] @@ -2006,14 +2065,14 @@ files = [ [[package]] name = "toolz" -version = "0.12.1" -requires_python = ">=3.7" +version = "1.0.0" +requires_python = ">=3.8" summary = "List processing tools and functional utilities" groups = ["default"] marker = "implementation_name == \"pypy\" or implementation_name == \"cpython\"" files = [ - {file = "toolz-0.12.1-py3-none-any.whl", hash = "sha256:d22731364c07d72eea0a0ad45bafb2c2937ab6fd38a3507bf55eae8744aa7d85"}, - {file = "toolz-0.12.1.tar.gz", hash = "sha256:ecca342664893f177a13dac0e6b41cbd8ac25a358e5f215316d43e2100224f4d"}, + {file = "toolz-1.0.0-py3-none-any.whl", hash = "sha256:292c8f1c4e7516bf9086f8850935c799a874039c8bcf959d47b600e4c44a6236"}, + {file = "toolz-1.0.0.tar.gz", hash = "sha256:2c86e3d9a04798ac556793bced838816296a2f085017664e4995cb40a1047a02"}, ] [[package]] @@ -2036,33 +2095,33 @@ files = [ [[package]] name = "typeguard" -version = "4.0.1" -requires_python = ">=3.7.4" +version = "4.4.1" 
+requires_python = ">=3.9" summary = "Run-time type checker for Python" groups = ["default"] dependencies = [ "importlib-metadata>=3.6; python_version < \"3.10\"", - "typing-extensions>=4.7.0; python_version < \"3.12\"", + "typing-extensions>=4.10.0", ] files = [ - {file = "typeguard-4.0.1-py3-none-any.whl", hash = "sha256:43f55cc9953f26dae362adb973b6c9ad6b97bfffcc6757277912eddd5cfa345b"}, - {file = "typeguard-4.0.1.tar.gz", hash = "sha256:db35142d1f92fc8c1b954e5cc03b57810428f9cd4e4604647bdf5764fc5bbba9"}, + {file = "typeguard-4.4.1-py3-none-any.whl", hash = "sha256:9324ec07a27ec67fc54a9c063020ca4c0ae6abad5e9f0f9804ca59aee68c6e21"}, + {file = "typeguard-4.4.1.tar.gz", hash = "sha256:0d22a89d00b453b47c49875f42b6601b961757541a2e1e0ef517b6e24213c21b"}, ] [[package]] name = "types-pytz" -version = "2024.2.0.20241003" +version = "2024.2.0.20241221" requires_python = ">=3.8" summary = "Typing stubs for pytz" groups = ["lint"] files = [ - {file = "types-pytz-2024.2.0.20241003.tar.gz", hash = "sha256:575dc38f385a922a212bac00a7d6d2e16e141132a3c955078f4a4fd13ed6cb44"}, - {file = "types_pytz-2024.2.0.20241003-py3-none-any.whl", hash = "sha256:3e22df1336c0c6ad1d29163c8fda82736909eb977281cb823c57f8bae07118b7"}, + {file = "types_pytz-2024.2.0.20241221-py3-none-any.whl", hash = "sha256:8fc03195329c43637ed4f593663df721fef919b60a969066e22606edf0b53ad5"}, + {file = "types_pytz-2024.2.0.20241221.tar.gz", hash = "sha256:06d7cde9613e9f7504766a0554a270c369434b50e00975b3a4a0f6eed0f2c1a9"}, ] [[package]] name = "types-requests" -version = "2.32.0.20240712" +version = "2.32.0.20241016" requires_python = ">=3.8" summary = "Typing stubs for requests" groups = ["default"] @@ -2070,8 +2129,8 @@ dependencies = [ "urllib3>=2", ] files = [ - {file = "types-requests-2.32.0.20240712.tar.gz", hash = "sha256:90c079ff05e549f6bf50e02e910210b98b8ff1ebdd18e19c873cd237737c1358"}, - {file = "types_requests-2.32.0.20240712-py3-none-any.whl", hash = 
"sha256:f754283e152c752e46e70942fa2a146b5bc70393522257bb85bd1ef7e019dcc3"}, + {file = "types-requests-2.32.0.20241016.tar.gz", hash = "sha256:0d9cad2f27515d0e3e3da7134a1b6f28fb97129d86b867f24d9c726452634d95"}, + {file = "types_requests-2.32.0.20241016-py3-none-any.whl", hash = "sha256:4195d62d6d3e043a4eaaf08ff8a62184584d2e8684e9d2aa178c7915a7da3747"}, ] [[package]] @@ -2113,14 +2172,14 @@ files = [ [[package]] name = "tzdata" -version = "2024.1" +version = "2024.2" requires_python = ">=2" summary = "Provider of IANA time zone data" groups = ["default"] marker = "platform_system == \"Windows\"" files = [ - {file = "tzdata-2024.1-py2.py3-none-any.whl", hash = "sha256:9068bc196136463f5245e51efda838afa15aaeca9903f49050dfa2679db4d252"}, - {file = "tzdata-2024.1.tar.gz", hash = "sha256:2674120f8d891909751c38abcdfd386ac0a5a1127954fbc332af6b5ceae07efd"}, + {file = "tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"}, + {file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"}, ] [[package]] @@ -2140,13 +2199,13 @@ files = [ [[package]] name = "urllib3" -version = "2.2.2" +version = "2.2.3" requires_python = ">=3.8" summary = "HTTP library with thread-safe connection pooling, file post, and more." 
groups = ["default", "docs", "test"] files = [ - {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, - {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, + {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, + {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, ] [[package]] @@ -2215,42 +2274,78 @@ files = [ {file = "web3-7.6.1.tar.gz", hash = "sha256:0cce0f33bef9096fc976ee00c38e71cdd10a61d4302a6b13b56ab58327764f9a"}, ] +[[package]] +name = "websocket-client" +version = "1.8.0" +requires_python = ">=3.8" +summary = "WebSocket client for Python with low level API options" +groups = ["default"] +files = [ + {file = "websocket_client-1.8.0-py3-none-any.whl", hash = "sha256:17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526"}, + {file = "websocket_client-1.8.0.tar.gz", hash = "sha256:3239df9f44da632f96012472805d40a23281a991027ce11d2f45a6f24ac4c3da"}, +] + [[package]] name = "websockets" -version = "12.0" +version = "13.1" requires_python = ">=3.8" summary = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" groups = ["default"] files = [ - {file = "websockets-12.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0e6e2711d5a8e6e482cacb927a49a3d432345dfe7dea8ace7b5790df5932e4df"}, - {file = "websockets-12.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:dbcf72a37f0b3316e993e13ecf32f10c0e1259c28ffd0a85cee26e8549595fbc"}, - {file = "websockets-12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:12743ab88ab2af1d17dd4acb4645677cb7063ef4db93abffbf164218a5d54c6b"}, - {file = "websockets-12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b645f491f3c48d3f8a00d1fce07445fab7347fec54a3e65f0725d730d5b99cb"}, - {file = 
"websockets-12.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9893d1aa45a7f8b3bc4510f6ccf8db8c3b62120917af15e3de247f0780294b92"}, - {file = "websockets-12.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f38a7b376117ef7aff996e737583172bdf535932c9ca021746573bce40165ed"}, - {file = "websockets-12.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:f764ba54e33daf20e167915edc443b6f88956f37fb606449b4a5b10ba42235a5"}, - {file = "websockets-12.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:1e4b3f8ea6a9cfa8be8484c9221ec0257508e3a1ec43c36acdefb2a9c3b00aa2"}, - {file = "websockets-12.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9fdf06fd06c32205a07e47328ab49c40fc1407cdec801d698a7c41167ea45113"}, - {file = "websockets-12.0-cp312-cp312-win32.whl", hash = "sha256:baa386875b70cbd81798fa9f71be689c1bf484f65fd6fb08d051a0ee4e79924d"}, - {file = "websockets-12.0-cp312-cp312-win_amd64.whl", hash = "sha256:ae0a5da8f35a5be197f328d4727dbcfafa53d1824fac3d96cdd3a642fe09394f"}, - {file = "websockets-12.0-py3-none-any.whl", hash = "sha256:dc284bbc8d7c78a6c69e0c7325ab46ee5e40bb4d50e494d8131a07ef47500e9e"}, - {file = "websockets-12.0.tar.gz", hash = "sha256:81df9cbcbb6c260de1e007e58c011bfebe2dafc8435107b0537f393dd38c8b1b"}, + {file = "websockets-13.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:9d75baf00138f80b48f1eac72ad1535aac0b6461265a0bcad391fc5aba875cfc"}, + {file = "websockets-13.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9b6f347deb3dcfbfde1c20baa21c2ac0751afaa73e64e5b693bb2b848efeaa49"}, + {file = "websockets-13.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:de58647e3f9c42f13f90ac7e5f58900c80a39019848c5547bc691693098ae1bd"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1b54689e38d1279a51d11e3467dd2f3a50f5f2e879012ce8f2d6943f00e83f0"}, + {file = 
"websockets-13.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf1781ef73c073e6b0f90af841aaf98501f975d306bbf6221683dd594ccc52b6"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d23b88b9388ed85c6faf0e74d8dec4f4d3baf3ecf20a65a47b836d56260d4b9"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3c78383585f47ccb0fcf186dcb8a43f5438bd7d8f47d69e0b56f71bf431a0a68"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d6d300f8ec35c24025ceb9b9019ae9040c1ab2f01cddc2bcc0b518af31c75c14"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a9dcaf8b0cc72a392760bb8755922c03e17a5a54e08cca58e8b74f6902b433cf"}, + {file = "websockets-13.1-cp312-cp312-win32.whl", hash = "sha256:2f85cf4f2a1ba8f602298a853cec8526c2ca42a9a4b947ec236eaedb8f2dc80c"}, + {file = "websockets-13.1-cp312-cp312-win_amd64.whl", hash = "sha256:38377f8b0cdeee97c552d20cf1865695fcd56aba155ad1b4ca8779a5b6ef4ac3"}, + {file = "websockets-13.1-py3-none-any.whl", hash = "sha256:a9a396a6ad26130cdae92ae10c36af09d9bfe6cafe69670fd3b6da9b07b4044f"}, + {file = "websockets-13.1.tar.gz", hash = "sha256:a3b3366087c1bc0a2795111edcadddb8b3b59509d5db5d7ea3fdd69f954a8878"}, ] [[package]] name = "wheel" -version = "0.44.0" +version = "0.45.1" requires_python = ">=3.8" summary = "A built-package format for Python" groups = ["perf"] files = [ - {file = "wheel-0.44.0-py3-none-any.whl", hash = "sha256:2376a90c98cc337d18623527a97c31797bd02bad0033d41547043a1cbfbe448f"}, - {file = "wheel-0.44.0.tar.gz", hash = "sha256:a29c3f2817e95ab89aa4660681ad547c0e9547f20e75b0562fe7723c9a2a9d49"}, + {file = "wheel-0.45.1-py3-none-any.whl", hash = "sha256:708e7481cc80179af0e556bbf0cc00b8444c7321e2700b8d8580231d13017248"}, + {file = "wheel-0.45.1.tar.gz", hash = 
"sha256:661e1abd9198507b1409a20c02106d9670b2576e916d58f520316666abca6729"}, +] + +[[package]] +name = "xxhash" +version = "3.5.0" +requires_python = ">=3.7" +summary = "Python binding for xxHash" +groups = ["default"] +files = [ + {file = "xxhash-3.5.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:14470ace8bd3b5d51318782cd94e6f94431974f16cb3b8dc15d52f3b69df8e00"}, + {file = "xxhash-3.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:59aa1203de1cb96dbeab595ded0ad0c0056bb2245ae11fac11c0ceea861382b9"}, + {file = "xxhash-3.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08424f6648526076e28fae6ea2806c0a7d504b9ef05ae61d196d571e5c879c84"}, + {file = "xxhash-3.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:61a1ff00674879725b194695e17f23d3248998b843eb5e933007ca743310f793"}, + {file = "xxhash-3.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2f2c61bee5844d41c3eb015ac652a0229e901074951ae48581d58bfb2ba01be"}, + {file = "xxhash-3.5.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d32a592cac88d18cc09a89172e1c32d7f2a6e516c3dfde1b9adb90ab5df54a6"}, + {file = "xxhash-3.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:70dabf941dede727cca579e8c205e61121afc9b28516752fd65724be1355cc90"}, + {file = "xxhash-3.5.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e5d0ddaca65ecca9c10dcf01730165fd858533d0be84c75c327487c37a906a27"}, + {file = "xxhash-3.5.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e5b5e16c5a480fe5f59f56c30abdeba09ffd75da8d13f6b9b6fd224d0b4d0a2"}, + {file = "xxhash-3.5.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:149b7914451eb154b3dfaa721315117ea1dac2cc55a01bfbd4df7c68c5dd683d"}, + {file = "xxhash-3.5.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:eade977f5c96c677035ff39c56ac74d851b1cca7d607ab3d8f23c6b859379cab"}, + {file = 
"xxhash-3.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fa9f547bd98f5553d03160967866a71056a60960be00356a15ecc44efb40ba8e"}, + {file = "xxhash-3.5.0-cp312-cp312-win32.whl", hash = "sha256:f7b58d1fd3551b8c80a971199543379be1cee3d0d409e1f6d8b01c1a2eebf1f8"}, + {file = "xxhash-3.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:fa0cafd3a2af231b4e113fba24a65d7922af91aeb23774a8b78228e6cd785e3e"}, + {file = "xxhash-3.5.0-cp312-cp312-win_arm64.whl", hash = "sha256:586886c7e89cb9828bcd8a5686b12e161368e0064d040e225e72607b43858ba2"}, + {file = "xxhash-3.5.0.tar.gz", hash = "sha256:84f2caddf951c9cbf8dc2e22a89d4ccf5d86391ac6418fe81e3c67d0cf60b45f"}, ] [[package]] name = "yarl" -version = "1.18.0" +version = "1.18.3" requires_python = ">=3.9" summary = "Yet another URL library" groups = ["default", "test"] @@ -2260,22 +2355,22 @@ dependencies = [ "propcache>=0.2.0", ] files = [ - {file = "yarl-1.18.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:1ece25e2251c28bab737bdf0519c88189b3dd9492dc086a1d77336d940c28ced"}, - {file = "yarl-1.18.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:454902dc1830d935c90b5b53c863ba2a98dcde0fbaa31ca2ed1ad33b2a7171c6"}, - {file = "yarl-1.18.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:01be8688fc211dc237e628fcc209dda412d35de7642453059a0553747018d075"}, - {file = "yarl-1.18.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d26f1fa9fa2167bb238f6f4b20218eb4e88dd3ef21bb8f97439fa6b5313e30d"}, - {file = "yarl-1.18.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b234a4a9248a9f000b7a5dfe84b8cb6210ee5120ae70eb72a4dcbdb4c528f72f"}, - {file = "yarl-1.18.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe94d1de77c4cd8caff1bd5480e22342dbd54c93929f5943495d9c1e8abe9f42"}, - {file = "yarl-1.18.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b4c90c5363c6b0a54188122b61edb919c2cd1119684999d08cd5e538813a28e"}, - {file = 
"yarl-1.18.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49a98ecadc5a241c9ba06de08127ee4796e1009555efd791bac514207862b43d"}, - {file = "yarl-1.18.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9106025c7f261f9f5144f9aa7681d43867eed06349a7cfb297a1bc804de2f0d1"}, - {file = "yarl-1.18.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:f275ede6199d0f1ed4ea5d55a7b7573ccd40d97aee7808559e1298fe6efc8dbd"}, - {file = "yarl-1.18.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:f7edeb1dcc7f50a2c8e08b9dc13a413903b7817e72273f00878cb70e766bdb3b"}, - {file = "yarl-1.18.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c083f6dd6951b86e484ebfc9c3524b49bcaa9c420cb4b2a78ef9f7a512bfcc85"}, - {file = "yarl-1.18.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:80741ec5b471fbdfb997821b2842c59660a1c930ceb42f8a84ba8ca0f25a66aa"}, - {file = "yarl-1.18.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b1a3297b9cad594e1ff0c040d2881d7d3a74124a3c73e00c3c71526a1234a9f7"}, - {file = "yarl-1.18.0-cp312-cp312-win32.whl", hash = "sha256:cd6ab7d6776c186f544f893b45ee0c883542b35e8a493db74665d2e594d3ca75"}, - {file = "yarl-1.18.0-cp312-cp312-win_amd64.whl", hash = "sha256:039c299a0864d1f43c3e31570045635034ea7021db41bf4842693a72aca8df3a"}, - {file = "yarl-1.18.0-py3-none-any.whl", hash = "sha256:dbf53db46f7cf176ee01d8d98c39381440776fcda13779d269a8ba664f69bec0"}, - {file = "yarl-1.18.0.tar.gz", hash = "sha256:20d95535e7d833889982bfe7cc321b7f63bf8879788fee982c76ae2b24cfb715"}, + {file = "yarl-1.18.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:1dd4bdd05407ced96fed3d7f25dbbf88d2ffb045a0db60dbc247f5b3c5c25d50"}, + {file = "yarl-1.18.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7c33dd1931a95e5d9a772d0ac5e44cac8957eaf58e3c8da8c1414de7dd27c576"}, + {file = "yarl-1.18.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:25b411eddcfd56a2f0cd6a384e9f4f7aa3efee14b188de13048c25b5e91f1640"}, + 
{file = "yarl-1.18.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:436c4fc0a4d66b2badc6c5fc5ef4e47bb10e4fd9bf0c79524ac719a01f3607c2"}, + {file = "yarl-1.18.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e35ef8683211db69ffe129a25d5634319a677570ab6b2eba4afa860f54eeaf75"}, + {file = "yarl-1.18.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84b2deecba4a3f1a398df819151eb72d29bfeb3b69abb145a00ddc8d30094512"}, + {file = "yarl-1.18.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00e5a1fea0fd4f5bfa7440a47eff01d9822a65b4488f7cff83155a0f31a2ecba"}, + {file = "yarl-1.18.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d0e883008013c0e4aef84dcfe2a0b172c4d23c2669412cf5b3371003941f72bb"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5a3f356548e34a70b0172d8890006c37be92995f62d95a07b4a42e90fba54272"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:ccd17349166b1bee6e529b4add61727d3f55edb7babbe4069b5764c9587a8cc6"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b958ddd075ddba5b09bb0be8a6d9906d2ce933aee81100db289badbeb966f54e"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c7d79f7d9aabd6011004e33b22bc13056a3e3fb54794d138af57f5ee9d9032cb"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:4891ed92157e5430874dad17b15eb1fda57627710756c27422200c52d8a4e393"}, + {file = "yarl-1.18.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ce1af883b94304f493698b00d0f006d56aea98aeb49d75ec7d98cd4a777e9285"}, + {file = "yarl-1.18.3-cp312-cp312-win32.whl", hash = "sha256:f91c4803173928a25e1a55b943c81f55b8872f0018be83e3ad4938adffb77dd2"}, + {file = "yarl-1.18.3-cp312-cp312-win_amd64.whl", hash = "sha256:7e2ee16578af3b52ac2f334c3b1f92262f47e02cc6193c598502bd46f5cd1477"}, 
+ {file = "yarl-1.18.3-py3-none-any.whl", hash = "sha256:b57f4f58099328dfb26c6a771d09fb20dbbae81d20cfb66141251ea063bd101b"}, + {file = "yarl-1.18.3.tar.gz", hash = "sha256:ac1801c45cbf77b6c99242eeff4fffb5e4e73a800b5c4ad4fc0be5def634d2e1"}, ] diff --git a/pyproject.toml b/pyproject.toml index 60ba06b19..7e2af3079 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -51,6 +51,7 @@ classifiers = [ dependencies = [ "aiohttp~=3.10", "aiolimiter~=1.1", + "aiosubstrate>=0.1.0", "anyio~=4.4", "appdirs~=1.4", "APScheduler~=3.10", @@ -71,12 +72,11 @@ dependencies = [ "ruamel.yaml~=0.18.6", "sentry-sdk~=2.16", "sqlparse~=0.5", - "starknet-py==0.24.0", + "starknet-py==0.24.0", # pinned "strict-rfc3339~=0.7", "survey~=5.4", "tabulate~=0.9", - # NOTE: Heavily patched; don't update without testing. - "tortoise-orm==0.21.7", + "tortoise-orm==0.21.7", # pinned "uvloop~=0.20", "web3~=7.2", ] @@ -85,9 +85,6 @@ dependencies = [ migrations = [ "aerich~=0.7.2", ] -[tool.pdm.resolution] -# NOTE: Introduced by starknetpy 0.24; depends on half of the PyPI -excludes = ["bip-utils", "ledgerwallet"] [project.urls] Homepage = "https://dipdup.io/" @@ -97,35 +94,6 @@ Repository = "https://github.com/dipdup-io/dipdup" [project.scripts] dipdup = "dipdup.cli:cli" -[tool.pdm.dev-dependencies] -lint = [ - "black", - "mypy", - "ruff", - "types-pytz", - "types-tabulate", -] -test = [ - "docker", - "pytest", - "pytest-aiohttp", - "pytest-asyncio", - "pytest-cov", - "pytest-xdist", -] -docs = [ - "Sphinx", - "sphinx-click", - "sphinx-markdown-builder", - "watchdog", -] -perf = [ - "scalene", -] - -[tool.pdm.build] -includes = ["src/dipdup"] - [tool.black] line-length = 120 target-version = ["py312"] @@ -179,6 +147,66 @@ exclude_lines = [ "if env.DEBUG:", ] + +[tool.pdm.resolution] +# NOTE: Introduced by starknetpy 0.24; depends on half of the PyPI +excludes = ["bip-utils", "ledgerwallet"] + +[tool.pdm.dev-dependencies] +lint = [ + "black", + "mypy", + "ruff", + "types-pytz", + "types-tabulate", +] 
+test = [ + "docker", + "pytest", + "pytest-aiohttp", + "pytest-asyncio", + "pytest-cov", + "pytest-xdist", +] +docs = [ + "Sphinx", + "sphinx-click", + "sphinx-markdown-builder", + "watchdog", +] +perf = [ + "scalene", +] + +[tool.pdm.build] +includes = ["src/dipdup"] + [build-system] requires = ["pdm-backend"] build-backend = "pdm.backend" + +[dependency-groups] +lint = [ + "black", + "mypy", + "ruff", + "types-pytz", + "types-tabulate", +] +test = [ + "docker", + "pytest", + "pytest-aiohttp", + "pytest-asyncio", + "pytest-cov", + "pytest-xdist", +] +docs = [ + "Sphinx", + "sphinx-click", + "sphinx-markdown-builder", + "watchdog", +] +perf = [ + "scalene", +] diff --git a/requirements.txt b/requirements.txt index a8fc7cfc2..46821d46c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,86 +1,90 @@ # This file is @generated by PDM. # Please do not edit it manually. -aiohappyeyeballs==2.4.0 +aiohappyeyeballs==2.4.4 aiohttp==3.11.11 aiolimiter==1.2.1 -aiosignal==1.3.1 +aiosignal==1.3.2 aiosqlite==0.20.0 +aiosubstrate==0.1.0 annotated-types==0.7.0 anyio==4.7.0 appdirs==1.4.4 apscheduler==3.11.0 -argcomplete==3.5.0 +argcomplete==3.5.2 asgiref==3.8.1 async-lru==2.0.4 asyncpg==0.30.0 -attrs==24.2.0 -bitarray==2.9.2 +attrs==24.3.0 +base58==2.1.1 +bitarray==3.0.0 black==24.10.0 -certifi==2024.8.30 -charset-normalizer==3.3.2 -ckzg==2.0.0 -click==8.1.7 +certifi==2024.12.14 +charset-normalizer==3.4.0 +ckzg==2.0.1 +click==8.1.8 colorama==0.4.6; platform_system == "Windows" or sys_platform == "win32" crypto-cpp-py==1.4.4 -cytoolz==0.12.3; implementation_name == "cpython" +cytoolz==1.0.1; implementation_name == "cpython" datamodel-code-generator==0.26.4 -dnspython==2.6.1; python_version ~= "3.11" +dnspython==2.7.0; python_version ~= "3.11" ecdsa==0.18.0 email-validator==2.2.0; python_version ~= "3.11" eth-abi==5.1.0 -eth-account==0.13.3 +eth-account==0.13.4 eth-hash[pycryptodome]==0.7.0 eth-keyfile==0.8.1 -eth-keys==0.5.1 +eth-keys==0.6.0 eth-rlp==2.1.0 -eth-typing==5.0.0 
-eth-utils==5.0.0 -frozenlist==1.4.1 +eth-typing==5.0.1 +eth-utils==5.1.0 +frozenlist==1.5.0 genson==1.3.0 hexbytes==1.2.1 -idna==3.8 +idna==3.10 inflect==5.6.2 iso8601==2.1.0 isort==5.13.2 jinja2==3.1.4 lark==1.2.2 lru-dict==1.3.0 -markupsafe==2.1.5 -marshmallow==3.22.0 -marshmallow-dataclass==8.7.0 +markupsafe==3.0.2 +marshmallow==3.23.2 +marshmallow-dataclass==8.7.1 marshmallow-oneofschema==3.1.1 +more-itertools==10.5.0 mpmath==1.3.0 -msgpack==1.0.8 -multidict==6.0.5 +msgpack==1.1.0 +multidict==6.1.0 mypy-extensions==1.0.0 orjson==3.10.12 -packaging==24.1 +packaging==24.2 parsimonious==0.10.0 pathspec==0.12.1 -platformdirs==4.2.2 +platformdirs==4.3.6 poseidon-py==0.1.5 prometheus-client==0.21.1 -propcache==0.2.0 +propcache==0.2.1 pycryptodome==3.21.0 pydantic-core==2.27.2 pydantic[email]==2.10.4; python_version ~= "3.11" pyhumps==3.8.0 -pypika-tortoise==0.2.1 +pypika-tortoise==0.2.2 pysignalr==1.1.0 python-dotenv==1.0.1 python-json-logger==2.0.7 -pytz==2024.1 -pyunormalize==15.1.0 +pytz==2024.2 +pyunormalize==16.0.0 pywin32==306; platform_system == "Windows" or sys_platform == "win32" or os_name == "nt" pyyaml==6.0.2 -regex==2024.7.24 +regex==2024.11.6 requests==2.32.3 rlp==4.0.1 ruamel-yaml==0.18.6 -ruamel-yaml-clib==0.2.8; platform_python_implementation == "CPython" and python_version < "3.13" +ruamel-yaml-clib==0.2.12; platform_python_implementation == "CPython" and python_version < "3.13" +scalecodec==1.2.11 sentry-sdk==2.19.2 -six==1.16.0 +six==1.17.0 sniffio==1.3.1 sqlparse==0.5.3 starknet-py==0.24.0 @@ -88,16 +92,18 @@ strict-rfc3339==0.7 survey==5.4.2 sympy==1.11.1 tabulate==0.9.0 -toolz==0.12.1; implementation_name == "pypy" or implementation_name == "cpython" +toolz==1.0.0; implementation_name == "pypy" or implementation_name == "cpython" tortoise-orm==0.21.7 -typeguard==4.0.1 -types-requests==2.32.0.20240712 +typeguard==4.4.1 +types-requests==2.32.0.20241016 typing-extensions==4.12.2 typing-inspect==0.9.0 -tzdata==2024.1; platform_system == "Windows" 
+tzdata==2024.2; platform_system == "Windows" tzlocal==5.2 -urllib3==2.2.2 +urllib3==2.2.3 uvloop==0.21.0 web3==7.6.1 -websockets==12.0 -yarl==1.18.0 +websocket-client==1.8.0 +websockets==13.1 +xxhash==3.5.0 +yarl==1.18.3 diff --git a/schemas/dipdup-3.0.json b/schemas/dipdup-3.0.json index f2682a2fc..04dc08524 100644 --- a/schemas/dipdup-3.0.json +++ b/schemas/dipdup-3.0.json @@ -1,54 +1,5 @@ { "$defs": { - "AbiEtherscanDatasourceConfig": { - "additionalProperties": false, - "description": "Etherscan datasource config", - "properties": { - "api_key": { - "anyOf": [ - { - "type": "string" - }, - { - "type": "null" - } - ], - "default": null, - "description": "API key", - "title": "api_key" - }, - "http": { - "anyOf": [ - { - "$ref": "#/$defs/HttpConfig" - }, - { - "type": "null" - } - ], - "default": null, - "description": "HTTP client configuration", - "title": "http" - }, - "kind": { - "const": "abi.etherscan", - "description": "always 'abi.etherscan'", - "title": "kind", - "type": "string" - }, - "url": { - "description": "API URL", - "title": "url", - "type": "string" - } - }, - "required": [ - "kind", - "url" - ], - "title": "AbiEtherscanDatasourceConfig", - "type": "object" - }, "AdvancedConfig": { "additionalProperties": true, "description": "This section allows users to tune some system-wide options, either experimental or unsuitable for generic configurations.", @@ -285,6 +236,63 @@ "title": "EvmContractConfig", "type": "object" }, + "EvmEtherscanDatasourceConfig": { + "additionalProperties": false, + "description": "Etherscan datasource config", + "properties": { + "api_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "API key", + "title": "api_key" + }, + "http": { + "anyOf": [ + { + "$ref": "#/$defs/HttpConfig" + }, + { + "type": "null" + } + ], + "default": null, + "description": "HTTP client configuration", + "title": "http" + }, + "kind": { + "anyOf": [ + { + "const": "evm.etherscan", + 
"type": "string" + }, + { + "const": "abi.etherscan", + "type": "string" + } + ], + "description": "always 'evm.etherscan'", + "title": "kind" + }, + "url": { + "description": "API URL", + "title": "url", + "type": "string" + } + }, + "required": [ + "kind", + "url" + ], + "title": "EvmEtherscanDatasourceConfig", + "type": "object" + }, "EvmEventsHandlerConfig": { "additionalProperties": false, "description": "Subsquid event handler", @@ -338,7 +346,7 @@ "$ref": "#/$defs/EvmNodeDatasourceConfig" }, { - "$ref": "#/$defs/AbiEtherscanDatasourceConfig" + "$ref": "#/$defs/EvmEtherscanDatasourceConfig" } ] }, @@ -563,7 +571,7 @@ "$ref": "#/$defs/EvmNodeDatasourceConfig" }, { - "$ref": "#/$defs/AbiEtherscanDatasourceConfig" + "$ref": "#/$defs/EvmEtherscanDatasourceConfig" } ] }, @@ -1581,6 +1589,263 @@ "title": "StarknetSubsquidDatasourceConfig", "type": "object" }, + "SubstrateEventsHandlerConfig": { + "additionalProperties": false, + "description": "Subsquid event handler", + "properties": { + "callback": { + "description": "Callback name", + "title": "callback", + "type": "string" + }, + "name": { + "description": "Event name (pallet.event)", + "title": "name", + "type": "string" + } + }, + "required": [ + "callback", + "name" + ], + "title": "SubstrateEventsHandlerConfig", + "type": "object" + }, + "SubstrateEventsIndexConfig": { + "additionalProperties": false, + "description": "Subsquid datasource config", + "properties": { + "datasources": { + "description": "`substrate` datasources to use", + "items": { + "anyOf": [ + { + "type": "string" + }, + { + "$ref": "#/$defs/SubstrateSubsquidDatasourceConfig" + }, + { + "$ref": "#/$defs/SubstrateSubscanDatasourceConfig" + }, + { + "$ref": "#/$defs/SubstrateNodeDatasourceConfig" + } + ] + }, + "title": "datasources", + "type": "array" + }, + "first_level": { + "default": 0, + "description": "Level to start indexing from", + "title": "first_level", + "type": "integer" + }, + "handlers": { + "description": "Event handlers", + 
"items": { + "$ref": "#/$defs/SubstrateEventsHandlerConfig" + }, + "title": "handlers", + "type": "array" + }, + "kind": { + "const": "substrate.events", + "description": "Always 'substrate.events'", + "title": "kind", + "type": "string" + }, + "last_level": { + "default": 0, + "description": "Level to stop indexing and disable this index", + "title": "last_level", + "type": "integer" + }, + "runtime": { + "anyOf": [ + { + "type": "string" + }, + { + "$ref": "#/$defs/SubstrateRuntimeConfig" + } + ], + "description": "Substrate runtime", + "title": "runtime" + } + }, + "required": [ + "kind", + "datasources", + "runtime", + "handlers" + ], + "title": "SubstrateEventsIndexConfig", + "type": "object" + }, + "SubstrateNodeDatasourceConfig": { + "additionalProperties": false, + "description": "Substrate node datasource config", + "properties": { + "http": { + "anyOf": [ + { + "$ref": "#/$defs/HttpConfig" + }, + { + "type": "null" + } + ], + "default": null, + "description": "HTTP client configuration", + "title": "http" + }, + "kind": { + "const": "substrate.node", + "description": "Always 'substrate.node'", + "title": "kind", + "type": "string" + }, + "url": { + "$ref": "#/$defs/Url", + "description": "Substrate node URL", + "title": "url" + }, + "ws_url": { + "anyOf": [ + { + "$ref": "#/$defs/WsUrl" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Substrate node WebSocket URL", + "title": "ws_url" + } + }, + "required": [ + "kind", + "url" + ], + "title": "SubstrateNodeDatasourceConfig", + "type": "object" + }, + "SubstrateRuntimeConfig": { + "additionalProperties": false, + "description": "Substrate runtime config", + "properties": { + "kind": { + "const": "substrate", + "default": "substrate", + "description": "Always 'substrate'", + "title": "kind", + "type": "string" + }, + "type_registry": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "Path to type registry or its alias", + 
"title": "type_registry" + } + }, + "title": "SubstrateRuntimeConfig", + "type": "object" + }, + "SubstrateSubscanDatasourceConfig": { + "additionalProperties": false, + "description": "Subscan datasource config", + "properties": { + "api_key": { + "anyOf": [ + { + "type": "string" + }, + { + "type": "null" + } + ], + "default": null, + "description": "API key", + "title": "api_key" + }, + "http": { + "anyOf": [ + { + "$ref": "#/$defs/HttpConfig" + }, + { + "type": "null" + } + ], + "default": null, + "description": "HTTP client configuration", + "title": "http" + }, + "kind": { + "const": "substrate.subscan", + "description": "always 'substrate.subscan'", + "title": "kind", + "type": "string" + }, + "url": { + "description": "API URL", + "title": "url", + "type": "string" + } + }, + "required": [ + "kind", + "url" + ], + "title": "SubstrateSubscanDatasourceConfig", + "type": "object" + }, + "SubstrateSubsquidDatasourceConfig": { + "additionalProperties": false, + "description": "Subsquid datasource config", + "properties": { + "http": { + "anyOf": [ + { + "$ref": "#/$defs/HttpConfig" + }, + { + "type": "null" + } + ], + "default": null, + "description": "HTTP client configuration", + "title": "http" + }, + "kind": { + "const": "substrate.subsquid", + "description": "always 'substrate.subsquid'", + "title": "kind", + "type": "string" + }, + "url": { + "$ref": "#/$defs/Url", + "description": "URL of Subsquid Network API", + "title": "url" + } + }, + "required": [ + "kind", + "url" + ], + "title": "SubstrateSubsquidDatasourceConfig", + "type": "object" + }, "TezosAddress": { "type": "string" }, @@ -2797,7 +3062,7 @@ "$ref": "#/$defs/CoinbaseDatasourceConfig" }, { - "$ref": "#/$defs/AbiEtherscanDatasourceConfig" + "$ref": "#/$defs/EvmEtherscanDatasourceConfig" }, { "$ref": "#/$defs/HttpDatasourceConfig" @@ -2822,6 +3087,15 @@ }, { "$ref": "#/$defs/StarknetNodeDatasourceConfig" + }, + { + "$ref": "#/$defs/SubstrateSubsquidDatasourceConfig" + }, + { + "$ref": 
"#/$defs/SubstrateSubscanDatasourceConfig" + }, + { + "$ref": "#/$defs/SubstrateNodeDatasourceConfig" } ] }, @@ -2883,6 +3157,9 @@ { "$ref": "#/$defs/StarknetEventsIndexConfig" }, + { + "$ref": "#/$defs/SubstrateEventsIndexConfig" + }, { "$ref": "#/$defs/IndexTemplateConfig" } @@ -2944,6 +3221,14 @@ "description": "Prometheus integration config", "title": "prometheus" }, + "runtimes": { + "additionalProperties": { + "$ref": "#/$defs/SubstrateRuntimeConfig" + }, + "description": "Mapping of runtime aliases and runtime configs", + "title": "runtimes", + "type": "object" + }, "sentry": { "anyOf": [ { @@ -2994,6 +3279,9 @@ }, { "$ref": "#/$defs/StarknetEventsIndexConfig" + }, + { + "$ref": "#/$defs/SubstrateEventsIndexConfig" } ] }, diff --git a/scripts/docs.py b/scripts/docs.py index f6c15d4da..5129c7821 100755 --- a/scripts/docs.py +++ b/scripts/docs.py @@ -144,6 +144,8 @@ class ReferencePage(TypedDict): 'dipdup.models.MessageType', 'dipdup.models.QuerySet', 'dipdup.models.RollbackMessage', + 'dipdup.models.substrate.HeadBlock', + 'dipdup.models.substrate_node.SubstrateNodeHeadSubscription', 'dipdup.models.subsquid.AbstractSubsquidQuery', 'dipdup.models.subsquid.SubsquidMessageType', 'dipdup.models.starknet.StarknetSubscription', @@ -514,7 +516,7 @@ def _compare(ref: str, ignore: set[str]) -> None: else: package_path_str = '.' 
+ package_path.with_suffix('').as_posix().replace('/', '.') # NOTE: Skip private modules and classes - if '._' in package_path_str or 'ABC' in match.group(2): + if '._' in package_path_str or 'ABC' in match.group(2) or match.group(1)[0] == '_': continue classes_in_package.add(f'dipdup.{ref}{package_path_str}.{match.group(1)}') @@ -808,7 +810,7 @@ def move_pages(path: Path, insert: int, pop: int) -> None: break file = toc[index] - new_name = path / f'{index + 1}.{file.name.split(".")[1]}.md' + new_name = path / f'{index + 1}.{'.'.join(file.stem.split(".")[1:])}.md' file.rename(new_name) toc[index + 1] = new_name @@ -826,7 +828,7 @@ def move_pages(path: Path, insert: int, pop: int) -> None: for index in sorted(toc.keys()): if index > pop: file = toc.pop(index) - new_name = path / f'{index - 1}.{file.name.split(".")[1]}.md' + new_name = path / f'{index + 1}.{'.'.join(file.stem.split(".")[1:])}.md' file.rename(new_name) toc[index - 1] = new_name diff --git a/src/demo_evm_events/dipdup.yaml b/src/demo_evm_events/dipdup.yaml index 756b76d5b..d14101c55 100644 --- a/src/demo_evm_events/dipdup.yaml +++ b/src/demo_evm_events/dipdup.yaml @@ -6,7 +6,7 @@ datasources: kind: evm.subsquid url: ${SUBSQUID_URL:-https://v2.archive.subsquid.io/network/ethereum-mainnet} etherscan: - kind: abi.etherscan + kind: evm.etherscan url: ${ETHERSCAN_URL:-https://api.etherscan.io/api} api_key: ${ETHERSCAN_API_KEY:-''} evm_node: diff --git a/src/demo_evm_transactions/dipdup.yaml b/src/demo_evm_transactions/dipdup.yaml index f4eb30062..ace9f69f4 100644 --- a/src/demo_evm_transactions/dipdup.yaml +++ b/src/demo_evm_transactions/dipdup.yaml @@ -6,7 +6,7 @@ datasources: kind: evm.subsquid url: ${SUBSQUID_URL:-https://v2.archive.subsquid.io/network/ethereum-mainnet} etherscan: - kind: abi.etherscan + kind: evm.etherscan url: ${ETHERSCAN_URL:-https://api.etherscan.io/api} api_key: ${ETHERSCAN_API_KEY:-''} evm_node: diff --git a/src/demo_evm_uniswap/dipdup.yaml b/src/demo_evm_uniswap/dipdup.yaml index 
130e8aaaa..53df6e3e4 100644 --- a/src/demo_evm_uniswap/dipdup.yaml +++ b/src/demo_evm_uniswap/dipdup.yaml @@ -6,7 +6,7 @@ datasources: kind: evm.subsquid url: ${SUBSQUID_URL:-https://v2.archive.subsquid.io/network/ethereum-mainnet} etherscan: - kind: abi.etherscan + kind: evm.etherscan url: ${ETHERSCAN_URL:-https://api.etherscan.io/api} api_key: ${ETHERSCAN_API_KEY:-''} evm_node: diff --git a/src/demo_substrate_events/.dockerignore b/src/demo_substrate_events/.dockerignore new file mode 100644 index 000000000..c0bf9e3fb --- /dev/null +++ b/src/demo_substrate_events/.dockerignore @@ -0,0 +1,22 @@ +# Ignore all +* + +# Add metadata and build files +!demo_substrate_events +!pyproject.toml +!*.lock +!README.md + +# Add Python code +!**/*.py +**/.*_cache +**/__pycache__ + +# Add configs and scripts (but not env!) +!**/*.graphql +!**/*.json +!**/*.sql +!**/*.yaml +!**/*.yml +!**/*.j2 +!**/.keep \ No newline at end of file diff --git a/src/demo_substrate_events/.gitignore b/src/demo_substrate_events/.gitignore new file mode 100644 index 000000000..b04caa128 --- /dev/null +++ b/src/demo_substrate_events/.gitignore @@ -0,0 +1,29 @@ +# Ignore all +* +!*/ + +# Add metadata and build files +!demo_substrate_events +!.gitignore +!.dockerignore +!py.typed +!**/Dockerfile +!**/Makefile +!**/pyproject.toml +!**/*.lock +!**/README.md +!**/.keep + +# Add Python code +!**/*.py +**/.*_cache +**/__pycache__ + +# Add configs and scripts (but not env!) 
+!**/*.graphql +!**/*.json +!**/*.sql +!**/*.yaml +!**/*.yml +!**/*.j2 +!**/*.env.default \ No newline at end of file diff --git a/src/demo_substrate_events/Makefile b/src/demo_substrate_events/Makefile new file mode 100644 index 000000000..c3b5f7569 --- /dev/null +++ b/src/demo_substrate_events/Makefile @@ -0,0 +1,54 @@ +.PHONY: $(MAKECMDGOALS) +MAKEFLAGS += --no-print-directory +## +## 🚧 DipDup developer tools +## +PACKAGE=demo_substrate_events +TAG=latest +COMPOSE=deploy/compose.yaml + +help: ## Show this help (default) + @grep -Fh "##" $(MAKEFILE_LIST) | grep -Fv grep -F | sed -e 's/\\$$//' | sed -e 's/##//' + +all: ## Run an entire CI pipeline + make format lint + +## + +install: ## Install dependencies + pdm install + +update: ## Update dependencies + pdm update + dipdup self update -q + +format: ## Format with all tools + make black + +lint: ## Lint with all tools + make ruff mypy + +## + +black: ## Format with black + black . + +ruff: ## Lint with ruff + ruff check --fix . + +mypy: ## Lint with mypy + mypy . + +## + +image: ## Build Docker image + docker buildx build . -t ${PACKAGE}:${TAG} --load + +up: ## Start Compose stack + docker-compose -f ${COMPOSE} up -d --build + docker-compose -f ${COMPOSE} logs -f + +down: ## Stop Compose stack + docker-compose -f ${COMPOSE} down + +## \ No newline at end of file diff --git a/src/demo_substrate_events/README.md b/src/demo_substrate_events/README.md new file mode 100644 index 000000000..f6b252c38 --- /dev/null +++ b/src/demo_substrate_events/README.md @@ -0,0 +1,49 @@ +# demo_substrate_events + +Substrate balance transfers + +## Installation + +This project is based on [DipDup](https://dipdup.io), a framework for building featureful dapps. + +You need a Linux/macOS system with Python 3.12 installed. To install DipDup with pipx or use our installer: + +```shell +curl -Lsf https://dipdup.io/install.py | python3.12 +``` + +See the [Installation](https://dipdup.io/docs/installation) page for all options. 
+ +## Usage + +Run the indexer in memory: + +```shell +dipdup run +``` + +Store data in SQLite database (defaults to /tmp, set `SQLITE_PATH` env variable): + +```shell +dipdup -c . -c configs/dipdup.sqlite.yaml run +``` + +Or spawn a Compose stack with PostgreSQL and Hasura: + +```shell +cd deploy +cp .env.default .env +# Edit .env file before running +docker-compose up +``` + +## Development setup + +To set up the development environment: + +```shell +pdm install +$(pdm venv activate) +``` + +Run `make all` to run full CI check or `make help` to see other available commands. \ No newline at end of file diff --git a/src/dipdup/indexes/__init__.py b/src/demo_substrate_events/__init__.py similarity index 100% rename from src/dipdup/indexes/__init__.py rename to src/demo_substrate_events/__init__.py diff --git a/src/dipdup/indexes/starknet_events/__init__.py b/src/demo_substrate_events/abi/.keep similarity index 100% rename from src/dipdup/indexes/starknet_events/__init__.py rename to src/demo_substrate_events/abi/.keep diff --git a/src/demo_substrate_events/abi/assethub/v601.json b/src/demo_substrate_events/abi/assethub/v601.json new file mode 100644 index 000000000..a6cbce1b8 --- /dev/null +++ b/src/demo_substrate_events/abi/assethub/v601.json @@ -0,0 +1,8569 @@ +[ + { + "name": "System", + "prefix": "System", + "storage": [ + { + "name": "Account", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat" + ], + "key_vec": [ + "AccountId" + ], + "value": "frame_system:AccountInfo", + "keys_id": 0, + "value_id": 3 + } + }, + "fallback": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "docs": [ + " The full account information for a particular account ID." 
+ ] + }, + { + "name": "ExtrinsicCount", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "U32", + "PlainTypeValue": 4 + }, + "fallback": "0x00", + "docs": [ + " Total extrinsics count for the current block." + ] + }, + { + "name": "BlockWeight", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "frame_support:weights:PerDispatchClass@7", + "PlainTypeValue": 7 + }, + "fallback": "0x000000000000000000000000000000000000000000000000", + "docs": [ + " The current weight for the block." + ] + }, + { + "name": "AllExtrinsicsLen", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "U32", + "PlainTypeValue": 4 + }, + "fallback": "0x00", + "docs": [ + " Total length (in bytes) for all extrinsics put together, for the current block." + ] + }, + { + "name": "BlockHash", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Twox64Concat" + ], + "key_vec": [ + "U32" + ], + "value": "H256", + "keys_id": 4, + "value_id": 9 + } + }, + "fallback": "0x0000000000000000000000000000000000000000000000000000000000000000", + "docs": [ + " Map of block numbers to block hashes." + ] + }, + { + "name": "ExtrinsicData", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Twox64Concat" + ], + "key_vec": [ + "U32" + ], + "value": "Vec", + "keys_id": 4, + "value_id": 10 + } + }, + "fallback": "0x00", + "docs": [ + " Extrinsics data for the current block (maps an extrinsic's index to its data)." + ] + }, + { + "name": "Number", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U32", + "PlainTypeValue": 4 + }, + "fallback": "0x00000000", + "docs": [ + " The current block number being processed. Set by `execute_block`." 
+ ] + }, + { + "name": "ParentHash", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "H256", + "PlainTypeValue": 9 + }, + "fallback": "0x0000000000000000000000000000000000000000000000000000000000000000", + "docs": [ + " Hash of the previous block." + ] + }, + { + "name": "Digest", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 11 + }, + "fallback": "0x00", + "docs": [ + " Digest of the current block, also part of the block header." + ] + }, + { + "name": "Events", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 18 + }, + "fallback": "0x00", + "docs": [ + " Events deposited for the current block.", + "", + " NOTE: This storage item is explicitly unbounded since it is never intended to be read", + " from within the runtime." + ] + }, + { + "name": "EventCount", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U32", + "PlainTypeValue": 4 + }, + "fallback": "0x00000000", + "docs": [ + " The number of events in the `Events` list." + ] + }, + { + "name": "EventTopics", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat" + ], + "key_vec": [ + "H256" + ], + "value": "Vec", + "keys_id": 9, + "value_id": 96 + } + }, + "fallback": "0x00", + "docs": [ + " Mapping between a topic (represented by T::Hash) and a vector of indexes", + " of events in the `>` list.", + "", + " All topic vectors have deterministic storage locations depending on the topic. 
This", + " allows light-clients to leverage the changes trie storage tracking mechanism and", + " in case of changes fetch the list of events of interest.", + "", + " The value has the type `(T::BlockNumber, EventIndex)` because if we used only just", + " the `EventIndex` then in case if the topic has the same contents on the next block", + " no notification will be triggered thus the event might be lost." + ] + }, + { + "name": "LastRuntimeUpgrade", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "frame_system:LastRuntimeUpgradeInfo", + "PlainTypeValue": 98 + }, + "fallback": "0x00", + "docs": [ + " Stores the `spec_version` and `spec_name` of when the last runtime upgrade happened." + ] + }, + { + "name": "UpgradedToU32RefCount", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Bool", + "PlainTypeValue": 87 + }, + "fallback": "0x00", + "docs": [ + " True if we have upgraded so that `type RefCount` is `u32`. False (default) if not." + ] + }, + { + "name": "UpgradedToTripleRefCount", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Bool", + "PlainTypeValue": 87 + }, + "fallback": "0x00", + "docs": [ + " True if we have upgraded so that AccountInfo contains three types of `RefCount`. False", + " (default) if not." + ] + }, + { + "name": "ExecutionPhase", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "frame_system:Phase", + "PlainTypeValue": 94 + }, + "fallback": "0x00", + "docs": [ + " The execution phase of the block." + ] + } + ], + "calls": [ + { + "lookup": "0000", + "name": "fill_block", + "docs": [ + "A dispatch that will fill the block weight up to the given ratio." 
+ ], + "args": [ + { + "name": "ratio", + "type": "U32", + "type_name": "Perbill" + } + ] + }, + { + "lookup": "0001", + "name": "remark", + "docs": [ + "Make some on-chain remark.", + "", + "# ", + "- `O(1)`", + "# " + ], + "args": [ + { + "name": "remark", + "type": "Vec", + "type_name": "Bytes" + } + ] + }, + { + "lookup": "0002", + "name": "set_heap_pages", + "docs": [ + "Set the number of pages in the WebAssembly environment's heap.", + "", + "# ", + "- `O(1)`", + "- 1 storage write.", + "0x2d2042617365205765696768743a20312e34303520c2b573", + "- 1 write to HEAP_PAGES", + "- 1 digest item", + "# " + ], + "args": [ + { + "name": "pages", + "type": "U64", + "type_name": "u64" + } + ] + }, + { + "lookup": "0003", + "name": "set_code", + "docs": [ + "Set the new runtime code.", + "", + "# ", + "- `O(C + S)` where `C` length of `code` and `S` complexity of `can_set_code`", + "- 1 call to `can_set_code`: `O(S)` (calls `sp_io::misc::runtime_version` which is", + " expensive).", + "- 1 storage write (codec `O(C)`).", + "- 1 digest item.", + "- 1 event.", + "The weight of this function is dependent on the runtime, but generally this is very", + "expensive. We will treat this as a full block.", + "# " + ], + "args": [ + { + "name": "code", + "type": "Vec", + "type_name": "Bytes" + } + ] + }, + { + "lookup": "0004", + "name": "set_code_without_checks", + "docs": [ + "Set the new runtime code without doing any checks of the given `code`.", + "", + "# ", + "- `O(C)` where `C` length of `code`", + "- 1 storage write (codec `O(C)`).", + "- 1 digest item.", + "- 1 event.", + "The weight of this function is dependent on the runtime. We will treat this as a full", + "block. 
# " + ], + "args": [ + { + "name": "code", + "type": "Vec", + "type_name": "Bytes" + } + ] + }, + { + "lookup": "0005", + "name": "set_changes_trie_config", + "docs": [ + "Set the new changes trie configuration.", + "", + "# ", + "- `O(1)`", + "- 1 storage write or delete (codec `O(1)`).", + "- 1 call to `deposit_log`: Uses `append` API, so O(1)", + "0x2d2042617365205765696768743a20372e32313820c2b573", + "- DB Weight:", + " - Writes: Changes Trie, System Digest", + "# " + ], + "args": [ + { + "name": "changes_trie_config", + "type": "option", + "type_name": "Option" + } + ] + }, + { + "lookup": "0006", + "name": "set_storage", + "docs": [ + "Set some items of storage.", + "", + "# ", + "- `O(I)` where `I` length of `items`", + "- `I` storage writes (`O(1)`).", + "0x2d2042617365205765696768743a20302e353638202a206920c2b573", + "- Writes: Number of items", + "# " + ], + "args": [ + { + "name": "items", + "type": "VecVec>", + "type_name": "Vec" + } + ] + }, + { + "lookup": "0007", + "name": "kill_storage", + "docs": [ + "Kill some items from storage.", + "", + "# ", + "- `O(IK)` where `I` length of `keys` and `K` length of one key", + "- `I` storage deletions.", + "0x2d2042617365205765696768743a202e333738202a206920c2b573", + "- Writes: Number of items", + "# " + ], + "args": [ + { + "name": "keys", + "type": "Vec>", + "type_name": "Vec" + } + ] + }, + { + "lookup": "0008", + "name": "kill_prefix", + "docs": [ + "Kill all storage items with a key that starts with the given prefix.", + "", + "**NOTE:** We rely on the Root origin to provide us the number of subkeys under", + "the prefix we are removing to accurately calculate the weight of this function.", + "", + "# ", + "- `O(P)` where `P` amount of keys with prefix `prefix`", + "- `P` storage deletions.", + "0x2d2042617365205765696768743a20302e383334202a205020c2b573", + "- Writes: Number of subkeys + 1", + "# " + ], + "args": [ + { + "name": "prefix", + "type": "Vec", + "type_name": "Key" + }, + { + "name": "subkeys", 
+ "type": "U32", + "type_name": "u32" + } + ] + }, + { + "lookup": "0009", + "name": "remark_with_event", + "docs": [ + "Make some on-chain remark and emit event.", + "", + "# ", + "- `O(b)` where b is the length of the remark.", + "- 1 event.", + "# " + ], + "args": [ + { + "name": "remark", + "type": "Vec", + "type_name": "Bytes" + } + ] + } + ], + "calls_value": { + "type": 100 + }, + "events": [ + { + "lookup": "0000", + "name": "ExtrinsicSuccess", + "docs": [ + "An extrinsic completed successfully. \\[info\\]" + ], + "args": [ + "frame_support:weights:DispatchInfo" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "DispatchInfo" + ] + }, + { + "lookup": "0001", + "name": "ExtrinsicFailed", + "docs": [ + "An extrinsic failed. \\[error, info\\]" + ], + "args": [ + "sp_runtime:DispatchError", + "frame_support:weights:DispatchInfo" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "DispatchError", + "DispatchInfo" + ] + }, + { + "lookup": "0002", + "name": "CodeUpdated", + "docs": [ + "`:code` was updated." + ], + "args": null + }, + { + "lookup": "0003", + "name": "NewAccount", + "docs": [ + "A new \\[account\\] was created." + ], + "args": [ + "AccountId" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "AccountId" + ] + }, + { + "lookup": "0004", + "name": "KilledAccount", + "docs": [ + "An \\[account\\] was reaped." + ], + "args": [ + "AccountId" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "AccountId" + ] + }, + { + "lookup": "0005", + "name": "Remarked", + "docs": [ + "On on-chain remark happened. 
\\[origin, remark_hash\\]" + ], + "args": [ + "AccountId", + "H256" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "AccountId", + "Hash" + ] + } + ], + "events_value": { + "type": 21 + }, + "constants": [ + { + "name": "BlockWeights", + "type": "frame_system:limits:BlockWeights", + "type_value": 105, + "constants_value": "00f2052a010000000088526a74000000405973070000000001c0d22c76510000000100e6bd4f57000000010000000000000000405973070000000001c074c1906e000000010088526a740000000100a2941a1d0000004059730700000000000000", + "docs": [ + " Block & extrinsics weights: base values and limits." + ] + }, + { + "name": "BlockLength", + "type": "frame_support:weights:PerDispatchClass@110", + "type_value": 109, + "constants_value": "00003c000000500000005000", + "docs": [ + " The maximum length of a block (in bytes)." + ] + }, + { + "name": "BlockHashCount", + "type": "U32", + "type_value": 4, + "constants_value": "60090000", + "docs": [ + " Maximum number of block number to block hash mappings to keep (oldest pruned first)." + ] + }, + { + "name": "DbWeight", + "type": "frame_support:weights:RuntimeDbWeight", + "type_value": 111, + "constants_value": "40787d010000000000e1f50500000000", + "docs": [ + " The weight of runtime database operations the runtime can invoke." + ] + }, + { + "name": "Version", + "type": "sp_version:RuntimeVersion", + "type_value": 112, + "constants_value": "2473746174656d696e742473746174656d696e7401000000590200000000000028dd718d5cc53262d401000000df6acb689907609b0300000037e397fc7c91f5e40100000040fe3ad401f8959a05000000d2bc9897eed08f1503000000f78b278be53f454c02000000ab3c0572291feb8b01000000bc9d89904f5b923f0100000037c8bb1350a9a2a801000000ea93e3f16f3d69620100000004000000", + "docs": [ + " Get the chain's current version." 
+ ] + }, + { + "name": "SS58Prefix", + "type": "U16", + "type_value": 85, + "constants_value": "0000", + "docs": [ + " The designated SS85 prefix of this chain.", + "", + " This replaces the \"ss58Format\" property declared in the chain spec. Reason is", + " that the runtime should know about the prefix in order to make use of it as", + " an identifier of the chain." + ] + } + ], + "errors": [ + { + "name": "InvalidSpecName", + "doc": [ + "The name of specification does not match between the current runtime", + "and the new runtime." + ] + }, + { + "name": "SpecVersionNeedsToIncrease", + "doc": [ + "The specification version is not allowed to decrease between the current runtime", + "and the new runtime." + ] + }, + { + "name": "FailedToExtractRuntimeVersion", + "doc": [ + "Failed to extract the runtime version from the new runtime.", + "", + "Either calling `Core_version` or decoding `RuntimeVersion` failed." + ] + }, + { + "name": "NonDefaultComposite", + "doc": [ + "Suicide called when the account has non-default composite data." + ] + }, + { + "name": "NonZeroRefCount", + "doc": [ + "There is a non-zero reference count preventing the account from being purged." + ] + } + ], + "errors_value": { + "type": 116 + }, + "index": 0 + }, + { + "name": "ParachainSystem", + "prefix": "ParachainSystem", + "storage": [ + { + "name": "PendingValidationCode", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 10 + }, + "fallback": "0x00", + "docs": [ + " In case of a scheduled upgrade, this storage field contains the validation code to be applied.", + "", + " As soon as the relay chain gives us the go-ahead signal, we will overwrite the [`:code`][well_known_keys::CODE]", + " which will result the next block process with the new validation code. 
This concludes the upgrade process.", + "", + " [well_known_keys::CODE]: sp_core::storage::well_known_keys::CODE" + ] + }, + { + "name": "NewValidationCode", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 10 + }, + "fallback": "0x00", + "docs": [ + " Validation code that is set by the parachain and is to be communicated to collator and", + " consequently the relay-chain.", + "", + " This will be cleared in `on_initialize` of each new block if no other pallet already set", + " the value." + ] + }, + { + "name": "ValidationData", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "polkadot_primitives:v1:PersistedValidationData", + "PlainTypeValue": 117 + }, + "fallback": "0x00", + "docs": [ + " The [`PersistedValidationData`] set for this block.", + " This value is expected to be set only once per block and it's never stored", + " in the trie." + ] + }, + { + "name": "DidSetValidationCode", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Bool", + "PlainTypeValue": 87 + }, + "fallback": "0x00", + "docs": [ + " Were the validation data set to notify the relay chain?" + ] + }, + { + "name": "UpgradeRestrictionSignal", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "option", + "PlainTypeValue": 119 + }, + "fallback": "0x00", + "docs": [ + " An option which indicates if the relay-chain restricts signalling a validation code upgrade.", + " In other words, if this is `Some` and [`NewValidationCode`] is `Some` then the produced", + " candidate will be invalid.", + "", + " This storage item is a mirror of the corresponding value for the current parachain from the", + " relay-chain. This value is ephemeral which means it doesn't hit the storage. This value is", + " set after the inherent." 
+ ] + }, + { + "name": "RelevantMessagingState", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "cumulus_pallet_parachain_system:relay_state_snapshot:MessagingStateSnapshot", + "PlainTypeValue": 121 + }, + "fallback": "0x00", + "docs": [ + " The snapshot of some state related to messaging relevant to the current parachain as per", + " the relay parent.", + "", + " This field is meant to be updated each block with the validation data inherent. Therefore,", + " before processing of the inherent, e.g. in `on_initialize` this data may be stale.", + "", + " This data is also absent from the genesis." + ] + }, + { + "name": "HostConfiguration", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "polkadot_primitives:v1:AbridgedHostConfiguration", + "PlainTypeValue": 126 + }, + "fallback": "0x00", + "docs": [ + " The parachain host configuration that was obtained from the relay parent.", + "", + " This field is meant to be updated each block with the validation data inherent. Therefore,", + " before processing of the inherent, e.g. in `on_initialize` this data may be stale.", + "", + " This data is also absent from the genesis." + ] + }, + { + "name": "LastDmqMqcHead", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "H256", + "PlainTypeValue": 127 + }, + "fallback": "0x0000000000000000000000000000000000000000000000000000000000000000", + "docs": [ + " The last downward message queue chain head we have observed.", + "", + " This value is loaded before and saved after processing inbound downward messages carried", + " by the system inherent." 
+ ] + }, + { + "name": "LastHrmpMqcHeads", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 128 + }, + "fallback": "0x00", + "docs": [ + " The message queue chain heads we have observed per each channel incoming channel.", + "", + " This value is loaded before and saved after processing inbound downward messages carried", + " by the system inherent." + ] + }, + { + "name": "ProcessedDownwardMessages", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U32", + "PlainTypeValue": 4 + }, + "fallback": "0x00000000", + "docs": [ + " Number of downward messages processed in a block.", + "", + " This will be cleared in `on_initialize` of each new block." + ] + }, + { + "name": "HrmpWatermark", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U32", + "PlainTypeValue": 4 + }, + "fallback": "0x00000000", + "docs": [ + " HRMP watermark that was set in a block.", + "", + " This will be cleared in `on_initialize` of each new block." + ] + }, + { + "name": "HrmpOutboundMessages", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 131 + }, + "fallback": "0x00", + "docs": [ + " HRMP messages that were sent in a block.", + "", + " This will be cleared in `on_initialize` of each new block." + ] + }, + { + "name": "UpwardMessages", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec>", + "PlainTypeValue": 104 + }, + "fallback": "0x00", + "docs": [ + " Upward messages that were sent in a block.", + "", + " This will be cleared in `on_initialize` of each new block." + ] + }, + { + "name": "PendingUpwardMessages", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec>", + "PlainTypeValue": 104 + }, + "fallback": "0x00", + "docs": [ + " Upward messages that are still pending and not yet send to the relay chain." 
+ ] + }, + { + "name": "AnnouncedHrmpMessagesPerCandidate", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U32", + "PlainTypeValue": 4 + }, + "fallback": "0x00000000", + "docs": [ + " The number of HRMP messages we observed in `on_initialize` and thus used that number for", + " announcing the weight of `on_initialize` and `on_finalize`." + ] + }, + { + "name": "ReservedXcmpWeightOverride", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "U64", + "PlainTypeValue": 8 + }, + "fallback": "0x00", + "docs": [ + " The weight we reserve at the beginning of the block for processing XCMP messages. This", + " overrides the amount set in the Config trait." + ] + }, + { + "name": "ReservedDmpWeightOverride", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "U64", + "PlainTypeValue": 8 + }, + "fallback": "0x00", + "docs": [ + " The weight we reserve at the beginning of the block for processing DMP messages. This", + " overrides the amount set in the Config trait." + ] + }, + { + "name": "AuthorizedUpgrade", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "H256", + "PlainTypeValue": 9 + }, + "fallback": "0x00", + "docs": [ + " The next authorized upgrade, if there is one." + ] + } + ], + "calls": [ + { + "lookup": "0100", + "name": "set_validation_data", + "docs": [ + "Set the current validation data.", + "", + "This should be invoked exactly once per block. It will panic at the finalization", + "phase if the call was not invoked.", + "", + "The dispatch origin for this call must be `Inherent`", + "", + "As a side effect, this function upgrades the current validation function", + "if the appropriate time has come." 
+ ], + "args": [ + { + "name": "data", + "type": "cumulus_primitives_parachain_inherent:ParachainInherentData", + "type_name": "ParachainInherentData" + } + ] + }, + { + "lookup": "0101", + "name": "sudo_send_upward_message", + "docs": null, + "args": [ + { + "name": "message", + "type": "Vec", + "type_name": "UpwardMessage" + } + ] + }, + { + "lookup": "0102", + "name": "authorize_upgrade", + "docs": null, + "args": [ + { + "name": "code_hash", + "type": "H256", + "type_name": "Hash" + } + ] + }, + { + "lookup": "0103", + "name": "enact_authorized_upgrade", + "docs": null, + "args": [ + { + "name": "code", + "type": "Vec", + "type_name": "Bytes" + } + ] + } + ], + "calls_value": { + "type": 133 + }, + "events": [ + { + "lookup": "0100", + "name": "ValidationFunctionStored", + "docs": [ + "The validation function has been scheduled to apply." + ], + "args": null + }, + { + "lookup": "0101", + "name": "ValidationFunctionApplied", + "docs": [ + "The validation function was applied as of the contained relay chain block number." + ], + "args": [ + "U32" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "RelayChainBlockNumber" + ] + }, + { + "lookup": "0102", + "name": "ValidationFunctionDiscarded", + "docs": [ + "The relay-chain aborted the upgrade process." + ], + "args": null + }, + { + "lookup": "0103", + "name": "UpgradeAuthorized", + "docs": [ + "An upgrade has been authorized." 
+ ], + "args": [ + "H256" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "Hash" + ] + }, + { + "lookup": "0104", + "name": "DownwardMessagesReceived", + "docs": [ + "Some downward messages have been received and will be processed.", + "\\[ count \\]" + ], + "args": [ + "U32" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "u32" + ] + }, + { + "lookup": "0105", + "name": "DownwardMessagesProcessed", + "docs": [ + "Downward messages were processed using the given weight.", + "\\[ weight_used, result_mqc_head \\]" + ], + "args": [ + "U64", + "H256" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "Weight", + "relay_chain::Hash" + ] + } + ], + "events_value": { + "type": 28 + }, + "errors": [ + { + "name": "OverlappingUpgrades", + "doc": [ + "Attempt to upgrade validation function while existing upgrade pending" + ] + }, + { + "name": "ProhibitedByPolkadot", + "doc": [ + "Polkadot currently prohibits this parachain from upgrading its validation function" + ] + }, + { + "name": "TooBig", + "doc": [ + "The supplied validation function has compiled into a blob larger than Polkadot is", + "willing to run" + ] + }, + { + "name": "ValidationDataNotAvailable", + "doc": [ + "The inherent which supplies the validation data did not run this block" + ] + }, + { + "name": "HostConfigurationNotAvailable", + "doc": [ + "The inherent which supplies the host configuration did not run this block" + ] + }, + { + "name": "NotScheduled", + "doc": [ + "No validation function upgrade is currently scheduled." + ] + }, + { + "name": "NothingAuthorized", + "doc": [ + "No code upgrade has been authorized." + ] + }, + { + "name": "Unauthorized", + "doc": [ + "The given code upgrade has not been authorized." 
+ ] + } + ], + "errors_value": { + "type": 143 + }, + "index": 1 + }, + { + "name": "Timestamp", + "prefix": "Timestamp", + "storage": [ + { + "name": "Now", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U64", + "PlainTypeValue": 8 + }, + "fallback": "0x0000000000000000", + "docs": [ + " Current time for the current block." + ] + }, + { + "name": "DidUpdate", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Bool", + "PlainTypeValue": 87 + }, + "fallback": "0x00", + "docs": [ + " Did the timestamp get updated in this block?" + ] + } + ], + "calls": [ + { + "lookup": "0300", + "name": "set", + "docs": [ + "Set the current time.", + "", + "This call should be invoked exactly once per block. It will panic at the finalization", + "phase, if this call hasn't been invoked by that time.", + "", + "The timestamp should be greater than the previous one by the amount specified by", + "`MinimumPeriod`.", + "", + "The dispatch origin for this call must be `Inherent`.", + "", + "# ", + "- `O(1)` (Note that implementations of `OnTimestampSet` must also be `O(1)`)", + "- 1 storage read and 1 storage mutation (codec `O(1)`). (because of `DidUpdate::take` in", + " `on_finalize`)", + "- 1 event handler `on_timestamp_set`. Must be `O(1)`.", + "# " + ], + "args": [ + { + "name": "now", + "type": "compact", + "type_name": "Moment" + } + ] + } + ], + "calls_value": { + "type": 144 + }, + "constants": [ + { + "name": "MinimumPeriod", + "type": "U64", + "type_value": 8, + "constants_value": "7017000000000000", + "docs": [ + " The minimum period between blocks. Beware that this is different to the *expected*", + " period that the block production apparatus provides. Your chosen consensus system will", + " generally work with this to determine a sensible block time. e.g. For Aura, it will be", + " double this period on default settings." 
+ ] + } + ], + "errors": null, + "errors_value": null, + "index": 3 + }, + { + "name": "ParachainInfo", + "prefix": "ParachainInfo", + "storage": [ + { + "name": "ParachainId", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U32", + "PlainTypeValue": 124 + }, + "fallback": "0x64000000", + "docs": null + } + ], + "errors": null, + "errors_value": null, + "index": 4 + }, + { + "name": "Balances", + "prefix": "Balances", + "storage": [ + { + "name": "TotalIssuance", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U128", + "PlainTypeValue": 6 + }, + "fallback": "0x00000000000000000000000000000000", + "docs": [ + " The total units issued in the system." + ] + }, + { + "name": "Account", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat" + ], + "key_vec": [ + "AccountId" + ], + "value": "pallet_balances:AccountData", + "keys_id": 0, + "value_id": 5 + } + }, + "fallback": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "docs": [ + " The balance of an account.", + "", + " NOTE: This is only used in the case that this pallet is used to store balances." + ] + }, + { + "name": "Locks", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat" + ], + "key_vec": [ + "AccountId" + ], + "value": "Vec", + "keys_id": 0, + "value_id": 145 + } + }, + "fallback": "0x00", + "docs": [ + " Any liquidity locks on some account balances.", + " NOTE: Should only be accessed when setting, changing and freeing a lock." + ] + }, + { + "name": "Reserves", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat" + ], + "key_vec": [ + "AccountId" + ], + "value": "Vec", + "keys_id": 0, + "value_id": 149 + } + }, + "fallback": "0x00", + "docs": [ + " Named reserves on some account balances." 
+ ] + }, + { + "name": "StorageVersion", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "pallet_balances:Releases", + "PlainTypeValue": 152 + }, + "fallback": "0x00", + "docs": [ + " Storage version of the pallet.", + "", + " This is set to v2.0.0 for new networks." + ] + } + ], + "calls": [ + { + "lookup": "0a00", + "name": "transfer", + "docs": [ + "Transfer some liquid free balance to another account.", + "", + "`transfer` will set the `FreeBalance` of the sender and receiver.", + "It will decrease the total issuance of the system by the `TransferFee`.", + "If the sender's account is below the existential deposit as a result", + "of the transfer, the account will be reaped.", + "", + "The dispatch origin for this call must be `Signed` by the transactor.", + "", + "# ", + "- Dependent on arguments but not critical, given proper implementations for input config", + " types. See related functions below.", + "- It contains a limited number of reads and writes internally and no complex", + " computation.", + "", + "Related functions:", + "", + " - `ensure_can_withdraw` is always called internally but has a bounded complexity.", + " - Transferring balances to accounts that did not exist before will cause", + " `T::OnNewAccount::on_new_account` to be called.", + " - Removing enough funds from an account will trigger `T::DustRemoval::on_unbalanced`.", + " - `transfer_keep_alive` works the same way as `transfer`, but has an additional check", + " that the transfer will not kill the origin account.", + "---------------------------------", + "0x2d2042617365205765696768743a2037332e363420c2b5732c20776f7273742063617365207363656e6172696f20286163636f756e7420637265617465642c206163636f756e742072656d6f76656429", + "- DB Weight: 1 Read and 1 Write to destination account", + "- Origin account is already in memory, so no DB operations for them.", + "# " + ], + "args": [ + { + "name": "dest", + "type": "sp_runtime:multiaddress:MultiAddress", + 
"type_name": "Address" + }, + { + "name": "value", + "type": "compact", + "type_name": "Balance" + } + ] + }, + { + "lookup": "0a01", + "name": "set_balance", + "docs": [ + "Set the balances of a given account.", + "", + "This will alter `FreeBalance` and `ReservedBalance` in storage. it will", + "also decrease the total issuance of the system (`TotalIssuance`).", + "If the new free or reserved balance is below the existential deposit,", + "it will reset the account nonce (`frame_system::AccountNonce`).", + "", + "The dispatch origin for this call is `root`.", + "", + "# ", + "- Independent of the arguments.", + "- Contains a limited number of reads and writes.", + "---------------------", + "- Base Weight:", + "0x202020202d204372656174696e673a2032372e353620c2b573", + "0x202020202d204b696c6c696e673a2033352e313120c2b573", + "- DB Weight: 1 Read, 1 Write to `who`", + "# " + ], + "args": [ + { + "name": "who", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "new_free", + "type": "compact", + "type_name": "Balance" + }, + { + "name": "new_reserved", + "type": "compact", + "type_name": "Balance" + } + ] + }, + { + "lookup": "0a02", + "name": "force_transfer", + "docs": [ + "Exactly as `transfer`, except the origin must be root and the source account may be", + "specified.", + "# ", + "- Same as transfer, but additional read and write because the source account is not", + " assumed to be in the overlay.", + "# " + ], + "args": [ + { + "name": "source", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "dest", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "value", + "type": "compact", + "type_name": "Balance" + } + ] + }, + { + "lookup": "0a03", + "name": "transfer_keep_alive", + "docs": [ + "Same as the [`transfer`] call, but with a check that the transfer will not kill the", + "origin account.", + "", + "99% of the time you want 
[`transfer`] instead.", + "", + "[`transfer`]: struct.Pallet.html#method.transfer", + "# ", + "- Cheaper than transfer because account cannot be killed.", + "0x2d2042617365205765696768743a2035312e3420c2b573", + "- DB Weight: 1 Read and 1 Write to dest (sender is in overlay already)", + "#" + ], + "args": [ + { + "name": "dest", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "value", + "type": "compact", + "type_name": "Balance" + } + ] + }, + { + "lookup": "0a04", + "name": "transfer_all", + "docs": [ + "Transfer the entire transferable balance from the caller account.", + "", + "NOTE: This function only attempts to transfer _transferable_ balances. This means that", + "any locked, reserved, or existential deposits (when `keep_alive` is `true`), will not be", + "transferred by this function. To ensure that this function results in a killed account,", + "you might need to prepare the account by removing any reference counters, storage", + "deposits, etc...", + "", + "The dispatch origin of this call must be Signed.", + "", + "- `dest`: The recipient of the transfer.", + "- `keep_alive`: A boolean to determine if the `transfer_all` operation should send all", + " of the funds the account has, causing the sender account to be killed (false), or", + " transfer everything except at least the existential deposit, which will guarantee to", + " keep the sender account alive (true). # ", + "- O(1). Just like transfer, but reading the user's transferable balance first.", + " #" + ], + "args": [ + { + "name": "dest", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "keep_alive", + "type": "Bool", + "type_name": "bool" + } + ] + }, + { + "lookup": "0a05", + "name": "force_unreserve", + "docs": [ + "Unreserve some balance from a user by force.", + "", + "Can only be called by ROOT." 
+ ], + "args": [ + { + "name": "who", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "amount", + "type": "U128", + "type_name": "Balance" + } + ] + } + ], + "calls_value": { + "type": 153 + }, + "events": [ + { + "lookup": "0a00", + "name": "Endowed", + "docs": [ + "An account was created with some free balance. \\[account, free_balance\\]" + ], + "args": [ + "AccountId", + "U128" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "AccountId", + "Balance" + ] + }, + { + "lookup": "0a01", + "name": "DustLost", + "docs": [ + "An account was removed whose balance was non-zero but below ExistentialDeposit,", + "resulting in an outright loss. \\[account, balance\\]" + ], + "args": [ + "AccountId", + "U128" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "AccountId", + "Balance" + ] + }, + { + "lookup": "0a02", + "name": "Transfer", + "docs": [ + "Transfer succeeded. \\[from, to, value\\]" + ], + "args": [ + "AccountId", + "AccountId", + "U128" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "AccountId", + "AccountId", + "Balance" + ] + }, + { + "lookup": "0a03", + "name": "BalanceSet", + "docs": [ + "A balance was set by root. \\[who, free, reserved\\]" + ], + "args": [ + "AccountId", + "U128", + "U128" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "AccountId", + "Balance", + "Balance" + ] + }, + { + "lookup": "0a04", + "name": "Reserved", + "docs": [ + "Some balance was reserved (moved from free to reserved). \\[who, value\\]" + ], + "args": [ + "AccountId", + "U128" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "AccountId", + "Balance" + ] + }, + { + "lookup": "0a05", + "name": "Unreserved", + "docs": [ + "Some balance was unreserved (moved from reserved to free). 
\\[who, value\\]" + ], + "args": [ + "AccountId", + "U128" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "AccountId", + "Balance" + ] + }, + { + "lookup": "0a06", + "name": "ReserveRepatriated", + "docs": [ + "Some balance was moved from the reserve of the first account to the second account.", + "Final argument indicates the destination balance type.", + "\\[from, to, balance, destination_status\\]" + ], + "args": [ + "AccountId", + "AccountId", + "U128", + "frame_support:traits:tokens:misc:BalanceStatus" + ], + "args_name": [ + "", + "", + "", + "" + ], + "args_type_name": [ + "AccountId", + "AccountId", + "Balance", + "Status" + ] + }, + { + "lookup": "0a07", + "name": "Deposit", + "docs": [ + "Some amount was deposited into the account (e.g. for transaction fees). \\[who,", + "deposit\\]" + ], + "args": [ + "AccountId", + "U128" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "AccountId", + "Balance" + ] + }, + { + "lookup": "0a08", + "name": "Withdraw", + "docs": [ + "Some amount was withdrawn from the account (e.g. for transaction fees). \\[who, value\\]" + ], + "args": [ + "AccountId", + "U128" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "AccountId", + "Balance" + ] + }, + { + "lookup": "0a09", + "name": "Slashed", + "docs": [ + "Some amount was removed from the account (e.g. for misbehavior). \\[who,", + "amount_slashed\\]" + ], + "args": [ + "AccountId", + "U128" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "AccountId", + "Balance" + ] + } + ], + "events_value": { + "type": 29 + }, + "constants": [ + { + "name": "ExistentialDeposit", + "type": "U128", + "type_value": 6, + "constants_value": "00ca9a3b000000000000000000000000", + "docs": [ + " The minimum amount required to keep an account open." 
+ ] + }, + { + "name": "MaxLocks", + "type": "U32", + "type_value": 4, + "constants_value": "32000000", + "docs": [ + " The maximum number of locks that should exist on an account.", + " Not strictly enforced, but used for weight estimation." + ] + }, + { + "name": "MaxReserves", + "type": "U32", + "type_value": 4, + "constants_value": "32000000", + "docs": [ + " The maximum number of named reserves that can exist on an account." + ] + } + ], + "errors": [ + { + "name": "VestingBalance", + "doc": [ + "Vesting balance too high to send value" + ] + }, + { + "name": "LiquidityRestrictions", + "doc": [ + "Account liquidity restrictions prevent withdrawal" + ] + }, + { + "name": "InsufficientBalance", + "doc": [ + "Balance too low to send value" + ] + }, + { + "name": "ExistentialDeposit", + "doc": [ + "Value too low to create account due to existential deposit" + ] + }, + { + "name": "KeepAlive", + "doc": [ + "Transfer/payment would kill account" + ] + }, + { + "name": "ExistingVestingSchedule", + "doc": [ + "A vesting schedule already exists for this account" + ] + }, + { + "name": "DeadAccount", + "doc": [ + "Beneficiary account must pre-exist" + ] + }, + { + "name": "TooManyReserves", + "doc": [ + "Number of named reserves exceed MaxReserves" + ] + } + ], + "errors_value": { + "type": 156 + }, + "index": 10 + }, + { + "name": "TransactionPayment", + "prefix": "TransactionPayment", + "storage": [ + { + "name": "NextFeeMultiplier", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U128", + "PlainTypeValue": 157 + }, + "fallback": "0x000064a7b3b6e00d0000000000000000", + "docs": null + }, + { + "name": "StorageVersion", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "pallet_transaction_payment:Releases", + "PlainTypeValue": 158 + }, + "fallback": "0x00", + "docs": null + } + ], + "constants": [ + { + "name": "TransactionByteFee", + "type": "U128", + "type_value": 6, + "constants_value": 
"a0860100000000000000000000000000", + "docs": [ + " The fee to be paid for making a transaction; the per-byte portion." + ] + }, + { + "name": "OperationalFeeMultiplier", + "type": "U8", + "type_value": 2, + "constants_value": "05", + "docs": [ + " A fee mulitplier for `Operational` extrinsics to compute \"virtual tip\" to boost their", + " `priority`", + "", + " This value is multipled by the `final_fee` to obtain a \"virtual tip\" that is later", + " added to a tip component in regular `priority` calculations.", + " It means that a `Normal` transaction can front-run a similarly-sized `Operational`", + " extrinsic (with no tip), by including a tip value greater than the virtual tip.", + "", + " ```rust,ignore", + " // For `Normal`", + " let priority = priority_calc(tip);", + "", + " // For `Operational`", + " let virtual_tip = (inclusion_fee + tip) * OperationalFeeMultiplier;", + " let priority = priority_calc(tip + virtual_tip);", + " ```", + "", + " Note that since we use `final_fee` the multiplier applies also to the regular `tip`", + " sent with the transaction. So, not only does the transaction get a priority bump based", + " on the `inclusion_fee`, but we also amplify the impact of tips applied to `Operational`", + " transactions." + ] + }, + { + "name": "WeightToFee", + "type": "Vec", + "type_value": 159, + "constants_value": "0400000000000000000000000000000000ff117a000001", + "docs": [ + " The polynomial that is applied in order to derive fee from weight." 
+ ] + } + ], + "errors": null, + "errors_value": null, + "index": 11 + }, + { + "name": "AssetTxPayment", + "prefix": "", + "storage": null, + "errors": null, + "errors_value": null, + "index": 12 + }, + { + "name": "Authorship", + "prefix": "Authorship", + "storage": [ + { + "name": "Uncles", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 161 + }, + "fallback": "0x00", + "docs": [ + " Uncles" + ] + }, + { + "name": "Author", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "AccountId", + "PlainTypeValue": 0 + }, + "fallback": "0x00", + "docs": [ + " Author of current block." + ] + }, + { + "name": "DidSetUncles", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Bool", + "PlainTypeValue": 87 + }, + "fallback": "0x00", + "docs": [ + " Whether uncles were already set in this block." + ] + } + ], + "calls": [ + { + "lookup": "1400", + "name": "set_uncles", + "docs": [ + "Provide a set of uncles." + ], + "args": [ + { + "name": "new_uncles", + "type": "Vec", + "type_name": "Vec
    " + } + ] + } + ], + "calls_value": { + "type": 164 + }, + "constants": [ + { + "name": "UncleGenerations", + "type": "U32", + "type_value": 4, + "constants_value": "00000000", + "docs": [ + " The number of blocks back we should accept uncles.", + " This means that we will deal with uncle-parents that are", + " `UncleGenerations + 1` before `now`." + ] + } + ], + "errors": [ + { + "name": "InvalidUncleParent", + "doc": [ + "The uncle parent not in the chain." + ] + }, + { + "name": "UnclesAlreadySet", + "doc": [ + "Uncles already set in the block." + ] + }, + { + "name": "TooManyUncles", + "doc": [ + "Too many uncles." + ] + }, + { + "name": "GenesisUncle", + "doc": [ + "The uncle is genesis." + ] + }, + { + "name": "TooHighUncle", + "doc": [ + "The uncle is too high in chain." + ] + }, + { + "name": "UncleAlreadyIncluded", + "doc": [ + "The uncle is already included." + ] + }, + { + "name": "OldUncle", + "doc": [ + "The uncle isn't recent enough to be included." + ] + } + ], + "errors_value": { + "type": 168 + }, + "index": 20 + }, + { + "name": "CollatorSelection", + "prefix": "CollatorSelection", + "storage": [ + { + "name": "Invulnerables", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 32 + }, + "fallback": "0x00", + "docs": [ + " The invulnerable, fixed collators." + ] + }, + { + "name": "Candidates", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 169 + }, + "fallback": "0x00", + "docs": [ + " The (community, limited) collation candidates." + ] + }, + { + "name": "LastAuthoredBlock", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Twox64Concat" + ], + "key_vec": [ + "AccountId" + ], + "value": "U32", + "keys_id": 0, + "value_id": 4 + } + }, + "fallback": "0x00000000", + "docs": [ + " Last block authored by collator." 
+ ] + }, + { + "name": "DesiredCandidates", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U32", + "PlainTypeValue": 4 + }, + "fallback": "0x00000000", + "docs": [ + " Desired number of candidates.", + "", + " This should ideally always be less than [`Config::MaxCandidates`] for weights to be correct." + ] + }, + { + "name": "CandidacyBond", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U128", + "PlainTypeValue": 6 + }, + "fallback": "0x00000000000000000000000000000000", + "docs": [ + " Fixed deposit bond for each candidate." + ] + } + ], + "calls": [ + { + "lookup": "1500", + "name": "set_invulnerables", + "docs": null, + "args": [ + { + "name": "new", + "type": "Vec", + "type_name": "Vec" + } + ] + }, + { + "lookup": "1501", + "name": "set_desired_candidates", + "docs": null, + "args": [ + { + "name": "max", + "type": "U32", + "type_name": "u32" + } + ] + }, + { + "lookup": "1502", + "name": "set_candidacy_bond", + "docs": null, + "args": [ + { + "name": "bond", + "type": "U128", + "type_name": "BalanceOf" + } + ] + }, + { + "lookup": "1503", + "name": "register_as_candidate", + "docs": null, + "args": null + }, + { + "lookup": "1504", + "name": "leave_intent", + "docs": null, + "args": null + } + ], + "calls_value": { + "type": 171 + }, + "events": [ + { + "lookup": "1500", + "name": "NewInvulnerables", + "docs": null, + "args": [ + "Vec" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "Vec" + ] + }, + { + "lookup": "1501", + "name": "NewDesiredCandidates", + "docs": null, + "args": [ + "U32" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "u32" + ] + }, + { + "lookup": "1502", + "name": "NewCandidacyBond", + "docs": null, + "args": [ + "U128" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "BalanceOf" + ] + }, + { + "lookup": "1503", + "name": "CandidateAdded", + "docs": null, + "args": [ + "AccountId", + "U128" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + 
"AccountId", + "BalanceOf" + ] + }, + { + "lookup": "1504", + "name": "CandidateRemoved", + "docs": null, + "args": [ + "AccountId" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "AccountId" + ] + } + ], + "events_value": { + "type": 31 + }, + "errors": [ + { + "name": "TooManyCandidates", + "doc": [ + "Too many candidates" + ] + }, + { + "name": "TooFewCandidates", + "doc": [ + "Too few candidates" + ] + }, + { + "name": "Unknown", + "doc": [ + "Unknown error" + ] + }, + { + "name": "Permission", + "doc": [ + "Permission issue" + ] + }, + { + "name": "AlreadyCandidate", + "doc": [ + "User is already a candidate" + ] + }, + { + "name": "NotCandidate", + "doc": [ + "User is not a candidate" + ] + }, + { + "name": "AlreadyInvulnerable", + "doc": [ + "User is already an Invulnerable" + ] + }, + { + "name": "NoAssociatedValidatorId", + "doc": [ + "Account has no associated validator ID" + ] + }, + { + "name": "ValidatorNotRegistered", + "doc": [ + "Validator ID is not yet registered" + ] + } + ], + "errors_value": { + "type": 172 + }, + "index": 21 + }, + { + "name": "Session", + "prefix": "Session", + "storage": [ + { + "name": "Validators", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 32 + }, + "fallback": "0x00", + "docs": [ + " The current set of validators." + ] + }, + { + "name": "CurrentIndex", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U32", + "PlainTypeValue": 4 + }, + "fallback": "0x00000000", + "docs": [ + " Current index of the session." + ] + }, + { + "name": "QueuedChanged", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Bool", + "PlainTypeValue": 87 + }, + "fallback": "0x00", + "docs": [ + " True if the underlying economic identities or weighting behind the validators", + " has changed in the queued validator set." 
+ ] + }, + { + "name": "QueuedKeys", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 173 + }, + "fallback": "0x00", + "docs": [ + " The queued keys for the next session. When the next session begins, these keys", + " will be used to determine the validator's session keys." + ] + }, + { + "name": "DisabledValidators", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 90 + }, + "fallback": "0x00", + "docs": [ + " Indices of disabled validators.", + "", + " The vec is always kept sorted so that we can find whether a given validator is", + " disabled using binary search. It gets cleared when `on_session_ending` returns", + " a new set of identities." + ] + }, + { + "name": "NextKeys", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Twox64Concat" + ], + "key_vec": [ + "AccountId" + ], + "value": "[U8; 32]", + "keys_id": 0, + "value_id": 175 + } + }, + "fallback": "0x00", + "docs": [ + " The next session keys for a validator." + ] + }, + { + "name": "KeyOwner", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Twox64Concat" + ], + "key_vec": [ + "[U8; 4]", + "Vec" + ], + "value": "AccountId", + "keys_id": 178, + "value_id": 0 + } + }, + "fallback": "0x00", + "docs": [ + " The owner of a key. The key is the `KeyTypeId` + the encoded key." + ] + } + ], + "calls": [ + { + "lookup": "1600", + "name": "set_keys", + "docs": [ + "Sets the session key(s) of the function caller to `keys`.", + "Allows an account to set its session key prior to becoming a validator.", + "This doesn't take effect until the next session.", + "", + "The dispatch origin of this function must be signed.", + "", + "# ", + "- Complexity: `O(1)`. 
Actual cost depends on the number of length of", + " `T::Keys::key_ids()` which is fixed.", + "- DbReads: `origin account`, `T::ValidatorIdOf`, `NextKeys`", + "- DbWrites: `origin account`, `NextKeys`", + "- DbReads per key id: `KeyOwner`", + "- DbWrites per key id: `KeyOwner`", + "# " + ], + "args": [ + { + "name": "keys", + "type": "[U8; 32]", + "type_name": "Keys" + }, + { + "name": "proof", + "type": "Vec", + "type_name": "Bytes" + } + ] + }, + { + "lookup": "1601", + "name": "purge_keys", + "docs": [ + "Removes any session key(s) of the function caller.", + "", + "This doesn't take effect until the next session.", + "", + "The dispatch origin of this function must be Signed and the account must be either be", + "convertible to a validator ID using the chain's typical addressing system (this usually", + "means being a controller account) or directly convertible into a validator ID (which", + "usually means being a stash account).", + "", + "# ", + "- Complexity: `O(1)` in number of key types. Actual cost depends on the number of length", + " of `T::Keys::key_ids()` which is fixed.", + "- DbReads: `T::ValidatorIdOf`, `NextKeys`, `origin account`", + "- DbWrites: `NextKeys`, `origin account`", + "- DbWrites per key id: `KeyOwner`", + "# " + ], + "args": null + } + ], + "calls_value": { + "type": 180 + }, + "events": [ + { + "lookup": "1600", + "name": "NewSession", + "docs": [ + "New session has happened. Note that the argument is the \\[session_index\\], not the", + "block number as the type might suggest." + ], + "args": [ + "U32" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "SessionIndex" + ] + } + ], + "events_value": { + "type": 33 + }, + "errors": [ + { + "name": "InvalidProof", + "doc": [ + "Invalid ownership proof." + ] + }, + { + "name": "NoAssociatedValidatorId", + "doc": [ + "No associated validator ID for account." + ] + }, + { + "name": "DuplicatedKey", + "doc": [ + "Registered duplicate key." 
+ ] + }, + { + "name": "NoKeys", + "doc": [ + "No keys are associated with this account." + ] + }, + { + "name": "NoAccount", + "doc": [ + "Key setting account is not live, so it's impossible to associate keys." + ] + } + ], + "errors_value": { + "type": 181 + }, + "index": 22 + }, + { + "name": "Aura", + "prefix": "Aura", + "storage": [ + { + "name": "Authorities", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec<[U8; 32]>", + "PlainTypeValue": 182 + }, + "fallback": "0x00", + "docs": [ + " The current authority set." + ] + }, + { + "name": "CurrentSlot", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U64", + "PlainTypeValue": 184 + }, + "fallback": "0x0000000000000000", + "docs": [ + " The current slot of this block.", + "", + " This will be set in `on_initialize`." + ] + } + ], + "errors": null, + "errors_value": null, + "index": 23 + }, + { + "name": "AuraExt", + "prefix": "AuraExt", + "storage": [ + { + "name": "Authorities", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec<[U8; 32]>", + "PlainTypeValue": 183 + }, + "fallback": "0x00", + "docs": [ + " Serves as cache for the authorities.", + "", + " The authorities in AuRa are overwritten in `on_initialize` when we switch to a new session,", + " but we require the old authorities to verify the seal when validating a PoV. This will always", + " be updated to the latest AuRa authorities in `on_finalize`." + ] + } + ], + "errors": null, + "errors_value": null, + "index": 24 + }, + { + "name": "XcmpQueue", + "prefix": "XcmpQueue", + "storage": [ + { + "name": "InboundXcmpStatus", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec>", + "PlainTypeValue": 185 + }, + "fallback": "0x00", + "docs": [ + " Status of the inbound XCMP channels." 
+ ] + }, + { + "name": "InboundXcmpMessages", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat", + "Twox64Concat" + ], + "key_vec": [ + "U32", + "U32" + ], + "value": "Vec", + "keys_id": 191, + "value_id": 10 + } + }, + "fallback": "0x00", + "docs": [ + " Inbound aggregate XCMP messages. It can only be one per ParaId/block." + ] + }, + { + "name": "OutboundXcmpStatus", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 192 + }, + "fallback": "0x00", + "docs": [ + " The non-empty XCMP channels in order of becoming non-empty, and the index of the first", + " and last outbound message. If the two indices are equal, then it indicates an empty", + " queue and there must be a non-`Ok` `OutboundStatus`. We assume queues grow no greater", + " than 65535 items. Queue indices for normal messages begin at one; zero is reserved in", + " case of the need to send a high-priority signal message this block.", + " The bool is true if there is a signal message waiting to be sent." + ] + }, + { + "name": "OutboundXcmpMessages", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat", + "Twox64Concat" + ], + "key_vec": [ + "U32", + "U16" + ], + "value": "Vec", + "keys_id": 195, + "value_id": 10 + } + }, + "fallback": "0x00", + "docs": [ + " The messages outbound in a given XCMP channel." + ] + }, + { + "name": "SignalMessages", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat" + ], + "key_vec": [ + "U32" + ], + "value": "Vec", + "keys_id": 124, + "value_id": 10 + } + }, + "fallback": "0x00", + "docs": [ + " Any signal messages waiting to be sent." 
+ ] + }, + { + "name": "QueueConfig", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "cumulus_pallet_xcmp_queue:QueueConfigData", + "PlainTypeValue": 196 + }, + "fallback": "0x020000000500000001000000a0860100000000000200000000000000", + "docs": [ + " The configuration which controls the dynamics of the outbound queue." + ] + } + ], + "calls_value": { + "type": 197 + }, + "events": [ + { + "lookup": "1e00", + "name": "Success", + "docs": [ + "Some XCM was executed ok." + ], + "args": [ + "option" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "Option" + ] + }, + { + "lookup": "1e01", + "name": "Fail", + "docs": [ + "Some XCM failed." + ], + "args": [ + "option", + "xcm:v2:traits:Error" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "Option", + "XcmError" + ] + }, + { + "lookup": "1e02", + "name": "BadVersion", + "docs": [ + "Bad XCM version used." + ], + "args": [ + "option" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "Option" + ] + }, + { + "lookup": "1e03", + "name": "BadFormat", + "docs": [ + "Bad XCM format used." + ], + "args": [ + "option" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "Option" + ] + }, + { + "lookup": "1e04", + "name": "UpwardMessageSent", + "docs": [ + "An upward message was sent to the relay chain." + ], + "args": [ + "option" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "Option" + ] + }, + { + "lookup": "1e05", + "name": "XcmpMessageSent", + "docs": [ + "An HRMP message was sent to a sibling parachain." + ], + "args": [ + "option" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "Option" + ] + } + ], + "events_value": { + "type": 34 + }, + "errors": [ + { + "name": "FailedToSend", + "doc": [ + "Failed to send XCM message." + ] + }, + { + "name": "BadXcmOrigin", + "doc": [ + "Bad XCM origin." + ] + }, + { + "name": "BadXcm", + "doc": [ + "Bad XCM data." 
+ ] + } + ], + "errors_value": { + "type": 198 + }, + "index": 30 + }, + { + "name": "PolkadotXcm", + "prefix": "PolkadotXcm", + "storage": [ + { + "name": "QueryCounter", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U64", + "PlainTypeValue": 8 + }, + "fallback": "0x0000000000000000", + "docs": [ + " The latest available query index." + ] + }, + { + "name": "Queries", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat" + ], + "key_vec": [ + "U64" + ], + "value": "pallet_xcm:pallet:QueryStatus", + "keys_id": 8, + "value_id": 199 + } + }, + "fallback": "0x00", + "docs": [ + " The ongoing queries." + ] + }, + { + "name": "AssetTraps", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Identity" + ], + "key_vec": [ + "H256" + ], + "value": "U32", + "keys_id": 9, + "value_id": 4 + } + }, + "fallback": "0x00000000", + "docs": [ + " The existing asset traps.", + "", + " Key is the blake2 256 hash of (origin, versioned `MultiAssets`) pair. Value is the number of", + " times this pair has been trapped (usually just 1 if it exists at all)." + ] + }, + { + "name": "SafeXcmVersion", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "U32", + "PlainTypeValue": 4 + }, + "fallback": "0x00", + "docs": [ + " Default version to encode XCM when latest version of destination is unknown. If `None`,", + " then the destinations whose XCM version is unknown are considered unreachable." + ] + }, + { + "name": "SupportedVersion", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Twox64Concat", + "Blake2_128Concat" + ], + "key_vec": [ + "U32", + "xcm:VersionedMultiLocation" + ], + "value": "U32", + "keys_id": 205, + "value_id": 4 + } + }, + "fallback": "0x00", + "docs": [ + " The Latest versions that we know various locations support." 
+ ] + }, + { + "name": "VersionNotifiers", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Twox64Concat", + "Blake2_128Concat" + ], + "key_vec": [ + "U32", + "xcm:VersionedMultiLocation" + ], + "value": "U64", + "keys_id": 205, + "value_id": 8 + } + }, + "fallback": "0x00", + "docs": [ + " All locations that we have requested version notifications from." + ] + }, + { + "name": "VersionNotifyTargets", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Twox64Concat", + "Blake2_128Concat" + ], + "key_vec": [ + "U32", + "xcm:VersionedMultiLocation" + ], + "value": "Tuple:U64U64U32", + "keys_id": 205, + "value_id": 206 + } + }, + "fallback": "0x00", + "docs": [ + " The target locations that are subscribed to our version changes, as well as the most recent", + " of our versions we informed them of." + ] + }, + { + "name": "VersionDiscoveryQueue", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 207 + }, + "fallback": "0x00", + "docs": [ + " Destinations whose latest XCM version we would like to know. Duplicates not allowed, and", + " the `u32` counter is the number of times that a send to the destination has been attempted,", + " which is used as a prioritization." + ] + }, + { + "name": "CurrentMigration", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "pallet_xcm:pallet:VersionMigrationStage", + "PlainTypeValue": 210 + }, + "fallback": "0x00", + "docs": [ + " The current migration's stage, if any." 
+ ] + } + ], + "calls": [ + { + "lookup": "1f00", + "name": "send", + "docs": null, + "args": [ + { + "name": "dest", + "type": "xcm:VersionedMultiLocation", + "type_name": "Box" + }, + { + "name": "message", + "type": "xcm:VersionedXcm@213", + "type_name": "Box>" + } + ] + }, + { + "lookup": "1f01", + "name": "teleport_assets", + "docs": [ + "Teleport some assets from the local chain to some destination chain.", + "", + "Fee payment on the destination side is made from the first asset listed in the `assets` vector and", + "fee-weight is calculated locally and thus remote weights are assumed to be equal to", + "local weights.", + "", + "- `origin`: Must be capable of withdrawing the `assets` and executing XCM.", + "- `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send", + " from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain.", + "- `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be", + " an `AccountId32` value.", + "- `assets`: The assets to be withdrawn. The first item should be the currency used to to pay the fee on the", + " `dest` side. May not be empty.", + "- `dest_weight`: Equal to the total weight on `dest` of the XCM message", + " `Teleport { assets, effects: [ BuyExecution{..}, DepositAsset{..} ] }`." 
+ ], + "args": [ + { + "name": "dest", + "type": "xcm:VersionedMultiLocation", + "type_name": "Box" + }, + { + "name": "beneficiary", + "type": "xcm:VersionedMultiLocation", + "type_name": "Box" + }, + { + "name": "assets", + "type": "xcm:VersionedMultiAssets", + "type_name": "Box" + }, + { + "name": "fee_asset_item", + "type": "U32", + "type_name": "u32" + } + ] + }, + { + "lookup": "1f02", + "name": "reserve_transfer_assets", + "docs": [ + "Transfer some assets from the local chain to the sovereign account of a destination chain and forward", + "a notification XCM.", + "", + "Fee payment on the destination side is made from the first asset listed in the `assets` vector and", + "fee-weight is calculated locally and thus remote weights are assumed to be equal to", + "local weights.", + "", + "- `origin`: Must be capable of withdrawing the `assets` and executing XCM.", + "- `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send", + " from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain.", + "- `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be", + " an `AccountId32` value.", + "- `assets`: The assets to be withdrawn. This should include the assets used to pay the fee on the", + " `dest` side.", + "- `fee_asset_item`: The index into `assets` of the item which should be used to pay", + " fees." 
+ ], + "args": [ + { + "name": "dest", + "type": "xcm:VersionedMultiLocation", + "type_name": "Box" + }, + { + "name": "beneficiary", + "type": "xcm:VersionedMultiLocation", + "type_name": "Box" + }, + { + "name": "assets", + "type": "xcm:VersionedMultiAssets", + "type_name": "Box" + }, + { + "name": "fee_asset_item", + "type": "U32", + "type_name": "u32" + } + ] + }, + { + "lookup": "1f03", + "name": "execute", + "docs": [ + "Execute an XCM message from a local, signed, origin.", + "", + "An event is deposited indicating whether `msg` could be executed completely or only", + "partially.", + "", + "No more than `max_weight` will be used in its attempted execution. If this is less than the", + "maximum amount of weight that the message could take to be executed, then no execution", + "attempt will be made.", + "", + "NOTE: A successful return to this does *not* imply that the `msg` was executed successfully", + "to completion; only that *some* of it was executed." + ], + "args": [ + { + "name": "message", + "type": "xcm:VersionedXcm@222", + "type_name": "Box::Call>>" + }, + { + "name": "max_weight", + "type": "U64", + "type_name": "Weight" + } + ] + }, + { + "lookup": "1f04", + "name": "force_xcm_version", + "docs": [ + "Extoll that a particular destination can be communicated with through a particular", + "version of XCM.", + "", + "- `origin`: Must be Root.", + "- `location`: The destination that is being described.", + "- `xcm_version`: The latest version of XCM that `location` supports." 
+ ], + "args": [ + { + "name": "location", + "type": "xcm:v1:multilocation:MultiLocation", + "type_name": "Box" + }, + { + "name": "xcm_version", + "type": "U32", + "type_name": "XcmVersion" + } + ] + }, + { + "lookup": "1f05", + "name": "force_default_xcm_version", + "docs": [ + "Set a safe XCM version (the version that XCM should be encoded with if the most recent", + "version a destination can accept is unknown).", + "", + "- `origin`: Must be Root.", + "- `maybe_xcm_version`: The default XCM encoding version, or `None` to disable." + ], + "args": [ + { + "name": "maybe_xcm_version", + "type": "option", + "type_name": "Option" + } + ] + }, + { + "lookup": "1f06", + "name": "force_subscribe_version_notify", + "docs": [ + "Ask a location to notify us regarding their XCM version and any changes to it.", + "", + "- `origin`: Must be Root.", + "- `location`: The location to which we should subscribe for XCM version notifications." + ], + "args": [ + { + "name": "location", + "type": "xcm:VersionedMultiLocation", + "type_name": "Box" + } + ] + }, + { + "lookup": "1f07", + "name": "force_unsubscribe_version_notify", + "docs": [ + "Require that a particular destination should no longer notify us regarding any XCM", + "version changes.", + "", + "- `origin`: Must be Root.", + "- `location`: The location to which we are currently subscribed for XCM version", + " notifications which we no longer desire." + ], + "args": [ + { + "name": "location", + "type": "xcm:VersionedMultiLocation", + "type_name": "Box" + } + ] + }, + { + "lookup": "1f08", + "name": "limited_reserve_transfer_assets", + "docs": [ + "Transfer some assets from the local chain to the sovereign account of a destination chain and forward", + "a notification XCM.", + "", + "Fee payment on the destination side is made from the first asset listed in the `assets` vector.", + "", + "- `origin`: Must be capable of withdrawing the `assets` and executing XCM.", + "- `dest`: Destination context for the assets. 
Will typically be `X2(Parent, Parachain(..))` to send", + " from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain.", + "- `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be", + " an `AccountId32` value.", + "- `assets`: The assets to be withdrawn. This should include the assets used to pay the fee on the", + " `dest` side.", + "- `fee_asset_item`: The index into `assets` of the item which should be used to pay", + " fees.", + "- `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase." + ], + "args": [ + { + "name": "dest", + "type": "xcm:VersionedMultiLocation", + "type_name": "Box" + }, + { + "name": "beneficiary", + "type": "xcm:VersionedMultiLocation", + "type_name": "Box" + }, + { + "name": "assets", + "type": "xcm:VersionedMultiAssets", + "type_name": "Box" + }, + { + "name": "fee_asset_item", + "type": "U32", + "type_name": "u32" + }, + { + "name": "weight_limit", + "type": "xcm:v2:WeightLimit", + "type_name": "WeightLimit" + } + ] + }, + { + "lookup": "1f09", + "name": "limited_teleport_assets", + "docs": [ + "Teleport some assets from the local chain to some destination chain.", + "", + "Fee payment on the destination side is made from the first asset listed in the `assets` vector.", + "", + "- `origin`: Must be capable of withdrawing the `assets` and executing XCM.", + "- `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send", + " from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain.", + "- `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be", + " an `AccountId32` value.", + "- `assets`: The assets to be withdrawn. The first item should be the currency used to to pay the fee on the", + " `dest` side. 
May not be empty.", + "- `dest_weight`: Equal to the total weight on `dest` of the XCM message", + " `Teleport { assets, effects: [ BuyExecution{..}, DepositAsset{..} ] }`.", + "- `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase." + ], + "args": [ + { + "name": "dest", + "type": "xcm:VersionedMultiLocation", + "type_name": "Box" + }, + { + "name": "beneficiary", + "type": "xcm:VersionedMultiLocation", + "type_name": "Box" + }, + { + "name": "assets", + "type": "xcm:VersionedMultiAssets", + "type_name": "Box" + }, + { + "name": "fee_asset_item", + "type": "U32", + "type_name": "u32" + }, + { + "name": "weight_limit", + "type": "xcm:v2:WeightLimit", + "type_name": "WeightLimit" + } + ] + } + ], + "calls_value": { + "type": 212 + }, + "events": [ + { + "lookup": "1f00", + "name": "Attempted", + "docs": [ + "Execution of an XCM message was attempted.", + "", + "\\[ outcome \\]" + ], + "args": [ + "xcm:v2:traits:Outcome" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "xcm::latest::Outcome" + ] + }, + { + "lookup": "1f01", + "name": "Sent", + "docs": [ + "A XCM message was sent.", + "", + "\\[ origin, destination, message \\]" + ], + "args": [ + "xcm:v1:multilocation:MultiLocation", + "xcm:v1:multilocation:MultiLocation", + "Vec" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "MultiLocation", + "MultiLocation", + "Xcm<()>" + ] + }, + { + "lookup": "1f02", + "name": "UnexpectedResponse", + "docs": [ + "Query response received which does not match a registered query. 
This may be because a", + "matching query was never registered, it may be because it is a duplicate response, or", + "because the query timed out.", + "", + "\\[ origin location, id \\]" + ], + "args": [ + "xcm:v1:multilocation:MultiLocation", + "U64" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "MultiLocation", + "QueryId" + ] + }, + { + "lookup": "1f03", + "name": "ResponseReady", + "docs": [ + "Query response has been received and is ready for taking with `take_response`. There is", + "no registered notification call.", + "", + "\\[ id, response \\]" + ], + "args": [ + "U64", + "xcm:v2:Response" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "QueryId", + "Response" + ] + }, + { + "lookup": "1f04", + "name": "Notified", + "docs": [ + "Query response has been received and query is removed. The registered notification has", + "been dispatched and executed successfully.", + "", + "\\[ id, pallet index, call index \\]" + ], + "args": [ + "U64", + "U8", + "U8" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "QueryId", + "u8", + "u8" + ] + }, + { + "lookup": "1f05", + "name": "NotifyOverweight", + "docs": [ + "Query response has been received and query is removed. The registered notification could", + "not be dispatched because the dispatch weight is greater than the maximum weight", + "originally budgeted by this runtime for the query result.", + "", + "\\[ id, pallet index, call index, actual weight, max budgeted weight \\]" + ], + "args": [ + "U64", + "U8", + "U8", + "U64", + "U64" + ], + "args_name": [ + "", + "", + "", + "", + "" + ], + "args_type_name": [ + "QueryId", + "u8", + "u8", + "Weight", + "Weight" + ] + }, + { + "lookup": "1f06", + "name": "NotifyDispatchError", + "docs": [ + "Query response has been received and query is removed. 
There was a general error with", + "dispatching the notification call.", + "", + "\\[ id, pallet index, call index \\]" + ], + "args": [ + "U64", + "U8", + "U8" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "QueryId", + "u8", + "u8" + ] + }, + { + "lookup": "1f07", + "name": "NotifyDecodeFailed", + "docs": [ + "Query response has been received and query is removed. The dispatch was unable to be", + "decoded into a `Call`; this might be due to dispatch function having a signature which", + "is not `(origin, QueryId, Response)`.", + "", + "\\[ id, pallet index, call index \\]" + ], + "args": [ + "U64", + "U8", + "U8" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "QueryId", + "u8", + "u8" + ] + }, + { + "lookup": "1f08", + "name": "InvalidResponder", + "docs": [ + "Expected query response has been received but the origin location of the response does", + "not match that expected. The query remains registered for a later, valid, response to", + "be received and acted upon.", + "", + "\\[ origin location, id, expected location \\]" + ], + "args": [ + "xcm:v1:multilocation:MultiLocation", + "U64", + "option" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "MultiLocation", + "QueryId", + "Option" + ] + }, + { + "lookup": "1f09", + "name": "InvalidResponderVersion", + "docs": [ + "Expected query response has been received but the expected origin location placed in", + "storate by this runtime previously cannot be decoded. The query remains registered.", + "", + "This is unexpected (since a location placed in storage in a previously executing", + "runtime should be readable prior to query timeout) and dangerous since the possibly", + "valid response will be dropped. 
Manual governance intervention is probably going to be", + "needed.", + "", + "\\[ origin location, id \\]" + ], + "args": [ + "xcm:v1:multilocation:MultiLocation", + "U64" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "MultiLocation", + "QueryId" + ] + }, + { + "lookup": "1f0a", + "name": "ResponseTaken", + "docs": [ + "Received query response has been read and removed.", + "", + "\\[ id \\]" + ], + "args": [ + "U64" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "QueryId" + ] + }, + { + "lookup": "1f0b", + "name": "AssetsTrapped", + "docs": [ + "Some assets have been placed in an asset trap.", + "", + "\\[ hash, origin, assets \\]" + ], + "args": [ + "H256", + "xcm:v1:multilocation:MultiLocation", + "xcm:VersionedMultiAssets" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "H256", + "MultiLocation", + "VersionedMultiAssets" + ] + }, + { + "lookup": "1f0c", + "name": "VersionChangeNotified", + "docs": [ + "An XCM version change notification message has been attempted to be sent.", + "", + "\\[ destination, result \\]" + ], + "args": [ + "xcm:v1:multilocation:MultiLocation", + "U32" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "MultiLocation", + "XcmVersion" + ] + }, + { + "lookup": "1f0d", + "name": "SupportedVersionChanged", + "docs": [ + "The supported version of a location has been changed. 
This might be through an", + "automatic notification or a manual intervention.", + "", + "\\[ location, XCM version \\]" + ], + "args": [ + "xcm:v1:multilocation:MultiLocation", + "U32" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "MultiLocation", + "XcmVersion" + ] + }, + { + "lookup": "1f0e", + "name": "NotifyTargetSendFail", + "docs": [ + "A given location which had a version change subscription was dropped owing to an error", + "sending the notification to it.", + "", + "\\[ location, query ID, error \\]" + ], + "args": [ + "xcm:v1:multilocation:MultiLocation", + "U64", + "xcm:v2:traits:Error" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "MultiLocation", + "QueryId", + "XcmError" + ] + }, + { + "lookup": "1f0f", + "name": "NotifyTargetMigrationFail", + "docs": [ + "A given location which had a version change subscription was dropped owing to an error", + "migrating the location to our new XCM format.", + "", + "\\[ location, query ID \\]" + ], + "args": [ + "xcm:VersionedMultiLocation", + "U64" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "VersionedMultiLocation", + "QueryId" + ] + } + ], + "events_value": { + "type": 37 + }, + "errors": [ + { + "name": "Unreachable", + "doc": [ + "The desired destination was unreachable, generally because there is a no way of routing", + "to it." + ] + }, + { + "name": "SendFailure", + "doc": [ + "There was some other issue (i.e. not to do with routing) in sending the message. Perhaps", + "a lack of space for buffering the message." + ] + }, + { + "name": "Filtered", + "doc": [ + "The message execution fails the filter." + ] + }, + { + "name": "UnweighableMessage", + "doc": [ + "The message's weight could not be determined." + ] + }, + { + "name": "DestinationNotInvertible", + "doc": [ + "The destination `MultiLocation` provided cannot be inverted." + ] + }, + { + "name": "Empty", + "doc": [ + "The assets to be sent are empty." 
+ ] + }, + { + "name": "CannotReanchor", + "doc": [ + "Could not re-anchor the assets to declare the fees for the destination chain." + ] + }, + { + "name": "TooManyAssets", + "doc": [ + "Too many assets have been attempted for transfer." + ] + }, + { + "name": "InvalidOrigin", + "doc": [ + "Origin is invalid for sending." + ] + }, + { + "name": "BadVersion", + "doc": [ + "The version of the `Versioned` value used is not able to be interpreted." + ] + }, + { + "name": "BadLocation", + "doc": [ + "The given location could not be used (e.g. because it cannot be expressed in the", + "desired version of XCM)." + ] + }, + { + "name": "NoSubscription", + "doc": [ + "The referenced subscription could not be found." + ] + }, + { + "name": "AlreadySubscribed", + "doc": [ + "The location is invalid since it already has a subscription from us." + ] + } + ], + "errors_value": { + "type": 235 + }, + "index": 31 + }, + { + "name": "CumulusXcm", + "prefix": "", + "storage": null, + "events": [ + { + "lookup": "2000", + "name": "InvalidFormat", + "docs": [ + "Downward message is invalid XCM.", + "\\[ id \\]" + ], + "args": [ + "[U8; 8]" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "[u8; 8]" + ] + }, + { + "lookup": "2001", + "name": "UnsupportedVersion", + "docs": [ + "Downward message is unsupported version of XCM.", + "\\[ id \\]" + ], + "args": [ + "[U8; 8]" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "[u8; 8]" + ] + }, + { + "lookup": "2002", + "name": "ExecutedDownward", + "docs": [ + "Downward message executed with the given outcome.", + "\\[ id, outcome \\]" + ], + "args": [ + "[U8; 8]", + "xcm:v2:traits:Outcome" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "[u8; 8]", + "Outcome" + ] + } + ], + "events_value": { + "type": 76 + }, + "errors": null, + "errors_value": { + "type": 236 + }, + "index": 32 + }, + { + "name": "DmpQueue", + "prefix": "DmpQueue", + "storage": [ + { + "name": "Configuration", + "modifier": "Default", + "type": { 
+ "origin": "PlainType", + "plain_type": "U64", + "PlainTypeValue": 237 + }, + "fallback": "0x00e40b5402000000", + "docs": [ + " The configuration." + ] + }, + { + "name": "PageIndex", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "cumulus_pallet_dmp_queue:PageIndexData", + "PlainTypeValue": 238 + }, + "fallback": "0x00000000000000000000000000000000", + "docs": [ + " The page index." + ] + }, + { + "name": "Pages", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat" + ], + "key_vec": [ + "U32" + ], + "value": "Vec>", + "keys_id": 4, + "value_id": 239 + } + }, + "fallback": "0x00", + "docs": [ + " The queue pages." + ] + }, + { + "name": "Overweight", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat" + ], + "key_vec": [ + "U64" + ], + "value": "Tuple:U32Vec", + "keys_id": 8, + "value_id": 240 + } + }, + "fallback": "0x00", + "docs": [ + " The overweight messages." + ] + } + ], + "calls": [ + { + "lookup": "2100", + "name": "service_overweight", + "docs": [ + "Service a single overweight message.", + "", + "- `origin`: Must pass `ExecuteOverweightOrigin`.", + "- `index`: The index of the overweight message to service.", + "- `weight_limit`: The amount of weight that message execution may take.", + "", + "Errors:", + "- `Unknown`: Message of `index` is unknown.", + "- `OverLimit`: Message execution may use greater than `weight_limit`.", + "", + "Events:", + "- `OverweightServiced`: On success." 
+ ], + "args": [ + { + "name": "index", + "type": "U64", + "type_name": "OverweightIndex" + }, + { + "name": "weight_limit", + "type": "U64", + "type_name": "Weight" + } + ] + } + ], + "calls_value": { + "type": 241 + }, + "events": [ + { + "lookup": "2100", + "name": "InvalidFormat", + "docs": [ + "Downward message is invalid XCM.", + "\\[ id \\]" + ], + "args": [ + "[U8; 32]" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "MessageId" + ] + }, + { + "lookup": "2101", + "name": "UnsupportedVersion", + "docs": [ + "Downward message is unsupported version of XCM.", + "\\[ id \\]" + ], + "args": [ + "[U8; 32]" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "MessageId" + ] + }, + { + "lookup": "2102", + "name": "ExecutedDownward", + "docs": [ + "Downward message executed with the given outcome.", + "\\[ id, outcome \\]" + ], + "args": [ + "[U8; 32]", + "xcm:v2:traits:Outcome" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "MessageId", + "Outcome" + ] + }, + { + "lookup": "2103", + "name": "WeightExhausted", + "docs": [ + "The weight limit for handling downward messages was reached.", + "\\[ id, remaining, required \\]" + ], + "args": [ + "[U8; 32]", + "U64", + "U64" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "MessageId", + "Weight", + "Weight" + ] + }, + { + "lookup": "2104", + "name": "OverweightEnqueued", + "docs": [ + "Downward message is overweight and was placed in the overweight queue.", + "\\[ id, index, required \\]" + ], + "args": [ + "[U8; 32]", + "U64", + "U64" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "MessageId", + "OverweightIndex", + "Weight" + ] + }, + { + "lookup": "2105", + "name": "OverweightServiced", + "docs": [ + "Downward message from the overweight queue was executed.", + "\\[ index, used \\]" + ], + "args": [ + "U64", + "U64" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "OverweightIndex", + "Weight" + ] + } + ], + "events_value": { + "type": 77 
+ }, + "errors": [ + { + "name": "Unknown", + "doc": [ + "The message index given is unknown." + ] + }, + { + "name": "OverLimit", + "doc": [ + "The amount of weight given is possibly not enough for executing the message." + ] + } + ], + "errors_value": { + "type": 242 + }, + "index": 33 + }, + { + "name": "Utility", + "prefix": "", + "storage": null, + "calls": [ + { + "lookup": "2800", + "name": "batch", + "docs": [ + "Send a batch of dispatch calls.", + "", + "May be called from any origin.", + "", + "- `calls`: The calls to be dispatched from the same origin. The number of call must not", + " exceed the constant: `batched_calls_limit` (available in constant metadata).", + "", + "If origin is root then call are dispatch without checking origin filter. (This includes", + "bypassing `frame_system::Config::BaseCallFilter`).", + "", + "# ", + "- Complexity: O(C) where C is the number of calls to be batched.", + "# ", + "", + "This will return `Ok` in all circumstances. To determine the success of the batch, an", + "event is deposited. If a call failed and the batch was interrupted, then the", + "`BatchInterrupted` event is deposited, along with the number of successful calls made", + "and the error of the failed call. If all were successful, then the `BatchCompleted`", + "event is deposited." + ], + "args": [ + { + "name": "calls", + "type": "Vec", + "type_name": "Vec<::Call>" + } + ] + }, + { + "lookup": "2801", + "name": "as_derivative", + "docs": [ + "Send a call through an indexed pseudonym of the sender.", + "", + "Filter from origin are passed along. 
The call will be dispatched with an origin which", + "use the same filter as the origin of this call.", + "", + "NOTE: If you need to ensure that any account-based filtering is not honored (i.e.", + "because you expect `proxy` to have been used prior in the call stack and you do not want", + "the call restrictions to apply to any sub-accounts), then use `as_multi_threshold_1`", + "in the Multisig pallet instead.", + "", + "NOTE: Prior to version *12, this was called `as_limited_sub`.", + "", + "The dispatch origin for this call must be _Signed_." + ], + "args": [ + { + "name": "index", + "type": "U16", + "type_name": "u16" + }, + { + "name": "call", + "type": "Call", + "type_name": "Box<::Call>" + } + ] + }, + { + "lookup": "2802", + "name": "batch_all", + "docs": [ + "Send a batch of dispatch calls and atomically execute them.", + "The whole transaction will rollback and fail if any of the calls failed.", + "", + "May be called from any origin.", + "", + "- `calls`: The calls to be dispatched from the same origin. The number of call must not", + " exceed the constant: `batched_calls_limit` (available in constant metadata).", + "", + "If origin is root then call are dispatch without checking origin filter. (This includes", + "bypassing `frame_system::Config::BaseCallFilter`).", + "", + "# ", + "- Complexity: O(C) where C is the number of calls to be batched.", + "# " + ], + "args": [ + { + "name": "calls", + "type": "Vec", + "type_name": "Vec<::Call>" + } + ] + } + ], + "calls_value": { + "type": 243 + }, + "events": [ + { + "lookup": "2800", + "name": "BatchInterrupted", + "docs": [ + "Batch of dispatches did not complete fully. Index of first failing dispatch given, as", + "well as the error. 
\\[index, error\\]" + ], + "args": [ + "U32", + "sp_runtime:DispatchError" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "u32", + "DispatchError" + ] + }, + { + "lookup": "2801", + "name": "BatchCompleted", + "docs": [ + "Batch of dispatches completed fully with no error." + ], + "args": null + }, + { + "lookup": "2802", + "name": "ItemCompleted", + "docs": [ + "A single item within a Batch of dispatches has completed with no error." + ], + "args": null + } + ], + "events_value": { + "type": 78 + }, + "constants": [ + { + "name": "batched_calls_limit", + "type": "U32", + "type_value": 4, + "constants_value": "8ee30000", + "docs": [ + " The limit on the number of batched calls." + ] + } + ], + "errors": [ + { + "name": "TooManyCalls", + "doc": [ + "Too many calls batched." + ] + } + ], + "errors_value": { + "type": 255 + }, + "index": 40 + }, + { + "name": "Multisig", + "prefix": "Multisig", + "storage": [ + { + "name": "Multisigs", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Twox64Concat", + "Blake2_128Concat" + ], + "key_vec": [ + "AccountId", + "[U8; 32]" + ], + "value": "pallet_multisig:Multisig", + "keys_id": 256, + "value_id": 257 + } + }, + "fallback": "0x00", + "docs": [ + " The set of open multisig operations." 
+ ] + }, + { + "name": "Calls", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Identity" + ], + "key_vec": [ + "[U8; 32]" + ], + "value": "Tuple:VecAccountIdU128", + "keys_id": 1, + "value_id": 258 + } + }, + "fallback": "0x00", + "docs": null + } + ], + "calls": [ + { + "lookup": "2900", + "name": "as_multi_threshold_1", + "docs": [ + "Immediately dispatch a multi-signature call using a single approval from the caller.", + "", + "The dispatch origin for this call must be _Signed_.", + "", + "- `other_signatories`: The accounts (other than the sender) who are part of the", + "multi-signature, but do not participate in the approval process.", + "- `call`: The call to be executed.", + "", + "Result is equivalent to the dispatched result.", + "", + "# ", + "O(Z + C) where Z is the length of the call and C its execution weight.", + "-------------------------------", + "- DB Weight: None", + "- Plus Call Weight", + "# " + ], + "args": [ + { + "name": "other_signatories", + "type": "Vec", + "type_name": "Vec" + }, + { + "name": "call", + "type": "Call", + "type_name": "Box<::Call>" + } + ] + }, + { + "lookup": "2901", + "name": "as_multi", + "docs": [ + "Register approval for a dispatch to be made from a deterministic composite account if", + "approved by a total of `threshold - 1` of `other_signatories`.", + "", + "If there are enough, then dispatch the call.", + "", + "Payment: `DepositBase` will be reserved if this is the first approval, plus", + "`threshold` times `DepositFactor`. It is returned once this dispatch happens or", + "is cancelled.", + "", + "The dispatch origin for this call must be _Signed_.", + "", + "- `threshold`: The total number of approvals for this dispatch before it is executed.", + "- `other_signatories`: The accounts (other than the sender) who can approve this", + "dispatch. May not be empty.", + "- `maybe_timepoint`: If this is the first approval, then this must be `None`. 
If it is", + "not the first approval, then it must be `Some`, with the timepoint (block number and", + "transaction index) of the first approval transaction.", + "- `call`: The call to be executed.", + "", + "NOTE: Unless this is the final approval, you will generally want to use", + "`approve_as_multi` instead, since it only requires a hash of the call.", + "", + "Result is equivalent to the dispatched result if `threshold` is exactly `1`. Otherwise", + "on success, result is `Ok` and the result from the interior call, if it was executed,", + "may be found in the deposited `MultisigExecuted` event.", + "", + "# ", + "- `O(S + Z + Call)`.", + "- Up to one balance-reserve or unreserve operation.", + "- One passthrough operation, one insert, both `O(S)` where `S` is the number of", + " signatories. `S` is capped by `MaxSignatories`, with weight being proportional.", + "- One call encode & hash, both of complexity `O(Z)` where `Z` is tx-len.", + "- One encode & hash, both of complexity `O(S)`.", + "- Up to one binary search and insert (`O(logS + S)`).", + "- I/O: 1 read `O(S)`, up to 1 mutate `O(S)`. 
Up to one remove.", + "- One event.", + "- The weight of the `call`.", + "- Storage: inserts one item, value size bounded by `MaxSignatories`, with a deposit", + " taken for its lifetime of `DepositBase + threshold * DepositFactor`.", + "-------------------------------", + "- DB Weight:", + " - Reads: Multisig Storage, [Caller Account], Calls (if `store_call`)", + " - Writes: Multisig Storage, [Caller Account], Calls (if `store_call`)", + "- Plus Call Weight", + "# " + ], + "args": [ + { + "name": "threshold", + "type": "U16", + "type_name": "u16" + }, + { + "name": "other_signatories", + "type": "Vec", + "type_name": "Vec" + }, + { + "name": "maybe_timepoint", + "type": "option", + "type_name": "Option>" + }, + { + "name": "call", + "type": "Vec", + "type_name": "OpaqueCall" + }, + { + "name": "store_call", + "type": "Bool", + "type_name": "bool" + }, + { + "name": "max_weight", + "type": "U64", + "type_name": "Weight" + } + ] + }, + { + "lookup": "2902", + "name": "approve_as_multi", + "docs": [ + "Register approval for a dispatch to be made from a deterministic composite account if", + "approved by a total of `threshold - 1` of `other_signatories`.", + "", + "Payment: `DepositBase` will be reserved if this is the first approval, plus", + "`threshold` times `DepositFactor`. It is returned once this dispatch happens or", + "is cancelled.", + "", + "The dispatch origin for this call must be _Signed_.", + "", + "- `threshold`: The total number of approvals for this dispatch before it is executed.", + "- `other_signatories`: The accounts (other than the sender) who can approve this", + "dispatch. May not be empty.", + "- `maybe_timepoint`: If this is the first approval, then this must be `None`. 
If it is", + "not the first approval, then it must be `Some`, with the timepoint (block number and", + "transaction index) of the first approval transaction.", + "- `call_hash`: The hash of the call to be executed.", + "", + "NOTE: If this is the final approval, you will want to use `as_multi` instead.", + "", + "# ", + "- `O(S)`.", + "- Up to one balance-reserve or unreserve operation.", + "- One passthrough operation, one insert, both `O(S)` where `S` is the number of", + " signatories. `S` is capped by `MaxSignatories`, with weight being proportional.", + "- One encode & hash, both of complexity `O(S)`.", + "- Up to one binary search and insert (`O(logS + S)`).", + "- I/O: 1 read `O(S)`, up to 1 mutate `O(S)`. Up to one remove.", + "- One event.", + "- Storage: inserts one item, value size bounded by `MaxSignatories`, with a deposit", + " taken for its lifetime of `DepositBase + threshold * DepositFactor`.", + "----------------------------------", + "- DB Weight:", + " - Read: Multisig Storage, [Caller Account]", + " - Write: Multisig Storage, [Caller Account]", + "# " + ], + "args": [ + { + "name": "threshold", + "type": "U16", + "type_name": "u16" + }, + { + "name": "other_signatories", + "type": "Vec", + "type_name": "Vec" + }, + { + "name": "maybe_timepoint", + "type": "option", + "type_name": "Option>" + }, + { + "name": "call_hash", + "type": "[U8; 32]", + "type_name": "[u8; 32]" + }, + { + "name": "max_weight", + "type": "U64", + "type_name": "Weight" + } + ] + }, + { + "lookup": "2903", + "name": "cancel_as_multi", + "docs": [ + "Cancel a pre-existing, on-going multisig transaction. Any deposit reserved previously", + "for this operation will be unreserved on success.", + "", + "The dispatch origin for this call must be _Signed_.", + "", + "- `threshold`: The total number of approvals for this dispatch before it is executed.", + "- `other_signatories`: The accounts (other than the sender) who can approve this", + "dispatch. 
May not be empty.", + "- `timepoint`: The timepoint (block number and transaction index) of the first approval", + "transaction for this dispatch.", + "- `call_hash`: The hash of the call to be executed.", + "", + "# ", + "- `O(S)`.", + "- Up to one balance-reserve or unreserve operation.", + "- One passthrough operation, one insert, both `O(S)` where `S` is the number of", + " signatories. `S` is capped by `MaxSignatories`, with weight being proportional.", + "- One encode & hash, both of complexity `O(S)`.", + "- One event.", + "- I/O: 1 read `O(S)`, one remove.", + "- Storage: removes one item.", + "----------------------------------", + "- DB Weight:", + " - Read: Multisig Storage, [Caller Account], Refund Account, Calls", + " - Write: Multisig Storage, [Caller Account], Refund Account, Calls", + "# " + ], + "args": [ + { + "name": "threshold", + "type": "U16", + "type_name": "u16" + }, + { + "name": "other_signatories", + "type": "Vec", + "type_name": "Vec" + }, + { + "name": "timepoint", + "type": "pallet_multisig:Timepoint", + "type_name": "Timepoint" + }, + { + "name": "call_hash", + "type": "[U8; 32]", + "type_name": "[u8; 32]" + } + ] + } + ], + "calls_value": { + "type": 246 + }, + "events": [ + { + "lookup": "2900", + "name": "NewMultisig", + "docs": [ + "A new multisig operation has begun. 
\\[approving, multisig, call_hash\\]" + ], + "args": [ + "AccountId", + "AccountId", + "[U8; 32]" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "AccountId", + "AccountId", + "CallHash" + ] + }, + { + "lookup": "2901", + "name": "MultisigApproval", + "docs": [ + "A multisig operation has been approved by someone.", + "\\[approving, timepoint, multisig, call_hash\\]" + ], + "args": [ + "AccountId", + "pallet_multisig:Timepoint", + "AccountId", + "[U8; 32]" + ], + "args_name": [ + "", + "", + "", + "" + ], + "args_type_name": [ + "AccountId", + "Timepoint", + "AccountId", + "CallHash" + ] + }, + { + "lookup": "2902", + "name": "MultisigExecuted", + "docs": [ + "A multisig operation has been executed. \\[approving, timepoint, multisig, call_hash\\]" + ], + "args": [ + "AccountId", + "pallet_multisig:Timepoint", + "AccountId", + "[U8; 32]", + "Result" + ], + "args_name": [ + "", + "", + "", + "", + "" + ], + "args_type_name": [ + "AccountId", + "Timepoint", + "AccountId", + "CallHash", + "DispatchResult" + ] + }, + { + "lookup": "2903", + "name": "MultisigCancelled", + "docs": [ + "A multisig operation has been cancelled. \\[cancelling, timepoint, multisig, call_hash\\]" + ], + "args": [ + "AccountId", + "pallet_multisig:Timepoint", + "AccountId", + "[U8; 32]" + ], + "args_name": [ + "", + "", + "", + "" + ], + "args_type_name": [ + "AccountId", + "Timepoint", + "AccountId", + "CallHash" + ] + } + ], + "events_value": { + "type": 79 + }, + "constants": [ + { + "name": "DepositBase", + "type": "U128", + "type_value": 6, + "constants_value": "008e56ad040000000000000000000000", + "docs": [ + " The base amount of currency needed to reserve for creating a multisig execution or to", + " store a dispatch call for later.", + "", + " This is held for an additional storage item whose value size is", + " `4 + sizeof((BlockNumber, Balance, AccountId))` bytes and whose key size is", + " `32 + sizeof(AccountId)` bytes." 
+ ] + }, + { + "name": "DepositFactor", + "type": "U128", + "type_value": 6, + "constants_value": "0048e801000000000000000000000000", + "docs": [ + " The amount of currency needed per unit threshold when creating a multisig execution.", + "", + " This is held for adding 32 bytes more into a pre-existing storage value." + ] + }, + { + "name": "MaxSignatories", + "type": "U16", + "type_value": 85, + "constants_value": "6400", + "docs": [ + " The maximum amount of signatories allowed in the multisig." + ] + } + ], + "errors": [ + { + "name": "MinimumThreshold", + "doc": [ + "Threshold must be 2 or greater." + ] + }, + { + "name": "AlreadyApproved", + "doc": [ + "Call is already approved by this signatory." + ] + }, + { + "name": "NoApprovalsNeeded", + "doc": [ + "Call doesn't need any (more) approvals." + ] + }, + { + "name": "TooFewSignatories", + "doc": [ + "There are too few signatories in the list." + ] + }, + { + "name": "TooManySignatories", + "doc": [ + "There are too many signatories in the list." + ] + }, + { + "name": "SignatoriesOutOfOrder", + "doc": [ + "The signatories were provided out of order; they should be ordered." + ] + }, + { + "name": "SenderInSignatories", + "doc": [ + "The sender was contained in the other signatories; it shouldn't be." + ] + }, + { + "name": "NotFound", + "doc": [ + "Multisig operation not found when attempting to cancel." + ] + }, + { + "name": "NotOwner", + "doc": [ + "Only the account that originally created the multisig is able to cancel it." + ] + }, + { + "name": "NoTimepoint", + "doc": [ + "No timepoint was given, yet the multisig operation is already underway." + ] + }, + { + "name": "WrongTimepoint", + "doc": [ + "A different timepoint was given to the multisig operation that is underway." + ] + }, + { + "name": "UnexpectedTimepoint", + "doc": [ + "A timepoint was given, yet no multisig operation is underway." + ] + }, + { + "name": "MaxWeightTooLow", + "doc": [ + "The maximum weight information provided was too low." 
+ ] + }, + { + "name": "AlreadyStored", + "doc": [ + "The data to be stored is already stored." + ] + } + ], + "errors_value": { + "type": 259 + }, + "index": 41 + }, + { + "name": "Proxy", + "prefix": "Proxy", + "storage": [ + { + "name": "Proxies", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Twox64Concat" + ], + "key_vec": [ + "AccountId" + ], + "value": "Tuple:frame_support:storage:bounded_vec:BoundedVec@261U128", + "keys_id": 0, + "value_id": 260 + } + }, + "fallback": "0x0000000000000000000000000000000000", + "docs": [ + " The set of account proxies. Maps the account which has delegated to the accounts", + " which are being delegated to, together with the amount held on deposit." + ] + }, + { + "name": "Announcements", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Twox64Concat" + ], + "key_vec": [ + "AccountId" + ], + "value": "Tuple:frame_support:storage:bounded_vec:BoundedVec@265U128", + "keys_id": 0, + "value_id": 264 + } + }, + "fallback": "0x0000000000000000000000000000000000", + "docs": [ + " The announcements made by the proxy (key)." 
+ ] + } + ], + "calls": [ + { + "lookup": "2a00", + "name": "proxy", + "docs": [ + "Dispatch the given `call` from an account that the sender is authorised for through", + "`add_proxy`.", + "", + "Removes any corresponding announcement(s).", + "", + "The dispatch origin for this call must be _Signed_.", + "", + "Parameters:", + "- `real`: The account that the proxy will make a call on behalf of.", + "- `force_proxy_type`: Specify the exact proxy type to be used and checked for this call.", + "- `call`: The call to be made by the `real` account.", + "", + "# ", + "Weight is a function of the number of proxies the user has (P).", + "# " + ], + "args": [ + { + "name": "real", + "type": "AccountId", + "type_name": "AccountId" + }, + { + "name": "force_proxy_type", + "type": "option", + "type_name": "Option" + }, + { + "name": "call", + "type": "Call", + "type_name": "Box<::Call>" + } + ] + }, + { + "lookup": "2a01", + "name": "add_proxy", + "docs": [ + "Register a proxy account for the sender that is able to make calls on its behalf.", + "", + "The dispatch origin for this call must be _Signed_.", + "", + "Parameters:", + "- `proxy`: The account that the `caller` would like to make a proxy.", + "- `proxy_type`: The permissions allowed for this proxy account.", + "- `delay`: The announcement period required of the initial proxy. 
Will generally be", + "zero.", + "", + "# ", + "Weight is a function of the number of proxies the user has (P).", + "# " + ], + "args": [ + { + "name": "delegate", + "type": "AccountId", + "type_name": "AccountId" + }, + { + "name": "proxy_type", + "type": "statemint_runtime:ProxyType", + "type_name": "ProxyType" + }, + { + "name": "delay", + "type": "U32", + "type_name": "BlockNumber" + } + ] + }, + { + "lookup": "2a02", + "name": "remove_proxy", + "docs": [ + "Unregister a proxy account for the sender.", + "", + "The dispatch origin for this call must be _Signed_.", + "", + "Parameters:", + "- `proxy`: The account that the `caller` would like to remove as a proxy.", + "- `proxy_type`: The permissions currently enabled for the removed proxy account.", + "", + "# ", + "Weight is a function of the number of proxies the user has (P).", + "# " + ], + "args": [ + { + "name": "delegate", + "type": "AccountId", + "type_name": "AccountId" + }, + { + "name": "proxy_type", + "type": "statemint_runtime:ProxyType", + "type_name": "ProxyType" + }, + { + "name": "delay", + "type": "U32", + "type_name": "BlockNumber" + } + ] + }, + { + "lookup": "2a03", + "name": "remove_proxies", + "docs": [ + "Unregister all proxy accounts for the sender.", + "", + "The dispatch origin for this call must be _Signed_.", + "", + "WARNING: This may be called on accounts created by `anonymous`, however if done, then", + "the unreserved fees will be inaccessible. **All access to this account will be lost.**", + "", + "# ", + "Weight is a function of the number of proxies the user has (P).", + "# " + ], + "args": null + }, + { + "lookup": "2a04", + "name": "anonymous", + "docs": [ + "Spawn a fresh new account that is guaranteed to be otherwise inaccessible, and", + "initialize it with a proxy of `proxy_type` for `origin` sender.", + "", + "Requires a `Signed` origin.", + "", + "- `proxy_type`: The type of the proxy that the sender will be registered as over the", + "new account. 
This will almost always be the most permissive `ProxyType` possible to", + "allow for maximum flexibility.", + "- `index`: A disambiguation index, in case this is called multiple times in the same", + "transaction (e.g. with `utility::batch`). Unless you're using `batch` you probably just", + "want to use `0`.", + "- `delay`: The announcement period required of the initial proxy. Will generally be", + "zero.", + "", + "Fails with `Duplicate` if this has already been called in this transaction, from the", + "same sender, with the same parameters.", + "", + "Fails if there are insufficient funds to pay for deposit.", + "", + "# ", + "Weight is a function of the number of proxies the user has (P).", + "# ", + "TODO: Might be over counting 1 read" + ], + "args": [ + { + "name": "proxy_type", + "type": "statemint_runtime:ProxyType", + "type_name": "ProxyType" + }, + { + "name": "delay", + "type": "U32", + "type_name": "BlockNumber" + }, + { + "name": "index", + "type": "U16", + "type_name": "u16" + } + ] + }, + { + "lookup": "2a05", + "name": "kill_anonymous", + "docs": [ + "Removes a previously spawned anonymous proxy.", + "", + "WARNING: **All access to this account will be lost.** Any funds held in it will be", + "inaccessible.", + "", + "Requires a `Signed` origin, and the sender account must have been created by a call to", + "`anonymous` with corresponding parameters.", + "", + "- `spawner`: The account that originally called `anonymous` to create this account.", + "- `index`: The disambiguation index originally passed to `anonymous`. 
Probably `0`.", + "- `proxy_type`: The proxy type originally passed to `anonymous`.", + "- `height`: The height of the chain when the call to `anonymous` was processed.", + "- `ext_index`: The extrinsic index in which the call to `anonymous` was processed.", + "", + "Fails with `NoPermission` in case the caller is not a previously created anonymous", + "account whose `anonymous` call has corresponding parameters.", + "", + "# ", + "Weight is a function of the number of proxies the user has (P).", + "# " + ], + "args": [ + { + "name": "spawner", + "type": "AccountId", + "type_name": "AccountId" + }, + { + "name": "proxy_type", + "type": "statemint_runtime:ProxyType", + "type_name": "ProxyType" + }, + { + "name": "index", + "type": "U16", + "type_name": "u16" + }, + { + "name": "height", + "type": "compact", + "type_name": "BlockNumber" + }, + { + "name": "ext_index", + "type": "compact", + "type_name": "u32" + } + ] + }, + { + "lookup": "2a06", + "name": "announce", + "docs": [ + "Publish the hash of a proxy-call that will be made in the future.", + "", + "This must be called some number of blocks before the corresponding `proxy` is attempted", + "if the delay associated with the proxy relationship is greater than zero.", + "", + "No more than `MaxPending` announcements may be made at any one time.", + "", + "This will take a deposit of `AnnouncementDepositFactor` as well as", + "`AnnouncementDepositBase` if there are no other pending announcements.", + "", + "The dispatch origin for this call must be _Signed_ and a proxy of `real`.", + "", + "Parameters:", + "- `real`: The account that the proxy will make a call on behalf of.", + "- `call_hash`: The hash of the call to be made by the `real` account.", + "", + "# ", + "Weight is a function of:", + "- A: the number of announcements made.", + "- P: the number of proxies the user has.", + "# " + ], + "args": [ + { + "name": "real", + "type": "AccountId", + "type_name": "AccountId" + }, + { + "name": "call_hash", + 
"type": "H256", + "type_name": "CallHashOf" + } + ] + }, + { + "lookup": "2a07", + "name": "remove_announcement", + "docs": [ + "Remove a given announcement.", + "", + "May be called by a proxy account to remove a call they previously announced and return", + "the deposit.", + "", + "The dispatch origin for this call must be _Signed_.", + "", + "Parameters:", + "- `real`: The account that the proxy will make a call on behalf of.", + "- `call_hash`: The hash of the call to be made by the `real` account.", + "", + "# ", + "Weight is a function of:", + "- A: the number of announcements made.", + "- P: the number of proxies the user has.", + "# " + ], + "args": [ + { + "name": "real", + "type": "AccountId", + "type_name": "AccountId" + }, + { + "name": "call_hash", + "type": "H256", + "type_name": "CallHashOf" + } + ] + }, + { + "lookup": "2a08", + "name": "reject_announcement", + "docs": [ + "Remove the given announcement of a delegate.", + "", + "May be called by a target (proxied) account to remove a call that one of their delegates", + "(`delegate`) has announced they want to execute. 
The deposit is returned.", + "", + "The dispatch origin for this call must be _Signed_.", + "", + "Parameters:", + "- `delegate`: The account that previously announced the call.", + "- `call_hash`: The hash of the call to be made.", + "", + "# ", + "Weight is a function of:", + "- A: the number of announcements made.", + "- P: the number of proxies the user has.", + "# " + ], + "args": [ + { + "name": "delegate", + "type": "AccountId", + "type_name": "AccountId" + }, + { + "name": "call_hash", + "type": "H256", + "type_name": "CallHashOf" + } + ] + }, + { + "lookup": "2a09", + "name": "proxy_announced", + "docs": [ + "Dispatch the given `call` from an account that the sender is authorized for through", + "`add_proxy`.", + "", + "Removes any corresponding announcement(s).", + "", + "The dispatch origin for this call must be _Signed_.", + "", + "Parameters:", + "- `real`: The account that the proxy will make a call on behalf of.", + "- `force_proxy_type`: Specify the exact proxy type to be used and checked for this call.", + "- `call`: The call to be made by the `real` account.", + "", + "# ", + "Weight is a function of:", + "- A: the number of announcements made.", + "- P: the number of proxies the user has.", + "# " + ], + "args": [ + { + "name": "delegate", + "type": "AccountId", + "type_name": "AccountId" + }, + { + "name": "real", + "type": "AccountId", + "type_name": "AccountId" + }, + { + "name": "force_proxy_type", + "type": "option", + "type_name": "Option" + }, + { + "name": "call", + "type": "Call", + "type_name": "Box<::Call>" + } + ] + } + ], + "calls_value": { + "type": 248 + }, + "events": [ + { + "lookup": "2a00", + "name": "ProxyExecuted", + "docs": [ + "A proxy was executed correctly, with the given \\[result\\]." 
+ ], + "args": [ + "Result" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "DispatchResult" + ] + }, + { + "lookup": "2a01", + "name": "AnonymousCreated", + "docs": [ + "Anonymous account has been created by new proxy with given", + "disambiguation index and proxy type. \\[anonymous, who, proxy_type,", + "disambiguation_index\\]" + ], + "args": [ + "AccountId", + "AccountId", + "statemint_runtime:ProxyType", + "U16" + ], + "args_name": [ + "", + "", + "", + "" + ], + "args_type_name": [ + "AccountId", + "AccountId", + "ProxyType", + "u16" + ] + }, + { + "lookup": "2a02", + "name": "Announced", + "docs": [ + "An announcement was placed to make a call in the future. \\[real, proxy, call_hash\\]" + ], + "args": [ + "AccountId", + "AccountId", + "H256" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "AccountId", + "AccountId", + "CallHashOf" + ] + }, + { + "lookup": "2a03", + "name": "ProxyAdded", + "docs": [ + "A proxy was added. \\[delegator, delegatee, proxy_type, delay\\]" + ], + "args": [ + "AccountId", + "AccountId", + "statemint_runtime:ProxyType", + "U32" + ], + "args_name": [ + "", + "", + "", + "" + ], + "args_type_name": [ + "AccountId", + "AccountId", + "ProxyType", + "BlockNumber" + ] + } + ], + "events_value": { + "type": 83 + }, + "constants": [ + { + "name": "ProxyDepositBase", + "type": "U128", + "type_value": 6, + "constants_value": "00227aaa040000000000000000000000", + "docs": [ + " The base amount of currency needed to reserve for creating a proxy.", + "", + " This is held for an additional storage item whose value size is", + " `sizeof(Balance)` bytes and whose key size is `sizeof(AccountId)` bytes." + ] + }, + { + "name": "ProxyDepositFactor", + "type": "U128", + "type_value": 6, + "constants_value": "408af701000000000000000000000000", + "docs": [ + " The amount of currency needed per proxy added.", + "", + " This is held for adding 32 bytes plus an instance of `ProxyType` more into a", + " pre-existing storage value. 
Thus, when configuring `ProxyDepositFactor` one should take", + " into account `32 + proxy_type.encode().len()` bytes of data." + ] + }, + { + "name": "MaxProxies", + "type": "U32", + "type_value": 4, + "constants_value": "20000000", + "docs": [ + " The maximum amount of proxies allowed for a single account." + ] + }, + { + "name": "MaxPending", + "type": "U32", + "type_value": 4, + "constants_value": "20000000", + "docs": [ + " The maximum amount of time-delayed announcements that are allowed to be pending." + ] + }, + { + "name": "AnnouncementDepositBase", + "type": "U128", + "type_value": 6, + "constants_value": "0034f4aa040000000000000000000000", + "docs": [ + " The base amount of currency needed to reserve for creating an announcement.", + "", + " This is held when a new storage item holding a `Balance` is created (typically 16", + " bytes)." + ] + }, + { + "name": "AnnouncementDepositFactor", + "type": "U128", + "type_value": 6, + "constants_value": "8014ef03000000000000000000000000", + "docs": [ + " The amount of currency needed per announcement made.", + "", + " This is held for adding an `AccountId`, `Hash` and `BlockNumber` (typically 68 bytes)", + " into a pre-existing storage value." + ] + } + ], + "errors": [ + { + "name": "TooMany", + "doc": [ + "There are too many proxies registered or too many announcements pending." + ] + }, + { + "name": "NotFound", + "doc": [ + "Proxy registration not found." + ] + }, + { + "name": "NotProxy", + "doc": [ + "Sender is not a proxy of the account to be proxied." + ] + }, + { + "name": "Unproxyable", + "doc": [ + "A call which is incompatible with the proxy type's filter was attempted." + ] + }, + { + "name": "Duplicate", + "doc": [ + "Account is already a proxy." + ] + }, + { + "name": "NoPermission", + "doc": [ + "Call may not be made by proxy because it may escalate its privileges." + ] + }, + { + "name": "Unannounced", + "doc": [ + "Announcement, if made at all, was made too recently." 
+ ] + }, + { + "name": "NoSelfProxy", + "doc": [ + "Cannot add self as proxy." + ] + } + ], + "errors_value": { + "type": 268 + }, + "index": 42 + }, + { + "name": "Assets", + "prefix": "Assets", + "storage": [ + { + "name": "Asset", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat" + ], + "key_vec": [ + "U32" + ], + "value": "pallet_assets:types:AssetDetails", + "keys_id": 4, + "value_id": 269 + } + }, + "fallback": "0x00", + "docs": [ + " Details of an asset." + ] + }, + { + "name": "Account", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat", + "Blake2_128Concat" + ], + "key_vec": [ + "U32", + "AccountId" + ], + "value": "pallet_assets:types:AssetBalance", + "keys_id": 270, + "value_id": 271 + } + }, + "fallback": "0x000000000000000000000000000000000000", + "docs": [ + " The number of units of assets held by any given account." + ] + }, + { + "name": "Approvals", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat", + "Blake2_128Concat", + "Blake2_128Concat" + ], + "key_vec": [ + "U32", + "AccountId", + "AccountId" + ], + "value": "pallet_assets:types:Approval", + "keys_id": 272, + "value_id": 273 + } + }, + "fallback": "0x00", + "docs": [ + " Approved balance transfers. First balance is the amount approved for transfer. Second", + " is the amount of `T::Currency` reserved for storing this.", + " First key is the asset ID, second key is the owner and third key is the delegate." + ] + }, + { + "name": "Metadata", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat" + ], + "key_vec": [ + "U32" + ], + "value": "pallet_assets:types:AssetMetadata", + "keys_id": 4, + "value_id": 274 + } + }, + "fallback": "0x0000000000000000000000000000000000000000", + "docs": [ + " Metadata of an asset." 
+ ] + } + ], + "calls": [ + { + "lookup": "3200", + "name": "create", + "docs": [ + "Issue a new class of fungible assets from a public origin.", + "", + "This new asset class has no assets initially and its owner is the origin.", + "", + "The origin must be Signed and the sender must have sufficient funds free.", + "", + "Funds of sender are reserved by `AssetDeposit`.", + "", + "Parameters:", + "- `id`: The identifier of the new asset. This must not be currently in use to identify", + "an existing asset.", + "- `admin`: The admin of this class of assets. The admin is the initial address of each", + "member of the asset class's admin team.", + "- `min_balance`: The minimum balance of this new asset that any single account must", + "have. If an account's balance is reduced below this, then it collapses to zero.", + "", + "Emits `Created` event when successful.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "admin", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "min_balance", + "type": "U128", + "type_name": "Balance" + } + ] + }, + { + "lookup": "3201", + "name": "force_create", + "docs": [ + "Issue a new class of fungible assets from a privileged origin.", + "", + "This new asset class has no assets initially.", + "", + "The origin must conform to `ForceOrigin`.", + "", + "Unlike `create`, no funds are reserved.", + "", + "- `id`: The identifier of the new asset. This must not be currently in use to identify", + "an existing asset.", + "- `owner`: The owner of this class of assets. The owner has full superuser permissions", + "over this asset, but may later change and configure the permissions using", + "`transfer_ownership` and `set_team`.", + "- `min_balance`: The minimum balance of this new asset that any single account must", + "have. 
If an account's balance is reduced below this, then it collapses to zero.", + "", + "Emits `ForceCreated` event when successful.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "owner", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "is_sufficient", + "type": "Bool", + "type_name": "bool" + }, + { + "name": "min_balance", + "type": "compact", + "type_name": "Balance" + } + ] + }, + { + "lookup": "3202", + "name": "destroy", + "docs": [ + "Destroy a class of fungible assets.", + "", + "The origin must conform to `ForceOrigin` or must be Signed and the sender must be the", + "owner of the asset `id`.", + "", + "- `id`: The identifier of the asset to be destroyed. This must identify an existing", + "asset.", + "", + "Emits `Destroyed` event when successful.", + "", + "NOTE: It can be helpful to first freeze an asset before destroying it so that you", + "can provide accurate witness information and prevent users from manipulating state", + "in a way that can make it harder to destroy.", + "", + "Weight: `O(c + p + a)` where:", + "- `c = (witness.accounts - witness.sufficients)`", + "- `s = witness.sufficients`", + "- `a = witness.approvals`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "witness", + "type": "pallet_assets:types:DestroyWitness", + "type_name": "DestroyWitness" + } + ] + }, + { + "lookup": "3203", + "name": "mint", + "docs": [ + "Mint assets of a particular class.", + "", + "The origin must be Signed and the sender must be the Issuer of the asset `id`.", + "", + "- `id`: The identifier of the asset to have some amount minted.", + "- `beneficiary`: The account to be credited with the minted assets.", + "- `amount`: The amount of the asset to be minted.", + "", + "Emits `Issued` event when successful.", + "", + "Weight: `O(1)`", + "Modes: Pre-existing balance of 
`beneficiary`; Account pre-existence of `beneficiary`." + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "beneficiary", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "amount", + "type": "compact", + "type_name": "Balance" + } + ] + }, + { + "lookup": "3204", + "name": "burn", + "docs": [ + "Reduce the balance of `who` by as much as possible up to `amount` assets of `id`.", + "", + "Origin must be Signed and the sender should be the Manager of the asset `id`.", + "", + "Bails with `BalanceZero` if the `who` is already dead.", + "", + "- `id`: The identifier of the asset to have some amount burned.", + "- `who`: The account to be debited from.", + "- `amount`: The maximum amount by which `who`'s balance should be reduced.", + "", + "Emits `Burned` with the actual amount burned. If this takes the balance to below the", + "minimum for the asset, then the amount burned is increased to take it to zero.", + "", + "Weight: `O(1)`", + "Modes: Post-existence of `who`; Pre & post Zombie-status of `who`." + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "who", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "amount", + "type": "compact", + "type_name": "Balance" + } + ] + }, + { + "lookup": "3205", + "name": "transfer", + "docs": [ + "Move some assets from the sender account to another.", + "", + "Origin must be Signed.", + "", + "- `id`: The identifier of the asset to have some amount transferred.", + "- `target`: The account to be credited.", + "- `amount`: The amount by which the sender's balance of assets should be reduced and", + "`target`'s balance increased. The amount actually transferred may be slightly greater in", + "the case that the transfer would otherwise take the sender balance above zero but below", + "the minimum balance. 
Must be greater than zero.", + "", + "Emits `Transferred` with the actual amount transferred. If this takes the source balance", + "to below the minimum for the asset, then the amount transferred is increased to take it", + "to zero.", + "", + "Weight: `O(1)`", + "Modes: Pre-existence of `target`; Post-existence of sender; Account pre-existence of", + "`target`." + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "target", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "amount", + "type": "compact", + "type_name": "Balance" + } + ] + }, + { + "lookup": "3206", + "name": "transfer_keep_alive", + "docs": [ + "Move some assets from the sender account to another, keeping the sender account alive.", + "", + "Origin must be Signed.", + "", + "- `id`: The identifier of the asset to have some amount transferred.", + "- `target`: The account to be credited.", + "- `amount`: The amount by which the sender's balance of assets should be reduced and", + "`target`'s balance increased. The amount actually transferred may be slightly greater in", + "the case that the transfer would otherwise take the sender balance above zero but below", + "the minimum balance. Must be greater than zero.", + "", + "Emits `Transferred` with the actual amount transferred. If this takes the source balance", + "to below the minimum for the asset, then the amount transferred is increased to take it", + "to zero.", + "", + "Weight: `O(1)`", + "Modes: Pre-existence of `target`; Post-existence of sender; Account pre-existence of", + "`target`." 
+ ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "target", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "amount", + "type": "compact", + "type_name": "Balance" + } + ] + }, + { + "lookup": "3207", + "name": "force_transfer", + "docs": [ + "Move some assets from one account to another.", + "", + "Origin must be Signed and the sender should be the Admin of the asset `id`.", + "", + "- `id`: The identifier of the asset to have some amount transferred.", + "- `source`: The account to be debited.", + "- `dest`: The account to be credited.", + "- `amount`: The amount by which the `source`'s balance of assets should be reduced and", + "`dest`'s balance increased. The amount actually transferred may be slightly greater in", + "the case that the transfer would otherwise take the `source` balance above zero but", + "below the minimum balance. Must be greater than zero.", + "", + "Emits `Transferred` with the actual amount transferred. If this takes the source balance", + "to below the minimum for the asset, then the amount transferred is increased to take it", + "to zero.", + "", + "Weight: `O(1)`", + "Modes: Pre-existence of `dest`; Post-existence of `source`; Account pre-existence of", + "`dest`." 
+ ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "source", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "dest", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "amount", + "type": "compact", + "type_name": "Balance" + } + ] + }, + { + "lookup": "3208", + "name": "freeze", + "docs": [ + "Disallow further unprivileged transfers from an account.", + "", + "Origin must be Signed and the sender should be the Freezer of the asset `id`.", + "", + "- `id`: The identifier of the asset to be frozen.", + "- `who`: The account to be frozen.", + "", + "Emits `Frozen`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "who", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + } + ] + }, + { + "lookup": "3209", + "name": "thaw", + "docs": [ + "Allow unprivileged transfers from an account again.", + "", + "Origin must be Signed and the sender should be the Admin of the asset `id`.", + "", + "- `id`: The identifier of the asset to be frozen.", + "- `who`: The account to be unfrozen.", + "", + "Emits `Thawed`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "who", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + } + ] + }, + { + "lookup": "320a", + "name": "freeze_asset", + "docs": [ + "Disallow further unprivileged transfers for the asset class.", + "", + "Origin must be Signed and the sender should be the Freezer of the asset `id`.", + "", + "- `id`: The identifier of the asset to be frozen.", + "", + "Emits `Frozen`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + } + ] + }, + { + "lookup": "320b", + "name": "thaw_asset", + "docs": [ + "Allow unprivileged 
transfers for the asset again.", + "", + "Origin must be Signed and the sender should be the Admin of the asset `id`.", + "", + "- `id`: The identifier of the asset to be thawed.", + "", + "Emits `Thawed`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + } + ] + }, + { + "lookup": "320c", + "name": "transfer_ownership", + "docs": [ + "Change the Owner of an asset.", + "", + "Origin must be Signed and the sender should be the Owner of the asset `id`.", + "", + "- `id`: The identifier of the asset.", + "- `owner`: The new Owner of this asset.", + "", + "Emits `OwnerChanged`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "owner", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + } + ] + }, + { + "lookup": "320d", + "name": "set_team", + "docs": [ + "Change the Issuer, Admin and Freezer of an asset.", + "", + "Origin must be Signed and the sender should be the Owner of the asset `id`.", + "", + "- `id`: The identifier of the asset to be frozen.", + "- `issuer`: The new Issuer of this asset.", + "- `admin`: The new Admin of this asset.", + "- `freezer`: The new Freezer of this asset.", + "", + "Emits `TeamChanged`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "issuer", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "admin", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "freezer", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + } + ] + }, + { + "lookup": "320e", + "name": "set_metadata", + "docs": [ + "Set the metadata for an asset.", + "", + "Origin must be Signed and the sender should be the Owner of the asset `id`.", + "", + "Funds of sender are reserved according to the formula:", + 
"`MetadataDepositBase + MetadataDepositPerByte * (name.len + symbol.len)` taking into", + "account any already reserved funds.", + "", + "- `id`: The identifier of the asset to update.", + "- `name`: The user friendly name of this asset. Limited in length by `StringLimit`.", + "- `symbol`: The exchange symbol for this asset. Limited in length by `StringLimit`.", + "- `decimals`: The number of decimals this asset uses to represent one unit.", + "", + "Emits `MetadataSet`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "name", + "type": "Vec", + "type_name": "Bytes" + }, + { + "name": "symbol", + "type": "Vec", + "type_name": "Bytes" + }, + { + "name": "decimals", + "type": "U8", + "type_name": "u8" + } + ] + }, + { + "lookup": "320f", + "name": "clear_metadata", + "docs": [ + "Clear the metadata for an asset.", + "", + "Origin must be Signed and the sender should be the Owner of the asset `id`.", + "", + "Any deposit is freed for the asset owner.", + "", + "- `id`: The identifier of the asset to clear.", + "", + "Emits `MetadataCleared`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + } + ] + }, + { + "lookup": "3210", + "name": "force_set_metadata", + "docs": [ + "Force the metadata for an asset to some value.", + "", + "Origin must be ForceOrigin.", + "", + "Any deposit is left alone.", + "", + "- `id`: The identifier of the asset to update.", + "- `name`: The user friendly name of this asset. Limited in length by `StringLimit`.", + "- `symbol`: The exchange symbol for this asset. Limited in length by `StringLimit`.", + "- `decimals`: The number of decimals this asset uses to represent one unit.", + "", + "Emits `MetadataSet`.", + "", + "Weight: `O(N + S)` where N and S are the length of the name and symbol respectively." 
+ ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "name", + "type": "Vec", + "type_name": "Bytes" + }, + { + "name": "symbol", + "type": "Vec", + "type_name": "Bytes" + }, + { + "name": "decimals", + "type": "U8", + "type_name": "u8" + }, + { + "name": "is_frozen", + "type": "Bool", + "type_name": "bool" + } + ] + }, + { + "lookup": "3211", + "name": "force_clear_metadata", + "docs": [ + "Clear the metadata for an asset.", + "", + "Origin must be ForceOrigin.", + "", + "Any deposit is returned.", + "", + "- `id`: The identifier of the asset to clear.", + "", + "Emits `MetadataCleared`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + } + ] + }, + { + "lookup": "3212", + "name": "force_asset_status", + "docs": [ + "Alter the attributes of a given asset.", + "", + "Origin must be `ForceOrigin`.", + "", + "- `id`: The identifier of the asset.", + "- `owner`: The new Owner of this asset.", + "- `issuer`: The new Issuer of this asset.", + "- `admin`: The new Admin of this asset.", + "- `freezer`: The new Freezer of this asset.", + "- `min_balance`: The minimum balance of this new asset that any single account must", + "have. If an account's balance is reduced below this, then it collapses to zero.", + "- `is_sufficient`: Whether a non-zero balance of this asset is deposit of sufficient", + "value to account for the state bloat associated with its balance storage. 
If set to", + "`true`, then non-zero balances may be stored without a `consumer` reference (and thus", + "an ED in the Balances pallet or whatever else is used to control user-account state", + "growth).", + "- `is_frozen`: Whether this asset class is frozen except for permissioned/admin", + "instructions.", + "", + "Emits `AssetStatusChanged` with the identity of the asset.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "owner", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "issuer", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "admin", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "freezer", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "min_balance", + "type": "compact", + "type_name": "Balance" + }, + { + "name": "is_sufficient", + "type": "Bool", + "type_name": "bool" + }, + { + "name": "is_frozen", + "type": "Bool", + "type_name": "bool" + } + ] + }, + { + "lookup": "3213", + "name": "approve_transfer", + "docs": [ + "Approve an amount of asset for transfer by a delegated third-party account.", + "", + "Origin must be Signed.", + "", + "Ensures that `ApprovalDeposit` worth of `Currency` is reserved from signing account", + "for the purpose of holding the approval. If some non-zero amount of assets is already", + "approved from signing account to `delegate`, then it is topped up or unreserved to", + "meet the right value.", + "", + "NOTE: The signing account does not need to own `amount` of assets at the point of", + "making this call.", + "", + "- `id`: The identifier of the asset.", + "- `delegate`: The account to delegate permission to transfer asset.", + "- `amount`: The amount of asset that may be transferred by `delegate`. 
If there is", + "already an approval in place, then this acts additively.", + "", + "Emits `ApprovedTransfer` on success.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "delegate", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "amount", + "type": "compact", + "type_name": "Balance" + } + ] + }, + { + "lookup": "3214", + "name": "cancel_approval", + "docs": [ + "Cancel all of some asset approved for delegated transfer by a third-party account.", + "", + "Origin must be Signed and there must be an approval in place between signer and", + "`delegate`.", + "", + "Unreserves any deposit previously reserved by `approve_transfer` for the approval.", + "", + "- `id`: The identifier of the asset.", + "- `delegate`: The account delegated permission to transfer asset.", + "", + "Emits `ApprovalCancelled` on success.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "delegate", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + } + ] + }, + { + "lookup": "3215", + "name": "force_cancel_approval", + "docs": [ + "Cancel all of some asset approved for delegated transfer by a third-party account.", + "", + "Origin must be either ForceOrigin or Signed origin with the signer being the Admin", + "account of the asset `id`.", + "", + "Unreserves any deposit previously reserved by `approve_transfer` for the approval.", + "", + "- `id`: The identifier of the asset.", + "- `delegate`: The account delegated permission to transfer asset.", + "", + "Emits `ApprovalCancelled` on success.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "owner", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "delegate", + "type": 
"sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + } + ] + }, + { + "lookup": "3216", + "name": "transfer_approved", + "docs": [ + "Transfer some asset balance from a previously delegated account to some third-party", + "account.", + "", + "Origin must be Signed and there must be an approval in place by the `owner` to the", + "signer.", + "", + "If the entire amount approved for transfer is transferred, then any deposit previously", + "reserved by `approve_transfer` is unreserved.", + "", + "- `id`: The identifier of the asset.", + "- `owner`: The account which previously approved for a transfer of at least `amount` and", + "from which the asset balance will be withdrawn.", + "- `destination`: The account to which the asset balance of `amount` will be transferred.", + "- `amount`: The amount of assets to transfer.", + "", + "Emits `TransferredApproved` on success.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "owner", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "destination", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "amount", + "type": "compact", + "type_name": "Balance" + } + ] + } + ], + "calls_value": { + "type": 250 + }, + "events": [ + { + "lookup": "3200", + "name": "Created", + "docs": [ + "Some asset class was created. \\[asset_id, creator, owner\\]" + ], + "args": [ + "U32", + "AccountId", + "AccountId" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "AssetId", + "AccountId", + "AccountId" + ] + }, + { + "lookup": "3201", + "name": "Issued", + "docs": [ + "Some assets were issued. 
\\[asset_id, owner, total_supply\\]" + ], + "args": [ + "U32", + "AccountId", + "U128" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "AssetId", + "AccountId", + "Balance" + ] + }, + { + "lookup": "3202", + "name": "Transferred", + "docs": [ + "Some assets were transferred. \\[asset_id, from, to, amount\\]" + ], + "args": [ + "U32", + "AccountId", + "AccountId", + "U128" + ], + "args_name": [ + "", + "", + "", + "" + ], + "args_type_name": [ + "AssetId", + "AccountId", + "AccountId", + "Balance" + ] + }, + { + "lookup": "3203", + "name": "Burned", + "docs": [ + "Some assets were destroyed. \\[asset_id, owner, balance\\]" + ], + "args": [ + "U32", + "AccountId", + "U128" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "AssetId", + "AccountId", + "Balance" + ] + }, + { + "lookup": "3204", + "name": "TeamChanged", + "docs": [ + "The management team changed \\[asset_id, issuer, admin, freezer\\]" + ], + "args": [ + "U32", + "AccountId", + "AccountId", + "AccountId" + ], + "args_name": [ + "", + "", + "", + "" + ], + "args_type_name": [ + "AssetId", + "AccountId", + "AccountId", + "AccountId" + ] + }, + { + "lookup": "3205", + "name": "OwnerChanged", + "docs": [ + "The owner changed \\[asset_id, owner\\]" + ], + "args": [ + "U32", + "AccountId" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "AssetId", + "AccountId" + ] + }, + { + "lookup": "3206", + "name": "Frozen", + "docs": [ + "Some account `who` was frozen. \\[asset_id, who\\]" + ], + "args": [ + "U32", + "AccountId" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "AssetId", + "AccountId" + ] + }, + { + "lookup": "3207", + "name": "Thawed", + "docs": [ + "Some account `who` was thawed. \\[asset_id, who\\]" + ], + "args": [ + "U32", + "AccountId" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "AssetId", + "AccountId" + ] + }, + { + "lookup": "3208", + "name": "AssetFrozen", + "docs": [ + "Some asset `asset_id` was frozen. 
\\[asset_id\\]" + ], + "args": [ + "U32" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "AssetId" + ] + }, + { + "lookup": "3209", + "name": "AssetThawed", + "docs": [ + "Some asset `asset_id` was thawed. \\[asset_id\\]" + ], + "args": [ + "U32" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "AssetId" + ] + }, + { + "lookup": "320a", + "name": "Destroyed", + "docs": [ + "An asset class was destroyed." + ], + "args": [ + "U32" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "AssetId" + ] + }, + { + "lookup": "320b", + "name": "ForceCreated", + "docs": [ + "Some asset class was force-created. \\[asset_id, owner\\]" + ], + "args": [ + "U32", + "AccountId" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "AssetId", + "AccountId" + ] + }, + { + "lookup": "320c", + "name": "MetadataSet", + "docs": [ + "New metadata has been set for an asset. \\[asset_id, name, symbol, decimals, is_frozen\\]" + ], + "args": [ + "U32", + "Vec", + "Vec", + "U8", + "Bool" + ], + "args_name": [ + "", + "", + "", + "", + "" + ], + "args_type_name": [ + "AssetId", + "Bytes", + "Bytes", + "u8", + "bool" + ] + }, + { + "lookup": "320d", + "name": "MetadataCleared", + "docs": [ + "Metadata has been cleared for an asset. 
\\[asset_id\\]" + ], + "args": [ + "U32" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "AssetId" + ] + }, + { + "lookup": "320e", + "name": "ApprovedTransfer", + "docs": [ + "(Additional) funds have been approved for transfer to a destination account.", + "\\[asset_id, source, delegate, amount\\]" + ], + "args": [ + "U32", + "AccountId", + "AccountId", + "U128" + ], + "args_name": [ + "", + "", + "", + "" + ], + "args_type_name": [ + "AssetId", + "AccountId", + "AccountId", + "Balance" + ] + }, + { + "lookup": "320f", + "name": "ApprovalCancelled", + "docs": [ + "An approval for account `delegate` was cancelled by `owner`.", + "\\[id, owner, delegate\\]" + ], + "args": [ + "U32", + "AccountId", + "AccountId" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "AssetId", + "AccountId", + "AccountId" + ] + }, + { + "lookup": "3210", + "name": "TransferredApproved", + "docs": [ + "An `amount` was transferred in its entirety from `owner` to `destination` by", + "the approved `delegate`.", + "\\[id, owner, delegate, destination\\]" + ], + "args": [ + "U32", + "AccountId", + "AccountId", + "AccountId", + "U128" + ], + "args_name": [ + "", + "", + "", + "", + "" + ], + "args_type_name": [ + "AssetId", + "AccountId", + "AccountId", + "AccountId", + "Balance" + ] + }, + { + "lookup": "3211", + "name": "AssetStatusChanged", + "docs": [ + "An asset has had its attributes changed by the `Force` origin.", + "\\[id\\]" + ], + "args": [ + "U32" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "AssetId" + ] + } + ], + "events_value": { + "type": 86 + }, + "constants": [ + { + "name": "AssetDeposit", + "type": "U128", + "type_value": 6, + "constants_value": "0010a5d4e80000000000000000000000", + "docs": [ + " The basic amount of funds that must be reserved for an asset." 
+ ] + }, + { + "name": "MetadataDepositBase", + "type": "U128", + "type_value": 6, + "constants_value": "006125ac040000000000000000000000", + "docs": [ + " The basic amount of funds that must be reserved when adding metadata to your asset." + ] + }, + { + "name": "MetadataDepositPerByte", + "type": "U128", + "type_value": 6, + "constants_value": "40420f00000000000000000000000000", + "docs": [ + " The additional funds that must be reserved for the number of bytes you store in your", + " metadata." + ] + }, + { + "name": "ApprovalDeposit", + "type": "U128", + "type_value": 6, + "constants_value": "00ca9a3b000000000000000000000000", + "docs": [ + " The amount of funds that must be reserved when creating a new approval." + ] + }, + { + "name": "StringLimit", + "type": "U32", + "type_value": 4, + "constants_value": "32000000", + "docs": [ + " The maximum length of a name or symbol stored on-chain." + ] + } + ], + "errors": [ + { + "name": "BalanceLow", + "doc": [ + "Account balance must be greater than or equal to the transfer amount." + ] + }, + { + "name": "BalanceZero", + "doc": [ + "Balance should be non-zero." + ] + }, + { + "name": "NoPermission", + "doc": [ + "The signing account has no permission to do the operation." + ] + }, + { + "name": "Unknown", + "doc": [ + "The given asset ID is unknown." + ] + }, + { + "name": "Frozen", + "doc": [ + "The origin account is frozen." + ] + }, + { + "name": "InUse", + "doc": [ + "The asset ID is already taken." + ] + }, + { + "name": "BadWitness", + "doc": [ + "Invalid witness data given." + ] + }, + { + "name": "MinBalanceZero", + "doc": [ + "Minimum balance should be non-zero." + ] + }, + { + "name": "NoProvider", + "doc": [ + "No provider reference exists to allow a non-zero balance of a non-self-sufficient", + "asset." + ] + }, + { + "name": "BadMetadata", + "doc": [ + "Invalid metadata given." + ] + }, + { + "name": "Unapproved", + "doc": [ + "No approval exists that would allow the transfer." 
+ ] + }, + { + "name": "WouldDie", + "doc": [ + "The source account would not survive the transfer and it needs to stay alive." + ] + } + ], + "errors_value": { + "type": 276 + }, + "index": 50 + }, + { + "name": "Uniques", + "prefix": "Uniques", + "storage": [ + { + "name": "Class", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat" + ], + "key_vec": [ + "U32" + ], + "value": "pallet_uniques:types:ClassDetails", + "keys_id": 4, + "value_id": 277 + } + }, + "fallback": "0x00", + "docs": [ + " Details of an asset class." + ] + }, + { + "name": "Account", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat", + "Blake2_128Concat", + "Blake2_128Concat" + ], + "key_vec": [ + "AccountId", + "U32", + "U32" + ], + "value": "NULL", + "keys_id": 278, + "value_id": 82 + } + }, + "fallback": "0x00", + "docs": [ + " The assets held by any given account; set out this way so that assets owned by a single", + " account can be enumerated." + ] + }, + { + "name": "Asset", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat", + "Blake2_128Concat" + ], + "key_vec": [ + "U32", + "U32" + ], + "value": "pallet_uniques:types:InstanceDetails", + "keys_id": 97, + "value_id": 279 + } + }, + "fallback": "0x00", + "docs": [ + " The assets in existence and their ownership details." + ] + }, + { + "name": "ClassMetadataOf", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat" + ], + "key_vec": [ + "U32" + ], + "value": "pallet_uniques:types:ClassMetadata", + "keys_id": 4, + "value_id": 280 + } + }, + "fallback": "0x00", + "docs": [ + " Metadata of an asset class." 
+ ] + }, + { + "name": "InstanceMetadataOf", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat", + "Blake2_128Concat" + ], + "key_vec": [ + "U32", + "U32" + ], + "value": "pallet_uniques:types:InstanceMetadata", + "keys_id": 97, + "value_id": 281 + } + }, + "fallback": "0x00", + "docs": [ + " Metadata of an asset instance." + ] + }, + { + "name": "Attribute", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat", + "Blake2_128Concat", + "Blake2_128Concat" + ], + "key_vec": [ + "U32", + "option", + "Vec" + ], + "value": "Tuple:VecU128", + "keys_id": 282, + "value_id": 283 + } + }, + "fallback": "0x00", + "docs": [ + " Metadata of an asset class." + ] + } + ], + "calls": [ + { + "lookup": "3300", + "name": "create", + "docs": [ + "Issue a new class of non-fungible assets from a public origin.", + "", + "This new asset class has no assets initially and its owner is the origin.", + "", + "The origin must be Signed and the sender must have sufficient funds free.", + "", + "`AssetDeposit` funds of sender are reserved.", + "", + "Parameters:", + "- `class`: The identifier of the new asset class. This must not be currently in use.", + "- `admin`: The admin of this class of assets. 
The admin is the initial address of each", + "member of the asset class's admin team.", + "", + "Emits `Created` event when successful.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "admin", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + } + ] + }, + { + "lookup": "3301", + "name": "force_create", + "docs": [ + "Issue a new class of non-fungible assets from a privileged origin.", + "", + "This new asset class has no assets initially.", + "", + "The origin must conform to `ForceOrigin`.", + "", + "Unlike `create`, no funds are reserved.", + "", + "- `class`: The identifier of the new asset. This must not be currently in use.", + "- `owner`: The owner of this class of assets. The owner has full superuser permissions", + "over this asset, but may later change and configure the permissions using", + "`transfer_ownership` and `set_team`.", + "", + "Emits `ForceCreated` event when successful.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "owner", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "free_holding", + "type": "Bool", + "type_name": "bool" + } + ] + }, + { + "lookup": "3302", + "name": "destroy", + "docs": [ + "Destroy a class of fungible assets.", + "", + "The origin must conform to `ForceOrigin` or must be `Signed` and the sender must be the", + "owner of the asset `class`.", + "", + "- `class`: The identifier of the asset class to be destroyed.", + "- `witness`: Information on the instances minted in the asset class. 
This must be", + "correct.", + "", + "Emits `Destroyed` event when successful.", + "", + "Weight: `O(n + m)` where:", + "- `n = witness.instances`", + "- `m = witness.instance_metadatas`", + "- `a = witness.attributes`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "witness", + "type": "pallet_uniques:types:DestroyWitness", + "type_name": "DestroyWitness" + } + ] + }, + { + "lookup": "3303", + "name": "mint", + "docs": [ + "Mint an asset instance of a particular class.", + "", + "The origin must be Signed and the sender must be the Issuer of the asset `class`.", + "", + "- `class`: The class of the asset to be minted.", + "- `instance`: The instance value of the asset to be minted.", + "- `beneficiary`: The initial owner of the minted asset.", + "", + "Emits `Issued` event when successful.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "instance", + "type": "compact", + "type_name": "InstanceId" + }, + { + "name": "owner", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + } + ] + }, + { + "lookup": "3304", + "name": "burn", + "docs": [ + "Destroy a single asset instance.", + "", + "Origin must be Signed and the sender should be the Admin of the asset `class`.", + "", + "- `class`: The class of the asset to be burned.", + "- `instance`: The instance of the asset to be burned.", + "- `check_owner`: If `Some` then the operation will fail with `WrongOwner` unless the", + " asset is owned by this value.", + "", + "Emits `Burned` with the actual amount burned.", + "", + "Weight: `O(1)`", + "Modes: `check_owner.is_some()`." 
+ ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "instance", + "type": "compact", + "type_name": "InstanceId" + }, + { + "name": "check_owner", + "type": "option", + "type_name": "Option<::Source>" + } + ] + }, + { + "lookup": "3305", + "name": "transfer", + "docs": [ + "Move an asset from the sender account to another.", + "", + "Origin must be Signed and the signing account must be either:", + "- the Admin of the asset `class`;", + "- the Owner of the asset `instance`;", + "- the approved delegate for the asset `instance` (in this case, the approval is reset).", + "", + "Arguments:", + "- `class`: The class of the asset to be transferred.", + "- `instance`: The instance of the asset to be transferred.", + "- `dest`: The account to receive ownership of the asset.", + "", + "Emits `Transferred`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "instance", + "type": "compact", + "type_name": "InstanceId" + }, + { + "name": "dest", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + } + ] + }, + { + "lookup": "3306", + "name": "redeposit", + "docs": [ + "Reevaluate the deposits on some assets.", + "", + "Origin must be Signed and the sender should be the Owner of the asset `class`.", + "", + "- `class`: The class of the asset to be frozen.", + "- `instances`: The instances of the asset class whose deposits will be reevaluated.", + "", + "NOTE: This exists as a best-effort function. Any asset instances which are unknown or", + "in the case that the owner account does not have reservable funds to pay for a", + "deposit increase are ignored. 
Generally the owner isn't going to call this on instances", + "whose existing deposit is less than the refreshed deposit as it would only cost them,", + "so it's of little consequence.", + "", + "It will still return an error in the case that the class is unknown of the signer is", + "not permitted to call it.", + "", + "Weight: `O(instances.len())`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "instances", + "type": "Vec", + "type_name": "Vec" + } + ] + }, + { + "lookup": "3307", + "name": "freeze", + "docs": [ + "Disallow further unprivileged transfer of an asset instance.", + "", + "Origin must be Signed and the sender should be the Freezer of the asset `class`.", + "", + "- `class`: The class of the asset to be frozen.", + "- `instance`: The instance of the asset to be frozen.", + "", + "Emits `Frozen`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "instance", + "type": "compact", + "type_name": "InstanceId" + } + ] + }, + { + "lookup": "3308", + "name": "thaw", + "docs": [ + "Re-allow unprivileged transfer of an asset instance.", + "", + "Origin must be Signed and the sender should be the Freezer of the asset `class`.", + "", + "- `class`: The class of the asset to be thawed.", + "- `instance`: The instance of the asset to be thawed.", + "", + "Emits `Thawed`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "instance", + "type": "compact", + "type_name": "InstanceId" + } + ] + }, + { + "lookup": "3309", + "name": "freeze_class", + "docs": [ + "Disallow further unprivileged transfers for a whole asset class.", + "", + "Origin must be Signed and the sender should be the Freezer of the asset `class`.", + "", + "- `class`: The asset class to be frozen.", + "", + "Emits `ClassFrozen`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + 
"name": "class", + "type": "compact", + "type_name": "ClassId" + } + ] + }, + { + "lookup": "330a", + "name": "thaw_class", + "docs": [ + "Re-allow unprivileged transfers for a whole asset class.", + "", + "Origin must be Signed and the sender should be the Admin of the asset `class`.", + "", + "- `class`: The class to be thawed.", + "", + "Emits `ClassThawed`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + } + ] + }, + { + "lookup": "330b", + "name": "transfer_ownership", + "docs": [ + "Change the Owner of an asset class.", + "", + "Origin must be Signed and the sender should be the Owner of the asset `class`.", + "", + "- `class`: The asset class whose owner should be changed.", + "- `owner`: The new Owner of this asset class.", + "", + "Emits `OwnerChanged`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "owner", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + } + ] + }, + { + "lookup": "330c", + "name": "set_team", + "docs": [ + "Change the Issuer, Admin and Freezer of an asset class.", + "", + "Origin must be Signed and the sender should be the Owner of the asset `class`.", + "", + "- `class`: The asset class whose team should be changed.", + "- `issuer`: The new Issuer of this asset class.", + "- `admin`: The new Admin of this asset class.", + "- `freezer`: The new Freezer of this asset class.", + "", + "Emits `TeamChanged`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "issuer", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "admin", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "freezer", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + } + ] + }, + { + "lookup": "330d", + 
"name": "approve_transfer", + "docs": [ + "Approve an instance to be transferred by a delegated third-party account.", + "", + "Origin must be Signed and must be the owner of the asset `instance`.", + "", + "- `class`: The class of the asset to be approved for delegated transfer.", + "- `instance`: The instance of the asset to be approved for delegated transfer.", + "- `delegate`: The account to delegate permission to transfer the asset.", + "", + "Emits `ApprovedTransfer` on success.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "instance", + "type": "compact", + "type_name": "InstanceId" + }, + { + "name": "delegate", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + } + ] + }, + { + "lookup": "330e", + "name": "cancel_approval", + "docs": [ + "Cancel the prior approval for the transfer of an asset by a delegate.", + "", + "Origin must be either:", + "- the `Force` origin;", + "- `Signed` with the signer being the Admin of the asset `class`;", + "- `Signed` with the signer being the Owner of the asset `instance`;", + "", + "Arguments:", + "- `class`: The class of the asset of whose approval will be cancelled.", + "- `instance`: The instance of the asset of whose approval will be cancelled.", + "- `maybe_check_delegate`: If `Some` will ensure that the given account is the one to", + " which permission of transfer is delegated.", + "", + "Emits `ApprovalCancelled` on success.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "instance", + "type": "compact", + "type_name": "InstanceId" + }, + { + "name": "maybe_check_delegate", + "type": "option", + "type_name": "Option<::Source>" + } + ] + }, + { + "lookup": "330f", + "name": "force_asset_status", + "docs": [ + "Alter the attributes of a given asset.", + "", + "Origin must be `ForceOrigin`.", + "", + "- `class`: The 
identifier of the asset.", + "- `owner`: The new Owner of this asset.", + "- `issuer`: The new Issuer of this asset.", + "- `admin`: The new Admin of this asset.", + "- `freezer`: The new Freezer of this asset.", + "- `free_holding`: Whether a deposit is taken for holding an instance of this asset", + " class.", + "- `is_frozen`: Whether this asset class is frozen except for permissioned/admin", + "instructions.", + "", + "Emits `AssetStatusChanged` with the identity of the asset.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "owner", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "issuer", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "admin", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "freezer", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "free_holding", + "type": "Bool", + "type_name": "bool" + }, + { + "name": "is_frozen", + "type": "Bool", + "type_name": "bool" + } + ] + }, + { + "lookup": "3310", + "name": "set_attribute", + "docs": [ + "Set an attribute for an asset class or instance.", + "", + "Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the", + "asset `class`.", + "", + "If the origin is Signed, then funds of signer are reserved according to the formula:", + "`MetadataDepositBase + DepositPerByte * (key.len + value.len)` taking into", + "account any already reserved funds.", + "", + "- `class`: The identifier of the asset class whose instance's metadata to set.", + "- `maybe_instance`: The identifier of the asset instance whose metadata to set.", + "- `key`: The key of the attribute.", + "- `value`: The value to which to set the attribute.", + "", + "Emits `AttributeSet`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + 
"type": "compact", + "type_name": "ClassId" + }, + { + "name": "maybe_instance", + "type": "option", + "type_name": "Option" + }, + { + "name": "key", + "type": "Vec", + "type_name": "BoundedVec" + }, + { + "name": "value", + "type": "Vec", + "type_name": "BoundedVec" + } + ] + }, + { + "lookup": "3311", + "name": "clear_attribute", + "docs": [ + "Set an attribute for an asset class or instance.", + "", + "Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the", + "asset `class`.", + "", + "If the origin is Signed, then funds of signer are reserved according to the formula:", + "`MetadataDepositBase + DepositPerByte * (key.len + value.len)` taking into", + "account any already reserved funds.", + "", + "- `class`: The identifier of the asset class whose instance's metadata to set.", + "- `instance`: The identifier of the asset instance whose metadata to set.", + "- `key`: The key of the attribute.", + "- `value`: The value to which to set the attribute.", + "", + "Emits `AttributeSet`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "maybe_instance", + "type": "option", + "type_name": "Option" + }, + { + "name": "key", + "type": "Vec", + "type_name": "BoundedVec" + } + ] + }, + { + "lookup": "3312", + "name": "set_metadata", + "docs": [ + "Set the metadata for an asset instance.", + "", + "Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the", + "asset `class`.", + "", + "If the origin is Signed, then funds of signer are reserved according to the formula:", + "`MetadataDepositBase + DepositPerByte * data.len` taking into", + "account any already reserved funds.", + "", + "- `class`: The identifier of the asset class whose instance's metadata to set.", + "- `instance`: The identifier of the asset instance whose metadata to set.", + "- `data`: The general information of this asset. 
Limited in length by `StringLimit`.", + "- `is_frozen`: Whether the metadata should be frozen against further changes.", + "", + "Emits `MetadataSet`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "instance", + "type": "compact", + "type_name": "InstanceId" + }, + { + "name": "data", + "type": "Vec", + "type_name": "BoundedVec" + }, + { + "name": "is_frozen", + "type": "Bool", + "type_name": "bool" + } + ] + }, + { + "lookup": "3313", + "name": "clear_metadata", + "docs": [ + "Clear the metadata for an asset instance.", + "", + "Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the", + "asset `instance`.", + "", + "Any deposit is freed for the asset class owner.", + "", + "- `class`: The identifier of the asset class whose instance's metadata to clear.", + "- `instance`: The identifier of the asset instance whose metadata to clear.", + "", + "Emits `MetadataCleared`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "instance", + "type": "compact", + "type_name": "InstanceId" + } + ] + }, + { + "lookup": "3314", + "name": "set_class_metadata", + "docs": [ + "Set the metadata for an asset class.", + "", + "Origin must be either `ForceOrigin` or `Signed` and the sender should be the Owner of", + "the asset `class`.", + "", + "If the origin is `Signed`, then funds of signer are reserved according to the formula:", + "`MetadataDepositBase + DepositPerByte * data.len` taking into", + "account any already reserved funds.", + "", + "- `class`: The identifier of the asset whose metadata to update.", + "- `data`: The general information of this asset. 
Limited in length by `StringLimit`.", + "- `is_frozen`: Whether the metadata should be frozen against further changes.", + "", + "Emits `ClassMetadataSet`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "data", + "type": "Vec", + "type_name": "BoundedVec" + }, + { + "name": "is_frozen", + "type": "Bool", + "type_name": "bool" + } + ] + }, + { + "lookup": "3315", + "name": "clear_class_metadata", + "docs": [ + "Clear the metadata for an asset class.", + "", + "Origin must be either `ForceOrigin` or `Signed` and the sender should be the Owner of", + "the asset `class`.", + "", + "Any deposit is freed for the asset class owner.", + "", + "- `class`: The identifier of the asset class whose metadata to clear.", + "", + "Emits `ClassMetadataCleared`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + } + ] + } + ], + "calls_value": { + "type": 252 + }, + "events": [ + { + "lookup": "3300", + "name": "Created", + "docs": [ + "An asset class was created. \\[ class, creator, owner \\]" + ], + "args": [ + "U32", + "AccountId", + "AccountId" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "ClassId", + "AccountId", + "AccountId" + ] + }, + { + "lookup": "3301", + "name": "ForceCreated", + "docs": [ + "An asset class was force-created. \\[ class, owner \\]" + ], + "args": [ + "U32", + "AccountId" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "ClassId", + "AccountId" + ] + }, + { + "lookup": "3302", + "name": "Destroyed", + "docs": [ + "An asset `class` was destroyed. \\[ class \\]" + ], + "args": [ + "U32" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "ClassId" + ] + }, + { + "lookup": "3303", + "name": "Issued", + "docs": [ + "An asset `instance` was issued. 
\\[ class, instance, owner \\]" + ], + "args": [ + "U32", + "U32", + "AccountId" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "ClassId", + "InstanceId", + "AccountId" + ] + }, + { + "lookup": "3304", + "name": "Transferred", + "docs": [ + "An asset `instance` was transferred. \\[ class, instance, from, to \\]" + ], + "args": [ + "U32", + "U32", + "AccountId", + "AccountId" + ], + "args_name": [ + "", + "", + "", + "" + ], + "args_type_name": [ + "ClassId", + "InstanceId", + "AccountId", + "AccountId" + ] + }, + { + "lookup": "3305", + "name": "Burned", + "docs": [ + "An asset `instance` was destroyed. \\[ class, instance, owner \\]" + ], + "args": [ + "U32", + "U32", + "AccountId" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "ClassId", + "InstanceId", + "AccountId" + ] + }, + { + "lookup": "3306", + "name": "Frozen", + "docs": [ + "Some asset `instance` was frozen. \\[ class, instance \\]" + ], + "args": [ + "U32", + "U32" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "ClassId", + "InstanceId" + ] + }, + { + "lookup": "3307", + "name": "Thawed", + "docs": [ + "Some asset `instance` was thawed. \\[ class, instance \\]" + ], + "args": [ + "U32", + "U32" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "ClassId", + "InstanceId" + ] + }, + { + "lookup": "3308", + "name": "ClassFrozen", + "docs": [ + "Some asset `class` was frozen. \\[ class \\]" + ], + "args": [ + "U32" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "ClassId" + ] + }, + { + "lookup": "3309", + "name": "ClassThawed", + "docs": [ + "Some asset `class` was thawed. 
\\[ class \\]" + ], + "args": [ + "U32" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "ClassId" + ] + }, + { + "lookup": "330a", + "name": "OwnerChanged", + "docs": [ + "The owner changed \\[ class, new_owner \\]" + ], + "args": [ + "U32", + "AccountId" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "ClassId", + "AccountId" + ] + }, + { + "lookup": "330b", + "name": "TeamChanged", + "docs": [ + "The management team changed \\[ class, issuer, admin, freezer \\]" + ], + "args": [ + "U32", + "AccountId", + "AccountId", + "AccountId" + ], + "args_name": [ + "", + "", + "", + "" + ], + "args_type_name": [ + "ClassId", + "AccountId", + "AccountId", + "AccountId" + ] + }, + { + "lookup": "330c", + "name": "ApprovedTransfer", + "docs": [ + "An `instance` of an asset `class` has been approved by the `owner` for transfer by a", + "`delegate`.", + "\\[ class, instance, owner, delegate \\]" + ], + "args": [ + "U32", + "U32", + "AccountId", + "AccountId" + ], + "args_name": [ + "", + "", + "", + "" + ], + "args_type_name": [ + "ClassId", + "InstanceId", + "AccountId", + "AccountId" + ] + }, + { + "lookup": "330d", + "name": "ApprovalCancelled", + "docs": [ + "An approval for a `delegate` account to transfer the `instance` of an asset `class` was", + "cancelled by its `owner`.", + "\\[ class, instance, owner, delegate \\]" + ], + "args": [ + "U32", + "U32", + "AccountId", + "AccountId" + ], + "args_name": [ + "", + "", + "", + "" + ], + "args_type_name": [ + "ClassId", + "InstanceId", + "AccountId", + "AccountId" + ] + }, + { + "lookup": "330e", + "name": "AssetStatusChanged", + "docs": [ + "An asset `class` has had its attributes changed by the `Force` origin.", + "\\[ class \\]" + ], + "args": [ + "U32" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "ClassId" + ] + }, + { + "lookup": "330f", + "name": "ClassMetadataSet", + "docs": [ + "New metadata has been set for an asset class. 
\\[ class, data, is_frozen \\]" + ], + "args": [ + "U32", + "Vec", + "Bool" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "ClassId", + "BoundedVec", + "bool" + ] + }, + { + "lookup": "3310", + "name": "ClassMetadataCleared", + "docs": [ + "Metadata has been cleared for an asset class. \\[ class \\]" + ], + "args": [ + "U32" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "ClassId" + ] + }, + { + "lookup": "3311", + "name": "MetadataSet", + "docs": [ + "New metadata has been set for an asset instance.", + "\\[ class, instance, data, is_frozen \\]" + ], + "args": [ + "U32", + "U32", + "Vec", + "Bool" + ], + "args_name": [ + "", + "", + "", + "" + ], + "args_type_name": [ + "ClassId", + "InstanceId", + "BoundedVec", + "bool" + ] + }, + { + "lookup": "3312", + "name": "MetadataCleared", + "docs": [ + "Metadata has been cleared for an asset instance. \\[ class, instance \\]" + ], + "args": [ + "U32", + "U32" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "ClassId", + "InstanceId" + ] + }, + { + "lookup": "3313", + "name": "Redeposited", + "docs": [ + "Metadata has been cleared for an asset instance. 
\\[ class, successful_instances \\]" + ], + "args": [ + "U32", + "Vec" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "ClassId", + "Vec" + ] + }, + { + "lookup": "3314", + "name": "AttributeSet", + "docs": [ + "New attribute metadata has been set for an asset class or instance.", + "\\[ class, maybe_instance, key, value \\]" + ], + "args": [ + "U32", + "option", + "Vec", + "Vec" + ], + "args_name": [ + "", + "", + "", + "" + ], + "args_type_name": [ + "ClassId", + "Option", + "BoundedVec", + "BoundedVec" + ] + }, + { + "lookup": "3315", + "name": "AttributeCleared", + "docs": [ + "Attribute metadata has been cleared for an asset class or instance.", + "\\[ class, maybe_instance, key, maybe_value \\]" + ], + "args": [ + "U32", + "option", + "Vec" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "ClassId", + "Option", + "BoundedVec" + ] + } + ], + "events_value": { + "type": 88 + }, + "constants": [ + { + "name": "ClassDeposit", + "type": "U128", + "type_value": 6, + "constants_value": "00e40b54020000000000000000000000", + "docs": [ + " The basic amount of funds that must be reserved for an asset class." + ] + }, + { + "name": "InstanceDeposit", + "type": "U128", + "type_value": 6, + "constants_value": "00e1f505000000000000000000000000", + "docs": [ + " The basic amount of funds that must be reserved for an asset instance." + ] + }, + { + "name": "MetadataDepositBase", + "type": "U128", + "type_value": 6, + "constants_value": "402ac8af040000000000000000000000", + "docs": [ + " The basic amount of funds that must be reserved when adding metadata to your asset." + ] + }, + { + "name": "AttributeDepositBase", + "type": "U128", + "type_value": 6, + "constants_value": "00c817a8040000000000000000000000", + "docs": [ + " The basic amount of funds that must be reserved when adding an attribute to an asset." 
+ ] + }, + { + "name": "DepositPerByte", + "type": "U128", + "type_value": 6, + "constants_value": "40420f00000000000000000000000000", + "docs": [ + " The additional funds that must be reserved for the number of bytes store in metadata,", + " either \"normal\" metadata or attribute metadata." + ] + }, + { + "name": "StringLimit", + "type": "U32", + "type_value": 4, + "constants_value": "80000000", + "docs": [ + " The maximum length of data stored on-chain." + ] + }, + { + "name": "KeyLimit", + "type": "U32", + "type_value": 4, + "constants_value": "20000000", + "docs": [ + " The maximum length of an attribute key." + ] + }, + { + "name": "ValueLimit", + "type": "U32", + "type_value": 4, + "constants_value": "40000000", + "docs": [ + " The maximum length of an attribute value." + ] + } + ], + "errors": [ + { + "name": "NoPermission", + "doc": [ + "The signing account has no permission to do the operation." + ] + }, + { + "name": "Unknown", + "doc": [ + "The given asset ID is unknown." + ] + }, + { + "name": "AlreadyExists", + "doc": [ + "The asset instance ID has already been used for an asset." + ] + }, + { + "name": "WrongOwner", + "doc": [ + "The owner turned out to be different to what was expected." + ] + }, + { + "name": "BadWitness", + "doc": [ + "Invalid witness data given." + ] + }, + { + "name": "InUse", + "doc": [ + "The asset ID is already taken." + ] + }, + { + "name": "Frozen", + "doc": [ + "The asset instance or class is frozen." + ] + }, + { + "name": "WrongDelegate", + "doc": [ + "The delegate turned out to be different to what was expected." + ] + }, + { + "name": "NoDelegate", + "doc": [ + "There is no delegate approved." + ] + }, + { + "name": "Unapproved", + "doc": [ + "No approval exists that would allow the transfer." 
+ ] + } + ], + "errors_value": { + "type": 284 + }, + "index": 51 + } +] \ No newline at end of file diff --git a/src/demo_substrate_events/abi/assethub/v700.json b/src/demo_substrate_events/abi/assethub/v700.json new file mode 100644 index 000000000..375fb6e6b --- /dev/null +++ b/src/demo_substrate_events/abi/assethub/v700.json @@ -0,0 +1,8956 @@ +[ + { + "name": "System", + "prefix": "System", + "storage": [ + { + "name": "Account", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat" + ], + "key_vec": [ + "AccountId" + ], + "value": "frame_system:AccountInfo", + "keys_id": 0, + "value_id": 3 + } + }, + "fallback": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "docs": [ + " The full account information for a particular account ID." + ] + }, + { + "name": "ExtrinsicCount", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "U32", + "PlainTypeValue": 4 + }, + "fallback": "0x00", + "docs": [ + " Total extrinsics count for the current block." + ] + }, + { + "name": "BlockWeight", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "frame_support:weights:PerDispatchClass@7", + "PlainTypeValue": 7 + }, + "fallback": "0x000000000000000000000000000000000000000000000000", + "docs": [ + " The current weight for the block." + ] + }, + { + "name": "AllExtrinsicsLen", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "U32", + "PlainTypeValue": 4 + }, + "fallback": "0x00", + "docs": [ + " Total length (in bytes) for all extrinsics put together, for the current block." 
+ ] + }, + { + "name": "BlockHash", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Twox64Concat" + ], + "key_vec": [ + "U32" + ], + "value": "H256", + "keys_id": 4, + "value_id": 9 + } + }, + "fallback": "0x0000000000000000000000000000000000000000000000000000000000000000", + "docs": [ + " Map of block numbers to block hashes." + ] + }, + { + "name": "ExtrinsicData", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Twox64Concat" + ], + "key_vec": [ + "U32" + ], + "value": "Vec", + "keys_id": 4, + "value_id": 10 + } + }, + "fallback": "0x00", + "docs": [ + " Extrinsics data for the current block (maps an extrinsic's index to its data)." + ] + }, + { + "name": "Number", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U32", + "PlainTypeValue": 4 + }, + "fallback": "0x00000000", + "docs": [ + " The current block number being processed. Set by `execute_block`." + ] + }, + { + "name": "ParentHash", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "H256", + "PlainTypeValue": 9 + }, + "fallback": "0x0000000000000000000000000000000000000000000000000000000000000000", + "docs": [ + " Hash of the previous block." + ] + }, + { + "name": "Digest", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 11 + }, + "fallback": "0x00", + "docs": [ + " Digest of the current block, also part of the block header." + ] + }, + { + "name": "Events", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 15 + }, + "fallback": "0x00", + "docs": [ + " Events deposited for the current block.", + "", + " NOTE: This storage item is explicitly unbounded since it is never intended to be read", + " from within the runtime." 
+ ] + }, + { + "name": "EventCount", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U32", + "PlainTypeValue": 4 + }, + "fallback": "0x00000000", + "docs": [ + " The number of events in the `Events` list." + ] + }, + { + "name": "EventTopics", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat" + ], + "key_vec": [ + "H256" + ], + "value": "Vec", + "keys_id": 9, + "value_id": 95 + } + }, + "fallback": "0x00", + "docs": [ + " Mapping between a topic (represented by T::Hash) and a vector of indexes", + " of events in the `>` list.", + "", + " All topic vectors have deterministic storage locations depending on the topic. This", + " allows light-clients to leverage the changes trie storage tracking mechanism and", + " in case of changes fetch the list of events of interest.", + "", + " The value has the type `(T::BlockNumber, EventIndex)` because if we used only just", + " the `EventIndex` then in case if the topic has the same contents on the next block", + " no notification will be triggered thus the event might be lost." + ] + }, + { + "name": "LastRuntimeUpgrade", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "frame_system:LastRuntimeUpgradeInfo", + "PlainTypeValue": 97 + }, + "fallback": "0x00", + "docs": [ + " Stores the `spec_version` and `spec_name` of when the last runtime upgrade happened." + ] + }, + { + "name": "UpgradedToU32RefCount", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Bool", + "PlainTypeValue": 86 + }, + "fallback": "0x00", + "docs": [ + " True if we have upgraded so that `type RefCount` is `u32`. False (default) if not." 
+ ] + }, + { + "name": "UpgradedToTripleRefCount", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Bool", + "PlainTypeValue": 86 + }, + "fallback": "0x00", + "docs": [ + " True if we have upgraded so that AccountInfo contains three types of `RefCount`. False", + " (default) if not." + ] + }, + { + "name": "ExecutionPhase", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "frame_system:Phase", + "PlainTypeValue": 93 + }, + "fallback": "0x00", + "docs": [ + " The execution phase of the block." + ] + } + ], + "calls": [ + { + "lookup": "0000", + "name": "fill_block", + "docs": [ + "A dispatch that will fill the block weight up to the given ratio." + ], + "args": [ + { + "name": "ratio", + "type": "U32", + "type_name": "Perbill" + } + ] + }, + { + "lookup": "0001", + "name": "remark", + "docs": [ + "Make some on-chain remark.", + "", + "# ", + "- `O(1)`", + "# " + ], + "args": [ + { + "name": "remark", + "type": "Vec", + "type_name": "Bytes" + } + ] + }, + { + "lookup": "0002", + "name": "set_heap_pages", + "docs": [ + "Set the number of pages in the WebAssembly environment's heap." + ], + "args": [ + { + "name": "pages", + "type": "U64", + "type_name": "u64" + } + ] + }, + { + "lookup": "0003", + "name": "set_code", + "docs": [ + "Set the new runtime code.", + "", + "# ", + "- `O(C + S)` where `C` length of `code` and `S` complexity of `can_set_code`", + "- 1 call to `can_set_code`: `O(S)` (calls `sp_io::misc::runtime_version` which is", + " expensive).", + "- 1 storage write (codec `O(C)`).", + "- 1 digest item.", + "- 1 event.", + "The weight of this function is dependent on the runtime, but generally this is very", + "expensive. 
We will treat this as a full block.", + "# " + ], + "args": [ + { + "name": "code", + "type": "Vec", + "type_name": "Bytes" + } + ] + }, + { + "lookup": "0004", + "name": "set_code_without_checks", + "docs": [ + "Set the new runtime code without doing any checks of the given `code`.", + "", + "# ", + "- `O(C)` where `C` length of `code`", + "- 1 storage write (codec `O(C)`).", + "- 1 digest item.", + "- 1 event.", + "The weight of this function is dependent on the runtime. We will treat this as a full", + "block. # " + ], + "args": [ + { + "name": "code", + "type": "Vec", + "type_name": "Bytes" + } + ] + }, + { + "lookup": "0005", + "name": "set_storage", + "docs": [ + "Set some items of storage." + ], + "args": [ + { + "name": "items", + "type": "VecVec>", + "type_name": "Vec" + } + ] + }, + { + "lookup": "0006", + "name": "kill_storage", + "docs": [ + "Kill some items from storage." + ], + "args": [ + { + "name": "keys", + "type": "Vec>", + "type_name": "Vec" + } + ] + }, + { + "lookup": "0007", + "name": "kill_prefix", + "docs": [ + "Kill all storage items with a key that starts with the given prefix.", + "", + "**NOTE:** We rely on the Root origin to provide us the number of subkeys under", + "the prefix we are removing to accurately calculate the weight of this function." + ], + "args": [ + { + "name": "prefix", + "type": "Vec", + "type_name": "Key" + }, + { + "name": "subkeys", + "type": "U32", + "type_name": "u32" + } + ] + }, + { + "lookup": "0008", + "name": "remark_with_event", + "docs": [ + "Make some on-chain remark and emit event." + ], + "args": [ + { + "name": "remark", + "type": "Vec", + "type_name": "Bytes" + } + ] + } + ], + "calls_value": { + "type": 99 + }, + "events": [ + { + "lookup": "0000", + "name": "ExtrinsicSuccess", + "docs": [ + "An extrinsic completed successfully." 
+ ], + "args": [ + "frame_support:weights:DispatchInfo" + ], + "args_name": [ + "dispatch_info" + ], + "args_type_name": [ + "DispatchInfo" + ] + }, + { + "lookup": "0001", + "name": "ExtrinsicFailed", + "docs": [ + "An extrinsic failed." + ], + "args": [ + "sp_runtime:DispatchError", + "frame_support:weights:DispatchInfo" + ], + "args_name": [ + "dispatch_error", + "dispatch_info" + ], + "args_type_name": [ + "DispatchError", + "DispatchInfo" + ] + }, + { + "lookup": "0002", + "name": "CodeUpdated", + "docs": [ + "`:code` was updated." + ], + "args": null + }, + { + "lookup": "0003", + "name": "NewAccount", + "docs": [ + "A new account was created." + ], + "args": [ + "AccountId" + ], + "args_name": [ + "account" + ], + "args_type_name": [ + "AccountId" + ] + }, + { + "lookup": "0004", + "name": "KilledAccount", + "docs": [ + "An account was reaped." + ], + "args": [ + "AccountId" + ], + "args_name": [ + "account" + ], + "args_type_name": [ + "AccountId" + ] + }, + { + "lookup": "0005", + "name": "Remarked", + "docs": [ + "On on-chain remark happened." + ], + "args": [ + "AccountId", + "H256" + ], + "args_name": [ + "sender", + "hash" + ], + "args_type_name": [ + "AccountId", + "Hash" + ] + } + ], + "events_value": { + "type": 18 + }, + "constants": [ + { + "name": "BlockWeights", + "type": "frame_system:limits:BlockWeights", + "type_value": 104, + "constants_value": "00f2052a010000000088526a74000000405973070000000001c0d22c76510000000100e6bd4f57000000010000000000000000405973070000000001c074c1906e000000010088526a740000000100a2941a1d0000004059730700000000000000", + "docs": [ + " Block & extrinsics weights: base values and limits." + ] + }, + { + "name": "BlockLength", + "type": "frame_support:weights:PerDispatchClass@109", + "type_value": 108, + "constants_value": "00003c000000500000005000", + "docs": [ + " The maximum length of a block (in bytes)." 
+ ] + }, + { + "name": "BlockHashCount", + "type": "U32", + "type_value": 4, + "constants_value": "60090000", + "docs": [ + " Maximum number of block number to block hash mappings to keep (oldest pruned first)." + ] + }, + { + "name": "DbWeight", + "type": "frame_support:weights:RuntimeDbWeight", + "type_value": 110, + "constants_value": "40787d010000000000e1f50500000000", + "docs": [ + " The weight of runtime database operations the runtime can invoke." + ] + }, + { + "name": "Version", + "type": "sp_version:RuntimeVersion", + "type_value": 111, + "constants_value": "2473746174656d696e742473746174656d696e7401000000bc0200000000000028dd718d5cc53262d401000000df6acb689907609b0400000037e397fc7c91f5e40100000040fe3ad401f8959a05000000d2bc9897eed08f1503000000f78b278be53f454c02000000ab3c0572291feb8b01000000bc9d89904f5b923f0100000037c8bb1350a9a2a801000000ea93e3f16f3d6962020000000500000000", + "docs": [ + " Get the chain's current version." + ] + }, + { + "name": "SS58Prefix", + "type": "U16", + "type_value": 84, + "constants_value": "0000", + "docs": [ + " The designated SS85 prefix of this chain.", + "", + " This replaces the \"ss58Format\" property declared in the chain spec. Reason is", + " that the runtime should know about the prefix in order to make use of it as", + " an identifier of the chain." + ] + } + ], + "errors": [ + { + "name": "InvalidSpecName", + "doc": [ + "The name of specification does not match between the current runtime", + "and the new runtime." + ] + }, + { + "name": "SpecVersionNeedsToIncrease", + "doc": [ + "The specification version is not allowed to decrease between the current runtime", + "and the new runtime." + ] + }, + { + "name": "FailedToExtractRuntimeVersion", + "doc": [ + "Failed to extract the runtime version from the new runtime.", + "", + "Either calling `Core_version` or decoding `RuntimeVersion` failed." + ] + }, + { + "name": "NonDefaultComposite", + "doc": [ + "Suicide called when the account has non-default composite data." 
+ ] + }, + { + "name": "NonZeroRefCount", + "doc": [ + "There is a non-zero reference count preventing the account from being purged." + ] + }, + { + "name": "CallFiltered", + "doc": [ + "The origin filter prevent the call to be dispatched." + ] + } + ], + "errors_value": { + "type": 115 + }, + "index": 0 + }, + { + "name": "ParachainSystem", + "prefix": "ParachainSystem", + "storage": [ + { + "name": "PendingValidationCode", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 10 + }, + "fallback": "0x00", + "docs": [ + " In case of a scheduled upgrade, this storage field contains the validation code to be applied.", + "", + " As soon as the relay chain gives us the go-ahead signal, we will overwrite the [`:code`][well_known_keys::CODE]", + " which will result the next block process with the new validation code. This concludes the upgrade process.", + "", + " [well_known_keys::CODE]: sp_core::storage::well_known_keys::CODE" + ] + }, + { + "name": "NewValidationCode", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 10 + }, + "fallback": "0x00", + "docs": [ + " Validation code that is set by the parachain and is to be communicated to collator and", + " consequently the relay-chain.", + "", + " This will be cleared in `on_initialize` of each new block if no other pallet already set", + " the value." + ] + }, + { + "name": "ValidationData", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "polkadot_primitives:v1:PersistedValidationData", + "PlainTypeValue": 116 + }, + "fallback": "0x00", + "docs": [ + " The [`PersistedValidationData`] set for this block.", + " This value is expected to be set only once per block and it's never stored", + " in the trie." 
+ ] + }, + { + "name": "DidSetValidationCode", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Bool", + "PlainTypeValue": 86 + }, + "fallback": "0x00", + "docs": [ + " Were the validation data set to notify the relay chain?" + ] + }, + { + "name": "UpgradeRestrictionSignal", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "option", + "PlainTypeValue": 118 + }, + "fallback": "0x00", + "docs": [ + " An option which indicates if the relay-chain restricts signalling a validation code upgrade.", + " In other words, if this is `Some` and [`NewValidationCode`] is `Some` then the produced", + " candidate will be invalid.", + "", + " This storage item is a mirror of the corresponding value for the current parachain from the", + " relay-chain. This value is ephemeral which means it doesn't hit the storage. This value is", + " set after the inherent." + ] + }, + { + "name": "RelevantMessagingState", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "cumulus_pallet_parachain_system:relay_state_snapshot:MessagingStateSnapshot", + "PlainTypeValue": 120 + }, + "fallback": "0x00", + "docs": [ + " The snapshot of some state related to messaging relevant to the current parachain as per", + " the relay parent.", + "", + " This field is meant to be updated each block with the validation data inherent. Therefore,", + " before processing of the inherent, e.g. in `on_initialize` this data may be stale.", + "", + " This data is also absent from the genesis." + ] + }, + { + "name": "HostConfiguration", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "polkadot_primitives:v1:AbridgedHostConfiguration", + "PlainTypeValue": 124 + }, + "fallback": "0x00", + "docs": [ + " The parachain host configuration that was obtained from the relay parent.", + "", + " This field is meant to be updated each block with the validation data inherent. 
Therefore,", + " before processing of the inherent, e.g. in `on_initialize` this data may be stale.", + "", + " This data is also absent from the genesis." + ] + }, + { + "name": "LastDmqMqcHead", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "H256", + "PlainTypeValue": 125 + }, + "fallback": "0x0000000000000000000000000000000000000000000000000000000000000000", + "docs": [ + " The last downward message queue chain head we have observed.", + "", + " This value is loaded before and saved after processing inbound downward messages carried", + " by the system inherent." + ] + }, + { + "name": "LastHrmpMqcHeads", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 126 + }, + "fallback": "0x00", + "docs": [ + " The message queue chain heads we have observed per each channel incoming channel.", + "", + " This value is loaded before and saved after processing inbound downward messages carried", + " by the system inherent." + ] + }, + { + "name": "ProcessedDownwardMessages", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U32", + "PlainTypeValue": 4 + }, + "fallback": "0x00000000", + "docs": [ + " Number of downward messages processed in a block.", + "", + " This will be cleared in `on_initialize` of each new block." + ] + }, + { + "name": "HrmpWatermark", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U32", + "PlainTypeValue": 4 + }, + "fallback": "0x00000000", + "docs": [ + " HRMP watermark that was set in a block.", + "", + " This will be cleared in `on_initialize` of each new block." + ] + }, + { + "name": "HrmpOutboundMessages", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 129 + }, + "fallback": "0x00", + "docs": [ + " HRMP messages that were sent in a block.", + "", + " This will be cleared in `on_initialize` of each new block." 
+ ] + }, + { + "name": "UpwardMessages", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec>", + "PlainTypeValue": 103 + }, + "fallback": "0x00", + "docs": [ + " Upward messages that were sent in a block.", + "", + " This will be cleared in `on_initialize` of each new block." + ] + }, + { + "name": "PendingUpwardMessages", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec>", + "PlainTypeValue": 103 + }, + "fallback": "0x00", + "docs": [ + " Upward messages that are still pending and not yet send to the relay chain." + ] + }, + { + "name": "AnnouncedHrmpMessagesPerCandidate", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U32", + "PlainTypeValue": 4 + }, + "fallback": "0x00000000", + "docs": [ + " The number of HRMP messages we observed in `on_initialize` and thus used that number for", + " announcing the weight of `on_initialize` and `on_finalize`." + ] + }, + { + "name": "ReservedXcmpWeightOverride", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "U64", + "PlainTypeValue": 8 + }, + "fallback": "0x00", + "docs": [ + " The weight we reserve at the beginning of the block for processing XCMP messages. This", + " overrides the amount set in the Config trait." + ] + }, + { + "name": "ReservedDmpWeightOverride", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "U64", + "PlainTypeValue": 8 + }, + "fallback": "0x00", + "docs": [ + " The weight we reserve at the beginning of the block for processing DMP messages. This", + " overrides the amount set in the Config trait." + ] + }, + { + "name": "AuthorizedUpgrade", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "H256", + "PlainTypeValue": 9 + }, + "fallback": "0x00", + "docs": [ + " The next authorized upgrade, if there is one." 
+ ] + }, + { + "name": "CustomValidationHeadData", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 10 + }, + "fallback": "0x00", + "docs": [ + " A custom head data that should be returned as result of `validate_block`.", + "", + " See [`Pallet::set_custom_validation_head_data`] for more information." + ] + } + ], + "calls": [ + { + "lookup": "0100", + "name": "set_validation_data", + "docs": [ + "Set the current validation data.", + "", + "This should be invoked exactly once per block. It will panic at the finalization", + "phase if the call was not invoked.", + "", + "The dispatch origin for this call must be `Inherent`", + "", + "As a side effect, this function upgrades the current validation function", + "if the appropriate time has come." + ], + "args": [ + { + "name": "data", + "type": "cumulus_primitives_parachain_inherent:ParachainInherentData", + "type_name": "ParachainInherentData" + } + ] + }, + { + "lookup": "0101", + "name": "sudo_send_upward_message", + "docs": null, + "args": [ + { + "name": "message", + "type": "Vec", + "type_name": "UpwardMessage" + } + ] + }, + { + "lookup": "0102", + "name": "authorize_upgrade", + "docs": null, + "args": [ + { + "name": "code_hash", + "type": "H256", + "type_name": "Hash" + } + ] + }, + { + "lookup": "0103", + "name": "enact_authorized_upgrade", + "docs": null, + "args": [ + { + "name": "code", + "type": "Vec", + "type_name": "Bytes" + } + ] + } + ], + "calls_value": { + "type": 131 + }, + "events": [ + { + "lookup": "0100", + "name": "ValidationFunctionStored", + "docs": [ + "The validation function has been scheduled to apply." + ], + "args": null + }, + { + "lookup": "0101", + "name": "ValidationFunctionApplied", + "docs": [ + "The validation function was applied as of the contained relay chain block number." 
+ ], + "args": [ + "U32" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "RelayChainBlockNumber" + ] + }, + { + "lookup": "0102", + "name": "ValidationFunctionDiscarded", + "docs": [ + "The relay-chain aborted the upgrade process." + ], + "args": null + }, + { + "lookup": "0103", + "name": "UpgradeAuthorized", + "docs": [ + "An upgrade has been authorized." + ], + "args": [ + "H256" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "Hash" + ] + }, + { + "lookup": "0104", + "name": "DownwardMessagesReceived", + "docs": [ + "Some downward messages have been received and will be processed.", + "\\[ count \\]" + ], + "args": [ + "U32" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "u32" + ] + }, + { + "lookup": "0105", + "name": "DownwardMessagesProcessed", + "docs": [ + "Downward messages were processed using the given weight.", + "\\[ weight_used, result_mqc_head \\]" + ], + "args": [ + "U64", + "H256" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "Weight", + "relay_chain::Hash" + ] + } + ], + "events_value": { + "type": 26 + }, + "errors": [ + { + "name": "OverlappingUpgrades", + "doc": [ + "Attempt to upgrade validation function while existing upgrade pending" + ] + }, + { + "name": "ProhibitedByPolkadot", + "doc": [ + "Polkadot currently prohibits this parachain from upgrading its validation function" + ] + }, + { + "name": "TooBig", + "doc": [ + "The supplied validation function has compiled into a blob larger than Polkadot is", + "willing to run" + ] + }, + { + "name": "ValidationDataNotAvailable", + "doc": [ + "The inherent which supplies the validation data did not run this block" + ] + }, + { + "name": "HostConfigurationNotAvailable", + "doc": [ + "The inherent which supplies the host configuration did not run this block" + ] + }, + { + "name": "NotScheduled", + "doc": [ + "No validation function upgrade is currently scheduled." 
+ ] + }, + { + "name": "NothingAuthorized", + "doc": [ + "No code upgrade has been authorized." + ] + }, + { + "name": "Unauthorized", + "doc": [ + "The given code upgrade has not been authorized." + ] + } + ], + "errors_value": { + "type": 141 + }, + "index": 1 + }, + { + "name": "Timestamp", + "prefix": "Timestamp", + "storage": [ + { + "name": "Now", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U64", + "PlainTypeValue": 8 + }, + "fallback": "0x0000000000000000", + "docs": [ + " Current time for the current block." + ] + }, + { + "name": "DidUpdate", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Bool", + "PlainTypeValue": 86 + }, + "fallback": "0x00", + "docs": [ + " Did the timestamp get updated in this block?" + ] + } + ], + "calls": [ + { + "lookup": "0300", + "name": "set", + "docs": [ + "Set the current time.", + "", + "This call should be invoked exactly once per block. It will panic at the finalization", + "phase, if this call hasn't been invoked by that time.", + "", + "The timestamp should be greater than the previous one by the amount specified by", + "`MinimumPeriod`.", + "", + "The dispatch origin for this call must be `Inherent`.", + "", + "# ", + "- `O(1)` (Note that implementations of `OnTimestampSet` must also be `O(1)`)", + "- 1 storage read and 1 storage mutation (codec `O(1)`). (because of `DidUpdate::take` in", + " `on_finalize`)", + "- 1 event handler `on_timestamp_set`. Must be `O(1)`.", + "# " + ], + "args": [ + { + "name": "now", + "type": "compact", + "type_name": "Moment" + } + ] + } + ], + "calls_value": { + "type": 142 + }, + "constants": [ + { + "name": "MinimumPeriod", + "type": "U64", + "type_value": 8, + "constants_value": "7017000000000000", + "docs": [ + " The minimum period between blocks. Beware that this is different to the *expected*", + " period that the block production apparatus provides. 
Your chosen consensus system will", + " generally work with this to determine a sensible block time. e.g. For Aura, it will be", + " double this period on default settings." + ] + } + ], + "errors": null, + "errors_value": null, + "index": 3 + }, + { + "name": "ParachainInfo", + "prefix": "ParachainInfo", + "storage": [ + { + "name": "ParachainId", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U32", + "PlainTypeValue": 35 + }, + "fallback": "0x64000000", + "docs": null + } + ], + "errors": null, + "errors_value": null, + "index": 4 + }, + { + "name": "Balances", + "prefix": "Balances", + "storage": [ + { + "name": "TotalIssuance", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U128", + "PlainTypeValue": 6 + }, + "fallback": "0x00000000000000000000000000000000", + "docs": [ + " The total units issued in the system." + ] + }, + { + "name": "Account", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat" + ], + "key_vec": [ + "AccountId" + ], + "value": "pallet_balances:AccountData", + "keys_id": 0, + "value_id": 5 + } + }, + "fallback": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "docs": [ + " The Balances pallet example of storing the balance of an account.", + "", + " # Example", + "", + " ```nocompile", + " impl pallet_balances::Config for Runtime {", + " type AccountStore = StorageMapShim, frame_system::Provider, AccountId, Self::AccountData>", + " }", + " ```", + "", + " You can also store the balance of an account in the `System` pallet.", + "", + " # Example", + "", + " ```nocompile", + " impl pallet_balances::Config for Runtime {", + " type AccountStore = System", + " }", + " ```", + "", + " But this comes with tradeoffs, storing account balances in the system pallet stores", + " `frame_system` data alongside the account data contrary to storing 
account balances in the", + " `Balances` pallet, which uses a `StorageMap` to store balances data only.", + " NOTE: This is only used in the case that this pallet is used to store balances." + ] + }, + { + "name": "Locks", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat" + ], + "key_vec": [ + "AccountId" + ], + "value": "Vec", + "keys_id": 0, + "value_id": 143 + } + }, + "fallback": "0x00", + "docs": [ + " Any liquidity locks on some account balances.", + " NOTE: Should only be accessed when setting, changing and freeing a lock." + ] + }, + { + "name": "Reserves", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat" + ], + "key_vec": [ + "AccountId" + ], + "value": "Vec", + "keys_id": 0, + "value_id": 147 + } + }, + "fallback": "0x00", + "docs": [ + " Named reserves on some account balances." + ] + }, + { + "name": "StorageVersion", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "pallet_balances:Releases", + "PlainTypeValue": 150 + }, + "fallback": "0x00", + "docs": [ + " Storage version of the pallet.", + "", + " This is set to v2.0.0 for new networks." + ] + } + ], + "calls": [ + { + "lookup": "0a00", + "name": "transfer", + "docs": [ + "Transfer some liquid free balance to another account.", + "", + "`transfer` will set the `FreeBalance` of the sender and receiver.", + "If the sender's account is below the existential deposit as a result", + "of the transfer, the account will be reaped.", + "", + "The dispatch origin for this call must be `Signed` by the transactor.", + "", + "# ", + "- Dependent on arguments but not critical, given proper implementations for input config", + " types. 
See related functions below.", + "- It contains a limited number of reads and writes internally and no complex", + " computation.", + "", + "Related functions:", + "", + " - `ensure_can_withdraw` is always called internally but has a bounded complexity.", + " - Transferring balances to accounts that did not exist before will cause", + " `T::OnNewAccount::on_new_account` to be called.", + " - Removing enough funds from an account will trigger `T::DustRemoval::on_unbalanced`.", + " - `transfer_keep_alive` works the same way as `transfer`, but has an additional check", + " that the transfer will not kill the origin account.", + "---------------------------------", + "- Origin account is already in memory, so no DB operations for them.", + "# " + ], + "args": [ + { + "name": "dest", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "value", + "type": "compact", + "type_name": "Balance" + } + ] + }, + { + "lookup": "0a01", + "name": "set_balance", + "docs": [ + "Set the balances of a given account.", + "", + "This will alter `FreeBalance` and `ReservedBalance` in storage. it will", + "also alter the total issuance of the system (`TotalIssuance`) appropriately.", + "If the new free or reserved balance is below the existential deposit,", + "it will reset the account nonce (`frame_system::AccountNonce`).", + "", + "The dispatch origin for this call is `root`." 
+ ], + "args": [ + { + "name": "who", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "new_free", + "type": "compact", + "type_name": "Balance" + }, + { + "name": "new_reserved", + "type": "compact", + "type_name": "Balance" + } + ] + }, + { + "lookup": "0a02", + "name": "force_transfer", + "docs": [ + "Exactly as `transfer`, except the origin must be root and the source account may be", + "specified.", + "# ", + "- Same as transfer, but additional read and write because the source account is not", + " assumed to be in the overlay.", + "# " + ], + "args": [ + { + "name": "source", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "dest", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "value", + "type": "compact", + "type_name": "Balance" + } + ] + }, + { + "lookup": "0a03", + "name": "transfer_keep_alive", + "docs": [ + "Same as the [`transfer`] call, but with a check that the transfer will not kill the", + "origin account.", + "", + "99% of the time you want [`transfer`] instead.", + "", + "[`transfer`]: struct.Pallet.html#method.transfer" + ], + "args": [ + { + "name": "dest", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "value", + "type": "compact", + "type_name": "Balance" + } + ] + }, + { + "lookup": "0a04", + "name": "transfer_all", + "docs": [ + "Transfer the entire transferable balance from the caller account.", + "", + "NOTE: This function only attempts to transfer _transferable_ balances. This means that", + "any locked, reserved, or existential deposits (when `keep_alive` is `true`), will not be", + "transferred by this function. 
To ensure that this function results in a killed account,", + "you might need to prepare the account by removing any reference counters, storage", + "deposits, etc...", + "", + "The dispatch origin of this call must be Signed.", + "", + "- `dest`: The recipient of the transfer.", + "- `keep_alive`: A boolean to determine if the `transfer_all` operation should send all", + " of the funds the account has, causing the sender account to be killed (false), or", + " transfer everything except at least the existential deposit, which will guarantee to", + " keep the sender account alive (true). # ", + "- O(1). Just like transfer, but reading the user's transferable balance first.", + " #" + ], + "args": [ + { + "name": "dest", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "keep_alive", + "type": "Bool", + "type_name": "bool" + } + ] + }, + { + "lookup": "0a05", + "name": "force_unreserve", + "docs": [ + "Unreserve some balance from a user by force.", + "", + "Can only be called by ROOT." + ], + "args": [ + { + "name": "who", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "amount", + "type": "U128", + "type_name": "Balance" + } + ] + } + ], + "calls_value": { + "type": 151 + }, + "events": [ + { + "lookup": "0a00", + "name": "Endowed", + "docs": [ + "An account was created with some free balance." + ], + "args": [ + "AccountId", + "U128" + ], + "args_name": [ + "account", + "free_balance" + ], + "args_type_name": [ + "AccountId", + "Balance" + ] + }, + { + "lookup": "0a01", + "name": "DustLost", + "docs": [ + "An account was removed whose balance was non-zero but below ExistentialDeposit,", + "resulting in an outright loss." + ], + "args": [ + "AccountId", + "U128" + ], + "args_name": [ + "account", + "amount" + ], + "args_type_name": [ + "AccountId", + "Balance" + ] + }, + { + "lookup": "0a02", + "name": "Transfer", + "docs": [ + "Transfer succeeded." 
+ ], + "args": [ + "AccountId", + "AccountId", + "U128" + ], + "args_name": [ + "from", + "to", + "amount" + ], + "args_type_name": [ + "AccountId", + "AccountId", + "Balance" + ] + }, + { + "lookup": "0a03", + "name": "BalanceSet", + "docs": [ + "A balance was set by root." + ], + "args": [ + "AccountId", + "U128", + "U128" + ], + "args_name": [ + "who", + "free", + "reserved" + ], + "args_type_name": [ + "AccountId", + "Balance", + "Balance" + ] + }, + { + "lookup": "0a04", + "name": "Reserved", + "docs": [ + "Some balance was reserved (moved from free to reserved)." + ], + "args": [ + "AccountId", + "U128" + ], + "args_name": [ + "who", + "amount" + ], + "args_type_name": [ + "AccountId", + "Balance" + ] + }, + { + "lookup": "0a05", + "name": "Unreserved", + "docs": [ + "Some balance was unreserved (moved from reserved to free)." + ], + "args": [ + "AccountId", + "U128" + ], + "args_name": [ + "who", + "amount" + ], + "args_type_name": [ + "AccountId", + "Balance" + ] + }, + { + "lookup": "0a06", + "name": "ReserveRepatriated", + "docs": [ + "Some balance was moved from the reserve of the first account to the second account.", + "Final argument indicates the destination balance type." + ], + "args": [ + "AccountId", + "AccountId", + "U128", + "frame_support:traits:tokens:misc:BalanceStatus" + ], + "args_name": [ + "from", + "to", + "amount", + "destination_status" + ], + "args_type_name": [ + "AccountId", + "AccountId", + "Balance", + "Status" + ] + }, + { + "lookup": "0a07", + "name": "Deposit", + "docs": [ + "Some amount was deposited (e.g. for transaction fees)." + ], + "args": [ + "AccountId", + "U128" + ], + "args_name": [ + "who", + "amount" + ], + "args_type_name": [ + "AccountId", + "Balance" + ] + }, + { + "lookup": "0a08", + "name": "Withdraw", + "docs": [ + "Some amount was withdrawn from the account (e.g. for transaction fees)." 
+ ], + "args": [ + "AccountId", + "U128" + ], + "args_name": [ + "who", + "amount" + ], + "args_type_name": [ + "AccountId", + "Balance" + ] + }, + { + "lookup": "0a09", + "name": "Slashed", + "docs": [ + "Some amount was removed from the account (e.g. for misbehavior)." + ], + "args": [ + "AccountId", + "U128" + ], + "args_name": [ + "who", + "amount" + ], + "args_type_name": [ + "AccountId", + "Balance" + ] + } + ], + "events_value": { + "type": 27 + }, + "constants": [ + { + "name": "ExistentialDeposit", + "type": "U128", + "type_value": 6, + "constants_value": "00ca9a3b000000000000000000000000", + "docs": [ + " The minimum amount required to keep an account open." + ] + }, + { + "name": "MaxLocks", + "type": "U32", + "type_value": 4, + "constants_value": "32000000", + "docs": [ + " The maximum number of locks that should exist on an account.", + " Not strictly enforced, but used for weight estimation." + ] + }, + { + "name": "MaxReserves", + "type": "U32", + "type_value": 4, + "constants_value": "32000000", + "docs": [ + " The maximum number of named reserves that can exist on an account." 
+ ] + } + ], + "errors": [ + { + "name": "VestingBalance", + "doc": [ + "Vesting balance too high to send value" + ] + }, + { + "name": "LiquidityRestrictions", + "doc": [ + "Account liquidity restrictions prevent withdrawal" + ] + }, + { + "name": "InsufficientBalance", + "doc": [ + "Balance too low to send value" + ] + }, + { + "name": "ExistentialDeposit", + "doc": [ + "Value too low to create account due to existential deposit" + ] + }, + { + "name": "KeepAlive", + "doc": [ + "Transfer/payment would kill account" + ] + }, + { + "name": "ExistingVestingSchedule", + "doc": [ + "A vesting schedule already exists for this account" + ] + }, + { + "name": "DeadAccount", + "doc": [ + "Beneficiary account must pre-exist" + ] + }, + { + "name": "TooManyReserves", + "doc": [ + "Number of named reserves exceed MaxReserves" + ] + } + ], + "errors_value": { + "type": 154 + }, + "index": 10 + }, + { + "name": "TransactionPayment", + "prefix": "TransactionPayment", + "storage": [ + { + "name": "NextFeeMultiplier", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U128", + "PlainTypeValue": 155 + }, + "fallback": "0x000064a7b3b6e00d0000000000000000", + "docs": null + }, + { + "name": "StorageVersion", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "pallet_transaction_payment:Releases", + "PlainTypeValue": 156 + }, + "fallback": "0x00", + "docs": null + } + ], + "constants": [ + { + "name": "TransactionByteFee", + "type": "U128", + "type_value": 6, + "constants_value": "a0860100000000000000000000000000", + "docs": [ + " The fee to be paid for making a transaction; the per-byte portion." 
+ ] + }, + { + "name": "OperationalFeeMultiplier", + "type": "U8", + "type_value": 2, + "constants_value": "05", + "docs": [ + " A fee mulitplier for `Operational` extrinsics to compute \"virtual tip\" to boost their", + " `priority`", + "", + " This value is multipled by the `final_fee` to obtain a \"virtual tip\" that is later", + " added to a tip component in regular `priority` calculations.", + " It means that a `Normal` transaction can front-run a similarly-sized `Operational`", + " extrinsic (with no tip), by including a tip value greater than the virtual tip.", + "", + " ```rust,ignore", + " // For `Normal`", + " let priority = priority_calc(tip);", + "", + " // For `Operational`", + " let virtual_tip = (inclusion_fee + tip) * OperationalFeeMultiplier;", + " let priority = priority_calc(tip + virtual_tip);", + " ```", + "", + " Note that since we use `final_fee` the multiplier applies also to the regular `tip`", + " sent with the transaction. So, not only does the transaction get a priority bump based", + " on the `inclusion_fee`, but we also amplify the impact of tips applied to `Operational`", + " transactions." + ] + }, + { + "name": "WeightToFee", + "type": "Vec", + "type_value": 157, + "constants_value": "0400000000000000000000000000000000ff117a000001", + "docs": [ + " The polynomial that is applied in order to derive fee from weight." 
+ ] + } + ], + "errors": null, + "errors_value": null, + "index": 11 + }, + { + "name": "AssetTxPayment", + "prefix": "", + "storage": null, + "errors": null, + "errors_value": null, + "index": 12 + }, + { + "name": "Authorship", + "prefix": "Authorship", + "storage": [ + { + "name": "Uncles", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 159 + }, + "fallback": "0x00", + "docs": [ + " Uncles" + ] + }, + { + "name": "Author", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "AccountId", + "PlainTypeValue": 0 + }, + "fallback": "0x00", + "docs": [ + " Author of current block." + ] + }, + { + "name": "DidSetUncles", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Bool", + "PlainTypeValue": 86 + }, + "fallback": "0x00", + "docs": [ + " Whether uncles were already set in this block." + ] + } + ], + "calls": [ + { + "lookup": "1400", + "name": "set_uncles", + "docs": [ + "Provide a set of uncles." + ], + "args": [ + { + "name": "new_uncles", + "type": "Vec", + "type_name": "Vec
    " + } + ] + } + ], + "calls_value": { + "type": 162 + }, + "constants": [ + { + "name": "UncleGenerations", + "type": "U32", + "type_value": 4, + "constants_value": "00000000", + "docs": [ + " The number of blocks back we should accept uncles.", + " This means that we will deal with uncle-parents that are", + " `UncleGenerations + 1` before `now`." + ] + } + ], + "errors": [ + { + "name": "InvalidUncleParent", + "doc": [ + "The uncle parent not in the chain." + ] + }, + { + "name": "UnclesAlreadySet", + "doc": [ + "Uncles already set in the block." + ] + }, + { + "name": "TooManyUncles", + "doc": [ + "Too many uncles." + ] + }, + { + "name": "GenesisUncle", + "doc": [ + "The uncle is genesis." + ] + }, + { + "name": "TooHighUncle", + "doc": [ + "The uncle is too high in chain." + ] + }, + { + "name": "UncleAlreadyIncluded", + "doc": [ + "The uncle is already included." + ] + }, + { + "name": "OldUncle", + "doc": [ + "The uncle isn't recent enough to be included." + ] + } + ], + "errors_value": { + "type": 166 + }, + "index": 20 + }, + { + "name": "CollatorSelection", + "prefix": "CollatorSelection", + "storage": [ + { + "name": "Invulnerables", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 30 + }, + "fallback": "0x00", + "docs": [ + " The invulnerable, fixed collators." + ] + }, + { + "name": "Candidates", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 167 + }, + "fallback": "0x00", + "docs": [ + " The (community, limited) collation candidates." + ] + }, + { + "name": "LastAuthoredBlock", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Twox64Concat" + ], + "key_vec": [ + "AccountId" + ], + "value": "U32", + "keys_id": 0, + "value_id": 4 + } + }, + "fallback": "0x00000000", + "docs": [ + " Last block authored by collator." 
+ ] + }, + { + "name": "DesiredCandidates", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U32", + "PlainTypeValue": 4 + }, + "fallback": "0x00000000", + "docs": [ + " Desired number of candidates.", + "", + " This should ideally always be less than [`Config::MaxCandidates`] for weights to be correct." + ] + }, + { + "name": "CandidacyBond", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U128", + "PlainTypeValue": 6 + }, + "fallback": "0x00000000000000000000000000000000", + "docs": [ + " Fixed amount to deposit to become a collator.", + "", + " When a collator calls `leave_intent` they immediately receive the deposit back." + ] + } + ], + "calls": [ + { + "lookup": "1500", + "name": "set_invulnerables", + "docs": [ + "Set the list of invulnerable (fixed) collators." + ], + "args": [ + { + "name": "new", + "type": "Vec", + "type_name": "Vec" + } + ] + }, + { + "lookup": "1501", + "name": "set_desired_candidates", + "docs": [ + "Set the ideal number of collators (not including the invulnerables).", + "If lowering this number, then the number of running collators could be higher than this figure.", + "Aside from that edge case, there should be no other way to have more collators than the desired number." + ], + "args": [ + { + "name": "max", + "type": "U32", + "type_name": "u32" + } + ] + }, + { + "lookup": "1502", + "name": "set_candidacy_bond", + "docs": [ + "Set the candidacy bond amount." + ], + "args": [ + { + "name": "bond", + "type": "U128", + "type_name": "BalanceOf" + } + ] + }, + { + "lookup": "1503", + "name": "register_as_candidate", + "docs": [ + "Register this account as a collator candidate. The account must (a) already have", + "registered session keys and (b) be able to reserve the `CandidacyBond`.", + "", + "This call is not available to `Invulnerable` collators." 
+ ], + "args": null + }, + { + "lookup": "1504", + "name": "leave_intent", + "docs": [ + "Deregister `origin` as a collator candidate. Note that the collator can only leave on", + "session change. The `CandidacyBond` will be unreserved immediately.", + "", + "This call will fail if the total number of candidates would drop below `MinCandidates`.", + "", + "This call is not available to `Invulnerable` collators." + ], + "args": null + } + ], + "calls_value": { + "type": 169 + }, + "events": [ + { + "lookup": "1500", + "name": "NewInvulnerables", + "docs": null, + "args": [ + "Vec" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "Vec" + ] + }, + { + "lookup": "1501", + "name": "NewDesiredCandidates", + "docs": null, + "args": [ + "U32" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "u32" + ] + }, + { + "lookup": "1502", + "name": "NewCandidacyBond", + "docs": null, + "args": [ + "U128" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "BalanceOf" + ] + }, + { + "lookup": "1503", + "name": "CandidateAdded", + "docs": null, + "args": [ + "AccountId", + "U128" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "AccountId", + "BalanceOf" + ] + }, + { + "lookup": "1504", + "name": "CandidateRemoved", + "docs": null, + "args": [ + "AccountId" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "AccountId" + ] + } + ], + "events_value": { + "type": 29 + }, + "errors": [ + { + "name": "TooManyCandidates", + "doc": [ + "Too many candidates" + ] + }, + { + "name": "TooFewCandidates", + "doc": [ + "Too few candidates" + ] + }, + { + "name": "Unknown", + "doc": [ + "Unknown error" + ] + }, + { + "name": "Permission", + "doc": [ + "Permission issue" + ] + }, + { + "name": "AlreadyCandidate", + "doc": [ + "User is already a candidate" + ] + }, + { + "name": "NotCandidate", + "doc": [ + "User is not a candidate" + ] + }, + { + "name": "AlreadyInvulnerable", + "doc": [ + "User is already an Invulnerable" + ] + }, + { + "name": 
"NoAssociatedValidatorId", + "doc": [ + "Account has no associated validator ID" + ] + }, + { + "name": "ValidatorNotRegistered", + "doc": [ + "Validator ID is not yet registered" + ] + } + ], + "errors_value": { + "type": 170 + }, + "index": 21 + }, + { + "name": "Session", + "prefix": "Session", + "storage": [ + { + "name": "Validators", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 30 + }, + "fallback": "0x00", + "docs": [ + " The current set of validators." + ] + }, + { + "name": "CurrentIndex", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U32", + "PlainTypeValue": 4 + }, + "fallback": "0x00000000", + "docs": [ + " Current index of the session." + ] + }, + { + "name": "QueuedChanged", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Bool", + "PlainTypeValue": 86 + }, + "fallback": "0x00", + "docs": [ + " True if the underlying economic identities or weighting behind the validators", + " has changed in the queued validator set." + ] + }, + { + "name": "QueuedKeys", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 171 + }, + "fallback": "0x00", + "docs": [ + " The queued keys for the next session. When the next session begins, these keys", + " will be used to determine the validator's session keys." + ] + }, + { + "name": "DisabledValidators", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 89 + }, + "fallback": "0x00", + "docs": [ + " Indices of disabled validators.", + "", + " The vec is always kept sorted so that we can find whether a given validator is", + " disabled using binary search. It gets cleared when `on_session_ending` returns", + " a new set of identities." 
+ ] + }, + { + "name": "NextKeys", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Twox64Concat" + ], + "key_vec": [ + "AccountId" + ], + "value": "[U8; 32]", + "keys_id": 0, + "value_id": 173 + } + }, + "fallback": "0x00", + "docs": [ + " The next session keys for a validator." + ] + }, + { + "name": "KeyOwner", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Twox64Concat" + ], + "key_vec": [ + "[U8; 4]", + "Vec" + ], + "value": "AccountId", + "keys_id": 176, + "value_id": 0 + } + }, + "fallback": "0x00", + "docs": [ + " The owner of a key. The key is the `KeyTypeId` + the encoded key." + ] + } + ], + "calls": [ + { + "lookup": "1600", + "name": "set_keys", + "docs": [ + "Sets the session key(s) of the function caller to `keys`.", + "Allows an account to set its session key prior to becoming a validator.", + "This doesn't take effect until the next session.", + "", + "The dispatch origin of this function must be signed.", + "", + "# ", + "- Complexity: `O(1)`. 
Actual cost depends on the number of length of", + " `T::Keys::key_ids()` which is fixed.", + "- DbReads: `origin account`, `T::ValidatorIdOf`, `NextKeys`", + "- DbWrites: `origin account`, `NextKeys`", + "- DbReads per key id: `KeyOwner`", + "- DbWrites per key id: `KeyOwner`", + "# " + ], + "args": [ + { + "name": "keys", + "type": "[U8; 32]", + "type_name": "Keys" + }, + { + "name": "proof", + "type": "Vec", + "type_name": "Bytes" + } + ] + }, + { + "lookup": "1601", + "name": "purge_keys", + "docs": [ + "Removes any session key(s) of the function caller.", + "", + "This doesn't take effect until the next session.", + "", + "The dispatch origin of this function must be Signed and the account must be either be", + "convertible to a validator ID using the chain's typical addressing system (this usually", + "means being a controller account) or directly convertible into a validator ID (which", + "usually means being a stash account).", + "", + "# ", + "- Complexity: `O(1)` in number of key types. Actual cost depends on the number of length", + " of `T::Keys::key_ids()` which is fixed.", + "- DbReads: `T::ValidatorIdOf`, `NextKeys`, `origin account`", + "- DbWrites: `NextKeys`, `origin account`", + "- DbWrites per key id: `KeyOwner`", + "# " + ], + "args": null + } + ], + "calls_value": { + "type": 178 + }, + "events": [ + { + "lookup": "1600", + "name": "NewSession", + "docs": [ + "New session has happened. Note that the argument is the session index, not the", + "block number as the type might suggest." + ], + "args": [ + "U32" + ], + "args_name": [ + "session_index" + ], + "args_type_name": [ + "SessionIndex" + ] + } + ], + "events_value": { + "type": 31 + }, + "errors": [ + { + "name": "InvalidProof", + "doc": [ + "Invalid ownership proof." + ] + }, + { + "name": "NoAssociatedValidatorId", + "doc": [ + "No associated validator ID for account." + ] + }, + { + "name": "DuplicatedKey", + "doc": [ + "Registered duplicate key." 
+ ] + }, + { + "name": "NoKeys", + "doc": [ + "No keys are associated with this account." + ] + }, + { + "name": "NoAccount", + "doc": [ + "Key setting account is not live, so it's impossible to associate keys." + ] + } + ], + "errors_value": { + "type": 179 + }, + "index": 22 + }, + { + "name": "Aura", + "prefix": "Aura", + "storage": [ + { + "name": "Authorities", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec<[U8; 32]>", + "PlainTypeValue": 180 + }, + "fallback": "0x00", + "docs": [ + " The current authority set." + ] + }, + { + "name": "CurrentSlot", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U64", + "PlainTypeValue": 182 + }, + "fallback": "0x0000000000000000", + "docs": [ + " The current slot of this block.", + "", + " This will be set in `on_initialize`." + ] + } + ], + "errors": null, + "errors_value": null, + "index": 23 + }, + { + "name": "AuraExt", + "prefix": "AuraExt", + "storage": [ + { + "name": "Authorities", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec<[U8; 32]>", + "PlainTypeValue": 181 + }, + "fallback": "0x00", + "docs": [ + " Serves as cache for the authorities.", + "", + " The authorities in AuRa are overwritten in `on_initialize` when we switch to a new session,", + " but we require the old authorities to verify the seal when validating a PoV. This will always", + " be updated to the latest AuRa authorities in `on_finalize`." + ] + } + ], + "errors": null, + "errors_value": null, + "index": 24 + }, + { + "name": "XcmpQueue", + "prefix": "XcmpQueue", + "storage": [ + { + "name": "InboundXcmpStatus", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 183 + }, + "fallback": "0x00", + "docs": [ + " Status of the inbound XCMP channels." 
+ ] + }, + { + "name": "InboundXcmpMessages", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat", + "Twox64Concat" + ], + "key_vec": [ + "U32", + "U32" + ], + "value": "Vec", + "keys_id": 189, + "value_id": 10 + } + }, + "fallback": "0x00", + "docs": [ + " Inbound aggregate XCMP messages. It can only be one per ParaId/block." + ] + }, + { + "name": "OutboundXcmpStatus", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 190 + }, + "fallback": "0x00", + "docs": [ + " The non-empty XCMP channels in order of becoming non-empty, and the index of the first", + " and last outbound message. If the two indices are equal, then it indicates an empty", + " queue and there must be a non-`Ok` `OutboundStatus`. We assume queues grow no greater", + " than 65535 items. Queue indices for normal messages begin at one; zero is reserved in", + " case of the need to send a high-priority signal message this block.", + " The bool is true if there is a signal message waiting to be sent." + ] + }, + { + "name": "OutboundXcmpMessages", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat", + "Twox64Concat" + ], + "key_vec": [ + "U32", + "U16" + ], + "value": "Vec", + "keys_id": 193, + "value_id": 10 + } + }, + "fallback": "0x00", + "docs": [ + " The messages outbound in a given XCMP channel." + ] + }, + { + "name": "SignalMessages", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat" + ], + "key_vec": [ + "U32" + ], + "value": "Vec", + "keys_id": 35, + "value_id": 10 + } + }, + "fallback": "0x00", + "docs": [ + " Any signal messages waiting to be sent." 
+ ] + }, + { + "name": "QueueConfig", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "cumulus_pallet_xcmp_queue:QueueConfigData", + "PlainTypeValue": 194 + }, + "fallback": "0x020000000500000001000000a086010000000000020000000000000000c817a804000000", + "docs": [ + " The configuration which controls the dynamics of the outbound queue." + ] + }, + { + "name": "Overweight", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Twox64Concat" + ], + "key_vec": [ + "U64" + ], + "value": "Tuple:U32U32Vec", + "keys_id": 8, + "value_id": 195 + } + }, + "fallback": "0x00", + "docs": [ + " The messages that exceeded max individual message weight budget.", + "", + " These message stay in this storage map until they are manually dispatched via", + " `service_overweight`." + ] + }, + { + "name": "OverweightCount", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U64", + "PlainTypeValue": 8 + }, + "fallback": "0x0000000000000000", + "docs": [ + " The number of overweight messages ever recorded in `Overweight`. Also doubles as the next", + " available free overweight index." + ] + }, + { + "name": "QueueSuspended", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Bool", + "PlainTypeValue": 86 + }, + "fallback": "0x00", + "docs": [ + " Whether or not the XCMP queue is suspended from executing incoming XCMs or not." 
+ ] + } + ], + "calls": [ + { + "lookup": "1e00", + "name": "service_overweight", + "docs": [ + "Services a single overweight XCM.", + "", + "- `origin`: Must pass `ExecuteOverweightOrigin`.", + "- `index`: The index of the overweight XCM to service", + "- `weight_limit`: The amount of weight that XCM execution may take.", + "", + "Errors:", + "- `BadOverweightIndex`: XCM under `index` is not found in the `Overweight` storage map.", + "- `BadXcm`: XCM under `index` cannot be properly decoded into a valid XCM format.", + "- `WeightOverLimit`: XCM execution may use greater `weight_limit`.", + "", + "Events:", + "- `OverweightServiced`: On success." + ], + "args": [ + { + "name": "index", + "type": "U64", + "type_name": "OverweightIndex" + }, + { + "name": "weight_limit", + "type": "U64", + "type_name": "Weight" + } + ] + }, + { + "lookup": "1e01", + "name": "suspend_xcm_execution", + "docs": [ + "Suspends all XCM executions for the XCMP queue, regardless of the sender's origin.", + "", + "- `origin`: Must pass `ControllerOrigin`." + ], + "args": null + }, + { + "lookup": "1e02", + "name": "resume_xcm_execution", + "docs": [ + "Resumes all XCM executions for the XCMP queue.", + "", + "Note that this function doesn't change the status of the in/out bound channels.", + "", + "- `origin`: Must pass `ControllerOrigin`." 
+ ], + "args": null + }, + { + "lookup": "1e03", + "name": "update_suspend_threshold", + "docs": [ + "Overwrites the number of pages of messages which must be in the queue for the other side to be told to", + "suspend their sending.", + "", + "- `origin`: Must pass `Root`.", + "- `new`: Desired value for `QueueConfigData.suspend_value`" + ], + "args": [ + { + "name": "new", + "type": "U32", + "type_name": "u32" + } + ] + }, + { + "lookup": "1e04", + "name": "update_drop_threshold", + "docs": [ + "Overwrites the number of pages of messages which must be in the queue after which we drop any further", + "messages from the channel.", + "", + "- `origin`: Must pass `Root`.", + "- `new`: Desired value for `QueueConfigData.drop_threshold`" + ], + "args": [ + { + "name": "new", + "type": "U32", + "type_name": "u32" + } + ] + }, + { + "lookup": "1e05", + "name": "update_resume_threshold", + "docs": [ + "Overwrites the number of pages of messages which the queue must be reduced to before it signals that", + "message sending may recommence after it has been suspended.", + "", + "- `origin`: Must pass `Root`.", + "- `new`: Desired value for `QueueConfigData.resume_threshold` " + ], + "args": [ + { + "name": "new", + "type": "U32", + "type_name": "u32" + } + ] + }, + { + "lookup": "1e06", + "name": "update_threshold_weight", + "docs": [ + "Overwrites the amount of remaining weight under which we stop processing messages.", + "", + "- `origin`: Must pass `Root`.", + "- `new`: Desired value for `QueueConfigData.threshold_weight` " + ], + "args": [ + { + "name": "new", + "type": "U64", + "type_name": "Weight" + } + ] + }, + { + "lookup": "1e07", + "name": "update_weight_restrict_decay", + "docs": [ + "Overwrites the speed to which the available weight approaches the maximum weight.", + "A lower number results in a faster progression. 
A value of 1 makes the entire weight available initially.", + "", + "- `origin`: Must pass `Root`.", + "- `new`: Desired value for `QueueConfigData.weight_restrict_decay`. " + ], + "args": [ + { + "name": "new", + "type": "U64", + "type_name": "Weight" + } + ] + }, + { + "lookup": "1e08", + "name": "update_xcmp_max_individual_weight", + "docs": [ + "Overwrite the maximum amount of weight any individual message may consume.", + "Messages above this weight go into the overweight queue and may only be serviced explicitly.", + "", + "- `origin`: Must pass `Root`.", + "- `new`: Desired value for `QueueConfigData.xcmp_max_individual_weight`. " + ], + "args": [ + { + "name": "new", + "type": "U64", + "type_name": "Weight" + } + ] + } + ], + "calls_value": { + "type": 196 + }, + "events": [ + { + "lookup": "1e00", + "name": "Success", + "docs": [ + "Some XCM was executed ok." + ], + "args": [ + "option" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "Option" + ] + }, + { + "lookup": "1e01", + "name": "Fail", + "docs": [ + "Some XCM failed." + ], + "args": [ + "option", + "xcm:v2:traits:Error" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "Option", + "XcmError" + ] + }, + { + "lookup": "1e02", + "name": "BadVersion", + "docs": [ + "Bad XCM version used." + ], + "args": [ + "option" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "Option" + ] + }, + { + "lookup": "1e03", + "name": "BadFormat", + "docs": [ + "Bad XCM format used." + ], + "args": [ + "option" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "Option" + ] + }, + { + "lookup": "1e04", + "name": "UpwardMessageSent", + "docs": [ + "An upward message was sent to the relay chain." + ], + "args": [ + "option" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "Option" + ] + }, + { + "lookup": "1e05", + "name": "XcmpMessageSent", + "docs": [ + "An HRMP message was sent to a sibling parachain." 
+ ], + "args": [ + "option" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "Option" + ] + }, + { + "lookup": "1e06", + "name": "OverweightEnqueued", + "docs": [ + "An XCM exceeded the individual message weight budget." + ], + "args": [ + "U32", + "U32", + "U64", + "U64" + ], + "args_name": [ + "", + "", + "", + "" + ], + "args_type_name": [ + "ParaId", + "RelayBlockNumber", + "OverweightIndex", + "Weight" + ] + }, + { + "lookup": "1e07", + "name": "OverweightServiced", + "docs": [ + "An XCM from the overweight queue was executed with the given actual weight used." + ], + "args": [ + "U64", + "U64" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "OverweightIndex", + "Weight" + ] + } + ], + "events_value": { + "type": 32 + }, + "errors": [ + { + "name": "FailedToSend", + "doc": [ + "Failed to send XCM message." + ] + }, + { + "name": "BadXcmOrigin", + "doc": [ + "Bad XCM origin." + ] + }, + { + "name": "BadXcm", + "doc": [ + "Bad XCM data." + ] + }, + { + "name": "BadOverweightIndex", + "doc": [ + "Bad overweight index." + ] + }, + { + "name": "WeightOverLimit", + "doc": [ + "Provided weight is possibly not enough to execute the message." + ] + } + ], + "errors_value": { + "type": 197 + }, + "index": 30 + }, + { + "name": "PolkadotXcm", + "prefix": "PolkadotXcm", + "storage": [ + { + "name": "QueryCounter", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "U64", + "PlainTypeValue": 8 + }, + "fallback": "0x0000000000000000", + "docs": [ + " The latest available query index." + ] + }, + { + "name": "Queries", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat" + ], + "key_vec": [ + "U64" + ], + "value": "pallet_xcm:pallet:QueryStatus", + "keys_id": 8, + "value_id": 198 + } + }, + "fallback": "0x00", + "docs": [ + " The ongoing queries." 
+ ] + }, + { + "name": "AssetTraps", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Identity" + ], + "key_vec": [ + "H256" + ], + "value": "U32", + "keys_id": 9, + "value_id": 4 + } + }, + "fallback": "0x00000000", + "docs": [ + " The existing asset traps.", + "", + " Key is the blake2 256 hash of (origin, versioned `MultiAssets`) pair. Value is the number of", + " times this pair has been trapped (usually just 1 if it exists at all)." + ] + }, + { + "name": "SafeXcmVersion", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "U32", + "PlainTypeValue": 4 + }, + "fallback": "0x00", + "docs": [ + " Default version to encode XCM when latest version of destination is unknown. If `None`,", + " then the destinations whose XCM version is unknown are considered unreachable." + ] + }, + { + "name": "SupportedVersion", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Twox64Concat", + "Blake2_128Concat" + ], + "key_vec": [ + "U32", + "xcm:VersionedMultiLocation" + ], + "value": "U32", + "keys_id": 204, + "value_id": 4 + } + }, + "fallback": "0x00", + "docs": [ + " The Latest versions that we know various locations support." + ] + }, + { + "name": "VersionNotifiers", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Twox64Concat", + "Blake2_128Concat" + ], + "key_vec": [ + "U32", + "xcm:VersionedMultiLocation" + ], + "value": "U64", + "keys_id": 204, + "value_id": 8 + } + }, + "fallback": "0x00", + "docs": [ + " All locations that we have requested version notifications from." 
+ ] + }, + { + "name": "VersionNotifyTargets", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Twox64Concat", + "Blake2_128Concat" + ], + "key_vec": [ + "U32", + "xcm:VersionedMultiLocation" + ], + "value": "Tuple:U64U64U32", + "keys_id": 204, + "value_id": 205 + } + }, + "fallback": "0x00", + "docs": [ + " The target locations that are subscribed to our version changes, as well as the most recent", + " of our versions we informed them of." + ] + }, + { + "name": "VersionDiscoveryQueue", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "Vec", + "PlainTypeValue": 206 + }, + "fallback": "0x00", + "docs": [ + " Destinations whose latest XCM version we would like to know. Duplicates not allowed, and", + " the `u32` counter is the number of times that a send to the destination has been attempted,", + " which is used as a prioritization." + ] + }, + { + "name": "CurrentMigration", + "modifier": "Optional", + "type": { + "origin": "PlainType", + "plain_type": "pallet_xcm:pallet:VersionMigrationStage", + "PlainTypeValue": 209 + }, + "fallback": "0x00", + "docs": [ + " The current migration's stage, if any." + ] + } + ], + "calls": [ + { + "lookup": "1f00", + "name": "send", + "docs": null, + "args": [ + { + "name": "dest", + "type": "xcm:VersionedMultiLocation", + "type_name": "Box" + }, + { + "name": "message", + "type": "xcm:VersionedXcm@212", + "type_name": "Box>" + } + ] + }, + { + "lookup": "1f01", + "name": "teleport_assets", + "docs": [ + "Teleport some assets from the local chain to some destination chain.", + "", + "Fee payment on the destination side is made from the first asset listed in the `assets` vector and", + "fee-weight is calculated locally and thus remote weights are assumed to be equal to", + "local weights.", + "", + "- `origin`: Must be capable of withdrawing the `assets` and executing XCM.", + "- `dest`: Destination context for the assets. 
Will typically be `X2(Parent, Parachain(..))` to send", + " from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain.", + "- `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be", + " an `AccountId32` value.", + "- `assets`: The assets to be withdrawn. The first item should be the currency used to to pay the fee on the", + " `dest` side. May not be empty.", + "- `fee_asset_item`: The index into `assets` of the item which should be used to pay", + " fees." + ], + "args": [ + { + "name": "dest", + "type": "xcm:VersionedMultiLocation", + "type_name": "Box" + }, + { + "name": "beneficiary", + "type": "xcm:VersionedMultiLocation", + "type_name": "Box" + }, + { + "name": "assets", + "type": "xcm:VersionedMultiAssets", + "type_name": "Box" + }, + { + "name": "fee_asset_item", + "type": "U32", + "type_name": "u32" + } + ] + }, + { + "lookup": "1f02", + "name": "reserve_transfer_assets", + "docs": [ + "Transfer some assets from the local chain to the sovereign account of a destination chain and forward", + "a notification XCM.", + "", + "Fee payment on the destination side is made from the first asset listed in the `assets` vector and", + "fee-weight is calculated locally and thus remote weights are assumed to be equal to", + "local weights.", + "", + "- `origin`: Must be capable of withdrawing the `assets` and executing XCM.", + "- `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send", + " from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain.", + "- `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be", + " an `AccountId32` value.", + "- `assets`: The assets to be withdrawn. This should include the assets used to pay the fee on the", + " `dest` side.", + "- `fee_asset_item`: The index into `assets` of the item which should be used to pay", + " fees." 
+ ], + "args": [ + { + "name": "dest", + "type": "xcm:VersionedMultiLocation", + "type_name": "Box" + }, + { + "name": "beneficiary", + "type": "xcm:VersionedMultiLocation", + "type_name": "Box" + }, + { + "name": "assets", + "type": "xcm:VersionedMultiAssets", + "type_name": "Box" + }, + { + "name": "fee_asset_item", + "type": "U32", + "type_name": "u32" + } + ] + }, + { + "lookup": "1f03", + "name": "execute", + "docs": [ + "Execute an XCM message from a local, signed, origin.", + "", + "An event is deposited indicating whether `msg` could be executed completely or only", + "partially.", + "", + "No more than `max_weight` will be used in its attempted execution. If this is less than the", + "maximum amount of weight that the message could take to be executed, then no execution", + "attempt will be made.", + "", + "NOTE: A successful return to this does *not* imply that the `msg` was executed successfully", + "to completion; only that *some* of it was executed." + ], + "args": [ + { + "name": "message", + "type": "xcm:VersionedXcm@221", + "type_name": "Box::Call>>" + }, + { + "name": "max_weight", + "type": "U64", + "type_name": "Weight" + } + ] + }, + { + "lookup": "1f04", + "name": "force_xcm_version", + "docs": [ + "Extoll that a particular destination can be communicated with through a particular", + "version of XCM.", + "", + "- `origin`: Must be Root.", + "- `location`: The destination that is being described.", + "- `xcm_version`: The latest version of XCM that `location` supports." 
+ ], + "args": [ + { + "name": "location", + "type": "xcm:v1:multilocation:MultiLocation", + "type_name": "Box" + }, + { + "name": "xcm_version", + "type": "U32", + "type_name": "XcmVersion" + } + ] + }, + { + "lookup": "1f05", + "name": "force_default_xcm_version", + "docs": [ + "Set a safe XCM version (the version that XCM should be encoded with if the most recent", + "version a destination can accept is unknown).", + "", + "- `origin`: Must be Root.", + "- `maybe_xcm_version`: The default XCM encoding version, or `None` to disable." + ], + "args": [ + { + "name": "maybe_xcm_version", + "type": "option", + "type_name": "Option" + } + ] + }, + { + "lookup": "1f06", + "name": "force_subscribe_version_notify", + "docs": [ + "Ask a location to notify us regarding their XCM version and any changes to it.", + "", + "- `origin`: Must be Root.", + "- `location`: The location to which we should subscribe for XCM version notifications." + ], + "args": [ + { + "name": "location", + "type": "xcm:VersionedMultiLocation", + "type_name": "Box" + } + ] + }, + { + "lookup": "1f07", + "name": "force_unsubscribe_version_notify", + "docs": [ + "Require that a particular destination should no longer notify us regarding any XCM", + "version changes.", + "", + "- `origin`: Must be Root.", + "- `location`: The location to which we are currently subscribed for XCM version", + " notifications which we no longer desire." + ], + "args": [ + { + "name": "location", + "type": "xcm:VersionedMultiLocation", + "type_name": "Box" + } + ] + }, + { + "lookup": "1f08", + "name": "limited_reserve_transfer_assets", + "docs": [ + "Transfer some assets from the local chain to the sovereign account of a destination chain and forward", + "a notification XCM.", + "", + "Fee payment on the destination side is made from the first asset listed in the `assets` vector.", + "", + "- `origin`: Must be capable of withdrawing the `assets` and executing XCM.", + "- `dest`: Destination context for the assets. 
Will typically be `X2(Parent, Parachain(..))` to send", + " from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain.", + "- `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be", + " an `AccountId32` value.", + "- `assets`: The assets to be withdrawn. This should include the assets used to pay the fee on the", + " `dest` side.", + "- `fee_asset_item`: The index into `assets` of the item which should be used to pay", + " fees.", + "- `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase." + ], + "args": [ + { + "name": "dest", + "type": "xcm:VersionedMultiLocation", + "type_name": "Box" + }, + { + "name": "beneficiary", + "type": "xcm:VersionedMultiLocation", + "type_name": "Box" + }, + { + "name": "assets", + "type": "xcm:VersionedMultiAssets", + "type_name": "Box" + }, + { + "name": "fee_asset_item", + "type": "U32", + "type_name": "u32" + }, + { + "name": "weight_limit", + "type": "xcm:v2:WeightLimit", + "type_name": "WeightLimit" + } + ] + }, + { + "lookup": "1f09", + "name": "limited_teleport_assets", + "docs": [ + "Teleport some assets from the local chain to some destination chain.", + "", + "Fee payment on the destination side is made from the first asset listed in the `assets` vector.", + "", + "- `origin`: Must be capable of withdrawing the `assets` and executing XCM.", + "- `dest`: Destination context for the assets. Will typically be `X2(Parent, Parachain(..))` to send", + " from parachain to parachain, or `X1(Parachain(..))` to send from relay to parachain.", + "- `beneficiary`: A beneficiary location for the assets in the context of `dest`. Will generally be", + " an `AccountId32` value.", + "- `assets`: The assets to be withdrawn. The first item should be the currency used to to pay the fee on the", + " `dest` side. 
May not be empty.", + "- `fee_asset_item`: The index into `assets` of the item which should be used to pay", + " fees.", + "- `weight_limit`: The remote-side weight limit, if any, for the XCM fee purchase." + ], + "args": [ + { + "name": "dest", + "type": "xcm:VersionedMultiLocation", + "type_name": "Box" + }, + { + "name": "beneficiary", + "type": "xcm:VersionedMultiLocation", + "type_name": "Box" + }, + { + "name": "assets", + "type": "xcm:VersionedMultiAssets", + "type_name": "Box" + }, + { + "name": "fee_asset_item", + "type": "U32", + "type_name": "u32" + }, + { + "name": "weight_limit", + "type": "xcm:v2:WeightLimit", + "type_name": "WeightLimit" + } + ] + } + ], + "calls_value": { + "type": 211 + }, + "events": [ + { + "lookup": "1f00", + "name": "Attempted", + "docs": [ + "Execution of an XCM message was attempted.", + "", + "\\[ outcome \\]" + ], + "args": [ + "xcm:v2:traits:Outcome" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "xcm::latest::Outcome" + ] + }, + { + "lookup": "1f01", + "name": "Sent", + "docs": [ + "A XCM message was sent.", + "", + "\\[ origin, destination, message \\]" + ], + "args": [ + "xcm:v1:multilocation:MultiLocation", + "xcm:v1:multilocation:MultiLocation", + "Vec" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "MultiLocation", + "MultiLocation", + "Xcm<()>" + ] + }, + { + "lookup": "1f02", + "name": "UnexpectedResponse", + "docs": [ + "Query response received which does not match a registered query. This may be because a", + "matching query was never registered, it may be because it is a duplicate response, or", + "because the query timed out.", + "", + "\\[ origin location, id \\]" + ], + "args": [ + "xcm:v1:multilocation:MultiLocation", + "U64" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "MultiLocation", + "QueryId" + ] + }, + { + "lookup": "1f03", + "name": "ResponseReady", + "docs": [ + "Query response has been received and is ready for taking with `take_response`. 
There is", + "no registered notification call.", + "", + "\\[ id, response \\]" + ], + "args": [ + "U64", + "xcm:v2:Response" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "QueryId", + "Response" + ] + }, + { + "lookup": "1f04", + "name": "Notified", + "docs": [ + "Query response has been received and query is removed. The registered notification has", + "been dispatched and executed successfully.", + "", + "\\[ id, pallet index, call index \\]" + ], + "args": [ + "U64", + "U8", + "U8" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "QueryId", + "u8", + "u8" + ] + }, + { + "lookup": "1f05", + "name": "NotifyOverweight", + "docs": [ + "Query response has been received and query is removed. The registered notification could", + "not be dispatched because the dispatch weight is greater than the maximum weight", + "originally budgeted by this runtime for the query result.", + "", + "\\[ id, pallet index, call index, actual weight, max budgeted weight \\]" + ], + "args": [ + "U64", + "U8", + "U8", + "U64", + "U64" + ], + "args_name": [ + "", + "", + "", + "", + "" + ], + "args_type_name": [ + "QueryId", + "u8", + "u8", + "Weight", + "Weight" + ] + }, + { + "lookup": "1f06", + "name": "NotifyDispatchError", + "docs": [ + "Query response has been received and query is removed. There was a general error with", + "dispatching the notification call.", + "", + "\\[ id, pallet index, call index \\]" + ], + "args": [ + "U64", + "U8", + "U8" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "QueryId", + "u8", + "u8" + ] + }, + { + "lookup": "1f07", + "name": "NotifyDecodeFailed", + "docs": [ + "Query response has been received and query is removed. 
The dispatch was unable to be", + "decoded into a `Call`; this might be due to dispatch function having a signature which", + "is not `(origin, QueryId, Response)`.", + "", + "\\[ id, pallet index, call index \\]" + ], + "args": [ + "U64", + "U8", + "U8" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "QueryId", + "u8", + "u8" + ] + }, + { + "lookup": "1f08", + "name": "InvalidResponder", + "docs": [ + "Expected query response has been received but the origin location of the response does", + "not match that expected. The query remains registered for a later, valid, response to", + "be received and acted upon.", + "", + "\\[ origin location, id, expected location \\]" + ], + "args": [ + "xcm:v1:multilocation:MultiLocation", + "U64", + "option" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "MultiLocation", + "QueryId", + "Option" + ] + }, + { + "lookup": "1f09", + "name": "InvalidResponderVersion", + "docs": [ + "Expected query response has been received but the expected origin location placed in", + "storage by this runtime previously cannot be decoded. The query remains registered.", + "", + "This is unexpected (since a location placed in storage in a previously executing", + "runtime should be readable prior to query timeout) and dangerous since the possibly", + "valid response will be dropped. 
Manual governance intervention is probably going to be", + "needed.", + "", + "\\[ origin location, id \\]" + ], + "args": [ + "xcm:v1:multilocation:MultiLocation", + "U64" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "MultiLocation", + "QueryId" + ] + }, + { + "lookup": "1f0a", + "name": "ResponseTaken", + "docs": [ + "Received query response has been read and removed.", + "", + "\\[ id \\]" + ], + "args": [ + "U64" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "QueryId" + ] + }, + { + "lookup": "1f0b", + "name": "AssetsTrapped", + "docs": [ + "Some assets have been placed in an asset trap.", + "", + "\\[ hash, origin, assets \\]" + ], + "args": [ + "H256", + "xcm:v1:multilocation:MultiLocation", + "xcm:VersionedMultiAssets" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "H256", + "MultiLocation", + "VersionedMultiAssets" + ] + }, + { + "lookup": "1f0c", + "name": "VersionChangeNotified", + "docs": [ + "An XCM version change notification message has been attempted to be sent.", + "", + "\\[ destination, result \\]" + ], + "args": [ + "xcm:v1:multilocation:MultiLocation", + "U32" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "MultiLocation", + "XcmVersion" + ] + }, + { + "lookup": "1f0d", + "name": "SupportedVersionChanged", + "docs": [ + "The supported version of a location has been changed. 
This might be through an", + "automatic notification or a manual intervention.", + "", + "\\[ location, XCM version \\]" + ], + "args": [ + "xcm:v1:multilocation:MultiLocation", + "U32" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "MultiLocation", + "XcmVersion" + ] + }, + { + "lookup": "1f0e", + "name": "NotifyTargetSendFail", + "docs": [ + "A given location which had a version change subscription was dropped owing to an error", + "sending the notification to it.", + "", + "\\[ location, query ID, error \\]" + ], + "args": [ + "xcm:v1:multilocation:MultiLocation", + "U64", + "xcm:v2:traits:Error" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "MultiLocation", + "QueryId", + "XcmError" + ] + }, + { + "lookup": "1f0f", + "name": "NotifyTargetMigrationFail", + "docs": [ + "A given location which had a version change subscription was dropped owing to an error", + "migrating the location to our new XCM format.", + "", + "\\[ location, query ID \\]" + ], + "args": [ + "xcm:VersionedMultiLocation", + "U64" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "VersionedMultiLocation", + "QueryId" + ] + } + ], + "events_value": { + "type": 36 + }, + "errors": [ + { + "name": "Unreachable", + "doc": [ + "The desired destination was unreachable, generally because there is a no way of routing", + "to it." + ] + }, + { + "name": "SendFailure", + "doc": [ + "There was some other issue (i.e. not to do with routing) in sending the message. Perhaps", + "a lack of space for buffering the message." + ] + }, + { + "name": "Filtered", + "doc": [ + "The message execution fails the filter." + ] + }, + { + "name": "UnweighableMessage", + "doc": [ + "The message's weight could not be determined." + ] + }, + { + "name": "DestinationNotInvertible", + "doc": [ + "The destination `MultiLocation` provided cannot be inverted." + ] + }, + { + "name": "Empty", + "doc": [ + "The assets to be sent are empty." 
+ ] + }, + { + "name": "CannotReanchor", + "doc": [ + "Could not re-anchor the assets to declare the fees for the destination chain." + ] + }, + { + "name": "TooManyAssets", + "doc": [ + "Too many assets have been attempted for transfer." + ] + }, + { + "name": "InvalidOrigin", + "doc": [ + "Origin is invalid for sending." + ] + }, + { + "name": "BadVersion", + "doc": [ + "The version of the `Versioned` value used is not able to be interpreted." + ] + }, + { + "name": "BadLocation", + "doc": [ + "The given location could not be used (e.g. because it cannot be expressed in the", + "desired version of XCM)." + ] + }, + { + "name": "NoSubscription", + "doc": [ + "The referenced subscription could not be found." + ] + }, + { + "name": "AlreadySubscribed", + "doc": [ + "The location is invalid since it already has a subscription from us." + ] + } + ], + "errors_value": { + "type": 234 + }, + "index": 31 + }, + { + "name": "CumulusXcm", + "prefix": "", + "storage": null, + "events": [ + { + "lookup": "2000", + "name": "InvalidFormat", + "docs": [ + "Downward message is invalid XCM.", + "\\[ id \\]" + ], + "args": [ + "[U8; 8]" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "[u8; 8]" + ] + }, + { + "lookup": "2001", + "name": "UnsupportedVersion", + "docs": [ + "Downward message is unsupported version of XCM.", + "\\[ id \\]" + ], + "args": [ + "[U8; 8]" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "[u8; 8]" + ] + }, + { + "lookup": "2002", + "name": "ExecutedDownward", + "docs": [ + "Downward message executed with the given outcome.", + "\\[ id, outcome \\]" + ], + "args": [ + "[U8; 8]", + "xcm:v2:traits:Outcome" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "[u8; 8]", + "Outcome" + ] + } + ], + "events_value": { + "type": 75 + }, + "errors": null, + "errors_value": { + "type": 235 + }, + "index": 32 + }, + { + "name": "DmpQueue", + "prefix": "DmpQueue", + "storage": [ + { + "name": "Configuration", + "modifier": "Default", + "type": { 
+ "origin": "PlainType", + "plain_type": "U64", + "PlainTypeValue": 236 + }, + "fallback": "0x00e40b5402000000", + "docs": [ + " The configuration." + ] + }, + { + "name": "PageIndex", + "modifier": "Default", + "type": { + "origin": "PlainType", + "plain_type": "cumulus_pallet_dmp_queue:PageIndexData", + "PlainTypeValue": 237 + }, + "fallback": "0x00000000000000000000000000000000", + "docs": [ + " The page index." + ] + }, + { + "name": "Pages", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat" + ], + "key_vec": [ + "U32" + ], + "value": "Vec>", + "keys_id": 4, + "value_id": 238 + } + }, + "fallback": "0x00", + "docs": [ + " The queue pages." + ] + }, + { + "name": "Overweight", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat" + ], + "key_vec": [ + "U64" + ], + "value": "Tuple:U32Vec", + "keys_id": 8, + "value_id": 239 + } + }, + "fallback": "0x00", + "docs": [ + " The overweight messages." + ] + } + ], + "calls": [ + { + "lookup": "2100", + "name": "service_overweight", + "docs": [ + "Service a single overweight message.", + "", + "- `origin`: Must pass `ExecuteOverweightOrigin`.", + "- `index`: The index of the overweight message to service.", + "- `weight_limit`: The amount of weight that message execution may take.", + "", + "Errors:", + "- `Unknown`: Message of `index` is unknown.", + "- `OverLimit`: Message execution may use greater than `weight_limit`.", + "", + "Events:", + "- `OverweightServiced`: On success." 
+ ], + "args": [ + { + "name": "index", + "type": "U64", + "type_name": "OverweightIndex" + }, + { + "name": "weight_limit", + "type": "U64", + "type_name": "Weight" + } + ] + } + ], + "calls_value": { + "type": 240 + }, + "events": [ + { + "lookup": "2100", + "name": "InvalidFormat", + "docs": [ + "Downward message is invalid XCM.", + "\\[ id \\]" + ], + "args": [ + "[U8; 32]" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "MessageId" + ] + }, + { + "lookup": "2101", + "name": "UnsupportedVersion", + "docs": [ + "Downward message is unsupported version of XCM.", + "\\[ id \\]" + ], + "args": [ + "[U8; 32]" + ], + "args_name": [ + "" + ], + "args_type_name": [ + "MessageId" + ] + }, + { + "lookup": "2102", + "name": "ExecutedDownward", + "docs": [ + "Downward message executed with the given outcome.", + "\\[ id, outcome \\]" + ], + "args": [ + "[U8; 32]", + "xcm:v2:traits:Outcome" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "MessageId", + "Outcome" + ] + }, + { + "lookup": "2103", + "name": "WeightExhausted", + "docs": [ + "The weight limit for handling downward messages was reached.", + "\\[ id, remaining, required \\]" + ], + "args": [ + "[U8; 32]", + "U64", + "U64" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "MessageId", + "Weight", + "Weight" + ] + }, + { + "lookup": "2104", + "name": "OverweightEnqueued", + "docs": [ + "Downward message is overweight and was placed in the overweight queue.", + "\\[ id, index, required \\]" + ], + "args": [ + "[U8; 32]", + "U64", + "U64" + ], + "args_name": [ + "", + "", + "" + ], + "args_type_name": [ + "MessageId", + "OverweightIndex", + "Weight" + ] + }, + { + "lookup": "2105", + "name": "OverweightServiced", + "docs": [ + "Downward message from the overweight queue was executed.", + "\\[ index, used \\]" + ], + "args": [ + "U64", + "U64" + ], + "args_name": [ + "", + "" + ], + "args_type_name": [ + "OverweightIndex", + "Weight" + ] + } + ], + "events_value": { + "type": 76 
+ }, + "errors": [ + { + "name": "Unknown", + "doc": [ + "The message index given is unknown." + ] + }, + { + "name": "OverLimit", + "doc": [ + "The amount of weight given is possibly not enough for executing the message." + ] + } + ], + "errors_value": { + "type": 241 + }, + "index": 33 + }, + { + "name": "Utility", + "prefix": "", + "storage": null, + "calls": [ + { + "lookup": "2800", + "name": "batch", + "docs": [ + "Send a batch of dispatch calls.", + "", + "May be called from any origin.", + "", + "- `calls`: The calls to be dispatched from the same origin. The number of call must not", + " exceed the constant: `batched_calls_limit` (available in constant metadata).", + "", + "If origin is root then call are dispatch without checking origin filter. (This includes", + "bypassing `frame_system::Config::BaseCallFilter`).", + "", + "# ", + "- Complexity: O(C) where C is the number of calls to be batched.", + "# ", + "", + "This will return `Ok` in all circumstances. To determine the success of the batch, an", + "event is deposited. If a call failed and the batch was interrupted, then the", + "`BatchInterrupted` event is deposited, along with the number of successful calls made", + "and the error of the failed call. If all were successful, then the `BatchCompleted`", + "event is deposited." + ], + "args": [ + { + "name": "calls", + "type": "Vec", + "type_name": "Vec<::Call>" + } + ] + }, + { + "lookup": "2801", + "name": "as_derivative", + "docs": [ + "Send a call through an indexed pseudonym of the sender.", + "", + "Filter from origin are passed along. 
The call will be dispatched with an origin which", + "use the same filter as the origin of this call.", + "", + "NOTE: If you need to ensure that any account-based filtering is not honored (i.e.", + "because you expect `proxy` to have been used prior in the call stack and you do not want", + "the call restrictions to apply to any sub-accounts), then use `as_multi_threshold_1`", + "in the Multisig pallet instead.", + "", + "NOTE: Prior to version *12, this was called `as_limited_sub`.", + "", + "The dispatch origin for this call must be _Signed_." + ], + "args": [ + { + "name": "index", + "type": "U16", + "type_name": "u16" + }, + { + "name": "call", + "type": "Call", + "type_name": "Box<::Call>" + } + ] + }, + { + "lookup": "2802", + "name": "batch_all", + "docs": [ + "Send a batch of dispatch calls and atomically execute them.", + "The whole transaction will rollback and fail if any of the calls failed.", + "", + "May be called from any origin.", + "", + "- `calls`: The calls to be dispatched from the same origin. The number of call must not", + " exceed the constant: `batched_calls_limit` (available in constant metadata).", + "", + "If origin is root then call are dispatch without checking origin filter. 
(This includes", + "bypassing `frame_system::Config::BaseCallFilter`).", + "", + "# ", + "- Complexity: O(C) where C is the number of calls to be batched.", + "# " + ], + "args": [ + { + "name": "calls", + "type": "Vec", + "type_name": "Vec<::Call>" + } + ] + }, + { + "lookup": "2803", + "name": "dispatch_as", + "docs": [ + "Dispatches a function call with a provided origin.", + "", + "The dispatch origin for this call must be _Root_.", + "", + "# ", + "- O(1).", + "- Limited storage reads.", + "- One DB write (event).", + "- Weight of derivative `call` execution + T::WeightInfo::dispatch_as().", + "# " + ], + "args": [ + { + "name": "as_origin", + "type": "statemint_runtime:OriginCaller", + "type_name": "Box" + }, + { + "name": "call", + "type": "Call", + "type_name": "Box<::Call>" + } + ] + } + ], + "calls_value": { + "type": 242 + }, + "events": [ + { + "lookup": "2800", + "name": "BatchInterrupted", + "docs": [ + "Batch of dispatches did not complete fully. Index of first failing dispatch given, as", + "well as the error." + ], + "args": [ + "U32", + "sp_runtime:DispatchError" + ], + "args_name": [ + "index", + "error" + ], + "args_type_name": [ + "u32", + "DispatchError" + ] + }, + { + "lookup": "2801", + "name": "BatchCompleted", + "docs": [ + "Batch of dispatches completed fully with no error." + ], + "args": null + }, + { + "lookup": "2802", + "name": "ItemCompleted", + "docs": [ + "A single item within a Batch of dispatches has completed with no error." + ], + "args": null + }, + { + "lookup": "2803", + "name": "DispatchedAs", + "docs": [ + "A call was dispatched." + ], + "args": [ + "Result" + ], + "args_name": [ + "result" + ], + "args_type_name": [ + "DispatchResult" + ] + } + ], + "events_value": { + "type": 77 + }, + "constants": [ + { + "name": "batched_calls_limit", + "type": "U32", + "type_value": 4, + "constants_value": "aa2a0000", + "docs": [ + " The limit on the number of batched calls." 
+ ] + } + ], + "errors": [ + { + "name": "TooManyCalls", + "doc": [ + "Too many calls batched." + ] + } + ], + "errors_value": { + "type": 260 + }, + "index": 40 + }, + { + "name": "Multisig", + "prefix": "Multisig", + "storage": [ + { + "name": "Multisigs", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Twox64Concat", + "Blake2_128Concat" + ], + "key_vec": [ + "AccountId", + "[U8; 32]" + ], + "value": "pallet_multisig:Multisig", + "keys_id": 261, + "value_id": 262 + } + }, + "fallback": "0x00", + "docs": [ + " The set of open multisig operations." + ] + }, + { + "name": "Calls", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Identity" + ], + "key_vec": [ + "[U8; 32]" + ], + "value": "Tuple:WrapperOpaqueAccountIdU128", + "keys_id": 1, + "value_id": 263 + } + }, + "fallback": "0x00", + "docs": null + } + ], + "calls": [ + { + "lookup": "2900", + "name": "as_multi_threshold_1", + "docs": [ + "Immediately dispatch a multi-signature call using a single approval from the caller.", + "", + "The dispatch origin for this call must be _Signed_.", + "", + "- `other_signatories`: The accounts (other than the sender) who are part of the", + "multi-signature, but do not participate in the approval process.", + "- `call`: The call to be executed.", + "", + "Result is equivalent to the dispatched result.", + "", + "# ", + "O(Z + C) where Z is the length of the call and C its execution weight.", + "-------------------------------", + "- DB Weight: None", + "- Plus Call Weight", + "# " + ], + "args": [ + { + "name": "other_signatories", + "type": "Vec", + "type_name": "Vec" + }, + { + "name": "call", + "type": "Call", + "type_name": "Box<::Call>" + } + ] + }, + { + "lookup": "2901", + "name": "as_multi", + "docs": [ + "Register approval for a dispatch to be made from a deterministic composite account if", + "approved by a total of `threshold - 1` of `other_signatories`.", + "", + "If there 
are enough, then dispatch the call.", + "", + "Payment: `DepositBase` will be reserved if this is the first approval, plus", + "`threshold` times `DepositFactor`. It is returned once this dispatch happens or", + "is cancelled.", + "", + "The dispatch origin for this call must be _Signed_.", + "", + "- `threshold`: The total number of approvals for this dispatch before it is executed.", + "- `other_signatories`: The accounts (other than the sender) who can approve this", + "dispatch. May not be empty.", + "- `maybe_timepoint`: If this is the first approval, then this must be `None`. If it is", + "not the first approval, then it must be `Some`, with the timepoint (block number and", + "transaction index) of the first approval transaction.", + "- `call`: The call to be executed.", + "", + "NOTE: Unless this is the final approval, you will generally want to use", + "`approve_as_multi` instead, since it only requires a hash of the call.", + "", + "Result is equivalent to the dispatched result if `threshold` is exactly `1`. Otherwise", + "on success, result is `Ok` and the result from the interior call, if it was executed,", + "may be found in the deposited `MultisigExecuted` event.", + "", + "# ", + "- `O(S + Z + Call)`.", + "- Up to one balance-reserve or unreserve operation.", + "- One passthrough operation, one insert, both `O(S)` where `S` is the number of", + " signatories. `S` is capped by `MaxSignatories`, with weight being proportional.", + "- One call encode & hash, both of complexity `O(Z)` where `Z` is tx-len.", + "- One encode & hash, both of complexity `O(S)`.", + "- Up to one binary search and insert (`O(logS + S)`).", + "- I/O: 1 read `O(S)`, up to 1 mutate `O(S)`. 
Up to one remove.", + "- One event.", + "- The weight of the `call`.", + "- Storage: inserts one item, value size bounded by `MaxSignatories`, with a deposit", + " taken for its lifetime of `DepositBase + threshold * DepositFactor`.", + "-------------------------------", + "- DB Weight:", + " - Reads: Multisig Storage, [Caller Account], Calls (if `store_call`)", + " - Writes: Multisig Storage, [Caller Account], Calls (if `store_call`)", + "- Plus Call Weight", + "# " + ], + "args": [ + { + "name": "threshold", + "type": "U16", + "type_name": "u16" + }, + { + "name": "other_signatories", + "type": "Vec", + "type_name": "Vec" + }, + { + "name": "maybe_timepoint", + "type": "option", + "type_name": "Option>" + }, + { + "name": "call", + "type": "WrapperOpaque", + "type_name": "OpaqueCall" + }, + { + "name": "store_call", + "type": "Bool", + "type_name": "bool" + }, + { + "name": "max_weight", + "type": "U64", + "type_name": "Weight" + } + ] + }, + { + "lookup": "2902", + "name": "approve_as_multi", + "docs": [ + "Register approval for a dispatch to be made from a deterministic composite account if", + "approved by a total of `threshold - 1` of `other_signatories`.", + "", + "Payment: `DepositBase` will be reserved if this is the first approval, plus", + "`threshold` times `DepositFactor`. It is returned once this dispatch happens or", + "is cancelled.", + "", + "The dispatch origin for this call must be _Signed_.", + "", + "- `threshold`: The total number of approvals for this dispatch before it is executed.", + "- `other_signatories`: The accounts (other than the sender) who can approve this", + "dispatch. May not be empty.", + "- `maybe_timepoint`: If this is the first approval, then this must be `None`. 
If it is", + "not the first approval, then it must be `Some`, with the timepoint (block number and", + "transaction index) of the first approval transaction.", + "- `call_hash`: The hash of the call to be executed.", + "", + "NOTE: If this is the final approval, you will want to use `as_multi` instead.", + "", + "# ", + "- `O(S)`.", + "- Up to one balance-reserve or unreserve operation.", + "- One passthrough operation, one insert, both `O(S)` where `S` is the number of", + " signatories. `S` is capped by `MaxSignatories`, with weight being proportional.", + "- One encode & hash, both of complexity `O(S)`.", + "- Up to one binary search and insert (`O(logS + S)`).", + "- I/O: 1 read `O(S)`, up to 1 mutate `O(S)`. Up to one remove.", + "- One event.", + "- Storage: inserts one item, value size bounded by `MaxSignatories`, with a deposit", + " taken for its lifetime of `DepositBase + threshold * DepositFactor`.", + "----------------------------------", + "- DB Weight:", + " - Read: Multisig Storage, [Caller Account]", + " - Write: Multisig Storage, [Caller Account]", + "# " + ], + "args": [ + { + "name": "threshold", + "type": "U16", + "type_name": "u16" + }, + { + "name": "other_signatories", + "type": "Vec", + "type_name": "Vec" + }, + { + "name": "maybe_timepoint", + "type": "option", + "type_name": "Option>" + }, + { + "name": "call_hash", + "type": "[U8; 32]", + "type_name": "[u8; 32]" + }, + { + "name": "max_weight", + "type": "U64", + "type_name": "Weight" + } + ] + }, + { + "lookup": "2903", + "name": "cancel_as_multi", + "docs": [ + "Cancel a pre-existing, on-going multisig transaction. Any deposit reserved previously", + "for this operation will be unreserved on success.", + "", + "The dispatch origin for this call must be _Signed_.", + "", + "- `threshold`: The total number of approvals for this dispatch before it is executed.", + "- `other_signatories`: The accounts (other than the sender) who can approve this", + "dispatch. 
May not be empty.", + "- `timepoint`: The timepoint (block number and transaction index) of the first approval", + "transaction for this dispatch.", + "- `call_hash`: The hash of the call to be executed.", + "", + "# ", + "- `O(S)`.", + "- Up to one balance-reserve or unreserve operation.", + "- One passthrough operation, one insert, both `O(S)` where `S` is the number of", + " signatories. `S` is capped by `MaxSignatories`, with weight being proportional.", + "- One encode & hash, both of complexity `O(S)`.", + "- One event.", + "- I/O: 1 read `O(S)`, one remove.", + "- Storage: removes one item.", + "----------------------------------", + "- DB Weight:", + " - Read: Multisig Storage, [Caller Account], Refund Account, Calls", + " - Write: Multisig Storage, [Caller Account], Refund Account, Calls", + "# " + ], + "args": [ + { + "name": "threshold", + "type": "U16", + "type_name": "u16" + }, + { + "name": "other_signatories", + "type": "Vec", + "type_name": "Vec" + }, + { + "name": "timepoint", + "type": "pallet_multisig:Timepoint", + "type_name": "Timepoint" + }, + { + "name": "call_hash", + "type": "[U8; 32]", + "type_name": "[u8; 32]" + } + ] + } + ], + "calls_value": { + "type": 245 + }, + "events": [ + { + "lookup": "2900", + "name": "NewMultisig", + "docs": [ + "A new multisig operation has begun." + ], + "args": [ + "AccountId", + "AccountId", + "[U8; 32]" + ], + "args_name": [ + "approving", + "multisig", + "call_hash" + ], + "args_type_name": [ + "AccountId", + "AccountId", + "CallHash" + ] + }, + { + "lookup": "2901", + "name": "MultisigApproval", + "docs": [ + "A multisig operation has been approved by someone." 
+ ], + "args": [ + "AccountId", + "pallet_multisig:Timepoint", + "AccountId", + "[U8; 32]" + ], + "args_name": [ + "approving", + "timepoint", + "multisig", + "call_hash" + ], + "args_type_name": [ + "AccountId", + "Timepoint", + "AccountId", + "CallHash" + ] + }, + { + "lookup": "2902", + "name": "MultisigExecuted", + "docs": [ + "A multisig operation has been executed." + ], + "args": [ + "AccountId", + "pallet_multisig:Timepoint", + "AccountId", + "[U8; 32]", + "Result" + ], + "args_name": [ + "approving", + "timepoint", + "multisig", + "call_hash", + "result" + ], + "args_type_name": [ + "AccountId", + "Timepoint", + "AccountId", + "CallHash", + "DispatchResult" + ] + }, + { + "lookup": "2903", + "name": "MultisigCancelled", + "docs": [ + "A multisig operation has been cancelled." + ], + "args": [ + "AccountId", + "pallet_multisig:Timepoint", + "AccountId", + "[U8; 32]" + ], + "args_name": [ + "cancelling", + "timepoint", + "multisig", + "call_hash" + ], + "args_type_name": [ + "AccountId", + "Timepoint", + "AccountId", + "CallHash" + ] + } + ], + "events_value": { + "type": 80 + }, + "constants": [ + { + "name": "DepositBase", + "type": "U128", + "type_value": 6, + "constants_value": "008e56ad040000000000000000000000", + "docs": [ + " The base amount of currency needed to reserve for creating a multisig execution or to", + " store a dispatch call for later.", + "", + " This is held for an additional storage item whose value size is", + " `4 + sizeof((BlockNumber, Balance, AccountId))` bytes and whose key size is", + " `32 + sizeof(AccountId)` bytes." + ] + }, + { + "name": "DepositFactor", + "type": "U128", + "type_value": 6, + "constants_value": "0048e801000000000000000000000000", + "docs": [ + " The amount of currency needed per unit threshold when creating a multisig execution.", + "", + " This is held for adding 32 bytes more into a pre-existing storage value." 
+ ] + }, + { + "name": "MaxSignatories", + "type": "U16", + "type_value": 84, + "constants_value": "6400", + "docs": [ + " The maximum amount of signatories allowed in the multisig." + ] + } + ], + "errors": [ + { + "name": "MinimumThreshold", + "doc": [ + "Threshold must be 2 or greater." + ] + }, + { + "name": "AlreadyApproved", + "doc": [ + "Call is already approved by this signatory." + ] + }, + { + "name": "NoApprovalsNeeded", + "doc": [ + "Call doesn't need any (more) approvals." + ] + }, + { + "name": "TooFewSignatories", + "doc": [ + "There are too few signatories in the list." + ] + }, + { + "name": "TooManySignatories", + "doc": [ + "There are too many signatories in the list." + ] + }, + { + "name": "SignatoriesOutOfOrder", + "doc": [ + "The signatories were provided out of order; they should be ordered." + ] + }, + { + "name": "SenderInSignatories", + "doc": [ + "The sender was contained in the other signatories; it shouldn't be." + ] + }, + { + "name": "NotFound", + "doc": [ + "Multisig operation not found when attempting to cancel." + ] + }, + { + "name": "NotOwner", + "doc": [ + "Only the account that originally created the multisig is able to cancel it." + ] + }, + { + "name": "NoTimepoint", + "doc": [ + "No timepoint was given, yet the multisig operation is already underway." + ] + }, + { + "name": "WrongTimepoint", + "doc": [ + "A different timepoint was given to the multisig operation that is underway." + ] + }, + { + "name": "UnexpectedTimepoint", + "doc": [ + "A timepoint was given, yet no multisig operation is underway." + ] + }, + { + "name": "MaxWeightTooLow", + "doc": [ + "The maximum weight information provided was too low." + ] + }, + { + "name": "AlreadyStored", + "doc": [ + "The data to be stored is already stored." 
+ ] + } + ], + "errors_value": { + "type": 264 + }, + "index": 41 + }, + { + "name": "Proxy", + "prefix": "Proxy", + "storage": [ + { + "name": "Proxies", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Twox64Concat" + ], + "key_vec": [ + "AccountId" + ], + "value": "Tuple:frame_support:storage:bounded_vec:BoundedVec@266U128", + "keys_id": 0, + "value_id": 265 + } + }, + "fallback": "0x0000000000000000000000000000000000", + "docs": [ + " The set of account proxies. Maps the account which has delegated to the accounts", + " which are being delegated to, together with the amount held on deposit." + ] + }, + { + "name": "Announcements", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Twox64Concat" + ], + "key_vec": [ + "AccountId" + ], + "value": "Tuple:frame_support:storage:bounded_vec:BoundedVec@270U128", + "keys_id": 0, + "value_id": 269 + } + }, + "fallback": "0x0000000000000000000000000000000000", + "docs": [ + " The announcements made by the proxy (key)." 
+ ] + } + ], + "calls": [ + { + "lookup": "2a00", + "name": "proxy", + "docs": [ + "Dispatch the given `call` from an account that the sender is authorised for through", + "`add_proxy`.", + "", + "Removes any corresponding announcement(s).", + "", + "The dispatch origin for this call must be _Signed_.", + "", + "Parameters:", + "- `real`: The account that the proxy will make a call on behalf of.", + "- `force_proxy_type`: Specify the exact proxy type to be used and checked for this call.", + "- `call`: The call to be made by the `real` account.", + "", + "# ", + "Weight is a function of the number of proxies the user has (P).", + "# " + ], + "args": [ + { + "name": "real", + "type": "AccountId", + "type_name": "AccountId" + }, + { + "name": "force_proxy_type", + "type": "option", + "type_name": "Option" + }, + { + "name": "call", + "type": "Call", + "type_name": "Box<::Call>" + } + ] + }, + { + "lookup": "2a01", + "name": "add_proxy", + "docs": [ + "Register a proxy account for the sender that is able to make calls on its behalf.", + "", + "The dispatch origin for this call must be _Signed_.", + "", + "Parameters:", + "- `proxy`: The account that the `caller` would like to make a proxy.", + "- `proxy_type`: The permissions allowed for this proxy account.", + "- `delay`: The announcement period required of the initial proxy. 
Will generally be", + "zero.", + "", + "# ", + "Weight is a function of the number of proxies the user has (P).", + "# " + ], + "args": [ + { + "name": "delegate", + "type": "AccountId", + "type_name": "AccountId" + }, + { + "name": "proxy_type", + "type": "statemint_runtime:ProxyType", + "type_name": "ProxyType" + }, + { + "name": "delay", + "type": "U32", + "type_name": "BlockNumber" + } + ] + }, + { + "lookup": "2a02", + "name": "remove_proxy", + "docs": [ + "Unregister a proxy account for the sender.", + "", + "The dispatch origin for this call must be _Signed_.", + "", + "Parameters:", + "- `proxy`: The account that the `caller` would like to remove as a proxy.", + "- `proxy_type`: The permissions currently enabled for the removed proxy account.", + "", + "# ", + "Weight is a function of the number of proxies the user has (P).", + "# " + ], + "args": [ + { + "name": "delegate", + "type": "AccountId", + "type_name": "AccountId" + }, + { + "name": "proxy_type", + "type": "statemint_runtime:ProxyType", + "type_name": "ProxyType" + }, + { + "name": "delay", + "type": "U32", + "type_name": "BlockNumber" + } + ] + }, + { + "lookup": "2a03", + "name": "remove_proxies", + "docs": [ + "Unregister all proxy accounts for the sender.", + "", + "The dispatch origin for this call must be _Signed_.", + "", + "WARNING: This may be called on accounts created by `anonymous`, however if done, then", + "the unreserved fees will be inaccessible. **All access to this account will be lost.**", + "", + "# ", + "Weight is a function of the number of proxies the user has (P).", + "# " + ], + "args": null + }, + { + "lookup": "2a04", + "name": "anonymous", + "docs": [ + "Spawn a fresh new account that is guaranteed to be otherwise inaccessible, and", + "initialize it with a proxy of `proxy_type` for `origin` sender.", + "", + "Requires a `Signed` origin.", + "", + "- `proxy_type`: The type of the proxy that the sender will be registered as over the", + "new account. 
This will almost always be the most permissive `ProxyType` possible to", + "allow for maximum flexibility.", + "- `index`: A disambiguation index, in case this is called multiple times in the same", + "transaction (e.g. with `utility::batch`). Unless you're using `batch` you probably just", + "want to use `0`.", + "- `delay`: The announcement period required of the initial proxy. Will generally be", + "zero.", + "", + "Fails with `Duplicate` if this has already been called in this transaction, from the", + "same sender, with the same parameters.", + "", + "Fails if there are insufficient funds to pay for deposit.", + "", + "# ", + "Weight is a function of the number of proxies the user has (P).", + "# ", + "TODO: Might be over counting 1 read" + ], + "args": [ + { + "name": "proxy_type", + "type": "statemint_runtime:ProxyType", + "type_name": "ProxyType" + }, + { + "name": "delay", + "type": "U32", + "type_name": "BlockNumber" + }, + { + "name": "index", + "type": "U16", + "type_name": "u16" + } + ] + }, + { + "lookup": "2a05", + "name": "kill_anonymous", + "docs": [ + "Removes a previously spawned anonymous proxy.", + "", + "WARNING: **All access to this account will be lost.** Any funds held in it will be", + "inaccessible.", + "", + "Requires a `Signed` origin, and the sender account must have been created by a call to", + "`anonymous` with corresponding parameters.", + "", + "- `spawner`: The account that originally called `anonymous` to create this account.", + "- `index`: The disambiguation index originally passed to `anonymous`. 
Probably `0`.", + "- `proxy_type`: The proxy type originally passed to `anonymous`.", + "- `height`: The height of the chain when the call to `anonymous` was processed.", + "- `ext_index`: The extrinsic index in which the call to `anonymous` was processed.", + "", + "Fails with `NoPermission` in case the caller is not a previously created anonymous", + "account whose `anonymous` call has corresponding parameters.", + "", + "# ", + "Weight is a function of the number of proxies the user has (P).", + "# " + ], + "args": [ + { + "name": "spawner", + "type": "AccountId", + "type_name": "AccountId" + }, + { + "name": "proxy_type", + "type": "statemint_runtime:ProxyType", + "type_name": "ProxyType" + }, + { + "name": "index", + "type": "U16", + "type_name": "u16" + }, + { + "name": "height", + "type": "compact", + "type_name": "BlockNumber" + }, + { + "name": "ext_index", + "type": "compact", + "type_name": "u32" + } + ] + }, + { + "lookup": "2a06", + "name": "announce", + "docs": [ + "Publish the hash of a proxy-call that will be made in the future.", + "", + "This must be called some number of blocks before the corresponding `proxy` is attempted", + "if the delay associated with the proxy relationship is greater than zero.", + "", + "No more than `MaxPending` announcements may be made at any one time.", + "", + "This will take a deposit of `AnnouncementDepositFactor` as well as", + "`AnnouncementDepositBase` if there are no other pending announcements.", + "", + "The dispatch origin for this call must be _Signed_ and a proxy of `real`.", + "", + "Parameters:", + "- `real`: The account that the proxy will make a call on behalf of.", + "- `call_hash`: The hash of the call to be made by the `real` account.", + "", + "# ", + "Weight is a function of:", + "- A: the number of announcements made.", + "- P: the number of proxies the user has.", + "# " + ], + "args": [ + { + "name": "real", + "type": "AccountId", + "type_name": "AccountId" + }, + { + "name": "call_hash", + 
"type": "H256", + "type_name": "CallHashOf" + } + ] + }, + { + "lookup": "2a07", + "name": "remove_announcement", + "docs": [ + "Remove a given announcement.", + "", + "May be called by a proxy account to remove a call they previously announced and return", + "the deposit.", + "", + "The dispatch origin for this call must be _Signed_.", + "", + "Parameters:", + "- `real`: The account that the proxy will make a call on behalf of.", + "- `call_hash`: The hash of the call to be made by the `real` account.", + "", + "# ", + "Weight is a function of:", + "- A: the number of announcements made.", + "- P: the number of proxies the user has.", + "# " + ], + "args": [ + { + "name": "real", + "type": "AccountId", + "type_name": "AccountId" + }, + { + "name": "call_hash", + "type": "H256", + "type_name": "CallHashOf" + } + ] + }, + { + "lookup": "2a08", + "name": "reject_announcement", + "docs": [ + "Remove the given announcement of a delegate.", + "", + "May be called by a target (proxied) account to remove a call that one of their delegates", + "(`delegate`) has announced they want to execute. 
The deposit is returned.", + "", + "The dispatch origin for this call must be _Signed_.", + "", + "Parameters:", + "- `delegate`: The account that previously announced the call.", + "- `call_hash`: The hash of the call to be made.", + "", + "# ", + "Weight is a function of:", + "- A: the number of announcements made.", + "- P: the number of proxies the user has.", + "# " + ], + "args": [ + { + "name": "delegate", + "type": "AccountId", + "type_name": "AccountId" + }, + { + "name": "call_hash", + "type": "H256", + "type_name": "CallHashOf" + } + ] + }, + { + "lookup": "2a09", + "name": "proxy_announced", + "docs": [ + "Dispatch the given `call` from an account that the sender is authorized for through", + "`add_proxy`.", + "", + "Removes any corresponding announcement(s).", + "", + "The dispatch origin for this call must be _Signed_.", + "", + "Parameters:", + "- `real`: The account that the proxy will make a call on behalf of.", + "- `force_proxy_type`: Specify the exact proxy type to be used and checked for this call.", + "- `call`: The call to be made by the `real` account.", + "", + "# ", + "Weight is a function of:", + "- A: the number of announcements made.", + "- P: the number of proxies the user has.", + "# " + ], + "args": [ + { + "name": "delegate", + "type": "AccountId", + "type_name": "AccountId" + }, + { + "name": "real", + "type": "AccountId", + "type_name": "AccountId" + }, + { + "name": "force_proxy_type", + "type": "option", + "type_name": "Option" + }, + { + "name": "call", + "type": "Call", + "type_name": "Box<::Call>" + } + ] + } + ], + "calls_value": { + "type": 248 + }, + "events": [ + { + "lookup": "2a00", + "name": "ProxyExecuted", + "docs": [ + "A proxy was executed correctly, with the given." 
+ ], + "args": [ + "Result" + ], + "args_name": [ + "result" + ], + "args_type_name": [ + "DispatchResult" + ] + }, + { + "lookup": "2a01", + "name": "AnonymousCreated", + "docs": [ + "Anonymous account has been created by new proxy with given", + "disambiguation index and proxy type." + ], + "args": [ + "AccountId", + "AccountId", + "statemint_runtime:ProxyType", + "U16" + ], + "args_name": [ + "anonymous", + "who", + "proxy_type", + "disambiguation_index" + ], + "args_type_name": [ + "AccountId", + "AccountId", + "ProxyType", + "u16" + ] + }, + { + "lookup": "2a02", + "name": "Announced", + "docs": [ + "An announcement was placed to make a call in the future." + ], + "args": [ + "AccountId", + "AccountId", + "H256" + ], + "args_name": [ + "real", + "proxy", + "call_hash" + ], + "args_type_name": [ + "AccountId", + "AccountId", + "CallHashOf" + ] + }, + { + "lookup": "2a03", + "name": "ProxyAdded", + "docs": [ + "A proxy was added." + ], + "args": [ + "AccountId", + "AccountId", + "statemint_runtime:ProxyType", + "U32" + ], + "args_name": [ + "delegator", + "delegatee", + "proxy_type", + "delay" + ], + "args_type_name": [ + "AccountId", + "AccountId", + "ProxyType", + "BlockNumber" + ] + } + ], + "events_value": { + "type": 82 + }, + "constants": [ + { + "name": "ProxyDepositBase", + "type": "U128", + "type_value": 6, + "constants_value": "00227aaa040000000000000000000000", + "docs": [ + " The base amount of currency needed to reserve for creating a proxy.", + "", + " This is held for an additional storage item whose value size is", + " `sizeof(Balance)` bytes and whose key size is `sizeof(AccountId)` bytes." + ] + }, + { + "name": "ProxyDepositFactor", + "type": "U128", + "type_value": 6, + "constants_value": "408af701000000000000000000000000", + "docs": [ + " The amount of currency needed per proxy added.", + "", + " This is held for adding 32 bytes plus an instance of `ProxyType` more into a", + " pre-existing storage value. 
Thus, when configuring `ProxyDepositFactor` one should take", + " into account `32 + proxy_type.encode().len()` bytes of data." + ] + }, + { + "name": "MaxProxies", + "type": "U32", + "type_value": 4, + "constants_value": "20000000", + "docs": [ + " The maximum amount of proxies allowed for a single account." + ] + }, + { + "name": "MaxPending", + "type": "U32", + "type_value": 4, + "constants_value": "20000000", + "docs": [ + " The maximum amount of time-delayed announcements that are allowed to be pending." + ] + }, + { + "name": "AnnouncementDepositBase", + "type": "U128", + "type_value": 6, + "constants_value": "0034f4aa040000000000000000000000", + "docs": [ + " The base amount of currency needed to reserve for creating an announcement.", + "", + " This is held when a new storage item holding a `Balance` is created (typically 16", + " bytes)." + ] + }, + { + "name": "AnnouncementDepositFactor", + "type": "U128", + "type_value": 6, + "constants_value": "8014ef03000000000000000000000000", + "docs": [ + " The amount of currency needed per announcement made.", + "", + " This is held for adding an `AccountId`, `Hash` and `BlockNumber` (typically 68 bytes)", + " into a pre-existing storage value." + ] + } + ], + "errors": [ + { + "name": "TooMany", + "doc": [ + "There are too many proxies registered or too many announcements pending." + ] + }, + { + "name": "NotFound", + "doc": [ + "Proxy registration not found." + ] + }, + { + "name": "NotProxy", + "doc": [ + "Sender is not a proxy of the account to be proxied." + ] + }, + { + "name": "Unproxyable", + "doc": [ + "A call which is incompatible with the proxy type's filter was attempted." + ] + }, + { + "name": "Duplicate", + "doc": [ + "Account is already a proxy." + ] + }, + { + "name": "NoPermission", + "doc": [ + "Call may not be made by proxy because it may escalate its privileges." + ] + }, + { + "name": "Unannounced", + "doc": [ + "Announcement, if made at all, was made too recently." 
+ ] + }, + { + "name": "NoSelfProxy", + "doc": [ + "Cannot add self as proxy." + ] + } + ], + "errors_value": { + "type": 273 + }, + "index": 42 + }, + { + "name": "Assets", + "prefix": "Assets", + "storage": [ + { + "name": "Asset", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat" + ], + "key_vec": [ + "U32" + ], + "value": "pallet_assets:types:AssetDetails", + "keys_id": 4, + "value_id": 274 + } + }, + "fallback": "0x00", + "docs": [ + " Details of an asset." + ] + }, + { + "name": "Account", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat", + "Blake2_128Concat" + ], + "key_vec": [ + "U32", + "AccountId" + ], + "value": "pallet_assets:types:AssetAccount", + "keys_id": 275, + "value_id": 276 + } + }, + "fallback": "0x00", + "docs": [ + " The holdings of a specific account for a specific asset." + ] + }, + { + "name": "Approvals", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat", + "Blake2_128Concat", + "Blake2_128Concat" + ], + "key_vec": [ + "U32", + "AccountId", + "AccountId" + ], + "value": "pallet_assets:types:Approval", + "keys_id": 278, + "value_id": 279 + } + }, + "fallback": "0x00", + "docs": [ + " Approved balance transfers. First balance is the amount approved for transfer. Second", + " is the amount of `T::Currency` reserved for storing this.", + " First key is the asset ID, second key is the owner and third key is the delegate." + ] + }, + { + "name": "Metadata", + "modifier": "Default", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat" + ], + "key_vec": [ + "U32" + ], + "value": "pallet_assets:types:AssetMetadata", + "keys_id": 4, + "value_id": 280 + } + }, + "fallback": "0x0000000000000000000000000000000000000000", + "docs": [ + " Metadata of an asset." 
+ ] + } + ], + "calls": [ + { + "lookup": "3200", + "name": "create", + "docs": [ + "Issue a new class of fungible assets from a public origin.", + "", + "This new asset class has no assets initially and its owner is the origin.", + "", + "The origin must be Signed and the sender must have sufficient funds free.", + "", + "Funds of sender are reserved by `AssetDeposit`.", + "", + "Parameters:", + "- `id`: The identifier of the new asset. This must not be currently in use to identify", + "an existing asset.", + "- `admin`: The admin of this class of assets. The admin is the initial address of each", + "member of the asset class's admin team.", + "- `min_balance`: The minimum balance of this new asset that any single account must", + "have. If an account's balance is reduced below this, then it collapses to zero.", + "", + "Emits `Created` event when successful.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "admin", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "min_balance", + "type": "U128", + "type_name": "Balance" + } + ] + }, + { + "lookup": "3201", + "name": "force_create", + "docs": [ + "Issue a new class of fungible assets from a privileged origin.", + "", + "This new asset class has no assets initially.", + "", + "The origin must conform to `ForceOrigin`.", + "", + "Unlike `create`, no funds are reserved.", + "", + "- `id`: The identifier of the new asset. This must not be currently in use to identify", + "an existing asset.", + "- `owner`: The owner of this class of assets. The owner has full superuser permissions", + "over this asset, but may later change and configure the permissions using", + "`transfer_ownership` and `set_team`.", + "- `min_balance`: The minimum balance of this new asset that any single account must", + "have. 
If an account's balance is reduced below this, then it collapses to zero.", + "", + "Emits `ForceCreated` event when successful.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "owner", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "is_sufficient", + "type": "Bool", + "type_name": "bool" + }, + { + "name": "min_balance", + "type": "compact", + "type_name": "Balance" + } + ] + }, + { + "lookup": "3202", + "name": "destroy", + "docs": [ + "Destroy a class of fungible assets.", + "", + "The origin must conform to `ForceOrigin` or must be Signed and the sender must be the", + "owner of the asset `id`.", + "", + "- `id`: The identifier of the asset to be destroyed. This must identify an existing", + "asset.", + "", + "Emits `Destroyed` event when successful.", + "", + "NOTE: It can be helpful to first freeze an asset before destroying it so that you", + "can provide accurate witness information and prevent users from manipulating state", + "in a way that can make it harder to destroy.", + "", + "Weight: `O(c + p + a)` where:", + "- `c = (witness.accounts - witness.sufficients)`", + "- `s = witness.sufficients`", + "- `a = witness.approvals`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "witness", + "type": "pallet_assets:types:DestroyWitness", + "type_name": "DestroyWitness" + } + ] + }, + { + "lookup": "3203", + "name": "mint", + "docs": [ + "Mint assets of a particular class.", + "", + "The origin must be Signed and the sender must be the Issuer of the asset `id`.", + "", + "- `id`: The identifier of the asset to have some amount minted.", + "- `beneficiary`: The account to be credited with the minted assets.", + "- `amount`: The amount of the asset to be minted.", + "", + "Emits `Issued` event when successful.", + "", + "Weight: `O(1)`", + "Modes: Pre-existing balance of 
`beneficiary`; Account pre-existence of `beneficiary`." + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "beneficiary", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "amount", + "type": "compact", + "type_name": "Balance" + } + ] + }, + { + "lookup": "3204", + "name": "burn", + "docs": [ + "Reduce the balance of `who` by as much as possible up to `amount` assets of `id`.", + "", + "Origin must be Signed and the sender should be the Manager of the asset `id`.", + "", + "Bails with `NoAccount` if the `who` is already dead.", + "", + "- `id`: The identifier of the asset to have some amount burned.", + "- `who`: The account to be debited from.", + "- `amount`: The maximum amount by which `who`'s balance should be reduced.", + "", + "Emits `Burned` with the actual amount burned. If this takes the balance to below the", + "minimum for the asset, then the amount burned is increased to take it to zero.", + "", + "Weight: `O(1)`", + "Modes: Post-existence of `who`; Pre & post Zombie-status of `who`." + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "who", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "amount", + "type": "compact", + "type_name": "Balance" + } + ] + }, + { + "lookup": "3205", + "name": "transfer", + "docs": [ + "Move some assets from the sender account to another.", + "", + "Origin must be Signed.", + "", + "- `id`: The identifier of the asset to have some amount transferred.", + "- `target`: The account to be credited.", + "- `amount`: The amount by which the sender's balance of assets should be reduced and", + "`target`'s balance increased. The amount actually transferred may be slightly greater in", + "the case that the transfer would otherwise take the sender balance above zero but below", + "the minimum balance. 
Must be greater than zero.", + "", + "Emits `Transferred` with the actual amount transferred. If this takes the source balance", + "to below the minimum for the asset, then the amount transferred is increased to take it", + "to zero.", + "", + "Weight: `O(1)`", + "Modes: Pre-existence of `target`; Post-existence of sender; Account pre-existence of", + "`target`." + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "target", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "amount", + "type": "compact", + "type_name": "Balance" + } + ] + }, + { + "lookup": "3206", + "name": "transfer_keep_alive", + "docs": [ + "Move some assets from the sender account to another, keeping the sender account alive.", + "", + "Origin must be Signed.", + "", + "- `id`: The identifier of the asset to have some amount transferred.", + "- `target`: The account to be credited.", + "- `amount`: The amount by which the sender's balance of assets should be reduced and", + "`target`'s balance increased. The amount actually transferred may be slightly greater in", + "the case that the transfer would otherwise take the sender balance above zero but below", + "the minimum balance. Must be greater than zero.", + "", + "Emits `Transferred` with the actual amount transferred. If this takes the source balance", + "to below the minimum for the asset, then the amount transferred is increased to take it", + "to zero.", + "", + "Weight: `O(1)`", + "Modes: Pre-existence of `target`; Post-existence of sender; Account pre-existence of", + "`target`." 
+ ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "target", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "amount", + "type": "compact", + "type_name": "Balance" + } + ] + }, + { + "lookup": "3207", + "name": "force_transfer", + "docs": [ + "Move some assets from one account to another.", + "", + "Origin must be Signed and the sender should be the Admin of the asset `id`.", + "", + "- `id`: The identifier of the asset to have some amount transferred.", + "- `source`: The account to be debited.", + "- `dest`: The account to be credited.", + "- `amount`: The amount by which the `source`'s balance of assets should be reduced and", + "`dest`'s balance increased. The amount actually transferred may be slightly greater in", + "the case that the transfer would otherwise take the `source` balance above zero but", + "below the minimum balance. Must be greater than zero.", + "", + "Emits `Transferred` with the actual amount transferred. If this takes the source balance", + "to below the minimum for the asset, then the amount transferred is increased to take it", + "to zero.", + "", + "Weight: `O(1)`", + "Modes: Pre-existence of `dest`; Post-existence of `source`; Account pre-existence of", + "`dest`." 
+ ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "source", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "dest", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "amount", + "type": "compact", + "type_name": "Balance" + } + ] + }, + { + "lookup": "3208", + "name": "freeze", + "docs": [ + "Disallow further unprivileged transfers from an account.", + "", + "Origin must be Signed and the sender should be the Freezer of the asset `id`.", + "", + "- `id`: The identifier of the asset to be frozen.", + "- `who`: The account to be frozen.", + "", + "Emits `Frozen`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "who", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + } + ] + }, + { + "lookup": "3209", + "name": "thaw", + "docs": [ + "Allow unprivileged transfers from an account again.", + "", + "Origin must be Signed and the sender should be the Admin of the asset `id`.", + "", + "- `id`: The identifier of the asset to be frozen.", + "- `who`: The account to be unfrozen.", + "", + "Emits `Thawed`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "who", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + } + ] + }, + { + "lookup": "320a", + "name": "freeze_asset", + "docs": [ + "Disallow further unprivileged transfers for the asset class.", + "", + "Origin must be Signed and the sender should be the Freezer of the asset `id`.", + "", + "- `id`: The identifier of the asset to be frozen.", + "", + "Emits `Frozen`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + } + ] + }, + { + "lookup": "320b", + "name": "thaw_asset", + "docs": [ + "Allow unprivileged 
transfers for the asset again.", + "", + "Origin must be Signed and the sender should be the Admin of the asset `id`.", + "", + "- `id`: The identifier of the asset to be thawed.", + "", + "Emits `Thawed`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + } + ] + }, + { + "lookup": "320c", + "name": "transfer_ownership", + "docs": [ + "Change the Owner of an asset.", + "", + "Origin must be Signed and the sender should be the Owner of the asset `id`.", + "", + "- `id`: The identifier of the asset.", + "- `owner`: The new Owner of this asset.", + "", + "Emits `OwnerChanged`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "owner", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + } + ] + }, + { + "lookup": "320d", + "name": "set_team", + "docs": [ + "Change the Issuer, Admin and Freezer of an asset.", + "", + "Origin must be Signed and the sender should be the Owner of the asset `id`.", + "", + "- `id`: The identifier of the asset to be frozen.", + "- `issuer`: The new Issuer of this asset.", + "- `admin`: The new Admin of this asset.", + "- `freezer`: The new Freezer of this asset.", + "", + "Emits `TeamChanged`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "issuer", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "admin", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "freezer", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + } + ] + }, + { + "lookup": "320e", + "name": "set_metadata", + "docs": [ + "Set the metadata for an asset.", + "", + "Origin must be Signed and the sender should be the Owner of the asset `id`.", + "", + "Funds of sender are reserved according to the formula:", + 
"`MetadataDepositBase + MetadataDepositPerByte * (name.len + symbol.len)` taking into", + "account any already reserved funds.", + "", + "- `id`: The identifier of the asset to update.", + "- `name`: The user friendly name of this asset. Limited in length by `StringLimit`.", + "- `symbol`: The exchange symbol for this asset. Limited in length by `StringLimit`.", + "- `decimals`: The number of decimals this asset uses to represent one unit.", + "", + "Emits `MetadataSet`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "name", + "type": "Vec", + "type_name": "Bytes" + }, + { + "name": "symbol", + "type": "Vec", + "type_name": "Bytes" + }, + { + "name": "decimals", + "type": "U8", + "type_name": "u8" + } + ] + }, + { + "lookup": "320f", + "name": "clear_metadata", + "docs": [ + "Clear the metadata for an asset.", + "", + "Origin must be Signed and the sender should be the Owner of the asset `id`.", + "", + "Any deposit is freed for the asset owner.", + "", + "- `id`: The identifier of the asset to clear.", + "", + "Emits `MetadataCleared`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + } + ] + }, + { + "lookup": "3210", + "name": "force_set_metadata", + "docs": [ + "Force the metadata for an asset to some value.", + "", + "Origin must be ForceOrigin.", + "", + "Any deposit is left alone.", + "", + "- `id`: The identifier of the asset to update.", + "- `name`: The user friendly name of this asset. Limited in length by `StringLimit`.", + "- `symbol`: The exchange symbol for this asset. Limited in length by `StringLimit`.", + "- `decimals`: The number of decimals this asset uses to represent one unit.", + "", + "Emits `MetadataSet`.", + "", + "Weight: `O(N + S)` where N and S are the length of the name and symbol respectively." 
+ ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "name", + "type": "Vec", + "type_name": "Bytes" + }, + { + "name": "symbol", + "type": "Vec", + "type_name": "Bytes" + }, + { + "name": "decimals", + "type": "U8", + "type_name": "u8" + }, + { + "name": "is_frozen", + "type": "Bool", + "type_name": "bool" + } + ] + }, + { + "lookup": "3211", + "name": "force_clear_metadata", + "docs": [ + "Clear the metadata for an asset.", + "", + "Origin must be ForceOrigin.", + "", + "Any deposit is returned.", + "", + "- `id`: The identifier of the asset to clear.", + "", + "Emits `MetadataCleared`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + } + ] + }, + { + "lookup": "3212", + "name": "force_asset_status", + "docs": [ + "Alter the attributes of a given asset.", + "", + "Origin must be `ForceOrigin`.", + "", + "- `id`: The identifier of the asset.", + "- `owner`: The new Owner of this asset.", + "- `issuer`: The new Issuer of this asset.", + "- `admin`: The new Admin of this asset.", + "- `freezer`: The new Freezer of this asset.", + "- `min_balance`: The minimum balance of this new asset that any single account must", + "have. If an account's balance is reduced below this, then it collapses to zero.", + "- `is_sufficient`: Whether a non-zero balance of this asset is deposit of sufficient", + "value to account for the state bloat associated with its balance storage. 
If set to", + "`true`, then non-zero balances may be stored without a `consumer` reference (and thus", + "an ED in the Balances pallet or whatever else is used to control user-account state", + "growth).", + "- `is_frozen`: Whether this asset class is frozen except for permissioned/admin", + "instructions.", + "", + "Emits `AssetStatusChanged` with the identity of the asset.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "owner", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "issuer", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "admin", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "freezer", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "min_balance", + "type": "compact", + "type_name": "Balance" + }, + { + "name": "is_sufficient", + "type": "Bool", + "type_name": "bool" + }, + { + "name": "is_frozen", + "type": "Bool", + "type_name": "bool" + } + ] + }, + { + "lookup": "3213", + "name": "approve_transfer", + "docs": [ + "Approve an amount of asset for transfer by a delegated third-party account.", + "", + "Origin must be Signed.", + "", + "Ensures that `ApprovalDeposit` worth of `Currency` is reserved from signing account", + "for the purpose of holding the approval. If some non-zero amount of assets is already", + "approved from signing account to `delegate`, then it is topped up or unreserved to", + "meet the right value.", + "", + "NOTE: The signing account does not need to own `amount` of assets at the point of", + "making this call.", + "", + "- `id`: The identifier of the asset.", + "- `delegate`: The account to delegate permission to transfer asset.", + "- `amount`: The amount of asset that may be transferred by `delegate`. 
If there is", + "already an approval in place, then this acts additively.", + "", + "Emits `ApprovedTransfer` on success.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "delegate", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "amount", + "type": "compact", + "type_name": "Balance" + } + ] + }, + { + "lookup": "3214", + "name": "cancel_approval", + "docs": [ + "Cancel all of some asset approved for delegated transfer by a third-party account.", + "", + "Origin must be Signed and there must be an approval in place between signer and", + "`delegate`.", + "", + "Unreserves any deposit previously reserved by `approve_transfer` for the approval.", + "", + "- `id`: The identifier of the asset.", + "- `delegate`: The account delegated permission to transfer asset.", + "", + "Emits `ApprovalCancelled` on success.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "delegate", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + } + ] + }, + { + "lookup": "3215", + "name": "force_cancel_approval", + "docs": [ + "Cancel all of some asset approved for delegated transfer by a third-party account.", + "", + "Origin must be either ForceOrigin or Signed origin with the signer being the Admin", + "account of the asset `id`.", + "", + "Unreserves any deposit previously reserved by `approve_transfer` for the approval.", + "", + "- `id`: The identifier of the asset.", + "- `delegate`: The account delegated permission to transfer asset.", + "", + "Emits `ApprovalCancelled` on success.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "owner", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "delegate", + "type": 
"sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + } + ] + }, + { + "lookup": "3216", + "name": "transfer_approved", + "docs": [ + "Transfer some asset balance from a previously delegated account to some third-party", + "account.", + "", + "Origin must be Signed and there must be an approval in place by the `owner` to the", + "signer.", + "", + "If the entire amount approved for transfer is transferred, then any deposit previously", + "reserved by `approve_transfer` is unreserved.", + "", + "- `id`: The identifier of the asset.", + "- `owner`: The account which previously approved for a transfer of at least `amount` and", + "from which the asset balance will be withdrawn.", + "- `destination`: The account to which the asset balance of `amount` will be transferred.", + "- `amount`: The amount of assets to transfer.", + "", + "Emits `TransferredApproved` on success.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "owner", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "destination", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "amount", + "type": "compact", + "type_name": "Balance" + } + ] + }, + { + "lookup": "3217", + "name": "touch", + "docs": [ + "Create an asset account for non-provider assets.", + "", + "A deposit will be taken from the signer account.", + "", + "- `origin`: Must be Signed; the signer account must have sufficient funds for a deposit", + " to be taken.", + "- `id`: The identifier of the asset for the account to be created.", + "", + "Emits `Touched` event when successful." 
+ ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + } + ] + }, + { + "lookup": "3218", + "name": "refund", + "docs": [ + "Return the deposit (if any) of an asset account.", + "", + "The origin must be Signed.", + "", + "- `id`: The identifier of the asset for the account to be created.", + "- `allow_burn`: If `true` then assets may be destroyed in order to complete the refund.", + "", + "Emits `Refunded` event when successful." + ], + "args": [ + { + "name": "id", + "type": "compact", + "type_name": "AssetId" + }, + { + "name": "allow_burn", + "type": "Bool", + "type_name": "bool" + } + ] + } + ], + "calls_value": { + "type": 250 + }, + "events": [ + { + "lookup": "3200", + "name": "Created", + "docs": [ + "Some asset class was created." + ], + "args": [ + "U32", + "AccountId", + "AccountId" + ], + "args_name": [ + "asset_id", + "creator", + "owner" + ], + "args_type_name": [ + "AssetId", + "AccountId", + "AccountId" + ] + }, + { + "lookup": "3201", + "name": "Issued", + "docs": [ + "Some assets were issued." + ], + "args": [ + "U32", + "AccountId", + "U128" + ], + "args_name": [ + "asset_id", + "owner", + "total_supply" + ], + "args_type_name": [ + "AssetId", + "AccountId", + "Balance" + ] + }, + { + "lookup": "3202", + "name": "Transferred", + "docs": [ + "Some assets were transferred." + ], + "args": [ + "U32", + "AccountId", + "AccountId", + "U128" + ], + "args_name": [ + "asset_id", + "from", + "to", + "amount" + ], + "args_type_name": [ + "AssetId", + "AccountId", + "AccountId", + "Balance" + ] + }, + { + "lookup": "3203", + "name": "Burned", + "docs": [ + "Some assets were destroyed." + ], + "args": [ + "U32", + "AccountId", + "U128" + ], + "args_name": [ + "asset_id", + "owner", + "balance" + ], + "args_type_name": [ + "AssetId", + "AccountId", + "Balance" + ] + }, + { + "lookup": "3204", + "name": "TeamChanged", + "docs": [ + "The management team changed." 
+ ], + "args": [ + "U32", + "AccountId", + "AccountId", + "AccountId" + ], + "args_name": [ + "asset_id", + "issuer", + "admin", + "freezer" + ], + "args_type_name": [ + "AssetId", + "AccountId", + "AccountId", + "AccountId" + ] + }, + { + "lookup": "3205", + "name": "OwnerChanged", + "docs": [ + "The owner changed." + ], + "args": [ + "U32", + "AccountId" + ], + "args_name": [ + "asset_id", + "owner" + ], + "args_type_name": [ + "AssetId", + "AccountId" + ] + }, + { + "lookup": "3206", + "name": "Frozen", + "docs": [ + "Some account `who` was frozen." + ], + "args": [ + "U32", + "AccountId" + ], + "args_name": [ + "asset_id", + "who" + ], + "args_type_name": [ + "AssetId", + "AccountId" + ] + }, + { + "lookup": "3207", + "name": "Thawed", + "docs": [ + "Some account `who` was thawed." + ], + "args": [ + "U32", + "AccountId" + ], + "args_name": [ + "asset_id", + "who" + ], + "args_type_name": [ + "AssetId", + "AccountId" + ] + }, + { + "lookup": "3208", + "name": "AssetFrozen", + "docs": [ + "Some asset `asset_id` was frozen." + ], + "args": [ + "U32" + ], + "args_name": [ + "asset_id" + ], + "args_type_name": [ + "AssetId" + ] + }, + { + "lookup": "3209", + "name": "AssetThawed", + "docs": [ + "Some asset `asset_id` was thawed." + ], + "args": [ + "U32" + ], + "args_name": [ + "asset_id" + ], + "args_type_name": [ + "AssetId" + ] + }, + { + "lookup": "320a", + "name": "Destroyed", + "docs": [ + "An asset class was destroyed." + ], + "args": [ + "U32" + ], + "args_name": [ + "asset_id" + ], + "args_type_name": [ + "AssetId" + ] + }, + { + "lookup": "320b", + "name": "ForceCreated", + "docs": [ + "Some asset class was force-created." + ], + "args": [ + "U32", + "AccountId" + ], + "args_name": [ + "asset_id", + "owner" + ], + "args_type_name": [ + "AssetId", + "AccountId" + ] + }, + { + "lookup": "320c", + "name": "MetadataSet", + "docs": [ + "New metadata has been set for an asset." 
+ ], + "args": [ + "U32", + "Vec", + "Vec", + "U8", + "Bool" + ], + "args_name": [ + "asset_id", + "name", + "symbol", + "decimals", + "is_frozen" + ], + "args_type_name": [ + "AssetId", + "Bytes", + "Bytes", + "u8", + "bool" + ] + }, + { + "lookup": "320d", + "name": "MetadataCleared", + "docs": [ + "Metadata has been cleared for an asset." + ], + "args": [ + "U32" + ], + "args_name": [ + "asset_id" + ], + "args_type_name": [ + "AssetId" + ] + }, + { + "lookup": "320e", + "name": "ApprovedTransfer", + "docs": [ + "(Additional) funds have been approved for transfer to a destination account." + ], + "args": [ + "U32", + "AccountId", + "AccountId", + "U128" + ], + "args_name": [ + "asset_id", + "source", + "delegate", + "amount" + ], + "args_type_name": [ + "AssetId", + "AccountId", + "AccountId", + "Balance" + ] + }, + { + "lookup": "320f", + "name": "ApprovalCancelled", + "docs": [ + "An approval for account `delegate` was cancelled by `owner`." + ], + "args": [ + "U32", + "AccountId", + "AccountId" + ], + "args_name": [ + "asset_id", + "owner", + "delegate" + ], + "args_type_name": [ + "AssetId", + "AccountId", + "AccountId" + ] + }, + { + "lookup": "3210", + "name": "TransferredApproved", + "docs": [ + "An `amount` was transferred in its entirety from `owner` to `destination` by", + "the approved `delegate`." + ], + "args": [ + "U32", + "AccountId", + "AccountId", + "AccountId", + "U128" + ], + "args_name": [ + "asset_id", + "owner", + "delegate", + "destination", + "amount" + ], + "args_type_name": [ + "AssetId", + "AccountId", + "AccountId", + "AccountId", + "Balance" + ] + }, + { + "lookup": "3211", + "name": "AssetStatusChanged", + "docs": [ + "An asset has had its attributes changed by the `Force` origin." 
+ ], + "args": [ + "U32" + ], + "args_name": [ + "asset_id" + ], + "args_type_name": [ + "AssetId" + ] + } + ], + "events_value": { + "type": 85 + }, + "constants": [ + { + "name": "AssetDeposit", + "type": "U128", + "type_value": 6, + "constants_value": "0010a5d4e80000000000000000000000", + "docs": [ + " The basic amount of funds that must be reserved for an asset." + ] + }, + { + "name": "AssetAccountDeposit", + "type": "U128", + "type_value": 6, + "constants_value": "00ec0ba9040000000000000000000000", + "docs": [ + " The amount of funds that must be reserved for a non-provider asset account to be", + " maintained." + ] + }, + { + "name": "MetadataDepositBase", + "type": "U128", + "type_value": 6, + "constants_value": "006125ac040000000000000000000000", + "docs": [ + " The basic amount of funds that must be reserved when adding metadata to your asset." + ] + }, + { + "name": "MetadataDepositPerByte", + "type": "U128", + "type_value": 6, + "constants_value": "40420f00000000000000000000000000", + "docs": [ + " The additional funds that must be reserved for the number of bytes you store in your", + " metadata." + ] + }, + { + "name": "ApprovalDeposit", + "type": "U128", + "type_value": 6, + "constants_value": "00ca9a3b000000000000000000000000", + "docs": [ + " The amount of funds that must be reserved when creating a new approval." + ] + }, + { + "name": "StringLimit", + "type": "U32", + "type_value": 4, + "constants_value": "32000000", + "docs": [ + " The maximum length of a name or symbol stored on-chain." + ] + } + ], + "errors": [ + { + "name": "BalanceLow", + "doc": [ + "Account balance must be greater than or equal to the transfer amount." + ] + }, + { + "name": "NoAccount", + "doc": [ + "The account to alter does not exist." + ] + }, + { + "name": "NoPermission", + "doc": [ + "The signing account has no permission to do the operation." + ] + }, + { + "name": "Unknown", + "doc": [ + "The given asset ID is unknown." 
+ ] + }, + { + "name": "Frozen", + "doc": [ + "The origin account is frozen." + ] + }, + { + "name": "InUse", + "doc": [ + "The asset ID is already taken." + ] + }, + { + "name": "BadWitness", + "doc": [ + "Invalid witness data given." + ] + }, + { + "name": "MinBalanceZero", + "doc": [ + "Minimum balance should be non-zero." + ] + }, + { + "name": "NoProvider", + "doc": [ + "Unable to increment the consumer reference counters on the account. Either no provider", + "reference exists to allow a non-zero balance of a non-self-sufficient asset, or the", + "maximum number of consumers has been reached." + ] + }, + { + "name": "BadMetadata", + "doc": [ + "Invalid metadata given." + ] + }, + { + "name": "Unapproved", + "doc": [ + "No approval exists that would allow the transfer." + ] + }, + { + "name": "WouldDie", + "doc": [ + "The source account would not survive the transfer and it needs to stay alive." + ] + }, + { + "name": "AlreadyExists", + "doc": [ + "The asset-account already exists." + ] + }, + { + "name": "NoDeposit", + "doc": [ + "The asset-account doesn't have an associated deposit." + ] + }, + { + "name": "WouldBurn", + "doc": [ + "The operation would result in funds being burned." + ] + } + ], + "errors_value": { + "type": 282 + }, + "index": 50 + }, + { + "name": "Uniques", + "prefix": "Uniques", + "storage": [ + { + "name": "Class", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat" + ], + "key_vec": [ + "U32" + ], + "value": "pallet_uniques:types:ClassDetails", + "keys_id": 4, + "value_id": 283 + } + }, + "fallback": "0x00", + "docs": [ + " Details of an asset class." 
+ ] + }, + { + "name": "Account", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat", + "Blake2_128Concat", + "Blake2_128Concat" + ], + "key_vec": [ + "AccountId", + "U32", + "U32" + ], + "value": "NULL", + "keys_id": 284, + "value_id": 79 + } + }, + "fallback": "0x00", + "docs": [ + " The assets held by any given account; set out this way so that assets owned by a single", + " account can be enumerated." + ] + }, + { + "name": "ClassAccount", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat", + "Blake2_128Concat" + ], + "key_vec": [ + "AccountId", + "U32" + ], + "value": "NULL", + "keys_id": 285, + "value_id": 79 + } + }, + "fallback": "0x00", + "docs": [ + " The classes owned by any given account; set out this way so that classes owned by a single", + " account can be enumerated." + ] + }, + { + "name": "Asset", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat", + "Blake2_128Concat" + ], + "key_vec": [ + "U32", + "U32" + ], + "value": "pallet_uniques:types:InstanceDetails", + "keys_id": 96, + "value_id": 286 + } + }, + "fallback": "0x00", + "docs": [ + " The assets in existence and their ownership details." + ] + }, + { + "name": "ClassMetadataOf", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat" + ], + "key_vec": [ + "U32" + ], + "value": "pallet_uniques:types:ClassMetadata", + "keys_id": 4, + "value_id": 287 + } + }, + "fallback": "0x00", + "docs": [ + " Metadata of an asset class." 
+ ] + }, + { + "name": "InstanceMetadataOf", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat", + "Blake2_128Concat" + ], + "key_vec": [ + "U32", + "U32" + ], + "value": "pallet_uniques:types:InstanceMetadata", + "keys_id": 96, + "value_id": 288 + } + }, + "fallback": "0x00", + "docs": [ + " Metadata of an asset instance." + ] + }, + { + "name": "Attribute", + "modifier": "Optional", + "type": { + "origin": "Map", + "n_map_type": { + "hashers": [ + "Blake2_128Concat", + "Blake2_128Concat", + "Blake2_128Concat" + ], + "key_vec": [ + "U32", + "option", + "Vec" + ], + "value": "Tuple:VecU128", + "keys_id": 289, + "value_id": 290 + } + }, + "fallback": "0x00", + "docs": [ + " Metadata of an asset class." + ] + } + ], + "calls": [ + { + "lookup": "3300", + "name": "create", + "docs": [ + "Issue a new class of non-fungible assets from a public origin.", + "", + "This new asset class has no assets initially and its owner is the origin.", + "", + "The origin must be Signed and the sender must have sufficient funds free.", + "", + "`AssetDeposit` funds of sender are reserved.", + "", + "Parameters:", + "- `class`: The identifier of the new asset class. This must not be currently in use.", + "- `admin`: The admin of this class of assets. 
The admin is the initial address of each", + "member of the asset class's admin team.", + "", + "Emits `Created` event when successful.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "admin", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + } + ] + }, + { + "lookup": "3301", + "name": "force_create", + "docs": [ + "Issue a new class of non-fungible assets from a privileged origin.", + "", + "This new asset class has no assets initially.", + "", + "The origin must conform to `ForceOrigin`.", + "", + "Unlike `create`, no funds are reserved.", + "", + "- `class`: The identifier of the new asset. This must not be currently in use.", + "- `owner`: The owner of this class of assets. The owner has full superuser permissions", + "over this asset, but may later change and configure the permissions using", + "`transfer_ownership` and `set_team`.", + "", + "Emits `ForceCreated` event when successful.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "owner", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "free_holding", + "type": "Bool", + "type_name": "bool" + } + ] + }, + { + "lookup": "3302", + "name": "destroy", + "docs": [ + "Destroy a class of fungible assets.", + "", + "The origin must conform to `ForceOrigin` or must be `Signed` and the sender must be the", + "owner of the asset `class`.", + "", + "- `class`: The identifier of the asset class to be destroyed.", + "- `witness`: Information on the instances minted in the asset class. 
This must be", + "correct.", + "", + "Emits `Destroyed` event when successful.", + "", + "Weight: `O(n + m)` where:", + "- `n = witness.instances`", + "- `m = witness.instance_metadatas`", + "- `a = witness.attributes`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "witness", + "type": "pallet_uniques:types:DestroyWitness", + "type_name": "DestroyWitness" + } + ] + }, + { + "lookup": "3303", + "name": "mint", + "docs": [ + "Mint an asset instance of a particular class.", + "", + "The origin must be Signed and the sender must be the Issuer of the asset `class`.", + "", + "- `class`: The class of the asset to be minted.", + "- `instance`: The instance value of the asset to be minted.", + "- `beneficiary`: The initial owner of the minted asset.", + "", + "Emits `Issued` event when successful.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "instance", + "type": "compact", + "type_name": "InstanceId" + }, + { + "name": "owner", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + } + ] + }, + { + "lookup": "3304", + "name": "burn", + "docs": [ + "Destroy a single asset instance.", + "", + "Origin must be Signed and the sender should be the Admin of the asset `class`.", + "", + "- `class`: The class of the asset to be burned.", + "- `instance`: The instance of the asset to be burned.", + "- `check_owner`: If `Some` then the operation will fail with `WrongOwner` unless the", + " asset is owned by this value.", + "", + "Emits `Burned` with the actual amount burned.", + "", + "Weight: `O(1)`", + "Modes: `check_owner.is_some()`." 
+ ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "instance", + "type": "compact", + "type_name": "InstanceId" + }, + { + "name": "check_owner", + "type": "option", + "type_name": "Option<::Source>" + } + ] + }, + { + "lookup": "3305", + "name": "transfer", + "docs": [ + "Move an asset from the sender account to another.", + "", + "Origin must be Signed and the signing account must be either:", + "- the Admin of the asset `class`;", + "- the Owner of the asset `instance`;", + "- the approved delegate for the asset `instance` (in this case, the approval is reset).", + "", + "Arguments:", + "- `class`: The class of the asset to be transferred.", + "- `instance`: The instance of the asset to be transferred.", + "- `dest`: The account to receive ownership of the asset.", + "", + "Emits `Transferred`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "instance", + "type": "compact", + "type_name": "InstanceId" + }, + { + "name": "dest", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + } + ] + }, + { + "lookup": "3306", + "name": "redeposit", + "docs": [ + "Reevaluate the deposits on some assets.", + "", + "Origin must be Signed and the sender should be the Owner of the asset `class`.", + "", + "- `class`: The class of the asset to be frozen.", + "- `instances`: The instances of the asset class whose deposits will be reevaluated.", + "", + "NOTE: This exists as a best-effort function. Any asset instances which are unknown or", + "in the case that the owner account does not have reservable funds to pay for a", + "deposit increase are ignored. 
Generally the owner isn't going to call this on instances", + "whose existing deposit is less than the refreshed deposit as it would only cost them,", + "so it's of little consequence.", + "", + "It will still return an error in the case that the class is unknown of the signer is", + "not permitted to call it.", + "", + "Weight: `O(instances.len())`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "instances", + "type": "Vec", + "type_name": "Vec" + } + ] + }, + { + "lookup": "3307", + "name": "freeze", + "docs": [ + "Disallow further unprivileged transfer of an asset instance.", + "", + "Origin must be Signed and the sender should be the Freezer of the asset `class`.", + "", + "- `class`: The class of the asset to be frozen.", + "- `instance`: The instance of the asset to be frozen.", + "", + "Emits `Frozen`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "instance", + "type": "compact", + "type_name": "InstanceId" + } + ] + }, + { + "lookup": "3308", + "name": "thaw", + "docs": [ + "Re-allow unprivileged transfer of an asset instance.", + "", + "Origin must be Signed and the sender should be the Freezer of the asset `class`.", + "", + "- `class`: The class of the asset to be thawed.", + "- `instance`: The instance of the asset to be thawed.", + "", + "Emits `Thawed`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "instance", + "type": "compact", + "type_name": "InstanceId" + } + ] + }, + { + "lookup": "3309", + "name": "freeze_class", + "docs": [ + "Disallow further unprivileged transfers for a whole asset class.", + "", + "Origin must be Signed and the sender should be the Freezer of the asset `class`.", + "", + "- `class`: The asset class to be frozen.", + "", + "Emits `ClassFrozen`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + 
"name": "class", + "type": "compact", + "type_name": "ClassId" + } + ] + }, + { + "lookup": "330a", + "name": "thaw_class", + "docs": [ + "Re-allow unprivileged transfers for a whole asset class.", + "", + "Origin must be Signed and the sender should be the Admin of the asset `class`.", + "", + "- `class`: The class to be thawed.", + "", + "Emits `ClassThawed`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + } + ] + }, + { + "lookup": "330b", + "name": "transfer_ownership", + "docs": [ + "Change the Owner of an asset class.", + "", + "Origin must be Signed and the sender should be the Owner of the asset `class`.", + "", + "- `class`: The asset class whose owner should be changed.", + "- `owner`: The new Owner of this asset class.", + "", + "Emits `OwnerChanged`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "owner", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + } + ] + }, + { + "lookup": "330c", + "name": "set_team", + "docs": [ + "Change the Issuer, Admin and Freezer of an asset class.", + "", + "Origin must be Signed and the sender should be the Owner of the asset `class`.", + "", + "- `class`: The asset class whose team should be changed.", + "- `issuer`: The new Issuer of this asset class.", + "- `admin`: The new Admin of this asset class.", + "- `freezer`: The new Freezer of this asset class.", + "", + "Emits `TeamChanged`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "issuer", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "admin", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "freezer", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + } + ] + }, + { + "lookup": "330d", + 
"name": "approve_transfer", + "docs": [ + "Approve an instance to be transferred by a delegated third-party account.", + "", + "Origin must be Signed and must be the owner of the asset `instance`.", + "", + "- `class`: The class of the asset to be approved for delegated transfer.", + "- `instance`: The instance of the asset to be approved for delegated transfer.", + "- `delegate`: The account to delegate permission to transfer the asset.", + "", + "Emits `ApprovedTransfer` on success.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "instance", + "type": "compact", + "type_name": "InstanceId" + }, + { + "name": "delegate", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + } + ] + }, + { + "lookup": "330e", + "name": "cancel_approval", + "docs": [ + "Cancel the prior approval for the transfer of an asset by a delegate.", + "", + "Origin must be either:", + "- the `Force` origin;", + "- `Signed` with the signer being the Admin of the asset `class`;", + "- `Signed` with the signer being the Owner of the asset `instance`;", + "", + "Arguments:", + "- `class`: The class of the asset of whose approval will be cancelled.", + "- `instance`: The instance of the asset of whose approval will be cancelled.", + "- `maybe_check_delegate`: If `Some` will ensure that the given account is the one to", + " which permission of transfer is delegated.", + "", + "Emits `ApprovalCancelled` on success.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "instance", + "type": "compact", + "type_name": "InstanceId" + }, + { + "name": "maybe_check_delegate", + "type": "option", + "type_name": "Option<::Source>" + } + ] + }, + { + "lookup": "330f", + "name": "force_asset_status", + "docs": [ + "Alter the attributes of a given asset.", + "", + "Origin must be `ForceOrigin`.", + "", + "- `class`: The 
identifier of the asset.", + "- `owner`: The new Owner of this asset.", + "- `issuer`: The new Issuer of this asset.", + "- `admin`: The new Admin of this asset.", + "- `freezer`: The new Freezer of this asset.", + "- `free_holding`: Whether a deposit is taken for holding an instance of this asset", + " class.", + "- `is_frozen`: Whether this asset class is frozen except for permissioned/admin", + "instructions.", + "", + "Emits `AssetStatusChanged` with the identity of the asset.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "owner", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "issuer", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "admin", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "freezer", + "type": "sp_runtime:multiaddress:MultiAddress", + "type_name": "Address" + }, + { + "name": "free_holding", + "type": "Bool", + "type_name": "bool" + }, + { + "name": "is_frozen", + "type": "Bool", + "type_name": "bool" + } + ] + }, + { + "lookup": "3310", + "name": "set_attribute", + "docs": [ + "Set an attribute for an asset class or instance.", + "", + "Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the", + "asset `class`.", + "", + "If the origin is Signed, then funds of signer are reserved according to the formula:", + "`MetadataDepositBase + DepositPerByte * (key.len + value.len)` taking into", + "account any already reserved funds.", + "", + "- `class`: The identifier of the asset class whose instance's metadata to set.", + "- `maybe_instance`: The identifier of the asset instance whose metadata to set.", + "- `key`: The key of the attribute.", + "- `value`: The value to which to set the attribute.", + "", + "Emits `AttributeSet`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + 
"type": "compact", + "type_name": "ClassId" + }, + { + "name": "maybe_instance", + "type": "option", + "type_name": "Option" + }, + { + "name": "key", + "type": "Vec", + "type_name": "BoundedVec" + }, + { + "name": "value", + "type": "Vec", + "type_name": "BoundedVec" + } + ] + }, + { + "lookup": "3311", + "name": "clear_attribute", + "docs": [ + "Clear an attribute for an asset class or instance.", + "", + "Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the", + "asset `class`.", + "", + "Any deposit is freed for the asset class owner.", + "", + "- `class`: The identifier of the asset class whose instance's metadata to clear.", + "- `maybe_instance`: The identifier of the asset instance whose metadata to clear.", + "- `key`: The key of the attribute.", + "", + "Emits `AttributeCleared`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "maybe_instance", + "type": "option", + "type_name": "Option" + }, + { + "name": "key", + "type": "Vec", + "type_name": "BoundedVec" + } + ] + }, + { + "lookup": "3312", + "name": "set_metadata", + "docs": [ + "Set the metadata for an asset instance.", + "", + "Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the", + "asset `class`.", + "", + "If the origin is Signed, then funds of signer are reserved according to the formula:", + "`MetadataDepositBase + DepositPerByte * data.len` taking into", + "account any already reserved funds.", + "", + "- `class`: The identifier of the asset class whose instance's metadata to set.", + "- `instance`: The identifier of the asset instance whose metadata to set.", + "- `data`: The general information of this asset. 
Limited in length by `StringLimit`.", + "- `is_frozen`: Whether the metadata should be frozen against further changes.", + "", + "Emits `MetadataSet`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "instance", + "type": "compact", + "type_name": "InstanceId" + }, + { + "name": "data", + "type": "Vec", + "type_name": "BoundedVec" + }, + { + "name": "is_frozen", + "type": "Bool", + "type_name": "bool" + } + ] + }, + { + "lookup": "3313", + "name": "clear_metadata", + "docs": [ + "Clear the metadata for an asset instance.", + "", + "Origin must be either `ForceOrigin` or Signed and the sender should be the Owner of the", + "asset `instance`.", + "", + "Any deposit is freed for the asset class owner.", + "", + "- `class`: The identifier of the asset class whose instance's metadata to clear.", + "- `instance`: The identifier of the asset instance whose metadata to clear.", + "", + "Emits `MetadataCleared`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "instance", + "type": "compact", + "type_name": "InstanceId" + } + ] + }, + { + "lookup": "3314", + "name": "set_class_metadata", + "docs": [ + "Set the metadata for an asset class.", + "", + "Origin must be either `ForceOrigin` or `Signed` and the sender should be the Owner of", + "the asset `class`.", + "", + "If the origin is `Signed`, then funds of signer are reserved according to the formula:", + "`MetadataDepositBase + DepositPerByte * data.len` taking into", + "account any already reserved funds.", + "", + "- `class`: The identifier of the asset whose metadata to update.", + "- `data`: The general information of this asset. 
Limited in length by `StringLimit`.", + "- `is_frozen`: Whether the metadata should be frozen against further changes.", + "", + "Emits `ClassMetadataSet`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + }, + { + "name": "data", + "type": "Vec", + "type_name": "BoundedVec" + }, + { + "name": "is_frozen", + "type": "Bool", + "type_name": "bool" + } + ] + }, + { + "lookup": "3315", + "name": "clear_class_metadata", + "docs": [ + "Clear the metadata for an asset class.", + "", + "Origin must be either `ForceOrigin` or `Signed` and the sender should be the Owner of", + "the asset `class`.", + "", + "Any deposit is freed for the asset class owner.", + "", + "- `class`: The identifier of the asset class whose metadata to clear.", + "", + "Emits `ClassMetadataCleared`.", + "", + "Weight: `O(1)`" + ], + "args": [ + { + "name": "class", + "type": "compact", + "type_name": "ClassId" + } + ] + } + ], + "calls_value": { + "type": 252 + }, + "events": [ + { + "lookup": "3300", + "name": "Created", + "docs": [ + "An asset class was created." + ], + "args": [ + "U32", + "AccountId", + "AccountId" + ], + "args_name": [ + "class", + "creator", + "owner" + ], + "args_type_name": [ + "ClassId", + "AccountId", + "AccountId" + ] + }, + { + "lookup": "3301", + "name": "ForceCreated", + "docs": [ + "An asset class was force-created." + ], + "args": [ + "U32", + "AccountId" + ], + "args_name": [ + "class", + "owner" + ], + "args_type_name": [ + "ClassId", + "AccountId" + ] + }, + { + "lookup": "3302", + "name": "Destroyed", + "docs": [ + "An asset `class` was destroyed." + ], + "args": [ + "U32" + ], + "args_name": [ + "class" + ], + "args_type_name": [ + "ClassId" + ] + }, + { + "lookup": "3303", + "name": "Issued", + "docs": [ + "An asset `instance` was issued." 
+ ], + "args": [ + "U32", + "U32", + "AccountId" + ], + "args_name": [ + "class", + "instance", + "owner" + ], + "args_type_name": [ + "ClassId", + "InstanceId", + "AccountId" + ] + }, + { + "lookup": "3304", + "name": "Transferred", + "docs": [ + "An asset `instance` was transferred." + ], + "args": [ + "U32", + "U32", + "AccountId", + "AccountId" + ], + "args_name": [ + "class", + "instance", + "from", + "to" + ], + "args_type_name": [ + "ClassId", + "InstanceId", + "AccountId", + "AccountId" + ] + }, + { + "lookup": "3305", + "name": "Burned", + "docs": [ + "An asset `instance` was destroyed." + ], + "args": [ + "U32", + "U32", + "AccountId" + ], + "args_name": [ + "class", + "instance", + "owner" + ], + "args_type_name": [ + "ClassId", + "InstanceId", + "AccountId" + ] + }, + { + "lookup": "3306", + "name": "Frozen", + "docs": [ + "Some asset `instance` was frozen." + ], + "args": [ + "U32", + "U32" + ], + "args_name": [ + "class", + "instance" + ], + "args_type_name": [ + "ClassId", + "InstanceId" + ] + }, + { + "lookup": "3307", + "name": "Thawed", + "docs": [ + "Some asset `instance` was thawed." + ], + "args": [ + "U32", + "U32" + ], + "args_name": [ + "class", + "instance" + ], + "args_type_name": [ + "ClassId", + "InstanceId" + ] + }, + { + "lookup": "3308", + "name": "ClassFrozen", + "docs": [ + "Some asset `class` was frozen." + ], + "args": [ + "U32" + ], + "args_name": [ + "class" + ], + "args_type_name": [ + "ClassId" + ] + }, + { + "lookup": "3309", + "name": "ClassThawed", + "docs": [ + "Some asset `class` was thawed." + ], + "args": [ + "U32" + ], + "args_name": [ + "class" + ], + "args_type_name": [ + "ClassId" + ] + }, + { + "lookup": "330a", + "name": "OwnerChanged", + "docs": [ + "The owner changed." + ], + "args": [ + "U32", + "AccountId" + ], + "args_name": [ + "class", + "new_owner" + ], + "args_type_name": [ + "ClassId", + "AccountId" + ] + }, + { + "lookup": "330b", + "name": "TeamChanged", + "docs": [ + "The management team changed." 
+ ], + "args": [ + "U32", + "AccountId", + "AccountId", + "AccountId" + ], + "args_name": [ + "class", + "issuer", + "admin", + "freezer" + ], + "args_type_name": [ + "ClassId", + "AccountId", + "AccountId", + "AccountId" + ] + }, + { + "lookup": "330c", + "name": "ApprovedTransfer", + "docs": [ + "An `instance` of an asset `class` has been approved by the `owner` for transfer by a", + "`delegate`." + ], + "args": [ + "U32", + "U32", + "AccountId", + "AccountId" + ], + "args_name": [ + "class", + "instance", + "owner", + "delegate" + ], + "args_type_name": [ + "ClassId", + "InstanceId", + "AccountId", + "AccountId" + ] + }, + { + "lookup": "330d", + "name": "ApprovalCancelled", + "docs": [ + "An approval for a `delegate` account to transfer the `instance` of an asset `class` was", + "cancelled by its `owner`." + ], + "args": [ + "U32", + "U32", + "AccountId", + "AccountId" + ], + "args_name": [ + "class", + "instance", + "owner", + "delegate" + ], + "args_type_name": [ + "ClassId", + "InstanceId", + "AccountId", + "AccountId" + ] + }, + { + "lookup": "330e", + "name": "AssetStatusChanged", + "docs": [ + "An asset `class` has had its attributes changed by the `Force` origin." + ], + "args": [ + "U32" + ], + "args_name": [ + "class" + ], + "args_type_name": [ + "ClassId" + ] + }, + { + "lookup": "330f", + "name": "ClassMetadataSet", + "docs": [ + "New metadata has been set for an asset class." + ], + "args": [ + "U32", + "Vec", + "Bool" + ], + "args_name": [ + "class", + "data", + "is_frozen" + ], + "args_type_name": [ + "ClassId", + "BoundedVec", + "bool" + ] + }, + { + "lookup": "3310", + "name": "ClassMetadataCleared", + "docs": [ + "Metadata has been cleared for an asset class." + ], + "args": [ + "U32" + ], + "args_name": [ + "class" + ], + "args_type_name": [ + "ClassId" + ] + }, + { + "lookup": "3311", + "name": "MetadataSet", + "docs": [ + "New metadata has been set for an asset instance." 
+ ], + "args": [ + "U32", + "U32", + "Vec", + "Bool" + ], + "args_name": [ + "class", + "instance", + "data", + "is_frozen" + ], + "args_type_name": [ + "ClassId", + "InstanceId", + "BoundedVec", + "bool" + ] + }, + { + "lookup": "3312", + "name": "MetadataCleared", + "docs": [ + "Metadata has been cleared for an asset instance." + ], + "args": [ + "U32", + "U32" + ], + "args_name": [ + "class", + "instance" + ], + "args_type_name": [ + "ClassId", + "InstanceId" + ] + }, + { + "lookup": "3313", + "name": "Redeposited", + "docs": [ + "Metadata has been cleared for an asset instance." + ], + "args": [ + "U32", + "Vec" + ], + "args_name": [ + "class", + "successful_instances" + ], + "args_type_name": [ + "ClassId", + "Vec" + ] + }, + { + "lookup": "3314", + "name": "AttributeSet", + "docs": [ + "New attribute metadata has been set for an asset class or instance." + ], + "args": [ + "U32", + "option", + "Vec", + "Vec" + ], + "args_name": [ + "class", + "maybe_instance", + "key", + "value" + ], + "args_type_name": [ + "ClassId", + "Option", + "BoundedVec", + "BoundedVec" + ] + }, + { + "lookup": "3315", + "name": "AttributeCleared", + "docs": [ + "Attribute metadata has been cleared for an asset class or instance." + ], + "args": [ + "U32", + "option", + "Vec" + ], + "args_name": [ + "class", + "maybe_instance", + "key" + ], + "args_type_name": [ + "ClassId", + "Option", + "BoundedVec" + ] + } + ], + "events_value": { + "type": 87 + }, + "constants": [ + { + "name": "ClassDeposit", + "type": "U128", + "type_value": 6, + "constants_value": "00e40b54020000000000000000000000", + "docs": [ + " The basic amount of funds that must be reserved for an asset class." + ] + }, + { + "name": "InstanceDeposit", + "type": "U128", + "type_value": 6, + "constants_value": "00e1f505000000000000000000000000", + "docs": [ + " The basic amount of funds that must be reserved for an asset instance." 
+ ] + }, + { + "name": "MetadataDepositBase", + "type": "U128", + "type_value": 6, + "constants_value": "402ac8af040000000000000000000000", + "docs": [ + " The basic amount of funds that must be reserved when adding metadata to your asset." + ] + }, + { + "name": "AttributeDepositBase", + "type": "U128", + "type_value": 6, + "constants_value": "00c817a8040000000000000000000000", + "docs": [ + " The basic amount of funds that must be reserved when adding an attribute to an asset." + ] + }, + { + "name": "DepositPerByte", + "type": "U128", + "type_value": 6, + "constants_value": "40420f00000000000000000000000000", + "docs": [ + " The additional funds that must be reserved for the number of bytes store in metadata,", + " either \"normal\" metadata or attribute metadata." + ] + }, + { + "name": "StringLimit", + "type": "U32", + "type_value": 4, + "constants_value": "80000000", + "docs": [ + " The maximum length of data stored on-chain." + ] + }, + { + "name": "KeyLimit", + "type": "U32", + "type_value": 4, + "constants_value": "20000000", + "docs": [ + " The maximum length of an attribute key." + ] + }, + { + "name": "ValueLimit", + "type": "U32", + "type_value": 4, + "constants_value": "40000000", + "docs": [ + " The maximum length of an attribute value." + ] + } + ], + "errors": [ + { + "name": "NoPermission", + "doc": [ + "The signing account has no permission to do the operation." + ] + }, + { + "name": "UnknownClass", + "doc": [ + "The given asset ID is unknown." + ] + }, + { + "name": "AlreadyExists", + "doc": [ + "The asset instance ID has already been used for an asset." + ] + }, + { + "name": "WrongOwner", + "doc": [ + "The owner turned out to be different to what was expected." + ] + }, + { + "name": "BadWitness", + "doc": [ + "Invalid witness data given." + ] + }, + { + "name": "InUse", + "doc": [ + "The asset ID is already taken." + ] + }, + { + "name": "Frozen", + "doc": [ + "The asset instance or class is frozen." 
+ ] + }, + { + "name": "WrongDelegate", + "doc": [ + "The delegate turned out to be different to what was expected." + ] + }, + { + "name": "NoDelegate", + "doc": [ + "There is no delegate approved." + ] + }, + { + "name": "Unapproved", + "doc": [ + "No approval exists that would allow the transfer." + ] + } + ], + "errors_value": { + "type": 291 + }, + "index": 51 + } +] \ No newline at end of file diff --git a/src/demo_substrate_events/configs/.keep b/src/demo_substrate_events/configs/.keep new file mode 100644 index 000000000..e69de29bb diff --git a/src/demo_substrate_events/configs/dipdup.compose.yaml b/src/demo_substrate_events/configs/dipdup.compose.yaml new file mode 100644 index 000000000..78fba783f --- /dev/null +++ b/src/demo_substrate_events/configs/dipdup.compose.yaml @@ -0,0 +1,24 @@ +database: + kind: postgres + host: ${POSTGRES_HOST:-db} + port: 5432 + user: ${POSTGRES_USER:-dipdup} + password: ${POSTGRES_PASSWORD} + database: ${POSTGRES_DB:-dipdup} + +hasura: + url: http://${HASURA_HOST:-hasura}:8080 + admin_secret: ${HASURA_SECRET} + allow_aggregations: ${HASURA_ALLOW_AGGREGATIONS:-true} + select_limit: ${HASURA_SELECT_LIMIT:-10000} + camel_case: ${HASURA_CAMEL_CASE:-true} + +sentry: + dsn: ${SENTRY_DSN:-''} + environment: ${SENTRY_ENVIRONMENT:-''} + +prometheus: + host: 0.0.0.0 + +api: + host: 0.0.0.0 \ No newline at end of file diff --git a/src/demo_substrate_events/configs/dipdup.sqlite.yaml b/src/demo_substrate_events/configs/dipdup.sqlite.yaml new file mode 100644 index 000000000..f534693c2 --- /dev/null +++ b/src/demo_substrate_events/configs/dipdup.sqlite.yaml @@ -0,0 +1,3 @@ +database: + kind: sqlite + path: ${SQLITE_PATH:-/tmp/demo_substrate_events.sqlite} \ No newline at end of file diff --git a/src/demo_substrate_events/configs/dipdup.swarm.yaml b/src/demo_substrate_events/configs/dipdup.swarm.yaml new file mode 100644 index 000000000..32d0d8b9b --- /dev/null +++ b/src/demo_substrate_events/configs/dipdup.swarm.yaml @@ -0,0 +1,24 @@ 
+database: + kind: postgres + host: ${POSTGRES_HOST:-demo_substrate_events_db} + port: 5432 + user: ${POSTGRES_USER:-dipdup} + password: ${POSTGRES_PASSWORD} + database: ${POSTGRES_DB:-dipdup} + +hasura: + url: http://${HASURA_HOST:-demo_substrate_events_hasura}:8080 + admin_secret: ${HASURA_SECRET} + allow_aggregations: ${HASURA_ALLOW_AGGREGATIONS:-false} + select_limit: ${HASURA_SELECT_LIMIT:-100} + camel_case: ${HASURA_CAMEL_CASE:-true} + +sentry: + dsn: ${SENTRY_DSN:-''} + environment: ${SENTRY_ENVIRONMENT:-''} + +prometheus: + host: 0.0.0.0 + +api: + host: 0.0.0.0 \ No newline at end of file diff --git a/src/demo_substrate_events/configs/replay.yaml b/src/demo_substrate_events/configs/replay.yaml new file mode 100644 index 000000000..ef6696c65 --- /dev/null +++ b/src/demo_substrate_events/configs/replay.yaml @@ -0,0 +1,18 @@ +# To refresh existing project run `dipdup init --base --force` after modifying this file. +# To generate a new project from this replay run `dipdup new --replay `. +# +spec_version: 3.0 +replay: + dipdup_version: 8 + template: demo_substrate_events + package: demo_substrate_events + version: 0.0.1 + description: Substrate balance transfers + license: MIT + name: John Doe + email: john_doe@example.com + postgres_image: postgres:15 + postgres_data_path: /var/lib/postgresql/data + hasura_image: hasura/graphql-engine:latest + line_length: 120 + package_manager: pdm diff --git a/src/demo_substrate_events/deploy/.env.default b/src/demo_substrate_events/deploy/.env.default new file mode 100644 index 000000000..0fa241260 --- /dev/null +++ b/src/demo_substrate_events/deploy/.env.default @@ -0,0 +1,15 @@ +# This env file was generated automatically by DipDup. Do not edit it! +# Create a copy with .env extension, fill it with your values and run DipDup with `--env-file` option. 
+# +HASURA_ALLOW_AGGREGATIONS=true +HASURA_CAMEL_CASE=true +HASURA_HOST=hasura +HASURA_SECRET= +HASURA_SELECT_LIMIT=10000 +NODE_API_KEY='' +POSTGRES_DB=dipdup +POSTGRES_HOST=db +POSTGRES_PASSWORD= +POSTGRES_USER=dipdup +SENTRY_DSN='' +SENTRY_ENVIRONMENT='' diff --git a/src/demo_substrate_events/deploy/.keep b/src/demo_substrate_events/deploy/.keep new file mode 100644 index 000000000..e69de29bb diff --git a/src/demo_substrate_events/deploy/Dockerfile b/src/demo_substrate_events/deploy/Dockerfile new file mode 100644 index 000000000..10dbd737f --- /dev/null +++ b/src/demo_substrate_events/deploy/Dockerfile @@ -0,0 +1,9 @@ +FROM dipdup/dipdup:8 +# FROM ghcr.io/dipdup-io/dipdup:8 +# FROM ghcr.io/dipdup-io/dipdup:next + +# COPY --chown=dipdup pyproject.toml README.md . +# RUN pip install . + +COPY --chown=dipdup . demo_substrate_events +WORKDIR demo_substrate_events \ No newline at end of file diff --git a/src/demo_substrate_events/deploy/compose.sqlite.yaml b/src/demo_substrate_events/deploy/compose.sqlite.yaml new file mode 100644 index 000000000..0fbd22495 --- /dev/null +++ b/src/demo_substrate_events/deploy/compose.sqlite.yaml @@ -0,0 +1,18 @@ +name: demo_substrate_events + +services: + dipdup: + build: + context: .. 
+ dockerfile: deploy/Dockerfile + command: ["-c", "dipdup.yaml", "-c", "configs/dipdup.sqlite.yaml", "run"] + restart: always + env_file: .env + ports: + - 46339 + - 9000 + volumes: + - sqlite:${SQLITE_PATH:-/tmp/demo_substrate_events.sqlite} + +volumes: + sqlite: \ No newline at end of file diff --git a/src/demo_substrate_events/deploy/compose.swarm.yaml b/src/demo_substrate_events/deploy/compose.swarm.yaml new file mode 100644 index 000000000..877b8376d --- /dev/null +++ b/src/demo_substrate_events/deploy/compose.swarm.yaml @@ -0,0 +1,91 @@ +name: demo_substrate_events + +services: + dipdup: + image: ${IMAGE:-ghcr.io/dipdup-io/dipdup}:${TAG:-8} + depends_on: + - db + - hasura + command: ["-c", "dipdup.yaml", "-c", "configs/dipdup.swarm.yaml", "run"] + env_file: .env + networks: + - internal + - prometheus-private + deploy: + mode: replicated + replicas: ${INDEXER_ENABLED:-1} + labels: + - prometheus-job=${SERVICE} + - prometheus-port=8000 + placement: &placement + constraints: + - node.labels.${SERVICE} == true + logging: &logging + driver: "json-file" + options: + max-size: "10m" + max-file: "10" + tag: "\{\{.Name\}\}.\{\{.ImageID\}\}" + + db: + image: postgres:15 + volumes: + - db:/var/lib/postgresql/data + env_file: .env + environment: + - POSTGRES_USER=dipdup + - POSTGRES_DB=dipdup + - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} + healthcheck: + test: ["CMD-SHELL", "pg_isready -U dipdup"] + interval: 10s + timeout: 5s + retries: 5 + networks: + - internal + deploy: + mode: replicated + replicas: 1 + placement: *placement + logging: *logging + + hasura: + image: hasura/graphql-engine:latest + depends_on: + - db + environment: + - HASURA_GRAPHQL_DATABASE_URL=postgres://dipdup:${POSTGRES_PASSWORD}@demo_substrate_events_db:5432/dipdup + - HASURA_GRAPHQL_ADMIN_SECRET=${HASURA_SECRET} + - HASURA_GRAPHQL_ENABLE_CONSOLE=true + - HASURA_GRAPHQL_DEV_MODE=false + - HASURA_GRAPHQL_LOG_LEVEL=warn + - HASURA_GRAPHQL_ENABLE_TELEMETRY=false + - 
HASURA_GRAPHQL_UNAUTHORIZED_ROLE=user + - HASURA_GRAPHQL_STRINGIFY_NUMERIC_TYPES=true + networks: + - internal + - traefik-public + deploy: + mode: replicated + replicas: 1 + labels: + - traefik.enable=true + - traefik.http.services.${SERVICE}.loadbalancer.server.port=8080 + - "traefik.http.routers.${SERVICE}.rule=Host(`${HOST}`) && (PathPrefix(`/v1/graphql`) || PathPrefix(`/api/rest`))" + - traefik.http.routers.${SERVICE}.entrypoints=http,${INGRESS:-ingress} + - "traefik.http.routers.${SERVICE}-console.rule=Host(`${SERVICE}.${SWARM_ROOT_DOMAIN}`)" + - traefik.http.routers.${SERVICE}-console.entrypoints=https + - traefik.http.middlewares.${SERVICE}-console.headers.customrequestheaders.X-Hasura-Admin-Secret=${HASURA_SECRET} + - traefik.http.routers.${SERVICE}-console.middlewares=authelia@docker,${SERVICE}-console + placement: *placement + logging: *logging + +volumes: + db: + +networks: + internal: + traefik-public: + external: true + prometheus-private: + external: true \ No newline at end of file diff --git a/src/demo_substrate_events/deploy/compose.yaml b/src/demo_substrate_events/deploy/compose.yaml new file mode 100644 index 000000000..4495d3fc6 --- /dev/null +++ b/src/demo_substrate_events/deploy/compose.yaml @@ -0,0 +1,54 @@ +name: demo_substrate_events + +services: + dipdup: + build: + context: .. 
+ dockerfile: deploy/Dockerfile + restart: always + env_file: .env + ports: + - 46339 + - 9000 + command: ["-c", "dipdup.yaml", "-c", "configs/dipdup.compose.yaml", "run"] + depends_on: + - db + - hasura + + db: + image: postgres:15 + ports: + - "${POSTGRES_HOST_PORT:-5432}:5432" + volumes: + - db:/var/lib/postgresql/data + restart: always + env_file: .env + environment: + - POSTGRES_USER=dipdup + - POSTGRES_DB=dipdup + - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} + healthcheck: + test: ["CMD-SHELL", "pg_isready -U dipdup"] + interval: 10s + timeout: 5s + retries: 5 + + hasura: + image: hasura/graphql-engine:latest + ports: + - "${HASURA_HOST_PORT:-8080}:8080" + depends_on: + - db + restart: always + environment: + - HASURA_GRAPHQL_DATABASE_URL=postgres://dipdup:${POSTGRES_PASSWORD}@db:5432/dipdup + - HASURA_GRAPHQL_ADMIN_SECRET=${HASURA_SECRET} + - HASURA_GRAPHQL_ENABLE_CONSOLE=true + - HASURA_GRAPHQL_DEV_MODE=true + - HASURA_GRAPHQL_LOG_LEVEL=info + - HASURA_GRAPHQL_ENABLE_TELEMETRY=false + - HASURA_GRAPHQL_UNAUTHORIZED_ROLE=user + - HASURA_GRAPHQL_STRINGIFY_NUMERIC_TYPES=true + +volumes: + db: \ No newline at end of file diff --git a/src/demo_substrate_events/deploy/sqlite.env.default b/src/demo_substrate_events/deploy/sqlite.env.default new file mode 100644 index 000000000..7a9b8f815 --- /dev/null +++ b/src/demo_substrate_events/deploy/sqlite.env.default @@ -0,0 +1,5 @@ +# This env file was generated automatically by DipDup. Do not edit it! +# Create a copy with .env extension, fill it with your values and run DipDup with `--env-file` option. +# +NODE_API_KEY='' +SQLITE_PATH=/tmp/demo_substrate_events.sqlite diff --git a/src/demo_substrate_events/deploy/swarm.env.default b/src/demo_substrate_events/deploy/swarm.env.default new file mode 100644 index 000000000..a1b976d4d --- /dev/null +++ b/src/demo_substrate_events/deploy/swarm.env.default @@ -0,0 +1,15 @@ +# This env file was generated automatically by DipDup. Do not edit it! 
+# Create a copy with .env extension, fill it with your values and run DipDup with `--env-file` option. +# +HASURA_ALLOW_AGGREGATIONS=false +HASURA_CAMEL_CASE=true +HASURA_HOST=demo_substrate_events_hasura +HASURA_SECRET= +HASURA_SELECT_LIMIT=100 +NODE_API_KEY='' +POSTGRES_DB=dipdup +POSTGRES_HOST=demo_substrate_events_db +POSTGRES_PASSWORD= +POSTGRES_USER=dipdup +SENTRY_DSN='' +SENTRY_ENVIRONMENT='' diff --git a/src/demo_substrate_events/dipdup.yaml b/src/demo_substrate_events/dipdup.yaml new file mode 100644 index 000000000..33995680c --- /dev/null +++ b/src/demo_substrate_events/dipdup.yaml @@ -0,0 +1,31 @@ +spec_version: 3.0 +package: demo_substrate_events + +runtimes: + assethub: + kind: substrate + type_registry: statemint + +datasources: + subsquid: + kind: substrate.subsquid + url: https://v2.archive.subsquid.io/network/asset-hub-polkadot + subscan: + kind: substrate.subscan + url: https://assethub-polkadot.api.subscan.io/api + node: + kind: substrate.node + url: https://statemint.api.onfinality.io/rpc?apikey=${NODE_API_KEY:-''} + ws_url: wss://statemint.api.onfinality.io/ws?apikey=${NODE_API_KEY:-''} + +indexes: + assethub_transfers: + kind: substrate.events + runtime: assethub + datasources: + - subsquid + - subscan + - node + handlers: + - callback: on_transfer + name: Assets.Transferred \ No newline at end of file diff --git a/src/demo_substrate_events/graphql/.keep b/src/demo_substrate_events/graphql/.keep new file mode 100644 index 000000000..e69de29bb diff --git a/src/demo_substrate_events/handlers/.keep b/src/demo_substrate_events/handlers/.keep new file mode 100644 index 000000000..e69de29bb diff --git a/src/demo_substrate_events/handlers/batch.py b/src/demo_substrate_events/handlers/batch.py new file mode 100644 index 000000000..c2c321ae0 --- /dev/null +++ b/src/demo_substrate_events/handlers/batch.py @@ -0,0 +1,12 @@ +from collections.abc import Iterable + +from dipdup.context import HandlerContext +from dipdup.index import MatchedHandler + + 
+async def batch( + ctx: HandlerContext, + handlers: Iterable[MatchedHandler], +) -> None: + for handler in handlers: + await ctx.fire_matched_handler(handler) diff --git a/src/demo_substrate_events/handlers/on_transfer.py b/src/demo_substrate_events/handlers/on_transfer.py new file mode 100644 index 000000000..9a301c88d --- /dev/null +++ b/src/demo_substrate_events/handlers/on_transfer.py @@ -0,0 +1,52 @@ +from decimal import Decimal + +from demo_substrate_events import models as models +from demo_substrate_events.types.assethub.substrate_events.assets_transferred import AssetsTransferredPayload +from dipdup.context import HandlerContext +from dipdup.models.substrate import SubstrateEvent +from tortoise.exceptions import DoesNotExist + + +async def sql_update( + ctx: HandlerContext, + address: str, + amount: Decimal, + level: int, +) -> None: + await ctx.execute_sql_query( + 'update_balance', + address, + str(amount), + level, + ) + + +# NOTE: Not used, just for demonstration purposes +async def orm_update( + ctx: HandlerContext, + address: str, + amount: Decimal, + level: int, +) -> None: + try: + holder = await models.Holder.cached_get(pk=address) + except DoesNotExist: + holder = models.Holder(address=address) + holder.cache() + holder.balance += amount + holder.turnover += abs(amount) + holder.tx_count += 1 + holder.last_seen = level + await holder.save() + + +async def on_transfer( + ctx: HandlerContext, + event: SubstrateEvent[AssetsTransferredPayload], +) -> None: + amount = Decimal(event.payload['amount']) + if not amount: + return + + await sql_update(ctx, event.payload['from'], -amount, event.data.level) + await sql_update(ctx, event.payload['to'], amount, event.data.level) diff --git a/src/demo_substrate_events/hasura/.keep b/src/demo_substrate_events/hasura/.keep new file mode 100644 index 000000000..e69de29bb diff --git a/src/demo_substrate_events/hooks/.keep b/src/demo_substrate_events/hooks/.keep new file mode 100644 index 000000000..e69de29bb diff 
--git a/src/demo_substrate_events/hooks/on_index_rollback.py b/src/demo_substrate_events/hooks/on_index_rollback.py new file mode 100644 index 000000000..e561b622a --- /dev/null +++ b/src/demo_substrate_events/hooks/on_index_rollback.py @@ -0,0 +1,16 @@ +from dipdup.context import HookContext +from dipdup.index import Index + + +async def on_index_rollback( + ctx: HookContext, + index: Index, # type: ignore[type-arg] + from_level: int, + to_level: int, +) -> None: + await ctx.execute_sql_script('on_index_rollback') + await ctx.rollback( + index=index.name, + from_level=from_level, + to_level=to_level, + ) diff --git a/src/demo_substrate_events/hooks/on_reindex.py b/src/demo_substrate_events/hooks/on_reindex.py new file mode 100644 index 000000000..e04646d8e --- /dev/null +++ b/src/demo_substrate_events/hooks/on_reindex.py @@ -0,0 +1,7 @@ +from dipdup.context import HookContext + + +async def on_reindex( + ctx: HookContext, +) -> None: + await ctx.execute_sql_script('on_reindex') diff --git a/src/demo_substrate_events/hooks/on_restart.py b/src/demo_substrate_events/hooks/on_restart.py new file mode 100644 index 000000000..91cb798e6 --- /dev/null +++ b/src/demo_substrate_events/hooks/on_restart.py @@ -0,0 +1,7 @@ +from dipdup.context import HookContext + + +async def on_restart( + ctx: HookContext, +) -> None: + await ctx.execute_sql_script('on_restart') diff --git a/src/demo_substrate_events/hooks/on_synchronized.py b/src/demo_substrate_events/hooks/on_synchronized.py new file mode 100644 index 000000000..1500d9cf9 --- /dev/null +++ b/src/demo_substrate_events/hooks/on_synchronized.py @@ -0,0 +1,7 @@ +from dipdup.context import HookContext + + +async def on_synchronized( + ctx: HookContext, +) -> None: + await ctx.execute_sql_script('on_synchronized') diff --git a/src/demo_substrate_events/models/.keep b/src/demo_substrate_events/models/.keep new file mode 100644 index 000000000..e69de29bb diff --git a/src/demo_substrate_events/models/__init__.py 
b/src/demo_substrate_events/models/__init__.py new file mode 100644 index 000000000..0dc49a4c6 --- /dev/null +++ b/src/demo_substrate_events/models/__init__.py @@ -0,0 +1,13 @@ +from dipdup import fields +from dipdup.models import CachedModel + + +class Holder(CachedModel): + address = fields.TextField(primary_key=True) + balance = fields.DecimalField(decimal_places=6, max_digits=40, default=0) + turnover = fields.DecimalField(decimal_places=6, max_digits=40, default=0) + tx_count = fields.BigIntField(default=0) + last_seen = fields.BigIntField(null=True) + + class Meta: + maxsize = 2**12 diff --git a/src/demo_substrate_events/py.typed b/src/demo_substrate_events/py.typed new file mode 100644 index 000000000..e69de29bb diff --git a/src/demo_substrate_events/pyproject.toml b/src/demo_substrate_events/pyproject.toml new file mode 100644 index 000000000..69e8fd163 --- /dev/null +++ b/src/demo_substrate_events/pyproject.toml @@ -0,0 +1,50 @@ +# generated by DipDup 8.1.2 +[project] +name = "demo_substrate_events" +version = "0.0.1" +description = "Substrate balance transfers" +license = { text = "MIT" } +authors = [ + { name = "John Doe", email = "john_doe@example.com" }, +] +readme = "README.md" +requires-python = ">=3.12,<3.13" +dependencies = [ + "dipdup>=8,<9", +] + +[tool.pdm.dev-dependencies] +dev = [ + "black", + "ruff", + "mypy", +] + +[tool.black] +line-length = 120 +target-version = ['py312'] +skip-string-normalization = true +extend-exclude = "demo_substrate_events" + +[tool.ruff] +line-length = 120 +target-version = 'py312' + +[tool.ruff.lint] +extend-select = ["B", "C4", "FA", "G", "I", "PTH", "Q", "RET", "RUF", "TCH", "UP"] +flake8-quotes = { inline-quotes = "single", multiline-quotes = "double" } +isort = { force-single-line = true} + +[tool.mypy] +python_version = "3.12" +plugins = ["pydantic.mypy"] +strict = false +exclude = "demo_substrate_events" + +[[tool.mypy.overrides]] +module = "ruamel" +ignore_missing_imports = true + +[build-system] +requires = 
["pdm-backend"] +build-backend = "pdm.backend" \ No newline at end of file diff --git a/src/demo_substrate_events/sql/.keep b/src/demo_substrate_events/sql/.keep new file mode 100644 index 000000000..e69de29bb diff --git a/src/demo_substrate_events/sql/on_index_rollback/.keep b/src/demo_substrate_events/sql/on_index_rollback/.keep new file mode 100644 index 000000000..e69de29bb diff --git a/src/demo_substrate_events/sql/on_reindex/.keep b/src/demo_substrate_events/sql/on_reindex/.keep new file mode 100644 index 000000000..e69de29bb diff --git a/src/demo_substrate_events/sql/on_restart/.keep b/src/demo_substrate_events/sql/on_restart/.keep new file mode 100644 index 000000000..e69de29bb diff --git a/src/demo_substrate_events/sql/on_synchronized/.keep b/src/demo_substrate_events/sql/on_synchronized/.keep new file mode 100644 index 000000000..e69de29bb diff --git a/src/demo_substrate_events/sql/update_balance.sql b/src/demo_substrate_events/sql/update_balance.sql new file mode 100644 index 000000000..663a952cd --- /dev/null +++ b/src/demo_substrate_events/sql/update_balance.sql @@ -0,0 +1,22 @@ +insert into holder ( + address + ,balance + ,turnover + ,tx_count + ,last_seen +) +values ( + :address + ,:amount + ,abs(:amount) + ,1 + ,:level +) +on conflict (address) do +update +set + balance = balance + :amount + ,turnover = turnover + abs(:amount) + ,tx_count = tx_count + 1 + ,last_seen = :level +; \ No newline at end of file diff --git a/src/demo_substrate_events/types/.keep b/src/demo_substrate_events/types/.keep new file mode 100644 index 000000000..e69de29bb diff --git a/src/demo_substrate_events/types/assethub/substrate_events/assets_transferred/__init__.py b/src/demo_substrate_events/types/assethub/substrate_events/assets_transferred/__init__.py new file mode 100644 index 000000000..d8768a55e --- /dev/null +++ b/src/demo_substrate_events/types/assethub/substrate_events/assets_transferred/__init__.py @@ -0,0 +1,4 @@ +from .v601 import V601 +from .v700 import V700 + 
+type AssetsTransferredPayload = V601 | V700 diff --git a/src/demo_substrate_events/types/assethub/substrate_events/assets_transferred/v601.py b/src/demo_substrate_events/types/assethub/substrate_events/assets_transferred/v601.py new file mode 100644 index 000000000..afcbb6380 --- /dev/null +++ b/src/demo_substrate_events/types/assethub/substrate_events/assets_transferred/v601.py @@ -0,0 +1,18 @@ +# generated by DipDup 8.1.2 + +from __future__ import annotations + +from typing import TypedDict + +""" +Some assets were transferred. [asset_id, from, to, amount] +""" +V601 = TypedDict( + 'V601', + { + 'asset_id': int, + 'from': str, + 'to': str, + 'amount': int, + }, +) diff --git a/src/demo_substrate_events/types/assethub/substrate_events/assets_transferred/v700.py b/src/demo_substrate_events/types/assethub/substrate_events/assets_transferred/v700.py new file mode 100644 index 000000000..7f84fa9a8 --- /dev/null +++ b/src/demo_substrate_events/types/assethub/substrate_events/assets_transferred/v700.py @@ -0,0 +1,18 @@ +# generated by DipDup 8.1.2 + +from __future__ import annotations + +from typing import TypedDict + +""" +Some assets were transferred. 
+""" +V700 = TypedDict( + 'V700', + { + 'asset_id': int, + 'from': str, + 'to': str, + 'amount': int, + }, +) diff --git a/src/dipdup/_survey.py b/src/dipdup/_survey.py index eb6f038ae..5d88c16d6 100644 --- a/src/dipdup/_survey.py +++ b/src/dipdup/_survey.py @@ -92,7 +92,7 @@ class BlockchainConfig(TypedDict): 'name': 'node', }, { - 'kind': 'abi.etherscan', + 'kind': 'evm.etherscan', 'requires_api_key': True, 'default_url': 'https://api.etherscan.io/api', 'name': 'etherscan', @@ -224,7 +224,7 @@ def get_datasource_comments(datasources: list[str]) -> tuple[str, ...]: default_comments = { 'evm.subsquid': 'Use Subsquid as your datasource for EVM.', 'evm.node': 'Connect to an EVM node.', - 'abi.etherscan': 'Fetch ABI from Etherscan.', + 'evm.etherscan': 'Fetch ABI from Etherscan.', 'tezos.tzkt': 'Use TzKT API for Tezos.', 'starknet.subsquid': 'Use Subsquid for Starknet.', 'starknet.node': 'Connect to a Starknet node.', @@ -420,7 +420,7 @@ def query_survey_config(blockchain: str) -> DipDupSurveyConfig: else: api_key = '${ETHERSCAN_API_KEY:-' + api_key + '}' - if datasource_kind != 'abi.etherscan': + if datasource_kind != 'evm.etherscan': api_key = None if 'subsquid' in datasource_kind: diff --git a/src/dipdup/codegen/evm.py b/src/dipdup/codegen/evm.py index 414899910..9afee34e1 100644 --- a/src/dipdup/codegen/evm.py +++ b/src/dipdup/codegen/evm.py @@ -5,9 +5,9 @@ from dipdup.codegen import CodeGenerator from dipdup.config import EvmIndexConfigU from dipdup.config import HandlerConfig -from dipdup.config.abi_etherscan import AbiEtherscanDatasourceConfig from dipdup.config.evm import EvmContractConfig from dipdup.config.evm import EvmIndexConfig +from dipdup.config.evm_etherscan import EvmEtherscanDatasourceConfig from dipdup.config.evm_events import EvmEventsHandlerConfig from dipdup.config.evm_events import EvmEventsIndexConfig from dipdup.config.evm_transactions import EvmTransactionsHandlerConfig @@ -60,7 +60,7 @@ async def generate_handlers(self) -> None: pass async 
def _fetch_abi(self, index_config: EvmIndexConfigU) -> None: - datasource_configs = tuple(c for c in index_config.datasources if isinstance(c, AbiEtherscanDatasourceConfig)) + datasource_configs = tuple(c for c in index_config.datasources if isinstance(c, EvmEtherscanDatasourceConfig)) contract: EvmContractConfig | None = None diff --git a/src/dipdup/codegen/substrate.py b/src/dipdup/codegen/substrate.py new file mode 100644 index 000000000..4cd53a4ad --- /dev/null +++ b/src/dipdup/codegen/substrate.py @@ -0,0 +1,259 @@ +import logging +from collections import defaultdict +from pathlib import Path +from typing import Any +from typing import cast + +import orjson + +from dipdup.codegen import CodeGenerator +from dipdup.config import DipDupConfig +from dipdup.config import HandlerConfig +from dipdup.config.substrate import SubstrateIndexConfig +from dipdup.config.substrate_events import SubstrateEventsIndexConfig +from dipdup.config.substrate_subscan import SubstrateSubscanDatasourceConfig +from dipdup.datasources import Datasource +from dipdup.datasources.substrate_node import SubstrateNodeDatasource +from dipdup.datasources.substrate_subscan import SubstrateSubscanDatasource +from dipdup.package import DipDupPackage +from dipdup.runtimes import SubstrateRuntime +from dipdup.runtimes import extract_args_name +from dipdup.utils import json_dumps +from dipdup.utils import pascal_to_snake +from dipdup.utils import snake_to_pascal +from dipdup.utils import sorted_glob +from dipdup.utils import write + +_logger = logging.getLogger(__name__) + + +def scale_type_to_jsonschema( + type_registry: dict[str, Any], + type_string: str, +) -> dict[str, Any]: + if type_string in type_registry['types']: + type_def = type_registry['types'][type_string] + if isinstance(type_def, str): + return scale_type_to_jsonschema(type_registry, type_def) + if isinstance(type_def, dict): + if 'type' in type_def: + return scale_type_to_jsonschema(type_registry, type_def['type']) + if '_enum' in 
type_def: + return { + 'description': type_string, + 'type': 'string', + 'enum': ( + list(type_def['_enum'].keys()) if isinstance(type_def['_enum'], dict) else type_def['_enum'] + ), + } + if '_struct' in type_def: + return { + 'description': type_string, + 'type': 'object', + 'properties': { + k: scale_type_to_jsonschema(type_registry, v) for k, v in type_def['_struct'].items() + }, + } + + # Handle primitives, default to str + schema: dict[str, Any] = { + 'description': type_string, + 'type': 'string', + } + + if type_string.lower() in ('u8', 'u16', 'u32', 'u64', 'u128', 'i8', 'i16', 'i32', 'i64', 'i128'): + schema['type'] = 'integer' + elif type_string == 'bool': + schema['type'] = 'boolean' + elif type_string in ['String', 'str']: + schema['type'] = 'string' + elif type_string.startswith('Vec<'): + inner_type = type_string[4:-1] + schema['type'] = 'array' + schema['items'] = scale_type_to_jsonschema(type_registry, inner_type) + elif type_string.startswith('Option<'): + inner_type = type_string[7:-1] + schema['oneOf'] = [{'type': 'null'}, scale_type_to_jsonschema(type_registry, inner_type)] + + return schema + + +def event_metadata_to_jsonschema( + type_registry: dict[str, Any], + metadata: dict[str, Any], +) -> dict[str, Any]: + description = '\n'.join(metadata['docs']).replace(r'\[', '[').replace(r'\]', ']') + args_name = [a for a in metadata.get('args_name', ()) if a] + if not args_name: + args_name = extract_args_name(description) # type: ignore[assignment] + schema = { + '$schema': 'http://json-schema.org/draft-07/schema#', + 'title': metadata['name'], + 'description': description, + 'type': 'object', + 'properties': {}, + 'required': args_name, + } + for arg_name, arg_type in zip(args_name, metadata['args'], strict=True): + schema['properties'][arg_name] = scale_type_to_jsonschema(type_registry, arg_type) + schema['properties'][arg_name]['description'] = arg_type + + return schema + + +class SubstrateCodeGenerator(CodeGenerator): + kind = 'substrate' + + 
def __init__( + self, + config: DipDupConfig, + package: DipDupPackage, + datasources: dict[str, Datasource[Any]], + include: set[str] | None = None, + ) -> None: + super().__init__(config, package, datasources, include) + + self._runtimes: dict[str, SubstrateRuntime] = {} + + async def generate_abis(self) -> None: + processed = set() + + for index_config in self._config.indexes.values(): + if not isinstance(index_config, SubstrateIndexConfig): + continue + name = index_config.runtime.name + if name in processed: + continue + + for datasource_config in index_config.datasources: + if isinstance(datasource_config, SubstrateSubscanDatasourceConfig): + datasource = cast(SubstrateSubscanDatasource, self._datasources[datasource_config.name]) + break + else: + raise NotImplementedError('Codegen currently requires `substrate.subscan` datasource') + + runtime_list = await datasource.get_runtime_list() + _logger.info('found %s runtimes', len(runtime_list)) + + for spec in runtime_list[::-1]: + spec_version = spec['spec_version'] + + key = f'v{spec_version}' + # NOTE: Important versions will be copied to project later + abi_path = self._package.abi_local.joinpath(f'{name}/{key}.json') + if abi_path.exists(): + continue + + _logger.info('v%s metadata not found, fetching', spec_version) + metadata = await datasource.get_runtime_metadata(spec_version) + write(abi_path, json_dumps(metadata)) + + processed.add(name) + + async def generate_schemas(self) -> None: + self._cleanup_schemas() + + handler_config: HandlerConfig + target_events: dict[str, list[str]] = {} + + for index_config in self._config.indexes.values(): + if isinstance(index_config, SubstrateEventsIndexConfig): + runtime_name = index_config.runtime.name + if runtime_name not in target_events: + target_events[runtime_name] = [] + for handler_config in index_config.handlers: + target_events[runtime_name].append(handler_config.name) + + if not target_events: + return + + latest_dumps: defaultdict[str, bytes] = 
defaultdict(bytes) + + for runtime_name, events in target_events.items(): + for metadata_path in sorted_glob(self._package.abi_local, f'{runtime_name}/*.json'): + metadata = orjson.loads(metadata_path.read_bytes()) + + type_registry = self._get_runtime(runtime_name).runtime_config.type_registry + + for module in metadata: + for event_item in module.get('events', []): + qualname = f'{module["name"]}.{event_item["name"]}' + if qualname not in events: + continue + + # FIXME: ignore when only docs changed? + dump = orjson.dumps({**event_item, 'name': ''}) + if dump == latest_dumps[qualname]: + continue + latest_dumps[qualname] = dump + + # TODO: Copy used abis to the project. This logic should be somewhere else. + write(self._package.abi.joinpath(runtime_name, metadata_path.name), metadata_path.read_bytes()) + + schema_path = ( + self._package.schemas + / 'substrate' + / runtime_name + / 'substrate_events' + / pascal_to_snake(qualname) + / f'{metadata_path.stem.replace('.', '_')}.json' + ) + if schema_path.exists(): + continue + + jsonschema = event_metadata_to_jsonschema(type_registry, event_item) + + write(schema_path, json_dumps(jsonschema)) + + async def _generate_types(self, force: bool = False) -> None: + await super()._generate_types(force) + + for typeclass_dir in self._package.types.glob('**/substrate_events/*'): + + typeclass_name = f'{snake_to_pascal(typeclass_dir.name)}Payload' + + versions = [p.stem[1:] for p in typeclass_dir.glob('*.py') if p.name.startswith('v')] + root_lines = [ + *(f'from .v{v} import V{v}' for v in versions), + '', + f'type {typeclass_name} = ' + ' | '.join(f'V{v}' for v in versions), + '', + ] + + write(typeclass_dir.joinpath('__init__.py'), '\n'.join(root_lines), overwrite=True) + + async def generate_hooks(self) -> None: + pass + + async def generate_system_hooks(self) -> None: + pass + + async def generate_handlers(self) -> None: + pass + + def get_typeclass_name(self, schema_path: Path) -> str: + module_name = schema_path.stem + 
if schema_path.parent.name == 'substrate_events': + class_name = f'{module_name}_payload' + else: + class_name = module_name + return snake_to_pascal(class_name) + + async def _generate_type(self, schema_path: Path, force: bool) -> None: + markers = { + 'substrate_events', + } + if not set(schema_path.parts).intersection(markers): + return + await super()._generate_type(schema_path, force) + + def _get_runtime(self, name: str) -> SubstrateRuntime: + if name not in self._runtimes: + self._runtimes[name] = SubstrateRuntime( + config=self._config.runtimes[name], + package=self._package, + interface=next( + d for d in self._datasources.values() if isinstance(d, SubstrateNodeDatasource) + )._interface, + ) + return self._runtimes[name] diff --git a/src/dipdup/config/__init__.py b/src/dipdup/config/__init__.py index 72fdd3a65..614cca75c 100644 --- a/src/dipdup/config/__init__.py +++ b/src/dipdup/config/__init__.py @@ -267,6 +267,17 @@ def module_path(self) -> Path: return Path(*self.module_name.split('.')) +# FIXME: we use Substrate runtimes as contracts for codegen +class RuntimeConfig(ContractConfig): + """Runtime config + + :param kind: Defined by child class + :param typename: Alias for the typeclass directory + """ + + pass + + class DatasourceConfig(ABC, NameMixin): """Base class for datasource configs @@ -276,7 +287,8 @@ class DatasourceConfig(ABC, NameMixin): """ kind: str - url: str + url: Url + ws_url: WsUrl | None = None http: HttpConfig | None = None @@ -556,6 +568,7 @@ class DipDupConfig: :param package: Name of indexer's Python package, existing or not :param datasources: Mapping of datasource aliases and datasource configs :param database: Database config + :param runtimes: Mapping of runtime aliases and runtime configs :param contracts: Mapping of contract aliases and contract configs :param indexes: Mapping of index aliases and index configs :param templates: Mapping of template aliases and index templates @@ -576,6 +589,7 @@ class DipDupConfig: 
database: SqliteDatabaseConfig | PostgresDatabaseConfig = Field( default_factory=lambda *a, **kw: SqliteDatabaseConfig(kind='sqlite') ) + runtimes: dict[str, RuntimeConfigU] = Field(default_factory=dict) contracts: dict[str, ContractConfigU] = Field(default_factory=dict) indexes: dict[str, IndexConfigU] = Field(default_factory=dict) templates: dict[str, ResolvedIndexConfigU] = Field(default_factory=dict) @@ -780,12 +794,21 @@ def get_evm_node_datasource(self, name: str) -> EvmNodeDatasourceConfig: raise ConfigurationError('`datasource` field must refer to TzKT datasource') return datasource - def get_abi_etherscan_datasource(self, name: str) -> AbiEtherscanDatasourceConfig: + def get_evm_etherscan_datasource(self, name: str) -> EvmEtherscanDatasourceConfig: datasource = self.get_datasource(name) - if not isinstance(datasource, AbiEtherscanDatasourceConfig): + if not isinstance(datasource, EvmEtherscanDatasourceConfig): raise ConfigurationError('`datasource` field must refer to Etherscan datasource') return datasource + # NOTE: Alias, remove in 9.0 + get_abi_etherscan_datasource = get_evm_etherscan_datasource + + def get_substrate_subsquid_datasource(self, name: str) -> SubstrateSubsquidDatasourceConfig: + datasource = self.get_datasource(name) + if not isinstance(datasource, SubstrateSubsquidDatasourceConfig): + raise ConfigurationError('`datasource` field must refer to Subsquid datasource') + return datasource + def set_up_logging(self) -> None: if isinstance(self.logging, dict): loglevels = { @@ -1066,6 +1089,13 @@ def _resolve_index_links(self, index_config: ResolvedIndexConfigU) -> None: if isinstance(handler_config.contract, str): handler_config.contract = self.get_starknet_contract(handler_config.contract) + + elif isinstance(index_config, SubstrateEventsIndexConfig): + if isinstance(index_config.runtime, str): + index_config.runtime = self.runtimes[index_config.runtime] + for handler_config in index_config.handlers: + handler_config.parent = index_config + 
else: raise NotImplementedError(f'Index kind `{index_config.kind}` is not supported') @@ -1075,6 +1105,7 @@ def _set_names(self) -> None: ( self.contracts, self.datasources, + self.runtimes, self.hooks, self.jobs, self.templates, @@ -1097,9 +1128,9 @@ def _set_names(self) -> None: """ # NOTE: Reimport to avoid circular imports -from dipdup.config.abi_etherscan import AbiEtherscanDatasourceConfig from dipdup.config.coinbase import CoinbaseDatasourceConfig from dipdup.config.evm import EvmContractConfig +from dipdup.config.evm_etherscan import EvmEtherscanDatasourceConfig from dipdup.config.evm_events import EvmEventsIndexConfig from dipdup.config.evm_node import EvmNodeDatasourceConfig from dipdup.config.evm_subsquid import EvmSubsquidDatasourceConfig @@ -1110,6 +1141,11 @@ def _set_names(self) -> None: from dipdup.config.starknet_events import StarknetEventsIndexConfig from dipdup.config.starknet_node import StarknetNodeDatasourceConfig from dipdup.config.starknet_subsquid import StarknetSubsquidDatasourceConfig +from dipdup.config.substrate import SubstrateRuntimeConfig +from dipdup.config.substrate_events import SubstrateEventsIndexConfig +from dipdup.config.substrate_node import SubstrateNodeDatasourceConfig +from dipdup.config.substrate_subscan import SubstrateSubscanDatasourceConfig +from dipdup.config.substrate_subsquid import SubstrateSubsquidDatasourceConfig from dipdup.config.tezos import TezosContractConfig from dipdup.config.tezos_big_maps import TezosBigMapsIndexConfig from dipdup.config.tezos_events import TezosEventsIndexConfig @@ -1126,10 +1162,11 @@ def _set_names(self) -> None: from dipdup.config.tzip_metadata import TzipMetadataDatasourceConfig # NOTE: Unions for Pydantic config deserialization +RuntimeConfigU = SubstrateRuntimeConfig ContractConfigU = EvmContractConfig | TezosContractConfig | StarknetContractConfig DatasourceConfigU = ( CoinbaseDatasourceConfig - | AbiEtherscanDatasourceConfig + | EvmEtherscanDatasourceConfig | 
HttpDatasourceConfig | IpfsDatasourceConfig | EvmSubsquidDatasourceConfig @@ -1138,6 +1175,9 @@ def _set_names(self) -> None: | TezosTzktDatasourceConfig | StarknetSubsquidDatasourceConfig | StarknetNodeDatasourceConfig + | SubstrateSubsquidDatasourceConfig + | SubstrateSubscanDatasourceConfig + | SubstrateNodeDatasourceConfig ) TezosIndexConfigU = ( TezosBigMapsIndexConfig @@ -1150,8 +1190,9 @@ def _set_names(self) -> None: ) EvmIndexConfigU = EvmEventsIndexConfig | EvmTransactionsIndexConfig StarknetIndexConfigU = StarknetEventsIndexConfig +SubstrateIndexConfigU = SubstrateEventsIndexConfig -ResolvedIndexConfigU = TezosIndexConfigU | EvmIndexConfigU | StarknetIndexConfigU +ResolvedIndexConfigU = TezosIndexConfigU | EvmIndexConfigU | StarknetIndexConfigU | SubstrateIndexConfigU IndexConfigU = ResolvedIndexConfigU | IndexTemplateConfig diff --git a/src/dipdup/config/abi_etherscan.py b/src/dipdup/config/abi_etherscan.py index cad2db651..8e423501f 100644 --- a/src/dipdup/config/abi_etherscan.py +++ b/src/dipdup/config/abi_etherscan.py @@ -1,26 +1,2 @@ -from __future__ import annotations - -from typing import Literal - -from pydantic import ConfigDict -from pydantic.dataclasses import dataclass - -from dipdup.config import DatasourceConfig -from dipdup.config import HttpConfig - - -@dataclass(config=ConfigDict(extra='forbid', defer_build=True), kw_only=True) -class AbiEtherscanDatasourceConfig(DatasourceConfig): - """Etherscan datasource config - - :param kind: always 'abi.etherscan' - :param url: API URL - :param api_key: API key - :param http: HTTP client configuration - """ - - kind: Literal['abi.etherscan'] - url: str - api_key: str | None = None - - http: HttpConfig | None = None +# NOTE: Alias, remove in 9.0 +from dipdup.config.evm_etherscan import EvmEtherscanDatasourceConfig as EvmEtherscanDatasourceConfig diff --git a/src/dipdup/config/evm.py b/src/dipdup/config/evm.py index 246dfef47..d903937a1 100644 --- a/src/dipdup/config/evm.py +++ 
b/src/dipdup/config/evm.py @@ -13,7 +13,7 @@ from dipdup.config import ContractConfig from dipdup.config import Hex from dipdup.config import IndexConfig -from dipdup.config.abi_etherscan import AbiEtherscanDatasourceConfig +from dipdup.config.evm_etherscan import EvmEtherscanDatasourceConfig from dipdup.config.evm_node import EvmNodeDatasourceConfig from dipdup.config.evm_subsquid import EvmSubsquidDatasourceConfig from dipdup.exceptions import ConfigurationError @@ -21,7 +21,7 @@ EVM_ADDRESS_PREFIXES = ('0x',) EVM_ADDRESS_LENGTH = 42 -EvmDatasourceConfigU: TypeAlias = EvmSubsquidDatasourceConfig | EvmNodeDatasourceConfig | AbiEtherscanDatasourceConfig +EvmDatasourceConfigU: TypeAlias = EvmSubsquidDatasourceConfig | EvmNodeDatasourceConfig | EvmEtherscanDatasourceConfig def _validate_evm_address(v: str) -> str: diff --git a/src/dipdup/config/evm_etherscan.py b/src/dipdup/config/evm_etherscan.py new file mode 100644 index 000000000..834c72e62 --- /dev/null +++ b/src/dipdup/config/evm_etherscan.py @@ -0,0 +1,27 @@ +from __future__ import annotations + +from typing import Literal + +from pydantic import ConfigDict +from pydantic.dataclasses import dataclass + +from dipdup.config import DatasourceConfig +from dipdup.config import HttpConfig + + +@dataclass(config=ConfigDict(extra='forbid', defer_build=True), kw_only=True) +class EvmEtherscanDatasourceConfig(DatasourceConfig): + """Etherscan datasource config + + :param kind: always 'evm.etherscan' + :param url: API URL + :param api_key: API key + :param http: HTTP client configuration + """ + + # NOTE: Alias, remove in 9.0 + kind: Literal['evm.etherscan'] | Literal['abi.etherscan'] + url: str + api_key: str | None = None + + http: HttpConfig | None = None diff --git a/src/dipdup/config/starknet_subsquid.py b/src/dipdup/config/starknet_subsquid.py index fb8279391..581f8400d 100644 --- a/src/dipdup/config/starknet_subsquid.py +++ b/src/dipdup/config/starknet_subsquid.py @@ -29,4 +29,5 @@ def merge_subscriptions(self) -> 
bool: @property def rollback_depth(self) -> int: + # NOTE: Subsquid data is always finalized return 0 diff --git a/src/dipdup/config/substrate.py b/src/dipdup/config/substrate.py new file mode 100644 index 000000000..57dbca58c --- /dev/null +++ b/src/dipdup/config/substrate.py @@ -0,0 +1,44 @@ +from __future__ import annotations + +from abc import ABC +from typing import Literal +from typing import TypeAlias + +from pydantic import ConfigDict +from pydantic.dataclasses import dataclass + +from dipdup.config import Alias +from dipdup.config import IndexConfig +from dipdup.config import RuntimeConfig +from dipdup.config.substrate_node import SubstrateNodeDatasourceConfig +from dipdup.config.substrate_subscan import SubstrateSubscanDatasourceConfig +from dipdup.config.substrate_subsquid import SubstrateSubsquidDatasourceConfig + +SubstrateDatasourceConfigU: TypeAlias = ( + SubstrateSubsquidDatasourceConfig | SubstrateSubscanDatasourceConfig | SubstrateNodeDatasourceConfig +) + + +@dataclass(config=ConfigDict(extra='forbid', defer_build=True), kw_only=True) +class SubstrateRuntimeConfig(RuntimeConfig): + """Substrate runtime config + + :param kind: Always 'substrate' + :param type_registry: Path to type registry or its alias + """ + + kind: Literal['substrate'] = 'substrate' + type_registry: str | None = None + + +@dataclass(config=ConfigDict(extra='forbid', defer_build=True), kw_only=True) +class SubstrateIndexConfig(IndexConfig, ABC): + """EVM index that use Subsquid Network as a datasource + + :param kind: starts with 'substrate' + :param datasources: `substrate` datasources to use + :param runtime: Substrate runtime + """ + + datasources: tuple[Alias[SubstrateDatasourceConfigU], ...] 
+ runtime: Alias[SubstrateRuntimeConfig] diff --git a/src/dipdup/config/substrate_events.py b/src/dipdup/config/substrate_events.py new file mode 100644 index 000000000..dc9e43759 --- /dev/null +++ b/src/dipdup/config/substrate_events.py @@ -0,0 +1,74 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Literal +from typing import cast + +from pydantic import ConfigDict +from pydantic.dataclasses import dataclass + +from dipdup.config import Alias +from dipdup.config import HandlerConfig +from dipdup.config.substrate import SubstrateDatasourceConfigU +from dipdup.config.substrate import SubstrateIndexConfig +from dipdup.config.substrate import SubstrateRuntimeConfig +from dipdup.models.substrate_node import SubstrateNodeHeadSubscription +from dipdup.utils import pascal_to_snake +from dipdup.utils import snake_to_pascal + +if TYPE_CHECKING: + from collections.abc import Iterator + + from dipdup.subscriptions import Subscription + + +@dataclass(config=ConfigDict(extra='forbid', defer_build=True), kw_only=True) +class SubstrateEventsHandlerConfig(HandlerConfig): + """Subsquid event handler + + :param callback: Callback name + :param name: Event name (pallet.event) + """ + + name: str + + def iter_imports(self, package: str) -> Iterator[tuple[str, str]]: + yield 'dipdup.context', 'HandlerContext' + yield 'dipdup.models.substrate', 'SubstrateEvent' + yield package, 'models as models' + + event_cls = snake_to_pascal(self.name) + 'Payload' + event_module = pascal_to_snake(self.name) + + parent = cast(SubstrateIndexConfig, self.parent) + yield f'{package}.types.{parent.runtime.name}.substrate_events.{event_module}', event_cls + + def iter_arguments(self) -> Iterator[tuple[str, str]]: + event_cls = snake_to_pascal(self.name) + 'Payload' + yield 'ctx', 'HandlerContext' + yield 'event', f'SubstrateEvent[{event_cls}]' + + +@dataclass(config=ConfigDict(extra='forbid', defer_build=True), kw_only=True) +class 
SubstrateEventsIndexConfig(SubstrateIndexConfig): + """Subsquid datasource config + + :param kind: Always 'substrate.events' + :param datasources: `substrate` datasources to use + :param handlers: Event handlers + :param first_level: Level to start indexing from + :param last_level: Level to stop indexing and disable this index + :param typename: Alias for pallet interface + :param runtime: Substrate runtime + """ + + kind: Literal['substrate.events'] + datasources: tuple[Alias[SubstrateDatasourceConfigU], ...] + handlers: tuple[SubstrateEventsHandlerConfig, ...] + runtime: Alias[SubstrateRuntimeConfig] + + first_level: int = 0 + last_level: int = 0 + + def get_subscriptions(self) -> set[Subscription]: + return {SubstrateNodeHeadSubscription(fetch_events=True)} diff --git a/src/dipdup/config/substrate_node.py b/src/dipdup/config/substrate_node.py new file mode 100644 index 000000000..a7eeb402c --- /dev/null +++ b/src/dipdup/config/substrate_node.py @@ -0,0 +1,36 @@ +from __future__ import annotations + +from typing import Literal + +from pydantic import ConfigDict +from pydantic.dataclasses import dataclass + +from dipdup.config import DatasourceConfig +from dipdup.config import HttpConfig +from dipdup.config import Url +from dipdup.config import WsUrl + + +@dataclass(config=ConfigDict(extra='forbid', defer_build=True), kw_only=True) +class SubstrateNodeDatasourceConfig(DatasourceConfig): + """Substrate node datasource config + + :param kind: Always 'substrate.node' + :param url: Substrate node URL + :param ws_url: Substrate node WebSocket URL + :param http: HTTP client configuration + """ + + kind: Literal['substrate.node'] + url: Url + ws_url: WsUrl | None = None + http: HttpConfig | None = None + + @property + def merge_subscriptions(self) -> bool: + return False + + @property + def rollback_depth(self) -> int: + # NOTE: We use only finalzed heads + return 0 diff --git a/src/dipdup/config/substrate_subscan.py b/src/dipdup/config/substrate_subscan.py new file 
mode 100644 index 000000000..30dc2d178 --- /dev/null +++ b/src/dipdup/config/substrate_subscan.py @@ -0,0 +1,26 @@ +from __future__ import annotations + +from typing import Literal + +from pydantic import ConfigDict +from pydantic.dataclasses import dataclass + +from dipdup.config import DatasourceConfig +from dipdup.config import HttpConfig + + +@dataclass(config=ConfigDict(extra='forbid', defer_build=True), kw_only=True) +class SubstrateSubscanDatasourceConfig(DatasourceConfig): + """Subscan datasource config + + :param kind: always 'substrate.subscan' + :param url: API URL + :param api_key: API key + :param http: HTTP client configuration + """ + + kind: Literal['substrate.subscan'] + url: str + api_key: str | None = None + + http: HttpConfig | None = None diff --git a/src/dipdup/config/substrate_subsquid.py b/src/dipdup/config/substrate_subsquid.py new file mode 100644 index 000000000..b2de5eb3e --- /dev/null +++ b/src/dipdup/config/substrate_subsquid.py @@ -0,0 +1,33 @@ +from __future__ import annotations + +from typing import Literal + +from pydantic import ConfigDict +from pydantic.dataclasses import dataclass + +from dipdup.config import DatasourceConfig +from dipdup.config import HttpConfig +from dipdup.config import Url + + +@dataclass(config=ConfigDict(extra='forbid', defer_build=True), kw_only=True) +class SubstrateSubsquidDatasourceConfig(DatasourceConfig): + """Subsquid datasource config + + :param kind: always 'substrate.subsquid' + :param url: URL of Subsquid Network API + :param http: HTTP client configuration + """ + + kind: Literal['substrate.subsquid'] + url: Url + http: HttpConfig | None = None + + @property + def merge_subscriptions(self) -> bool: + return False + + @property + def rollback_depth(self) -> int: + # NOTE: Subsquid data is always finalized + return 0 diff --git a/src/dipdup/context.py b/src/dipdup/context.py index 4ea69b833..46275f2cc 100644 --- a/src/dipdup/context.py +++ b/src/dipdup/context.py @@ -31,6 +31,8 @@ from 
dipdup.config.starknet import StarknetContractConfig from dipdup.config.starknet import StarknetIndexConfig from dipdup.config.starknet_events import StarknetEventsIndexConfig +from dipdup.config.substrate import SubstrateIndexConfig +from dipdup.config.substrate_events import SubstrateEventsIndexConfig from dipdup.config.tezos import TezosContractConfig from dipdup.config.tezos import TezosIndexConfig from dipdup.config.tezos_big_maps import TezosBigMapsIndexConfig @@ -42,14 +44,17 @@ from dipdup.config.tezos_token_transfers import TezosTokenTransfersIndexConfig from dipdup.datasources import Datasource from dipdup.datasources import IndexDatasource -from dipdup.datasources.abi_etherscan import AbiEtherscanDatasource from dipdup.datasources.coinbase import CoinbaseDatasource +from dipdup.datasources.evm_etherscan import EvmEtherscanDatasource from dipdup.datasources.evm_node import EvmNodeDatasource from dipdup.datasources.evm_subsquid import EvmSubsquidDatasource from dipdup.datasources.http import HttpDatasource from dipdup.datasources.ipfs import IpfsDatasource from dipdup.datasources.starknet_node import StarknetNodeDatasource from dipdup.datasources.starknet_subsquid import StarknetSubsquidDatasource +from dipdup.datasources.substrate_node import SubstrateNodeDatasource +from dipdup.datasources.substrate_subscan import SubstrateSubscanDatasource +from dipdup.datasources.substrate_subsquid import SubstrateSubsquidDatasource from dipdup.datasources.tezos_tzkt import TezosTzktDatasource from dipdup.datasources.tzip_metadata import TzipMetadataDatasource from dipdup.exceptions import ConfigurationError @@ -64,6 +69,8 @@ from dipdup.indexes.evm_transactions.index import EvmTransactionsIndex from dipdup.indexes.starknet import StarknetIndex from dipdup.indexes.starknet_events.index import StarknetEventsIndex +from dipdup.indexes.substrate import SubstrateIndex +from dipdup.indexes.substrate_events.index import SubstrateEventsIndex from 
dipdup.indexes.tezos_big_maps.index import TezosBigMapsIndex from dipdup.indexes.tezos_events.index import TezosEventsIndex from dipdup.indexes.tezos_head.index import TezosHeadIndex @@ -339,6 +346,8 @@ async def _spawn_index( index = self._create_tezos_index(index_config) elif isinstance(index_config, StarknetIndexConfig): index = self._create_starknet_index(index_config) + elif isinstance(index_config, SubstrateIndexConfig): + index = self._create_substrate_index(index_config) else: raise NotImplementedError @@ -391,6 +400,22 @@ def _create_starknet_index(self, index_config: StarknetIndexConfig) -> StarknetI return index + def _create_substrate_index(self, index_config: SubstrateIndexConfig) -> SubstrateIndex[Any, Any, Any]: + datasource_configs = index_config.datasources + datasources = tuple(self.get_substrate_datasource(c.name) for c in datasource_configs) + index_datasources = tuple(d for d in datasources if isinstance(d, IndexDatasource)) + + for datasource in index_datasources: + datasource.attach_index(index_config) + + index: SubstrateIndex[Any, Any, Any] + if isinstance(index_config, SubstrateEventsIndexConfig): + index = SubstrateEventsIndex(self, index_config, index_datasources) + else: + raise NotImplementedError + + return index + def _create_tezos_index(self, index_config: TezosIndexConfig) -> TezosIndex[Any, Any]: datasources = tuple(self.get_tezos_tzkt_datasource(c.name) for c in index_config.datasources) @@ -491,21 +516,30 @@ def get_evm_node_datasource(self, name: str) -> EvmNodeDatasource: """Get `evm.node` datasource by name""" return self._get_datasource(name, EvmNodeDatasource) - def get_abi_etherscan_datasource(self, name: str) -> AbiEtherscanDatasource: - """Get `abi.etherscan` datasource by name + def get_evm_etherscan_datasource(self, name: str) -> EvmEtherscanDatasource: + """Get `evm.etherscan` datasource by name :param name: Name of the datasource """ - return self._get_datasource(name, AbiEtherscanDatasource) + return 
self._get_datasource(name, EvmEtherscanDatasource) + + # NOTE: Alias, remove in 9.0 + get_abi_etherscan_datasource = get_evm_etherscan_datasource - def get_evm_datasource(self, name: str) -> EvmSubsquidDatasource | EvmNodeDatasource | AbiEtherscanDatasource: + def get_evm_datasource(self, name: str) -> EvmSubsquidDatasource | EvmNodeDatasource | EvmEtherscanDatasource: """Get `evm` datasource by name""" - return self._get_datasource(name, EvmSubsquidDatasource, EvmNodeDatasource, AbiEtherscanDatasource) # type: ignore[return-value] + return self._get_datasource(name, EvmSubsquidDatasource, EvmNodeDatasource, EvmEtherscanDatasource) # type: ignore[return-value] def get_starknet_datasource(self, name: str) -> StarknetSubsquidDatasource | StarknetNodeDatasource: """Get `starknet` datasource by name""" return self._get_datasource(name, StarknetSubsquidDatasource, StarknetNodeDatasource) # type: ignore[return-value] + def get_substrate_datasource( + self, name: str + ) -> SubstrateSubsquidDatasource | SubstrateSubscanDatasource | SubstrateNodeDatasource: + """Get `substrate` datasource by name""" + return self._get_datasource(name, SubstrateSubsquidDatasource, SubstrateSubscanDatasource, SubstrateNodeDatasource) # type: ignore[return-value] + def get_coinbase_datasource(self, name: str) -> CoinbaseDatasource: """Get `coinbase` datasource by name diff --git a/src/dipdup/datasources/__init__.py b/src/dipdup/datasources/__init__.py index 23ce0ad52..aa8acd06a 100644 --- a/src/dipdup/datasources/__init__.py +++ b/src/dipdup/datasources/__init__.py @@ -143,8 +143,7 @@ def _get_ws_client(self) -> WebsocketTransport: self._logger.debug('Creating Websocket client') - # FIXME: correct config class - url = self._config.ws_url # type: ignore + url = self._config.ws_url if not url: raise FrameworkException('Spawning node datasource, but `ws_url` is not set') self._ws_client = WebsocketTransport( @@ -227,29 +226,35 @@ async def get_head_level(self) -> int: ... 
def create_datasource(config: DatasourceConfig) -> Datasource[Any]: - from dipdup.config.abi_etherscan import AbiEtherscanDatasourceConfig from dipdup.config.coinbase import CoinbaseDatasourceConfig + from dipdup.config.evm_etherscan import EvmEtherscanDatasourceConfig from dipdup.config.evm_node import EvmNodeDatasourceConfig from dipdup.config.evm_subsquid import EvmSubsquidDatasourceConfig from dipdup.config.http import HttpDatasourceConfig from dipdup.config.ipfs import IpfsDatasourceConfig from dipdup.config.starknet_node import StarknetNodeDatasourceConfig from dipdup.config.starknet_subsquid import StarknetSubsquidDatasourceConfig + from dipdup.config.substrate_node import SubstrateNodeDatasourceConfig + from dipdup.config.substrate_subscan import SubstrateSubscanDatasourceConfig + from dipdup.config.substrate_subsquid import SubstrateSubsquidDatasourceConfig from dipdup.config.tezos_tzkt import TezosTzktDatasourceConfig from dipdup.config.tzip_metadata import TzipMetadataDatasourceConfig - from dipdup.datasources.abi_etherscan import AbiEtherscanDatasource from dipdup.datasources.coinbase import CoinbaseDatasource + from dipdup.datasources.evm_etherscan import EvmEtherscanDatasource from dipdup.datasources.evm_node import EvmNodeDatasource from dipdup.datasources.evm_subsquid import EvmSubsquidDatasource from dipdup.datasources.http import HttpDatasource from dipdup.datasources.ipfs import IpfsDatasource from dipdup.datasources.starknet_node import StarknetNodeDatasource from dipdup.datasources.starknet_subsquid import StarknetSubsquidDatasource + from dipdup.datasources.substrate_node import SubstrateNodeDatasource + from dipdup.datasources.substrate_subscan import SubstrateSubscanDatasource + from dipdup.datasources.substrate_subsquid import SubstrateSubsquidDatasource from dipdup.datasources.tezos_tzkt import TezosTzktDatasource from dipdup.datasources.tzip_metadata import TzipMetadataDatasource by_config: dict[type[DatasourceConfig], 
type[Datasource[Any]]] = { - AbiEtherscanDatasourceConfig: AbiEtherscanDatasource, + EvmEtherscanDatasourceConfig: EvmEtherscanDatasource, CoinbaseDatasourceConfig: CoinbaseDatasource, TezosTzktDatasourceConfig: TezosTzktDatasource, TzipMetadataDatasourceConfig: TzipMetadataDatasource, @@ -259,6 +264,9 @@ def create_datasource(config: DatasourceConfig) -> Datasource[Any]: EvmNodeDatasourceConfig: EvmNodeDatasource, StarknetSubsquidDatasourceConfig: StarknetSubsquidDatasource, StarknetNodeDatasourceConfig: StarknetNodeDatasource, + SubstrateSubsquidDatasourceConfig: SubstrateSubsquidDatasource, + SubstrateSubscanDatasourceConfig: SubstrateSubscanDatasource, + SubstrateNodeDatasourceConfig: SubstrateNodeDatasource, } try: diff --git a/src/dipdup/datasources/abi_etherscan.py b/src/dipdup/datasources/abi_etherscan.py index 092eb9d2a..9ef4c021a 100644 --- a/src/dipdup/datasources/abi_etherscan.py +++ b/src/dipdup/datasources/abi_etherscan.py @@ -1,82 +1,2 @@ -import asyncio -import re -from copy import copy -from typing import Any -from typing import cast - -import orjson - -from dipdup.config import HttpConfig -from dipdup.config.abi_etherscan import AbiEtherscanDatasourceConfig -from dipdup.datasources import AbiDatasource -from dipdup.datasources import Datasource -from dipdup.exceptions import DatasourceError - - -class AbiEtherscanDatasource(AbiDatasource[AbiEtherscanDatasourceConfig]): - _default_http_config = HttpConfig( - ratelimit_rate=1, - ratelimit_period=5, - ratelimit_sleep=15, - retry_count=5, - ) - - async def run(self) -> None: - pass - - async def get_abi(self, address: str) -> dict[str, Any]: - params = { - 'module': 'contract', - 'action': 'getabi', - 'address': address, - } - if self._config.api_key: - params['apikey'] = self._config.api_key - - for _ in range(self._http_config.retry_count): - response = await self.request( - 'get', - url='', - params=params, - ) - if message := response.get('message'): - self._logger.info(message) - - if result := 
response.get('result'): - if isinstance(result, str): - if 'rate limit reached' in result: - self._logger.warning('Ratelimited; sleeping %s seconds', self._http_config.ratelimit_sleep) - await asyncio.sleep(self._http_config.retry_sleep) - continue - if 'API Key' in result: - self._logger.warning('%s, trying workaround', result) - try: - return await self.get_abi_failover(address) - except Exception as e: - self._logger.warning('Failed to get ABI: %s', e) - - try: - return cast(dict[str, Any], orjson.loads(result)) - except orjson.JSONDecodeError as e: - raise DatasourceError(result, self.name) from e - - raise DatasourceError(message, self.name) - - async def get_abi_failover(self, address: str) -> dict[str, Any]: - config = copy(self._config) - config.url = f'{self._config.url}/token/{address}'.replace('api.', '').replace('/api', '') - html_etherscan = Datasource(config) - async with html_etherscan: - html = await ( - await html_etherscan._http._request( - method='get', - url='', - weight=1, - raw=True, - ) - ).text() - - regex = r'id=["\']js-copytextarea2(.*)>(\[.*?)\<\/pre' - if (match := re.search(regex, html)) and (abi := match.group(2)): - return cast(dict[str, Any], orjson.loads(abi)) - raise DatasourceError('Failed to get ABI', self.name) +# NOTE: Alias, remove in 9.0 +from dipdup.datasources.evm_etherscan import EvmEtherscanDatasource as AbiEtherscanDatasource # noqa: F401 diff --git a/src/dipdup/datasources/evm_etherscan.py b/src/dipdup/datasources/evm_etherscan.py new file mode 100644 index 000000000..34fdd3fef --- /dev/null +++ b/src/dipdup/datasources/evm_etherscan.py @@ -0,0 +1,82 @@ +import asyncio +import re +from copy import copy +from typing import Any +from typing import cast + +import orjson + +from dipdup.config import HttpConfig +from dipdup.config.evm_etherscan import EvmEtherscanDatasourceConfig +from dipdup.datasources import AbiDatasource +from dipdup.datasources import Datasource +from dipdup.exceptions import DatasourceError + + +class 
EvmEtherscanDatasource(AbiDatasource[EvmEtherscanDatasourceConfig]): + _default_http_config = HttpConfig( + ratelimit_rate=1, + ratelimit_period=5, + ratelimit_sleep=15, + retry_count=5, + ) + + async def run(self) -> None: + pass + + async def get_abi(self, address: str) -> dict[str, Any]: + params = { + 'module': 'contract', + 'action': 'getabi', + 'address': address, + } + if self._config.api_key: + params['apikey'] = self._config.api_key + + for _ in range(self._http_config.retry_count): + response = await self.request( + 'get', + url='', + params=params, + ) + if message := response.get('message'): + self._logger.info(message) + + if result := response.get('result'): + if isinstance(result, str): + if 'rate limit reached' in result: + self._logger.warning('Ratelimited; sleeping %s seconds', self._http_config.ratelimit_sleep) + await asyncio.sleep(self._http_config.retry_sleep) + continue + if 'API Key' in result: + self._logger.warning('%s, trying workaround', result) + try: + return await self.get_abi_failover(address) + except Exception as e: + self._logger.warning('Failed to get ABI: %s', e) + + try: + return cast(dict[str, Any], orjson.loads(result)) + except orjson.JSONDecodeError as e: + raise DatasourceError(result, self.name) from e + + raise DatasourceError(message, self.name) + + async def get_abi_failover(self, address: str) -> dict[str, Any]: + config = copy(self._config) + config.url = f'{self._config.url}/token/{address}'.replace('api.', '').replace('/api', '') + html_etherscan = Datasource(config) + async with html_etherscan: + html = await ( + await html_etherscan._http._request( + method='get', + url='', + weight=1, + raw=True, + ) + ).text() + + regex = r'id=["\']js-copytextarea2(.*)>(\[.*?)\<\/pre' + if (match := re.search(regex, html)) and (abi := match.group(2)): + return cast(dict[str, Any], orjson.loads(abi)) + raise DatasourceError('Failed to get ABI', self.name) diff --git a/src/dipdup/datasources/evm_node.py 
b/src/dipdup/datasources/evm_node.py index 6079dc6fe..064b8f2c8 100644 --- a/src/dipdup/datasources/evm_node.py +++ b/src/dipdup/datasources/evm_node.py @@ -38,7 +38,6 @@ NODE_LEVEL_TIMEOUT = 0.1 -NODE_LAST_MILE = 128 HeadCallback = Callable[['EvmNodeDatasource', EvmNodeHeadData], Awaitable[None]] @@ -103,7 +102,7 @@ async def initialize(self) -> None: self.set_sync_level(None, level) async def run(self) -> None: - if self.realtime: + if self.ws_available: await asyncio.gather( self._ws_loop(), self._emitter_loop(), @@ -182,11 +181,11 @@ async def _ws_loop(self) -> None: raise DatasourceError('Websocket connection failed', self.name) @property - def realtime(self) -> bool: + def ws_available(self) -> bool: return self._config.ws_url is not None async def subscribe(self) -> None: - if not self.realtime: + if not self.ws_available: return missing_subscriptions = self._subscriptions.missing_subscriptions diff --git a/src/dipdup/datasources/starknet_node.py b/src/dipdup/datasources/starknet_node.py index 0ad1c638c..537f2b269 100644 --- a/src/dipdup/datasources/starknet_node.py +++ b/src/dipdup/datasources/starknet_node.py @@ -35,7 +35,7 @@ async def initialize(self) -> None: self.set_sync_level(None, level) async def run(self) -> None: - if self.realtime: + if self.ws_available: raise NotImplementedError('Realtime mode is not supported yet; remove `ws_url` from datasource config') while True: @@ -44,11 +44,11 @@ async def run(self) -> None: await asyncio.sleep(self._http_config.polling_interval) @property - def realtime(self) -> bool: + def ws_available(self) -> bool: return self._config.ws_url is not None async def subscribe(self) -> None: - if self.realtime: + if self.ws_available: raise NotImplementedError('Realtime mode is not supported yet; remove `ws_url` from datasource config') async def get_head_level(self) -> int: diff --git a/src/dipdup/datasources/substrate_node.py b/src/dipdup/datasources/substrate_node.py new file mode 100644 index 000000000..2421641e6 
--- /dev/null +++ b/src/dipdup/datasources/substrate_node.py @@ -0,0 +1,399 @@ +import asyncio +import logging +import math +from asyncio import Queue +from collections.abc import Awaitable +from collections.abc import Callable +from copy import copy +from dataclasses import dataclass +from dataclasses import field +from functools import partial +from pathlib import Path +from typing import Any + +import orjson +import pysignalr.exceptions + +from dipdup.config import HttpConfig +from dipdup.config.substrate_node import SubstrateNodeDatasourceConfig +from dipdup.datasources import JsonRpcDatasource +from dipdup.exceptions import DatasourceError +from dipdup.exceptions import FrameworkException +from dipdup.models.substrate import SubstrateEventData +from dipdup.models.substrate import SubstrateHeadBlockData +from dipdup.models.substrate import _BlockHeader +from dipdup.models.substrate import _SubstrateNodeEventResponse +from dipdup.models.substrate_node import SubstrateNodeHeadSubscription +from dipdup.models.substrate_node import SubstrateNodeSubscription +from dipdup.pysignalr import Message +from dipdup.pysignalr import WebsocketMessage +from dipdup.utils import Watchdog + +_logger = logging.getLogger(__name__) + + +HeadCallback = Callable[['SubstrateNodeDatasource', SubstrateHeadBlockData], Awaitable[None]] +EventCallback = Callable[['SubstrateNodeDatasource', tuple[SubstrateEventData, ...]], Awaitable[None]] + + +# NOTE: Renamed entity class LevelData from evm_node +@dataclass +class SubscriptionMessage: + head: SubstrateHeadBlockData + fetch_events: bool = False + + +@dataclass +class MetadataVersion: + spec_name: str + spec_version: int + block_number: int + block_hash: str + metadata: str | None = None + + @property + def key(self) -> str: + return f'{self.spec_name}@{self.spec_version}' + + +MetadataHeader = MetadataVersion + + +def equal_specs(a: MetadataVersion, b: MetadataVersion) -> bool: + return a.spec_name == b.spec_name and a.spec_version == 
b.spec_version + + +@dataclass +class MetadataStorage: + path: Path + versions: list[MetadataVersion] = field(default_factory=list) + + def load_file(self) -> None: + if self.path.name.endswith('.jsonl'): + self.versions = [] + for line in self.path.read_text().splitlines(): + if not line: + continue + version = MetadataVersion(**orjson.loads(line)) + self.versions.append(version) + elif self.path.name.endswith('.json'): + self.versions = [MetadataVersion(**i) for i in orjson.loads(self.path.read_bytes())] + else: + raise ValueError(f'Unsupported file type: {self.path}') + + def save_file(self) -> None: + if self.path.name.endswith('.jsonl'): + self.path.write_bytes(b'\n'.join(orjson.dumps(version.__dict__) for version in self.versions)) + elif self.path.name.endswith('.json'): + self.path.write_bytes(orjson.dumps(self.versions)) + else: + raise ValueError(f'Unsupported file type: {self.path}') + + +class SubstrateNodeDatasource(JsonRpcDatasource[SubstrateNodeDatasourceConfig]): + _default_http_config = HttpConfig( + batch_size=20, + ) + + def __init__(self, config: SubstrateNodeDatasourceConfig) -> None: + from aiosubstrate.base import SubstrateInterface + + # NOTE: Use our aiohttp session and limiters + SubstrateInterface.http_request = partial(self._jsonrpc_request, raw=True) # type: ignore[method-assign] + + super().__init__(config) + self._pending_subscription: SubstrateNodeSubscription | None = None + self._subscription_ids: dict[str, SubstrateNodeSubscription] = {} + self._interface = SubstrateInterface(config.url) # type: ignore[no-untyped-call] + + self._emitter_queue: Queue[SubscriptionMessage] = Queue() + + self._watchdog: Watchdog = Watchdog(self._http_config.connection_timeout) + + self._on_head_callbacks: set[HeadCallback] = set() + self._on_event_callbacks: set[EventCallback] = set() + + async def run(self) -> None: + if self.ws_available: + await asyncio.gather( + self._ws_loop(), + self._emitter_loop(), + # self._watchdog.run(), + ) + else: + while 
True: + level = await self.get_head_level() + self.set_sync_level(None, level) + await asyncio.sleep(self._http_config.polling_interval) + + async def initialize(self) -> None: + level = await self.get_head_level() + self.set_sync_level(None, level) + + # NOTE: Prepare substrate_interface + await self._interface.init_props() # type: ignore[no-untyped-call] + self._interface.reload_type_registry() + + async def _ws_loop(self) -> None: + # TODO: probably add to inheritance WebsocketSubscriptionDatasource, and move this method there + self._logger.info('Establishing realtime connection') + client = self._get_ws_client() + retry_sleep = self._http_config.retry_sleep + + for _ in range(1, self._http_config.retry_count + 1): + try: + await client.run() + except pysignalr.exceptions.ConnectionError as e: + self._logger.error('Websocket connection error: %s', e) + await self.emit_disconnected() + await asyncio.sleep(retry_sleep) + retry_sleep *= self._http_config.retry_multiplier + + raise DatasourceError('Websocket connection failed', self.name) + + @property + def ws_available(self) -> bool: + return self._config.ws_url is not None + + async def subscribe(self) -> None: + if not self.ws_available: + return + + missing_subscriptions = self._subscriptions.missing_subscriptions + if not missing_subscriptions: + return + + self._logger.info('Subscribing to %s channels', len(missing_subscriptions)) + for subscription in missing_subscriptions: + if isinstance(subscription, SubstrateNodeSubscription): + await self._subscribe(subscription) + + async def emit_head(self, head: SubstrateHeadBlockData) -> None: + for fn in self._on_head_callbacks: + await fn(self, head) + + async def emit_events(self, events: tuple[SubstrateEventData, ...]) -> None: + for fn in self._on_event_callbacks: + await fn(self, events) + + def call_on_head(self, fn: HeadCallback) -> None: + self._on_head_callbacks.add(fn) + + def call_on_events(self, fn: EventCallback) -> None: + 
self._on_event_callbacks.add(fn) + + async def _on_message(self, message: Message) -> None: + if not isinstance(message, WebsocketMessage): + raise FrameworkException(f'Unknown message type: {type(message)}') + + data = message.data + + if 'id' in data: + + # NOTE: Save subscription id + if self._pending_subscription: + self._subscription_ids[data['result']] = self._pending_subscription + self._requests[data['id']] = (self._requests[data['id']][0], data) + self._requests[data['id']][0].set() + + # NOTE: Possibly unreliable logic from evm_node, and possibly too time consuming for message handling + level = await self.get_head_level() + self._subscriptions.set_sync_level(self._pending_subscription, level) + + # NOTE: Set None to identify possible subscriptions conflicts + self._pending_subscription = None + else: + raise Exception + elif 'method' in data and data['method'].startswith('chain_'): + subscription_id = data['params']['subscription'] + if subscription_id not in self._subscription_ids: + raise FrameworkException(f'{self.name}: Unknown subscription ID: {subscription_id}') + subscription = self._subscription_ids[subscription_id] + await self._handle_subscription(subscription, data['params']['result']) + else: + raise DatasourceError(f'Unknown message: {data}', self.name) + + async def get_head_level(self) -> int: + head = await self._jsonrpc_request('chain_getFinalizedHead', []) + header = await self._jsonrpc_request('chain_getHeader', [head]) + return int(header['number'], 16) + + async def get_block_hash(self, height: int) -> str: + return await self._jsonrpc_request('chain_getBlockHash', [height]) # type: ignore[no-any-return] + + async def get_block_header(self, hash: str) -> _BlockHeader: + response = await self._jsonrpc_request('chain_getHeader', [hash]) + # FIXME: missing fields + return { + 'hash': hash, + 'number': int(response['number'], 16), + 'prev_root': response['parentHash'], + } + + async def get_metadata_header(self, height: int) -> 
MetadataHeader: + block_hash = await self.get_block_hash(height) + rt = await self._jsonrpc_request('chain_getRuntimeVersion', [block_hash]) + return MetadataHeader( + spec_name=rt['specName'], + spec_version=rt['specVersion'], + block_number=height, + block_hash=block_hash, + ) + + async def get_metadata_header_batch(self, heights: list[int]) -> list[MetadataHeader]: + return await asyncio.gather(*[self.get_metadata_header(h) for h in heights]) + + async def get_full_block(self, hash: str) -> dict[str, Any]: + return await self._jsonrpc_request('chain_getBlock', [hash]) # type: ignore[no-any-return] + + async def get_events(self, block_hash: str) -> tuple[_SubstrateNodeEventResponse, ...]: + events = await self._interface.get_events(block_hash) + + result: list[_SubstrateNodeEventResponse] = [] + for raw_event in events: + event: dict[str, Any] = raw_event.decode() + result.append( + { + 'name': f'{event['module_id']}.{event['event_id']}', + 'index': event['event_index'], + 'extrinsic_index': event['extrinsic_idx'], + 'decoded_args': event['attributes'], + } + ) + + return tuple(result) + + async def find_metadata_versions( + self, + from_block: int | None = None, + to_block: int | None = None, + ) -> list[MetadataHeader]: + height = await self.get_head_level() + + first_block = from_block or 0 + last_block = min(to_block, height) if to_block is not None else height + if first_block > last_block: + raise StopAsyncIteration + + queue: list[tuple[MetadataVersion, MetadataVersion]] = [] + versions: dict[str, MetadataVersion] = {} + + beg, end = await self.get_metadata_header_batch([first_block, last_block]) + versions[beg.key] = beg + + if not equal_specs(beg, end): + versions[end.key] = end + queue.append((beg, end)) + + step = 0 + while queue: + batch = queue[: self._http_config.batch_size] + queue = queue[self._http_config.batch_size :] + + step += 1 + _logger.info('step %s, %s versions found so far', step, len(versions)) + + heights = [b.block_number + 
math.floor((e.block_number - b.block_number) / 2) for b, e in batch] + new_versions = await self.get_metadata_header_batch(heights) + for (b, e), m in zip(batch, new_versions, strict=False): + if not equal_specs(b, m): + versions[m.key] = m + if not equal_specs(b, m) and m.block_number - b.block_number > 1: + queue.append((b, m)) + if not equal_specs(m, e) and e.block_number - m.block_number > 1: + queue.append((m, e)) + + return sorted(versions.values(), key=lambda x: x.block_number) + + async def get_raw_metadata(self, block_hash: str) -> str: + return await self._jsonrpc_request('state_getMetadata', [block_hash]) # type: ignore[no-any-return] + + async def get_dev_metadata_version(self) -> MetadataVersion | None: + genesis = await self.get_metadata_header(0) + height = await self.get_head_level() + last = await self.get_metadata_header(height) + if genesis == last: + return genesis + return None + + async def _subscribe(self, subscription: SubstrateNodeSubscription) -> None: + self._logger.debug('Subscribing to %s', subscription) + self._pending_subscription = subscription + response = await self._jsonrpc_request(subscription.method, params=[], ws=True) + self._subscription_ids[response] = subscription + + async def _handle_subscription(self, subscription: SubstrateNodeSubscription, data: Any) -> None: + if isinstance(subscription, SubstrateNodeHeadSubscription): + self._emitter_queue.put_nowait(SubscriptionMessage(head=data, fetch_events=True)) + else: + raise NotImplementedError + + async def _emitter_loop(self) -> None: + while True: + level_data: SubscriptionMessage = await self._emitter_queue.get() + + level = int(level_data.head['number'], 16) + self._logger.info('New head: %s', level) + await self.emit_head(level_data.head) + + # NOTE: subscribing to finalized head, no rollback required + + if level_data.fetch_events: + block_hash = await self.get_block_hash(level) + event_dicts = await self.get_events(block_hash) + block_header = await 
self.get_block_header(block_hash) + events = tuple(SubstrateEventData.from_node(event_dict, block_header) for event_dict in event_dicts) + await self.emit_events(events) + + +# FIXME: Not used, should be a subscan replacement +async def fetch_metadata( + datasource: SubstrateNodeDatasource, + storage: MetadataStorage, + from_block: int | None = None, + to_block: int | None = None, +) -> None: + matched = 0 + for version in storage.versions: + _logger.info('checking %s block %s against current chain', version.key, version.block_number) + current = await datasource.get_metadata_header(version.block_number) + if current and current.block_hash and version.block_hash.startswith(current.block_hash): + matched += 1 + else: + _logger.info('record mismatch') + break + + if matched > 0: + if matched != len(storage.versions): + storage.versions = storage.versions[:matched] + storage.save_file() + last_known = storage.versions[-1] + from_block = max(last_known.block_number, from_block or 0) + _logger.info('exploring chain from block %s', from_block) + new_versions = (await datasource.find_metadata_versions(from_block, to_block))[1:] + _logger.info('%s new versions found', len(new_versions)) + elif not storage.versions: + from_block = from_block or 0 + _logger.info('exploring chain from block %s', from_block) + new_versions = await datasource.find_metadata_versions(from_block, to_block) + _logger.info('%s new versions found', len(new_versions)) + else: + last_known = storage.versions[-1] + new_version = await datasource.get_dev_metadata_version() + if new_version is None or ( + new_version.spec_name == last_known.spec_name and last_known.spec_version > new_version.spec_version + ): + raise ValueError("Output file already contains data for a different chain, don't know how to proceed.") + if new_version.spec_name == last_known.spec_name and new_version.spec_version == last_known.spec_version: + _logger.info('replacing metadata for %s, assuming it came from dev runtime',
last_known.key) + storage.versions = storage.versions[:-1] + storage.save_file() + new_versions = [new_version] + + for header in new_versions: + version = copy(header) + version.metadata = await datasource.get_raw_metadata(version.block_hash) + storage.versions.append(version) + _logger.info('saved %s block %s', version.key, version.block_number) + + storage.save_file() diff --git a/src/dipdup/datasources/substrate_subscan.py b/src/dipdup/datasources/substrate_subscan.py new file mode 100644 index 000000000..3344ee9b0 --- /dev/null +++ b/src/dipdup/datasources/substrate_subscan.py @@ -0,0 +1,29 @@ +from typing import Any +from typing import cast + +from dipdup.config.substrate_subscan import SubstrateSubscanDatasourceConfig +from dipdup.datasources import AbiDatasource + + +class SubstrateSubscanDatasource(AbiDatasource[SubstrateSubscanDatasourceConfig]): + # FIXME: not used in codegen + async def get_abi(self, address: str) -> dict[str, Any]: + raise NotImplementedError + + async def run(self) -> None: + pass + + async def get_runtime_list(self) -> list[dict[str, Any]]: + res = await self.request( + 'post', + 'scan/runtime/list', + ) + return cast(list[dict[str, Any]], res['data']['list']) + + async def get_runtime_metadata(self, spec_version: int) -> dict[str, Any]: + res = await self.request( + 'post', + 'scan/runtime/metadata', + json={'spec': spec_version}, + ) + return cast(dict[str, Any], res['data']['info']['metadata']) diff --git a/src/dipdup/datasources/substrate_subsquid.py b/src/dipdup/datasources/substrate_subsquid.py new file mode 100644 index 000000000..beadb02ba --- /dev/null +++ b/src/dipdup/datasources/substrate_subsquid.py @@ -0,0 +1,56 @@ +from collections.abc import AsyncIterator + +from dipdup.config.substrate_subsquid import SubstrateSubsquidDatasourceConfig +from dipdup.datasources._subsquid import AbstractSubsquidDatasource +from dipdup.models._subsquid import AbstractSubsquidQuery +from dipdup.models.substrate import 
_SubstrateSubsquidEventResponse + +Query = AbstractSubsquidQuery + + +class SubstrateSubsquidDatasource(AbstractSubsquidDatasource[SubstrateSubsquidDatasourceConfig, Query]): + async def iter_events( + self, + first_level: int, + last_level: int, + names: tuple[str, ...], + ) -> AsyncIterator[tuple[_SubstrateSubsquidEventResponse, ...]]: + current_level = first_level + + while current_level <= last_level: + query: Query = { # type: ignore[typeddict-unknown-key] + 'fields': { + 'event': { + 'name': True, + 'args': True, + }, + 'block': { + 'hash': True, + 'parentHash': True, + 'stateRoot': True, + 'extrinsicsRoot': True, + 'digest': True, + 'specName': True, + 'specVersion': True, + 'implName': True, + 'implVersion': True, + 'timestamp': True, + 'validator': True, + }, + }, + 'events': [ + { + 'name': list(names), + }, + ], + 'fromBlock': current_level, + 'toBlock': last_level, + 'type': 'substrate', + } + response = await self.query_worker(query, current_level) + + for level_item in response: + for event_item in level_item['events']: + event_item['header'] = level_item['header'] + yield tuple(level_item['events']) + current_level = level_item['header']['number'] + 1 diff --git a/src/dipdup/dipdup.py b/src/dipdup/dipdup.py index 24c2dd1fe..de8d71aa6 100644 --- a/src/dipdup/dipdup.py +++ b/src/dipdup/dipdup.py @@ -45,6 +45,7 @@ from dipdup.datasources import IndexDatasource from dipdup.datasources import create_datasource from dipdup.datasources.evm_node import EvmNodeDatasource +from dipdup.datasources.substrate_node import SubstrateNodeDatasource from dipdup.datasources.tezos_tzkt import TezosTzktDatasource from dipdup.datasources.tezos_tzkt import late_tzkt_initialization from dipdup.exceptions import ConfigInitializationException @@ -52,6 +53,7 @@ from dipdup.hasura import HasuraGateway from dipdup.indexes.evm_events.index import EvmEventsIndex from dipdup.indexes.evm_transactions.index import EvmTransactionsIndex +from dipdup.indexes.substrate_events.index 
import SubstrateEventsIndex from dipdup.indexes.tezos_big_maps.index import TezosBigMapsIndex from dipdup.indexes.tezos_events.index import TezosEventsIndex from dipdup.indexes.tezos_head.index import TezosHeadIndex @@ -72,6 +74,8 @@ from dipdup.models.evm import EvmTransactionData from dipdup.models.evm_node import EvmNodeHeadData from dipdup.models.evm_node import EvmNodeSyncingData +from dipdup.models.substrate import SubstrateEventData +from dipdup.models.substrate import SubstrateHeadBlockData from dipdup.models.tezos import TezosBigMapData from dipdup.models.tezos import TezosEventData from dipdup.models.tezos import TezosHeadBlockData @@ -226,11 +230,11 @@ async def _update_metrics(self) -> None: active, synced, realtime = 0, 0, 0 levels_indexed, levels_total, levels_interval = 0, 0, 0 for index in self._indexes.values(): - # FIXME: We don't remove disabled indexes from dispatcher anymore - active += 1 - if index.synchronized: + if index.is_active: + active += 1 + if index.is_synchronized: synced += 1 - if index.realtime: + if index.is_realtime: realtime += 1 try: @@ -315,9 +319,9 @@ def _log_status(self) -> None: if not progress: if self._indexes: if scanned_levels: - msg = f'indexing: {scanned_levels:6} levels, estimating...' - elif objects_indexed := int(metrics.objects_indexed): - msg = f'indexing: {objects_indexed:6} objects, estimating...' + msg = f'indexing: {scanned_levels} levels, estimating...' + elif metrics.objects_indexed: + msg = f'indexing: {metrics.objects_indexed} objects, estimating...' else: msg = 'indexing: warming up...' 
else: @@ -325,13 +329,17 @@ def _log_status(self) -> None: _logger.info(msg) return - levels_speed, objects_speed = int(metrics.levels_nonempty_speed), int(metrics.objects_speed) + levels_speed, objects_speed = float(metrics.levels_nonempty_speed), float(metrics.objects_speed) msg = 'last mile' if metrics.synchronized_at else 'indexing' msg += f': {progress:5.1f}% done, {left} levels left' # NOTE: Resulting message is about 80 chars with the current logging format msg += ' ' * (48 - len(msg)) - msg += f' {levels_speed:5} L {objects_speed:5} O' + + def fmt(speed: float) -> str: + return ' 0' if speed < 0.1 else f'{speed:5.{0 if speed >= 1 else 1}f}' + + msg += f' {fmt(levels_speed)} L {fmt(objects_speed)} O' _logger.info(msg) async def _apply_filters(self, index: TezosOperationsIndex) -> None: @@ -450,6 +458,9 @@ async def _subscribe_to_datasource_events(self) -> None: datasource.call_on_events(self._on_evm_node_events) datasource.call_on_transactions(self._on_evm_node_transactions) datasource.call_on_syncing(self._on_evm_node_syncing) + elif isinstance(datasource, SubstrateNodeDatasource): + datasource.call_on_head(self._on_substrate_head) + datasource.call_on_events(self._on_substrate_events) async def _on_tzkt_head(self, datasource: TezosTzktDatasource, head: TezosHeadBlockData) -> None: # NOTE: Do not await query results, it may block Websocket loop. We do not use Head anyway. @@ -546,6 +557,23 @@ async def _on_tzkt_events(self, datasource: TezosTzktDatasource, events: tuple[T if isinstance(index, TezosEventsIndex) and datasource in index.datasources: index.push_realtime_message(events) + async def _on_substrate_head( + self, + datasource: SubstrateNodeDatasource, + head: SubstrateHeadBlockData, + ) -> None: + # TODO: update Head. Does fire_and_forget work atm? 
+ metrics._datasource_head_updated[datasource.name] = time.time() + + async def _on_substrate_events( + self, + datasource: SubstrateNodeDatasource, + events: tuple[SubstrateEventData, ...], + ) -> None: + for index in self._indexes.values(): + if isinstance(index, SubstrateEventsIndex) and datasource in index.datasources: + index.push_realtime_message(events) + async def _on_rollback( self, datasource: IndexDatasource[Any], @@ -635,6 +663,7 @@ async def init( """Create new or update existing dipdup project""" from dipdup.codegen.evm import EvmCodeGenerator from dipdup.codegen.starknet import StarknetCodeGenerator + from dipdup.codegen.substrate import SubstrateCodeGenerator from dipdup.codegen.tezos import TezosCodeGenerator await self._create_datasources() @@ -648,9 +677,10 @@ async def init( codegen_classes: tuple[type[CodeGenerator], ...] = ( # type: ignore[assignment] CommonCodeGenerator, - TezosCodeGenerator, EvmCodeGenerator, StarknetCodeGenerator, + SubstrateCodeGenerator, + TezosCodeGenerator, ) for codegen_cls in codegen_classes: codegen = codegen_cls( diff --git a/src/dipdup/env.py b/src/dipdup/env.py index 20741df58..14da43c7d 100644 --- a/src/dipdup/env.py +++ b/src/dipdup/env.py @@ -4,6 +4,7 @@ import sys import tomllib from contextlib import suppress +from functools import cache from os import getenv from pathlib import Path @@ -31,6 +32,7 @@ def get_pyproject_name() -> str | None: raise FrameworkException('`pyproject.toml` found, but has neither `project` nor `tool.poetry` section') +@cache def get_package_path(package: str) -> Path: """Absolute path to the indexer package, existing or default""" diff --git a/src/dipdup/fields.py b/src/dipdup/fields.py index a9cf2aa6e..ef9403ca1 100644 --- a/src/dipdup/fields.py +++ b/src/dipdup/fields.py @@ -4,10 +4,12 @@ from copy import copy from decimal import Decimal from enum import Enum +from functools import partial from typing import TYPE_CHECKING from typing import Any from typing import TypeVar +import 
orjson from tortoise.contrib.postgres.fields import ArrayField as ArrayField from tortoise.exceptions import ConfigurationError as TortoiseConfigurationError from tortoise.fields import relational as relational @@ -26,7 +28,7 @@ from tortoise.fields.data import FloatField as FloatField from tortoise.fields.data import IntEnumFieldInstance as IntEnumFieldInstance from tortoise.fields.data import IntField as IntField -from tortoise.fields.data import JSONField as JSONField +from tortoise.fields.data import JSONField as _JSONField from tortoise.fields.data import SmallIntField as SmallIntField from tortoise.fields.data import TimeDeltaField as TimeDeltaField from tortoise.fields.data import TimeField as TimeField @@ -44,6 +46,7 @@ from tortoise.fields.relational import ReverseRelation as ReverseRelation from dipdup.exceptions import FrameworkException +from dipdup.utils import json_dumps_plain if TYPE_CHECKING: from tortoise.models import Model as _TortoiseModel @@ -55,6 +58,13 @@ _EnumFieldT = TypeVar('_EnumFieldT', bound=Enum) +JSONField = partial( + _JSONField, + encoder=json_dumps_plain, + decoder=orjson.loads, +) + + class EnumField(Field[_EnumFieldT]): """Like CharEnumField but without max_size and additional validation""" diff --git a/src/dipdup/index.py b/src/dipdup/index.py index c5c4c5546..d9122adc3 100644 --- a/src/dipdup/index.py +++ b/src/dipdup/index.py @@ -190,11 +190,15 @@ def state(self) -> models.Index: return self._state @property - def synchronized(self) -> bool: + def is_active(self) -> bool: + return self.state.status not in (IndexStatus.disabled, IndexStatus.failed) + + @property + def is_synchronized(self) -> bool: return self.state.status == IndexStatus.realtime @property - def realtime(self) -> bool: + def is_realtime(self) -> bool: return self.state.status == IndexStatus.realtime and not self.queue def get_sync_level(self) -> int: diff --git a/src/dipdup/indexes/substrate.py b/src/dipdup/indexes/substrate.py new file mode 100644 index 
000000000..e68236e67 --- /dev/null +++ b/src/dipdup/indexes/substrate.py @@ -0,0 +1,41 @@ +from abc import ABC +from typing import TYPE_CHECKING +from typing import Generic +from typing import TypeVar + +from dipdup.config import SubstrateIndexConfigU +from dipdup.datasources.substrate_node import SubstrateNodeDatasource +from dipdup.datasources.substrate_subscan import SubstrateSubscanDatasource +from dipdup.datasources.substrate_subsquid import SubstrateSubsquidDatasource +from dipdup.index import IndexQueueItemT +from dipdup.indexes._subsquid import SubsquidIndex +from dipdup.runtimes import SubstrateRuntime + +SubstrateDatasource = SubstrateSubsquidDatasource | SubstrateSubscanDatasource | SubstrateNodeDatasource + +IndexConfigT = TypeVar('IndexConfigT', bound=SubstrateIndexConfigU) +DatasourceT = TypeVar('DatasourceT', bound=SubstrateDatasource) + +if TYPE_CHECKING: + from dipdup.context import DipDupContext + + +class SubstrateIndex( + Generic[IndexConfigT, IndexQueueItemT, DatasourceT], + SubsquidIndex[IndexConfigT, IndexQueueItemT, DatasourceT], + ABC, +): + def __init__( + self, + ctx: 'DipDupContext', + config: IndexConfigT, + datasources: tuple[DatasourceT, ...], + ) -> None: + super().__init__(ctx, config, datasources) + self.subsquid_datasources = tuple(d for d in datasources if isinstance(d, SubstrateSubsquidDatasource)) + self.node_datasources = tuple(d for d in datasources if isinstance(d, SubstrateNodeDatasource)) + self.runtime = SubstrateRuntime( + config=config.runtime, + package=ctx.package, + interface=self.node_datasources[0]._interface if self.node_datasources else None, + ) diff --git a/src/dipdup/indexes/substrate_events/fetcher.py b/src/dipdup/indexes/substrate_events/fetcher.py new file mode 100644 index 000000000..ba647f034 --- /dev/null +++ b/src/dipdup/indexes/substrate_events/fetcher.py @@ -0,0 +1,64 @@ +from collections.abc import AsyncIterator + +from dipdup.datasources.substrate_node import SubstrateNodeDatasource +from 
dipdup.datasources.substrate_subsquid import SubstrateSubsquidDatasource +from dipdup.indexes.substrate_node import SubstrateNodeFetcher +from dipdup.indexes.substrate_subsquid import SubstrateSubsquidFetcher +from dipdup.models.substrate import SubstrateEventData + + +class SubstrateSubsquidEventFetcher(SubstrateSubsquidFetcher[SubstrateEventData]): + def __init__( + self, + name: str, + datasources: tuple[SubstrateSubsquidDatasource, ...], + first_level: int, + last_level: int, + names: tuple[str, ...], + ) -> None: + super().__init__( + name=name, + datasources=datasources, + first_level=first_level, + last_level=last_level, + ) + self._names = names + + async def fetch_by_level(self) -> AsyncIterator[tuple[int, tuple[SubstrateEventData, ...]]]: + async for level, events in self.readahead_by_level(self.fetch_events()): + yield level, events + + async def fetch_events(self) -> AsyncIterator[tuple[SubstrateEventData, ...]]: + async for events in self.random_datasource.iter_events( + first_level=self._first_level, + last_level=self._last_level, + names=self._names, + ): + yield tuple(SubstrateEventData.from_subsquid(event) for event in events) + + +class SubstrateNodeEventFetcher(SubstrateNodeFetcher[SubstrateEventData]): + def __init__( + self, + name: str, + datasources: tuple[SubstrateNodeDatasource, ...], + first_level: int, + last_level: int, + ) -> None: + super().__init__( + name=name, + datasources=datasources, + first_level=first_level, + last_level=last_level, + ) + + async def fetch_by_level(self) -> AsyncIterator[tuple[int, tuple[SubstrateEventData, ...]]]: + async for level, events in self.readahead_by_level(self.fetch_events()): + yield level, events + + async def fetch_events(self) -> AsyncIterator[tuple[SubstrateEventData, ...]]: + for level in range(self._first_level, self._last_level): + block_hash = await self.get_random_node().get_block_hash(level) + event_dicts = await self.get_random_node().get_events(block_hash) + block_header = await 
self.get_random_node().get_block_header(block_hash) + yield tuple(SubstrateEventData.from_node(event_dict, block_header) for event_dict in event_dicts) diff --git a/src/dipdup/indexes/substrate_events/index.py b/src/dipdup/indexes/substrate_events/index.py new file mode 100644 index 000000000..8c539c3c7 --- /dev/null +++ b/src/dipdup/indexes/substrate_events/index.py @@ -0,0 +1,96 @@ +from collections import deque +from collections.abc import Iterable +from typing import TYPE_CHECKING +from typing import Any + +from dipdup.config.substrate_events import SubstrateEventsHandlerConfig +from dipdup.config.substrate_events import SubstrateEventsIndexConfig +from dipdup.datasources.substrate_node import SubstrateNodeDatasource +from dipdup.datasources.substrate_subsquid import SubstrateSubsquidDatasource +from dipdup.indexes.substrate import SubstrateDatasource +from dipdup.indexes.substrate import SubstrateIndex +from dipdup.indexes.substrate_events.fetcher import SubstrateNodeEventFetcher +from dipdup.indexes.substrate_events.fetcher import SubstrateSubsquidEventFetcher +from dipdup.models import RollbackMessage +from dipdup.models._subsquid import SubsquidMessageType +from dipdup.models.substrate import SubstrateEvent +from dipdup.models.substrate import SubstrateEventData +from dipdup.performance import metrics + +QueueItem = tuple[SubstrateEventData, ...] 
| RollbackMessage +MatchedEventsT = tuple[SubstrateEventsHandlerConfig, SubstrateEvent[Any]] + +if TYPE_CHECKING: + from dipdup.context import DipDupContext + + +class SubstrateEventsIndex( + SubstrateIndex[SubstrateEventsIndexConfig, QueueItem, SubstrateDatasource], + message_type=SubsquidMessageType.substrate_events, +): + def __init__( + self, + ctx: 'DipDupContext', + config: SubstrateEventsIndexConfig, + datasources: tuple[SubstrateDatasource, ...], + ) -> None: + super().__init__(ctx, config, datasources) + self._names = tuple(c.name for c in self._config.handlers) + self.subsquid_datasources = tuple(d for d in datasources if isinstance(d, SubstrateSubsquidDatasource)) + self.node_datasources = tuple(d for d in datasources if isinstance(d, SubstrateNodeDatasource)) + + async def _synchronize_subsquid(self, sync_level: int) -> None: + first_level = self.state.level + 1 + fetcher = self._create_subsquid_fetcher(first_level, sync_level) + + async for _level, events in fetcher.fetch_by_level(): + await self._process_level_data(tuple(events), sync_level) + metrics._sqd_processor_last_block = int(_level) + + async def _synchronize_node(self, sync_level: int) -> None: + first_level = self.state.level + 1 + fetcher = self._create_node_fetcher(first_level, sync_level) + + async for _level, events in fetcher.fetch_by_level(): + await self._process_level_data(events, sync_level) + metrics._sqd_processor_last_block = _level + + def _create_subsquid_fetcher(self, first_level: int, last_level: int) -> SubstrateSubsquidEventFetcher: + return SubstrateSubsquidEventFetcher( + name=self.name, + datasources=self.subsquid_datasources, + first_level=first_level, + last_level=last_level, + names=self._names, + ) + + def _create_node_fetcher(self, first_level: int, last_level: int) -> SubstrateNodeEventFetcher: + return SubstrateNodeEventFetcher( + name=self.name, + datasources=self.node_datasources, + first_level=first_level, + last_level=last_level, + ) + + def _match_level_data( 
+ self, + handlers: tuple[SubstrateEventsHandlerConfig, ...], + level_data: Iterable[SubstrateEventData], + ) -> deque[Any]: + """Try to match event events with all index handlers.""" + matched_handlers: deque[MatchedEventsT] = deque() + + for event in level_data: + for handler_config in handlers: + if handler_config.name != event.name: + continue + + arg: SubstrateEvent[Any] = SubstrateEvent( + data=event, + runtime=self.runtime, + ) + + matched_handlers.append((handler_config, arg)) + break + + return matched_handlers diff --git a/src/dipdup/indexes/substrate_node.py b/src/dipdup/indexes/substrate_node.py new file mode 100644 index 000000000..152d5d7c0 --- /dev/null +++ b/src/dipdup/indexes/substrate_node.py @@ -0,0 +1,36 @@ +import logging +import random +from abc import ABC +from typing import Generic + +from dipdup.datasources.substrate_node import SubstrateNodeDatasource +from dipdup.exceptions import FrameworkException +from dipdup.fetcher import BufferT +from dipdup.fetcher import DataFetcher + +SUBSTRATE_NODE_READAHEAD_LIMIT = 2500 + + +_logger = logging.getLogger(__name__) + + +class SubstrateNodeFetcher(Generic[BufferT], DataFetcher[BufferT, SubstrateNodeDatasource], ABC): + def __init__( + self, + name: str, + datasources: tuple[SubstrateNodeDatasource, ...], + first_level: int, + last_level: int, + ) -> None: + super().__init__( + name=name, + datasources=datasources, + first_level=first_level, + last_level=last_level, + readahead_limit=SUBSTRATE_NODE_READAHEAD_LIMIT, + ) + + def get_random_node(self) -> SubstrateNodeDatasource: + if not self._datasources: + raise FrameworkException('A node datasource requested, but none attached to this index') + return random.choice(self._datasources) diff --git a/src/dipdup/indexes/substrate_subsquid.py b/src/dipdup/indexes/substrate_subsquid.py new file mode 100644 index 000000000..fa75d8006 --- /dev/null +++ b/src/dipdup/indexes/substrate_subsquid.py @@ -0,0 +1,25 @@ +from abc import ABC +from typing import 
Generic + +from dipdup.datasources.substrate_subsquid import SubstrateSubsquidDatasource +from dipdup.fetcher import BufferT +from dipdup.fetcher import DataFetcher + +SUBSTRATE_SUBSQUID_READAHEAD_LIMIT = 10000 + + +class SubstrateSubsquidFetcher(Generic[BufferT], DataFetcher[BufferT, SubstrateSubsquidDatasource], ABC): + def __init__( + self, + name: str, + datasources: tuple[SubstrateSubsquidDatasource, ...], + first_level: int, + last_level: int, + ) -> None: + super().__init__( + name=name, + datasources=datasources, + first_level=first_level, + last_level=last_level, + readahead_limit=SUBSTRATE_SUBSQUID_READAHEAD_LIMIT, + ) diff --git a/src/dipdup/models/__init__.py b/src/dipdup/models/__init__.py index 8e7555a7d..3b079ced8 100644 --- a/src/dipdup/models/__init__.py +++ b/src/dipdup/models/__init__.py @@ -28,7 +28,6 @@ from dipdup import env from dipdup import fields from dipdup.exceptions import FrameworkException -from dipdup.utils import json_dumps_plain if TYPE_CHECKING: from collections import deque @@ -59,6 +58,7 @@ class IndexType(Enum): tezos_token_transfers = 'tezos.token_transfers' tezos_token_balances = 'tezos.token_balances' starknet_events = 'starknet.events' + substrate_events = 'substrate.events' class MessageType: @@ -161,7 +161,7 @@ class ModelUpdate(TortoiseModel): index = fields.TextField() action = fields.EnumField(ModelUpdateAction) - data: dict[str, Any] = fields.JSONField(encoder=json_dumps_plain, null=True) + data: dict[str, Any] = fields.JSONField(null=True) created_at = fields.DatetimeField(auto_now_add=True) updated_at = fields.DatetimeField(auto_now=True) @@ -641,7 +641,7 @@ class Index(TortoiseModel): config_hash = fields.TextField(null=True) template = fields.TextField(null=True) - template_values: dict[str, Any] = fields.JSONField(encoder=json_dumps_plain, null=True) + template_values: dict[str, Any] = fields.JSONField(null=True) level = fields.IntField(default=0) @@ -676,7 +676,7 @@ class Meta: class Meta(TortoiseModel): key = 
fields.TextField(primary_key=True) - value = fields.JSONField(encoder=json_dumps_plain, null=True) + value = fields.JSONField(null=True) created_at = fields.DatetimeField(auto_now_add=True) updated_at = fields.DatetimeField(auto_now=True) @@ -691,7 +691,7 @@ class Meta: class ContractMetadata(Model): network = fields.TextField() contract = fields.TextField() - metadata = fields.JSONField(encoder=json_dumps_plain, null=True) + metadata = fields.JSONField(null=True) update_id = fields.IntField() created_at = fields.DatetimeField(auto_now_add=True) @@ -706,7 +706,7 @@ class TokenMetadata(Model): network = fields.TextField() contract = fields.TextField() token_id = fields.TextField() - metadata = fields.JSONField(encoder=json_dumps_plain, null=True) + metadata = fields.JSONField(null=True) update_id = fields.IntField() created_at = fields.DatetimeField(auto_now_add=True) diff --git a/src/dipdup/models/_subsquid.py b/src/dipdup/models/_subsquid.py index 711f5cdad..2ec782e58 100644 --- a/src/dipdup/models/_subsquid.py +++ b/src/dipdup/models/_subsquid.py @@ -11,6 +11,7 @@ class SubsquidMessageType(MessageType, Enum): evm_traces = 'evm_traces' evm_transactions = 'evm_transactions' starknet_events = 'starknet_events' + substrate_events = 'substrate_events' FieldSelection = dict[str, dict[str, bool]] diff --git a/src/dipdup/models/substrate.py b/src/dipdup/models/substrate.py new file mode 100644 index 000000000..947a892c6 --- /dev/null +++ b/src/dipdup/models/substrate.py @@ -0,0 +1,136 @@ +from dataclasses import dataclass +from functools import cached_property +from typing import Any +from typing import Generic +from typing import Self +from typing import TypedDict +from typing import TypeVar +from typing import cast + +from dipdup.fetcher import HasLevel +from dipdup.runtimes import SubstrateRuntime + + +class _BlockHeaderExtra(TypedDict): + number: int + hash: str + parentHash: str + stateRoot: str + extrinsicsRoot: str + digest: str + specName: str + specVersion: int 
+ implName: str + implVersion: int + timestamp: int + validator: str + + +class _SubstrateSubsquidEventResponse(TypedDict): + name: str + index: int + extrinsicIndex: int + callAddress: list[str] + args: list[Any] + header: _BlockHeaderExtra + + +class _BlockHeader(TypedDict): + hash: str + number: int + prev_root: str + + +class _SubstrateNodeEventResponse(TypedDict): + name: str + index: int + extrinsic_index: int + decoded_args: dict[str, Any] + + +@dataclass(frozen=True, kw_only=True) +class SubstrateEventData(HasLevel): + # TODO: there are more fields in event data: phase, topics + name: str + index: int + extrinsic_index: int + call_address: list[str] | None + # we receive decoded args from node datasource and encoded from subsquid datasource + args: list[Any] | None = None + decoded_args: dict[str, Any] | None = None + header: _BlockHeader + header_extra: _BlockHeaderExtra | None + + @property + def level(self) -> int: # type: ignore[override] + return self.header['number'] + + @classmethod + def from_node(cls, event_dict: _SubstrateNodeEventResponse, header: _BlockHeader) -> Self: + return cls( + **event_dict, + call_address=None, + args=None, + header=header, + header_extra=None, + ) + + @classmethod + def from_subsquid(cls, event_dict: _SubstrateSubsquidEventResponse) -> Self: + return cls( + name=event_dict['name'], + index=event_dict['index'], + extrinsic_index=event_dict['extrinsicIndex'], + call_address=event_dict['callAddress'], + args=event_dict['args'], + decoded_args=None, + header={ + 'hash': event_dict['header']['hash'], + 'number': event_dict['header']['number'], + 'prev_root': event_dict['header']['parentHash'], + }, + header_extra=event_dict['header'], + ) + + +class SubstrateHeadBlockData(TypedDict): + parentHash: str + number: str + stateRoot: str + extrinsicsRoot: str + digest: dict[str, Any] + + +PayloadT = TypeVar('PayloadT') + + +@dataclass(frozen=True) +class SubstrateEvent(Generic[PayloadT]): + data: SubstrateEventData + runtime: 
SubstrateRuntime + + # TODO: Use lazy decoding in other models with typed payload + @cached_property + def payload(self) -> PayloadT: + # NOTE: from node datasource + if self.data.decoded_args is not None: + return cast(PayloadT, self.data.decoded_args) + + # NOTE: from subsquid datasource + assert self.data.args is not None and self.data.header_extra is not None + return cast( + PayloadT, + self.runtime.decode_event_args( + name=self.name, + args=self.data.args, + spec_version=str(self.data.header_extra['specVersion']), + ), + ) + + @property + def level(self) -> int: + return self.data.level + + @property + def name(self) -> str: + return self.data.name diff --git a/src/dipdup/models/substrate_node.py b/src/dipdup/models/substrate_node.py new file mode 100644 index 000000000..0589f06fb --- /dev/null +++ b/src/dipdup/models/substrate_node.py @@ -0,0 +1,17 @@ +from abc import ABC +from typing import Literal + +from pydantic.dataclasses import dataclass + +from dipdup.subscriptions import Subscription + + +class SubstrateNodeSubscription(ABC, Subscription): + method: str + + +@dataclass(frozen=True) +class SubstrateNodeHeadSubscription(SubstrateNodeSubscription): + method: Literal['chain_subscribeFinalisedHeads'] = 'chain_subscribeFinalisedHeads' + # NOTE: used to determine which objects index require, since we can only subscribe to head + fetch_events: bool = False diff --git a/src/dipdup/models/substrate_subsquid.py b/src/dipdup/models/substrate_subsquid.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/dipdup/project.py b/src/dipdup/project.py index fa09729c3..61ee0a12c 100644 --- a/src/dipdup/project.py +++ b/src/dipdup/project.py @@ -38,6 +38,7 @@ 'demo_evm_uniswap', ), 'starknet': ('demo_starknet_events',), + 'substrate': ('demo_substrate_events',), 'tezos': ( 'demo_tezos_auction', 'demo_tezos_dao', @@ -148,11 +149,13 @@ def template_from_terminal() -> tuple[str | None, DipDupSurveyConfig | None]: options=( 'EVM', 'Starknet', + 'Substrate', 
'Tezos', ), comments=( 'EVM-compatible blockchains', 'Starknet', + 'Substrate', 'Tezos', ), default=0, @@ -315,7 +318,7 @@ def render_project( _render( answers, template_path=Path(__file__).parent / 'templates' / 'replay.yaml.j2', - output_path=Path(answers['package']) / 'configs' / 'replay.yaml', + output_path=get_package_path(answers['package']) / 'configs' / 'replay.yaml', force=force, ) @@ -338,7 +341,7 @@ def render_base( _render( answers=answers, template_path=Path(__file__).parent / 'templates' / 'replay.yaml.j2', - output_path=Path('configs') / 'replay.yaml', + output_path=get_package_path(answers['package']) / Path('configs') / 'replay.yaml', force=force, ) @@ -353,9 +356,10 @@ def _render_templates( from jinja2 import Template project_path = Path(__file__).parent / 'projects' / path - project_paths = project_path.glob('**/*.j2') + project_templates = set(project_path.glob('**/*.j2')) + project_files = set(project_path.glob('**/*')) - project_templates - for path in project_paths: + for path in project_templates: template_path = path.relative_to(Path(__file__).parent) relative_path = str(Path(*template_path.parts[2:]))[:-3] @@ -369,8 +373,19 @@ def _render_templates( # NOTE: Remove ".j2" from extension ).with_suffix(path.suffix[:-3]) output_path = Path(Template(str(output_path)).render(project=answers)) + _render(answers, template_path, output_path, force) + # NOTE: If there are files without .j2 extension, just copy them + for path in project_files: + if path.is_dir() or path.name == 'replay.yaml': + continue + output_path = Path( + get_package_path(answers['package']), + *path.relative_to(project_path).parts, + ) + write(output_path, path.read_bytes(), overwrite=force) + def _render(answers: Answers, template_path: Path, output_path: Path, force: bool) -> None: if output_path.exists() and not force: diff --git a/src/dipdup/projects/demo_evm_events/dipdup.yaml.j2 b/src/dipdup/projects/demo_evm_events/dipdup.yaml.j2 index d3bdbeb9a..26c850d6e 100644 --- 
a/src/dipdup/projects/demo_evm_events/dipdup.yaml.j2 +++ b/src/dipdup/projects/demo_evm_events/dipdup.yaml.j2 @@ -6,7 +6,7 @@ datasources: kind: evm.subsquid url: ${SUBSQUID_URL:-https://v2.archive.subsquid.io/network/ethereum-mainnet} etherscan: - kind: abi.etherscan + kind: evm.etherscan url: ${ETHERSCAN_URL:-https://api.etherscan.io/api} api_key: ${ETHERSCAN_API_KEY:-''} evm_node: diff --git a/src/dipdup/projects/demo_evm_transactions/dipdup.yaml.j2 b/src/dipdup/projects/demo_evm_transactions/dipdup.yaml.j2 index 6d9cb2f84..4170fb4af 100644 --- a/src/dipdup/projects/demo_evm_transactions/dipdup.yaml.j2 +++ b/src/dipdup/projects/demo_evm_transactions/dipdup.yaml.j2 @@ -6,7 +6,7 @@ datasources: kind: evm.subsquid url: ${SUBSQUID_URL:-https://v2.archive.subsquid.io/network/ethereum-mainnet} etherscan: - kind: abi.etherscan + kind: evm.etherscan url: ${ETHERSCAN_URL:-https://api.etherscan.io/api} api_key: ${ETHERSCAN_API_KEY:-''} evm_node: diff --git a/src/dipdup/projects/demo_evm_uniswap/dipdup.yaml.j2 b/src/dipdup/projects/demo_evm_uniswap/dipdup.yaml.j2 index 1216b73e1..d6f4c2a01 100644 --- a/src/dipdup/projects/demo_evm_uniswap/dipdup.yaml.j2 +++ b/src/dipdup/projects/demo_evm_uniswap/dipdup.yaml.j2 @@ -6,7 +6,7 @@ datasources: kind: evm.subsquid url: ${SUBSQUID_URL:-https://v2.archive.subsquid.io/network/ethereum-mainnet} etherscan: - kind: abi.etherscan + kind: evm.etherscan url: ${ETHERSCAN_URL:-https://api.etherscan.io/api} api_key: ${ETHERSCAN_API_KEY:-''} evm_node: diff --git a/src/dipdup/projects/demo_substrate_events/dipdup.yaml.j2 b/src/dipdup/projects/demo_substrate_events/dipdup.yaml.j2 new file mode 100644 index 000000000..35e2bb728 --- /dev/null +++ b/src/dipdup/projects/demo_substrate_events/dipdup.yaml.j2 @@ -0,0 +1,31 @@ +spec_version: 3.0 +package: {{ project.package }} + +runtimes: + assethub: + kind: substrate + type_registry: statemint + +datasources: + subsquid: + kind: substrate.subsquid + url: 
https://v2.archive.subsquid.io/network/asset-hub-polkadot + subscan: + kind: substrate.subscan + url: https://assethub-polkadot.api.subscan.io/api + node: + kind: substrate.node + url: https://statemint.api.onfinality.io/rpc?apikey=${NODE_API_KEY:-''} + ws_url: wss://statemint.api.onfinality.io/ws?apikey=${NODE_API_KEY:-''} + +indexes: + assethub_transfers: + kind: substrate.events + runtime: assethub + datasources: + - subsquid + - subscan + - node + handlers: + - callback: on_transfer + name: Assets.Transferred diff --git a/src/dipdup/projects/demo_substrate_events/handlers/on_transfer.py.j2 b/src/dipdup/projects/demo_substrate_events/handlers/on_transfer.py.j2 new file mode 100644 index 000000000..86a15ed58 --- /dev/null +++ b/src/dipdup/projects/demo_substrate_events/handlers/on_transfer.py.j2 @@ -0,0 +1,52 @@ +from decimal import Decimal + +from {{ project.package }} import models as models +from {{ project.package }}.types.assethub.substrate_events.assets_transferred import AssetsTransferredPayload +from dipdup.context import HandlerContext +from dipdup.models.substrate import SubstrateEvent +from tortoise.exceptions import DoesNotExist + + +async def sql_update( + ctx: HandlerContext, + address: str, + amount: Decimal, + level: int, +) -> None: + await ctx.execute_sql_query( + 'update_balance', + address, + str(amount), + level, + ) + + +# NOTE: Not used, just for demonstration purposes +async def orm_update( + ctx: HandlerContext, + address: str, + amount: Decimal, + level: int, +) -> None: + try: + holder = await models.Holder.cached_get(pk=address) + except DoesNotExist: + holder = models.Holder(address=address) + holder.cache() + holder.balance += amount + holder.turnover += abs(amount) + holder.tx_count += 1 + holder.last_seen = level + await holder.save() + + +async def on_transfer( + ctx: HandlerContext, + event: SubstrateEvent[AssetsTransferredPayload], +) -> None: + amount = Decimal(event.payload['amount']) + if not amount: + return + + await 
sql_update(ctx, event.payload['from'], -amount, event.data.level) + await sql_update(ctx, event.payload['to'], amount, event.data.level) diff --git a/src/dipdup/projects/demo_substrate_events/models/__init__.py.j2 b/src/dipdup/projects/demo_substrate_events/models/__init__.py.j2 new file mode 100644 index 000000000..0dc49a4c6 --- /dev/null +++ b/src/dipdup/projects/demo_substrate_events/models/__init__.py.j2 @@ -0,0 +1,13 @@ +from dipdup import fields +from dipdup.models import CachedModel + + +class Holder(CachedModel): + address = fields.TextField(primary_key=True) + balance = fields.DecimalField(decimal_places=6, max_digits=40, default=0) + turnover = fields.DecimalField(decimal_places=6, max_digits=40, default=0) + tx_count = fields.BigIntField(default=0) + last_seen = fields.BigIntField(null=True) + + class Meta: + maxsize = 2**12 diff --git a/src/dipdup/projects/demo_substrate_events/replay.yaml b/src/dipdup/projects/demo_substrate_events/replay.yaml new file mode 100644 index 000000000..9da225d39 --- /dev/null +++ b/src/dipdup/projects/demo_substrate_events/replay.yaml @@ -0,0 +1,5 @@ +spec_version: 3.0 +replay: + description: Substrate balance transfers + package: demo_substrate_events + template: demo_substrate_events \ No newline at end of file diff --git a/src/dipdup/projects/demo_substrate_events/sql/update_balance.sql b/src/dipdup/projects/demo_substrate_events/sql/update_balance.sql new file mode 100644 index 000000000..663a952cd --- /dev/null +++ b/src/dipdup/projects/demo_substrate_events/sql/update_balance.sql @@ -0,0 +1,22 @@ +insert into holder ( + address + ,balance + ,turnover + ,tx_count + ,last_seen +) +values ( + :address + ,:amount + ,abs(:amount) + ,1 + ,:level +) +on conflict (address) do +update +set + balance = balance + :amount + ,turnover = turnover + abs(:amount) + ,tx_count = tx_count + 1 + ,last_seen = :level +; \ No newline at end of file diff --git a/src/dipdup/runtimes.py b/src/dipdup/runtimes.py new file mode 100644 index 
000000000..48da0c763 --- /dev/null +++ b/src/dipdup/runtimes.py @@ -0,0 +1,165 @@ +import logging +import re +from functools import cache +from functools import cached_property +from pathlib import Path +from typing import TYPE_CHECKING +from typing import Any + +import orjson + +from dipdup.config.substrate import SubstrateRuntimeConfig +from dipdup.exceptions import FrameworkException +from dipdup.package import DipDupPackage +from dipdup.utils import sorted_glob + +if TYPE_CHECKING: + from aiosubstrate import SubstrateInterface + from scalecodec.base import RuntimeConfigurationObject # type: ignore[import-untyped] + +_logger = logging.getLogger(__name__) + + +@cache +def extract_args_name(description: str) -> tuple[str, ...]: + pattern = r'\((.*?)\)|\[(.*?)\]' + match = re.search(pattern, description) + + if not match: + raise ValueError('No valid bracket pairs found in the description') + + args_str = match.group(1) or match.group(2) + return tuple(arg.strip('\\') for arg in args_str.split(', ')) + + +@cache +def get_type_registry(name_or_path: str | Path) -> 'RuntimeConfigurationObject': + from scalecodec.type_registry import load_type_registry_preset # type: ignore[import-untyped] + + if isinstance(name_or_path, str): + # NOTE: User path has higher priority + for path in ( + Path(f'type_registries/{name_or_path}.json'), + Path(name_or_path), + ): + if not path.is_file(): + continue + name_or_path = path + + if isinstance(name_or_path, Path): + return orjson.loads(name_or_path.read_bytes()) + return load_type_registry_preset(name_or_path) + + +class SubstrateSpecVersion: + def __init__(self, name: str, metadata: list[dict[str, Any]]) -> None: + self._name = name + self._metadata = metadata + self._events: dict[str, dict[str, Any]] = {} + + def get_event_abi(self, qualname: str) -> dict[str, Any]: + if qualname not in self._events: + pallet, name = qualname.split('.') + found = False + for item in self._metadata: + if found: + break + if item['name'] != pallet: 
+ continue + for event in item.get('events', ()): + if event['name'] != name: + continue + self._events[qualname] = event + found = True + else: + raise FrameworkException(f'Event `{qualname}` not found in `{self._name}` spec') + + return self._events[qualname] + + +class SubstrateRuntime: + def __init__( + self, + config: SubstrateRuntimeConfig, + package: DipDupPackage, + interface: 'SubstrateInterface | None', + ) -> None: + self._config = config + self._package = package + self._interface = interface + # TODO: Unload not used + self._spec_versions: dict[str, SubstrateSpecVersion] = {} + + @property + def abi_path(self) -> Path: + return self._package.abi.joinpath(self._config.name) + + @cached_property + def runtime_config(self) -> 'RuntimeConfigurationObject': + if self._interface: + return self._interface.runtime_config + + from scalecodec.base import RuntimeConfigurationObject + + # FIXME: Generic configuration for cases when node datasources are not available + runtime_config = RuntimeConfigurationObject() + runtime_config.update_type_registry(get_type_registry('legacy')) + runtime_config.update_type_registry(get_type_registry('core')) + runtime_config.update_type_registry(get_type_registry(self._config.type_registry or self._config.name)) + + return runtime_config + + def get_spec_version(self, name: str) -> SubstrateSpecVersion: + if name not in self._spec_versions: + _logger.info('loading spec version `%s`', name) + try: + metadata_path = self.abi_path.joinpath(f'v{name}.json') + metadata = orjson.loads(metadata_path.read_bytes()) + self._spec_versions[name] = SubstrateSpecVersion( + name=f'v{name}', + metadata=metadata, + ) + except FileNotFoundError: + # FIXME: Using last known version to help with missing abis + last_known = sorted_glob(self.abi_path, 'v*.json')[-1].stem + _logger.debug('using last known version `%s`', last_known) + self._spec_versions[name] = self.get_spec_version(last_known[1:]) + + return self._spec_versions[name] + + def 
decode_event_args( + self, + name: str, + args: list[Any] | dict[str, Any], + spec_version: str, + ) -> dict[str, Any]: + from scalecodec.base import ScaleBytes + + spec_obj = self.get_spec_version(spec_version) + event_abi = spec_obj.get_event_abi( + qualname=name, + ) + + if isinstance(args, list): + assert 'args_name' not in event_abi + arg_names = extract_args_name(event_abi['docs'][0]) + args = dict(zip(arg_names, args, strict=True)) + else: + arg_names = event_abi['args_name'] + + arg_types = event_abi['args'] + + payload = {} + for (key, value), type_ in zip(args.items(), arg_types, strict=True): + if not isinstance(value, str) or not value.startswith('0x'): + payload[key] = value + continue + + scale_obj = self.runtime_config.create_scale_object( + type_string=type_, + data=ScaleBytes(value), + ) + scale_obj.decode() + payload[key] = scale_obj.value_serialized + + return payload diff --git a/src/dipdup/type_registries/hydradx.json b/src/dipdup/type_registries/hydradx.json new file mode 100644 index 000000000..5f0511578 --- /dev/null +++ b/src/dipdup/type_registries/hydradx.json @@ -0,0 +1,57 @@ +{ + "types": { + "AssetPair": { + "asset_in": "AssetId", + "asset_out": "AssetId" + }, + "Amount": "i128", + "AmountOf": "Amount", + "Address": "AccountId", + "OrmlAccountData": { + "free": "Balance", + "frozen": "Balance", + "reserved": "Balance" + }, + "Fee": { + "numerator": "u32", + "denominator": "u32" + }, + "BalanceInfo": { + "amount": "Balance", + "assetId": "AssetId" + }, + "Currency": "AssetId", + "CurrencyId": "AssetId", + "CurrencyIdOf": "AssetId", + "Intention": { + "who": "AccountId", + "asset_sell": "AssetId", + "asset_buy": "AssetId", + "amount": "Balance", + "discount": "bool", + "sell_or_buy": "IntentionType" + }, + "IntentionId": "u128", + "IntentionType": { + "_enum": [ + "SELL", + "BUY" + ] + }, + "LookupSource": "AccountId", + "OrderedSet": "Vec", + "Price": "Balance", + "Chain": { + "genesisHash": "Vec", + "lastBlockHash": "Vec" + } + }, + 
"typesAlias": { + "tokens": { + "AccountData": "OrmlAccountData" + } + }, + "signedExtensions": { + "ValidateClaim": "Null" + } +} \ No newline at end of file diff --git a/tests/configs/test_evm.yml b/tests/configs/test_evm.yml index 5a139950f..54cd349d5 100644 --- a/tests/configs/test_evm.yml +++ b/tests/configs/test_evm.yml @@ -3,7 +3,7 @@ datasources: kind: evm.subsquid url: ${SUBSQUID_URL:-https://v2.archive.subsquid.io/network/ethereum-mainnet} etherscan: - kind: abi.etherscan + kind: evm.etherscan url: ${ETHERSCAN_URL:-https://api.etherscan.io/api} api_key: ${ETHERSCAN_API_KEY:-''} evm_node: