From e0ccd008fe8bfaf29357ea87561e60f3baaae08c Mon Sep 17 00:00:00 2001
From: David
Date: Tue, 10 May 2022 10:52:19 +0200
Subject: [PATCH] jsonrpsee integration (#8783)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Add tokio
* No need to map CallError to CallError
* jsonrpsee proc macros (#9673)
* port error types to `JsonRpseeError`
* migrate chain module to proc macro api
* make it compile with proc macros
* update branch
* update branch
* update to jsonrpsee master
* port system rpc
* port state rpc
* port childstate & offchain
* frame system rpc
* frame transaction payment
* bring back CORS hack to work with polkadot UI
* port babe rpc
* port manual seal rpc
* port frame mmr rpc
* port frame contracts rpc
* port finality grandpa rpc
* port sync state rpc
* resolve a few TODO + no jsonrpc deps
* Update bin/node/rpc-client/src/main.rs
* Update bin/node/rpc-client/src/main.rs
* Update bin/node/rpc-client/src/main.rs
* Update bin/node/rpc-client/src/main.rs
* Port over system_ rpc tests
* Make it compile
* Use prost 0.8
* Use prost 0.8
* Make it compile
* Ignore more failing tests
* Comment out WIP tests
* fix nit in frame system api
* Update lockfile
* No more juggling tokio versions
* No more wait_for_stop ?
* Remove browser-testing
* Arguments must be arrays
* Use same argument names
* Resolve todo: no wait_for_stop for WS server
  Add todo: is parse_rpc_result used?
  Cleanup imports
* fmt
* log
* One test passes
* update jsonrpsee
* update jsonrpsee
* cleanup rpc-servers crate
* jsonrpsee: add host and origin filtering (#9787)
* add access control in the jsonrpsee servers
* use master
* fix nits
* rpc runtime_version safe
* fix nits
* fix grumbles
* remove unused files
* resolve some todos
* jsonrpsee more cleanup (#9803)
* more cleanup
* resolve TODOs
* fix some unwraps
* remove type hints
* update jsonrpsee
* downgrade zeroize
* pin jsonrpsee rev
* remove unwrap nit
* Comment out more tests that aren't ported
* Comment out more tests
* Fix tests after merge
* Subscription test
* Invalid nonce test
* Pending exts
* WIP removeExtrinsic test
* Test remove_extrinsic
* Make state test: should_return_storage work
* Uncomment/fix the other non-subscription related state tests
* test: author_insertKey
* test: author_rotateKeys
* Get rest of state tests passing
* asyncify a little more
* Add todo to note #msg change
* Crashing test for has_session_keys
* Fix error conversion to avoid stack overflows
  Port author_hasSessionKeys test
  fmt
* test author_hasKey
* Add two missing tests
  Add a check on the return type
  Add todos for James's concerns
* RPC tests for state, author and system (#9859)
* Fix test runner
* Impl Default for SubscriptionTaskExecutor
* Keep the minimal amount of code needed to compile tests
* Re-instate `RpcSession` (for now)
* cleanup
* Port over RPC tests
* Add tokio
* No need to map CallError to CallError
* Port over system_ rpc tests
* Make it compile
* Use prost 0.8
* Use prost 0.8
* Make it compile
* Ignore more failing tests
* Comment out WIP tests
* Update lockfile
* No more juggling tokio versions
* No more wait_for_stop ?
* Remove browser-testing
* Arguments must be arrays
* Use same argument names
* Resolve todo: no wait_for_stop for WS server
  Add todo: is parse_rpc_result used?
  Cleanup imports
* fmt
* log
* One test passes
* Comment out more tests that aren't ported
* Comment out more tests
* Fix tests after merge
* Subscription test
* Invalid nonce test
* Pending exts
* WIP removeExtrinsic test
* Test remove_extrinsic
* Make state test: should_return_storage work
* Uncomment/fix the other non-subscription related state tests
* test: author_insertKey
* test: author_rotateKeys
* Get rest of state tests passing
* asyncify a little more
* Add todo to note #msg change
* Crashing test for has_session_keys
* Fix error conversion to avoid stack overflows
  Port author_hasSessionKeys test
  fmt
* test author_hasKey
* Add two missing tests
  Add a check on the return type
  Add todos for James's concerns
* offchain rpc tests
* Address todos
* fmt
  Co-authored-by: James Wilson
* fix drop in state test
* update jsonrpsee
* fix ignored system test
* fix chain tests
* remove some boilerplate
* Port BEEFY RPC (#9883)
* Merge master
* Port beefy RPC (ty @niklas!)
* trivial changes left over from merge
* Remove unused code
* Update jsonrpsee
* fix build
* make tests compile again
* beefy update jsonrpsee
* fix: respect rpc methods policy
* update cargo.lock
* update jsonrpsee
* update jsonrpsee
* downgrade error logs
* update jsonrpsee
* Fix typo
* remove unused file
* Better name
* Port Babe RPC tests
* Put docs back
* Resolve todo
* Port tests for System RPCs
* Resolve todo
* fix build
* Updated jsonrpsee to current master
* fix: port finality grandpa rpc tests
* Move .into() outside of the match
* more review grumbles
* jsonrpsee: add `rpc handlers` back (#10245)
* add back RpcHandlers
* cargo fmt
* fix docs
* fix grumble: remove needless alloc
* resolve TODO
* fmt
* Fix typo
* grumble: Use constants based on BASE_ERROR
* grumble: DRY whitelisted listening addresses
  grumble: s/JSONRPC/JSON-RPC/
* cleanup
* grumbles: Making readers aware of the possibility of gaps
* review grumbles
* grumbles
* remove notes from niklasad1
* Update `jsonrpsee`
* fix: jsonrpsee features
* jsonrpsee: fallback to random port in case the specified port failed (#10304)
* jsonrpsee: fallback to random port
* better comment
* Update client/rpc-servers/src/lib.rs
  Co-authored-by: Maciej Hirsz <1096222+maciejhirsz@users.noreply.github.com>
* Update client/rpc-servers/src/lib.rs
  Co-authored-by: Maciej Hirsz <1096222+maciejhirsz@users.noreply.github.com>
* address grumbles
* cargo fmt
* addrs already slice
  Co-authored-by: Maciej Hirsz <1096222+maciejhirsz@users.noreply.github.com>
* Update jsonrpsee to 092081a0a2b8904c6ebd2cd99e16c7bc13ffc3ae
* lockfile
* update jsonrpsee
* fix warning
* Don't fetch jsonrpsee from crates
* make tests compile again
* fix rpc tests
* remove unused deps
* update tokio
* fix rpc tests again
* fix: test runner `HttpServerBuilder::builder` fails unless it's called within tokio runtime
* cargo fmt
* grumbles: fix subscription aliases
* make clippy happy
* update remaining subscriptions alias
* cleanup
* cleanup
* fix chain subscription: less boilerplate (#10285)
* fix chain subscription: less boilerplate
* fix bad merge
* cargo fmt
* Switch to jsonrpsee 0.5
* fix build
* add missing features
* fix nit: remove needless Box::pin
* Integrate jsonrpsee metrics (#10395)
* draft metrics impl
* Use latest api
* Add missing file
* Http server metrics
* cleanup
* bump jsonrpsee
* Remove `ServerMetrics` and use a single middleware for both connection counting (aka sessions) and call metrics.
* fix build
* remove needless Arc::clone
* Update to jsonrpsee 0.6
* lolz
* fix metrics
* Revert "lolz"
  This reverts commit eed6c6a56e78d8e307b4950f4c52a1c3a2322ba1.
* fix: in-memory rpc support subscriptions
* commit Cargo.lock
* Update tests to 0.7
* fix TODOs
* ws server: generate subscriptionIDs as Strings
  Some libraries seem to expect the subscription IDs to be Strings; let's not break this in this PR.
* Increase timeout
* Port over tests
* cleanup
* Using error codes from the spec
* fix clippy
* cargo fmt
* update jsonrpsee
* fix nits
* fix: rpc_query
* enable custom subid gen through spawn_tasks
* remove unused deps
* unify tokio deps
* Revert "enable custom subid gen through spawn_tasks"
  This reverts commit 5c5eb70328fe39d154fdb55c56e637b4548cf470.
* fix bad merge of `test-utils`
* fix more nits
* downgrade wasm-instrument to 0.1.0
* [jsonrpsee]: enable custom RPC subscription ID generation (#10731)
* enable custom subid gen through spawn_tasks
* fix nits
* Update client/service/src/builder.rs
  Co-authored-by: David
* add PoC; needs jsonrpsee pr
* update jsonrpsee
* add re-exports
* add docs
  Co-authored-by: David
* cargo fmt
* fmt
* port RPC-API dev
* Remove unused file
* fix nit: remove async trait
* fix doc links
* fix merge nit: remove jsonrpc deps
* kill namespace on rpc apis
* companion for jsonrpsee v0.10 (#11158)
* companion for jsonrpsee v0.10
* update versions v0.10.0
* add some fixes
* spelling
* fix spaces
  Co-authored-by: Niklas Adolfsson
* send error before subs are closed
* fix unsubscribe method names: chain
* fix tests
* jsonrpc server: print bound local address
* grumbles: kill SubscriptionTaskExecutor
* Update client/sync-state-rpc/src/lib.rs
  Co-authored-by: Bastian Köcher
* Update client/rpc/src/chain/chain_full.rs
  Co-authored-by: Bastian Köcher
* Update client/rpc/src/chain/chain_full.rs
  Co-authored-by: Bastian Köcher
* sync-state-rpc: kill anyhow
* no more anyhow
* remove todo
* jsonrpsee: fix bad params in subscriptions. (#11251)
* update jsonrpsee
* fix error responses
* revert error codes
* don't do weird stuff in drop impl
* rpc servers: remove needless clone
* Remove silly constants
* chore: update jsonrpsee v0.12
* commit Cargo.lock
* deps: downgrade git2
* feat: CLI flag max subscriptions per connection
* metrics: use old logging format
* fix: read WS address from substrate output (#11379)

Co-authored-by: Niklas Adolfsson
Co-authored-by: James Wilson
Co-authored-by: Maciej Hirsz
Co-authored-by: Maciej Hirsz <1096222+maciejhirsz@users.noreply.github.com>
Co-authored-by: Bastian Köcher
---
 Cargo.lock | 1701 +++++------
 bin/node-template/node/Cargo.toml | 2 +-
 bin/node-template/node/src/rpc.rs | 23 +-
 bin/node-template/node/src/service.rs | 5 +-
 bin/node/cli/Cargo.toml | 1 +
 bin/node/cli/benches/block_production.rs | 4 +
 bin/node/cli/benches/transaction_pool.rs | 4 +
 bin/node/cli/src/service.rs | 10 +-
 bin/node/cli/tests/common.rs | 52 +-
 .../tests/running_the_node_and_interrupt.rs | 19 +-
 bin/node/cli/tests/temp_base_path_works.rs | 10 +-
 bin/node/rpc/Cargo.toml | 2 +-
 bin/node/rpc/src/lib.rs | 92 +-
 client/beefy/Cargo.toml | 2 +-
 client/beefy/rpc/Cargo.toml | 6 +-
 client/beefy/rpc/src/lib.rs | 308 ++-
 client/cli/src/commands/run_cmd.rs | 18 +-
 client/cli/src/config.rs | 14 +
 client/consensus/babe/rpc/Cargo.toml | 5 +-
 client/consensus/babe/rpc/src/lib.rs | 204 +-
 client/consensus/manual-seal/Cargo.toml | 4 +-
 client/consensus/manual-seal/src/error.rs | 34 +-
 client/consensus/manual-seal/src/rpc.rs | 83 +-
 client/finality-grandpa/rpc/Cargo.toml | 6 +-
 client/finality-grandpa/rpc/src/error.rs | 17 +-
 client/finality-grandpa/rpc/src/lib.rs | 345 ++--
 client/rpc-api/Cargo.toml | 5 +-
 client/rpc-api/src/author/error.rs | 180 +-
 client/rpc-api/src/author/mod.rs | 70 +-
 client/rpc-api/src/chain/error.rs | 25 +-
 client/rpc-api/src/chain/mod.rs | 111 +-
 client/rpc-api/src/child_state/mod.rs | 59 +-
 client/rpc-api/src/dev/error.rs | 42 +-
 client/rpc-api/src/dev/mod.rs | 9 +-
 client/rpc-api/src/errors.rs | 28 -
 client/rpc-api/src/helpers.rs | 41 -
 client/rpc-api/src/lib.rs | 8 +-
 client/rpc-api/src/metadata.rs | 60 -
 client/rpc-api/src/offchain/error.rs | 20 +-
 client/rpc-api/src/offchain/mod.rs | 17 +-
 client/rpc-api/src/policy.rs | 24 +-
 client/rpc-api/src/state/error.rs | 34 +-
 client/rpc-api/src/state/mod.rs | 155 +-
 client/rpc-api/src/system/error.rs | 33 +-
 client/rpc-api/src/system/mod.rs | 95 +-
 client/rpc-servers/Cargo.toml | 6 +-
 client/rpc-servers/src/lib.rs | 340 ++--
 client/rpc-servers/src/middleware.rs | 227 +--
 client/rpc/Cargo.toml | 10 +-
 client/rpc/src/author/mod.rs | 133 +-
 client/rpc/src/author/tests.rs | 380 ++--
 client/rpc/src/chain/chain_full.rs | 114 +-
 client/rpc/src/chain/mod.rs | 221 +--
 client/rpc/src/chain/tests.rs | 261 ++-
 client/rpc/src/dev/mod.rs | 16 +-
 client/rpc/src/dev/tests.rs | 35 +-
 client/rpc/src/lib.rs | 37 +-
 client/rpc/src/offchain/mod.rs | 17 +-
 client/rpc/src/offchain/tests.rs | 9 +-
 client/rpc/src/state/mod.rs | 385 ++--
 client/rpc/src/state/state_full.rs | 417 ++--
 client/rpc/src/state/tests.rs | 450 ++---
 client/rpc/src/system/mod.rs | 141 +-
 client/rpc/src/system/tests.rs | 250 ++-
 client/rpc/src/testing.rs | 33 +-
 client/service/Cargo.toml | 3 +-
 client/service/src/builder.rs | 190 +-
 client/service/src/config.rs | 12 +
 client/service/src/lib.rs | 274 ++-
 client/service/test/src/lib.rs | 4 +
 client/sync-state-rpc/Cargo.toml | 4 +-
 client/sync-state-rpc/src/lib.rs | 60 +-
 client/tracing/src/block/mod.rs | 2 +-
 client/transaction-pool/api/src/error.rs | 4 +-
client/transaction-pool/api/src/lib.rs | 11 +- frame/bags-list/remote-tests/Cargo.toml | 1 - frame/contracts/rpc/Cargo.toml | 4 +- frame/contracts/rpc/src/lib.rs | 126 +- frame/merkle-mountain-range/rpc/Cargo.toml | 4 +- frame/merkle-mountain-range/rpc/src/lib.rs | 99 +- frame/state-trie-migration/Cargo.toml | 2 +- frame/transaction-payment/rpc/Cargo.toml | 4 +- frame/transaction-payment/rpc/src/lib.rs | 115 +- test-utils/client/src/lib.rs | 106 +- test-utils/runtime/src/lib.rs | 13 + utils/frame/remote-externalities/Cargo.toml | 2 +- .../rpc/state-trie-migration-rpc/Cargo.toml | 4 +- .../rpc/state-trie-migration-rpc/src/lib.rs | 33 +- utils/frame/rpc/support/Cargo.toml | 3 +- utils/frame/rpc/support/src/lib.rs | 56 +- utils/frame/rpc/system/Cargo.toml | 7 +- utils/frame/rpc/system/src/lib.rs | 229 ++- utils/frame/try-runtime/cli/Cargo.toml | 2 +- 93 files changed, 3781 insertions(+), 5062 deletions(-) delete mode 100644 client/rpc-api/src/errors.rs delete mode 100644 client/rpc-api/src/helpers.rs delete mode 100644 client/rpc-api/src/metadata.rs diff --git a/Cargo.lock b/Cargo.lock index 907c917d723a7..928be6b18fc15 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -70,7 +70,7 @@ checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ "getrandom 0.2.3", "once_cell", - "version_check 0.9.2", + "version_check", ] [[package]] @@ -88,7 +88,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] @@ -225,21 +225,21 @@ dependencies = [ "concurrent-queue", "futures-lite", "libc", - "log 0.4.16", + "log", "once_cell", "parking", "polling", "slab", "socket2 0.4.4", "waker-fn", - "winapi 0.3.9", + "winapi", ] [[package]] name = "async-lock" -version = "2.3.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1996609732bde4a9988bc42125f55f2af5f3c36370e27c778d5191a4a1b63bfb" +checksum = "e97a171d191782fba31bb902b14ad94e24a68145032b7eedf871ab0bc0d077b6" dependencies = [ "event-listener", ] @@ -255,9 +255,9 @@ dependencies = [ [[package]] name = "async-process" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83137067e3a2a6a06d67168e49e68a0957d215410473a740cea95a2425c0b7c6" +checksum = "cf2c06e30a24e8c78a3987d07f0930edf76ef35e027e7bdb063fccafdad1f60c" dependencies = [ "async-io", "blocking", @@ -267,7 +267,7 @@ dependencies = [ "libc", "once_cell", "signal-hook", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -282,14 +282,14 @@ dependencies = [ "async-io", "async-lock", "async-process", - "crossbeam-utils 0.8.5", + "crossbeam-utils", "futures-channel", "futures-core", "futures-io", "futures-lite", "gloo-timers", "kv-log-macro", - "log 0.4.16", + "log", "memchr", "num_cpus", "once_cell", @@ -358,7 +358,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0de5164e5edbf51c45fb8c2d9664ae1c095cce1b265ecf7569093c0d66ef690" dependencies = [ - "bytes 1.1.0", + "bytes", "futures-sink", "futures-util", "memchr", @@ -388,7 +388,7 @@ checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ "hermit-abi", "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -436,25 +436,6 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"6107fe1be6682a68940da878d9e9f5e90ca5745b3dec9fd1bb393c8777d4f581" -[[package]] -name = "base64" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "489d6c0ed21b11d038c31b6ceccca973e65d73ba3bd8ecb9a2babf5546164643" -dependencies = [ - "byteorder", - "safemem", -] - -[[package]] -name = "base64" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e" -dependencies = [ - "byteorder", -] - [[package]] name = "base64" version = "0.13.0" @@ -476,10 +457,10 @@ version = "4.0.0-dev" dependencies = [ "beefy-primitives", "fnv", - "futures 0.3.21", + "futures", "futures-timer", "hex", - "log 0.4.16", + "log", "parity-scale-codec", "parking_lot 0.12.0", "sc-chain-spec", @@ -519,12 +500,9 @@ version = "4.0.0-dev" dependencies = [ "beefy-gadget", "beefy-primitives", - "futures 0.3.21", - "jsonrpc-core", - "jsonrpc-core-client", - "jsonrpc-derive", - "jsonrpc-pubsub", - "log 0.4.16", + "futures", + "jsonrpsee", + "log", "parity-scale-codec", "parking_lot 0.12.0", "sc-rpc", @@ -535,6 +513,7 @@ dependencies = [ "sp-runtime", "substrate-test-runtime-client", "thiserror", + "tokio", ] [[package]] @@ -544,7 +523,7 @@ dependencies = [ "env_logger 0.9.0", "hex", "hex-literal", - "log 0.4.16", + "log", "tiny-keccak", ] @@ -814,16 +793,6 @@ version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" -[[package]] -name = "bytes" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "206fdffcfa2df7cbe15601ef46c813fce0965eb3286db6b56c583b814b51c81c" -dependencies = [ - "byteorder", - "iovec", -] - [[package]] name = "bytes" version = "1.1.0" @@ -966,7 +935,7 @@ dependencies = [ "num-integer", "num-traits", "time", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1185,9 +1154,9 @@ dependencies = [ "cranelift-codegen-shared 0.76.0", "cranelift-entity 0.76.0", "gimli 0.25.0", - "log 0.4.16", + "log", "regalloc 0.0.31", - "smallvec 1.8.0", + "smallvec", "target-lexicon", ] @@ -1202,9 +1171,9 @@ dependencies = [ "cranelift-codegen-shared 0.82.3", "cranelift-entity 0.82.3", "gimli 0.26.1", - "log 0.4.16", + "log", "regalloc 0.0.34", - "smallvec 1.8.0", + "smallvec", "target-lexicon", ] @@ -1261,8 +1230,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "279afcc0d3e651b773f94837c3d581177b348c8d69e928104b2e9fccb226f921" dependencies = [ "cranelift-codegen 0.76.0", - "log 0.4.16", - "smallvec 1.8.0", + "log", + "smallvec", "target-lexicon", ] @@ -1273,8 +1242,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a006e3e32d80ce0e4ba7f1f9ddf66066d052a8c884a110b91d05404d6ce26dce" dependencies = [ "cranelift-codegen 0.82.3", - "log 0.4.16", - "smallvec 1.8.0", + "log", + "smallvec", "target-lexicon", ] @@ -1299,8 +1268,8 @@ dependencies = [ "cranelift-entity 0.82.3", "cranelift-frontend 0.82.3", "itertools", - "log 0.4.16", - "smallvec 1.8.0", + "log", + "smallvec", "wasmparser 0.83.0", "wasmtime-types", ] @@ -1325,7 +1294,7 @@ dependencies = [ "clap 2.34.0", "criterion-plot", "csv", - "futures 0.3.21", + "futures", "itertools", "lazy_static", "num-traits", @@ -1359,7 +1328,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dca26ee1f8d361640700bde38b2c37d8c22b3ce2d360e1fc1c74ea4b0aa7d775" 
dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.5", + "crossbeam-utils", ] [[package]] @@ -1370,7 +1339,7 @@ checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ "cfg-if 1.0.0", "crossbeam-epoch", - "crossbeam-utils 0.8.5", + "crossbeam-utils", ] [[package]] @@ -1380,23 +1349,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.5", + "crossbeam-utils", "lazy_static", "memoffset", "scopeguard", ] -[[package]] -name = "crossbeam-utils" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" -dependencies = [ - "autocfg 1.0.1", - "cfg-if 0.1.10", - "lazy_static", -] - [[package]] name = "crossbeam-utils" version = "0.8.5" @@ -1706,7 +1664,7 @@ checksum = "3fd78930633bd1c6e35c4b42b1df7b0cbc6bc191146e512bb3bedf243fcc3901" dependencies = [ "libc", "redox_users 0.3.5", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1717,7 +1675,7 @@ checksum = "03d86534ed367a67548dc68113a0f5db55432fdfbb6e6f9d77704397d95d5780" dependencies = [ "libc", "redox_users 0.4.0", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1728,7 +1686,7 @@ checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" dependencies = [ "libc", "redox_users 0.4.0", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -1964,7 +1922,7 @@ checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" dependencies = [ "atty", "humantime 1.3.0", - "log 0.4.16", + "log", "regex", "termcolor", ] @@ -1975,7 +1933,7 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" dependencies = [ - "log 0.4.16", + "log", "regex", ] @@ -1987,7 +1945,7 @@ checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3" dependencies = [ "atty", "humantime 2.1.0", - "log 0.4.16", + "log", "regex", "termcolor", ] @@ -2006,7 +1964,7 @@ checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" dependencies = [ "errno-dragonfly", "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2031,7 +1989,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e43f2f1833d64e33f15592464d6fdd70f349dda7b1a53088eb83cd94014008c5" dependencies = [ - "futures 0.3.21", + "futures", ] [[package]] @@ -2081,7 +2039,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fdbe0d94371f9ce939b555dd342d0686cc4c0cadbcd4b61d70af5ff97eb4126" dependencies = [ "env_logger 0.7.1", - "log 0.4.16", + "log", ] [[package]] @@ -2091,9 +2049,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9def033d8505edf199f6a5d07aa7e6d2d6185b164293b77f0efd108f4f3e11d" dependencies = [ "either", - "futures 0.3.21", + "futures", "futures-timer", - "log 0.4.16", + "log", "num-traits", "parity-scale-codec", "parking_lot 0.11.2", @@ -2138,21 +2096,6 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - [[package]] name = "fork-tree" version = "3.0.0" @@ -2167,7 +2110,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" dependencies = [ "matches", - "percent-encoding 2.1.0", + "percent-encoding", ] [[package]] @@ -2178,7 +2121,7 @@ dependencies = [ "frame-system", "hex-literal", "linregress", - "log 0.4.16", + "log", "parity-scale-codec", "paste 1.0.6", "scale-info", @@ -2210,7 +2153,7 @@ dependencies = [ "kvdb", "lazy_static", "linked-hash-map", - "log 0.4.16", + "log", "memory-db", "parity-scale-codec", "prettytable-rs", @@ -2249,7 +2192,7 @@ dependencies = [ "frame-election-provider-support", "frame-support", "parity-scale-codec", - "proc-macro-crate 1.1.3", + "proc-macro-crate", "proc-macro2", "quote", "scale-info", @@ -2336,7 +2279,7 @@ dependencies = [ "frame-system", "impl-trait-for-tuples", "k256", - "log 0.4.16", + "log", "once_cell", "parity-scale-codec", "parity-util-mem", @@ -2344,7 +2287,7 @@ dependencies = [ "pretty_assertions", "scale-info", "serde", - "smallvec 1.8.0", + "smallvec", "sp-arithmetic", "sp-core", "sp-core-hashing-proc-macro", @@ -2374,7 +2317,7 @@ name = "frame-support-procedural-tools" version = "4.0.0-dev" dependencies = [ "frame-support-procedural-tools-derive", - "proc-macro-crate 1.1.3", + "proc-macro-crate", "proc-macro2", "quote", "syn", @@ -2440,7 +2383,7 @@ version = "4.0.0-dev" dependencies = [ "criterion", "frame-support", - "log 0.4.16", + "log", "parity-scale-codec", "scale-info", "serde", @@ -2495,7 +2438,7 @@ dependencies = [ "lazy_static", "libc", "libloading 0.5.2", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2505,7 +2448,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" dependencies = [ "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2520,34 +2463,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" -[[package]] -name = "fuchsia-zircon" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e9763c69ebaae630ba35f74888db465e49e259ba1bc0eda7d06f4a067615d82" -dependencies = [ - "bitflags", - "fuchsia-zircon-sys", -] - -[[package]] -name = "fuchsia-zircon-sys" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" - [[package]] name = "funty" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" -[[package]] -name = "futures" -version = "0.1.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" - [[package]] name = "futures" version = "0.3.21" @@ -2658,7 +2579,6 @@ version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" dependencies = [ - "futures 0.1.31", "futures-channel", "futures-core", "futures-io", @@ -2707,7 +2627,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" dependencies = [ "typenum", - "version_check 0.9.2", + "version_check", ] [[package]] @@ -2775,8 +2695,8 @@ dependencies = [ "bitflags", "libc", "libgit2-sys", - "log 0.4.16", - "url 2.2.1", + "log", + "url", ] [[package]] @@ -2794,7 +2714,7 @@ dependencies = [ "aho-corasick", "bstr", "fnv", - "log 0.4.16", + "log", "regex", ] @@ -2828,7 +2748,7 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f072413d126e57991455e0a922b31e4c8ba7c2ffbebf6b78b4f8521397d65cd" dependencies = [ - "bytes 1.1.0", + "bytes", "fnv", "futures-core", "futures-sink", @@ -2853,7 +2773,7 @@ version = "4.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99d6a30320f094710245150395bc763ad23128d6a1ebbad7594dc4164b62c56b" dependencies = [ - "log 0.4.16", + "log", "pest", "pest_derive", "quick-error 2.0.0", @@ -2986,7 +2906,7 @@ checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" dependencies = [ "libc", "match_cfg", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -2995,7 +2915,7 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" dependencies = [ - "bytes 1.1.0", + "bytes", "fnv", "itoa 0.4.8", ] @@ -3006,7 +2926,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60daa14be0e0786db0f03a9e57cb404c9d756eed2b6c62b9ea98ec5743ec75a9" dependencies = [ - "bytes 1.1.0", + "bytes", "http", "pin-project-lite 0.2.6", ] @@ -3038,32 +2958,13 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" -[[package]] -name = "hyper" -version = "0.10.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a0652d9a2609a968c14be1a9ea00bf4b1d64e2e1f53a1b51b6fff3a6e829273" -dependencies = [ - "base64 0.9.3", - "httparse", - "language-tags", - "log 0.3.9", - "mime", - "num_cpus", - "time", - "traitobject", - "typeable", - "unicase 1.4.2", - "url 1.7.2", -] - [[package]] name = "hyper" version = "0.14.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7ec3e62bdc98a2f0393a5048e4c30ef659440ea6e0e572965103e72bd836f55" dependencies = [ - "bytes 1.1.0", + "bytes", "futures-channel", "futures-core", "futures-util", @@ -3089,8 +2990,8 @@ checksum = "5f9f7a97316d44c0af9b0301e65010573a853a9fc97046d7331d7f6bc0fd5a64" dependencies = [ "ct-logs", "futures-util", - "hyper 0.14.16", - "log 0.4.16", + "hyper", + "log", "rustls 0.19.1", "rustls-native-certs 0.5.0", "tokio", @@ -3098,36 +2999,12 @@ dependencies = [ "webpki 0.21.4", ] -[[package]] -name = "hyper-tls" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" -dependencies = [ - "bytes 1.1.0", - "hyper 0.14.16", - "native-tls", - "tokio", - "tokio-native-tls", -] - [[package]] name = "ident_case" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" -[[package]] -name = "idna" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e" -dependencies = [ - "matches", - "unicode-bidi", - "unicode-normalization", -] - [[package]] name = "idna" version = "0.2.3" @@ -3146,7 +3023,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cbc0fa01ffc752e9dbc72818cdb072cd028b86be5e09dd04c5a643704fe101a9" dependencies = [ "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -3158,10 +3035,10 @@ dependencies = [ "async-io", "core-foundation", "fnv", - "futures 0.3.21", + "futures", "if-addrs", "ipnet", - "log 0.4.16", + "log", "rtnetlink", "system-configuration", "windows", @@ -3231,15 +3108,6 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec58677acfea8a15352d42fc87d11d63596ade9239e0a7c9352914417515dbe6" -[[package]] -name = "iovec" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2b3ea6ff95e175473f8ffe6a7eb7c00d054240321b84c57051175fe3c1e075e" -dependencies = [ - "libc", -] - [[package]] name = "ip_network" version = "0.4.1" @@ -3254,7 +3122,7 @@ checksum = "723519edce41262b05d4143ceb95050e4c614f483e78e9fd9e39a8275a84ad98" dependencies = [ "socket2 0.4.4", "widestring", - "winapi 0.3.9", + "winapi", "winreg", ] @@ -3303,161 +3171,28 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "jsonrpc-client-transports" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2b99d4207e2a04fb4581746903c2bb7eb376f88de9c699d0f3e10feeac0cd3a" -dependencies = [ - "derive_more", - "futures 0.3.21", - "hyper 0.14.16", - "hyper-tls", - "jsonrpc-core", - "jsonrpc-pubsub", - "log 0.4.16", - "serde", - "serde_json", - "tokio", - "url 1.7.2", - "websocket", -] - -[[package]] -name = "jsonrpc-core" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14f7f76aef2d054868398427f6c54943cf3d1caa9a7ec7d0c38d69df97a965eb" -dependencies = [ - "futures 0.3.21", - "futures-executor", - "futures-util", - "log 0.4.16", - "serde", - "serde_derive", - "serde_json", -] - -[[package]] -name = "jsonrpc-core-client" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b51da17abecbdab3e3d4f26b01c5ec075e88d3abe3ab3b05dc9aa69392764ec0" -dependencies = [ - "futures 0.3.21", - "jsonrpc-client-transports", -] - -[[package]] -name = "jsonrpc-derive" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b939a78fa820cdfcb7ee7484466746a7377760970f6f9c6fe19f9edcc8a38d2" -dependencies = [ - "proc-macro-crate 0.1.5", - "proc-macro2", - "quote", - "syn", -] - -[[package]] -name = "jsonrpc-http-server" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1dea6e07251d9ce6a552abfb5d7ad6bc290a4596c8dcc3d795fae2bbdc1f3ff" -dependencies = [ - "futures 0.3.21", - "hyper 0.14.16", - "jsonrpc-core", - "jsonrpc-server-utils", - "log 0.4.16", - "net2", - "parking_lot 0.11.2", - "unicase 2.6.0", -] - -[[package]] -name = "jsonrpc-ipc-server" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"382bb0206323ca7cda3dcd7e245cea86d37d02457a02a975e3378fb149a48845" -dependencies = [ - "futures 0.3.21", - "jsonrpc-core", - "jsonrpc-server-utils", - "log 0.4.16", - "parity-tokio-ipc", - "parking_lot 0.11.2", - "tower-service", -] - -[[package]] -name = "jsonrpc-pubsub" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240f87695e6c6f62fb37f05c02c04953cf68d6408b8c1c89de85c7a0125b1011" -dependencies = [ - "futures 0.3.21", - "jsonrpc-core", - "lazy_static", - "log 0.4.16", - "parking_lot 0.11.2", - "rand 0.7.3", - "serde", -] - -[[package]] -name = "jsonrpc-server-utils" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4fdea130485b572c39a460d50888beb00afb3e35de23ccd7fad8ff19f0e0d4" -dependencies = [ - "bytes 1.1.0", - "futures 0.3.21", - "globset", - "jsonrpc-core", - "lazy_static", - "log 0.4.16", - "tokio", - "tokio-stream", - "tokio-util 0.6.7", - "unicase 2.6.0", -] - -[[package]] -name = "jsonrpc-ws-server" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f892c7d766369475ab7b0669f417906302d7c0fb521285c0a0c92e52e7c8e946" -dependencies = [ - "futures 0.3.21", - "jsonrpc-core", - "jsonrpc-server-utils", - "log 0.4.16", - "parity-ws", - "parking_lot 0.11.2", - "slab", -] - [[package]] name = "jsonrpsee" -version = "0.10.1" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91dc760c341fa81173f9a434931aaf32baad5552b0230cc6c93e8fb7eaad4c19" +checksum = "ad6f9ff3481f3069c92474b697c104502f7e9191d29b34bfa38ae9a19415f1cd" dependencies = [ "jsonrpsee-core", + "jsonrpsee-http-server", "jsonrpsee-proc-macros", "jsonrpsee-types", "jsonrpsee-ws-client", + "jsonrpsee-ws-server", + "tracing", ] [[package]] name = "jsonrpsee-client-transport" -version = "0.10.1" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "765f7a36d5087f74e3b3b47805c2188fef8eb54afcb587b078d9f8ebfe9c7220" +checksum = "4358e100faf43b2f3b7b0ecf0ad4ce3e6275fe12fda8428dedda2979751dd184" dependencies = [ - "futures 0.3.21", + "futures-util", "http", "jsonrpsee-core", "jsonrpsee-types", @@ -3474,18 +3209,22 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.10.1" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82ef77ecd20c2254d54f5da8c0738eacca61e6b6511268a8f2753e3148c6c706" +checksum = "8e1d26ab3868749d6f716345a5fbd3334a100c0709fe464bd9189ee9d78adcde" dependencies = [ "anyhow", "arrayvec 0.7.1", + "async-lock", "async-trait", "beef", "futures-channel", + "futures-timer", "futures-util", - "hyper 0.14.16", + "hyper", "jsonrpsee-types", + "parking_lot 0.12.0", + "rand 0.8.4", "rustc-hash", "serde", "serde_json", @@ -3495,13 +3234,32 @@ dependencies = [ "tracing", ] +[[package]] +name = "jsonrpsee-http-server" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee87f19a7a01a55248fc4b4861d822331c4fd60151d99e7ac9c6771999132671" +dependencies = [ + "futures-channel", + "futures-util", + "globset", + "hyper", + "jsonrpsee-core", + "jsonrpsee-types", + "lazy_static", + "serde_json", + "tokio", + "tracing", + "unicase", +] + [[package]] name = "jsonrpsee-proc-macros" -version = "0.10.1" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b7291c72805bc7d413b457e50d8ef3e87aa554da65ecbbc278abb7dfc283e7f0" +checksum = "b75da57d54817577801c2f7a1b638610819dfd86f0470c21a2af81b06eb41ba6" dependencies = [ - "proc-macro-crate 1.1.3", + "proc-macro-crate", "proc-macro2", "quote", "syn", @@ -3509,9 +3267,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.10.1" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b6aa52f322cbf20c762407629b8300f39bcc0cf0619840d9252a2f65fd2dd9" +checksum = "f5fe5a629443d17a30ff564881ba68881a710fd7eb02a538087b0bc51cb4962c" dependencies = [ "anyhow", "beef", @@ -3523,15 +3281,32 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.10.1" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd66d18bab78d956df24dd0d2e41e4c00afbb818fda94a98264bdd12ce8506ac" +checksum = "ba31eb2b9a4b73d8833f53fe55e579516289f8b31adb6104b3dbc629755acf7d" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", "jsonrpsee-types", ] +[[package]] +name = "jsonrpsee-ws-server" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "179fe584af5c0145f922c581770d073c661a514ae6cdfa5b1a0bce41fdfdf646" +dependencies = [ + "futures-channel", + "futures-util", + "jsonrpsee-core", + "jsonrpsee-types", + "serde_json", + "soketto", + "tokio", + "tokio-util 0.7.1", + "tracing", +] + [[package]] name = "k256" version = "0.10.4" @@ -3561,23 +3336,13 @@ dependencies = [ "tiny-keccak", ] -[[package]] -name = "kernel32-sys" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -dependencies = [ - "winapi 0.2.8", - "winapi-build", -] - [[package]] name = "kv-log-macro" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" dependencies = [ - "log 0.4.16", + "log", ] [[package]] @@ -3587,7 +3352,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a301d8ecb7989d4a6e2c57a49baca77d353bdbf879909debe3f375fe25d61f86" dependencies = [ "parity-util-mem", - "smallvec 1.8.0", + "smallvec", ] [[package]] @@ -3609,22 +3374,16 @@ checksum = "ca7fbdfd71cd663dceb0faf3367a99f8cf724514933e9867cec4995b6027cbc1" dependencies = [ "fs-swap", "kvdb", - "log 0.4.16", + "log", "num_cpus", "owning_ref", "parity-util-mem", "parking_lot 0.12.0", "regex", "rocksdb", - "smallvec 1.8.0", + "smallvec", ] -[[package]] -name = "language-tags" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" - [[package]] name = "lazy_static" version = "1.4.0" @@ -3651,9 +3410,9 @@ checksum = "efaa7b300f3b5fe8eb6bf21ce3895e1751d9665086af2d64b42f19701015ff4f" [[package]] name = "libgit2-sys" -version = "0.13.2+1.4.2" +version = "0.13.3+1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a42de9a51a5c12e00fc0e4ca6bc2ea43582fc6418488e8f615e905d886f258b" +checksum = "c24d36c3ac9b9996a2418d6bf428cc0bc5d1a814a84303fc60986088c5ed60de" dependencies = [ "cc", "libc", @@ -3668,7 +3427,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2b111a074963af1d37a139918ac6d49ad1d0d5e47f72fd55388619691a7d753" dependencies = [ "cc", - "winapi 0.3.9", 
+ "winapi", ] [[package]] @@ -3678,7 +3437,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f84d96438c15fcd6c3f244c8fce01d1e2b9c6b5623e9c711dc9286d8fc92d6a" dependencies = [ "cfg-if 1.0.0", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -3694,8 +3453,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "475ce2ac4a9727e53a519f6ee05b38abfcba8f0d39c4d24f103d184e36fd5b0f" dependencies = [ "atomic", - "bytes 1.1.0", - "futures 0.3.21", + "bytes", + "futures", "futures-timer", "getrandom 0.2.3", "instant", @@ -3729,7 +3488,7 @@ dependencies = [ "parking_lot 0.12.0", "pin-project 1.0.10", "rand 0.7.3", - "smallvec 1.8.0", + "smallvec", ] [[package]] @@ -3739,13 +3498,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a13b690e65046af6a09c0b27bd9508fa1cab0efce889de74b0b643b9d2a98f9a" dependencies = [ "async-trait", - "futures 0.3.21", + "futures", "futures-timer", "instant", "libp2p-core", "libp2p-request-response", "libp2p-swarm", - "log 0.4.16", + "log", "prost", "prost-build", "rand 0.8.4", @@ -3762,12 +3521,12 @@ dependencies = [ "ed25519-dalek", "either", "fnv", - "futures 0.3.21", + "futures", "futures-timer", "instant", "lazy_static", "libsecp256k1", - "log 0.4.16", + "log", "multiaddr", "multihash", "multistream-select", @@ -3779,7 +3538,7 @@ dependencies = [ "ring", "rw-stream-sink", "sha2 0.10.2", - "smallvec 1.8.0", + "smallvec", "thiserror", "unsigned-varint", "void", @@ -3793,7 +3552,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b1d37f042f748e224f04785d0e987ae09a2aa518d6401d82d412dad83e360ed" dependencies = [ "flate2", - "futures 0.3.21", + "futures", "libp2p-core", ] @@ -3804,10 +3563,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "066e33e854e10b5c93fc650458bf2179c7e0d143db260b0963e44a94859817f1" dependencies = [ "async-std-resolver", - "futures 0.3.21", + "futures", "libp2p-core", - "log 0.4.16", - "smallvec 1.8.0", + "log", + "smallvec", "trust-dns-resolver", ] @@ -3819,14 +3578,14 @@ checksum = "733d3ea6ebe7a7a85df2bc86678b93f24b015fae5fe3b3acc4c400e795a55d2d" dependencies = [ "cuckoofilter", "fnv", - "futures 0.3.21", + "futures", "libp2p-core", "libp2p-swarm", - "log 0.4.16", + "log", "prost", "prost-build", "rand 0.7.3", - "smallvec 1.8.0", + "smallvec", ] [[package]] @@ -3836,23 +3595,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a90c989a7c0969c2ab63e898da9bc735e3be53fb4f376e9c045ce516bcc9f928" dependencies = [ "asynchronous-codec", - "base64 0.13.0", + "base64", "byteorder", - "bytes 1.1.0", + "bytes", "fnv", - "futures 0.3.21", + "futures", "hex_fmt", "instant", "libp2p-core", "libp2p-swarm", - "log 0.4.16", + "log", "prometheus-client", "prost", "prost-build", "rand 0.7.3", "regex", "sha2 0.10.2", - "smallvec 1.8.0", + "smallvec", "unsigned-varint", "wasm-timer", ] @@ -3863,15 +3622,15 @@ version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c5ef5a5b57904c7c33d6713ef918d239dc6b7553458f3475d87f8a18e9c651c8" dependencies = [ - "futures 0.3.21", + "futures", "futures-timer", "libp2p-core", "libp2p-swarm", - "log 0.4.16", + "log", "lru", "prost", "prost-build", - "smallvec 1.8.0", + "smallvec", ] [[package]] @@ -3882,20 +3641,20 @@ checksum = "564e6bd64d177446399ed835b9451a8825b07929d6daa6a94e6405592974725e" dependencies = [ "arrayvec 0.5.2", "asynchronous-codec", - "bytes 1.1.0", + 
"bytes", "either", "fnv", - "futures 0.3.21", + "futures", "futures-timer", "instant", "libp2p-core", "libp2p-swarm", - "log 0.4.16", + "log", "prost", "prost-build", "rand 0.7.3", "sha2 0.10.2", - "smallvec 1.8.0", + "smallvec", "thiserror", "uint", "unsigned-varint", @@ -3911,14 +3670,14 @@ dependencies = [ "async-io", "data-encoding", "dns-parser", - "futures 0.3.21", + "futures", "if-watch", "lazy_static", "libp2p-core", "libp2p-swarm", - "log 0.4.16", + "log", "rand 0.8.4", - "smallvec 1.8.0", + "smallvec", "socket2 0.4.4", "void", ] @@ -3946,14 +3705,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "442eb0c9fff0bf22a34f015724b4143ce01877e079ed0963c722d94c07c72160" dependencies = [ "asynchronous-codec", - "bytes 1.1.0", - "futures 0.3.21", + "bytes", + "futures", "libp2p-core", - "log 0.4.16", + "log", "nohash-hasher", "parking_lot 0.12.0", "rand 0.7.3", - "smallvec 1.8.0", + "smallvec", "unsigned-varint", ] @@ -3963,12 +3722,12 @@ version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dd7e0c94051cda67123be68cf6b65211ba3dde7277be9068412de3e7ffd63ef" dependencies = [ - "bytes 1.1.0", + "bytes", "curve25519-dalek 3.0.2", - "futures 0.3.21", + "futures", "lazy_static", "libp2p-core", - "log 0.4.16", + "log", "prost", "prost-build", "rand 0.8.4", @@ -3985,12 +3744,12 @@ version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf57a3c2e821331dda9fe612d4654d676ab6e33d18d9434a18cced72630df6ad" dependencies = [ - "futures 0.3.21", + "futures", "futures-timer", "instant", "libp2p-core", "libp2p-swarm", - "log 0.4.16", + "log", "rand 0.7.3", "void", ] @@ -4002,10 +3761,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "962c0fb0e7212fb96a69b87f2d09bcefd317935239bdc79cda900e7a8897a3fe" dependencies = [ "asynchronous-codec", - "bytes 1.1.0", - "futures 0.3.21", + "bytes", + "futures", "libp2p-core", - "log 0.4.16", + "log", "prost", "prost-build", "unsigned-varint", @@ -4018,8 +3777,8 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f1a458bbda880107b5b36fcb9b5a1ef0c329685da0e203ed692a8ebe64cc92c" dependencies = [ - "futures 0.3.21", - "log 0.4.16", + "futures", + "log", "pin-project 1.0.10", "rand 0.7.3", "salsa20", @@ -4033,19 +3792,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3aa754cb7bccef51ebc3c458c6bbcef89d83b578a9925438389be841527d408f" dependencies = [ "asynchronous-codec", - "bytes 1.1.0", + "bytes", "either", - "futures 0.3.21", + "futures", "futures-timer", "instant", "libp2p-core", "libp2p-swarm", - "log 0.4.16", + "log", "pin-project 1.0.10", "prost", "prost-build", "rand 0.8.4", - "smallvec 1.8.0", + "smallvec", "static_assertions", "thiserror", "unsigned-varint", @@ -4060,12 +3819,12 @@ checksum = "bbd0baab894c5b84da510b915d53264d566c3c35889f09931fe9edbd2a773bee" dependencies = [ "asynchronous-codec", "bimap", - "futures 0.3.21", + "futures", "futures-timer", "instant", "libp2p-core", "libp2p-swarm", - "log 0.4.16", + "log", "prost", "prost-build", "rand 0.8.4", @@ -4082,14 +3841,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6a6fc6c9ad95661f46989473b34bd2993d14a4de497ff3b2668a910d4b869" dependencies = [ "async-trait", - "bytes 1.1.0", - "futures 0.3.21", + "bytes", + "futures", "instant", "libp2p-core", "libp2p-swarm", - "log 0.4.16", + "log", "rand 0.7.3", - 
"smallvec 1.8.0", + "smallvec", "unsigned-varint", ] @@ -4101,14 +3860,14 @@ checksum = "8f0c69ad9e8f7c5fc50ad5ad9c7c8b57f33716532a2b623197f69f93e374d14c" dependencies = [ "either", "fnv", - "futures 0.3.21", + "futures", "futures-timer", "instant", "libp2p-core", - "log 0.4.16", + "log", "pin-project 1.0.10", "rand 0.7.3", - "smallvec 1.8.0", + "smallvec", "thiserror", "void", ] @@ -4130,13 +3889,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "193447aa729c85aac2376828df76d171c1a589c9e6b58fcc7f9d9a020734122c" dependencies = [ "async-io", - "futures 0.3.21", + "futures", "futures-timer", "if-watch", "ipnet", "libc", "libp2p-core", - "log 0.4.16", + "log", "socket2 0.4.4", ] @@ -4147,9 +3906,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24bdab114f7f2701757d6541266e1131b429bbae382008f207f2114ee4222dcb" dependencies = [ "async-std", - "futures 0.3.21", + "futures", "libp2p-core", - "log 0.4.16", + "log", ] [[package]] @@ -4158,7 +3917,7 @@ version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4f6ea0f84a967ef59a16083f222c18115ae2e91db69809dce275df62e101b279" dependencies = [ - "futures 0.3.21", + "futures", "js-sys", "libp2p-core", "parity-send-wrapper", @@ -4173,14 +3932,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c932834c3754501c368d1bf3d0fb458487a642b90fc25df082a3a2f3d3b32e37" dependencies = [ "either", - "futures 0.3.21", + "futures", "futures-rustls", "libp2p-core", - "log 0.4.16", + "log", "quicksink", "rw-stream-sink", "soketto", - "url 2.2.1", + "url", "webpki-roots", ] @@ -4190,7 +3949,7 @@ version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be902ebd89193cd020e89e89107726a38cfc0d16d18f613f4a37d046e92c7517" dependencies = [ - "futures 0.3.21", + "futures", "libp2p-core", "parking_lot 0.12.0", "thiserror", @@ -4219,7 +3978,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b0452aac8bab02242429380e9b2f94ea20cea2b37e2c1777a1358799bbe97f37" dependencies = [ "arrayref", - "base64 0.13.0", + "base64", "digest 0.9.0", "hmac-drbg", "libsecp256k1-core", @@ -4321,15 +4080,6 @@ dependencies = [ "paste 0.1.18", ] -[[package]] -name = "lock_api" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4da24a77a3d8a6d4862d95f72e6fdb9c09a643ecdb402d754004a557f2bec75" -dependencies = [ - "scopeguard", -] - [[package]] name = "lock_api" version = "0.4.6" @@ -4339,15 +4089,6 @@ dependencies = [ "scopeguard", ] -[[package]] -name = "log" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b" -dependencies = [ - "log 0.4.16", -] - [[package]] name = "log" version = "0.4.16" @@ -4462,12 +4203,6 @@ dependencies = [ "rawpointer", ] -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" - [[package]] name = "memchr" version = "2.4.1" @@ -4481,7 +4216,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b" dependencies = [ "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -4540,15 +4275,6 @@ dependencies = [ "zeroize", ] 
-[[package]] -name = "mime" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba626b8a6de5da682e1caa06bdb42a335aee5a84db8e5046a3e8ab17ba0a3ae0" -dependencies = [ - "log 0.3.9", -] - [[package]] name = "minimal-lexical" version = "0.2.1" @@ -4565,25 +4291,6 @@ dependencies = [ "autocfg 1.0.1", ] -[[package]] -name = "mio" -version = "0.6.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4" -dependencies = [ - "cfg-if 0.1.10", - "fuchsia-zircon", - "fuchsia-zircon-sys", - "iovec", - "kernel32-sys", - "libc", - "log 0.4.16", - "miow 0.2.2", - "net2", - "slab", - "winapi 0.2.8", -] - [[package]] name = "mio" version = "0.8.0" @@ -4591,34 +4298,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba272f85fa0b41fc91872be579b3bbe0f56b792aa361a380eb669469f68dafb2" dependencies = [ "libc", - "log 0.4.16", - "miow 0.3.6", + "log", + "miow", "ntapi", - "winapi 0.3.9", -] - -[[package]] -name = "mio-extras" -version = "2.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52403fe290012ce777c4626790c8951324a2b9e3316b3143779c72b029742f19" -dependencies = [ - "lazycell", - "log 0.4.16", - "mio 0.6.23", - "slab", -] - -[[package]] -name = "miow" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d" -dependencies = [ - "kernel32-sys", - "net2", - "winapi 0.2.8", - "ws2_32-sys", + "winapi", ] [[package]] @@ -4628,7 +4311,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a33c1b55807fbed163481b5ba66db4b2fa6cde694a5027be10fb724206c5897" dependencies = [ "socket2 0.3.19", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -4648,11 +4331,11 @@ dependencies = [ "byteorder", "data-encoding", "multihash", - "percent-encoding 2.1.0", + "percent-encoding", "serde", "static_assertions", "unsigned-varint", - "url 2.2.1", + "url", ] [[package]] @@ -4689,7 +4372,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc076939022111618a5026d3be019fd8b366e76314538ff9a1b59ffbcbf98bcd" dependencies = [ - "proc-macro-crate 1.1.3", + "proc-macro-crate", "proc-macro-error", "proc-macro2", "quote", @@ -4709,11 +4392,11 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "363a84be6453a70e63513660f4894ef815daf88e3356bffcda9ca27d810ce83b" dependencies = [ - "bytes 1.1.0", - "futures 0.3.21", - "log 0.4.16", + "bytes", + "futures", + "log", "pin-project 1.0.10", - "smallvec 1.8.0", + "smallvec", "unsigned-varint", ] @@ -4755,35 +4438,6 @@ dependencies = [ "rand 0.8.4", ] -[[package]] -name = "native-tls" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48ba9f7719b5a0f42f338907614285fb5fd70e53858141f69898a1fb7203b24d" -dependencies = [ - "lazy_static", - "libc", - "log 0.4.16", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - "tempfile", -] - -[[package]] -name = "net2" -version = "0.2.37" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" -dependencies = [ - "cfg-if 0.1.10", - "libc", - "winapi 0.3.9", -] - [[package]] 
name = "netlink-packet-core" version = "0.4.2" @@ -4828,9 +4482,9 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef8785b8141e8432aa45fceb922a7e876d7da3fad37fa7e7ec702ace3aa0826b" dependencies = [ - "bytes 1.1.0", - "futures 0.3.21", - "log 0.4.16", + "bytes", + "futures", + "log", "netlink-packet-core", "netlink-sys", "tokio", @@ -4843,10 +4497,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e4c9f9547a08241bee7b6558b9b98e1f290d187de8b7cfca2bbb4937bcaa8f8" dependencies = [ "async-io", - "bytes 1.1.0", - "futures 0.3.21", + "bytes", + "futures", "libc", - "log 0.4.16", + "log", ] [[package]] @@ -4882,13 +4536,13 @@ dependencies = [ "clap 3.1.6", "derive_more", "fs_extra", - "futures 0.3.21", + "futures", "hash-db", "hex", "kvdb", "kvdb-rocksdb", "lazy_static", - "log 0.4.16", + "log", "node-primitives", "node-runtime", "node-testing", @@ -4924,9 +4578,10 @@ dependencies = [ "frame-benchmarking-cli", "frame-system", "frame-system-rpc-runtime-api", - "futures 0.3.21", + "futures", "hex-literal", - "log 0.4.16", + "jsonrpsee", + "log", "nix 0.23.1", "node-executor", "node-inspect", @@ -5003,7 +4658,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "futures 0.3.21", + "futures", "node-primitives", "node-runtime", "node-testing", @@ -5060,7 +4715,7 @@ dependencies = [ name = "node-rpc" version = "3.0.0-dev" dependencies = [ - "jsonrpc-core", + "jsonrpsee", "node-primitives", "pallet-contracts-rpc", "pallet-mmr-rpc", @@ -5100,7 +4755,7 @@ dependencies = [ "frame-system-rpc-runtime-api", "frame-try-runtime", "hex-literal", - "log 0.4.16", + "log", "node-primitives", "pallet-asset-tx-payment", "pallet-assets", @@ -5195,7 +4850,7 @@ dependencies = [ "frame-benchmarking", "frame-benchmarking-cli", "frame-system", - "jsonrpc-core", + "jsonrpsee", "node-template-runtime", "pallet-transaction-payment", "pallet-transaction-payment-rpc", @@ -5272,8 +4927,8 @@ version = "3.0.0-dev" dependencies = [ "frame-system", "fs_extra", - "futures 0.3.21", - "log 0.4.16", + "futures", + "log", "node-executor", "node-primitives", "node-runtime", @@ -5320,7 +4975,7 @@ checksum = "1b1d11e1ef389c76fe5b81bcaf2ea32cf88b62bc494e19f493d0b30e7a930109" dependencies = [ "memchr", "minimal-lexical", - "version_check 0.9.2", + "version_check", ] [[package]] @@ -5329,7 +4984,7 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] @@ -5462,39 +5117,12 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" -[[package]] -name = "openssl" -version = "0.10.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "549430950c79ae24e6d02e0b7404534ecf311d94cc9f861e9e4020187d13d885" -dependencies = [ - "bitflags", - "cfg-if 1.0.0", - "foreign-types", - "libc", - "once_cell", - "openssl-sys", -] - [[package]] name = "openssl-probe" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" -[[package]] -name = "openssl-sys" -version = "0.9.65" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7a7907e3bfa08bb85105209cdfcb6c63d109f8f6c1ed6ca318fff5c1853fbc1d" -dependencies = [ - "autocfg 1.0.1", - "cc", - "libc", - "pkg-config", - "vcpkg", -] - [[package]] name = "os_str_bytes" version = "6.0.0" @@ -5510,7 +5138,7 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53cdc5b785b7a58c5aad8216b3dfa114df64b0b06ae6e1501cef91df2fbdf8f9" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] @@ -5536,7 +5164,7 @@ dependencies = [ "scale-info", "serde", "serde_json", - "smallvec 1.8.0", + "smallvec", "sp-core", "sp-io", "sp-runtime", @@ -5633,7 +5261,7 @@ dependencies = [ "frame-election-provider-support", "frame-support", "frame-system", - "log 0.4.16", + "log", "pallet-authorship", "pallet-balances", "pallet-offences", @@ -5662,7 +5290,7 @@ dependencies = [ "frame-election-provider-support", "frame-support", "frame-system", - "log 0.4.16", + "log", "pallet-balances", "parity-scale-codec", "scale-info", @@ -5690,7 +5318,7 @@ dependencies = [ "frame-election-provider-support", "frame-support", "frame-system", - "log 0.4.16", + "log", "pallet-bags-list", "pallet-staking", "remote-externalities", @@ -5699,7 +5327,6 @@ dependencies = [ "sp-std", "sp-storage", "sp-tracing", - "tokio", ] [[package]] @@ -5709,7 +5336,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "log 0.4.16", + "log", "pallet-transaction-payment", "parity-scale-codec", "scale-info", @@ -5747,7 +5374,7 @@ dependencies = [ "frame-system", "hex", "hex-literal", - "log 0.4.16", + "log", "pallet-beefy", "pallet-mmr", "pallet-session", @@ -5768,7 +5395,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "log 0.4.16", + "log", "pallet-balances", "pallet-treasury", "parity-scale-codec", @@ -5786,7 +5413,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "log 0.4.16", + "log", "pallet-balances", "pallet-bounties", "pallet-treasury", @@ -5805,7 +5432,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "log 0.4.16", + "log", "parity-scale-codec", "scale-info", "sp-core", @@ -5825,7 +5452,7 @@ dependencies = [ "frame-support", "frame-system", "hex-literal", - "log 0.4.16", + "log", "pallet-balances", "pallet-contracts-primitives", "pallet-contracts-proc-macro", @@ -5838,7 +5465,7 @@ dependencies = [ "rand_pcg 0.3.1", "scale-info", "serde", - "smallvec 1.8.0", + "smallvec", "sp-core", "sp-io", "sp-keystore", @@ -5877,9 +5504,7 @@ dependencies = [ name = "pallet-contracts-rpc" version = "4.0.0-dev" dependencies = [ - "jsonrpc-core", - "jsonrpc-core-client", - "jsonrpc-derive", + "jsonrpsee", "pallet-contracts-primitives", "pallet-contracts-rpc-runtime-api", "parity-scale-codec", @@ -5949,7 +5574,7 @@ dependencies = [ "frame-election-provider-support", "frame-support", "frame-system", - "log 0.4.16", + "log", "pallet-balances", "pallet-election-provider-support-benchmarking", "parity-scale-codec", @@ -5986,7 +5611,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "log 0.4.16", + "log", "pallet-balances", "parity-scale-codec", "scale-info", @@ -6005,7 +5630,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "log 0.4.16", + "log", "pallet-balances", "parity-scale-codec", "scale-info", @@ -6022,7 +5647,7 @@ dependencies = [ "frame-support", "frame-system", "lite-json", - "log 0.4.16", + "log", "parity-scale-codec", "scale-info", "sp-core", @@ -6073,7 +5698,7 @@ dependencies = [ "frame-election-provider-support", 
"frame-support", "frame-system", - "log 0.4.16", + "log", "pallet-authorship", "pallet-balances", "pallet-offences", @@ -6118,7 +5743,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "log 0.4.16", + "log", "pallet-authorship", "pallet-session", "parity-scale-codec", @@ -6172,7 +5797,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "log 0.4.16", + "log", "parity-scale-codec", "scale-info", "sp-core", @@ -6204,9 +5829,7 @@ dependencies = [ name = "pallet-mmr-rpc" version = "3.0.0" dependencies = [ - "jsonrpc-core", - "jsonrpc-core-client", - "jsonrpc-derive", + "jsonrpsee", "parity-scale-codec", "serde", "serde_json", @@ -6254,7 +5877,7 @@ version = "4.0.0-dev" dependencies = [ "frame-support", "frame-system", - "log 0.4.16", + "log", "parity-scale-codec", "scale-info", "sp-core", @@ -6309,7 +5932,7 @@ version = "4.0.0-dev" dependencies = [ "frame-support", "frame-system", - "log 0.4.16", + "log", "pallet-balances", "parity-scale-codec", "scale-info", @@ -6454,7 +6077,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "log 0.4.16", + "log", "pallet-preimage", "parity-scale-codec", "scale-info", @@ -6487,7 +6110,7 @@ dependencies = [ "frame-support", "frame-system", "impl-trait-for-tuples", - "log 0.4.16", + "log", "pallet-timestamp", "parity-scale-codec", "scale-info", @@ -6548,7 +6171,7 @@ dependencies = [ "frame-election-provider-support", "frame-support", "frame-system", - "log 0.4.16", + "log", "pallet-authorship", "pallet-bags-list", "pallet-balances", @@ -6574,7 +6197,7 @@ dependencies = [ name = "pallet-staking-reward-curve" version = "4.0.0-dev" dependencies = [ - "proc-macro-crate 1.1.3", + "proc-macro-crate", "proc-macro2", "quote", "sp-runtime", @@ -6585,7 +6208,7 @@ dependencies = [ name = "pallet-staking-reward-fn" version = "4.0.0-dev" dependencies = [ - "log 0.4.16", + "log", "sp-arithmetic", ] @@ -6596,7 +6219,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "log 0.4.16", + "log", "pallet-balances", "parity-scale-codec", "parking_lot 0.12.0", @@ -6649,7 +6272,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "log 0.4.16", + "log", "parity-scale-codec", "scale-info", "sp-core", @@ -6667,7 +6290,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "log 0.4.16", + "log", "pallet-balances", "pallet-treasury", "parity-scale-codec", @@ -6691,7 +6314,7 @@ dependencies = [ "scale-info", "serde", "serde_json", - "smallvec 1.8.0", + "smallvec", "sp-core", "sp-io", "sp-runtime", @@ -6702,9 +6325,7 @@ dependencies = [ name = "pallet-transaction-payment-rpc" version = "4.0.0-dev" dependencies = [ - "jsonrpc-core", - "jsonrpc-core-client", - "jsonrpc-derive", + "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", "parity-scale-codec", "sp-api", @@ -6769,7 +6390,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "log 0.4.16", + "log", "pallet-balances", "parity-scale-codec", "scale-info", @@ -6802,7 +6423,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "log 0.4.16", + "log", "pallet-balances", "parity-scale-codec", "scale-info", @@ -6841,7 +6462,7 @@ dependencies = [ "fs2", "hex", "libc", - "log 0.4.16", + "log", "lz4", "memmap2 0.2.1", "parking_lot 0.11.2", @@ -6869,7 +6490,7 @@ version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c6e626dc84025ff56bf1476ed0e30d10c84d7f89a475ef46ebabee1095a8fba" 
dependencies = [ - "proc-macro-crate 1.1.3", + "proc-macro-crate", "proc-macro2", "quote", "syn", @@ -6881,20 +6502,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa9777aa91b8ad9dd5aaa04a9b6bcb02c7f1deb952fca5a66034d5e63afc5c6f" -[[package]] -name = "parity-tokio-ipc" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9981e32fb75e004cc148f5fb70342f393830e0a4aa62e3cc93b50976218d42b6" -dependencies = [ - "futures 0.3.21", - "libc", - "log 0.4.16", - "rand 0.7.3", - "tokio", - "winapi 0.3.9", -] - [[package]] name = "parity-util-mem" version = "0.11.0" @@ -6907,8 +6514,8 @@ dependencies = [ "parity-util-mem-derive", "parking_lot 0.12.0", "primitive-types", - "smallvec 1.8.0", - "winapi 0.3.9", + "smallvec", + "winapi", ] [[package]] @@ -6937,41 +6544,12 @@ version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be5e13c266502aadf83426d87d81a0f5d1ef45b8027f5a471c360abfe4bfae92" -[[package]] -name = "parity-ws" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0ab8a461779bd022964cae2b4989fa9c99deb270bec162da2125ec03c09fcaa" -dependencies = [ - "byteorder", - "bytes 0.4.12", - "httparse", - "log 0.4.16", - "mio 0.6.23", - "mio-extras", - "rand 0.7.3", - "sha-1 0.8.2", - "slab", - "url 2.2.1", -] - [[package]] name = "parking" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" -[[package]] -name = "parking_lot" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" -dependencies = [ - "lock_api 0.3.4", - "parking_lot_core 0.6.2", - "rustc_version 0.2.3", -] - [[package]] name = "parking_lot" version = "0.11.2" @@ -6979,7 +6557,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" dependencies = [ "instant", - "lock_api 0.4.6", + "lock_api", "parking_lot_core 0.8.5", ] @@ -6989,25 +6567,10 @@ version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87f5ec2493a61ac0506c0f4199f99070cbe83857b0337006a30f3e6719b8ef58" dependencies = [ - "lock_api 0.4.6", + "lock_api", "parking_lot_core 0.9.1", ] -[[package]] -name = "parking_lot_core" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b" -dependencies = [ - "cfg-if 0.1.10", - "cloudabi", - "libc", - "redox_syscall 0.1.57", - "rustc_version 0.2.3", - "smallvec 0.6.14", - "winapi 0.3.9", -] - [[package]] name = "parking_lot_core" version = "0.8.5" @@ -7018,8 +6581,8 @@ dependencies = [ "instant", "libc", "redox_syscall 0.2.10", - "smallvec 1.8.0", - "winapi 0.3.9", + "smallvec", + "winapi", ] [[package]] @@ -7031,7 +6594,7 @@ dependencies = [ "cfg-if 1.0.0", "libc", "redox_syscall 0.2.10", - "smallvec 1.8.0", + "smallvec", "windows-sys", ] @@ -7084,12 +6647,6 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" -[[package]] -name = "percent-encoding" -version = "1.0.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" - [[package]] name = "percent-encoding" version = "2.1.0" @@ -7255,9 +6812,9 @@ checksum = "a2a7bc6b2a29e632e45451c941832803a18cce6781db04de8a04696cdca8bde4" dependencies = [ "cfg-if 0.1.10", "libc", - "log 0.4.16", + "log", "wepoll-sys", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -7355,15 +6912,6 @@ dependencies = [ "uint", ] -[[package]] -name = "proc-macro-crate" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" -dependencies = [ - "toml", -] - [[package]] name = "proc-macro-crate" version = "1.1.3" @@ -7384,7 +6932,7 @@ dependencies = [ "proc-macro2", "quote", "syn", - "version_check 0.9.2", + "version_check", ] [[package]] @@ -7395,7 +6943,7 @@ checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ "proc-macro2", "quote", - "version_check 0.9.2", + "version_check", ] [[package]] @@ -7406,9 +6954,9 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro2" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec757218438d5fda206afc041538b2f6d889286160d649a86a24d37e1235afd1" +checksum = "9027b48e9d4c9175fa2218adf3557f91c1137021739951d4932f5f8268ac48aa" dependencies = [ "unicode-xid", ] @@ -7456,7 +7004,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001" dependencies = [ - "bytes 1.1.0", + "bytes", "prost-derive", ] @@ -7466,11 +7014,11 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62941722fb675d463659e49c4f3fe1fe792ff24fe5bbaa9c08cd3b98a1c354f5" dependencies = [ - "bytes 1.1.0", + "bytes", "heck 0.3.2", "itertools", "lazy_static", - "log 0.4.16", + "log", "multimap", "petgraph", "prost", @@ -7499,7 +7047,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a" dependencies = [ - "bytes 1.1.0", + "bytes", "prost", ] @@ -7551,7 +7099,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" dependencies = [ "env_logger 0.8.4", - "log 0.4.16", + "log", "rand 0.8.4", ] @@ -7597,7 +7145,7 @@ dependencies = [ "rand_os", "rand_pcg 0.1.2", "rand_xorshift", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -7743,7 +7291,7 @@ checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" dependencies = [ "libc", "rand_core 0.4.2", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -7757,7 +7305,7 @@ dependencies = [ "libc", "rand_core 0.4.2", "rdrand", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -7823,7 +7371,7 @@ checksum = "9ab346ac5921dc62ffa9f89b7a773907511cdfa5490c572ae9be1be33e8afa4a" dependencies = [ "crossbeam-channel", "crossbeam-deque", - "crossbeam-utils 0.8.5", + "crossbeam-utils", "lazy_static", "num_cpus", ] @@ -7899,9 +7447,9 @@ version = "0.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "571f7f397d61c4755285cd37853fe8e03271c243424a907415909379659381c5" dependencies = [ - "log 0.4.16", + "log", "rustc-hash", - 
"smallvec 1.8.0", + "smallvec", ] [[package]] @@ -7910,9 +7458,9 @@ version = "0.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62446b1d3ebf980bdc68837700af1d77b37bc430e524bf95319c6eada2a4cc02" dependencies = [ - "log 0.4.16", + "log", "rustc-hash", - "smallvec 1.8.0", + "smallvec", ] [[package]] @@ -7951,7 +7499,7 @@ dependencies = [ "bitflags", "libc", "mach", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -7963,7 +7511,7 @@ dependencies = [ "bitflags", "libc", "mach", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -7973,7 +7521,7 @@ dependencies = [ "env_logger 0.9.0", "frame-support", "jsonrpsee", - "log 0.4.16", + "log", "pallet-elections-phragmen", "parity-scale-codec", "serde", @@ -7991,7 +7539,7 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] @@ -8042,7 +7590,7 @@ dependencies = [ "spin", "untrusted", "web-sys", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -8087,7 +7635,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffc936cf8a7ea60c58f030fd36a612a48f440610214dc54bc36431f9ea0c3efb" dependencies = [ "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -8097,8 +7645,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f54290e54521dac3de4149d83ddf9f62a359b3cc93bcb494a794a41e6f4744b" dependencies = [ "async-global-executor", - "futures 0.3.21", - "log 0.4.16", + "futures", + "log", "netlink-packet-route", "netlink-proto", "nix 0.22.3", @@ -8111,10 +7659,10 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" dependencies = [ - "base64 0.13.0", + "base64", "blake2b_simd 0.5.11", "constant_time_eq", - "crossbeam-utils 0.8.5", + "crossbeam-utils", ] [[package]] @@ -8173,7 +7721,7 @@ dependencies = [ "io-lifetimes", "libc", "linux-raw-sys", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -8182,8 +7730,8 @@ version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" dependencies = [ - "base64 0.13.0", - "log 0.4.16", + "base64", + "log", "ring", "sct 0.6.0", "webpki 0.21.4", @@ -8195,7 +7743,7 @@ version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d37e5e2290f3e040b594b1a9e04377c2c671f1a1cfd9bfdef82106ac1c113f84" dependencies = [ - "log 0.4.16", + "log", "ring", "sct 0.7.0", "webpki 0.22.0", @@ -8231,7 +7779,7 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" dependencies = [ - "base64 0.13.0", + "base64", ] [[package]] @@ -8246,7 +7794,7 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4da5fcb054c46f5a5dff833b129285a93d3f0179531735e6c866e8cc307d2020" dependencies = [ - "futures 0.3.21", + "futures", "pin-project 0.4.27", "static_assertions", ] @@ -8266,12 +7814,6 @@ dependencies = [ "rustc_version 0.2.3", ] -[[package]] -name = "safemem" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" - [[package]] name = 
"salsa20" version = "0.9.0" @@ -8294,7 +7836,7 @@ dependencies = [ name = "sc-allocator" version = "4.1.0-dev" dependencies = [ - "log 0.4.16", + "log", "sp-core", "sp-wasm-interface", "thiserror", @@ -8305,11 +7847,11 @@ name = "sc-authority-discovery" version = "0.10.0-dev" dependencies = [ "async-trait", - "futures 0.3.21", + "futures", "futures-timer", "ip_network", "libp2p", - "log 0.4.16", + "log", "parity-scale-codec", "prost", "prost-build", @@ -8333,9 +7875,9 @@ dependencies = [ name = "sc-basic-authorship" version = "0.10.0-dev" dependencies = [ - "futures 0.3.21", + "futures", "futures-timer", - "log 0.4.16", + "log", "parity-scale-codec", "parking_lot 0.12.0", "sc-block-builder", @@ -8390,7 +7932,7 @@ dependencies = [ name = "sc-chain-spec-derive" version = "4.0.0-dev" dependencies = [ - "proc-macro-crate 1.1.3", + "proc-macro-crate", "proc-macro2", "quote", "syn", @@ -8403,10 +7945,10 @@ dependencies = [ "chrono", "clap 3.1.6", "fdlimit", - "futures 0.3.21", + "futures", "hex", "libp2p", - "log 0.4.16", + "log", "names", "parity-scale-codec", "rand 0.7.3", @@ -8440,9 +7982,9 @@ name = "sc-client-api" version = "4.0.0-dev" dependencies = [ "fnv", - "futures 0.3.21", + "futures", "hash-db", - "log 0.4.16", + "log", "parity-scale-codec", "parking_lot 0.12.0", "sc-executor", @@ -8474,7 +8016,7 @@ dependencies = [ "kvdb-memorydb", "kvdb-rocksdb", "linked-hash-map", - "log 0.4.16", + "log", "parity-db", "parity-scale-codec", "parking_lot 0.12.0", @@ -8498,10 +8040,10 @@ name = "sc-consensus" version = "0.10.0-dev" dependencies = [ "async-trait", - "futures 0.3.21", + "futures", "futures-timer", "libp2p", - "log 0.4.16", + "log", "parking_lot 0.12.0", "sc-client-api", "sc-utils", @@ -8522,8 +8064,8 @@ name = "sc-consensus-aura" version = "0.10.0-dev" dependencies = [ "async-trait", - "futures 0.3.21", - "log 0.4.16", + "futures", + "log", "parity-scale-codec", "parking_lot 0.12.0", "sc-block-builder", @@ -8560,8 +8102,8 @@ version = "0.10.0-dev" dependencies = [ "async-trait", "fork-tree", - "futures 0.3.21", - "log 0.4.16", + "futures", + "log", "merlin", "num-bigint", "num-rational 0.2.4", @@ -8608,10 +8150,8 @@ dependencies = [ name = "sc-consensus-babe-rpc" version = "0.10.0-dev" dependencies = [ - "futures 0.3.21", - "jsonrpc-core", - "jsonrpc-core-client", - "jsonrpc-derive", + "futures", + "jsonrpsee", "sc-consensus", "sc-consensus-babe", "sc-consensus-epochs", @@ -8631,6 +8171,7 @@ dependencies = [ "substrate-test-runtime-client", "tempfile", "thiserror", + "tokio", ] [[package]] @@ -8651,11 +8192,9 @@ version = "0.10.0-dev" dependencies = [ "assert_matches", "async-trait", - "futures 0.3.21", - "jsonrpc-core", - "jsonrpc-core-client", - "jsonrpc-derive", - "log 0.4.16", + "futures", + "jsonrpsee", + "log", "parity-scale-codec", "sc-basic-authorship", "sc-client-api", @@ -8689,9 +8228,9 @@ name = "sc-consensus-pow" version = "0.10.0-dev" dependencies = [ "async-trait", - "futures 0.3.21", + "futures", "futures-timer", - "log 0.4.16", + "log", "parity-scale-codec", "parking_lot 0.12.0", "sc-client-api", @@ -8713,9 +8252,9 @@ name = "sc-consensus-slots" version = "0.10.0-dev" dependencies = [ "async-trait", - "futures 0.3.21", + "futures", "futures-timer", - "log 0.4.16", + "log", "parity-scale-codec", "sc-client-api", "sc-consensus", @@ -8803,7 +8342,7 @@ dependencies = [ name = "sc-executor-wasmi" version = "0.10.0-dev" dependencies = [ - "log 0.4.16", + "log", "parity-scale-codec", "sc-allocator", "sc-executor-common", @@ -8819,7 +8358,7 @@ version = "0.10.0-dev" 
dependencies = [ "cfg-if 1.0.0", "libc", - "log 0.4.16", + "log", "parity-scale-codec", "parity-wasm 0.42.2", "sc-allocator", @@ -8843,10 +8382,10 @@ dependencies = [ "dyn-clone", "finality-grandpa", "fork-tree", - "futures 0.3.21", + "futures", "futures-timer", "hex", - "log 0.4.16", + "log", "parity-scale-codec", "parking_lot 0.12.0", "rand 0.8.4", @@ -8885,12 +8424,9 @@ name = "sc-finality-grandpa-rpc" version = "0.10.0-dev" dependencies = [ "finality-grandpa", - "futures 0.3.21", - "jsonrpc-core", - "jsonrpc-core-client", - "jsonrpc-derive", - "jsonrpc-pubsub", - "log 0.4.16", + "futures", + "jsonrpsee", + "log", "parity-scale-codec", "sc-block-builder", "sc-client-api", @@ -8905,6 +8441,7 @@ dependencies = [ "sp-runtime", "substrate-test-runtime-client", "thiserror", + "tokio", ] [[package]] @@ -8912,9 +8449,9 @@ name = "sc-informant" version = "0.10.0-dev" dependencies = [ "ansi_term", - "futures 0.3.21", + "futures", "futures-timer", - "log 0.4.16", + "log", "parity-util-mem", "sc-client-api", "sc-network", @@ -8947,19 +8484,19 @@ dependencies = [ "async-trait", "asynchronous-codec", "bitflags", - "bytes 1.1.0", + "bytes", "cid", "either", "fnv", "fork-tree", - "futures 0.3.21", + "futures", "futures-timer", "hex", "ip_network", "libp2p", "linked-hash-map", "linked_hash_set", - "log 0.4.16", + "log", "lru", "parity-scale-codec", "parking_lot 0.12.0", @@ -8977,7 +8514,7 @@ dependencies = [ "sc-utils", "serde", "serde_json", - "smallvec 1.8.0", + "smallvec", "sp-arithmetic", "sp-blockchain", "sp-consensus", @@ -9000,12 +8537,12 @@ dependencies = [ name = "sc-network-common" version = "0.10.0-dev" dependencies = [ - "futures 0.3.21", + "futures", "libp2p", "parity-scale-codec", "prost-build", "sc-peerset", - "smallvec 1.8.0", + "smallvec", ] [[package]] @@ -9014,10 +8551,10 @@ version = "0.10.0-dev" dependencies = [ "ahash", "async-std", - "futures 0.3.21", + "futures", "futures-timer", "libp2p", - "log 0.4.16", + "log", "lru", "quickcheck", "sc-network", @@ -9034,9 +8571,9 @@ dependencies = [ "bitflags", "either", "fork-tree", - "futures 0.3.21", + "futures", "libp2p", - "log 0.4.16", + "log", "lru", "parity-scale-codec", "prost", @@ -9047,7 +8584,7 @@ dependencies = [ "sc-consensus", "sc-network-common", "sc-peerset", - "smallvec 1.8.0", + "smallvec", "sp-arithmetic", "sp-blockchain", "sp-consensus", @@ -9066,10 +8603,10 @@ version = "0.8.0" dependencies = [ "async-std", "async-trait", - "futures 0.3.21", + "futures", "futures-timer", "libp2p", - "log 0.4.16", + "log", "parking_lot 0.12.0", "rand 0.7.3", "sc-block-builder", @@ -9092,12 +8629,12 @@ dependencies = [ name = "sc-offchain" version = "4.0.0-dev" dependencies = [ - "bytes 1.1.0", + "bytes", "fnv", - "futures 0.3.21", + "futures", "futures-timer", "hex", - "hyper 0.14.16", + "hyper", "hyper-rustls", "lazy_static", "num_cpus", @@ -9128,9 +8665,9 @@ dependencies = [ name = "sc-peerset" version = "4.0.0-dev" dependencies = [ - "futures 0.3.21", + "futures", "libp2p", - "log 0.4.16", + "log", "rand 0.7.3", "sc-utils", "serde_json", @@ -9141,7 +8678,7 @@ dependencies = [ name = "sc-proposer-metrics" version = "0.10.0-dev" dependencies = [ - "log 0.4.16", + "log", "substrate-prometheus-endpoint", ] @@ -9150,12 +8687,12 @@ name = "sc-rpc" version = "4.0.0-dev" dependencies = [ "assert_matches", - "futures 0.3.21", + "env_logger 0.9.0", + "futures", "hash-db", - "jsonrpc-core", - "jsonrpc-pubsub", + "jsonrpsee", "lazy_static", - "log 0.4.16", + "log", "parity-scale-codec", "parking_lot 0.12.0", "sc-block-builder", @@ -9180,18 
+8717,16 @@ dependencies = [ "sp-session", "sp-version", "substrate-test-runtime-client", + "tokio", ] [[package]] name = "sc-rpc-api" version = "0.10.0-dev" dependencies = [ - "futures 0.3.21", - "jsonrpc-core", - "jsonrpc-core-client", - "jsonrpc-derive", - "jsonrpc-pubsub", - "log 0.4.16", + "futures", + "jsonrpsee", + "log", "parity-scale-codec", "parking_lot 0.12.0", "sc-chain-spec", @@ -9211,13 +8746,9 @@ dependencies = [ name = "sc-rpc-server" version = "4.0.0-dev" dependencies = [ - "futures 0.3.21", - "jsonrpc-core", - "jsonrpc-http-server", - "jsonrpc-ipc-server", - "jsonrpc-pubsub", - "jsonrpc-ws-server", - "log 0.4.16", + "futures", + "jsonrpsee", + "log", "serde_json", "substrate-prometheus-endpoint", "tokio", @@ -9245,12 +8776,11 @@ dependencies = [ "async-trait", "directories", "exit-future", - "futures 0.3.21", + "futures", "futures-timer", "hash-db", - "jsonrpc-core", - "jsonrpc-pubsub", - "log 0.4.16", + "jsonrpsee", + "log", "parity-scale-codec", "parity-util-mem", "parking_lot 0.12.0", @@ -9310,10 +8840,10 @@ name = "sc-service-test" version = "2.0.0" dependencies = [ "fdlimit", - "futures 0.3.21", + "futures", "hex", "hex-literal", - "log 0.4.16", + "log", "parity-scale-codec", "parking_lot 0.12.0", "sc-block-builder", @@ -9345,7 +8875,7 @@ dependencies = [ name = "sc-state-db" version = "0.10.0-dev" dependencies = [ - "log 0.4.16", + "log", "parity-scale-codec", "parity-util-mem", "parity-util-mem-derive", @@ -9358,9 +8888,7 @@ dependencies = [ name = "sc-sync-state-rpc" version = "0.10.0-dev" dependencies = [ - "jsonrpc-core", - "jsonrpc-core-client", - "jsonrpc-derive", + "jsonrpsee", "parity-scale-codec", "sc-chain-spec", "sc-client-api", @@ -9378,9 +8906,9 @@ dependencies = [ name = "sc-sysinfo" version = "6.0.0-dev" dependencies = [ - "futures 0.3.21", + "futures", "libc", - "log 0.4.16", + "log", "rand 0.7.3", "rand_pcg 0.2.1", "regex", @@ -9397,9 +8925,9 @@ name = "sc-telemetry" version = "4.0.0-dev" dependencies = [ "chrono", - "futures 0.3.21", + "futures", "libp2p", - "log 0.4.16", + "log", "parking_lot 0.12.0", "pin-project 1.0.10", "rand 0.7.3", @@ -9419,7 +8947,7 @@ dependencies = [ "criterion", "lazy_static", "libc", - "log 0.4.16", + "log", "once_cell", "parking_lot 0.12.0", "regex", @@ -9444,7 +8972,7 @@ dependencies = [ name = "sc-tracing-proc-macro" version = "4.0.0-dev" dependencies = [ - "proc-macro-crate 1.1.3", + "proc-macro-crate", "proc-macro2", "quote", "syn", @@ -9456,11 +8984,11 @@ version = "4.0.0-dev" dependencies = [ "assert_matches", "criterion", - "futures 0.3.21", + "futures", "futures-timer", "hex", "linked-hash-map", - "log 0.4.16", + "log", "parity-scale-codec", "parity-util-mem", "parking_lot 0.12.0", @@ -9488,8 +9016,8 @@ dependencies = [ name = "sc-transaction-pool-api" version = "4.0.0-dev" dependencies = [ - "futures 0.3.21", - "log 0.4.16", + "futures", + "log", "serde", "sp-blockchain", "sp-runtime", @@ -9500,10 +9028,10 @@ dependencies = [ name = "sc-utils" version = "4.0.0-dev" dependencies = [ - "futures 0.3.21", + "futures", "futures-timer", "lazy_static", - "log 0.4.16", + "log", "parking_lot 0.12.0", "prometheus", "tokio-test", @@ -9529,7 +9057,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7805950c36512db9e3251c970bb7ac425f326716941862205d612ab3b5e46e2" dependencies = [ - "proc-macro-crate 1.1.3", + "proc-macro-crate", "proc-macro2", "quote", "syn", @@ -9542,7 +9070,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" dependencies = [ "lazy_static", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -9793,12 +9321,6 @@ dependencies = [ "opaque-debug 0.3.0", ] -[[package]] -name = "sha1" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" - [[package]] name = "sha2" version = "0.8.2" @@ -9919,15 +9441,6 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" -[[package]] -name = "smallvec" -version = "0.6.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97fcaeba89edba30f044a10c6a3cc39df9c3f17d7cd829dd1446cab35f890e0" -dependencies = [ - "maybe-uninit", -] - [[package]] name = "smallvec" version = "1.8.0" @@ -9965,7 +9478,7 @@ checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" dependencies = [ "cfg-if 1.0.0", "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -9975,7 +9488,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" dependencies = [ "libc", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -9984,12 +9497,12 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" dependencies = [ - "base64 0.13.0", - "bytes 1.1.0", + "base64", + "bytes", "flate2", - "futures 0.3.21", + "futures", "httparse", - "log 0.4.16", + "log", "rand 0.8.4", "sha-1 0.9.4", ] @@ -9999,7 +9512,7 @@ name = "sp-api" version = "4.0.0-dev" dependencies = [ "hash-db", - "log 0.4.16", + "log", "parity-scale-codec", "sp-api-proc-macro", "sp-core", @@ -10016,7 +9529,7 @@ name = "sp-api-proc-macro" version = "4.0.0-dev" dependencies = [ "blake2", - "proc-macro-crate 1.1.3", + "proc-macro-crate", "proc-macro2", "quote", "syn", @@ -10027,8 +9540,8 @@ name = "sp-api-test" version = "2.0.1" dependencies = [ "criterion", - "futures 0.3.21", - "log 0.4.16", + "futures", + "log", "parity-scale-codec", "rustversion", "sc-block-builder", @@ -10132,8 +9645,8 @@ dependencies = [ name = "sp-blockchain" version = "4.0.0-dev" dependencies = [ - "futures 0.3.21", - "log 0.4.16", + "futures", + "log", "lru", "parity-scale-codec", "parking_lot 0.12.0", @@ -10150,9 +9663,9 @@ name = "sp-consensus" version = "0.10.0-dev" dependencies = [ "async-trait", - "futures 0.3.21", + "futures", "futures-timer", - "log 0.4.16", + "log", "parity-scale-codec", "sp-core", "sp-inherents", @@ -10250,7 +9763,7 @@ dependencies = [ "criterion", "dyn-clonable", "ed25519-dalek", - "futures 0.3.21", + "futures", "hash-db", "hash256-std-hasher", "hex", @@ -10258,7 +9771,7 @@ dependencies = [ "impl-serde", "lazy_static", "libsecp256k1", - "log 0.4.16", + "log", "merlin", "num-traits", "parity-scale-codec", @@ -10344,7 +9857,7 @@ name = "sp-finality-grandpa" version = "4.0.0-dev" dependencies = [ "finality-grandpa", - "log 0.4.16", + "log", "parity-scale-codec", "scale-info", "serde", @@ -10361,7 +9874,7 @@ name = "sp-inherents" version = "4.0.0-dev" dependencies = [ "async-trait", - "futures 0.3.21", + "futures", "impl-trait-for-tuples", "parity-scale-codec", "sp-core", @@ -10374,10 +9887,10 @@ dependencies = [ name = "sp-io" version = "6.0.0" dependencies = [ - "futures 0.3.21", + "futures", 
"hash-db", "libsecp256k1", - "log 0.4.16", + "log", "parity-scale-codec", "parking_lot 0.12.0", "secp256k1", @@ -10409,7 +9922,7 @@ name = "sp-keystore" version = "0.12.0" dependencies = [ "async-trait", - "futures 0.3.21", + "futures", "merlin", "parity-scale-codec", "parking_lot 0.12.0", @@ -10435,7 +9948,7 @@ name = "sp-mmr-primitives" version = "4.0.0-dev" dependencies = [ "hex-literal", - "log 0.4.16", + "log", "parity-scale-codec", "serde", "sp-api", @@ -10508,7 +10021,7 @@ dependencies = [ "either", "hash256-std-hasher", "impl-trait-for-tuples", - "log 0.4.16", + "log", "parity-scale-codec", "parity-util-mem", "paste 1.0.6", @@ -10555,7 +10068,7 @@ name = "sp-runtime-interface-proc-macro" version = "5.0.0" dependencies = [ "Inflector", - "proc-macro-crate 1.1.3", + "proc-macro-crate", "proc-macro2", "quote", "syn", @@ -10604,7 +10117,7 @@ name = "sp-sandbox" version = "0.10.0-dev" dependencies = [ "assert_matches", - "log 0.4.16", + "log", "parity-scale-codec", "sp-core", "sp-io", @@ -10651,13 +10164,13 @@ version = "0.12.0" dependencies = [ "hash-db", "hex-literal", - "log 0.4.16", + "log", "num-traits", "parity-scale-codec", "parking_lot 0.12.0", "pretty_assertions", "rand 0.7.3", - "smallvec 1.8.0", + "smallvec", "sp-core", "sp-externalities", "sp-panic-handler", @@ -10689,7 +10202,7 @@ dependencies = [ name = "sp-tasks" version = "4.0.0-dev" dependencies = [ - "log 0.4.16", + "log", "parity-scale-codec", "sp-core", "sp-externalities", @@ -10716,7 +10229,7 @@ version = "4.0.0-dev" dependencies = [ "async-trait", "futures-timer", - "log 0.4.16", + "log", "parity-scale-codec", "sp-api", "sp-inherents", @@ -10749,7 +10262,7 @@ name = "sp-transaction-storage-proof" version = "4.0.0-dev" dependencies = [ "async-trait", - "log 0.4.16", + "log", "parity-scale-codec", "scale-info", "sp-core", @@ -10811,7 +10324,7 @@ name = "sp-wasm-interface" version = "6.0.0" dependencies = [ "impl-trait-for-tuples", - "log 0.4.16", + "log", "parity-scale-codec", "sp-std", "wasmi", @@ -10938,8 +10451,8 @@ version = "3.0.0" dependencies = [ "frame-support", "frame-system", - "futures 0.3.21", - "jsonrpc-client-transports", + "futures", + "jsonrpsee", "parity-scale-codec", "sc-rpc-api", "scale-info", @@ -10952,17 +10465,17 @@ dependencies = [ name = "substrate-frame-rpc-system" version = "4.0.0-dev" dependencies = [ + "assert_matches", "frame-system-rpc-runtime-api", - "futures 0.3.21", - "jsonrpc-core", - "jsonrpc-core-client", - "jsonrpc-derive", - "log 0.4.16", + "futures", + "jsonrpsee", + "log", "parity-scale-codec", "sc-client-api", "sc-rpc-api", "sc-transaction-pool", "sc-transaction-pool-api", + "serde_json", "sp-api", "sp-block-builder", "sp-blockchain", @@ -10970,6 +10483,7 @@ dependencies = [ "sp-runtime", "sp-tracing", "substrate-test-runtime-client", + "tokio", ] [[package]] @@ -10977,8 +10491,8 @@ name = "substrate-prometheus-endpoint" version = "0.10.0-dev" dependencies = [ "futures-util", - "hyper 0.14.16", - "log 0.4.16", + "hyper", + "log", "prometheus", "thiserror", "tokio", @@ -10988,10 +10502,8 @@ dependencies = [ name = "substrate-state-trie-migration-rpc" version = "4.0.0-dev" dependencies = [ - "jsonrpc-core", - "jsonrpc-core-client", - "jsonrpc-derive", - "log 0.4.16", + "jsonrpsee", + "log", "parity-scale-codec", "sc-client-api", "sc-rpc-api", @@ -11012,7 +10524,7 @@ name = "substrate-test-client" version = "2.0.1" dependencies = [ "async-trait", - "futures 0.3.21", + "futures", "hex", "parity-scale-codec", "sc-client-api", @@ -11041,8 +10553,8 @@ dependencies = [ 
"frame-support", "frame-system", "frame-system-rpc-runtime-api", - "futures 0.3.21", - "log 0.4.16", + "futures", + "log", "memory-db", "pallet-babe", "pallet-timestamp", @@ -11083,7 +10595,7 @@ dependencies = [ name = "substrate-test-runtime-client" version = "2.0.0" dependencies = [ - "futures 0.3.21", + "futures", "parity-scale-codec", "sc-block-builder", "sc-client-api", @@ -11101,7 +10613,7 @@ dependencies = [ name = "substrate-test-runtime-transaction-pool" version = "2.0.0" dependencies = [ - "futures 0.3.21", + "futures", "parity-scale-codec", "parking_lot 0.12.0", "sc-transaction-pool", @@ -11116,7 +10628,7 @@ dependencies = [ name = "substrate-test-utils" version = "4.0.0-dev" dependencies = [ - "futures 0.3.21", + "futures", "sc-service", "substrate-test-utils-derive", "tokio", @@ -11127,7 +10639,7 @@ dependencies = [ name = "substrate-test-utils-derive" version = "0.10.0-dev" dependencies = [ - "proc-macro-crate 1.1.3", + "proc-macro-crate", "proc-macro2", "quote", "syn", @@ -11230,7 +10742,7 @@ dependencies = [ "libc", "redox_syscall 0.2.10", "remove_dir_all", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -11241,7 +10753,7 @@ checksum = "edd106a334b7657c10b7c540a0106114feadeb4dc314513e97df481d5d966f42" dependencies = [ "byteorder", "dirs", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -11331,7 +10843,7 @@ checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" dependencies = [ "libc", "wasi 0.10.0+wasi-snapshot-preview1", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -11393,10 +10905,10 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2af73ac49756f3f7c01172e34a23e5d0216f6c32333757c2c61feb2bbff5a5ee" dependencies = [ - "bytes 1.1.0", + "bytes", "libc", "memchr", - "mio 0.8.0", + "mio", "num_cpus", "once_cell", "parking_lot 0.12.0", @@ -11404,39 +10916,7 @@ dependencies = [ "signal-hook-registry", "socket2 0.4.4", "tokio-macros", - "winapi 0.3.9", -] - -[[package]] -name = "tokio-codec" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25b2998660ba0e70d18684de5d06b70b70a3a747469af9dea7618cc59e75976b" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "tokio-io", -] - -[[package]] -name = "tokio-executor" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb2d1b8f4548dbf5e1f7818512e9c406860678f29c300cdf0ebac72d1a3a1671" -dependencies = [ - "crossbeam-utils 0.7.2", - "futures 0.1.31", -] - -[[package]] -name = "tokio-io" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57fc868aae093479e3131e3d165c93b1c7474109d13c90ec0dda2a1bbfff0674" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "log 0.4.16", + "winapi", ] [[package]] @@ -11450,35 +10930,6 @@ dependencies = [ "syn", ] -[[package]] -name = "tokio-native-tls" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" -dependencies = [ - "native-tls", - "tokio", -] - -[[package]] -name = "tokio-reactor" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09bc590ec4ba8ba87652da2068d150dcada2cfa2e07faae270a5e0409aa51351" -dependencies = [ - "crossbeam-utils 0.7.2", - "futures 0.1.31", - "lazy_static", - "log 0.4.16", - "mio 0.6.23", - "num_cpus", - "parking_lot 0.9.0", - "slab", - "tokio-executor", - 
"tokio-io", - "tokio-sync", -] - [[package]] name = "tokio-rustls" version = "0.22.0" @@ -11512,30 +10963,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "tokio-sync" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edfe50152bc8164fcc456dab7891fa9bf8beaf01c5ee7e1dd43a397c3cf87dee" -dependencies = [ - "fnv", - "futures 0.1.31", -] - -[[package]] -name = "tokio-tcp" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98df18ed66e3b72e742f185882a9e201892407957e45fbff8da17ae7a7c51f72" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "iovec", - "mio 0.6.23", - "tokio-io", - "tokio-reactor", -] - [[package]] name = "tokio-test" version = "0.4.2" @@ -11543,33 +10970,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53474327ae5e166530d17f2d956afcb4f8a004de581b3cae10f12006bc8163e3" dependencies = [ "async-stream", - "bytes 1.1.0", + "bytes", "futures-core", "tokio", "tokio-stream", ] -[[package]] -name = "tokio-tls" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "354b8cd83825b3c20217a9dc174d6a0c67441a2fae5c41bcb1ea6679f6ae0f7c" -dependencies = [ - "futures 0.1.31", - "native-tls", - "tokio-io", -] - [[package]] name = "tokio-util" version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1caa0b0c8d94a049db56b5acf8cba99dc0623aab1b26d5b5f5e2d945846b3592" dependencies = [ - "bytes 1.1.0", + "bytes", "futures-core", "futures-sink", - "log 0.4.16", + "log", "pin-project-lite 0.2.6", "tokio", ] @@ -11580,7 +10996,7 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0edfdeb067411dba2044da6d1cb2df793dd35add7888d73c16e3381ded401764" dependencies = [ - "bytes 1.1.0", + "bytes", "futures-core", "futures-io", "futures-sink", @@ -11610,7 +11026,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105" dependencies = [ "cfg-if 1.0.0", - "log 0.4.16", + "log", "pin-project-lite 0.2.6", "tracing-attributes", "tracing-core", @@ -11655,7 +11071,7 @@ checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" dependencies = [ "ahash", "lazy_static", - "log 0.4.16", + "log", "lru", "tracing-core", ] @@ -11685,7 +11101,7 @@ dependencies = [ "serde", "serde_json", "sharded-slab", - "smallvec 1.8.0", + "smallvec", "thread_local", "tracing", "tracing-core", @@ -11693,12 +11109,6 @@ dependencies = [ "tracing-serde", ] -[[package]] -name = "traitobject" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd1f82c56340fdf16f2a953d7bda4f8fdffba13d93b00844c25572110b26079" - [[package]] name = "treeline" version = "0.1.0" @@ -11729,9 +11139,9 @@ checksum = "d32d034c0d3db64b43c31de38e945f15b40cd4ca6d2dcfc26d4798ce8de4ab83" dependencies = [ "hash-db", "hashbrown 0.12.0", - "log 0.4.16", + "log", "rustc-hex", - "smallvec 1.8.0", + "smallvec", ] [[package]] @@ -11766,15 +11176,15 @@ dependencies = [ "futures-channel", "futures-io", "futures-util", - "idna 0.2.3", + "idna", "ipnet", "lazy_static", - "log 0.4.16", + "log", "rand 0.8.4", - "smallvec 1.8.0", + "smallvec", "thiserror", "tinyvec", - "url 2.2.1", + "url", ] [[package]] @@ -11787,11 +11197,11 @@ dependencies = [ "futures-util", "ipconfig", "lazy_static", - "log 0.4.16", + "log", 
"lru-cache", "parking_lot 0.12.0", "resolv-conf", - "smallvec 1.8.0", + "smallvec", "thiserror", "trust-dns-proto", ] @@ -11808,7 +11218,7 @@ version = "0.10.0-dev" dependencies = [ "clap 3.1.6", "jsonrpsee", - "log 0.4.16", + "log", "parity-scale-codec", "remote-externalities", "sc-chain-spec", @@ -11860,12 +11270,6 @@ dependencies = [ "static_assertions", ] -[[package]] -name = "typeable" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1410f6f91f21d1612654e7cc69193b0334f909dcf2c790c4826254fbb86f8887" - [[package]] name = "typenum" version = "1.15.0" @@ -11890,22 +11294,13 @@ dependencies = [ "static_assertions", ] -[[package]] -name = "unicase" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4765f83163b74f957c797ad9253caf97f103fb064d3999aea9568d09fc8a33" -dependencies = [ - "version_check 0.1.5", -] - [[package]] name = "unicase" version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" dependencies = [ - "version_check 0.9.2", + "version_check", ] [[package]] @@ -11940,9 +11335,9 @@ checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" [[package]] name = "unicode-xid" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" +checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04" [[package]] name = "universal-hash" @@ -11961,7 +11356,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d86a8dc7f45e4c1b0d30e43038c38f274e77af056aa5f74b93c2cf9eb3c1c836" dependencies = [ "asynchronous-codec", - "bytes 1.1.0", + "bytes", "futures-io", "futures-util", ] @@ -11972,17 +11367,6 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" -[[package]] -name = "url" -version = "1.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd4e7c0d531266369519a4aa4f399d748bd37043b00bde1e4ff1f60a120b355a" -dependencies = [ - "idna 0.1.5", - "matches", - "percent-encoding 1.0.1", -] - [[package]] name = "url" version = "2.2.1" @@ -11990,9 +11374,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ccd964113622c8e9322cfac19eb1004a07e636c545f325da085d5cdde6f1f8b" dependencies = [ "form_urlencoded", - "idna 0.2.3", + "idna", "matches", - "percent-encoding 2.1.0", + "percent-encoding", ] [[package]] @@ -12008,7 +11392,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "79923f7731dc61ebfba3633098bf3ac533bbd35ccd8c57e7088d9a5eebe0263f" dependencies = [ "ctor", - "version_check 0.9.2", + "version_check", ] [[package]] @@ -12023,12 +11407,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eafc1b9b2dfc6f5529177b62cf806484db55b32dc7c9658a118e11bbeb33061d" -[[package]] -name = "version_check" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" - [[package]] name = "version_check" version = "0.9.2" @@ -12063,7 +11441,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" dependencies = [ "same-file", - "winapi 0.3.9", + "winapi", "winapi-util", ] @@ -12073,7 +11451,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" dependencies = [ - "log 0.4.16", + "log", "try-lock", ] @@ -12107,7 +11485,7 @@ checksum = "f34c405b4f0658583dba0c1c7c9b694f3cac32655db463b56c254a1c75269523" dependencies = [ "bumpalo", "lazy_static", - "log 0.4.16", + "log", "proc-macro2", "quote", "syn", @@ -12161,7 +11539,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0c32691b6c7e6c14e7f8fd55361a9088b507aa49620fcd06c09b3a1082186b9" dependencies = [ - "log 0.4.16", + "log", "parity-wasm 0.32.0", "rustc-demangle", ] @@ -12181,7 +11559,7 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be0ecb0db480561e9a7642b5d3e4187c128914e58aa84330b9493e3eb68c5e7f" dependencies = [ - "futures 0.3.21", + "futures", "js-sys", "parking_lot 0.11.2", "pin-utils", @@ -12214,7 +11592,7 @@ dependencies = [ "wasmer-types", "wasmer-vm", "wat", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -12228,7 +11606,7 @@ dependencies = [ "rkyv", "serde", "serde_bytes", - "smallvec 1.8.0", + "smallvec", "target-lexicon", "thiserror", "wasmer-types", @@ -12249,7 +11627,7 @@ dependencies = [ "loupe", "more-asserts", "rayon", - "smallvec 1.8.0", + "smallvec", "target-lexicon", "tracing", "wasmer-compiler", @@ -12270,7 +11648,7 @@ dependencies = [ "loupe", "more-asserts", "rayon", - "smallvec 1.8.0", + "smallvec", "wasmer-compiler", "wasmer-types", "wasmer-vm", @@ -12352,7 +11730,7 @@ dependencies = [ "wasmer-engine", "wasmer-types", "wasmer-vm", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -12400,7 +11778,7 @@ dependencies = [ "serde", "thiserror", "wasmer-types", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -12454,7 +11832,7 @@ dependencies = [ "indexmap", "lazy_static", "libc", - "log 0.4.16", + "log", "object 0.27.1", "once_cell", "paste 1.0.6", @@ -12469,7 +11847,7 @@ dependencies = [ "wasmtime-environ", "wasmtime-jit", "wasmtime-runtime", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -12479,16 +11857,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85c6ab24291fa7cb3a181f5669f6c72599b7ef781669759b45c7828c5999d0c0" dependencies = [ "anyhow", - "base64 0.13.0", + "base64", "bincode", "directories-next", "file-per-thread-logger", - "log 0.4.16", + "log", "rustix", "serde", "sha2 0.9.8", "toml", - "winapi 0.3.9", + "winapi", "zstd", ] @@ -12505,7 +11883,7 @@ dependencies = [ "cranelift-native", "cranelift-wasm", "gimli 0.26.1", - "log 0.4.16", + "log", "more-asserts", "object 0.27.1", "target-lexicon", @@ -12524,7 +11902,7 @@ dependencies = [ "cranelift-entity 0.82.3", "gimli 0.26.1", "indexmap", - "log 0.4.16", + "log", "more-asserts", "object 0.27.1", "serde", @@ -12546,7 +11924,7 @@ dependencies = [ "cfg-if 1.0.0", "cpp_demangle", "gimli 0.26.1", - "log 0.4.16", + "log", "object 0.27.1", "region 2.2.0", "rustc-demangle", @@ -12557,7 +11935,7 @@ dependencies = [ "wasmtime-environ", "wasmtime-jit-debug", "wasmtime-runtime", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -12583,7 +11961,7 @@ dependencies = [ "cfg-if 1.0.0", "indexmap", "libc", - "log 0.4.16", + "log", "mach", "memoffset", "more-asserts", @@ 
-12593,7 +11971,7 @@ dependencies = [ "thiserror", "wasmtime-environ", "wasmtime-jit-debug", - "winapi 0.3.9", + "winapi", ] [[package]] @@ -12665,47 +12043,6 @@ dependencies = [ "webpki 0.22.0", ] -[[package]] -name = "websocket" -version = "0.24.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413b37840b9e27b340ce91b319ede10731de8c72f5bc4cb0206ec1ca4ce581d0" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.31", - "hyper 0.10.16", - "native-tls", - "rand 0.6.5", - "tokio-codec", - "tokio-io", - "tokio-reactor", - "tokio-tcp", - "tokio-tls", - "unicase 1.4.2", - "url 1.7.2", - "websocket-base", -] - -[[package]] -name = "websocket-base" -version = "0.24.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e3810f0d00c4dccb54c30a4eee815e703232819dec7b007db115791c42aa374" -dependencies = [ - "base64 0.10.1", - "bitflags", - "byteorder", - "bytes 0.4.12", - "futures 0.1.31", - "native-tls", - "rand 0.6.5", - "sha1", - "tokio-codec", - "tokio-io", - "tokio-tcp", - "tokio-tls", -] - [[package]] name = "wepoll-sys" version = "3.0.1" @@ -12731,12 +12068,6 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17882f045410753661207383517a6f62ec3dbeb6a4ed2acce01f0728238d1983" -[[package]] -name = "winapi" -version = "0.2.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" - [[package]] name = "winapi" version = "0.3.9" @@ -12747,12 +12078,6 @@ dependencies = [ "winapi-x86_64-pc-windows-gnu", ] -[[package]] -name = "winapi-build" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" - [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" @@ -12765,7 +12090,7 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" dependencies = [ - "winapi 0.3.9", + "winapi", ] [[package]] @@ -12866,17 +12191,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" dependencies = [ - "winapi 0.3.9", -] - -[[package]] -name = "ws2_32-sys" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" -dependencies = [ - "winapi 0.2.8", - "winapi-build", + "winapi", ] [[package]] @@ -12905,8 +12220,8 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c0608f53c1dc0bad505d03a34bbd49fbf2ad7b51eb036123e896365532745a1" dependencies = [ - "futures 0.3.21", - "log 0.4.16", + "futures", + "log", "nohash-hasher", "parking_lot 0.12.0", "rand 0.8.4", diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index e642ce3c0411e..ab91dc7990380 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -42,7 +42,7 @@ frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment" } # These dependencies are used for the node template's RPCs -jsonrpc-core = "18.0.0" +jsonrpsee = { version = 
"0.12.0", features = ["server"] } sc-rpc = { version = "4.0.0-dev", path = "../../../client/rpc" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sc-rpc-api = { version = "0.10.0-dev", path = "../../../client/rpc-api" } diff --git a/bin/node-template/node/src/rpc.rs b/bin/node-template/node/src/rpc.rs index 7f3701b5ab74f..7edae4d81474f 100644 --- a/bin/node-template/node/src/rpc.rs +++ b/bin/node-template/node/src/rpc.rs @@ -7,13 +7,15 @@ use std::sync::Arc; +use jsonrpsee::RpcModule; use node_template_runtime::{opaque::Block, AccountId, Balance, Index}; -pub use sc_rpc_api::DenyUnsafe; use sc_transaction_pool_api::TransactionPool; use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder; use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; +pub use sc_rpc_api::DenyUnsafe; + /// Full client dependencies. pub struct FullDeps { /// The client instance to use. @@ -25,7 +27,9 @@ pub struct FullDeps { } /// Instantiate all full RPC extensions. -pub fn create_full(deps: FullDeps) -> jsonrpc_core::IoHandler +pub fn create_full( + deps: FullDeps, +) -> Result, Box> where C: ProvideRuntimeApi, C: HeaderBackend + HeaderMetadata + 'static, @@ -35,20 +39,19 @@ where C::Api: BlockBuilder, P: TransactionPool + 'static, { - use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApi}; - use substrate_frame_rpc_system::{FullSystem, SystemApi}; + use pallet_transaction_payment_rpc::{TransactionPaymentApiServer, TransactionPaymentRpc}; + use substrate_frame_rpc_system::{SystemApiServer, SystemRpc}; - let mut io = jsonrpc_core::IoHandler::default(); + let mut module = RpcModule::new(()); let FullDeps { client, pool, deny_unsafe } = deps; - io.extend_with(SystemApi::to_delegate(FullSystem::new(client.clone(), pool, deny_unsafe))); - - io.extend_with(TransactionPaymentApi::to_delegate(TransactionPayment::new(client))); + module.merge(SystemRpc::new(client.clone(), pool.clone(), deny_unsafe).into_rpc())?; + module.merge(TransactionPaymentRpc::new(client).into_rpc())?; // Extend this RPC with a custom API by using the following syntax. // `YourRpcStruct` should have a reference to a client, which is needed // to call into the runtime. 
- // `io.extend_with(YourRpcTrait::to_delegate(YourRpcStruct::new(ReferenceToClient, ...)));` + // `module.merge(YourRpcTrait::into_rpc(YourRpcStruct::new(ReferenceToClient, ...)))?;` - io + Ok(module) } diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 5f46a16a9668f..f45f914d94f44 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -228,8 +228,7 @@ pub fn new_full(mut config: Configuration) -> Result Box::new(move |deny_unsafe, _| { let deps = crate::rpc::FullDeps { client: client.clone(), pool: pool.clone(), deny_unsafe }; - - Ok(crate::rpc::create_full(deps)) + crate::rpc::create_full(deps).map_err(Into::into) }) }; @@ -239,7 +238,7 @@ pub fn new_full(mut config: Configuration) -> Result keystore: keystore_container.sync_keystore(), task_manager: &mut task_manager, transaction_pool: transaction_pool.clone(), - rpc_extensions_builder, + rpc_builder: rpc_extensions_builder, backend, system_rpc_tx, config, diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 6bb36b9f9ab94..c18f2f5d1a108 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -37,6 +37,7 @@ crate-type = ["cdylib", "rlib"] clap = { version = "3.1.6", features = ["derive"], optional = true } codec = { package = "parity-scale-codec", version = "3.0.0" } serde = { version = "1.0.136", features = ["derive"] } +jsonrpsee = { version = "0.12.0", features = ["server"] } futures = "0.3.21" hex-literal = "0.3.4" log = "0.4.16" diff --git a/bin/node/cli/benches/block_production.rs b/bin/node/cli/benches/block_production.rs index 6eab08c39e5a2..376241d8157bf 100644 --- a/bin/node/cli/benches/block_production.rs +++ b/bin/node/cli/benches/block_production.rs @@ -92,6 +92,10 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { rpc_cors: None, rpc_methods: Default::default(), rpc_max_payload: None, + rpc_max_request_size: None, + rpc_max_response_size: None, + rpc_id_provider: None, + rpc_max_subs_per_conn: None, ws_max_out_buffer_capacity: None, prometheus_config: None, telemetry_endpoints: None, diff --git a/bin/node/cli/benches/transaction_pool.rs b/bin/node/cli/benches/transaction_pool.rs index eb0e24d2fdd37..f1fce16d8c1b3 100644 --- a/bin/node/cli/benches/transaction_pool.rs +++ b/bin/node/cli/benches/transaction_pool.rs @@ -84,6 +84,10 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { rpc_cors: None, rpc_methods: Default::default(), rpc_max_payload: None, + rpc_max_request_size: None, + rpc_max_response_size: None, + rpc_id_provider: None, + rpc_max_subs_per_conn: None, ws_max_out_buffer_capacity: None, prometheus_config: None, telemetry_endpoints: None, diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 01c7eb9abe1b7..bff4be88002fb 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -134,7 +134,7 @@ pub fn new_partial( impl Fn( node_rpc::DenyUnsafe, sc_rpc::SubscriptionTaskExecutor, - ) -> Result, + ) -> Result, sc_service::Error>, ( sc_consensus_babe::BabeBlockImport, grandpa::LinkHalf, @@ -236,7 +236,7 @@ pub fn new_partial( let justification_stream = grandpa_link.justification_stream(); let shared_authority_set = grandpa_link.shared_authority_set().clone(); let shared_voter_state = grandpa::SharedVoterState::empty(); - let rpc_setup = shared_voter_state.clone(); + let shared_voter_state2 = shared_voter_state.clone(); let finality_proof_provider = grandpa::FinalityProofProvider::new_for_service( backend.clone(), @@ 
-277,7 +277,7 @@ pub fn new_partial( node_rpc::create_full(deps, rpc_backend.clone()).map_err(Into::into) }; - (rpc_extensions_builder, rpc_setup) + (rpc_extensions_builder, shared_voter_state2) }; Ok(sc_service::PartialComponents { @@ -332,7 +332,7 @@ pub fn new_full_base( keystore_container, select_chain, transaction_pool, - other: (rpc_extensions_builder, import_setup, rpc_setup, mut telemetry), + other: (rpc_builder, import_setup, rpc_setup, mut telemetry), } = new_partial(&config)?; let shared_voter_state = rpc_setup; @@ -386,7 +386,7 @@ pub fn new_full_base( client: client.clone(), keystore: keystore_container.sync_keystore(), network: network.clone(), - rpc_extensions_builder: Box::new(rpc_extensions_builder), + rpc_builder: Box::new(rpc_builder), transaction_pool: transaction_pool.clone(), task_manager: &mut task_manager, system_rpc_tx, diff --git a/bin/node/cli/tests/common.rs b/bin/node/cli/tests/common.rs index c17cabfa1d38a..9c739c2cf2d28 100644 --- a/bin/node/cli/tests/common.rs +++ b/bin/node/cli/tests/common.rs @@ -26,15 +26,14 @@ use nix::{ use node_primitives::Block; use remote_externalities::rpc_api; use std::{ + io::{BufRead, BufReader, Read}, ops::{Deref, DerefMut}, path::Path, - process::{Child, Command, ExitStatus}, + process::{self, Child, Command, ExitStatus}, time::Duration, }; use tokio::time::timeout; -static LOCALHOST_WS: &str = "ws://127.0.0.1:9944/"; - /// Wait for the given `child` the given number of `secs`. /// /// Returns the `Some(exit status)` or `None` if the process did not finish in the given time. @@ -63,8 +62,9 @@ pub fn wait_for(child: &mut Child, secs: u64) -> Result { pub async fn wait_n_finalized_blocks( n: usize, timeout_secs: u64, + url: &str, ) -> Result<(), tokio::time::error::Elapsed> { - timeout(Duration::from_secs(timeout_secs), wait_n_finalized_blocks_from(n, LOCALHOST_WS)).await + timeout(Duration::from_secs(timeout_secs), wait_n_finalized_blocks_from(n, url)).await } /// Wait for at least n blocks to be finalized from a specified node @@ -85,12 +85,23 @@ pub async fn wait_n_finalized_blocks_from(n: usize, url: &str) { /// Run the node for a while (3 blocks) pub async fn run_node_for_a_while(base_path: &Path, args: &[&str]) { - let mut cmd = Command::new(cargo_bin("substrate")); + let mut cmd = Command::new(cargo_bin("substrate")) + .stdout(process::Stdio::piped()) + .stderr(process::Stdio::piped()) + .args(args) + .arg("-d") + .arg(base_path) + .spawn() + .unwrap(); - let mut child = KillChildOnDrop(cmd.args(args).arg("-d").arg(base_path).spawn().unwrap()); + let stderr = cmd.stderr.take().unwrap(); + + let mut child = KillChildOnDrop(cmd); + + let (ws_url, _) = find_ws_url_from_output(stderr); // Let it produce some blocks. - let _ = wait_n_finalized_blocks(3, 30).await; + let _ = wait_n_finalized_blocks(3, 30, &ws_url).await; assert!(child.try_wait().unwrap().is_none(), "the process should still be running"); @@ -134,3 +145,30 @@ impl DerefMut for KillChildOnDrop { &mut self.0 } } + +/// Read the WS address from the output. +/// +/// This is hack to get the actual binded sockaddr because +/// substrate assigns a random port if the specified port was already binded. 
+pub fn find_ws_url_from_output(read: impl Read + Send) -> (String, String) { + let mut data = String::new(); + + let ws_url = BufReader::new(read) + .lines() + .find_map(|line| { + let line = + line.expect("failed to obtain next line from stdout for WS address discovery"); + data.push_str(&line); + + // does the line contain our port (we expect this specific output from substrate). + let sock_addr = match line.split_once("Running JSON-RPC WS server: addr=") { + None => return None, + Some((_, after)) => after.split_once(",").unwrap().0, + }; + + Some(format!("ws://{}", sock_addr)) + }) + .expect("We should get a WebSocket address"); + + (ws_url, data) +} diff --git a/bin/node/cli/tests/running_the_node_and_interrupt.rs b/bin/node/cli/tests/running_the_node_and_interrupt.rs index 35f0fc106613c..6d4a4b40425c4 100644 --- a/bin/node/cli/tests/running_the_node_and_interrupt.rs +++ b/bin/node/cli/tests/running_the_node_and_interrupt.rs @@ -25,7 +25,7 @@ use nix::{ }, unistd::Pid, }; -use std::process::{Child, Command}; +use std::process::{self, Child, Command}; use tempfile::tempdir; pub mod common; @@ -36,6 +36,8 @@ async fn running_the_node_works_and_can_be_interrupted() { let base_path = tempdir().expect("could not create a temp dir"); let mut cmd = common::KillChildOnDrop( Command::new(cargo_bin("substrate")) + .stdout(process::Stdio::piped()) + .stderr(process::Stdio::piped()) .args(&["--dev", "-d"]) .arg(base_path.path()) .arg("--db=paritydb") @@ -44,7 +46,13 @@ async fn running_the_node_works_and_can_be_interrupted() { .unwrap(), ); - common::wait_n_finalized_blocks(3, 30).await.unwrap(); + let stderr = cmd.stderr.take().unwrap(); + + let (ws_url, _) = common::find_ws_url_from_output(stderr); + + common::wait_n_finalized_blocks(3, 30, &ws_url) + .await + .expect("Blocks are produced in time"); assert!(cmd.try_wait().unwrap().is_none(), "the process should still be running"); kill(Pid::from_raw(cmd.id().try_into().unwrap()), signal).unwrap(); assert_eq!( @@ -69,6 +77,8 @@ async fn running_the_node_works_and_can_be_interrupted() { async fn running_two_nodes_with_the_same_ws_port_should_work() { fn start_node() -> Child { Command::new(cargo_bin("substrate")) + .stdout(process::Stdio::piped()) + .stderr(process::Stdio::piped()) .args(&["--dev", "--tmp", "--ws-port=45789", "--no-hardware-benchmarks"]) .spawn() .unwrap() @@ -77,7 +87,10 @@ async fn running_two_nodes_with_the_same_ws_port_should_work() { let mut first_node = common::KillChildOnDrop(start_node()); let mut second_node = common::KillChildOnDrop(start_node()); - let _ = common::wait_n_finalized_blocks(3, 30).await; + let stderr = first_node.stderr.take().unwrap(); + let (ws_url, _) = common::find_ws_url_from_output(stderr); + + common::wait_n_finalized_blocks(3, 30, &ws_url).await.unwrap(); assert!(first_node.try_wait().unwrap().is_none(), "The first node should still be running"); assert!(second_node.try_wait().unwrap().is_none(), "The second node should still be running"); diff --git a/bin/node/cli/tests/temp_base_path_works.rs b/bin/node/cli/tests/temp_base_path_works.rs index df293161e3234..98422a21f5308 100644 --- a/bin/node/cli/tests/temp_base_path_works.rs +++ b/bin/node/cli/tests/temp_base_path_works.rs @@ -43,8 +43,11 @@ async fn temp_base_path_works() { .unwrap(), ); + let mut stderr = child.stderr.take().unwrap(); + let (ws_url, mut data) = common::find_ws_url_from_output(&mut stderr); + // Let it produce some blocks. 
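// Hedged usage sketch for the `find_ws_url_from_output` helper above: it keys
// off the "Running JSON-RPC WS server: addr=" log line and takes everything up
// to the first comma. The surrounding log text in the literal below is an
// assumption; only the `addr=` prefix and a trailing comma are required.
#[test]
fn find_ws_url_from_output_picks_up_the_bound_address() {
	let log: &[u8] =
		b"2022-05-10 10:52:19 Running JSON-RPC WS server: addr=127.0.0.1:9944, allowed origins=None\n";
	let (ws_url, raw) = find_ws_url_from_output(log);
	assert_eq!(ws_url, "ws://127.0.0.1:9944");
	assert!(raw.contains("Running JSON-RPC WS server"));
}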
- common::wait_n_finalized_blocks(3, 30).await.unwrap(); + common::wait_n_finalized_blocks(3, 30, &ws_url).await.unwrap(); assert!(child.try_wait().unwrap().is_none(), "the process should still be running"); // Stop the process @@ -52,10 +55,9 @@ async fn temp_base_path_works() { assert!(common::wait_for(&mut child, 40).map(|x| x.success()).unwrap_or_default()); // Ensure the database has been deleted - let mut stderr = String::new(); - child.stderr.as_mut().unwrap().read_to_string(&mut stderr).unwrap(); + stderr.read_to_string(&mut data).unwrap(); let re = Regex::new(r"Database: .+ at (\S+)").unwrap(); - let db_path = PathBuf::from(re.captures(stderr.as_str()).unwrap().get(1).unwrap().as_str()); + let db_path = PathBuf::from(re.captures(data.as_str()).unwrap().get(1).unwrap().as_str()); assert!(!db_path.exists()); } diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml index f0ae8b42e6398..9520c621d3165 100644 --- a/bin/node/rpc/Cargo.toml +++ b/bin/node/rpc/Cargo.toml @@ -11,7 +11,7 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpc-core = "18.0.0" +jsonrpsee = { version = "0.12.0", features = ["server"] } node-primitives = { version = "2.0.0", path = "../primitives" } pallet-contracts-rpc = { version = "4.0.0-dev", path = "../../../frame/contracts/rpc/" } pallet-mmr-rpc = { version = "3.0.0", path = "../../../frame/merkle-mountain-range/rpc/" } diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs index b8349e26cd1da..05aa973e102b1 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -29,18 +29,20 @@ //! be placed here or imported from corresponding FRAME RPC definitions. #![warn(missing_docs)] +#![warn(unused_crate_dependencies)] use std::sync::Arc; +use jsonrpsee::RpcModule; use node_primitives::{AccountId, Balance, Block, BlockNumber, Hash, Index}; use sc_client_api::AuxStore; use sc_consensus_babe::{Config, Epoch}; -use sc_consensus_babe_rpc::BabeRpcHandler; +use sc_consensus_babe_rpc::BabeRpc; use sc_consensus_epochs::SharedEpochChanges; use sc_finality_grandpa::{ FinalityProofProvider, GrandpaJustificationStream, SharedAuthoritySet, SharedVoterState, }; -use sc_finality_grandpa_rpc::GrandpaRpcHandler; +use sc_finality_grandpa_rpc::GrandpaRpc; use sc_rpc::SubscriptionTaskExecutor; pub use sc_rpc_api::DenyUnsafe; use sc_transaction_pool_api::TransactionPool; @@ -93,14 +95,11 @@ pub struct FullDeps { pub grandpa: GrandpaDeps, } -/// A IO handler that uses all Full RPC extensions. -pub type IoHandler = jsonrpc_core::IoHandler; - /// Instantiate all Full RPC extensions. 
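// A minimal, self-contained sketch of the jsonrpsee pattern `create_full`
// below relies on (the `Silly` API is hypothetical, not part of this patch):
// `#[rpc(server)]` generates a `SillyApiServer` trait whose provided
// `into_rpc()` turns an implementation into an `RpcModule` that can be merged
// into the node's top-level module.
use jsonrpsee::{
	core::{async_trait, RpcResult},
	proc_macros::rpc,
	RpcModule,
};

#[rpc(server)]
pub trait SillyApi {
	#[method(name = "silly_seven")]
	async fn seven(&self) -> RpcResult<u32>;
}

pub struct Silly;

#[async_trait]
impl SillyApiServer for Silly {
	async fn seven(&self) -> RpcResult<u32> {
		Ok(7)
	}
}

pub fn sketch_module() -> Result<RpcModule<()>, Box<dyn std::error::Error + Send + Sync>> {
	let mut module = RpcModule::new(());
	// `merge` fails on duplicate method names, hence the `?`.
	module.merge(Silly.into_rpc())?;
	Ok(module)
}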
pub fn create_full( deps: FullDeps, backend: Arc, -) -> Result, Box> +) -> Result, Box> where C: ProvideRuntimeApi + sc_client_api::BlockBackend @@ -121,13 +120,17 @@ where B: sc_client_api::Backend + Send + Sync + 'static, B::State: sc_client_api::backend::StateBackend>, { - use pallet_contracts_rpc::{Contracts, ContractsApi}; - use pallet_mmr_rpc::{Mmr, MmrApi}; - use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApi}; - use sc_rpc::dev::{Dev, DevApi}; - use substrate_frame_rpc_system::{FullSystem, SystemApi}; + use pallet_contracts_rpc::{ContractsApiServer, ContractsRpc}; + use pallet_mmr_rpc::{MmrApiServer, MmrRpc}; + use pallet_transaction_payment_rpc::{TransactionPaymentApiServer, TransactionPaymentRpc}; + use sc_consensus_babe_rpc::BabeApiServer; + use sc_finality_grandpa_rpc::GrandpaApiServer; + use sc_rpc::dev::{Dev, DevApiServer}; + use sc_sync_state_rpc::{SyncStateRpc, SyncStateRpcApiServer}; + use substrate_frame_rpc_system::{SystemApiServer, SystemRpc}; + use substrate_state_trie_migration_rpc::StateMigrationApiServer; - let mut io = jsonrpc_core::IoHandler::default(); + let mut io = RpcModule::new(()); let FullDeps { client, pool, select_chain, chain_spec, deny_unsafe, babe, grandpa } = deps; let BabeDeps { keystore, babe_config, shared_epoch_changes } = babe; @@ -139,40 +142,45 @@ where finality_provider, } = grandpa; - io.extend_with(SystemApi::to_delegate(FullSystem::new(client.clone(), pool, deny_unsafe))); + io.merge(SystemRpc::new(client.clone(), pool, deny_unsafe).into_rpc())?; // Making synchronous calls in light client freezes the browser currently, // more context: https://github.com/paritytech/substrate/pull/3480 // These RPCs should use an asynchronous caller instead. - io.extend_with(ContractsApi::to_delegate(Contracts::new(client.clone()))); - io.extend_with(MmrApi::to_delegate(Mmr::new(client.clone()))); - io.extend_with(TransactionPaymentApi::to_delegate(TransactionPayment::new(client.clone()))); - io.extend_with(sc_consensus_babe_rpc::BabeApi::to_delegate(BabeRpcHandler::new( - client.clone(), - shared_epoch_changes.clone(), - keystore, - babe_config, - select_chain, - deny_unsafe, - ))); - io.extend_with(sc_finality_grandpa_rpc::GrandpaApi::to_delegate(GrandpaRpcHandler::new( - shared_authority_set.clone(), - shared_voter_state, - justification_stream, - subscription_executor, - finality_provider, - ))); - io.extend_with(substrate_state_trie_migration_rpc::StateMigrationApi::to_delegate( - substrate_state_trie_migration_rpc::MigrationRpc::new(client.clone(), backend, deny_unsafe), - )); - io.extend_with(sc_sync_state_rpc::SyncStateRpcApi::to_delegate( - sc_sync_state_rpc::SyncStateRpcHandler::new( - chain_spec, + io.merge(ContractsRpc::new(client.clone()).into_rpc())?; + io.merge(MmrRpc::new(client.clone()).into_rpc())?; + io.merge(TransactionPaymentRpc::new(client.clone()).into_rpc())?; + io.merge( + BabeRpc::new( client.clone(), - shared_authority_set, - shared_epoch_changes, - )?, - )); - io.extend_with(DevApi::to_delegate(Dev::new(client, deny_unsafe))); + shared_epoch_changes.clone(), + keystore, + babe_config, + select_chain, + deny_unsafe, + ) + .into_rpc(), + )?; + io.merge( + GrandpaRpc::new( + subscription_executor, + shared_authority_set.clone(), + shared_voter_state, + justification_stream, + finality_provider, + ) + .into_rpc(), + )?; + + io.merge( + SyncStateRpc::new(chain_spec, client.clone(), shared_authority_set, shared_epoch_changes)? 
+ .into_rpc(), + )?; + + io.merge( + substrate_state_trie_migration_rpc::MigrationRpc::new(client.clone(), backend, deny_unsafe) + .into_rpc(), + )?; + io.merge(Dev::new(client, deny_unsafe).into_rpc())?; Ok(io) } diff --git a/client/beefy/Cargo.toml b/client/beefy/Cargo.toml index 4b6496e52c2f2..bd25496f2dfea 100644 --- a/client/beefy/Cargo.toml +++ b/client/beefy/Cargo.toml @@ -41,7 +41,7 @@ sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } serde = "1.0.136" strum = { version = "0.23", features = ["derive"] } tempfile = "3.1.0" -tokio = "1.15" +tokio = "1.17.0" sc-consensus = { version = "0.10.0-dev", path = "../consensus/common" } sc-network-test = { version = "0.8.0", path = "../network/test" } sp-finality-grandpa = { version = "4.0.0-dev", path = "../../primitives/finality-grandpa" } diff --git a/client/beefy/rpc/Cargo.toml b/client/beefy/rpc/Cargo.toml index adf29f1cef732..f8ca6470f267a 100644 --- a/client/beefy/rpc/Cargo.toml +++ b/client/beefy/rpc/Cargo.toml @@ -11,10 +11,7 @@ homepage = "https://substrate.io" [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0", features = ["derive"] } futures = "0.3.21" -jsonrpc-core = "18.0.0" -jsonrpc-core-client = "18.0.0" -jsonrpc-derive = "18.0.0" -jsonrpc-pubsub = "18.0.0" +jsonrpsee = { version = "0.12.0", features = ["server", "macros"] } log = "0.4" parking_lot = "0.12.0" serde = { version = "1.0.136", features = ["derive"] } @@ -32,3 +29,4 @@ sc-rpc = { version = "4.0.0-dev", features = [ "test-helpers", ], path = "../../rpc" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } +tokio = { version = "1.17.0", features = ["macros"] } diff --git a/client/beefy/rpc/src/lib.rs b/client/beefy/rpc/src/lib.rs index e49af3352ae4e..e4c8c76419ccb 100644 --- a/client/beefy/rpc/src/lib.rs +++ b/client/beefy/rpc/src/lib.rs @@ -23,19 +23,22 @@ use parking_lot::RwLock; use std::sync::Arc; +use sc_rpc::SubscriptionTaskExecutor; use sp_runtime::traits::Block as BlockT; -use futures::{task::SpawnError, FutureExt, SinkExt, StreamExt, TryFutureExt}; -use jsonrpc_derive::rpc; -use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId}; +use futures::{task::SpawnError, FutureExt, StreamExt}; +use jsonrpsee::{ + core::{async_trait, Error as JsonRpseeError, RpcResult}, + proc_macros::rpc, + types::{error::CallError, ErrorObject}, + PendingSubscription, +}; use log::warn; use beefy_gadget::notification::{BeefyBestBlockStream, BeefySignedCommitmentStream}; mod notification; -type FutureResult = jsonrpc_core::BoxFuture>; - #[derive(Debug, thiserror::Error)] /// Top-level error type for the RPC handler pub enum Error { @@ -64,195 +67,149 @@ impl From for ErrorCode { } } -impl From for jsonrpc_core::Error { +impl From for JsonRpseeError { fn from(error: Error) -> Self { - let message = format!("{}", error); + let message = error.to_string(); let code = ErrorCode::from(error); - jsonrpc_core::Error { + JsonRpseeError::Call(CallError::Custom(ErrorObject::owned( + code as i32, message, - code: jsonrpc_core::ErrorCode::ServerError(code as i64), - data: None, - } + None::<()>, + ))) } } -/// Provides RPC methods for interacting with BEEFY. -#[rpc] +// Provides RPC methods for interacting with BEEFY. +#[rpc(client, server)] pub trait BeefyApi { - /// RPC Metadata - type Metadata; - /// Returns the block most recently finalized by BEEFY, alongside side its justification. 
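// The `From<Error> for JsonRpseeError` conversion above is the shape every
// ported crate uses: map the domain error to a JSON-RPC call error carrying a
// numeric code and message. Minimal sketch with a hypothetical `NotReady` error:
use jsonrpsee::{
	core::Error as JsonRpseeError,
	types::{error::CallError, ErrorObject},
};

#[derive(Debug, thiserror::Error)]
#[error("RPC endpoint not ready")]
pub struct NotReady;

impl From<NotReady> for JsonRpseeError {
	fn from(err: NotReady) -> Self {
		// `ErrorObject::owned(code, message, data)`; `None::<()>` means no extra data.
		JsonRpseeError::Call(CallError::Custom(ErrorObject::owned(1, err.to_string(), None::<()>)))
	}
}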
- #[pubsub( - subscription = "beefy_justifications", - subscribe, - name = "beefy_subscribeJustifications" - )] - fn subscribe_justifications( - &self, - metadata: Self::Metadata, - subscriber: Subscriber, - ); - - /// Unsubscribe from receiving notifications about recently finalized blocks. - #[pubsub( - subscription = "beefy_justifications", - unsubscribe, - name = "beefy_unsubscribeJustifications" + #[subscription( + name = "beefy_subscribeJustifications" => "beefy_justifications", + unsubscribe = "beefy_unsubscribeJustifications", + item = Notification, )] - fn unsubscribe_justifications( - &self, - metadata: Option, - id: SubscriptionId, - ) -> jsonrpc_core::Result; + fn subscribe_justifications(&self); /// Returns hash of the latest BEEFY finalized block as seen by this client. /// /// The latest BEEFY block might not be available if the BEEFY gadget is not running /// in the network or if the client is still initializing or syncing with the network. /// In such case an error would be returned. - #[rpc(name = "beefy_getFinalizedHead")] - fn latest_finalized(&self) -> FutureResult; + #[method(name = "beefy_getFinalizedHead")] + async fn latest_finalized(&self) -> RpcResult; } /// Implements the BeefyApi RPC trait for interacting with BEEFY. pub struct BeefyRpcHandler { signed_commitment_stream: BeefySignedCommitmentStream, beefy_best_block: Arc>>, - manager: SubscriptionManager, + executor: SubscriptionTaskExecutor, } -impl BeefyRpcHandler { +impl BeefyRpcHandler +where + Block: BlockT, +{ /// Creates a new BeefyRpcHandler instance. - pub fn new( + pub fn new( signed_commitment_stream: BeefySignedCommitmentStream, best_block_stream: BeefyBestBlockStream, - executor: E, - ) -> Result - where - E: futures::task::Spawn + Send + Sync + 'static, - { + executor: SubscriptionTaskExecutor, + ) -> Result { let beefy_best_block = Arc::new(RwLock::new(None)); let stream = best_block_stream.subscribe(); let closure_clone = beefy_best_block.clone(); let future = stream.for_each(move |best_beefy| { let async_clone = closure_clone.clone(); - async move { - *async_clone.write() = Some(best_beefy); - } + async move { *async_clone.write() = Some(best_beefy) } }); - executor - .spawn_obj(futures::task::FutureObj::new(Box::pin(future))) - .map_err(|e| { - log::error!("Failed to spawn BEEFY RPC background task; err: {}", e); - e - })?; - - let manager = SubscriptionManager::new(Arc::new(executor)); - Ok(Self { signed_commitment_stream, beefy_best_block, manager }) + executor.spawn("substrate-rpc-subscription", Some("rpc"), future.map(drop).boxed()); + Ok(Self { signed_commitment_stream, beefy_best_block, executor }) } } -impl BeefyApi for BeefyRpcHandler +#[async_trait] +impl BeefyApiServer + for BeefyRpcHandler where Block: BlockT, { - type Metadata = sc_rpc::Metadata; - - fn subscribe_justifications( - &self, - _metadata: Self::Metadata, - subscriber: Subscriber, - ) { + fn subscribe_justifications(&self, pending: PendingSubscription) { let stream = self .signed_commitment_stream .subscribe() - .map(|x| Ok::<_, ()>(Ok(notification::EncodedSignedCommitment::new::(x)))); + .map(|sc| notification::EncodedSignedCommitment::new::(sc)); - self.manager.add(subscriber, |sink| { - stream - .forward(sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e))) - .map(|_| ()) - }); - } + let fut = async move { + if let Some(mut sink) = pending.accept() { + sink.pipe_from_stream(stream).await; + } + } + .boxed(); - fn unsubscribe_justifications( - &self, - _metadata: Option, - id: SubscriptionId, - ) -> 
jsonrpc_core::Result { - Ok(self.manager.cancel(id)) + self.executor + .spawn("substrate-rpc-subscription", Some("rpc"), fut.map(drop).boxed()); } - fn latest_finalized(&self) -> FutureResult { - let result: Result = self - .beefy_best_block + async fn latest_finalized(&self) -> RpcResult { + self.beefy_best_block .read() .as_ref() .cloned() - .ok_or_else(|| Error::EndpointNotReady.into()); - let future = async move { result }.boxed(); - future.map_err(jsonrpc_core::Error::from).boxed() + .ok_or(Error::EndpointNotReady) + .map_err(Into::into) } } #[cfg(test)] mod tests { use super::*; - use jsonrpc_core::{types::Params, Notification, Output}; - use beefy_gadget::notification::{BeefySignedCommitment, BeefySignedCommitmentSender}; + use beefy_gadget::notification::{ + BeefyBestBlockStream, BeefySignedCommitment, BeefySignedCommitmentSender, + }; use beefy_primitives::{known_payload_ids, Payload}; use codec::{Decode, Encode}; + use jsonrpsee::{types::EmptyParams, RpcModule}; use sp_runtime::traits::{BlakeTwo256, Hash}; use substrate_test_runtime_client::runtime::Block; - fn setup_io_handler( - ) -> (jsonrpc_core::MetaIoHandler, BeefySignedCommitmentSender) { + fn setup_io_handler() -> (RpcModule>, BeefySignedCommitmentSender) + { let (_, stream) = BeefyBestBlockStream::::channel(); setup_io_handler_with_best_block_stream(stream) } fn setup_io_handler_with_best_block_stream( best_block_stream: BeefyBestBlockStream, - ) -> (jsonrpc_core::MetaIoHandler, BeefySignedCommitmentSender) { + ) -> (RpcModule>, BeefySignedCommitmentSender) { let (commitment_sender, commitment_stream) = BeefySignedCommitmentStream::::channel(); - let handler: BeefyRpcHandler = BeefyRpcHandler::new( + let handler = BeefyRpcHandler::new( commitment_stream, best_block_stream, - sc_rpc::testing::TaskExecutor, + sc_rpc::testing::test_executor(), ) - .unwrap(); + .expect("Setting up the BEEFY RPC handler works"); - let mut io = jsonrpc_core::MetaIoHandler::default(); - io.extend_with(BeefyApi::to_delegate(handler)); - - (io, commitment_sender) - } - - fn setup_session() -> (sc_rpc::Metadata, futures::channel::mpsc::UnboundedReceiver) { - let (tx, rx) = futures::channel::mpsc::unbounded(); - let meta = sc_rpc::Metadata::new(tx); - (meta, rx) + (handler.into_rpc(), commitment_sender) } - #[test] - fn uninitialized_rpc_handler() { - let (io, _) = setup_io_handler(); - + #[tokio::test] + async fn uninitialized_rpc_handler() { + let (rpc, _) = setup_io_handler(); let request = r#"{"jsonrpc":"2.0","method":"beefy_getFinalizedHead","params":[],"id":1}"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":1,"message":"BEEFY RPC endpoint not ready"},"id":1}"#; + let expected_response = r#"{"jsonrpc":"2.0","error":{"code":1,"message":"BEEFY RPC endpoint not ready"},"id":1}"#.to_string(); + let (result, _) = rpc.raw_json_request(&request).await.unwrap(); - let meta = sc_rpc::Metadata::default(); - assert_eq!(Some(response.into()), io.handle_request_sync(request, meta)); + assert_eq!(expected_response, result,); } - #[test] - fn latest_finalized_rpc() { + #[tokio::test] + async fn latest_finalized_rpc() { let (sender, stream) = BeefyBestBlockStream::::channel(); let (io, _) = setup_io_handler_with_best_block_stream(stream); @@ -266,83 +223,78 @@ mod tests { \"jsonrpc\":\"2.0\",\ \"result\":\"0x2f0039e93a27221fcf657fb877a1d4f60307106113e885096cb44a461cd0afbf\",\ \"id\":1\ - }"; + }" + .to_string(); let not_ready = "{\ \"jsonrpc\":\"2.0\",\ \"error\":{\"code\":1,\"message\":\"BEEFY RPC endpoint not ready\"},\ \"id\":1\ - }"; + }" + 
.to_string(); let deadline = std::time::Instant::now() + std::time::Duration::from_secs(2); while std::time::Instant::now() < deadline { - let meta = sc_rpc::Metadata::default(); - let response = io.handle_request_sync(request, meta); - // Retry "not ready" responses. - if response != Some(not_ready.into()) { - assert_eq!(response, Some(expected.into())); + let (response, _) = io.raw_json_request(request).await.expect("RPC requests work"); + if response != not_ready { + assert_eq!(response, expected); // Success return } - std::thread::sleep(std::time::Duration::from_millis(50)); + std::thread::sleep(std::time::Duration::from_millis(50)) } + panic!( "Deadline reached while waiting for best BEEFY block to update. Perhaps the background task is broken?" ); } - #[test] - fn subscribe_and_unsubscribe_to_justifications() { - let (io, _) = setup_io_handler(); - let (meta, _) = setup_session(); + #[tokio::test] + async fn subscribe_and_unsubscribe_to_justifications() { + let (rpc, _) = setup_io_handler(); - // Subscribe - let sub_request = - r#"{"jsonrpc":"2.0","method":"beefy_subscribeJustifications","params":[],"id":1}"#; - let resp = io.handle_request_sync(sub_request, meta.clone()); - let resp: Output = serde_json::from_str(&resp.unwrap()).unwrap(); + // Subscribe call. + let sub = rpc + .subscribe("beefy_subscribeJustifications", EmptyParams::new()) + .await + .unwrap(); - let sub_id = match resp { - Output::Success(success) => success.result, - _ => panic!(), - }; + let ser_id = serde_json::to_string(sub.subscription_id()).unwrap(); // Unsubscribe let unsub_req = format!( - r#"{{"jsonrpc":"2.0","method":"beefy_unsubscribeJustifications","params":[{}],"id":1}}"#, - sub_id - ); - assert_eq!( - io.handle_request_sync(&unsub_req, meta.clone()), - Some(r#"{"jsonrpc":"2.0","result":true,"id":1}"#.into()), + "{{\"jsonrpc\":\"2.0\",\"method\":\"beefy_unsubscribeJustifications\",\"params\":[{}],\"id\":1}}", + ser_id ); + let (response, _) = rpc.raw_json_request(&unsub_req).await.unwrap(); + + assert_eq!(response, r#"{"jsonrpc":"2.0","result":true,"id":1}"#); // Unsubscribe again and fail - assert_eq!( - io.handle_request_sync(&unsub_req, meta), - Some(r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Invalid subscription id."},"id":1}"#.into()), - ); - } + let (response, _) = rpc.raw_json_request(&unsub_req).await.unwrap(); + let expected = r#"{"jsonrpc":"2.0","result":false,"id":1}"#; - #[test] - fn subscribe_and_unsubscribe_with_wrong_id() { - let (io, _) = setup_io_handler(); - let (meta, _) = setup_session(); + assert_eq!(response, expected); + } - // Subscribe - let sub_request = - r#"{"jsonrpc":"2.0","method":"beefy_subscribeJustifications","params":[],"id":1}"#; - let resp = io.handle_request_sync(sub_request, meta.clone()); - let resp: Output = serde_json::from_str(&resp.unwrap()).unwrap(); - assert!(matches!(resp, Output::Success(_))); + #[tokio::test] + async fn subscribe_and_unsubscribe_with_wrong_id() { + let (rpc, _) = setup_io_handler(); + // Subscribe call. 
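// Standalone sketch of the two in-process test helpers the ported tests lean
// on: `raw_json_request` for plain methods and `subscribe` for subscriptions.
// It reuses the `setup_io_handler()` defined above; everything else shown here
// mirrors the surrounding tests.
#[tokio::test]
async fn raw_request_and_subscription_handles() {
	let (rpc, _commitment_sender) = setup_io_handler();

	// Raw JSON-RPC round-trip: returns the serialized response plus a receiver
	// for any notifications produced while handling the call.
	let req = r#"{"jsonrpc":"2.0","method":"beefy_getFinalizedHead","params":[],"id":1}"#;
	let (response, _notifications) = rpc.raw_json_request(req).await.unwrap();
	assert!(response.contains(r#""id":1"#));

	// Typed subscription handle; the serialized id is what unsubscribe expects.
	let sub = rpc
		.subscribe("beefy_subscribeJustifications", jsonrpsee::types::EmptyParams::new())
		.await
		.unwrap();
	let _ser_id = serde_json::to_string(sub.subscription_id()).unwrap();
}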
+ let _sub = rpc + .subscribe("beefy_subscribeJustifications", EmptyParams::new()) + .await + .unwrap(); // Unsubscribe with wrong ID - assert_eq!( - io.handle_request_sync( + let (response, _) = rpc + .raw_json_request( r#"{"jsonrpc":"2.0","method":"beefy_unsubscribeJustifications","params":["FOO"],"id":1}"#, - meta.clone() - ), - Some(r#"{"jsonrpc":"2.0","error":{"code":-32602,"message":"Invalid subscription id."},"id":1}"#.into()) - ); + ) + .await + .unwrap(); + let expected = r#"{"jsonrpc":"2.0","result":false,"id":1}"#; + + assert_eq!(response, expected); } fn create_commitment() -> BeefySignedCommitment { @@ -357,18 +309,15 @@ mod tests { } } - #[test] - fn subscribe_and_listen_to_one_justification() { - let (io, commitment_sender) = setup_io_handler(); - let (meta, receiver) = setup_session(); + #[tokio::test] + async fn subscribe_and_listen_to_one_justification() { + let (rpc, commitment_sender) = setup_io_handler(); // Subscribe - let sub_request = - r#"{"jsonrpc":"2.0","method":"beefy_subscribeJustifications","params":[],"id":1}"#; - - let resp = io.handle_request_sync(sub_request, meta.clone()); - let mut resp: serde_json::Value = serde_json::from_str(&resp.unwrap()).unwrap(); - let sub_id: String = serde_json::from_value(resp["result"].take()).unwrap(); + let mut sub = rpc + .subscribe("beefy_subscribeJustifications", EmptyParams::new()) + .await + .unwrap(); // Notify with commitment let commitment = create_commitment(); @@ -376,21 +325,10 @@ mod tests { r.unwrap(); // Inspect what we received - let recv = futures::executor::block_on(receiver.take(1).collect::>()); - let recv: Notification = serde_json::from_str(&recv[0]).unwrap(); - let mut json_map = match recv.params { - Params::Map(json_map) => json_map, - _ => panic!(), - }; - - let recv_sub_id: String = serde_json::from_value(json_map["subscription"].take()).unwrap(); - let recv_commitment: sp_core::Bytes = - serde_json::from_value(json_map["result"].take()).unwrap(); + let (bytes, recv_sub_id) = sub.next::().await.unwrap().unwrap(); let recv_commitment: BeefySignedCommitment = - Decode::decode(&mut &recv_commitment[..]).unwrap(); - - assert_eq!(recv.method, "beefy_justifications"); - assert_eq!(recv_sub_id, sub_id); + Decode::decode(&mut &bytes[..]).unwrap(); + assert_eq!(&recv_sub_id, sub.subscription_id()); assert_eq!(recv_commitment, commitment); } } diff --git a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index dea1e2a6dae5b..6cb0de0ebd04c 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -100,18 +100,28 @@ pub struct RunCmd { #[clap(long)] pub unsafe_ws_external: bool, - /// Set the the maximum RPC payload size for both requests and responses (both http and ws), in - /// megabytes. Default is 15MiB. + /// DEPRECATED, this has no affect anymore. Use `rpc_max_request_size` or + /// `rpc_max_response_size` instead. #[clap(long)] pub rpc_max_payload: Option, + /// Set the the maximum RPC request payload size for both HTTP and WS in megabytes. + /// Default is 15MiB. + #[clap(long)] + pub rpc_max_request_size: Option, + + /// Set the the maximum RPC response payload size for both HTTP and WS in megabytes. + /// Default is 15MiB. + #[clap(long)] + pub rpc_max_response_size: Option, + /// Expose Prometheus exporter on all interfaces. /// /// Default is local. #[clap(long)] pub prometheus_external: bool, - /// Specify IPC RPC server path + /// DEPRECATED, IPC support has been removed. 
#[clap(long, value_name = "PATH")] pub ipc_path: Option, @@ -127,7 +137,7 @@ pub struct RunCmd { #[clap(long, value_name = "COUNT")] pub ws_max_connections: Option, - /// Set the the maximum WebSocket output buffer size in MiB. Default is 16. + /// DEPRECATED, this has no affect anymore. Use `rpc_max_response_size` instead. #[clap(long)] pub ws_max_out_buffer_capacity: Option, diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index aef1da8193757..e38d34b92c74d 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -359,6 +359,16 @@ pub trait CliConfiguration: Sized { Ok(None) } + /// Get maximum RPC request payload size. + fn rpc_max_request_size(&self) -> Result> { + Ok(None) + } + + /// Get maximum RPC response payload size. + fn rpc_max_response_size(&self) -> Result> { + Ok(None) + } + /// Get maximum WS output buffer capacity. fn ws_max_out_buffer_capacity(&self) -> Result> { Ok(None) @@ -526,6 +536,10 @@ pub trait CliConfiguration: Sized { rpc_ws_max_connections: self.rpc_ws_max_connections()?, rpc_cors: self.rpc_cors(is_dev)?, rpc_max_payload: self.rpc_max_payload()?, + rpc_max_request_size: self.rpc_max_request_size()?, + rpc_max_response_size: self.rpc_max_response_size()?, + rpc_id_provider: None, + rpc_max_subs_per_conn: None, ws_max_out_buffer_capacity: self.ws_max_out_buffer_capacity()?, prometheus_config: self .prometheus_config(DCV::prometheus_listen_port(), &chain_spec)?, diff --git a/client/consensus/babe/rpc/Cargo.toml b/client/consensus/babe/rpc/Cargo.toml index 0e7141c77f8b2..4be5d1f8bba90 100644 --- a/client/consensus/babe/rpc/Cargo.toml +++ b/client/consensus/babe/rpc/Cargo.toml @@ -13,10 +13,8 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +jsonrpsee = { version = "0.12.0", features = ["server", "macros"] } futures = "0.3.21" -jsonrpc-core = "18.0.0" -jsonrpc-core-client = "18.0.0" -jsonrpc-derive = "18.0.0" serde = { version = "1.0.136", features = ["derive"] } thiserror = "1.0" sc-consensus-babe = { version = "0.10.0-dev", path = "../" } @@ -34,6 +32,7 @@ sp-runtime = { version = "6.0.0", path = "../../../../primitives/runtime" } [dev-dependencies] serde_json = "1.0.79" tempfile = "3.1.0" +tokio = "1.17.0" sc-consensus = { version = "0.10.0-dev", path = "../../../consensus/common" } sc-keystore = { version = "4.0.0-dev", path = "../../../keystore" } sp-keyring = { version = "6.0.0", path = "../../../../primitives/keyring" } diff --git a/client/consensus/babe/rpc/src/lib.rs b/client/consensus/babe/rpc/src/lib.rs index 2d0c81afc7775..d5f21606c62ed 100644 --- a/client/consensus/babe/rpc/src/lib.rs +++ b/client/consensus/babe/rpc/src/lib.rs @@ -18,9 +18,13 @@ //! RPC api for babe. -use futures::{FutureExt, TryFutureExt}; -use jsonrpc_core::Error as RpcError; -use jsonrpc_derive::rpc; +use futures::TryFutureExt; +use jsonrpsee::{ + core::{async_trait, Error as JsonRpseeError, RpcResult}, + proc_macros::rpc, + types::{error::CallError, ErrorObject}, +}; + use sc_consensus_babe::{authorship, Config, Epoch}; use sc_consensus_epochs::{descendent_query, Epoch as EpochT, SharedEpochChanges}; use sc_rpc_api::DenyUnsafe; @@ -35,19 +39,17 @@ use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::traits::{Block as BlockT, Header as _}; use std::{collections::HashMap, sync::Arc}; -type FutureResult = jsonrpc_core::BoxFuture>; - /// Provides rpc methods for interacting with Babe. 
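// Because the trait below is declared with `#[rpc(client, server)]`, the macro
// also generates a `BabeApiClient` extension trait for jsonrpsee clients.
// Hedged sketch of calling it over WebSockets; `WsClientBuilder` and enabling
// jsonrpsee's client features are assumptions, not something this patch does.
use jsonrpsee::ws_client::WsClientBuilder;
use sc_consensus_babe_rpc::BabeApiClient;

async fn epoch_authorship_via_client() -> Result<(), Box<dyn std::error::Error>> {
	let client = WsClientBuilder::default().build("ws://127.0.0.1:9944").await?;
	// Per-authority primary/secondary slot claims for the current epoch.
	let _authorship = client.epoch_authorship().await?;
	Ok(())
}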
-#[rpc] +#[rpc(client, server)] pub trait BabeApi { /// Returns data about which slots (primary or secondary) can be claimed in the current epoch /// with the keys in the keystore. - #[rpc(name = "babe_epochAuthorship")] - fn epoch_authorship(&self) -> FutureResult>; + #[method(name = "babe_epochAuthorship")] + async fn epoch_authorship(&self) -> RpcResult>; } -/// Implements the BabeRpc trait for interacting with Babe. -pub struct BabeRpcHandler { +/// Provides RPC methods for interacting with Babe. +pub struct BabeRpc { /// shared reference to the client. client: Arc, /// shared reference to EpochChanges @@ -62,7 +64,7 @@ pub struct BabeRpcHandler { deny_unsafe: DenyUnsafe, } -impl BabeRpcHandler { +impl BabeRpc { /// Creates a new instance of the BabeRpc handler. pub fn new( client: Arc, @@ -76,7 +78,8 @@ impl BabeRpcHandler { } } -impl BabeApi for BabeRpcHandler +#[async_trait] +impl BabeApiServer for BabeRpc where B: BlockT, C: ProvideRuntimeApi @@ -86,71 +89,63 @@ where C::Api: BabeRuntimeApi, SC: SelectChain + Clone + 'static, { - fn epoch_authorship(&self) -> FutureResult> { - if let Err(err) = self.deny_unsafe.check_if_safe() { - return async move { Err(err.into()) }.boxed() - } - - let (babe_config, keystore, shared_epoch, client, select_chain) = ( - self.babe_config.clone(), - self.keystore.clone(), - self.shared_epoch_changes.clone(), - self.client.clone(), - self.select_chain.clone(), - ); - - async move { - let header = select_chain.best_chain().map_err(Error::Consensus).await?; - let epoch_start = client - .runtime_api() - .current_epoch_start(&BlockId::Hash(header.hash())) - .map_err(|err| Error::StringError(err.to_string()))?; - let epoch = - epoch_data(&shared_epoch, &client, &babe_config, *epoch_start, &select_chain) - .await?; - let (epoch_start, epoch_end) = (epoch.start_slot(), epoch.end_slot()); - - let mut claims: HashMap = HashMap::new(); - - let keys = { - epoch - .authorities - .iter() - .enumerate() - .filter_map(|(i, a)| { - if SyncCryptoStore::has_keys( - &*keystore, - &[(a.0.to_raw_vec(), AuthorityId::ID)], - ) { - Some((a.0.clone(), i)) - } else { - None - } - }) - .collect::>() - }; - - for slot in *epoch_start..*epoch_end { - if let Some((claim, key)) = - authorship::claim_slot_using_keys(slot.into(), &epoch, &keystore, &keys) - { - match claim { - PreDigest::Primary { .. } => { - claims.entry(key).or_default().primary.push(slot); - }, - PreDigest::SecondaryPlain { .. } => { - claims.entry(key).or_default().secondary.push(slot); - }, - PreDigest::SecondaryVRF { .. 
} => { - claims.entry(key).or_default().secondary_vrf.push(slot); - }, - }; - } + async fn epoch_authorship(&self) -> RpcResult> { + self.deny_unsafe.check_if_safe()?; + let header = self.select_chain.best_chain().map_err(Error::Consensus).await?; + let epoch_start = self + .client + .runtime_api() + .current_epoch_start(&BlockId::Hash(header.hash())) + .map_err(|err| Error::StringError(format!("{:?}", err)))?; + + let epoch = epoch_data( + &self.shared_epoch_changes, + &self.client, + &self.babe_config, + *epoch_start, + &self.select_chain, + ) + .await?; + let (epoch_start, epoch_end) = (epoch.start_slot(), epoch.end_slot()); + let mut claims: HashMap = HashMap::new(); + + let keys = { + epoch + .authorities + .iter() + .enumerate() + .filter_map(|(i, a)| { + if SyncCryptoStore::has_keys( + &*self.keystore, + &[(a.0.to_raw_vec(), AuthorityId::ID)], + ) { + Some((a.0.clone(), i)) + } else { + None + } + }) + .collect::>() + }; + + for slot in *epoch_start..*epoch_end { + if let Some((claim, key)) = + authorship::claim_slot_using_keys(slot.into(), &epoch, &self.keystore, &keys) + { + match claim { + PreDigest::Primary { .. } => { + claims.entry(key).or_default().primary.push(slot); + }, + PreDigest::SecondaryPlain { .. } => { + claims.entry(key).or_default().secondary.push(slot); + }, + PreDigest::SecondaryVRF { .. } => { + claims.entry(key).or_default().secondary_vrf.push(slot.into()); + }, + }; } - - Ok(claims) } - .boxed() + + Ok(claims) } } @@ -176,13 +171,13 @@ pub enum Error { StringError(String), } -impl From for jsonrpc_core::Error { +impl From for JsonRpseeError { fn from(error: Error) -> Self { - jsonrpc_core::Error { - message: format!("{}", error), - code: jsonrpc_core::ErrorCode::ServerError(1234), - data: None, - } + JsonRpseeError::Call(CallError::Custom(ErrorObject::owned( + 1234, + error.to_string(), + None::<()>, + ))) } } @@ -226,7 +221,6 @@ mod tests { TestClientBuilderExt, }; - use jsonrpc_core::IoHandler; use sc_consensus_babe::{block_import, AuthorityPair, Config}; use std::sync::Arc; @@ -243,9 +237,9 @@ mod tests { (keystore, keystore_path) } - fn test_babe_rpc_handler( + fn test_babe_rpc_module( deny_unsafe: DenyUnsafe, - ) -> BabeRpcHandler> { + ) -> BabeRpc> { let builder = TestClientBuilder::new(); let (client, longest_chain) = builder.build_with_longest_chain(); let client = Arc::new(client); @@ -256,40 +250,30 @@ mod tests { let epoch_changes = link.epoch_changes().clone(); let keystore = create_temp_keystore::(Sr25519Keyring::Alice).0; - BabeRpcHandler::new( - client.clone(), - epoch_changes, - keystore, - config, - longest_chain, - deny_unsafe, - ) + BabeRpc::new(client.clone(), epoch_changes, keystore, config, longest_chain, deny_unsafe) } - #[test] - fn epoch_authorship_works() { - let handler = test_babe_rpc_handler(DenyUnsafe::No); - let mut io = IoHandler::new(); + #[tokio::test] + async fn epoch_authorship_works() { + let babe_rpc = test_babe_rpc_module(DenyUnsafe::No); + let api = babe_rpc.into_rpc(); - io.extend_with(BabeApi::to_delegate(handler)); let request = r#"{"jsonrpc":"2.0","method":"babe_epochAuthorship","params": [],"id":1}"#; - let response = r#"{"jsonrpc":"2.0","result":{"5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY":{"primary":[0],"secondary":[1,2,4],"secondary_vrf":[]}},"id":1}"#; + let (response, _) = api.raw_json_request(request).await.unwrap(); + let expected = r#"{"jsonrpc":"2.0","result":{"5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY":{"primary":[0],"secondary":[1,2,4],"secondary_vrf":[]}},"id":1}"#; - 
assert_eq!(Some(response.into()), io.handle_request_sync(request)); + assert_eq!(&response, expected); } - #[test] - fn epoch_authorship_is_unsafe() { - let handler = test_babe_rpc_handler(DenyUnsafe::Yes); - let mut io = IoHandler::new(); - - io.extend_with(BabeApi::to_delegate(handler)); - let request = r#"{"jsonrpc":"2.0","method":"babe_epochAuthorship","params": [],"id":1}"#; + #[tokio::test] + async fn epoch_authorship_is_unsafe() { + let babe_rpc = test_babe_rpc_module(DenyUnsafe::Yes); + let api = babe_rpc.into_rpc(); - let response = io.handle_request_sync(request).unwrap(); - let mut response: serde_json::Value = serde_json::from_str(&response).unwrap(); - let error: RpcError = serde_json::from_value(response["error"].take()).unwrap(); + let request = r#"{"jsonrpc":"2.0","method":"babe_epochAuthorship","params":[],"id":1}"#; + let (response, _) = api.raw_json_request(request).await.unwrap(); + let expected = r#"{"jsonrpc":"2.0","error":{"code":-32601,"message":"RPC call is unsafe to be called externally"},"id":1}"#; - assert_eq!(error, sc_rpc_api::UnsafeRpcError.into()) + assert_eq!(&response, expected); } } diff --git a/client/consensus/manual-seal/Cargo.toml b/client/consensus/manual-seal/Cargo.toml index 9452d2b1afd08..e8f4e20ab0e55 100644 --- a/client/consensus/manual-seal/Cargo.toml +++ b/client/consensus/manual-seal/Cargo.toml @@ -13,13 +13,11 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +jsonrpsee = { version = "0.12.0", features = ["server", "macros"] } assert_matches = "1.3.0" async-trait = "0.1.50" codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" -jsonrpc-core = "18.0.0" -jsonrpc-core-client = "18.0.0" -jsonrpc-derive = "18.0.0" log = "0.4.16" serde = { version = "1.0", features = ["derive"] } thiserror = "1.0" diff --git a/client/consensus/manual-seal/src/error.rs b/client/consensus/manual-seal/src/error.rs index 7c3211203bf54..a056c541c3cef 100644 --- a/client/consensus/manual-seal/src/error.rs +++ b/client/consensus/manual-seal/src/error.rs @@ -20,6 +20,10 @@ //! This is suitable for a testing environment. use futures::channel::{mpsc::SendError, oneshot}; +use jsonrpsee::{ + core::Error as JsonRpseeError, + types::error::{CallError, ErrorObject}, +}; use sc_consensus::ImportResult; use sp_blockchain::Error as BlockchainError; use sp_consensus::Error as ConsensusError; @@ -27,14 +31,14 @@ use sp_inherents::Error as InherentsError; /// Error code for rpc mod codes { - pub const SERVER_SHUTTING_DOWN: i64 = 10_000; - pub const BLOCK_IMPORT_FAILED: i64 = 11_000; - pub const EMPTY_TRANSACTION_POOL: i64 = 12_000; - pub const BLOCK_NOT_FOUND: i64 = 13_000; - pub const CONSENSUS_ERROR: i64 = 14_000; - pub const INHERENTS_ERROR: i64 = 15_000; - pub const BLOCKCHAIN_ERROR: i64 = 16_000; - pub const UNKNOWN_ERROR: i64 = 20_000; + pub const SERVER_SHUTTING_DOWN: i32 = 10_000; + pub const BLOCK_IMPORT_FAILED: i32 = 11_000; + pub const EMPTY_TRANSACTION_POOL: i32 = 12_000; + pub const BLOCK_NOT_FOUND: i32 = 13_000; + pub const CONSENSUS_ERROR: i32 = 14_000; + pub const INHERENTS_ERROR: i32 = 15_000; + pub const BLOCKCHAIN_ERROR: i32 = 16_000; + pub const UNKNOWN_ERROR: i32 = 20_000; } /// errors encountered by background block authorship task @@ -71,7 +75,7 @@ pub enum Error { SendError(#[from] SendError), /// Some other error. 
#[error("Other error: {0}")] - Other(#[from] Box), + Other(Box), } impl From for Error { @@ -87,7 +91,7 @@ impl From for Error { } impl Error { - fn to_code(&self) -> i64 { + fn to_code(&self) -> i32 { use Error::*; match self { BlockImportError(_) => codes::BLOCK_IMPORT_FAILED, @@ -102,12 +106,8 @@ impl Error { } } -impl From for jsonrpc_core::Error { - fn from(error: Error) -> Self { - jsonrpc_core::Error { - code: jsonrpc_core::ErrorCode::ServerError(error.to_code()), - message: format!("{}", error), - data: None, - } +impl From for JsonRpseeError { + fn from(err: Error) -> Self { + CallError::Custom(ErrorObject::owned(err.to_code(), err.to_string(), None::<()>)).into() } } diff --git a/client/consensus/manual-seal/src/rpc.rs b/client/consensus/manual-seal/src/rpc.rs index 4a8dcbc0cb765..b9bb06551f818 100644 --- a/client/consensus/manual-seal/src/rpc.rs +++ b/client/consensus/manual-seal/src/rpc.rs @@ -18,21 +18,21 @@ //! RPC interface for the `ManualSeal` Engine. -pub use self::gen_client::Client as ManualSealClient; +use crate::error::Error; use futures::{ channel::{mpsc, oneshot}, - FutureExt, SinkExt, TryFutureExt, + SinkExt, +}; +use jsonrpsee::{ + core::{async_trait, Error as JsonRpseeError, RpcResult}, + proc_macros::rpc, }; -use jsonrpc_core::Error; -use jsonrpc_derive::rpc; use sc_consensus::ImportedAux; use serde::{Deserialize, Serialize}; use sp_runtime::EncodedJustification; -/// Future's type for jsonrpc -type FutureResult = jsonrpc_core::BoxFuture>; -/// sender passed to the authorship task to report errors or successes. -pub type Sender = Option>>; +/// Sender passed to the authorship task to report errors or successes. +pub type Sender = Option>>; /// Message sent to the background authorship task, usually by RPC. pub enum EngineCommand { @@ -65,27 +65,27 @@ pub enum EngineCommand { } /// RPC trait that provides methods for interacting with the manual-seal authorship task over rpc. -#[rpc] +#[rpc(client, server)] pub trait ManualSealApi { /// Instructs the manual-seal authorship task to create a new block - #[rpc(name = "engine_createBlock")] - fn create_block( + #[method(name = "engine_createBlock")] + async fn create_block( &self, create_empty: bool, finalize: bool, parent_hash: Option, - ) -> FutureResult>; + ) -> RpcResult>; /// Instructs the manual-seal authorship task to finalize a block - #[rpc(name = "engine_finalizeBlock")] - fn finalize_block( + #[method(name = "engine_finalizeBlock")] + async fn finalize_block( &self, hash: Hash, justification: Option, - ) -> FutureResult; + ) -> RpcResult; } -/// A struct that implements the [`ManualSealApi`]. +/// A struct that implements the [`ManualSealApiServer`]. pub struct ManualSeal { import_block_channel: mpsc::Sender>, } @@ -106,44 +106,43 @@ impl ManualSeal { } } -impl ManualSealApi for ManualSeal { - fn create_block( +#[async_trait] +impl ManualSealApiServer for ManualSeal { + async fn create_block( &self, create_empty: bool, finalize: bool, parent_hash: Option, - ) -> FutureResult> { + ) -> RpcResult> { let mut sink = self.import_block_channel.clone(); - async move { - let (sender, receiver) = oneshot::channel(); - let command = EngineCommand::SealNewBlock { - create_empty, - finalize, - parent_hash, - sender: Some(sender), - }; - sink.send(command).await?; - receiver.await? + let (sender, receiver) = oneshot::channel(); + // NOTE: this sends a Result over the channel. 
+ let command = EngineCommand::SealNewBlock { + create_empty, + finalize, + parent_hash, + sender: Some(sender), + }; + + sink.send(command).await?; + + match receiver.await { + Ok(Ok(rx)) => Ok(rx), + Ok(Err(e)) => Err(e.into()), + Err(e) => Err(JsonRpseeError::to_call_error(e)), } - .map_err(Error::from) - .boxed() } - fn finalize_block( + async fn finalize_block( &self, hash: Hash, justification: Option, - ) -> FutureResult { + ) -> RpcResult { let mut sink = self.import_block_channel.clone(); - async move { - let (sender, receiver) = oneshot::channel(); - sink.send(EngineCommand::FinalizeBlock { hash, sender: Some(sender), justification }) - .await?; - - receiver.await?.map(|_| true) - } - .map_err(Error::from) - .boxed() + let (sender, receiver) = oneshot::channel(); + let command = EngineCommand::FinalizeBlock { hash, sender: Some(sender), justification }; + sink.send(command).await?; + receiver.await.map(|_| true).map_err(|e| JsonRpseeError::to_call_error(e)) } } diff --git a/client/finality-grandpa/rpc/Cargo.toml b/client/finality-grandpa/rpc/Cargo.toml index 40a4150f8dd98..c124712e3fa84 100644 --- a/client/finality-grandpa/rpc/Cargo.toml +++ b/client/finality-grandpa/rpc/Cargo.toml @@ -12,10 +12,7 @@ homepage = "https://substrate.io" [dependencies] finality-grandpa = { version = "0.15.0", features = ["derive-codec"] } futures = "0.3.16" -jsonrpc-core = "18.0.0" -jsonrpc-core-client = "18.0.0" -jsonrpc-derive = "18.0.0" -jsonrpc-pubsub = "18.0.0" +jsonrpsee = { version = "0.12.0", features = ["server", "macros"] } log = "0.4.8" parity-scale-codec = { version = "3.0.0", features = ["derive"] } serde = { version = "1.0.105", features = ["derive"] } @@ -37,3 +34,4 @@ sp-core = { version = "6.0.0", path = "../../../primitives/core" } sp-finality-grandpa = { version = "4.0.0-dev", path = "../../../primitives/finality-grandpa" } sp-keyring = { version = "6.0.0", path = "../../../primitives/keyring" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } +tokio = { version = "1.17.0", features = ["macros"] } diff --git a/client/finality-grandpa/rpc/src/error.rs b/client/finality-grandpa/rpc/src/error.rs index 845b4d99dcc1a..197c0b8a72102 100644 --- a/client/finality-grandpa/rpc/src/error.rs +++ b/client/finality-grandpa/rpc/src/error.rs @@ -16,6 +16,11 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use jsonrpsee::{ + core::Error as JsonRpseeError, + types::error::{CallError, ErrorObject}, +}; + #[derive(Debug, thiserror::Error)] /// Top-level error type for the RPC handler pub enum Error { @@ -56,15 +61,15 @@ impl From for ErrorCode { } } -impl From for jsonrpc_core::Error { +impl From for JsonRpseeError { fn from(error: Error) -> Self { - let message = format!("{}", error); + let message = error.to_string(); let code = ErrorCode::from(error); - jsonrpc_core::Error { + JsonRpseeError::Call(CallError::Custom(ErrorObject::owned( + code as i32, message, - code: jsonrpc_core::ErrorCode::ServerError(code as i64), - data: None, - } + None::<()>, + ))) } } diff --git a/client/finality-grandpa/rpc/src/lib.rs b/client/finality-grandpa/rpc/src/lib.rs index 9c51bc3d226a7..82962d716d589 100644 --- a/client/finality-grandpa/rpc/src/lib.rs +++ b/client/finality-grandpa/rpc/src/lib.rs @@ -19,167 +19,137 @@ //! RPC API for GRANDPA. 
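// Both the BEEFY and GRANDPA ports use the same server-side subscription
// shape, sketched here with a hypothetical `Ticker` API: `#[subscription]`
// wires up the subscribe/unsubscribe methods and the notification alias, and
// the implementation accepts the pending subscription and pipes a stream into
// the sink on a spawned task.
use futures::{stream, FutureExt};
use jsonrpsee::{proc_macros::rpc, PendingSubscription};
use sc_rpc::SubscriptionTaskExecutor;

#[rpc(server)]
pub trait TickerApi {
	#[subscription(
		name = "ticker_subscribeTicks" => "ticker_ticks",
		unsubscribe = "ticker_unsubscribeTicks",
		item = u64,
	)]
	fn subscribe_ticks(&self);
}

pub struct Ticker {
	executor: SubscriptionTaskExecutor,
}

impl TickerApiServer for Ticker {
	fn subscribe_ticks(&self, pending: PendingSubscription) {
		let ticks = stream::iter(0u64..);
		let fut = async move {
			// `accept()` returns `None` if the subscriber already went away;
			// `pipe_from_stream` serializes items until either side closes.
			if let Some(mut sink) = pending.accept() {
				sink.pipe_from_stream(ticks).await;
			}
		};
		self.executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.boxed());
	}
}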
#![warn(missing_docs)] -use futures::{task::Spawn, FutureExt, SinkExt, StreamExt, TryFutureExt}; -use jsonrpc_derive::rpc; -use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId}; +use futures::{FutureExt, StreamExt}; use log::warn; use std::sync::Arc; +use jsonrpsee::{ + core::{async_trait, RpcResult}, + proc_macros::rpc, + PendingSubscription, +}; + mod error; mod finality; mod notification; mod report; use sc_finality_grandpa::GrandpaJustificationStream; +use sc_rpc::SubscriptionTaskExecutor; use sp_runtime::traits::{Block as BlockT, NumberFor}; use finality::{EncodedFinalityProof, RpcFinalityProofProvider}; use notification::JustificationNotification; use report::{ReportAuthoritySet, ReportVoterState, ReportedRoundStates}; -type FutureResult = jsonrpc_core::BoxFuture>; - /// Provides RPC methods for interacting with GRANDPA. -#[rpc] +#[rpc(client, server)] pub trait GrandpaApi { - /// RPC Metadata - type Metadata; - /// Returns the state of the current best round state as well as the /// ongoing background rounds. - #[rpc(name = "grandpa_roundState")] - fn round_state(&self) -> FutureResult; + #[method(name = "grandpa_roundState")] + async fn round_state(&self) -> RpcResult; /// Returns the block most recently finalized by Grandpa, alongside /// side its justification. - #[pubsub( - subscription = "grandpa_justifications", - subscribe, - name = "grandpa_subscribeJustifications" - )] - fn subscribe_justifications( - &self, - metadata: Self::Metadata, - subscriber: Subscriber, - ); - - /// Unsubscribe from receiving notifications about recently finalized blocks. - #[pubsub( - subscription = "grandpa_justifications", - unsubscribe, - name = "grandpa_unsubscribeJustifications" + #[subscription( + name = "grandpa_subscribeJustifications" => "grandpa_justifications", + unsubscribe = "grandpa_unsubscribeJustifications", + item = Notification )] - fn unsubscribe_justifications( - &self, - metadata: Option, - id: SubscriptionId, - ) -> jsonrpc_core::Result; + fn subscribe_justifications(&self); /// Prove finality for the given block number by returning the Justification for the last block /// in the set and all the intermediary headers to link them together. - #[rpc(name = "grandpa_proveFinality")] - fn prove_finality(&self, block: Number) -> FutureResult>; + #[method(name = "grandpa_proveFinality")] + async fn prove_finality(&self, block: Number) -> RpcResult>; } -/// Implements the GrandpaApi RPC trait for interacting with GRANDPA. -pub struct GrandpaRpcHandler { +/// Provides RPC methods for interacting with GRANDPA. +pub struct GrandpaRpc { + executor: SubscriptionTaskExecutor, authority_set: AuthoritySet, voter_state: VoterState, justification_stream: GrandpaJustificationStream, - manager: SubscriptionManager, finality_proof_provider: Arc, } - impl - GrandpaRpcHandler + GrandpaRpc { - /// Creates a new GrandpaRpcHandler instance. 
- pub fn new( + /// Prepare a new [`GrandpaRpc`] + pub fn new( + executor: SubscriptionTaskExecutor, authority_set: AuthoritySet, voter_state: VoterState, justification_stream: GrandpaJustificationStream, - executor: E, finality_proof_provider: Arc, - ) -> Self - where - E: Spawn + Sync + Send + 'static, - { - let manager = SubscriptionManager::new(Arc::new(executor)); - Self { authority_set, voter_state, justification_stream, manager, finality_proof_provider } + ) -> Self { + Self { executor, authority_set, voter_state, justification_stream, finality_proof_provider } } } +#[async_trait] impl - GrandpaApi> - for GrandpaRpcHandler + GrandpaApiServer> + for GrandpaRpc where VoterState: ReportVoterState + Send + Sync + 'static, AuthoritySet: ReportAuthoritySet + Send + Sync + 'static, Block: BlockT, ProofProvider: RpcFinalityProofProvider + Send + Sync + 'static, { - type Metadata = sc_rpc::Metadata; - - fn round_state(&self) -> FutureResult { - let round_states = ReportedRoundStates::from(&self.authority_set, &self.voter_state); - let future = async move { round_states }.boxed(); - future.map_err(jsonrpc_core::Error::from).boxed() + async fn round_state(&self) -> RpcResult { + ReportedRoundStates::from(&self.authority_set, &self.voter_state).map_err(Into::into) } - fn subscribe_justifications( - &self, - _metadata: Self::Metadata, - subscriber: Subscriber, - ) { - let stream = self - .justification_stream - .subscribe() - .map(|x| Ok(Ok::<_, jsonrpc_core::Error>(JustificationNotification::from(x)))); - - self.manager.add(subscriber, |sink| { - stream - .forward(sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e))) - .map(|_| ()) - }); - } + fn subscribe_justifications(&self, pending: PendingSubscription) { + let stream = self.justification_stream.subscribe().map( + |x: sc_finality_grandpa::GrandpaJustification| { + JustificationNotification::from(x) + }, + ); - fn unsubscribe_justifications( - &self, - _metadata: Option, - id: SubscriptionId, - ) -> jsonrpc_core::Result { - Ok(self.manager.cancel(id)) + let fut = async move { + if let Some(mut sink) = pending.accept() { + sink.pipe_from_stream(stream).await; + } + } + .boxed(); + + self.executor + .spawn("substrate-rpc-subscription", Some("rpc"), fut.map(drop).boxed()); } - fn prove_finality( + async fn prove_finality( &self, block: NumberFor, - ) -> FutureResult> { - let result = self.finality_proof_provider.rpc_prove_finality(block); - let future = async move { result }.boxed(); - future + ) -> RpcResult> { + self.finality_proof_provider + .rpc_prove_finality(block) .map_err(|e| { warn!("Error proving finality: {}", e); error::Error::ProveFinalityFailed(e) }) - .map_err(jsonrpc_core::Error::from) - .boxed() + .map_err(Into::into) } } #[cfg(test)] mod tests { use super::*; - use jsonrpc_core::{types::Params, Notification, Output}; - use std::{collections::HashSet, sync::Arc}; + use std::{collections::HashSet, convert::TryInto, sync::Arc}; + use jsonrpsee::{ + types::{EmptyParams, SubscriptionId}, + RpcModule, + }; use parity_scale_codec::{Decode, Encode}; use sc_block_builder::{BlockBuilder, RecordProof}; use sc_finality_grandpa::{ report, AuthorityId, FinalityProof, GrandpaJustification, GrandpaJustificationSender, }; use sp_blockchain::HeaderBackend; - use sp_core::crypto::ByteArray; + use sp_core::{crypto::ByteArray, testing::TaskExecutor}; use sp_keyring::Ed25519Keyring; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use substrate_test_runtime_client::{ @@ -274,7 +244,10 @@ mod tests { fn setup_io_handler( 
voter_state: VoterState, - ) -> (jsonrpc_core::MetaIoHandler, GrandpaJustificationSender) + ) -> ( + RpcModule>, + GrandpaJustificationSender, + ) where VoterState: ReportVoterState + Send + Sync + 'static, { @@ -284,120 +257,107 @@ mod tests { fn setup_io_handler_with_finality_proofs( voter_state: VoterState, finality_proof: Option>, - ) -> (jsonrpc_core::MetaIoHandler, GrandpaJustificationSender) + ) -> ( + RpcModule>, + GrandpaJustificationSender, + ) where VoterState: ReportVoterState + Send + Sync + 'static, { let (justification_sender, justification_stream) = GrandpaJustificationStream::channel(); let finality_proof_provider = Arc::new(TestFinalityProofProvider { finality_proof }); + let executor = Arc::new(TaskExecutor::default()); - let handler = GrandpaRpcHandler::new( + let rpc = GrandpaRpc::new( + executor, TestAuthoritySet, voter_state, justification_stream, - sc_rpc::testing::TaskExecutor, finality_proof_provider, - ); - - let mut io = jsonrpc_core::MetaIoHandler::default(); - io.extend_with(GrandpaApi::to_delegate(handler)); + ) + .into_rpc(); - (io, justification_sender) + (rpc, justification_sender) } - #[test] - fn uninitialized_rpc_handler() { - let (io, _) = setup_io_handler(EmptyVoterState); - - let request = r#"{"jsonrpc":"2.0","method":"grandpa_roundState","params":[],"id":1}"#; - let response = r#"{"jsonrpc":"2.0","error":{"code":1,"message":"GRANDPA RPC endpoint not ready"},"id":1}"#; + #[tokio::test] + async fn uninitialized_rpc_handler() { + let (rpc, _) = setup_io_handler(EmptyVoterState); + let expected_response = r#"{"jsonrpc":"2.0","error":{"code":1,"message":"GRANDPA RPC endpoint not ready"},"id":0}"#.to_string(); + let request = r#"{"jsonrpc":"2.0","method":"grandpa_roundState","params":[],"id":0}"#; + let (result, _) = rpc.raw_json_request(&request).await.unwrap(); - let meta = sc_rpc::Metadata::default(); - assert_eq!(Some(response.into()), io.handle_request_sync(request, meta)); + assert_eq!(expected_response, result,); } - #[test] - fn working_rpc_handler() { - let (io, _) = setup_io_handler(TestVoterState); - - let request = r#"{"jsonrpc":"2.0","method":"grandpa_roundState","params":[],"id":1}"#; - let response = "{\"jsonrpc\":\"2.0\",\"result\":{\ - \"background\":[{\ - \"precommits\":{\"currentWeight\":100,\"missing\":[]},\ - \"prevotes\":{\"currentWeight\":100,\"missing\":[]},\ - \"round\":1,\"thresholdWeight\":67,\"totalWeight\":100\ - }],\ + #[tokio::test] + async fn working_rpc_handler() { + let (rpc, _) = setup_io_handler(TestVoterState); + let expected_response = "{\"jsonrpc\":\"2.0\",\"result\":{\ + \"setId\":1,\ \"best\":{\ - \"precommits\":{\"currentWeight\":0,\"missing\":[\"5C62Ck4UrFPiBtoCmeSrgF7x9yv9mn38446dhCpsi2mLHiFT\",\"5C7LYpP2ZH3tpKbvVvwiVe54AapxErdPBbvkYhe6y9ZBkqWt\"]},\ + \"round\":2,\"totalWeight\":100,\"thresholdWeight\":67,\ \"prevotes\":{\"currentWeight\":50,\"missing\":[\"5C7LYpP2ZH3tpKbvVvwiVe54AapxErdPBbvkYhe6y9ZBkqWt\"]},\ - \"round\":2,\"thresholdWeight\":67,\"totalWeight\":100\ + \"precommits\":{\"currentWeight\":0,\"missing\":[\"5C62Ck4UrFPiBtoCmeSrgF7x9yv9mn38446dhCpsi2mLHiFT\",\"5C7LYpP2ZH3tpKbvVvwiVe54AapxErdPBbvkYhe6y9ZBkqWt\"]}\ },\ - \"setId\":1\ - },\"id\":1}"; - - let meta = sc_rpc::Metadata::default(); - assert_eq!(io.handle_request_sync(request, meta), Some(response.into())); - } + \"background\":[{\ + \"round\":1,\"totalWeight\":100,\"thresholdWeight\":67,\ + \"prevotes\":{\"currentWeight\":100,\"missing\":[]},\ + \"precommits\":{\"currentWeight\":100,\"missing\":[]}\ + }]\ + },\"id\":0}".to_string(); - fn 
setup_session() -> (sc_rpc::Metadata, futures::channel::mpsc::UnboundedReceiver) { - let (tx, rx) = futures::channel::mpsc::unbounded(); - let meta = sc_rpc::Metadata::new(tx); - (meta, rx) + let request = r#"{"jsonrpc":"2.0","method":"grandpa_roundState","params":[],"id":0}"#; + let (result, _) = rpc.raw_json_request(&request).await.unwrap(); + assert_eq!(expected_response, result); } - #[test] - fn subscribe_and_unsubscribe_to_justifications() { - let (io, _) = setup_io_handler(TestVoterState); - let (meta, _) = setup_session(); + #[tokio::test] + async fn subscribe_and_unsubscribe_to_justifications() { + let (rpc, _) = setup_io_handler(TestVoterState); + // Subscribe call. + let sub = rpc + .subscribe("grandpa_subscribeJustifications", EmptyParams::new()) + .await + .unwrap(); - // Subscribe - let sub_request = - r#"{"jsonrpc":"2.0","method":"grandpa_subscribeJustifications","params":[],"id":1}"#; - let resp = io.handle_request_sync(sub_request, meta.clone()); - let resp: Output = serde_json::from_str(&resp.unwrap()).unwrap(); - - let sub_id = match resp { - Output::Success(success) => success.result, - _ => panic!(), - }; + let ser_id = serde_json::to_string(sub.subscription_id()).unwrap(); // Unsubscribe let unsub_req = format!( "{{\"jsonrpc\":\"2.0\",\"method\":\"grandpa_unsubscribeJustifications\",\"params\":[{}],\"id\":1}}", - sub_id - ); - assert_eq!( - io.handle_request_sync(&unsub_req, meta.clone()), - Some(r#"{"jsonrpc":"2.0","result":true,"id":1}"#.into()), + ser_id ); + let (response, _) = rpc.raw_json_request(&unsub_req).await.unwrap(); + + assert_eq!(response, r#"{"jsonrpc":"2.0","result":true,"id":1}"#); // Unsubscribe again and fail - assert_eq!( - io.handle_request_sync(&unsub_req, meta), - Some("{\"jsonrpc\":\"2.0\",\"error\":{\"code\":-32602,\"message\":\"Invalid subscription id.\"},\"id\":1}".into()), - ); - } + let (response, _) = rpc.raw_json_request(&unsub_req).await.unwrap(); + let expected = r#"{"jsonrpc":"2.0","result":false,"id":1}"#; - #[test] - fn subscribe_and_unsubscribe_with_wrong_id() { - let (io, _) = setup_io_handler(TestVoterState); - let (meta, _) = setup_session(); + assert_eq!(response, expected); + } - // Subscribe - let sub_request = - r#"{"jsonrpc":"2.0","method":"grandpa_subscribeJustifications","params":[],"id":1}"#; - let resp = io.handle_request_sync(sub_request, meta.clone()); - let resp: Output = serde_json::from_str(&resp.unwrap()).unwrap(); - assert!(matches!(resp, Output::Success(_))); + #[tokio::test] + async fn subscribe_and_unsubscribe_with_wrong_id() { + let (rpc, _) = setup_io_handler(TestVoterState); + // Subscribe call. 
+ let _sub = rpc + .subscribe("grandpa_subscribeJustifications", EmptyParams::new()) + .await + .unwrap(); // Unsubscribe with wrong ID - assert_eq!( - io.handle_request_sync( + let (response, _) = rpc + .raw_json_request( r#"{"jsonrpc":"2.0","method":"grandpa_unsubscribeJustifications","params":["FOO"],"id":1}"#, - meta.clone() - ), - Some("{\"jsonrpc\":\"2.0\",\"error\":{\"code\":-32602,\"message\":\"Invalid subscription id.\"},\"id\":1}".into()) - ); + ) + .await + .unwrap(); + let expected = r#"{"jsonrpc":"2.0","result":false,"id":1}"#; + + assert_eq!(response, expected); } fn create_justification() -> GrandpaJustification { @@ -454,60 +414,41 @@ mod tests { justification } - #[test] - fn subscribe_and_listen_to_one_justification() { - let (io, justification_sender) = setup_io_handler(TestVoterState); - let (meta, receiver) = setup_session(); - - // Subscribe - let sub_request = - r#"{"jsonrpc":"2.0","method":"grandpa_subscribeJustifications","params":[],"id":1}"#; + #[tokio::test] + async fn subscribe_and_listen_to_one_justification() { + let (rpc, justification_sender) = setup_io_handler(TestVoterState); - let resp = io.handle_request_sync(sub_request, meta.clone()); - let mut resp: serde_json::Value = serde_json::from_str(&resp.unwrap()).unwrap(); - let sub_id: String = serde_json::from_value(resp["result"].take()).unwrap(); + let mut sub = rpc + .subscribe("grandpa_subscribeJustifications", EmptyParams::new()) + .await + .unwrap(); // Notify with a header and justification let justification = create_justification(); justification_sender.notify(|| Ok::<_, ()>(justification.clone())).unwrap(); // Inspect what we received - let recv = futures::executor::block_on(receiver.take(1).collect::>()); - let recv: Notification = serde_json::from_str(&recv[0]).unwrap(); - let mut json_map = match recv.params { - Params::Map(json_map) => json_map, - _ => panic!(), - }; - - let recv_sub_id: String = serde_json::from_value(json_map["subscription"].take()).unwrap(); - let recv_justification: sp_core::Bytes = - serde_json::from_value(json_map["result"].take()).unwrap(); + let (recv_justification, recv_sub_id): (sp_core::Bytes, SubscriptionId) = + sub.next().await.unwrap().unwrap(); let recv_justification: GrandpaJustification = Decode::decode(&mut &recv_justification[..]).unwrap(); - assert_eq!(recv.method, "grandpa_justifications"); - assert_eq!(recv_sub_id, sub_id); + assert_eq!(&recv_sub_id, sub.subscription_id()); assert_eq!(recv_justification, justification); } - #[test] - fn prove_finality_with_test_finality_proof_provider() { + #[tokio::test] + async fn prove_finality_with_test_finality_proof_provider() { let finality_proof = FinalityProof { block: header(42).hash(), justification: create_justification().encode(), unknown_headers: vec![header(2)], }; - let (io, _) = + let (rpc, _) = setup_io_handler_with_finality_proofs(TestVoterState, Some(finality_proof.clone())); - let request = - "{\"jsonrpc\":\"2.0\",\"method\":\"grandpa_proveFinality\",\"params\":[42],\"id\":1}"; - - let meta = sc_rpc::Metadata::default(); - let resp = io.handle_request_sync(request, meta); - let mut resp: serde_json::Value = serde_json::from_str(&resp.unwrap()).unwrap(); - let result: sp_core::Bytes = serde_json::from_value(resp["result"].take()).unwrap(); - let finality_proof_rpc: FinalityProof
= Decode::decode(&mut &result[..]).unwrap(); + let bytes: sp_core::Bytes = rpc.call("grandpa_proveFinality", [42]).await.unwrap(); + let finality_proof_rpc: FinalityProof
= Decode::decode(&mut &bytes[..]).unwrap(); assert_eq!(finality_proof_rpc, finality_proof); } } diff --git a/client/rpc-api/Cargo.toml b/client/rpc-api/Cargo.toml index 0287b0fd30799..f8dfaab2a58a3 100644 --- a/client/rpc-api/Cargo.toml +++ b/client/rpc-api/Cargo.toml @@ -15,10 +15,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" -jsonrpc-core = "18.0.0" -jsonrpc-core-client = "18.0.0" -jsonrpc-derive = "18.0.0" -jsonrpc-pubsub = "18.0.0" log = "0.4.16" parking_lot = "0.12.0" scale-info = { version = "2.0.1", default-features = false, features = ["derive"] } @@ -32,3 +28,4 @@ sp-rpc = { version = "6.0.0", path = "../../primitives/rpc" } sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } sp-tracing = { version = "5.0.0", path = "../../primitives/tracing" } sp-version = { version = "5.0.0", path = "../../primitives/version" } +jsonrpsee = { version = "0.12.0", features = ["server", "macros"] } diff --git a/client/rpc-api/src/author/error.rs b/client/rpc-api/src/author/error.rs index 1f8c65c471398..57a27d48de3ad 100644 --- a/client/rpc-api/src/author/error.rs +++ b/client/rpc-api/src/author/error.rs @@ -18,28 +18,27 @@ //! Authoring RPC module errors. -use crate::errors; -use jsonrpc_core as rpc; +use jsonrpsee::{ + core::Error as JsonRpseeError, + types::error::{CallError, ErrorObject}, +}; use sp_runtime::transaction_validity::InvalidTransaction; /// Author RPC Result type. pub type Result = std::result::Result; -/// Author RPC future Result type. -pub type FutureResult = jsonrpc_core::BoxFuture>; - /// Author RPC errors. #[derive(Debug, thiserror::Error)] pub enum Error { /// Client error. #[error("Client error: {}", .0)] - Client(Box), + Client(Box), /// Transaction pool error, #[error("Transaction pool error: {}", .0)] Pool(#[from] sc_transaction_pool_api::error::Error), /// Verification error #[error("Extrinsic verification error: {}", .0)] - Verification(Box), + Verification(Box), /// Incorrect extrinsic format. #[error("Invalid extrinsic format: {}", .0)] BadFormat(#[from] codec::Error), @@ -58,98 +57,127 @@ pub enum Error { } /// Base code for all authorship errors. -const BASE_ERROR: i64 = 1000; +const BASE_ERROR: i32 = 1000; /// Extrinsic has an invalid format. -const BAD_FORMAT: i64 = BASE_ERROR + 1; +const BAD_FORMAT: i32 = BASE_ERROR + 1; /// Error during transaction verification in runtime. -const VERIFICATION_ERROR: i64 = BASE_ERROR + 2; +const VERIFICATION_ERROR: i32 = BASE_ERROR + 2; /// Pool rejected the transaction as invalid -const POOL_INVALID_TX: i64 = BASE_ERROR + 10; +const POOL_INVALID_TX: i32 = BASE_ERROR + 10; /// Cannot determine transaction validity. -const POOL_UNKNOWN_VALIDITY: i64 = POOL_INVALID_TX + 1; +const POOL_UNKNOWN_VALIDITY: i32 = POOL_INVALID_TX + 1; /// The transaction is temporarily banned. -const POOL_TEMPORARILY_BANNED: i64 = POOL_INVALID_TX + 2; +const POOL_TEMPORARILY_BANNED: i32 = POOL_INVALID_TX + 2; /// The transaction is already in the pool -const POOL_ALREADY_IMPORTED: i64 = POOL_INVALID_TX + 3; +const POOL_ALREADY_IMPORTED: i32 = POOL_INVALID_TX + 3; /// Transaction has too low priority to replace existing one in the pool. -const POOL_TOO_LOW_PRIORITY: i64 = POOL_INVALID_TX + 4; +const POOL_TOO_LOW_PRIORITY: i32 = POOL_INVALID_TX + 4; /// Including this transaction would cause a dependency cycle. 
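The error-code constants above and the conversion that follows use one pattern repeated throughout this patch: each domain error becomes a `CallError::Custom` built from `ErrorObject::owned(code, message, data)` and is then converted into a jsonrpsee error. A minimal sketch of that shape (the `ExampleError` type and its code are illustrative, not part of this patch):

```rust
use jsonrpsee::{
	core::Error as JsonRpseeError,
	types::error::{CallError, ErrorObject},
};

/// Illustrative base code; the real modules reserve their own ranges (1000, 2000, ...).
const BASE_ERROR: i32 = 9000;
const SOMETHING_FAILED: i32 = BASE_ERROR + 1;

/// Illustrative error type, not part of this patch.
#[derive(Debug, thiserror::Error)]
enum ExampleError {
	#[error("something failed: {0}")]
	SomethingFailed(String),
}

impl From<ExampleError> for JsonRpseeError {
	fn from(e: ExampleError) -> Self {
		match e {
			ExampleError::SomethingFailed(details) => CallError::Custom(ErrorObject::owned(
				SOMETHING_FAILED,
				"Something failed",
				// The optional third argument becomes the JSON-RPC `data` field.
				Some(details),
			))
			.into(),
		}
	}
}
```

Keeping the numeric codes in named constants, as the hunk above does, preserves the old `BASE_ERROR + n` scheme while the representation moves from `i64` to jsonrpsee's `i32`.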
-const POOL_CYCLE_DETECTED: i64 = POOL_INVALID_TX + 5; +const POOL_CYCLE_DETECTED: i32 = POOL_INVALID_TX + 5; /// The transaction was not included to the pool because of the limits. -const POOL_IMMEDIATELY_DROPPED: i64 = POOL_INVALID_TX + 6; +const POOL_IMMEDIATELY_DROPPED: i32 = POOL_INVALID_TX + 6; /// The transaction was not included to the pool since it is unactionable, /// it is not propagable and the local node does not author blocks. -const POOL_UNACTIONABLE: i64 = POOL_INVALID_TX + 8; +const POOL_UNACTIONABLE: i32 = POOL_INVALID_TX + 8; +/// Transaction does not provide any tags, so the pool can't identify it. +const POOL_NO_TAGS: i32 = POOL_INVALID_TX + 9; +/// Invalid block ID. +const POOL_INVALID_BLOCK_ID: i32 = POOL_INVALID_TX + 10; +/// The pool is not accepting future transactions. +const POOL_FUTURE_TX: i32 = POOL_INVALID_TX + 11; -impl From for rpc::Error { +impl From for JsonRpseeError { fn from(e: Error) -> Self { use sc_transaction_pool_api::error::Error as PoolError; match e { - Error::BadFormat(e) => rpc::Error { - code: rpc::ErrorCode::ServerError(BAD_FORMAT), - message: format!("Extrinsic has invalid format: {}", e), - data: None, - }, - Error::Verification(e) => rpc::Error { - code: rpc::ErrorCode::ServerError(VERIFICATION_ERROR), - message: format!("Verification Error: {}", e), - data: Some(e.to_string().into()), - }, - Error::Pool(PoolError::InvalidTransaction(InvalidTransaction::Custom(e))) => rpc::Error { - code: rpc::ErrorCode::ServerError(POOL_INVALID_TX), - message: "Invalid Transaction".into(), - data: Some(format!("Custom error: {}", e).into()), + Error::BadFormat(e) => CallError::Custom(ErrorObject::owned( + BAD_FORMAT, + format!("Extrinsic has invalid format: {}", e), + None::<()>, + )), + Error::Verification(e) => CallError::Custom(ErrorObject::owned( + VERIFICATION_ERROR, + format!("Verification Error: {}", e), + Some(format!("{:?}", e)), + )), + Error::Pool(PoolError::InvalidTransaction(InvalidTransaction::Custom(e))) => { + CallError::Custom(ErrorObject::owned( + POOL_INVALID_TX, + "Invalid Transaction", + Some(format!("Custom error: {}", e)), + )) }, Error::Pool(PoolError::InvalidTransaction(e)) => { let msg: &str = e.into(); - rpc::Error { - code: rpc::ErrorCode::ServerError(POOL_INVALID_TX), - message: "Invalid Transaction".into(), - data: Some(msg.into()), - } - }, - Error::Pool(PoolError::UnknownTransaction(e)) => rpc::Error { - code: rpc::ErrorCode::ServerError(POOL_UNKNOWN_VALIDITY), - message: "Unknown Transaction Validity".into(), - data: serde_json::to_value(e).ok(), - }, - Error::Pool(PoolError::TemporarilyBanned) => rpc::Error { - code: rpc::ErrorCode::ServerError(POOL_TEMPORARILY_BANNED), - message: "Transaction is temporarily banned".into(), - data: None, - }, - Error::Pool(PoolError::AlreadyImported(hash)) => rpc::Error { - code: rpc::ErrorCode::ServerError(POOL_ALREADY_IMPORTED), - message: "Transaction Already Imported".into(), - data: Some(format!("{:?}", hash).into()), - }, - Error::Pool(PoolError::TooLowPriority { old, new }) => rpc::Error { - code: rpc::ErrorCode::ServerError(POOL_TOO_LOW_PRIORITY), - message: format!("Priority is too low: ({} vs {})", old, new), - data: Some("The transaction has too low priority to replace another transaction already in the pool.".into()), - }, - Error::Pool(PoolError::CycleDetected) => rpc::Error { - code: rpc::ErrorCode::ServerError(POOL_CYCLE_DETECTED), - message: "Cycle Detected".into(), - data: None, + CallError::Custom(ErrorObject::owned( + POOL_INVALID_TX, + "Invalid Transaction", + 
Some(msg), + )) }, - Error::Pool(PoolError::ImmediatelyDropped) => rpc::Error { - code: rpc::ErrorCode::ServerError(POOL_IMMEDIATELY_DROPPED), - message: "Immediately Dropped".into(), - data: Some("The transaction couldn't enter the pool because of the limit".into()), + Error::Pool(PoolError::UnknownTransaction(e)) => { + CallError::Custom(ErrorObject::owned( + POOL_UNKNOWN_VALIDITY, + "Unknown Transaction Validity", + Some(format!("{:?}", e)), + )) }, - Error::Pool(PoolError::Unactionable) => rpc::Error { - code: rpc::ErrorCode::ServerError(POOL_UNACTIONABLE), - message: "Unactionable".into(), - data: Some( - "The transaction is unactionable since it is not propagable and \ - the local node does not author blocks".into(), - ), + Error::Pool(PoolError::TemporarilyBanned) => + CallError::Custom(ErrorObject::owned( + POOL_TEMPORARILY_BANNED, + "Transaction is temporarily banned", + None::<()>, + )), + Error::Pool(PoolError::AlreadyImported(hash)) => + CallError::Custom(ErrorObject::owned( + POOL_ALREADY_IMPORTED, + "Transaction Already Imported", + Some(format!("{:?}", hash)), + )), + Error::Pool(PoolError::TooLowPriority { old, new }) => CallError::Custom(ErrorObject::owned( + POOL_TOO_LOW_PRIORITY, + format!("Priority is too low: ({} vs {})", old, new), + Some("The transaction has too low priority to replace another transaction already in the pool.") + )), + Error::Pool(PoolError::CycleDetected) => + CallError::Custom(ErrorObject::owned( + POOL_CYCLE_DETECTED, + "Cycle Detected", + None::<()> + )), + Error::Pool(PoolError::ImmediatelyDropped) => CallError::Custom(ErrorObject::owned( + POOL_IMMEDIATELY_DROPPED, + "Immediately Dropped", + Some("The transaction couldn't enter the pool because of the limit"), + )), + Error::Pool(PoolError::Unactionable) => CallError::Custom(ErrorObject::owned( + POOL_UNACTIONABLE, + "Unactionable", + Some("The transaction is unactionable since it is not propagable and \ + the local node does not author blocks") + )), + Error::Pool(PoolError::NoTagsProvided) => CallError::Custom(ErrorObject::owned( + POOL_NO_TAGS, + "No tags provided", + Some("Transaction does not provide any tags, so the pool can't identify it") + )), + Error::Pool(PoolError::InvalidBlockId(_)) => + CallError::Custom(ErrorObject::owned( + POOL_INVALID_BLOCK_ID, + "The provided block ID is not valid", + None::<()> + )), + Error::Pool(PoolError::RejectedFutureTransaction) => { + CallError::Custom(ErrorObject::owned( + POOL_FUTURE_TX, + "The pool is not accepting future transactions", + None::<()>, + )) }, Error::UnsafeRpcCalled(e) => e.into(), - e => errors::internal(e), - } + e => CallError::Failed(e.into()), + }.into() } } diff --git a/client/rpc-api/src/author/mod.rs b/client/rpc-api/src/author/mod.rs index 84167ee95d108..feba7640e3b9f 100644 --- a/client/rpc-api/src/author/mod.rs +++ b/client/rpc-api/src/author/mod.rs @@ -18,85 +18,61 @@ //! Substrate block-author/full-node API. -pub mod error; -pub mod hash; - -use self::error::{FutureResult, Result}; -use jsonrpc_derive::rpc; -use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use sc_transaction_pool_api::TransactionStatus; use sp_core::Bytes; -pub use self::gen_client::Client as AuthorClient; +pub mod error; +pub mod hash; /// Substrate authoring RPC API -#[rpc] +#[rpc(client, server)] pub trait AuthorApi { - /// RPC metadata - type Metadata; - /// Submit hex-encoded extrinsic for inclusion in block. 
- #[rpc(name = "author_submitExtrinsic")] - fn submit_extrinsic(&self, extrinsic: Bytes) -> FutureResult; + #[method(name = "author_submitExtrinsic")] + async fn submit_extrinsic(&self, extrinsic: Bytes) -> RpcResult; /// Insert a key into the keystore. - #[rpc(name = "author_insertKey")] - fn insert_key(&self, key_type: String, suri: String, public: Bytes) -> Result<()>; + #[method(name = "author_insertKey")] + fn insert_key(&self, key_type: String, suri: String, public: Bytes) -> RpcResult<()>; /// Generate new session keys and returns the corresponding public keys. - #[rpc(name = "author_rotateKeys")] - fn rotate_keys(&self) -> Result; + #[method(name = "author_rotateKeys")] + fn rotate_keys(&self) -> RpcResult; /// Checks if the keystore has private keys for the given session public keys. /// /// `session_keys` is the SCALE encoded session keys object from the runtime. /// /// Returns `true` iff all private keys could be found. - #[rpc(name = "author_hasSessionKeys")] - fn has_session_keys(&self, session_keys: Bytes) -> Result; + #[method(name = "author_hasSessionKeys")] + fn has_session_keys(&self, session_keys: Bytes) -> RpcResult; /// Checks if the keystore has private keys for the given public key and key type. /// /// Returns `true` if a private key could be found. - #[rpc(name = "author_hasKey")] - fn has_key(&self, public_key: Bytes, key_type: String) -> Result; + #[method(name = "author_hasKey")] + fn has_key(&self, public_key: Bytes, key_type: String) -> RpcResult; /// Returns all pending extrinsics, potentially grouped by sender. - #[rpc(name = "author_pendingExtrinsics")] - fn pending_extrinsics(&self) -> Result>; + #[method(name = "author_pendingExtrinsics")] + fn pending_extrinsics(&self) -> RpcResult>; /// Remove given extrinsic from the pool and temporarily ban it to prevent reimporting. - #[rpc(name = "author_removeExtrinsic")] + #[method(name = "author_removeExtrinsic")] fn remove_extrinsic( &self, bytes_or_hash: Vec>, - ) -> Result>; + ) -> RpcResult>; /// Submit an extrinsic to watch. /// /// See [`TransactionStatus`](sc_transaction_pool_api::TransactionStatus) for details on /// transaction life cycle. - #[pubsub( - subscription = "author_extrinsicUpdate", - subscribe, - name = "author_submitAndWatchExtrinsic" + #[subscription( + name = "author_submitAndWatchExtrinsic" => "author_extrinsicUpdate", + unsubscribe = "author_unwatchExtrinsic", + item = TransactionStatus, )] - fn watch_extrinsic( - &self, - metadata: Self::Metadata, - subscriber: Subscriber>, - bytes: Bytes, - ); - - /// Unsubscribe from extrinsic watching. - #[pubsub( - subscription = "author_extrinsicUpdate", - unsubscribe, - name = "author_unwatchExtrinsic" - )] - fn unwatch_extrinsic( - &self, - metadata: Option, - id: SubscriptionId, - ) -> Result; + fn watch_extrinsic(&self, bytes: Bytes); } diff --git a/client/rpc-api/src/chain/error.rs b/client/rpc-api/src/chain/error.rs index a0cacb6739155..670e221cf1cde 100644 --- a/client/rpc-api/src/chain/error.rs +++ b/client/rpc-api/src/chain/error.rs @@ -18,38 +18,33 @@ //! Error helpers for Chain RPC module. -use crate::errors; -use jsonrpc_core as rpc; - +use jsonrpsee::{ + core::Error as JsonRpseeError, + types::error::{CallError, ErrorObject}, +}; /// Chain RPC Result type. pub type Result = std::result::Result; -/// Chain RPC future Result type. -pub type FutureResult = jsonrpc_core::BoxFuture>; - /// Chain RPC errors. #[derive(Debug, thiserror::Error)] pub enum Error { /// Client error. 
#[error("Client error: {}", .0)] - Client(#[from] Box), + Client(#[from] Box), /// Other error type. #[error("{0}")] Other(String), } /// Base error code for all chain errors. -const BASE_ERROR: i64 = 3000; +const BASE_ERROR: i32 = 3000; -impl From for rpc::Error { +impl From for JsonRpseeError { fn from(e: Error) -> Self { match e { - Error::Other(message) => rpc::Error { - code: rpc::ErrorCode::ServerError(BASE_ERROR + 1), - message, - data: None, - }, - e => errors::internal(e), + Error::Other(message) => + CallError::Custom(ErrorObject::owned(BASE_ERROR + 1, message, None::<()>)).into(), + e => e.into(), } } } diff --git a/client/rpc-api/src/chain/mod.rs b/client/rpc-api/src/chain/mod.rs index d7d598942f1ea..f5f9524264e34 100644 --- a/client/rpc-api/src/chain/mod.rs +++ b/client/rpc-api/src/chain/mod.rs @@ -18,96 +18,59 @@ //! Substrate blockchain API. -pub mod error; - -use self::error::{FutureResult, Result}; -use jsonrpc_core::Result as RpcResult; -use jsonrpc_derive::rpc; -use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use sp_rpc::{list::ListOrValue, number::NumberOrHex}; -pub use self::gen_client::Client as ChainClient; +pub mod error; -/// Substrate blockchain API -#[rpc] +#[rpc(client, server)] pub trait ChainApi { - /// RPC metadata - type Metadata; - - /// Get header of a relay chain block. - #[rpc(name = "chain_getHeader")] - fn header(&self, hash: Option) -> FutureResult>; + /// Get header. + #[method(name = "chain_getHeader")] + async fn header(&self, hash: Option) -> RpcResult>; /// Get header and body of a relay chain block. - #[rpc(name = "chain_getBlock")] - fn block(&self, hash: Option) -> FutureResult>; + #[method(name = "chain_getBlock")] + async fn block(&self, hash: Option) -> RpcResult>; /// Get hash of the n-th block in the canon chain. /// /// By default returns latest block hash. - #[rpc(name = "chain_getBlockHash", alias("chain_getHead"))] + #[method(name = "chain_getBlockHash", aliases = ["chain_getHead"])] fn block_hash( &self, hash: Option>, - ) -> Result>>; + ) -> RpcResult>>; /// Get hash of the last finalized block in the canon chain. - #[rpc(name = "chain_getFinalizedHead", alias("chain_getFinalisedHead"))] - fn finalized_head(&self) -> Result; - - /// All head subscription - #[pubsub(subscription = "chain_allHead", subscribe, name = "chain_subscribeAllHeads")] - fn subscribe_all_heads(&self, metadata: Self::Metadata, subscriber: Subscriber
); - - /// Unsubscribe from all head subscription. - #[pubsub(subscription = "chain_allHead", unsubscribe, name = "chain_unsubscribeAllHeads")] - fn unsubscribe_all_heads( - &self, - metadata: Option, - id: SubscriptionId, - ) -> RpcResult; - - /// New head subscription - #[pubsub( - subscription = "chain_newHead", - subscribe, - name = "chain_subscribeNewHeads", - alias("subscribe_newHead", "chain_subscribeNewHead") + #[method(name = "chain_getFinalizedHead", aliases = ["chain_getFinalisedHead"])] + fn finalized_head(&self) -> RpcResult; + + /// All head subscription. + #[subscription( + name = "chain_subscribeAllHeads" => "chain_allHead", + unsubscribe = "chain_unsubscribeAllHeads", + item = Header )] - fn subscribe_new_heads(&self, metadata: Self::Metadata, subscriber: Subscriber
); - - /// Unsubscribe from new head subscription. - #[pubsub( - subscription = "chain_newHead", - unsubscribe, - name = "chain_unsubscribeNewHeads", - alias("unsubscribe_newHead", "chain_unsubscribeNewHead") + fn subscribe_all_heads(&self); + + /// New head subscription. + #[subscription( + name = "chain_subscribeNewHeads" => "chain_newHead", + aliases = ["subscribe_newHead", "chain_subscribeNewHead"], + unsubscribe = "chain_unsubscribeNewHeads", + unsubscribe_aliases = ["unsubscribe_newHead", "chain_unsubscribeNewHead"], + item = Header )] - fn unsubscribe_new_heads( - &self, - metadata: Option, - id: SubscriptionId, - ) -> RpcResult; - - /// Finalized head subscription - #[pubsub( - subscription = "chain_finalizedHead", - subscribe, - name = "chain_subscribeFinalizedHeads", - alias("chain_subscribeFinalisedHeads") + fn subscribe_new_heads(&self); + + /// Finalized head subscription. + #[subscription( + name = "chain_subscribeFinalizedHeads" => "chain_finalizedHead", + aliases = ["chain_subscribeFinalisedHeads"], + unsubscribe = "chain_unsubscribeFinalizedHeads", + unsubscribe_aliases = ["chain_unsubscribeFinalisedHeads"], + item = Header )] - fn subscribe_finalized_heads(&self, metadata: Self::Metadata, subscriber: Subscriber
); - - /// Unsubscribe from finalized head subscription. - #[pubsub( - subscription = "chain_finalizedHead", - unsubscribe, - name = "chain_unsubscribeFinalizedHeads", - alias("chain_unsubscribeFinalisedHeads") - )] - fn unsubscribe_finalized_heads( - &self, - metadata: Option, - id: SubscriptionId, - ) -> RpcResult; + fn subscribe_finalized_heads(&self); } diff --git a/client/rpc-api/src/child_state/mod.rs b/client/rpc-api/src/child_state/mod.rs index 6b4cd20f22605..a15b1a0e7ee05 100644 --- a/client/rpc-api/src/child_state/mod.rs +++ b/client/rpc-api/src/child_state/mod.rs @@ -16,89 +16,82 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Substrate state API. - -use crate::state::error::FutureResult; -use jsonrpc_derive::rpc; -use sp_core::storage::{PrefixedStorageKey, StorageData, StorageKey}; - -pub use self::gen_client::Client as ChildStateClient; +//! Substrate child state API use crate::state::ReadProof; +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; +use sp_core::storage::{PrefixedStorageKey, StorageData, StorageKey}; /// Substrate child state API /// /// Note that all `PrefixedStorageKey` are deserialized /// from json and not guaranteed valid. -#[rpc] +#[rpc(client, server)] pub trait ChildStateApi { - /// RPC Metadata - type Metadata; - - /// DEPRECATED: Please use `childstate_getKeysPaged` with proper paging support. /// Returns the keys with prefix from a child storage, leave empty to get all the keys - #[rpc(name = "childstate_getKeys")] - fn storage_keys( + #[method(name = "childstate_getKeys")] + #[deprecated(since = "2.0.0", note = "Please use `getKeysPaged` with proper paging support")] + async fn storage_keys( &self, child_storage_key: PrefixedStorageKey, prefix: StorageKey, hash: Option, - ) -> FutureResult>; + ) -> RpcResult>; /// Returns the keys with prefix from a child storage with pagination support. /// Up to `count` keys will be returned. /// If `start_key` is passed, return next keys in storage in lexicographic order. - #[rpc(name = "childstate_getKeysPaged", alias("childstate_getKeysPagedAt"))] - fn storage_keys_paged( + #[method(name = "childstate_getKeysPaged", aliases = ["childstate_getKeysPagedAt"])] + async fn storage_keys_paged( &self, child_storage_key: PrefixedStorageKey, prefix: Option, count: u32, start_key: Option, hash: Option, - ) -> FutureResult>; + ) -> RpcResult>; /// Returns a child storage entry at a specific block's state. - #[rpc(name = "childstate_getStorage")] - fn storage( + #[method(name = "childstate_getStorage")] + async fn storage( &self, child_storage_key: PrefixedStorageKey, key: StorageKey, hash: Option, - ) -> FutureResult>; + ) -> RpcResult>; /// Returns child storage entries for multiple keys at a specific block's state. - #[rpc(name = "childstate_getStorageEntries")] - fn storage_entries( + #[method(name = "childstate_getStorageEntries")] + async fn storage_entries( &self, child_storage_key: PrefixedStorageKey, keys: Vec, hash: Option, - ) -> FutureResult>>; + ) -> RpcResult>>; /// Returns the hash of a child storage entry at a block's state. - #[rpc(name = "childstate_getStorageHash")] - fn storage_hash( + #[method(name = "childstate_getStorageHash")] + async fn storage_hash( &self, child_storage_key: PrefixedStorageKey, key: StorageKey, hash: Option, - ) -> FutureResult>; + ) -> RpcResult>; /// Returns the size of a child storage entry at a block's state. 
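The `#[subscription]` declarations above only describe the wire format; on the server side the generated trait hands the implementation a pending subscription to accept and feed. A sketch of the usual pattern, modelled on the GRANDPA justification handler earlier in this patch (the function name, task label and stream are illustrative, and the re-export path of `PendingSubscription` depends on the pinned jsonrpsee version):

```rust
use futures::{FutureExt, Stream};
// NOTE: `PendingSubscription` is the same pending-subscription handle used by
// the GRANDPA handler earlier in this patch; the exact re-export path may
// differ between jsonrpsee releases.
use jsonrpsee::PendingSubscription;
use sc_rpc::SubscriptionTaskExecutor;

/// Accept a pending subscription and pipe a stream of serializable items into
/// it on a background task (sketch).
fn spawn_piped_subscription<S, T>(
	executor: &SubscriptionTaskExecutor,
	pending: PendingSubscription,
	stream: S,
) where
	S: Stream<Item = T> + Unpin + Send + 'static,
	T: serde::Serialize + Send + 'static,
{
	let fut = async move {
		// `accept()` answers the subscription request; `None` means the peer
		// already disconnected, so there is nothing to feed.
		if let Some(mut sink) = pending.accept() {
			sink.pipe_from_stream(stream).await;
		}
	};

	// Detach the piping task so the subscription call itself returns at once.
	executor.spawn("rpc-subscription-sketch", Some("rpc"), fut.boxed());
}
```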
- #[rpc(name = "childstate_getStorageSize")] - fn storage_size( + #[method(name = "childstate_getStorageSize")] + async fn storage_size( &self, child_storage_key: PrefixedStorageKey, key: StorageKey, hash: Option, - ) -> FutureResult>; + ) -> RpcResult>; /// Returns proof of storage for child key entries at a specific block's state. - #[rpc(name = "state_getChildReadProof")] - fn read_child_proof( + #[method(name = "state_getChildReadProof")] + async fn read_child_proof( &self, child_storage_key: PrefixedStorageKey, keys: Vec, hash: Option, - ) -> FutureResult>; + ) -> RpcResult>; } diff --git a/client/rpc-api/src/dev/error.rs b/client/rpc-api/src/dev/error.rs index 1a14b0d78994e..fe74dea256376 100644 --- a/client/rpc-api/src/dev/error.rs +++ b/client/rpc-api/src/dev/error.rs @@ -18,14 +18,10 @@ //! Error helpers for Dev RPC module. -use crate::errors; -use jsonrpc_core as rpc; - -/// Dev RPC Result type. -pub type Result = std::result::Result; - -/// Dev RPC future Result type. -pub type FutureResult = jsonrpc_core::BoxFuture>; +use jsonrpsee::{ + core::Error as JsonRpseeError, + types::error::{CallError, ErrorObject}, +}; /// Dev RPC errors. #[derive(Debug, thiserror::Error)] @@ -45,27 +41,21 @@ pub enum Error { } /// Base error code for all dev errors. -const BASE_ERROR: i64 = 6000; +const BASE_ERROR: i32 = 6000; -impl From for rpc::Error { +impl From for JsonRpseeError { fn from(e: Error) -> Self { + let msg = e.to_string(); + match e { - Error::BlockQueryError(_) => rpc::Error { - code: rpc::ErrorCode::ServerError(BASE_ERROR + 1), - message: e.to_string(), - data: None, - }, - Error::BlockExecutionFailed => rpc::Error { - code: rpc::ErrorCode::ServerError(BASE_ERROR + 3), - message: e.to_string(), - data: None, - }, - Error::WitnessCompactionFailed => rpc::Error { - code: rpc::ErrorCode::ServerError(BASE_ERROR + 4), - message: e.to_string(), - data: None, - }, - e => errors::internal(e), + Error::BlockQueryError(_) => + CallError::Custom(ErrorObject::owned(BASE_ERROR + 1, msg, None::<()>)), + Error::BlockExecutionFailed => + CallError::Custom(ErrorObject::owned(BASE_ERROR + 3, msg, None::<()>)), + Error::WitnessCompactionFailed => + CallError::Custom(ErrorObject::owned(BASE_ERROR + 4, msg, None::<()>)), + Error::UnsafeRpcCalled(e) => e.into(), } + .into() } } diff --git a/client/rpc-api/src/dev/mod.rs b/client/rpc-api/src/dev/mod.rs index b1ae8934af8a1..afd83272a0127 100644 --- a/client/rpc-api/src/dev/mod.rs +++ b/client/rpc-api/src/dev/mod.rs @@ -22,9 +22,8 @@ pub mod error; -use self::error::Result; use codec::{Decode, Encode}; -use jsonrpc_derive::rpc; +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use scale_info::TypeInfo; use serde::{Deserialize, Serialize}; @@ -52,13 +51,13 @@ pub struct BlockStats { /// /// This API contains unstable and unsafe methods only meant for development nodes. They /// are all flagged as unsafe for this reason. -#[rpc] +#[rpc(client, server)] pub trait DevApi { /// Reexecute the specified `block_hash` and gather statistics while doing so. /// /// This function requires the specified block and its parent to be available /// at the queried node. If either the specified block or the parent is pruned, /// this function will return `None`. 
- #[rpc(name = "dev_getBlockStats")] - fn block_stats(&self, block_hash: Hash) -> Result>; + #[method(name = "dev_getBlockStats")] + fn block_stats(&self, block_hash: Hash) -> RpcResult>; } diff --git a/client/rpc-api/src/errors.rs b/client/rpc-api/src/errors.rs deleted file mode 100644 index e59b1b0eda5ce..0000000000000 --- a/client/rpc-api/src/errors.rs +++ /dev/null @@ -1,28 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2018-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use log::warn; - -pub fn internal(e: E) -> jsonrpc_core::Error { - warn!("Unknown error: {}", e); - jsonrpc_core::Error { - code: jsonrpc_core::ErrorCode::InternalError, - message: "Unknown error occurred".into(), - data: Some(e.to_string().into()), - } -} diff --git a/client/rpc-api/src/helpers.rs b/client/rpc-api/src/helpers.rs deleted file mode 100644 index 2fbd2f5040463..0000000000000 --- a/client/rpc-api/src/helpers.rs +++ /dev/null @@ -1,41 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2018-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use futures::{channel::oneshot, Future}; -use std::pin::Pin; - -/// Wraps around `oneshot::Receiver` and adjusts the error type to produce an internal error if the -/// sender gets dropped. 
-pub struct Receiver(pub oneshot::Receiver); - -impl Future for Receiver { - type Output = Result; - - fn poll( - mut self: Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll { - Future::poll(Pin::new(&mut self.0), cx).map_err(|_| jsonrpc_core::Error::internal_error()) - } -} - -impl jsonrpc_core::WrapFuture for Receiver { - fn into_future(self) -> jsonrpc_core::BoxFuture> { - Box::pin(async { self.await }) - } -} diff --git a/client/rpc-api/src/lib.rs b/client/rpc-api/src/lib.rs index e06f30bf9cd87..a0cbbcee80e3e 100644 --- a/client/rpc-api/src/lib.rs +++ b/client/rpc-api/src/lib.rs @@ -22,15 +22,9 @@ #![warn(missing_docs)] -mod errors; -mod helpers; -mod metadata; mod policy; -pub use helpers::Receiver; -pub use jsonrpc_core::IoHandlerExtension as RpcExtension; -pub use metadata::Metadata; -pub use policy::{DenyUnsafe, UnsafeRpcError}; +pub use policy::DenyUnsafe; pub mod author; pub mod chain; diff --git a/client/rpc-api/src/metadata.rs b/client/rpc-api/src/metadata.rs deleted file mode 100644 index 3c798782062e9..0000000000000 --- a/client/rpc-api/src/metadata.rs +++ /dev/null @@ -1,60 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2022 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! RPC Metadata -use std::sync::Arc; - -use futures::channel::mpsc; -use jsonrpc_pubsub::{PubSubMetadata, Session}; - -/// RPC Metadata. -/// -/// Manages persistent session for transports that support it -/// and may contain some additional info extracted from specific transports -/// (like remote client IP address, request headers, etc) -#[derive(Default, Clone)] -pub struct Metadata { - session: Option>, -} - -impl jsonrpc_core::Metadata for Metadata {} -impl PubSubMetadata for Metadata { - fn session(&self) -> Option> { - self.session.clone() - } -} - -impl Metadata { - /// Create new `Metadata` with session (Pub/Sub) support. - pub fn new(transport: mpsc::UnboundedSender) -> Self { - Metadata { session: Some(Arc::new(Session::new(transport))) } - } - - /// Create new `Metadata` for tests. - #[cfg(test)] - pub fn new_test() -> (mpsc::UnboundedReceiver, Self) { - let (tx, rx) = mpsc::unbounded(); - (rx, Self::new(tx)) - } -} - -impl From> for Metadata { - fn from(sender: mpsc::UnboundedSender) -> Self { - Self::new(sender) - } -} diff --git a/client/rpc-api/src/offchain/error.rs b/client/rpc-api/src/offchain/error.rs index 41f1416bfb367..be72e05fc4460 100644 --- a/client/rpc-api/src/offchain/error.rs +++ b/client/rpc-api/src/offchain/error.rs @@ -18,7 +18,10 @@ //! Offchain RPC errors. -use jsonrpc_core as rpc; +use jsonrpsee::{ + core::Error as JsonRpseeError, + types::error::{CallError, ErrorObject}, +}; /// Offchain RPC Result type. pub type Result = std::result::Result; @@ -35,16 +38,17 @@ pub enum Error { } /// Base error code for all offchain errors. 
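The deleted `Receiver` wrapper existed only to adapt a oneshot channel to jsonrpc-core's boxed-future type; with jsonrpsee the trait methods can simply be `async`, so the reply channel is awaited directly. A sketch of the shape (the `Request` enum, `SystemRpc` struct and error code are illustrative, not the actual `sc-rpc` implementation):

```rust
use futures::channel::{mpsc, oneshot};
use jsonrpsee::{
	core::RpcResult,
	types::error::{CallError, ErrorObject},
};

/// Illustrative request sent to a background worker.
enum Request {
	Health(oneshot::Sender<String>),
}

/// Illustrative error code for "the background worker is gone".
const BACKEND_GONE: i32 = 9001;

fn backend_gone<E: std::fmt::Display>(e: E) -> jsonrpsee::core::Error {
	CallError::Custom(ErrorObject::owned(
		BACKEND_GONE,
		format!("background worker unreachable: {}", e),
		None::<()>,
	))
	.into()
}

struct SystemRpc {
	to_worker: mpsc::UnboundedSender<Request>,
}

impl SystemRpc {
	/// What used to return a custom `Receiver<T>` future is now a plain async
	/// method: send the request and await the oneshot reply.
	async fn system_health(&self) -> RpcResult<String> {
		let (tx, rx) = oneshot::channel();
		self.to_worker.unbounded_send(Request::Health(tx)).map_err(backend_gone)?;
		rx.await.map_err(backend_gone)
	}
}
```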
-const BASE_ERROR: i64 = 5000; +const BASE_ERROR: i32 = 5000; -impl From for rpc::Error { +impl From for JsonRpseeError { fn from(e: Error) -> Self { match e { - Error::UnavailableStorageKind => rpc::Error { - code: rpc::ErrorCode::ServerError(BASE_ERROR + 1), - message: "This storage kind is not available yet".into(), - data: None, - }, + Error::UnavailableStorageKind => CallError::Custom(ErrorObject::owned( + BASE_ERROR + 1, + "This storage kind is not available yet", + None::<()>, + )) + .into(), Error::UnsafeRpcCalled(e) => e.into(), } } diff --git a/client/rpc-api/src/offchain/mod.rs b/client/rpc-api/src/offchain/mod.rs index c76e83011072d..d9435d9a875fe 100644 --- a/client/rpc-api/src/offchain/mod.rs +++ b/client/rpc-api/src/offchain/mod.rs @@ -18,22 +18,19 @@ //! Substrate offchain API. -pub mod error; - -use self::error::Result; -use jsonrpc_derive::rpc; +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use sp_core::{offchain::StorageKind, Bytes}; -pub use self::gen_client::Client as OffchainClient; +pub mod error; /// Substrate offchain RPC API -#[rpc] +#[rpc(client, server)] pub trait OffchainApi { /// Set offchain local storage under given key and prefix. - #[rpc(name = "offchain_localStorageSet")] - fn set_local_storage(&self, kind: StorageKind, key: Bytes, value: Bytes) -> Result<()>; + #[method(name = "offchain_localStorageSet")] + fn set_local_storage(&self, kind: StorageKind, key: Bytes, value: Bytes) -> RpcResult<()>; /// Get offchain local storage under given key and prefix. - #[rpc(name = "offchain_localStorageGet")] - fn get_local_storage(&self, kind: StorageKind, key: Bytes) -> Result>; + #[method(name = "offchain_localStorageGet")] + fn get_local_storage(&self, kind: StorageKind, key: Bytes) -> RpcResult>; } diff --git a/client/rpc-api/src/policy.rs b/client/rpc-api/src/policy.rs index dc0753c1b9139..69ca8958520a6 100644 --- a/client/rpc-api/src/policy.rs +++ b/client/rpc-api/src/policy.rs @@ -21,7 +21,13 @@ //! Contains a `DenyUnsafe` type that can be used to deny potentially unsafe //! RPC when accessed externally. -use jsonrpc_core as rpc; +use jsonrpsee::{ + core::Error as JsonRpseeError, + types::{ + error::{CallError, ErrorCode}, + ErrorObject, + }, +}; /// Signifies whether a potentially unsafe RPC should be denied. #[derive(Clone, Copy, Debug)] @@ -55,8 +61,18 @@ impl std::fmt::Display for UnsafeRpcError { impl std::error::Error for UnsafeRpcError {} -impl From for rpc::Error { - fn from(error: UnsafeRpcError) -> rpc::Error { - rpc::Error { code: rpc::ErrorCode::MethodNotFound, message: error.to_string(), data: None } +impl From for CallError { + fn from(e: UnsafeRpcError) -> CallError { + CallError::Custom(ErrorObject::owned( + ErrorCode::MethodNotFound.code(), + e.to_string(), + None::<()>, + )) + } +} + +impl From for JsonRpseeError { + fn from(e: UnsafeRpcError) -> JsonRpseeError { + JsonRpseeError::Call(e.into()) } } diff --git a/client/rpc-api/src/state/error.rs b/client/rpc-api/src/state/error.rs index 4414629e2e294..b1df64b4789ab 100644 --- a/client/rpc-api/src/state/error.rs +++ b/client/rpc-api/src/state/error.rs @@ -18,21 +18,19 @@ //! State RPC errors. -use crate::errors; -use jsonrpc_core as rpc; - +use jsonrpsee::{ + core::Error as JsonRpseeError, + types::error::{CallError, ErrorObject}, +}; /// State RPC Result type. pub type Result = std::result::Result; -/// State RPC future Result type. -pub type FutureResult = jsonrpc_core::BoxFuture>; - /// State RPC errors. #[derive(Debug, thiserror::Error)] pub enum Error { /// Client error. 
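`DenyUnsafe` keeps working the same way under jsonrpsee: a handler calls `check_if_safe()`, and the new `From<UnsafeRpcError>` impls added in `policy.rs` above turn a denied call into a `MethodNotFound`-coded error object. A sketch (the `LocalStorageRpc` type and its parameters are illustrative):

```rust
use jsonrpsee::core::RpcResult;
use sc_rpc_api::DenyUnsafe;

/// Illustrative handler guarding an unsafe RPC.
struct LocalStorageRpc {
	deny_unsafe: DenyUnsafe,
}

impl LocalStorageRpc {
	fn set_local_storage(&self, _key: Vec<u8>, _value: Vec<u8>) -> RpcResult<()> {
		// `check_if_safe()` returns `Err(UnsafeRpcError)` when unsafe RPCs are
		// denied; `?` converts it through the `From` impls introduced above.
		self.deny_unsafe.check_if_safe()?;
		// ... the actual storage write would go here ...
		Ok(())
	}
}
```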
#[error("Client error: {}", .0)] - Client(#[from] Box), + Client(#[from] Box), /// Provided block range couldn't be resolved to a list of blocks. #[error("Cannot resolve a block range ['{:?}' ... '{:?}]. {}", .from, .to, .details)] InvalidBlockRange { @@ -57,22 +55,18 @@ pub enum Error { } /// Base code for all state errors. -const BASE_ERROR: i64 = 4000; +const BASE_ERROR: i32 = 4000; -impl From for rpc::Error { +impl From for JsonRpseeError { fn from(e: Error) -> Self { match e { - Error::InvalidBlockRange { .. } => rpc::Error { - code: rpc::ErrorCode::ServerError(BASE_ERROR + 1), - message: format!("{}", e), - data: None, - }, - Error::InvalidCount { .. } => rpc::Error { - code: rpc::ErrorCode::ServerError(BASE_ERROR + 2), - message: format!("{}", e), - data: None, - }, - e => errors::internal(e), + Error::InvalidBlockRange { .. } => + CallError::Custom(ErrorObject::owned(BASE_ERROR + 1, e.to_string(), None::<()>)) + .into(), + Error::InvalidCount { .. } => + CallError::Custom(ErrorObject::owned(BASE_ERROR + 2, e.to_string(), None::<()>)) + .into(), + e => Self::to_call_error(e), } } } diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index 42e927580960c..fba023e830262 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -18,161 +18,122 @@ //! Substrate state API. -pub mod error; -pub mod helpers; - -use self::error::FutureResult; -use jsonrpc_core::Result as RpcResult; -use jsonrpc_derive::rpc; -use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; +use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use sp_core::{ storage::{StorageChangeSet, StorageData, StorageKey}, Bytes, }; use sp_version::RuntimeVersion; -pub use self::{gen_client::Client as StateClient, helpers::ReadProof}; +pub mod error; +pub mod helpers; + +pub use self::helpers::ReadProof; /// Substrate state API -#[rpc] +#[rpc(client, server)] pub trait StateApi { - /// RPC Metadata - type Metadata; - /// Call a contract at a block's state. - #[rpc(name = "state_call", alias("state_callAt"))] - fn call(&self, name: String, bytes: Bytes, hash: Option) -> FutureResult; + #[method(name = "state_call", aliases = ["state_callAt"])] + async fn call(&self, name: String, bytes: Bytes, hash: Option) -> RpcResult; - /// DEPRECATED: Please use `state_getKeysPaged` with proper paging support. /// Returns the keys with prefix, leave empty to get all the keys. - #[rpc(name = "state_getKeys")] - fn storage_keys(&self, prefix: StorageKey, hash: Option) - -> FutureResult>; + #[method(name = "state_getKeys")] + #[deprecated(since = "2.0.0", note = "Please use `getKeysPaged` with proper paging support")] + async fn storage_keys( + &self, + prefix: StorageKey, + hash: Option, + ) -> RpcResult>; /// Returns the keys with prefix, leave empty to get all the keys - #[rpc(name = "state_getPairs")] - fn storage_pairs( + #[method(name = "state_getPairs")] + async fn storage_pairs( &self, prefix: StorageKey, hash: Option, - ) -> FutureResult>; + ) -> RpcResult>; /// Returns the keys with prefix with pagination support. /// Up to `count` keys will be returned. /// If `start_key` is passed, return next keys in storage in lexicographic order. 
- #[rpc(name = "state_getKeysPaged", alias("state_getKeysPagedAt"))] - fn storage_keys_paged( + #[method(name = "state_getKeysPaged", aliases = ["state_getKeysPagedAt"])] + async fn storage_keys_paged( &self, prefix: Option, count: u32, start_key: Option, hash: Option, - ) -> FutureResult>; + ) -> RpcResult>; /// Returns a storage entry at a specific block's state. - #[rpc(name = "state_getStorage", alias("state_getStorageAt"))] - fn storage(&self, key: StorageKey, hash: Option) -> FutureResult>; + #[method(name = "state_getStorage", aliases = ["state_getStorageAt"])] + async fn storage(&self, key: StorageKey, hash: Option) -> RpcResult>; /// Returns the hash of a storage entry at a block's state. - #[rpc(name = "state_getStorageHash", alias("state_getStorageHashAt"))] - fn storage_hash(&self, key: StorageKey, hash: Option) -> FutureResult>; + #[method(name = "state_getStorageHash", aliases = ["state_getStorageHashAt"])] + async fn storage_hash(&self, key: StorageKey, hash: Option) -> RpcResult>; /// Returns the size of a storage entry at a block's state. - #[rpc(name = "state_getStorageSize", alias("state_getStorageSizeAt"))] - fn storage_size(&self, key: StorageKey, hash: Option) -> FutureResult>; + #[method(name = "state_getStorageSize", aliases = ["state_getStorageSizeAt"])] + async fn storage_size(&self, key: StorageKey, hash: Option) -> RpcResult>; /// Returns the runtime metadata as an opaque blob. - #[rpc(name = "state_getMetadata")] - fn metadata(&self, hash: Option) -> FutureResult; + #[method(name = "state_getMetadata")] + async fn metadata(&self, hash: Option) -> RpcResult; /// Get the runtime version. - #[rpc(name = "state_getRuntimeVersion", alias("chain_getRuntimeVersion"))] - fn runtime_version(&self, hash: Option) -> FutureResult; + #[method(name = "state_getRuntimeVersion", aliases = ["chain_getRuntimeVersion"])] + async fn runtime_version(&self, hash: Option) -> RpcResult; /// Query historical storage entries (by key) starting from a block given as the second /// parameter. /// /// NOTE This first returned result contains the initial state of storage for all keys. /// Subsequent values in the vector represent changes to the previous state (diffs). - #[rpc(name = "state_queryStorage")] - fn query_storage( + #[method(name = "state_queryStorage")] + async fn query_storage( &self, keys: Vec, block: Hash, hash: Option, - ) -> FutureResult>>; + ) -> RpcResult>>; /// Query storage entries (by key) starting at block hash given as the second parameter. - #[rpc(name = "state_queryStorageAt")] - fn query_storage_at( + #[method(name = "state_queryStorageAt")] + async fn query_storage_at( &self, keys: Vec, at: Option, - ) -> FutureResult>>; + ) -> RpcResult>>; /// Returns proof of storage entries at a specific block's state. 
- #[rpc(name = "state_getReadProof")] - fn read_proof( + #[method(name = "state_getReadProof")] + async fn read_proof( &self, keys: Vec, hash: Option, - ) -> FutureResult>; + ) -> RpcResult>; /// New runtime version subscription - #[pubsub( - subscription = "state_runtimeVersion", - subscribe, - name = "state_subscribeRuntimeVersion", - alias("chain_subscribeRuntimeVersion") + #[subscription( + name = "state_subscribeRuntimeVersion" => "state_runtimeVersion", + unsubscribe = "state_unsubscribeRuntimeVersion", + aliases = ["chain_subscribeRuntimeVersion"], + unsubscribe_aliases = ["chain_unsubscribeRuntimeVersion"], + item = RuntimeVersion, )] - fn subscribe_runtime_version( - &self, - metadata: Self::Metadata, - subscriber: Subscriber, - ); + fn subscribe_runtime_version(&self); - /// Unsubscribe from runtime version subscription - #[pubsub( - subscription = "state_runtimeVersion", - unsubscribe, - name = "state_unsubscribeRuntimeVersion", - alias("chain_unsubscribeRuntimeVersion") + /// New storage subscription + #[subscription( + name = "state_subscribeStorage" => "state_storage", + unsubscribe = "state_unsubscribeStorage", + item = StorageChangeSet, )] - fn unsubscribe_runtime_version( - &self, - metadata: Option, - id: SubscriptionId, - ) -> RpcResult; - - /// Subscribe to the changes in the storage. - /// - /// This RPC endpoint has two modes of operation: - /// 1) When `keys` is not `None` you'll only be informed about the changes - /// done to the specified keys; this is RPC-safe. - /// 2) When `keys` is `None` you'll be informed of *all* of the changes; - /// **this is RPC-unsafe**. - /// - /// When subscribed to all of the changes this API will emit every storage - /// change for every block that is imported. These changes will only be sent - /// after a block is imported. If you require a consistent view across all changes - /// of every block, you need to take this into account. - #[pubsub(subscription = "state_storage", subscribe, name = "state_subscribeStorage")] - fn subscribe_storage( - &self, - metadata: Self::Metadata, - subscriber: Subscriber>, - keys: Option>, - ); - - /// Unsubscribe from storage subscription - #[pubsub(subscription = "state_storage", unsubscribe, name = "state_unsubscribeStorage")] - fn unsubscribe_storage( - &self, - metadata: Option, - id: SubscriptionId, - ) -> RpcResult; + fn subscribe_storage(&self, keys: Option>); - /// The `state_traceBlock` RPC provides a way to trace the re-execution of a single + /// The `traceBlock` RPC provides a way to trace the re-execution of a single /// block, collecting Spans and Events from both the client and the relevant WASM runtime. /// The Spans and Events are conceptually equivalent to those from the [Tracing][1] crate. /// @@ -323,13 +284,13 @@ pub trait StateApi { /// narrow down the traces using a smaller set of targets and/or storage keys. /// /// If you are having issues with maximum payload size you can use the flag - /// `-lstate_tracing=trace` to get some logging during tracing. - #[rpc(name = "state_traceBlock")] - fn trace_block( + /// `-ltracing=trace` to get some logging during tracing. + #[method(name = "state_traceBlock")] + async fn trace_block( &self, block: Hash, targets: Option, storage_keys: Option, methods: Option, - ) -> FutureResult; + ) -> RpcResult; } diff --git a/client/rpc-api/src/system/error.rs b/client/rpc-api/src/system/error.rs index 050d79b6ad636..777f8c6c6df0b 100644 --- a/client/rpc-api/src/system/error.rs +++ b/client/rpc-api/src/system/error.rs @@ -19,7 +19,10 @@ //! 
System RPC module errors. use crate::system::helpers::Health; -use jsonrpc_core as rpc; +use jsonrpsee::{ + core::Error as JsonRpseeError, + types::error::{CallError, ErrorObject}, +}; /// System RPC Result type. pub type Result = std::result::Result; @@ -35,22 +38,24 @@ pub enum Error { MalformattedPeerArg(String), } -/// Base code for all system errors. -const BASE_ERROR: i64 = 2000; +// Base code for all system errors. +const BASE_ERROR: i32 = 2000; +// Node is not fully functional (not healthy). +const NOT_HEALTHY_ERROR: i32 = BASE_ERROR + 1; +// Peer argument is malformatted. +const MALFORMATTED_PEER_ARG_ERROR: i32 = BASE_ERROR + 2; -impl From for rpc::Error { +impl From for JsonRpseeError { fn from(e: Error) -> Self { match e { - Error::NotHealthy(ref h) => rpc::Error { - code: rpc::ErrorCode::ServerError(BASE_ERROR + 1), - message: format!("{}", e), - data: serde_json::to_value(h).ok(), - }, - Error::MalformattedPeerArg(ref e) => rpc::Error { - code: rpc::ErrorCode::ServerError(BASE_ERROR + 2), - message: e.clone(), - data: None, - }, + Error::NotHealthy(ref h) => + CallError::Custom(ErrorObject::owned(NOT_HEALTHY_ERROR, e.to_string(), Some(h))), + Error::MalformattedPeerArg(e) => CallError::Custom(ErrorObject::owned( + MALFORMATTED_PEER_ARG_ERROR, + e, + None::<()>, + )), } + .into() } } diff --git a/client/rpc-api/src/system/mod.rs b/client/rpc-api/src/system/mod.rs index b610094f5b58d..1e12d5be87ee8 100644 --- a/client/rpc-api/src/system/mod.rs +++ b/client/rpc-api/src/system/mod.rs @@ -18,65 +18,61 @@ //! Substrate system API. -pub mod error; -pub mod helpers; - -use crate::helpers::Receiver; -use jsonrpc_core::BoxFuture; -use jsonrpc_derive::rpc; +use jsonrpsee::{ + core::{JsonValue, RpcResult}, + proc_macros::rpc, +}; -use self::error::Result as SystemResult; +pub use self::helpers::{Health, NodeRole, PeerInfo, SyncState, SystemInfo}; -pub use self::{ - gen_client::Client as SystemClient, - helpers::{Health, NodeRole, PeerInfo, SyncState, SystemInfo}, -}; +pub mod error; +pub mod helpers; /// Substrate system RPC API -#[rpc] +#[rpc(client, server)] pub trait SystemApi { /// Get the node's implementation name. Plain old string. - #[rpc(name = "system_name")] - fn system_name(&self) -> SystemResult; + #[method(name = "system_name")] + fn system_name(&self) -> RpcResult; /// Get the node implementation's version. Should be a semver string. - #[rpc(name = "system_version")] - fn system_version(&self) -> SystemResult; + #[method(name = "system_version")] + fn system_version(&self) -> RpcResult; /// Get the chain's name. Given as a string identifier. - #[rpc(name = "system_chain")] - fn system_chain(&self) -> SystemResult; + #[method(name = "system_chain")] + fn system_chain(&self) -> RpcResult; /// Get the chain's type. - #[rpc(name = "system_chainType")] - fn system_type(&self) -> SystemResult; + #[method(name = "system_chainType")] + fn system_type(&self) -> RpcResult; /// Get a custom set of properties as a JSON object, defined in the chain spec. - #[rpc(name = "system_properties")] - fn system_properties(&self) -> SystemResult; + #[method(name = "system_properties")] + fn system_properties(&self) -> RpcResult; /// Return health status of the node. 
/// /// Node is considered healthy if it is: /// - connected to some peers (unless running in dev mode) /// - not performing a major sync - #[rpc(name = "system_health", returns = "Health")] - fn system_health(&self) -> Receiver; + #[method(name = "system_health")] + async fn system_health(&self) -> RpcResult; /// Returns the base58-encoded PeerId of the node. - #[rpc(name = "system_localPeerId", returns = "String")] - fn system_local_peer_id(&self) -> Receiver; + #[method(name = "system_localPeerId")] + async fn system_local_peer_id(&self) -> RpcResult; - /// Returns the multiaddresses that the local node is listening on + /// Returns the multi-addresses that the local node is listening on /// /// The addresses include a trailing `/p2p/` with the local PeerId, and are thus suitable to - /// be passed to `system_addReservedPeer` or as a bootnode address for example. - #[rpc(name = "system_localListenAddresses", returns = "Vec")] - fn system_local_listen_addresses(&self) -> Receiver>; + /// be passed to `addReservedPeer` or as a bootnode address for example. + #[method(name = "system_localListenAddresses")] + async fn system_local_listen_addresses(&self) -> RpcResult>; /// Returns currently connected peers - #[rpc(name = "system_peers", returns = "Vec>")] - fn system_peers(&self) -> BoxFuture>>>; + #[method(name = "system_peers")] + async fn system_peers(&self) -> RpcResult>>; /// Returns current state of the network. /// @@ -84,47 +80,44 @@ pub trait SystemApi { /// as its format might change at any time. // TODO: the future of this call is uncertain: https://github.com/paritytech/substrate/issues/1890 // https://github.com/paritytech/substrate/issues/5541 - #[rpc(name = "system_unstable_networkState", returns = "jsonrpc_core::Value")] - fn system_network_state(&self) -> BoxFuture>; + #[method(name = "system_unstable_networkState")] + async fn system_network_state(&self) -> RpcResult; /// Adds a reserved peer. Returns the empty string or an error. The string /// parameter should encode a `p2p` multiaddr. /// /// `/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV` /// is an example of a valid, passing multiaddr with PeerId attached. - #[rpc(name = "system_addReservedPeer", returns = "()")] - fn system_add_reserved_peer(&self, peer: String) -> BoxFuture>; + #[method(name = "system_addReservedPeer")] + async fn system_add_reserved_peer(&self, peer: String) -> RpcResult<()>; /// Remove a reserved peer. Returns the empty string or an error. The string /// should encode only the PeerId e.g. `QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV`. - #[rpc(name = "system_removeReservedPeer", returns = "()")] - fn system_remove_reserved_peer( - &self, - peer_id: String, - ) -> BoxFuture>; + #[method(name = "system_removeReservedPeer")] + async fn system_remove_reserved_peer(&self, peer_id: String) -> RpcResult<()>; /// Returns the list of reserved peers - #[rpc(name = "system_reservedPeers", returns = "Vec")] - fn system_reserved_peers(&self) -> Receiver>; + #[method(name = "system_reservedPeers")] + async fn system_reserved_peers(&self) -> RpcResult>; /// Returns the roles the node is running as. - #[rpc(name = "system_nodeRoles", returns = "Vec")] - fn system_node_roles(&self) -> Receiver>; + #[method(name = "system_nodeRoles")] + async fn system_node_roles(&self) -> RpcResult>; /// Returns the state of the syncing of the node: starting block, current best block, highest /// known block. 
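Because the trait is annotated `#[rpc(client, server)]`, a typed `SystemApiClient` extension trait is generated alongside the server trait, and any jsonrpsee client can call it. A rough sketch, assuming the caller enables jsonrpsee's WebSocket client feature and standing in `H256`/`u64` for the node's hash and block-number generics:

```rust
use jsonrpsee::ws_client::WsClientBuilder;
use sc_rpc_api::system::SystemApiClient;
use sp_core::H256;

async fn print_node_name(url: &str) -> Result<(), jsonrpsee::core::Error> {
	let client = WsClientBuilder::default().build(url).await?;
	// The trait is generic over the chain's hash and number types, so the call
	// site has to pick concrete parameters (illustrative here).
	let name = SystemApiClient::<H256, u64>::system_name(&client).await?;
	println!("connected to {}", name);
	Ok(())
}
```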
- #[rpc(name = "system_syncState", returns = "SyncState")] - fn system_sync_state(&self) -> Receiver>; + #[method(name = "system_syncState")] + async fn system_sync_state(&self) -> RpcResult>; /// Adds the supplied directives to the current log filter /// /// The syntax is identical to the CLI `=`: /// /// `sync=debug,state=trace` - #[rpc(name = "system_addLogFilter", returns = "()")] - fn system_add_log_filter(&self, directives: String) -> Result<(), jsonrpc_core::Error>; + #[method(name = "system_addLogFilter")] + fn system_add_log_filter(&self, directives: String) -> RpcResult<()>; /// Resets the log filter to Substrate defaults - #[rpc(name = "system_resetLogFilter", returns = "()")] - fn system_reset_log_filter(&self) -> Result<(), jsonrpc_core::Error>; + #[method(name = "system_resetLogFilter")] + fn system_reset_log_filter(&self) -> RpcResult<()>; } diff --git a/client/rpc-servers/Cargo.toml b/client/rpc-servers/Cargo.toml index 4a13e9624a58e..ad01f3bdd6199 100644 --- a/client/rpc-servers/Cargo.toml +++ b/client/rpc-servers/Cargo.toml @@ -14,12 +14,8 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.21" -http = { package = "jsonrpc-http-server", version = "18.0.0" } -ipc = { package = "jsonrpc-ipc-server", version = "18.0.0" } -jsonrpc-core = "18.0.0" +jsonrpsee = { version = "0.12.0", features = ["server"] } log = "0.4.16" -pubsub = { package = "jsonrpc-pubsub", version = "18.0.0" } serde_json = "1.0.79" tokio = { version = "1.17.0", features = ["parking_lot"] } -ws = { package = "jsonrpc-ws-server", version = "18.0.0" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } diff --git a/client/rpc-servers/src/lib.rs b/client/rpc-servers/src/lib.rs index 963d9aec072f5..4f69413895a9b 100644 --- a/client/rpc-servers/src/lib.rs +++ b/client/rpc-servers/src/lib.rs @@ -20,213 +20,193 @@ #![warn(missing_docs)] -mod middleware; - -use jsonrpc_core::{IoHandlerExtension, MetaIoHandler}; -use log::error; -use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; -use pubsub::PubSubMetadata; -use std::io; +use jsonrpsee::{ + http_server::{AccessControlBuilder, HttpServerBuilder, HttpServerHandle}, + ws_server::{WsServerBuilder, WsServerHandle}, + RpcModule, +}; +use std::{error::Error as StdError, net::SocketAddr}; + +pub use crate::middleware::{RpcMetrics, RpcMiddleware}; +pub use jsonrpsee::core::{ + id_providers::{RandomIntegerIdProvider, RandomStringIdProvider}, + traits::IdProvider, +}; const MEGABYTE: usize = 1024 * 1024; /// Maximal payload accepted by RPC servers. pub const RPC_MAX_PAYLOAD_DEFAULT: usize = 15 * MEGABYTE; -/// Maximal buffer size in WS server. -pub const WS_MAX_BUFFER_CAPACITY_DEFAULT: usize = 16 * MEGABYTE; - /// Default maximum number of connections for WS RPC servers. const WS_MAX_CONNECTIONS: usize = 100; -/// The RPC IoHandler containing all requested APIs. -pub type RpcHandler = pubsub::PubSubHandler; - -pub use middleware::{method_names, RpcMetrics, RpcMiddleware}; - -/// Construct rpc `IoHandler` -pub fn rpc_handler( - extension: impl IoHandlerExtension, - rpc_middleware: RpcMiddleware, -) -> RpcHandler { - let io_handler = MetaIoHandler::with_middleware(rpc_middleware); - let mut io = pubsub::PubSubHandler::new(io_handler); - extension.augment(&mut io); - - // add an endpoint to list all available methods. 
- let mut methods = io.iter().map(|x| x.0.clone()).collect::>(); - io.add_method("rpc_methods", { - methods.sort(); - let methods = serde_json::to_value(&methods) - .expect("Serialization of Vec is infallible; qed"); - - move |_| { - let methods = methods.clone(); - async move { - Ok(serde_json::json!({ - "version": 1, - "methods": methods, - })) - } - } - }); - io -} +/// Default maximum number subscriptions per connection for WS RPC servers. +const WS_MAX_SUBS_PER_CONN: usize = 1024; -/// RPC server-specific prometheus metrics. -#[derive(Debug, Clone, Default)] -pub struct ServerMetrics { - /// Number of sessions opened. - session_opened: Option>, - /// Number of sessions closed. - session_closed: Option>, -} +pub mod middleware; -impl ServerMetrics { - /// Create new WebSocket RPC server metrics. - pub fn new(registry: Option<&Registry>) -> Result { - registry - .map(|r| { - Ok(Self { - session_opened: register( - Counter::new( - "substrate_rpc_sessions_opened", - "Number of persistent RPC sessions opened", - )?, - r, - )? - .into(), - session_closed: register( - Counter::new( - "substrate_rpc_sessions_closed", - "Number of persistent RPC sessions closed", - )?, - r, - )? - .into(), - }) - }) - .unwrap_or_else(|| Ok(Default::default())) - } -} - -/// Type alias for ipc server -pub type IpcServer = ipc::Server; /// Type alias for http server -pub type HttpServer = http::Server; +pub type HttpServer = HttpServerHandle; /// Type alias for ws server -pub type WsServer = ws::Server; - -impl ws::SessionStats for ServerMetrics { - fn open_session(&self, _id: ws::SessionId) { - self.session_opened.as_ref().map(|m| m.inc()); - } +pub type WsServer = WsServerHandle; + +/// WebSocket specific settings on the server. +pub struct WsConfig { + /// Maximum connections. + pub max_connections: Option, + /// Maximum subscriptions per connection. + pub max_subs_per_conn: Option, + /// Maximum rpc request payload size. + pub max_payload_in_mb: Option, + /// Maximum rpc response payload size. + pub max_payload_out_mb: Option, +} - fn close_session(&self, _id: ws::SessionId) { - self.session_closed.as_ref().map(|m| m.inc()); +impl WsConfig { + // Deconstructs the config to get the finalized inner values. + // + // `Payload size` or `max subs per connection` bigger than u32::MAX will be truncated. + fn deconstruct(self) -> (u32, u32, u64, u32) { + let max_conns = self.max_connections.unwrap_or(WS_MAX_CONNECTIONS) as u64; + let max_payload_in_mb = payload_size_or_default(self.max_payload_in_mb) as u32; + let max_payload_out_mb = payload_size_or_default(self.max_payload_out_mb) as u32; + let max_subs_per_conn = self.max_subs_per_conn.unwrap_or(WS_MAX_SUBS_PER_CONN) as u32; + + (max_payload_in_mb, max_payload_out_mb, max_conns, max_subs_per_conn) } } /// Start HTTP server listening on given address. 
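+ ///
+ /// A rough usage sketch (illustrative only; `module`, `metrics`, `handle` and the
+ /// socket addresses are assumed to be supplied by the caller, they are not defined
+ /// in this patch):
+ ///
+ /// ```ignore
+ /// // `module` is the `RpcModule` with all methods registered, `metrics` an
+ /// // `Option<RpcMetrics>` and `handle` a `tokio::runtime::Handle`.
+ /// let addrs = ["127.0.0.1:9933".parse().unwrap(), "127.0.0.1:9934".parse().unwrap()];
+ /// let http_handle = start_http(
+ ///     addrs,
+ ///     None,     // no CORS restrictions
+ ///     Some(15), // max request payload size in MB
+ ///     Some(15), // max response payload size in MB
+ ///     metrics,
+ ///     module,
+ ///     handle,
+ /// )
+ /// .await?;
+ /// ```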
-pub fn start_http( - addr: &std::net::SocketAddr, +pub async fn start_http( + addrs: [SocketAddr; 2], cors: Option<&Vec>, - io: RpcHandler, - maybe_max_payload_mb: Option, - tokio_handle: tokio::runtime::Handle, -) -> io::Result { - let max_request_body_size = maybe_max_payload_mb - .map(|mb| mb.saturating_mul(MEGABYTE)) - .unwrap_or(RPC_MAX_PAYLOAD_DEFAULT); - - http::ServerBuilder::new(io) - .threads(1) - .event_loop_executor(tokio_handle) - .health_api(("/health", "system_health")) - .allowed_hosts(hosts_filtering(cors.is_some())) - .rest_api(if cors.is_some() { http::RestApi::Secure } else { http::RestApi::Unsecure }) - .cors(map_cors::(cors)) - .max_request_body_size(max_request_body_size) - .start_http(addr) -} - -/// Start IPC server listening on given path. -pub fn start_ipc( - addr: &str, - io: RpcHandler, - server_metrics: ServerMetrics, -) -> io::Result { - let builder = ipc::ServerBuilder::new(io); - #[cfg(target_os = "unix")] - builder.set_security_attributes({ - let security_attributes = ipc::SecurityAttributes::empty(); - security_attributes.set_mode(0o600)?; - security_attributes - }); - builder.session_stats(server_metrics).start(addr) + max_payload_in_mb: Option, + max_payload_out_mb: Option, + metrics: Option, + rpc_api: RpcModule, + rt: tokio::runtime::Handle, +) -> Result> { + let max_payload_in = payload_size_or_default(max_payload_in_mb); + let max_payload_out = payload_size_or_default(max_payload_out_mb); + + let mut acl = AccessControlBuilder::new(); + + if let Some(cors) = cors { + // Whitelist listening address. + // NOTE: set_allowed_hosts will whitelist both ports but only one will used. + acl = acl.set_allowed_hosts(format_allowed_hosts(&addrs[..]))?; + acl = acl.set_allowed_origins(cors)?; + }; + + let builder = HttpServerBuilder::new() + .max_request_body_size(max_payload_in as u32) + .max_response_body_size(max_payload_out as u32) + .set_access_control(acl.build()) + .custom_tokio_runtime(rt); + + let rpc_api = build_rpc_api(rpc_api); + let (handle, addr) = if let Some(metrics) = metrics { + let middleware = RpcMiddleware::new(metrics, "http".into()); + let builder = builder.set_middleware(middleware); + let server = builder.build(&addrs[..]).await?; + let addr = server.local_addr(); + (server.start(rpc_api)?, addr) + } else { + let server = builder.build(&addrs[..]).await?; + let addr = server.local_addr(); + (server.start(rpc_api)?, addr) + }; + + log::info!( + "Running JSON-RPC HTTP server: addr={}, allowed origins={:?}", + addr.map_or_else(|_| "unknown".to_string(), |a| a.to_string()), + cors + ); + + Ok(handle) } /// Start WS server listening on given address. 
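+ ///
+ /// A rough usage sketch (illustrative only; as for `start_http`, `module`, `metrics`
+ /// and `handle` are assumed to be supplied by the caller):
+ ///
+ /// ```ignore
+ /// let ws_config = WsConfig {
+ ///     max_connections: Some(100),
+ ///     max_subs_per_conn: Some(1024),
+ ///     max_payload_in_mb: Some(15),
+ ///     max_payload_out_mb: Some(15),
+ /// };
+ /// let ws_handle = start_ws(
+ ///     ["127.0.0.1:9944".parse().unwrap(), "127.0.0.1:9945".parse().unwrap()],
+ ///     None,      // no CORS restrictions
+ ///     ws_config,
+ ///     metrics,
+ ///     module,
+ ///     handle,
+ ///     None,      // fall back to the default `RandomStringIdProvider`
+ /// )
+ /// .await?;
+ /// ```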
-pub fn start_ws< - M: pubsub::PubSubMetadata + From>, ->( - addr: &std::net::SocketAddr, - max_connections: Option, +pub async fn start_ws( + addrs: [SocketAddr; 2], cors: Option<&Vec>, - io: RpcHandler, - maybe_max_payload_mb: Option, - maybe_max_out_buffer_capacity_mb: Option, - server_metrics: ServerMetrics, - tokio_handle: tokio::runtime::Handle, -) -> io::Result { - let max_payload = maybe_max_payload_mb - .map(|mb| mb.saturating_mul(MEGABYTE)) - .unwrap_or(RPC_MAX_PAYLOAD_DEFAULT); - let max_out_buffer_capacity = maybe_max_out_buffer_capacity_mb - .map(|mb| mb.saturating_mul(MEGABYTE)) - .unwrap_or(WS_MAX_BUFFER_CAPACITY_DEFAULT); - - if max_payload > max_out_buffer_capacity { - log::warn!( - "maximum payload ({}) is more than maximum output buffer ({}) size in ws server, the payload will actually be limited by the buffer size", - max_payload, - max_out_buffer_capacity, - ) + ws_config: WsConfig, + metrics: Option, + rpc_api: RpcModule, + rt: tokio::runtime::Handle, + id_provider: Option>, +) -> Result> { + let (max_payload_in, max_payload_out, max_connections, max_subs_per_conn) = + ws_config.deconstruct(); + + let mut builder = WsServerBuilder::new() + .max_request_body_size(max_payload_in) + .max_response_body_size(max_payload_out) + .max_connections(max_connections) + .max_subscriptions_per_connection(max_subs_per_conn) + .custom_tokio_runtime(rt); + + if let Some(provider) = id_provider { + builder = builder.set_id_provider(provider); + } else { + builder = builder.set_id_provider(RandomStringIdProvider::new(16)); + }; + + if let Some(cors) = cors { + // Whitelist listening address. + // NOTE: set_allowed_hosts will whitelist both ports but only one will used. + builder = builder.set_allowed_hosts(format_allowed_hosts(&addrs[..]))?; + builder = builder.set_allowed_origins(cors)?; } - ws::ServerBuilder::with_meta_extractor(io, |context: &ws::RequestContext| { - context.sender().into() - }) - .event_loop_executor(tokio_handle) - .max_payload(max_payload) - .max_connections(max_connections.unwrap_or(WS_MAX_CONNECTIONS)) - .max_out_buffer_capacity(max_out_buffer_capacity) - .allowed_origins(map_cors(cors)) - .allowed_hosts(hosts_filtering(cors.is_some())) - .session_stats(server_metrics) - .start(addr) - .map_err(|err| match err { - ws::Error::Io(io) => io, - ws::Error::ConnectionClosed => io::ErrorKind::BrokenPipe.into(), - e => { - error!("{}", e); - io::ErrorKind::Other.into() - }, - }) + let rpc_api = build_rpc_api(rpc_api); + let (handle, addr) = if let Some(metrics) = metrics { + let middleware = RpcMiddleware::new(metrics, "ws".into()); + let builder = builder.set_middleware(middleware); + let server = builder.build(&addrs[..]).await?; + let addr = server.local_addr(); + (server.start(rpc_api)?, addr) + } else { + let server = builder.build(&addrs[..]).await?; + let addr = server.local_addr(); + (server.start(rpc_api)?, addr) + }; + + log::info!( + "Running JSON-RPC WS server: addr={}, allowed origins={:?}", + addr.map_or_else(|_| "unknown".to_string(), |a| a.to_string()), + cors + ); + + Ok(handle) } -fn map_cors From<&'a str>>(cors: Option<&Vec>) -> http::DomainsValidation { - cors.map(|x| x.iter().map(AsRef::as_ref).map(Into::into).collect::>()) - .into() +fn format_allowed_hosts(addrs: &[SocketAddr]) -> Vec { + let mut hosts = Vec::with_capacity(addrs.len() * 2); + for addr in addrs { + hosts.push(format!("localhost:{}", addr.port())); + hosts.push(format!("127.0.0.1:{}", addr.port())); + } + hosts } -fn hosts_filtering(enable: bool) -> http::DomainsValidation { - if 
enable { - // NOTE The listening address is whitelisted by default. - // Setting an empty vector here enables the validation - // and allows only the listening address. - http::DomainsValidation::AllowOnly(vec![]) - } else { - http::DomainsValidation::Disabled - } +fn build_rpc_api(mut rpc_api: RpcModule) -> RpcModule { + let mut available_methods = rpc_api.method_names().collect::>(); + available_methods.sort_unstable(); + + rpc_api + .register_method("rpc_methods", move |_, _| { + Ok(serde_json::json!({ + "version": 1, + "methods": available_methods, + })) + }) + .expect("infallible all other methods have their own address space; qed"); + + rpc_api +} + +fn payload_size_or_default(size_mb: Option) -> usize { + size_mb.map_or(RPC_MAX_PAYLOAD_DEFAULT, |mb| mb.saturating_mul(MEGABYTE)) } diff --git a/client/rpc-servers/src/middleware.rs b/client/rpc-servers/src/middleware.rs index d4ac787ce9f0c..5b2ee4bedb7dd 100644 --- a/client/rpc-servers/src/middleware.rs +++ b/client/rpc-servers/src/middleware.rs @@ -16,34 +16,38 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Middleware for RPC requests. +//! RPC middlware to collect prometheus metrics on RPC calls. -use std::collections::HashSet; - -use jsonrpc_core::{FutureOutput, FutureResponse, Metadata, Middleware as RequestMiddleware}; +use jsonrpsee::core::middleware::Middleware; use prometheus_endpoint::{ - register, CounterVec, HistogramOpts, HistogramVec, Opts, PrometheusError, Registry, U64, + register, Counter, CounterVec, HistogramOpts, HistogramVec, Opts, PrometheusError, Registry, + U64, }; -use futures::{future::Either, Future, FutureExt}; -use pubsub::PubSubMetadata; - -use crate::RpcHandler; - -/// Metrics for RPC middleware +/// Metrics for RPC middleware storing information about the number of requests started/completed, +/// calls started/completed and their timings. #[derive(Debug, Clone)] pub struct RpcMetrics { + /// Number of RPC requests received since the server started. requests_started: CounterVec, + /// Number of RPC requests completed since the server started. requests_finished: CounterVec, + /// Histogram over RPC execution times. calls_time: HistogramVec, + /// Number of calls started. calls_started: CounterVec, + /// Number of calls completed. calls_finished: CounterVec, + /// Number of Websocket sessions opened (Websocket only). + ws_sessions_opened: Option>, + /// Number of Websocket sessions closed (Websocket only). 
+ ws_sessions_closed: Option>, } impl RpcMetrics { /// Create an instance of metrics pub fn new(metrics_registry: Option<&Registry>) -> Result, PrometheusError> { - if let Some(r) = metrics_registry { + if let Some(metrics_registry) = metrics_registry { Ok(Some(Self { requests_started: register( CounterVec::new( @@ -53,7 +57,7 @@ impl RpcMetrics { ), &["protocol"], )?, - r, + metrics_registry, )?, requests_finished: register( CounterVec::new( @@ -63,7 +67,7 @@ impl RpcMetrics { ), &["protocol"], )?, - r, + metrics_registry, )?, calls_time: register( HistogramVec::new( @@ -73,7 +77,7 @@ impl RpcMetrics { ), &["protocol", "method"], )?, - r, + metrics_registry, )?, calls_started: register( CounterVec::new( @@ -83,7 +87,7 @@ impl RpcMetrics { ), &["protocol", "method"], )?, - r, + metrics_registry, )?, calls_finished: register( CounterVec::new( @@ -93,8 +97,24 @@ impl RpcMetrics { ), &["protocol", "method", "is_error"], )?, - r, + metrics_registry, )?, + ws_sessions_opened: register( + Counter::new( + "substrate_rpc_sessions_opened", + "Number of persistent RPC sessions opened", + )?, + metrics_registry, + )? + .into(), + ws_sessions_closed: register( + Counter::new( + "substrate_rpc_sessions_closed", + "Number of persistent RPC sessions closed", + )?, + metrics_registry, + )? + .into(), })) } else { Ok(None) @@ -102,140 +122,71 @@ impl RpcMetrics { } } -/// Instantiates a dummy `IoHandler` given a builder function to extract supported method names. -pub fn method_names(gen_handler: F) -> Result, E> -where - F: FnOnce(RpcMiddleware) -> Result, E>, - M: PubSubMetadata, -{ - let io = gen_handler(RpcMiddleware::new(None, HashSet::new(), "dummy"))?; - Ok(io.iter().map(|x| x.0.clone()).collect()) -} - +#[derive(Clone)] /// Middleware for RPC calls pub struct RpcMiddleware { - metrics: Option, - known_rpc_method_names: HashSet, - transport_label: String, + metrics: RpcMetrics, + transport_label: &'static str, } impl RpcMiddleware { - /// Create an instance of middleware. - /// - /// - `metrics`: Will be used to report statistics. - /// - `transport_label`: The label that is used when reporting the statistics. - pub fn new( - metrics: Option, - known_rpc_method_names: HashSet, - transport_label: &str, - ) -> Self { - RpcMiddleware { metrics, known_rpc_method_names, transport_label: transport_label.into() } + /// Create a new [`RpcMiddleware`] with the provided [`RpcMetrics`]. 
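+ ///
+ /// A minimal sketch of how the rpc-servers crate wires this up (illustrative only;
+ /// `metrics` is an `RpcMetrics` and `builder` a `WsServerBuilder` created elsewhere):
+ ///
+ /// ```ignore
+ /// let middleware = RpcMiddleware::new(metrics, "ws");
+ /// let builder = builder.set_middleware(middleware);
+ /// ```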
+ pub fn new(metrics: RpcMetrics, transport_label: &'static str) -> Self { + Self { metrics, transport_label } } } -impl RequestMiddleware for RpcMiddleware { - type Future = FutureResponse; - type CallFuture = FutureOutput; - - fn on_request( - &self, - request: jsonrpc_core::Request, - meta: M, - next: F, - ) -> Either - where - F: Fn(jsonrpc_core::Request, M) -> X + Send + Sync, - X: Future> + Send + 'static, - { - let metrics = self.metrics.clone(); - let transport_label = self.transport_label.clone(); - if let Some(ref metrics) = metrics { - metrics.requests_started.with_label_values(&[transport_label.as_str()]).inc(); - } - let r = next(request, meta); - Either::Left( - async move { - let r = r.await; - if let Some(ref metrics) = metrics { - metrics.requests_finished.with_label_values(&[transport_label.as_str()]).inc(); - } - r - } - .boxed(), - ) +impl Middleware for RpcMiddleware { + type Instant = std::time::Instant; + + fn on_connect(&self) { + self.metrics.ws_sessions_opened.as_ref().map(|counter| counter.inc()); } - fn on_call( - &self, - call: jsonrpc_core::Call, - meta: M, - next: F, - ) -> Either - where - F: Fn(jsonrpc_core::Call, M) -> X + Send + Sync, - X: Future> + Send + 'static, - { - let start = std::time::Instant::now(); - let name = call_name(&call, &self.known_rpc_method_names).to_owned(); - let metrics = self.metrics.clone(); - let transport_label = self.transport_label.clone(); - log::trace!(target: "rpc_metrics", "[{}] {} call: {:?}", transport_label, name, &call); - if let Some(ref metrics) = metrics { - metrics - .calls_started - .with_label_values(&[transport_label.as_str(), name.as_str()]) - .inc(); - } - let r = next(call, meta); - Either::Left( - async move { - let r = r.await; - let micros = start.elapsed().as_micros(); - if let Some(ref metrics) = metrics { - metrics - .calls_time - .with_label_values(&[transport_label.as_str(), name.as_str()]) - .observe(micros as _); - metrics - .calls_finished - .with_label_values(&[ - transport_label.as_str(), - name.as_str(), - if is_success(&r) { "true" } else { "false" }, - ]) - .inc(); - } - log::debug!( - target: "rpc_metrics", - "[{}] {} call took {} μs", - transport_label, - name, - micros, - ); - r - } - .boxed(), - ) + fn on_request(&self) -> Self::Instant { + let now = std::time::Instant::now(); + self.metrics.requests_started.with_label_values(&[self.transport_label]).inc(); + now } -} -fn call_name<'a>(call: &'a jsonrpc_core::Call, known_methods: &HashSet) -> &'a str { - // To prevent bloating metric with all invalid method names we filter them out here. - let only_known = |method: &'a String| { - if known_methods.contains(method) { - method.as_str() - } else { - "invalid method" - } - }; + fn on_call(&self, name: &str) { + log::trace!(target: "rpc_metrics", "[{}] on_call name={}", self.transport_label, name); + self.metrics + .calls_started + .with_label_values(&[self.transport_label, name]) + .inc(); + } - match call { - jsonrpc_core::Call::Invalid { .. 
} => "invalid call", - jsonrpc_core::Call::MethodCall(ref call) => only_known(&call.method), - jsonrpc_core::Call::Notification(ref notification) => only_known(¬ification.method), + fn on_result(&self, name: &str, success: bool, started_at: Self::Instant) { + let micros = started_at.elapsed().as_micros(); + log::debug!( + target: "rpc_metrics", + "[{}] {} call took {} μs", + self.transport_label, + name, + micros, + ); + self.metrics + .calls_time + .with_label_values(&[self.transport_label, name]) + .observe(micros as _); + + self.metrics + .calls_finished + .with_label_values(&[ + self.transport_label, + name, + if success { "true" } else { "false" }, + ]) + .inc(); + } + + fn on_response(&self, started_at: Self::Instant) { + log::trace!(target: "rpc_metrics", "[{}] on_response started_at={:?}", self.transport_label, started_at); + self.metrics.requests_finished.with_label_values(&[self.transport_label]).inc(); } -} -fn is_success(output: &Option) -> bool { - matches!(output, Some(jsonrpc_core::Output::Success(..))) + fn on_disconnect(&self) { + self.metrics.ws_sessions_closed.as_ref().map(|counter| counter.inc()); + } } diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index f76665d6c97a7..515de401119d4 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -16,11 +16,10 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" hash-db = { version = "0.15.2", default-features = false } -jsonrpc-pubsub = "18.0.0" +jsonrpsee = { version = "0.12.0", features = ["server"] } lazy_static = { version = "1.4.0", optional = true } log = "0.4.16" parking_lot = "0.12.0" -rpc = { package = "jsonrpc-core", version = "18.0.0" } serde_json = "1.0.79" sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } @@ -39,14 +38,19 @@ sp-runtime = { version = "6.0.0", path = "../../primitives/runtime" } sp-session = { version = "4.0.0-dev", path = "../../primitives/session" } sp-version = { version = "5.0.0", path = "../../primitives/version" } +tokio = { version = "1.17.0", optional = true } + [dev-dependencies] +env_logger = "0.9" assert_matches = "1.3.0" lazy_static = "1.4.0" +sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } sc-network = { version = "0.10.0-dev", path = "../network" } sc-transaction-pool = { version = "4.0.0-dev", path = "../transaction-pool" } sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } +tokio = "1.17.0" sp-io = { version = "6.0.0", path = "../../primitives/io" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } [features] -test-helpers = ["lazy_static"] +test-helpers = ["lazy_static", "tokio"] diff --git a/client/rpc/src/author/mod.rs b/client/rpc/src/author/mod.rs index 2821eea2cc09d..d10398afc813b 100644 --- a/client/rpc/src/author/mod.rs +++ b/client/rpc/src/author/mod.rs @@ -23,26 +23,27 @@ mod tests; use std::sync::Arc; -use sp_blockchain::HeaderBackend; +use crate::SubscriptionTaskExecutor; use codec::{Decode, Encode}; -use futures::{ - future::{FutureExt, TryFutureExt}, - SinkExt, StreamExt as _, +use futures::{FutureExt, TryFutureExt}; +use jsonrpsee::{ + core::{async_trait, Error as JsonRpseeError, RpcResult}, + PendingSubscription, }; -use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId}; use sc_rpc_api::DenyUnsafe; use sc_transaction_pool_api::{ error::IntoPoolError, 
BlockHash, InPoolTransaction, TransactionFor, TransactionPool, - TransactionSource, TransactionStatus, TxHash, + TransactionSource, TxHash, }; use sp_api::ProvideRuntimeApi; +use sp_blockchain::HeaderBackend; use sp_core::Bytes; use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::{generic, traits::Block as BlockT}; use sp_session::SessionKeys; -use self::error::{Error, FutureResult, Result}; +use self::error::{Error, Result}; /// Re-export the API for backward compatibility. pub use sc_rpc_api::author::*; @@ -52,12 +53,12 @@ pub struct Author { client: Arc, /// Transactions pool pool: Arc
<P>
, - /// Subscriptions manager - subscriptions: SubscriptionManager, /// The key store. keystore: SyncCryptoStorePtr, /// Whether to deny unsafe calls deny_unsafe: DenyUnsafe, + /// Executor to spawn subscriptions. + executor: SubscriptionTaskExecutor, } impl Author { @@ -65,11 +66,11 @@ impl Author { pub fn new( client: Arc, pool: Arc
<P>
, - subscriptions: SubscriptionManager, keystore: SyncCryptoStorePtr, deny_unsafe: DenyUnsafe, + executor: SubscriptionTaskExecutor, ) -> Self { - Author { client, pool, subscriptions, keystore, deny_unsafe } + Author { client, pool, keystore, deny_unsafe, executor } } } @@ -80,7 +81,8 @@ impl Author { /// some unique transactions via RPC and have them included in the pool. const TX_SOURCE: TransactionSource = TransactionSource::External; -impl AuthorApi, BlockHash
<P>
> for Author +#[async_trait] +impl AuthorApiServer, BlockHash
<P>
> for Author where P: TransactionPool + Sync + Send + 'static, Client: HeaderBackend + ProvideRuntimeApi + Send + Sync + 'static, @@ -88,9 +90,24 @@ where P::Hash: Unpin, ::Hash: Unpin, { - type Metadata = crate::Metadata; + async fn submit_extrinsic(&self, ext: Bytes) -> RpcResult> { + let xt = match Decode::decode(&mut &ext[..]) { + Ok(xt) => xt, + Err(err) => return Err(Error::Client(Box::new(err)).into()), + }; + let best_block_hash = self.client.info().best_hash; + self.pool + .submit_one(&generic::BlockId::hash(best_block_hash), TX_SOURCE, xt) + .await + .map_err(|e| { + e.into_pool_error() + .map(|e| Error::Pool(e)) + .unwrap_or_else(|e| Error::Verification(Box::new(e))) + .into() + }) + } - fn insert_key(&self, key_type: String, suri: String, public: Bytes) -> Result<()> { + fn insert_key(&self, key_type: String, suri: String, public: Bytes) -> RpcResult<()> { self.deny_unsafe.check_if_safe()?; let key_type = key_type.as_str().try_into().map_err(|_| Error::BadKeyType)?; @@ -99,7 +116,7 @@ where Ok(()) } - fn rotate_keys(&self) -> Result { + fn rotate_keys(&self) -> RpcResult { self.deny_unsafe.check_if_safe()?; let best_block_hash = self.client.info().best_hash; @@ -107,10 +124,10 @@ where .runtime_api() .generate_session_keys(&generic::BlockId::Hash(best_block_hash), None) .map(Into::into) - .map_err(|e| Error::Client(Box::new(e))) + .map_err(|api_err| Error::Client(Box::new(api_err)).into()) } - fn has_session_keys(&self, session_keys: Bytes) -> Result { + fn has_session_keys(&self, session_keys: Bytes) -> RpcResult { self.deny_unsafe.check_if_safe()?; let best_block_hash = self.client.info().best_hash; @@ -124,40 +141,22 @@ where Ok(SyncCryptoStore::has_keys(&*self.keystore, &keys)) } - fn has_key(&self, public_key: Bytes, key_type: String) -> Result { + fn has_key(&self, public_key: Bytes, key_type: String) -> RpcResult { self.deny_unsafe.check_if_safe()?; let key_type = key_type.as_str().try_into().map_err(|_| Error::BadKeyType)?; Ok(SyncCryptoStore::has_keys(&*self.keystore, &[(public_key.to_vec(), key_type)])) } - fn submit_extrinsic(&self, ext: Bytes) -> FutureResult> { - let xt = match Decode::decode(&mut &ext[..]) { - Ok(xt) => xt, - Err(err) => return async move { Err(err.into()) }.boxed(), - }; - let best_block_hash = self.client.info().best_hash; - - self.pool - .submit_one(&generic::BlockId::hash(best_block_hash), TX_SOURCE, xt) - .map_err(|e| { - e.into_pool_error() - .map(Into::into) - .unwrap_or_else(|e| error::Error::Verification(Box::new(e))) - }) - .boxed() - } - - fn pending_extrinsics(&self) -> Result> { + fn pending_extrinsics(&self) -> RpcResult> { Ok(self.pool.ready().map(|tx| tx.data().encode().into()).collect()) } fn remove_extrinsic( &self, bytes_or_hash: Vec>>, - ) -> Result>> { + ) -> RpcResult>> { self.deny_unsafe.check_if_safe()?; - let hashes = bytes_or_hash .into_iter() .map(|x| match x { @@ -177,20 +176,12 @@ where .collect()) } - fn watch_extrinsic( - &self, - _metadata: Self::Metadata, - subscriber: Subscriber, BlockHash
<P>
>>, - xt: Bytes, - ) { + fn watch_extrinsic(&self, pending: PendingSubscription, xt: Bytes) { let best_block_hash = self.client.info().best_hash; - let dxt = match TransactionFor::
<P>
::decode(&mut &xt[..]).map_err(error::Error::from) { - Ok(tx) => tx, - Err(err) => { - log::debug!("Failed to submit extrinsic: {}", err); - // reject the subscriber (ignore errors - we don't care if subscriber is no longer - // there). - let _ = subscriber.reject(err.into()); + let dxt = match TransactionFor::
<P>
::decode(&mut &xt[..]).map_err(|e| Error::from(e)) { + Ok(dxt) => dxt, + Err(e) => { + pending.reject(JsonRpseeError::from(e)); return }, }; @@ -204,41 +195,25 @@ where .unwrap_or_else(|e| error::Error::Verification(Box::new(e))) }); - let subscriptions = self.subscriptions.clone(); - - let future = async move { - let tx_stream = match submit.await { - Ok(s) => s, + let fut = async move { + let stream = match submit.await { + Ok(stream) => stream, Err(err) => { - log::debug!("Failed to submit extrinsic: {}", err); - // reject the subscriber (ignore errors - we don't care if subscriber is no - // longer there). - let _ = subscriber.reject(err.into()); + pending.reject(JsonRpseeError::from(err)); return }, }; - subscriptions.add(subscriber, move |sink| { - tx_stream - .map(|v| Ok(Ok(v))) - .forward( - sink.sink_map_err(|e| log::debug!("Error sending notifications: {:?}", e)), - ) - .map(drop) - }); - }; + let mut sink = match pending.accept() { + Some(sink) => sink, + _ => return, + }; - let res = self.subscriptions.executor().spawn_obj(future.boxed().into()); - if res.is_err() { - log::warn!("Error spawning subscription RPC task."); + sink.pipe_from_stream(stream).await; } - } + .boxed(); - fn unwatch_extrinsic( - &self, - _metadata: Option, - id: SubscriptionId, - ) -> Result { - Ok(self.subscriptions.cancel(id)) + self.executor + .spawn("substrate-rpc-subscription", Some("rpc"), fut.map(drop).boxed()); } } diff --git a/client/rpc/src/author/tests.rs b/client/rpc/src/author/tests.rs index c555465645a74..f969812e5b14c 100644 --- a/client/rpc/src/author/tests.rs +++ b/client/rpc/src/author/tests.rs @@ -18,21 +18,26 @@ use super::*; +use crate::testing::{test_executor, timeout_secs}; use assert_matches::assert_matches; use codec::Encode; -use futures::executor; +use jsonrpsee::{ + core::Error as RpcError, + types::{error::CallError, EmptyParams}, + RpcModule, +}; use sc_transaction_pool::{BasicPool, FullChainApi}; +use sc_transaction_pool_api::TransactionStatus; use sp_core::{ blake2_256, + bytes::to_hex, crypto::{ByteArray, CryptoTypePublicPair, Pair}, - ed25519, - hexdisplay::HexDisplay, - sr25519, + ed25519, sr25519, testing::{ED25519, SR25519}, H256, }; use sp_keystore::testing::KeyStore; -use std::{mem, sync::Arc}; +use std::sync::Arc; use substrate_test_runtime_client::{ self, runtime::{Block, Extrinsic, SessionKeys, Transfer}, @@ -75,240 +80,253 @@ impl TestSetup { Author { client: self.client.clone(), pool: self.pool.clone(), - subscriptions: SubscriptionManager::new(Arc::new(crate::testing::TaskExecutor)), keystore: self.keystore.clone(), deny_unsafe: DenyUnsafe::No, + executor: test_executor(), } } -} -#[test] -fn submit_transaction_should_not_cause_error() { - let p = TestSetup::default().author(); - let xt = uxt(AccountKeyring::Alice, 1).encode(); - let h: H256 = blake2_256(&xt).into(); - - assert_matches!( - executor::block_on(AuthorApi::submit_extrinsic(&p, xt.clone().into())), - Ok(h2) if h == h2 - ); - assert!(executor::block_on(AuthorApi::submit_extrinsic(&p, xt.into())).is_err()); + fn into_rpc() -> RpcModule>> { + Self::default().author().into_rpc() + } } -#[test] -fn submit_rich_transaction_should_not_cause_error() { - let p = TestSetup::default().author(); - let xt = uxt(AccountKeyring::Alice, 0).encode(); - let h: H256 = blake2_256(&xt).into(); +#[tokio::test] +async fn author_submit_transaction_should_not_cause_error() { + let _ = env_logger::try_init(); + let author = TestSetup::default().author(); + let api = author.into_rpc(); + let xt: Bytes = 
uxt(AccountKeyring::Alice, 1).encode().into(); + let extrinsic_hash: H256 = blake2_256(&xt).into(); + let response: H256 = api.call("author_submitExtrinsic", [xt.clone()]).await.unwrap(); + + assert_eq!(response, extrinsic_hash); assert_matches!( - executor::block_on(AuthorApi::submit_extrinsic(&p, xt.clone().into())), - Ok(h2) if h == h2 + api.call::<_, H256>("author_submitExtrinsic", [xt]).await, + Err(RpcError::Call(CallError::Custom(err))) if err.message().contains("Already Imported") && err.code() == 1013 ); - assert!(executor::block_on(AuthorApi::submit_extrinsic(&p, xt.into())).is_err()); } -#[test] -fn should_watch_extrinsic() { - // given - let setup = TestSetup::default(); - let p = setup.author(); +#[tokio::test] +async fn author_should_watch_extrinsic() { + let api = TestSetup::into_rpc(); + let xt = to_hex(&uxt(AccountKeyring::Alice, 0).encode(), true); - let (subscriber, id_rx, data) = jsonrpc_pubsub::typed::Subscriber::new_test("test"); - - // when - p.watch_extrinsic( - Default::default(), - subscriber, - uxt(AccountKeyring::Alice, 0).encode().into(), - ); + let mut sub = api.subscribe("author_submitAndWatchExtrinsic", [xt]).await.unwrap(); + let (tx, sub_id) = timeout_secs(10, sub.next::>()) + .await + .unwrap() + .unwrap() + .unwrap(); - let id = executor::block_on(id_rx).unwrap().unwrap(); - assert_matches!(id, SubscriptionId::String(_)); + assert_matches!(tx, TransactionStatus::Ready); + assert_eq!(&sub_id, sub.subscription_id()); - let id = match id { - SubscriptionId::String(id) => id, - _ => unreachable!(), - }; - - // check notifications - let replacement = { + // Replace the extrinsic and observe the subscription is notified. + let (xt_replacement, xt_hash) = { let tx = Transfer { amount: 5, nonce: 0, from: AccountKeyring::Alice.into(), to: AccountKeyring::Bob.into(), }; - tx.into_signed_tx() + let tx = tx.into_signed_tx().encode(); + let hash = blake2_256(&tx); + + (to_hex(&tx, true), hash) }; - executor::block_on(AuthorApi::submit_extrinsic(&p, replacement.encode().into())).unwrap(); - let (res, data) = executor::block_on(data.into_future()); - - let expected = Some(format!( - r#"{{"jsonrpc":"2.0","method":"test","params":{{"result":"ready","subscription":"{}"}}}}"#, - id, - )); - assert_eq!(res, expected); - - let h = blake2_256(&replacement.encode()); - let expected = Some(format!( - r#"{{"jsonrpc":"2.0","method":"test","params":{{"result":{{"usurped":"0x{}"}},"subscription":"{}"}}}}"#, - HexDisplay::from(&h), - id, - )); - - let res = executor::block_on(data.into_future()).0; - assert_eq!(res, expected); + + let _ = api.call::<_, H256>("author_submitExtrinsic", [xt_replacement]).await.unwrap(); + + let (tx, sub_id) = timeout_secs(10, sub.next::>()) + .await + .unwrap() + .unwrap() + .unwrap(); + assert_eq!(tx, TransactionStatus::Usurped(xt_hash.into())); + assert_eq!(&sub_id, sub.subscription_id()); } -#[test] -fn should_return_watch_validation_error() { - // given - let setup = TestSetup::default(); - let p = setup.author(); +#[tokio::test] +async fn author_should_return_watch_validation_error() { + const METHOD: &'static str = "author_submitAndWatchExtrinsic"; - let (subscriber, id_rx, _data) = jsonrpc_pubsub::typed::Subscriber::new_test("test"); + let api = TestSetup::into_rpc(); + let failed_sub = api + .subscribe(METHOD, [to_hex(&uxt(AccountKeyring::Alice, 179).encode(), true)]) + .await; - // when - p.watch_extrinsic( - Default::default(), - subscriber, - uxt(AccountKeyring::Alice, 179).encode().into(), + assert_matches!( + failed_sub, + 
Err(RpcError::Call(CallError::Custom(err))) if err.message().contains("Invalid Transaction") && err.code() == 1010 ); - - // then - let res = executor::block_on(id_rx).unwrap(); - assert!(res.is_err(), "Expected the transaction to be rejected as invalid."); } -#[test] -fn should_return_pending_extrinsics() { - let p = TestSetup::default().author(); +#[tokio::test] +async fn author_should_return_pending_extrinsics() { + let api = TestSetup::into_rpc(); - let ex = uxt(AccountKeyring::Alice, 0); - executor::block_on(AuthorApi::submit_extrinsic(&p, ex.encode().into())).unwrap(); - assert_matches!( - p.pending_extrinsics(), - Ok(ref expected) if *expected == vec![Bytes(ex.encode())] - ); + let xt_bytes: Bytes = uxt(AccountKeyring::Alice, 0).encode().into(); + api.call::<_, H256>("author_submitExtrinsic", [to_hex(&xt_bytes, true)]) + .await + .unwrap(); + + let pending: Vec = + api.call("author_pendingExtrinsics", EmptyParams::new()).await.unwrap(); + assert_eq!(pending, vec![xt_bytes]); } -#[test] -fn should_remove_extrinsics() { +#[tokio::test] +async fn author_should_remove_extrinsics() { + const METHOD: &'static str = "author_removeExtrinsic"; let setup = TestSetup::default(); - let p = setup.author(); - - let ex1 = uxt(AccountKeyring::Alice, 0); - executor::block_on(p.submit_extrinsic(ex1.encode().into())).unwrap(); - let ex2 = uxt(AccountKeyring::Alice, 1); - executor::block_on(p.submit_extrinsic(ex2.encode().into())).unwrap(); - let ex3 = uxt(AccountKeyring::Bob, 0); - let hash3 = executor::block_on(p.submit_extrinsic(ex3.encode().into())).unwrap(); + let api = setup.author().into_rpc(); + + // Submit three extrinsics, then remove two of them (will cause the third to be removed as well, + // having a higher nonce) + let xt1_bytes = uxt(AccountKeyring::Alice, 0).encode(); + let xt1 = to_hex(&xt1_bytes, true); + let xt1_hash: H256 = api.call("author_submitExtrinsic", [xt1]).await.unwrap(); + + let xt2 = to_hex(&uxt(AccountKeyring::Alice, 1).encode(), true); + let xt2_hash: H256 = api.call("author_submitExtrinsic", [xt2]).await.unwrap(); + + let xt3 = to_hex(&uxt(AccountKeyring::Bob, 0).encode(), true); + let xt3_hash: H256 = api.call("author_submitExtrinsic", [xt3]).await.unwrap(); assert_eq!(setup.pool.status().ready, 3); - // now remove all 3 - let removed = p - .remove_extrinsic(vec![ - hash::ExtrinsicOrHash::Hash(hash3), - // Removing this one will also remove ex2 - hash::ExtrinsicOrHash::Extrinsic(ex1.encode().into()), - ]) + // Now remove all three. + // Notice how we need an extra `Vec` wrapping the `Vec` we want to submit as params. 
+ let removed: Vec = api + .call( + METHOD, + vec![vec![ + hash::ExtrinsicOrHash::Hash(xt3_hash), + // Removing this one will also remove xt2 + hash::ExtrinsicOrHash::Extrinsic(xt1_bytes.into()), + ]], + ) + .await .unwrap(); - assert_eq!(removed.len(), 3); + assert_eq!(removed, vec![xt1_hash, xt2_hash, xt3_hash]); } -#[test] -fn should_insert_key() { +#[tokio::test] +async fn author_should_insert_key() { let setup = TestSetup::default(); - let p = setup.author(); - + let api = setup.author().into_rpc(); let suri = "//Alice"; - let key_pair = ed25519::Pair::from_string(suri, None).expect("Generates keypair"); - p.insert_key( + let keypair = ed25519::Pair::from_string(suri, None).expect("generates keypair"); + let params: (String, String, Bytes) = ( String::from_utf8(ED25519.0.to_vec()).expect("Keytype is a valid string"), suri.to_string(), - key_pair.public().0.to_vec().into(), - ) - .expect("Insert key"); - - let public_keys = SyncCryptoStore::keys(&*setup.keystore, ED25519).unwrap(); + keypair.public().0.to_vec().into(), + ); + api.call::<_, ()>("author_insertKey", params).await.unwrap(); + let pubkeys = SyncCryptoStore::keys(&*setup.keystore, ED25519).unwrap(); - assert!(public_keys - .contains(&CryptoTypePublicPair(ed25519::CRYPTO_ID, key_pair.public().to_raw_vec()))); + assert!( + pubkeys.contains(&CryptoTypePublicPair(ed25519::CRYPTO_ID, keypair.public().to_raw_vec())) + ); } -#[test] -fn should_rotate_keys() { +#[tokio::test] +async fn author_should_rotate_keys() { let setup = TestSetup::default(); - let p = setup.author(); - - let new_public_keys = p.rotate_keys().expect("Rotates the keys"); + let api = setup.author().into_rpc(); + let new_pubkeys: Bytes = api.call("author_rotateKeys", EmptyParams::new()).await.unwrap(); let session_keys = - SessionKeys::decode(&mut &new_public_keys[..]).expect("SessionKeys decode successfully"); - - let ed25519_public_keys = SyncCryptoStore::keys(&*setup.keystore, ED25519).unwrap(); - let sr25519_public_keys = SyncCryptoStore::keys(&*setup.keystore, SR25519).unwrap(); - - assert!(ed25519_public_keys + SessionKeys::decode(&mut &new_pubkeys[..]).expect("SessionKeys decode successfully"); + let ed25519_pubkeys = SyncCryptoStore::keys(&*setup.keystore, ED25519).unwrap(); + let sr25519_pubkeys = SyncCryptoStore::keys(&*setup.keystore, SR25519).unwrap(); + assert!(ed25519_pubkeys .contains(&CryptoTypePublicPair(ed25519::CRYPTO_ID, session_keys.ed25519.to_raw_vec()))); - assert!(sr25519_public_keys + assert!(sr25519_pubkeys .contains(&CryptoTypePublicPair(sr25519::CRYPTO_ID, session_keys.sr25519.to_raw_vec()))); } -#[test] -fn test_has_session_keys() { - let setup = TestSetup::default(); - let p = setup.author(); - - let non_existent_public_keys = - TestSetup::default().author().rotate_keys().expect("Rotates the keys"); - - let public_keys = p.rotate_keys().expect("Rotates the keys"); - let test_vectors = vec![ - (public_keys, Ok(true)), - (vec![1, 2, 3].into(), Err(Error::InvalidSessionKeys)), - (non_existent_public_keys, Ok(false)), - ]; - - for (keys, result) in test_vectors { - assert_eq!( - result.map_err(|e| mem::discriminant(&e)), - p.has_session_keys(keys).map_err(|e| mem::discriminant(&e)), - ); - } +#[tokio::test] +async fn author_has_session_keys() { + // Setup + let api = TestSetup::into_rpc(); + + // Add a valid session key + let pubkeys: Bytes = api + .call("author_rotateKeys", EmptyParams::new()) + .await + .expect("Rotates the keys"); + + // Add a session key in a different keystore + let non_existent_pubkeys: Bytes = { + let api2 = 
TestSetup::default().author().into_rpc(); + api2.call("author_rotateKeys", EmptyParams::new()) + .await + .expect("Rotates the keys") + }; + + // Then… + let existing = api.call::<_, bool>("author_hasSessionKeys", vec![pubkeys]).await.unwrap(); + assert!(existing, "Existing key is in the session keys"); + + let inexistent = api + .call::<_, bool>("author_hasSessionKeys", vec![non_existent_pubkeys]) + .await + .unwrap(); + assert_eq!(inexistent, false, "Inexistent key is not in the session keys"); + + assert_matches!( + api.call::<_, bool>("author_hasSessionKeys", vec![Bytes::from(vec![1, 2, 3])]).await, + Err(RpcError::Call(CallError::Custom(err))) if err.message().contains("Session keys are not encoded correctly") + ); } -#[test] -fn test_has_key() { - let setup = TestSetup::default(); - let p = setup.author(); +#[tokio::test] +async fn author_has_key() { + let _ = env_logger::try_init(); + let api = TestSetup::into_rpc(); let suri = "//Alice"; - let alice_key_pair = ed25519::Pair::from_string(suri, None).expect("Generates keypair"); - p.insert_key( + let alice_keypair = ed25519::Pair::from_string(suri, None).expect("Generates keypair"); + let params = ( String::from_utf8(ED25519.0.to_vec()).expect("Keytype is a valid string"), suri.to_string(), - alice_key_pair.public().0.to_vec().into(), - ) - .expect("Insert key"); - let bob_key_pair = ed25519::Pair::from_string("//Bob", None).expect("Generates keypair"); - - let test_vectors = vec![ - (alice_key_pair.public().to_raw_vec().into(), ED25519, Ok(true)), - (alice_key_pair.public().to_raw_vec().into(), SR25519, Ok(false)), - (bob_key_pair.public().to_raw_vec().into(), ED25519, Ok(false)), - ]; - - for (key, key_type, result) in test_vectors { - assert_eq!( - result.map_err(|e| mem::discriminant(&e)), - p.has_key( - key, - String::from_utf8(key_type.0.to_vec()).expect("Keytype is a valid string"), - ) - .map_err(|e| mem::discriminant(&e)), + Bytes::from(alice_keypair.public().0.to_vec()), + ); + + api.call::<_, ()>("author_insertKey", params).await.expect("insertKey works"); + + let bob_keypair = ed25519::Pair::from_string("//Bob", None).expect("Generates keypair"); + + // Alice's ED25519 key is there + let has_alice_ed: bool = { + let params = ( + Bytes::from(alice_keypair.public().to_raw_vec()), + String::from_utf8(ED25519.0.to_vec()).expect("Keytype is a valid string"), ); - } + api.call("author_hasKey", params).await.unwrap() + }; + assert!(has_alice_ed); + + // Alice's SR25519 key is not there + let has_alice_sr: bool = { + let params = ( + Bytes::from(alice_keypair.public().to_raw_vec()), + String::from_utf8(SR25519.0.to_vec()).expect("Keytype is a valid string"), + ); + api.call("author_hasKey", params).await.unwrap() + }; + assert!(!has_alice_sr); + + // Bob's ED25519 key is not there + let has_bob_ed: bool = { + let params = ( + Bytes::from(bob_keypair.public().to_raw_vec()), + String::from_utf8(ED25519.0.to_vec()).expect("Keytype is a valid string"), + ); + api.call("author_hasKey", params).await.unwrap() + }; + assert!(!has_bob_ed); } diff --git a/client/rpc/src/chain/chain_full.rs b/client/rpc/src/chain/chain_full.rs index 288a825eb5bed..9ca6b3edcfe60 100644 --- a/client/rpc/src/chain/chain_full.rs +++ b/client/rpc/src/chain/chain_full.rs @@ -18,34 +18,40 @@ //! Blockchain API backend for full nodes. 
-use super::{client_err, error::FutureResult, ChainBackend}; -use futures::FutureExt; -use jsonrpc_pubsub::manager::SubscriptionManager; +use super::{client_err, ChainBackend, Error}; +use crate::SubscriptionTaskExecutor; +use std::{marker::PhantomData, sync::Arc}; + +use futures::{ + future::{self, FutureExt}, + stream::{self, Stream, StreamExt}, +}; +use jsonrpsee::{core::async_trait, PendingSubscription}; use sc_client_api::{BlockBackend, BlockchainEvents}; use sp_blockchain::HeaderBackend; use sp_runtime::{ generic::{BlockId, SignedBlock}, traits::Block as BlockT, }; -use std::{marker::PhantomData, sync::Arc}; /// Blockchain API backend for full nodes. Reads all the data from local database. pub struct FullChain { /// Substrate client. client: Arc, - /// Current subscriptions. - subscriptions: SubscriptionManager, /// phantom member to pin the block type _phantom: PhantomData, + /// Subscription executor. + executor: SubscriptionTaskExecutor, } impl FullChain { /// Create new Chain API RPC handler. - pub fn new(client: Arc, subscriptions: SubscriptionManager) -> Self { - Self { client, subscriptions, _phantom: PhantomData } + pub fn new(client: Arc, executor: SubscriptionTaskExecutor) -> Self { + Self { client, executor, _phantom: PhantomData } } } +#[async_trait] impl ChainBackend for FullChain where Block: BlockT + 'static, @@ -56,17 +62,93 @@ where &self.client } - fn subscriptions(&self) -> &SubscriptionManager { - &self.subscriptions + async fn header(&self, hash: Option) -> Result, Error> { + self.client.header(BlockId::Hash(self.unwrap_or_best(hash))).map_err(client_err) } - fn header(&self, hash: Option) -> FutureResult> { - let res = self.client.header(BlockId::Hash(self.unwrap_or_best(hash))).map_err(client_err); - async move { res }.boxed() + async fn block(&self, hash: Option) -> Result>, Error> { + self.client.block(&BlockId::Hash(self.unwrap_or_best(hash))).map_err(client_err) } - fn block(&self, hash: Option) -> FutureResult>> { - let res = self.client.block(&BlockId::Hash(self.unwrap_or_best(hash))).map_err(client_err); - async move { res }.boxed() + fn subscribe_all_heads(&self, sink: PendingSubscription) { + subscribe_headers( + &self.client, + &self.executor, + sink, + || self.client().info().best_hash, + || { + self.client() + .import_notification_stream() + .map(|notification| notification.header) + }, + ) } + + fn subscribe_new_heads(&self, sink: PendingSubscription) { + subscribe_headers( + &self.client, + &self.executor, + sink, + || self.client().info().best_hash, + || { + self.client() + .import_notification_stream() + .filter(|notification| future::ready(notification.is_new_best)) + .map(|notification| notification.header) + }, + ) + } + + fn subscribe_finalized_heads(&self, sink: PendingSubscription) { + subscribe_headers( + &self.client, + &self.executor, + sink, + || self.client().info().finalized_hash, + || { + self.client() + .finality_notification_stream() + .map(|notification| notification.header) + }, + ) + } +} + +/// Subscribe to new headers. +fn subscribe_headers( + client: &Arc, + executor: &SubscriptionTaskExecutor, + pending: PendingSubscription, + best_block_hash: G, + stream: F, +) where + Block: BlockT + 'static, + Block::Header: Unpin, + Client: HeaderBackend + 'static, + F: FnOnce() -> S, + G: FnOnce() -> Block::Hash, + S: Stream + Send + Unpin + 'static, +{ + // send current head right at the start. 
+ let maybe_header = client + .header(BlockId::Hash(best_block_hash())) + .map_err(client_err) + .and_then(|header| header.ok_or_else(|| Error::Other("Best header missing.".into()))) + .map_err(|e| log::warn!("Best header error {:?}", e)) + .ok(); + + // NOTE: by the time we set up the stream there might be a new best block and so there is a risk + // that the stream has a hole in it. The alternative would be to look up the best block *after* + // we set up the stream and chain it to the stream. Consuming code would need to handle + // duplicates at the beginning of the stream though. + let stream = stream::iter(maybe_header).chain(stream()); + + let fut = async move { + if let Some(mut sink) = pending.accept() { + sink.pipe_from_stream(stream).await; + } + } + .boxed(); + + executor.spawn("substrate-rpc-subscription", Some("rpc"), fut.map(drop).boxed()); } diff --git a/client/rpc/src/chain/mod.rs b/client/rpc/src/chain/mod.rs index 64231dd78c83c..a79c66e0a18f6 100644 --- a/client/rpc/src/chain/mod.rs +++ b/client/rpc/src/chain/mod.rs @@ -23,15 +23,14 @@ mod chain_full; #[cfg(test)] mod tests; -use futures::{future, StreamExt, TryStreamExt}; -use log::warn; -use rpc::{ - futures::{stream, FutureExt, SinkExt, Stream}, - Result as RpcResult, -}; use std::sync::Arc; -use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId}; +use crate::SubscriptionTaskExecutor; + +use jsonrpsee::{ + core::{async_trait, RpcResult}, + PendingSubscription, +}; use sc_client_api::BlockchainEvents; use sp_rpc::{list::ListOrValue, number::NumberOrHex}; use sp_runtime::{ @@ -39,13 +38,14 @@ use sp_runtime::{ traits::{Block as BlockT, Header, NumberFor}, }; -use self::error::{Error, FutureResult, Result}; +use self::error::Error; use sc_client_api::BlockBackend; pub use sc_rpc_api::chain::*; use sp_blockchain::HeaderBackend; /// Blockchain backend API +#[async_trait] trait ChainBackend: Send + Sync + 'static where Block: BlockT + 'static, @@ -55,9 +55,6 @@ where /// Get client reference. fn client(&self) -> &Arc; - /// Get subscriptions reference. - fn subscriptions(&self) -> &SubscriptionManager; - /// Tries to unwrap passed block hash, or uses best block hash otherwise. fn unwrap_or_best(&self, hash: Option) -> Block::Hash { match hash { @@ -67,15 +64,15 @@ where } /// Get header of a relay chain block. - fn header(&self, hash: Option) -> FutureResult>; + async fn header(&self, hash: Option) -> Result, Error>; /// Get header and body of a relay chain block. - fn block(&self, hash: Option) -> FutureResult>>; + async fn block(&self, hash: Option) -> Result>, Error>; /// Get hash of the n-th block in the canon chain. /// /// By default returns latest block hash. - fn block_hash(&self, number: Option) -> Result> { + fn block_hash(&self, number: Option) -> Result, Error> { match number { None => Ok(Some(self.client().info().best_hash)), Some(num_or_hex) => { @@ -97,107 +94,31 @@ where } /// Get hash of the last finalized block in the canon chain. - fn finalized_head(&self) -> Result { + fn finalized_head(&self) -> Result { Ok(self.client().info().finalized_hash) } /// All new head subscription - fn subscribe_all_heads( - &self, - _metadata: crate::Metadata, - subscriber: Subscriber, - ) { - subscribe_headers( - self.client(), - self.subscriptions(), - subscriber, - || self.client().info().best_hash, - || { - self.client() - .import_notification_stream() - .map(|notification| Ok::<_, rpc::Error>(notification.header)) - }, - ) - } - - /// Unsubscribe from all head subscription. 
- fn unsubscribe_all_heads( - &self, - _metadata: Option, - id: SubscriptionId, - ) -> RpcResult { - Ok(self.subscriptions().cancel(id)) - } + fn subscribe_all_heads(&self, sink: PendingSubscription); /// New best head subscription - fn subscribe_new_heads( - &self, - _metadata: crate::Metadata, - subscriber: Subscriber, - ) { - subscribe_headers( - self.client(), - self.subscriptions(), - subscriber, - || self.client().info().best_hash, - || { - self.client() - .import_notification_stream() - .filter(|notification| future::ready(notification.is_new_best)) - .map(|notification| Ok::<_, rpc::Error>(notification.header)) - }, - ) - } - - /// Unsubscribe from new best head subscription. - fn unsubscribe_new_heads( - &self, - _metadata: Option, - id: SubscriptionId, - ) -> RpcResult { - Ok(self.subscriptions().cancel(id)) - } + fn subscribe_new_heads(&self, sink: PendingSubscription); /// Finalized head subscription - fn subscribe_finalized_heads( - &self, - _metadata: crate::Metadata, - subscriber: Subscriber, - ) { - subscribe_headers( - self.client(), - self.subscriptions(), - subscriber, - || self.client().info().finalized_hash, - || { - self.client() - .finality_notification_stream() - .map(|notification| Ok::<_, rpc::Error>(notification.header)) - }, - ) - } - - /// Unsubscribe from finalized head subscription. - fn unsubscribe_finalized_heads( - &self, - _metadata: Option, - id: SubscriptionId, - ) -> RpcResult { - Ok(self.subscriptions().cancel(id)) - } + fn subscribe_finalized_heads(&self, sink: PendingSubscription); } /// Create new state API that works on full node. pub fn new_full( client: Arc, - subscriptions: SubscriptionManager, + executor: SubscriptionTaskExecutor, ) -> Chain where Block: BlockT + 'static, Block::Header: Unpin, Client: BlockBackend + HeaderBackend + BlockchainEvents + 'static, { - Chain { backend: Box::new(self::chain_full::FullChain::new(client, subscriptions)) } + Chain { backend: Box::new(self::chain_full::FullChain::new(client, executor)) } } /// Chain API with subscriptions support. 
@@ -205,122 +126,58 @@ pub struct Chain { backend: Box>, } -impl ChainApi, Block::Hash, Block::Header, SignedBlock> +#[async_trait] +impl ChainApiServer, Block::Hash, Block::Header, SignedBlock> for Chain where Block: BlockT + 'static, Block::Header: Unpin, Client: HeaderBackend + BlockchainEvents + 'static, { - type Metadata = crate::Metadata; - - fn header(&self, hash: Option) -> FutureResult> { - self.backend.header(hash) + async fn header(&self, hash: Option) -> RpcResult> { + self.backend.header(hash).await.map_err(Into::into) } - fn block(&self, hash: Option) -> FutureResult>> { - self.backend.block(hash) + async fn block(&self, hash: Option) -> RpcResult>> { + self.backend.block(hash).await.map_err(Into::into) } fn block_hash( &self, number: Option>, - ) -> Result>> { + ) -> RpcResult>> { match number { - None => self.backend.block_hash(None).map(ListOrValue::Value), - Some(ListOrValue::Value(number)) => - self.backend.block_hash(Some(number)).map(ListOrValue::Value), + None => self.backend.block_hash(None).map(ListOrValue::Value).map_err(Into::into), + Some(ListOrValue::Value(number)) => self + .backend + .block_hash(Some(number)) + .map(ListOrValue::Value) + .map_err(Into::into), Some(ListOrValue::List(list)) => Ok(ListOrValue::List( list.into_iter() .map(|number| self.backend.block_hash(Some(number))) - .collect::>()?, + .collect::>()?, )), } } - fn finalized_head(&self) -> Result { - self.backend.finalized_head() - } - - fn subscribe_all_heads(&self, metadata: Self::Metadata, subscriber: Subscriber) { - self.backend.subscribe_all_heads(metadata, subscriber) - } - - fn unsubscribe_all_heads( - &self, - metadata: Option, - id: SubscriptionId, - ) -> RpcResult { - self.backend.unsubscribe_all_heads(metadata, id) - } - - fn subscribe_new_heads(&self, metadata: Self::Metadata, subscriber: Subscriber) { - self.backend.subscribe_new_heads(metadata, subscriber) + fn finalized_head(&self) -> RpcResult { + self.backend.finalized_head().map_err(Into::into) } - fn unsubscribe_new_heads( - &self, - metadata: Option, - id: SubscriptionId, - ) -> RpcResult { - self.backend.unsubscribe_new_heads(metadata, id) + fn subscribe_all_heads(&self, sink: PendingSubscription) { + self.backend.subscribe_all_heads(sink) } - fn subscribe_finalized_heads( - &self, - metadata: Self::Metadata, - subscriber: Subscriber, - ) { - self.backend.subscribe_finalized_heads(metadata, subscriber) + fn subscribe_new_heads(&self, sink: PendingSubscription) { + self.backend.subscribe_new_heads(sink) } - fn unsubscribe_finalized_heads( - &self, - metadata: Option, - id: SubscriptionId, - ) -> RpcResult { - self.backend.unsubscribe_finalized_heads(metadata, id) + fn subscribe_finalized_heads(&self, sink: PendingSubscription) { + self.backend.subscribe_finalized_heads(sink) } } -/// Subscribe to new headers. -fn subscribe_headers( - client: &Arc, - subscriptions: &SubscriptionManager, - subscriber: Subscriber, - best_block_hash: G, - stream: F, -) where - Block: BlockT + 'static, - Block::Header: Unpin, - Client: HeaderBackend + 'static, - F: FnOnce() -> S, - G: FnOnce() -> Block::Hash, - S: Stream> + Send + 'static, -{ - subscriptions.add(subscriber, |sink| { - // send current head right at the start. 
- let header = client - .header(BlockId::Hash(best_block_hash())) - .map_err(client_err) - .and_then(|header| { - header.ok_or_else(|| Error::Other("Best header missing.".to_string())) - }) - .map_err(Into::into); - - // send further subscriptions - let stream = stream() - .inspect_err(|e| warn!("Block notification stream error: {:?}", e)) - .map(Ok); - - stream::iter(vec![Ok(header)]) - .chain(stream) - .forward(sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e))) - // we ignore the resulting Stream (if the first stream is over we are unsubscribed) - .map(|_| ()) - }); -} - fn client_err(err: sp_blockchain::Error) -> Error { Error::Client(Box::new(err)) } diff --git a/client/rpc/src/chain/tests.rs b/client/rpc/src/chain/tests.rs index fa4473d35f300..f09da200ff587 100644 --- a/client/rpc/src/chain/tests.rs +++ b/client/rpc/src/chain/tests.rs @@ -17,9 +17,9 @@ // along with this program. If not, see . use super::*; -use crate::testing::TaskExecutor; +use crate::testing::{test_executor, timeout_secs}; use assert_matches::assert_matches; -use futures::executor; +use jsonrpsee::types::EmptyParams; use sc_block_builder::BlockBuilderProvider; use sp_consensus::BlockOrigin; use sp_rpc::list::ListOrValue; @@ -28,221 +28,218 @@ use substrate_test_runtime_client::{ runtime::{Block, Header, H256}, }; -#[test] -fn should_return_header() { +#[tokio::test] +async fn should_return_header() { let client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor))); + let api = new_full(client.clone(), test_executor()).into_rpc(); - assert_matches!( - executor::block_on(api.header(Some(client.genesis_hash()).into())), - Ok(Some(ref x)) if x == &Header { + let res: Header = + api.call("chain_getHeader", [H256::from(client.genesis_hash())]).await.unwrap(); + assert_eq!( + res, + Header { parent_hash: H256::from_low_u64_be(0), number: 0, - state_root: x.state_root.clone(), - extrinsics_root: - "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314".parse().unwrap(), + state_root: res.state_root.clone(), + extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" + .parse() + .unwrap(), digest: Default::default(), } ); - assert_matches!( - executor::block_on(api.header(None.into())), - Ok(Some(ref x)) if x == &Header { + let res: Header = api.call("chain_getHeader", EmptyParams::new()).await.unwrap(); + assert_eq!( + res, + Header { parent_hash: H256::from_low_u64_be(0), number: 0, - state_root: x.state_root.clone(), - extrinsics_root: - "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314".parse().unwrap(), + state_root: res.state_root.clone(), + extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" + .parse() + .unwrap(), digest: Default::default(), } ); assert_matches!( - executor::block_on(api.header(Some(H256::from_low_u64_be(5)).into())), - Ok(None) + api.call::<_, Option
<Header>
>("chain_getHeader", [H256::from_low_u64_be(5)]) + .await + .unwrap(), + None ); } -#[test] -fn should_return_a_block() { +#[tokio::test] +async fn should_return_a_block() { let mut client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor))); + let api = new_full(client.clone(), test_executor()).into_rpc(); let block = client.new_block(Default::default()).unwrap().build().unwrap().block; let block_hash = block.hash(); - executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + client.import(BlockOrigin::Own, block).await.unwrap(); + + let res: SignedBlock = + api.call("chain_getBlock", [H256::from(client.genesis_hash())]).await.unwrap(); // Genesis block is not justified - assert_matches!( - executor::block_on(api.block(Some(client.genesis_hash()).into())), - Ok(Some(SignedBlock { justifications: None, .. })) - ); + assert!(res.justifications.is_none()); - assert_matches!( - executor::block_on(api.block(Some(block_hash).into())), - Ok(Some(ref x)) if x.block == Block { + let res: SignedBlock = + api.call("chain_getBlock", [H256::from(block_hash)]).await.unwrap(); + assert_eq!( + res.block, + Block { header: Header { parent_hash: client.genesis_hash(), number: 1, - state_root: x.block.header.state_root.clone(), - extrinsics_root: - "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314".parse().unwrap(), + state_root: res.block.header.state_root.clone(), + extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" + .parse() + .unwrap(), digest: Default::default(), }, extrinsics: vec![], } ); - assert_matches!( - executor::block_on(api.block(None.into())), - Ok(Some(ref x)) if x.block == Block { + let res: SignedBlock = api.call("chain_getBlock", Vec::::new()).await.unwrap(); + assert_eq!( + res.block, + Block { header: Header { parent_hash: client.genesis_hash(), number: 1, - state_root: x.block.header.state_root.clone(), - extrinsics_root: - "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314".parse().unwrap(), + state_root: res.block.header.state_root.clone(), + extrinsics_root: "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" + .parse() + .unwrap(), digest: Default::default(), }, extrinsics: vec![], } ); - assert_matches!(executor::block_on(api.block(Some(H256::from_low_u64_be(5)).into())), Ok(None)); + assert_matches!( + api.call::<_, Option
>("chain_getBlock", [H256::from_low_u64_be(5)]) + .await + .unwrap(), + None + ); } -#[test] -fn should_return_block_hash() { +#[tokio::test] +async fn should_return_block_hash() { let mut client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor))); + let api = new_full(client.clone(), test_executor()).into_rpc(); - assert_matches!( - api.block_hash(None.into()), - Ok(ListOrValue::Value(Some(ref x))) if x == &client.genesis_hash() - ); + let res: ListOrValue> = + api.call("chain_getBlockHash", EmptyParams::new()).await.unwrap(); assert_matches!( - api.block_hash(Some(ListOrValue::Value(0u64.into())).into()), - Ok(ListOrValue::Value(Some(ref x))) if x == &client.genesis_hash() + res, + ListOrValue::Value(Some(ref x)) if x == &client.genesis_hash() ); + let res: ListOrValue> = + api.call("chain_getBlockHash", [ListOrValue::from(0_u64)]).await.unwrap(); assert_matches!( - api.block_hash(Some(ListOrValue::Value(1u64.into())).into()), - Ok(ListOrValue::Value(None)) + res, + ListOrValue::Value(Some(ref x)) if x == &client.genesis_hash() ); + let res: Option>> = + api.call("chain_getBlockHash", [ListOrValue::from(1_u64)]).await.unwrap(); + assert_matches!(res, None); + let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - executor::block_on(client.import(BlockOrigin::Own, block.clone())).unwrap(); + client.import(BlockOrigin::Own, block.clone()).await.unwrap(); + let res: ListOrValue> = + api.call("chain_getBlockHash", [ListOrValue::from(0_u64)]).await.unwrap(); assert_matches!( - api.block_hash(Some(ListOrValue::Value(0u64.into())).into()), - Ok(ListOrValue::Value(Some(ref x))) if x == &client.genesis_hash() + res, + ListOrValue::Value(Some(ref x)) if x == &client.genesis_hash() ); + + let res: ListOrValue> = + api.call("chain_getBlockHash", [ListOrValue::from(1_u64)]).await.unwrap(); assert_matches!( - api.block_hash(Some(ListOrValue::Value(1u64.into())).into()), - Ok(ListOrValue::Value(Some(ref x))) if x == &block.hash() + res, + ListOrValue::Value(Some(ref x)) if x == &block.hash() ); + + let res: ListOrValue> = api + .call("chain_getBlockHash", [ListOrValue::Value(sp_core::U256::from(1_u64))]) + .await + .unwrap(); assert_matches!( - api.block_hash(Some(ListOrValue::Value(sp_core::U256::from(1u64).into())).into()), - Ok(ListOrValue::Value(Some(ref x))) if x == &block.hash() + res, + ListOrValue::Value(Some(ref x)) if x == &block.hash() ); + let res: ListOrValue> = api + .call("chain_getBlockHash", [ListOrValue::List(vec![0_u64, 1_u64, 2_u64])]) + .await + .unwrap(); assert_matches!( - api.block_hash(Some(vec![0u64.into(), 1u64.into(), 2u64.into()].into())), - Ok(ListOrValue::List(list)) if list == &[client.genesis_hash().into(), block.hash().into(), None] + res, + ListOrValue::List(list) if list == &[client.genesis_hash().into(), block.hash().into(), None] ); } -#[test] -fn should_return_finalized_hash() { +#[tokio::test] +async fn should_return_finalized_hash() { let mut client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor))); + let api = new_full(client.clone(), test_executor()).into_rpc(); - assert_matches!( - api.finalized_head(), - Ok(ref x) if x == &client.genesis_hash() - ); + let res: H256 = api.call("chain_getFinalizedHead", EmptyParams::new()).await.unwrap(); + assert_eq!(res, client.genesis_hash()); // import new block let block = 
client.new_block(Default::default()).unwrap().build().unwrap().block; - executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + client.import(BlockOrigin::Own, block).await.unwrap(); + // no finalization yet - assert_matches!( - api.finalized_head(), - Ok(ref x) if x == &client.genesis_hash() - ); + let res: H256 = api.call("chain_getFinalizedHead", EmptyParams::new()).await.unwrap(); + assert_eq!(res, client.genesis_hash()); // finalize client.finalize_block(BlockId::number(1), None).unwrap(); - assert_matches!( - api.finalized_head(), - Ok(ref x) if x == &client.block_hash(1).unwrap().unwrap() - ); + let res: H256 = api.call("chain_getFinalizedHead", EmptyParams::new()).await.unwrap(); + assert_eq!(res, client.block_hash(1).unwrap().unwrap()); } -#[test] -fn should_notify_about_latest_block() { - let (subscriber, id, mut transport) = Subscriber::new_test("test"); - - { - let mut client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor))); - - api.subscribe_all_heads(Default::default(), subscriber); - - // assert id assigned - assert!(matches!(executor::block_on(id), Ok(Ok(SubscriptionId::String(_))))); - - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); - } - - // Check for the correct number of notifications - executor::block_on((&mut transport).take(2).collect::>()); - assert!(executor::block_on(transport.next()).is_none()); +#[tokio::test] +async fn should_notify_about_latest_block() { + test_head_subscription("chain_subscribeAllHeads").await; } -#[test] -fn should_notify_about_best_block() { - let (subscriber, id, mut transport) = Subscriber::new_test("test"); - - { - let mut client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor))); - - api.subscribe_new_heads(Default::default(), subscriber); - - // assert id assigned - assert!(matches!(executor::block_on(id), Ok(Ok(SubscriptionId::String(_))))); - - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); - } - - // Assert that the correct number of notifications have been sent. 
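The ported tests that follow all use the same recipe: build the API, turn it into an RPC module with `into_rpc()`, and exercise it through raw JSON-RPC method names. A condensed sketch combining a call and a subscription (it reuses the `test_executor`/`timeout_secs` helpers this patch adds to `crate::testing` and the imports the chain tests already pull in; the test name is illustrative):

    #[tokio::test]
    async fn calls_and_subscribes_through_the_rpc_module() {
        let mut client = Arc::new(substrate_test_runtime_client::new());
        let api = new_full(client.clone(), test_executor()).into_rpc();

        // Plain call: the typed result is decoded straight from the JSON-RPC response.
        let genesis: Header =
            api.call("chain_getHeader", [H256::from(client.genesis_hash())]).await.unwrap();
        assert_eq!(genesis.number, 0);

        // Subscription: driven directly on the module, no WS server required.
        let mut sub = api.subscribe("chain_subscribeNewHeads", EmptyParams::new()).await.unwrap();
        let block = client.new_block(Default::default()).unwrap().build().unwrap().block;
        client.import(BlockOrigin::Own, block).await.unwrap();
        assert_matches!(timeout_secs(10, sub.next::<Header>()).await, Ok(Some(_)));
    }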
- executor::block_on((&mut transport).take(2).collect::>()); - assert!(executor::block_on(transport.next()).is_none()); +#[tokio::test] +async fn should_notify_about_best_block() { + test_head_subscription("chain_subscribeNewHeads").await; } -#[test] -fn should_notify_about_finalized_block() { - let (subscriber, id, mut transport) = Subscriber::new_test("test"); - - { - let mut client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), SubscriptionManager::new(Arc::new(TaskExecutor))); - - api.subscribe_finalized_heads(Default::default(), subscriber); +#[tokio::test] +async fn should_notify_about_finalized_block() { + test_head_subscription("chain_subscribeFinalizedHeads").await; +} - // assert id assigned - assert!(matches!(executor::block_on(id), Ok(Ok(SubscriptionId::String(_))))); +async fn test_head_subscription(method: &str) { + let mut client = Arc::new(substrate_test_runtime_client::new()); + let mut sub = { + let api = new_full(client.clone(), test_executor()).into_rpc(); + let sub = api.subscribe(method, EmptyParams::new()).await.unwrap(); let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + client.import(BlockOrigin::Own, block).await.unwrap(); client.finalize_block(BlockId::number(1), None).unwrap(); - } + sub + }; + + assert_matches!(timeout_secs(10, sub.next::
<Header>
()).await, Ok(Some(_))); + assert_matches!(timeout_secs(10, sub.next::
<Header>
()).await, Ok(Some(_))); - // Assert that the correct number of notifications have been sent. - executor::block_on((&mut transport).take(2).collect::>()); - assert!(executor::block_on(transport.next()).is_none()); + sub.close(); + assert_matches!(timeout_secs(10, sub.next::
<Header>
()).await, Ok(None)); } diff --git a/client/rpc/src/dev/mod.rs b/client/rpc/src/dev/mod.rs index d782a03feae43..7f4b68f56f6f6 100644 --- a/client/rpc/src/dev/mod.rs +++ b/client/rpc/src/dev/mod.rs @@ -16,19 +16,15 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Implementation of the [`DevApi`] trait providing debug utilities for Substrate based +//! Implementation of the [`DevApiServer`] trait providing debug utilities for Substrate based //! blockchains. #[cfg(test)] mod tests; -pub use sc_rpc_api::dev::{BlockStats, DevApi}; - +use jsonrpsee::core::RpcResult; use sc_client_api::{BlockBackend, HeaderBackend}; -use sc_rpc_api::{ - dev::error::{Error, Result}, - DenyUnsafe, -}; +use sc_rpc_api::{dev::error::Error, DenyUnsafe}; use sp_api::{ApiExt, Core, ProvideRuntimeApi}; use sp_core::Encode; use sp_runtime::{ @@ -40,6 +36,8 @@ use std::{ sync::Arc, }; +pub use sc_rpc_api::dev::{BlockStats, DevApiServer}; + type HasherOf = <::Header as Header>::Hashing; /// The Dev API. All methods are unsafe. @@ -56,7 +54,7 @@ impl Dev { } } -impl DevApi for Dev +impl DevApiServer for Dev where Block: BlockT + 'static, Client: BlockBackend @@ -67,7 +65,7 @@ where + 'static, Client::Api: Core, { - fn block_stats(&self, hash: Block::Hash) -> Result> { + fn block_stats(&self, hash: Block::Hash) -> RpcResult> { self.deny_unsafe.check_if_safe()?; let block = { diff --git a/client/rpc/src/dev/tests.rs b/client/rpc/src/dev/tests.rs index 1d31abe38b640..b7a0de8f5ae0b 100644 --- a/client/rpc/src/dev/tests.rs +++ b/client/rpc/src/dev/tests.rs @@ -18,25 +18,32 @@ use super::*; use assert_matches::assert_matches; -use futures::executor; +use jsonrpsee::{core::Error as JsonRpseeError, types::error::CallError}; use sc_block_builder::BlockBuilderProvider; use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; use substrate_test_runtime_client::{prelude::*, runtime::Block}; -#[test] -fn block_stats_work() { +#[tokio::test] +async fn block_stats_work() { let mut client = Arc::new(substrate_test_runtime_client::new()); - let api = >::new(client.clone(), DenyUnsafe::No); + let api = >::new(client.clone(), DenyUnsafe::No).into_rpc(); let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + client.import(BlockOrigin::Own, block).await.unwrap(); // Can't gather stats for a block without a parent. 
- assert_eq!(api.block_stats(client.genesis_hash()).unwrap(), None); + assert_eq!( + api.call::<_, Option>("dev_getBlockStats", [client.genesis_hash()]) + .await + .unwrap(), + None + ); assert_eq!( - api.block_stats(client.info().best_hash).unwrap(), + api.call::<_, Option>("dev_getBlockStats", [client.info().best_hash]) + .await + .unwrap(), Some(BlockStats { witness_len: 597, witness_compact_len: 500, @@ -46,13 +53,17 @@ fn block_stats_work() { ); } -#[test] -fn deny_unsafe_works() { +#[tokio::test] +async fn deny_unsafe_works() { let mut client = Arc::new(substrate_test_runtime_client::new()); - let api = >::new(client.clone(), DenyUnsafe::Yes); + let api = >::new(client.clone(), DenyUnsafe::Yes).into_rpc(); let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + client.import(BlockOrigin::Own, block).await.unwrap(); - assert_matches!(api.block_stats(client.info().best_hash), Err(Error::UnsafeRpcCalled(_))); + assert_matches!( + api.call::<_, Option>("dev_getBlockStats", [client.info().best_hash]) + .await, + Err(JsonRpseeError::Call(CallError::Custom(err))) if err.message().contains("RPC call is unsafe to be called externally") + ); } diff --git a/client/rpc/src/lib.rs b/client/rpc/src/lib.rs index 59a1d542d365a..a0e810eafbb62 100644 --- a/client/rpc/src/lib.rs +++ b/client/rpc/src/lib.rs @@ -22,15 +22,14 @@ #![warn(missing_docs)] -use futures::{ - task::{FutureObj, Spawn, SpawnError}, - FutureExt, +pub use jsonrpsee::core::{ + id_providers::{ + RandomIntegerIdProvider as RandomIntegerSubscriptionId, + RandomStringIdProvider as RandomStringSubscriptionId, + }, + traits::IdProvider as RpcSubscriptionIdProvider, }; -use sp_core::traits::SpawnNamed; -use std::sync::Arc; - -pub use rpc::IoHandlerExtension as RpcExtension; -pub use sc_rpc_api::{DenyUnsafe, Metadata}; +pub use sc_rpc_api::DenyUnsafe; pub mod author; pub mod chain; @@ -43,24 +42,4 @@ pub mod system; pub mod testing; /// Task executor that is being used by RPC subscriptions. -#[derive(Clone)] -pub struct SubscriptionTaskExecutor(Arc); - -impl SubscriptionTaskExecutor { - /// Create a new `Self` with the given spawner. - pub fn new(spawn: impl SpawnNamed + 'static) -> Self { - Self(Arc::new(spawn)) - } -} - -impl Spawn for SubscriptionTaskExecutor { - fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError> { - self.0 - .spawn("substrate-rpc-subscription", Some("rpc"), future.map(drop).boxed()); - Ok(()) - } - - fn status(&self) -> Result<(), SpawnError> { - Ok(()) - } -} +pub type SubscriptionTaskExecutor = std::sync::Arc; diff --git a/client/rpc/src/offchain/mod.rs b/client/rpc/src/offchain/mod.rs index 67b97d31ab949..b66b78274a64e 100644 --- a/client/rpc/src/offchain/mod.rs +++ b/client/rpc/src/offchain/mod.rs @@ -21,7 +21,8 @@ #[cfg(test)] mod tests; -use self::error::{Error, Result}; +use self::error::Error; +use jsonrpsee::core::{async_trait, Error as JsonRpseeError, RpcResult}; use parking_lot::RwLock; /// Re-export the API for backward compatibility. pub use sc_rpc_api::offchain::*; @@ -47,27 +48,27 @@ impl Offchain { } } -impl OffchainApi for Offchain { - /// Set offchain local storage under given key and prefix. 
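A pattern that repeats in the dev changes above and the offchain changes below: module error enums no longer cross the RPC boundary directly, they are converted into jsonrpsee errors via `JsonRpseeError::from(...)`, and `DenyUnsafe` rejections surface as a custom call error. The assertions used by the ported tests reduce to the following check (the `assert_unsafe_rejection` helper is illustrative only):

    use jsonrpsee::{core::Error as JsonRpseeError, types::error::CallError};

    fn assert_unsafe_rejection<T: std::fmt::Debug>(result: Result<T, JsonRpseeError>) {
        assert_matches::assert_matches!(
            result,
            Err(JsonRpseeError::Call(CallError::Custom(err)))
                if err.message().contains("RPC call is unsafe to be called externally")
        );
    }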
- fn set_local_storage(&self, kind: StorageKind, key: Bytes, value: Bytes) -> Result<()> { +#[async_trait] +impl OffchainApiServer for Offchain { + fn set_local_storage(&self, kind: StorageKind, key: Bytes, value: Bytes) -> RpcResult<()> { self.deny_unsafe.check_if_safe()?; let prefix = match kind { StorageKind::PERSISTENT => sp_offchain::STORAGE_PREFIX, - StorageKind::LOCAL => return Err(Error::UnavailableStorageKind), + StorageKind::LOCAL => return Err(JsonRpseeError::from(Error::UnavailableStorageKind)), }; self.storage.write().set(prefix, &*key, &*value); Ok(()) } - /// Get offchain local storage under given key and prefix. - fn get_local_storage(&self, kind: StorageKind, key: Bytes) -> Result> { + fn get_local_storage(&self, kind: StorageKind, key: Bytes) -> RpcResult> { self.deny_unsafe.check_if_safe()?; let prefix = match kind { StorageKind::PERSISTENT => sp_offchain::STORAGE_PREFIX, - StorageKind::LOCAL => return Err(Error::UnavailableStorageKind), + StorageKind::LOCAL => return Err(JsonRpseeError::from(Error::UnavailableStorageKind)), }; + Ok(self.storage.read().get(prefix, &*key).map(Into::into)) } } diff --git a/client/rpc/src/offchain/tests.rs b/client/rpc/src/offchain/tests.rs index 219eeb192dfdd..28a7b6115b657 100644 --- a/client/rpc/src/offchain/tests.rs +++ b/client/rpc/src/offchain/tests.rs @@ -39,6 +39,7 @@ fn local_storage_should_work() { #[test] fn offchain_calls_considered_unsafe() { + use jsonrpsee::types::error::CallError; let storage = InMemOffchainStorage::default(); let offchain = Offchain::new(storage, DenyUnsafe::Yes); let key = Bytes(b"offchain_storage".to_vec()); @@ -46,10 +47,14 @@ fn offchain_calls_considered_unsafe() { assert_matches!( offchain.set_local_storage(StorageKind::PERSISTENT, key.clone(), value.clone()), - Err(Error::UnsafeRpcCalled(_)) + Err(JsonRpseeError::Call(CallError::Custom(err))) => { + assert_eq!(err.message(), "RPC call is unsafe to be called externally") + } ); assert_matches!( offchain.get_local_storage(StorageKind::PERSISTENT, key), - Err(Error::UnsafeRpcCalled(_)) + Err(JsonRpseeError::Call(CallError::Custom(err))) => { + assert_eq!(err.message(), "RPC call is unsafe to be called externally") + } ); } diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index c9806a30b4549..a45651c5e7990 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -23,11 +23,15 @@ mod state_full; #[cfg(test)] mod tests; -use futures::FutureExt; -use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId}; -use rpc::Result as RpcResult; use std::sync::Arc; +use crate::SubscriptionTaskExecutor; + +use jsonrpsee::{ + core::{async_trait, Error as JsonRpseeError, RpcResult}, + ws_server::PendingSubscription, +}; + use sc_rpc_api::{state::ReadProof, DenyUnsafe}; use sp_core::{ storage::{PrefixedStorageKey, StorageChangeSet, StorageData, StorageKey}, @@ -38,7 +42,7 @@ use sp_version::RuntimeVersion; use sp_api::{CallApiAt, Metadata, ProvideRuntimeApi}; -use self::error::{Error, FutureResult}; +use self::error::Error; use sc_client_api::{ Backend, BlockBackend, BlockchainEvents, ExecutorProvider, ProofProvider, StorageProvider, @@ -49,144 +53,122 @@ use sp_blockchain::{HeaderBackend, HeaderMetadata}; const STORAGE_KEYS_PAGED_MAX_COUNT: u32 = 1000; /// State backend API. +#[async_trait] pub trait StateBackend: Send + Sync + 'static where Block: BlockT + 'static, Client: Send + Sync + 'static, { /// Call runtime method at given block. 
- fn call( + async fn call( &self, block: Option, method: String, call_data: Bytes, - ) -> FutureResult; + ) -> Result; /// Returns the keys with prefix, leave empty to get all the keys. - fn storage_keys( + async fn storage_keys( &self, block: Option, prefix: StorageKey, - ) -> FutureResult>; + ) -> Result, Error>; /// Returns the keys with prefix along with their values, leave empty to get all the pairs. - fn storage_pairs( + async fn storage_pairs( &self, block: Option, prefix: StorageKey, - ) -> FutureResult>; + ) -> Result, Error>; /// Returns the keys with prefix with pagination support. - fn storage_keys_paged( + async fn storage_keys_paged( &self, block: Option, prefix: Option, count: u32, start_key: Option, - ) -> FutureResult>; + ) -> Result, Error>; /// Returns a storage entry at a specific block's state. - fn storage( + async fn storage( &self, block: Option, key: StorageKey, - ) -> FutureResult>; + ) -> Result, Error>; /// Returns the hash of a storage entry at a block's state. - fn storage_hash( + async fn storage_hash( &self, block: Option, key: StorageKey, - ) -> FutureResult>; + ) -> Result, Error>; /// Returns the size of a storage entry at a block's state. /// /// If data is available at `key`, it is returned. Else, the sum of values who's key has `key` /// prefix is returned, i.e. all the storage (double) maps that have this prefix. - fn storage_size( + async fn storage_size( &self, block: Option, key: StorageKey, - ) -> FutureResult>; + ) -> Result, Error>; /// Returns the runtime metadata as an opaque blob. - fn metadata(&self, block: Option) -> FutureResult; + async fn metadata(&self, block: Option) -> Result; /// Get the runtime version. - fn runtime_version(&self, block: Option) -> FutureResult; + async fn runtime_version(&self, block: Option) -> Result; /// Query historical storage entries (by key) starting from a block given as the second /// parameter. /// /// NOTE This first returned result contains the initial state of storage for all keys. /// Subsequent values in the vector represent changes to the previous state (diffs). - fn query_storage( + async fn query_storage( &self, from: Block::Hash, to: Option, keys: Vec, - ) -> FutureResult>>; + ) -> Result>, Error>; /// Query storage entries (by key) starting at block hash given as the second parameter. - fn query_storage_at( + async fn query_storage_at( &self, keys: Vec, at: Option, - ) -> FutureResult>>; + ) -> Result>, Error>; /// Returns proof of storage entries at a specific block's state. 
- fn read_proof( + async fn read_proof( &self, block: Option, keys: Vec, - ) -> FutureResult>; - - /// New runtime version subscription - fn subscribe_runtime_version( - &self, - _meta: crate::Metadata, - subscriber: Subscriber, - ); - - /// Unsubscribe from runtime version subscription - fn unsubscribe_runtime_version( - &self, - _meta: Option, - id: SubscriptionId, - ) -> RpcResult; - - /// New storage subscription - fn subscribe_storage( - &self, - _meta: crate::Metadata, - subscriber: Subscriber>, - keys: Option>, - ); - - /// Unsubscribe from storage subscription - fn unsubscribe_storage( - &self, - _meta: Option, - id: SubscriptionId, - ) -> RpcResult; + ) -> Result, Error>; /// Trace storage changes for block - fn trace_block( + async fn trace_block( &self, block: Block::Hash, targets: Option, storage_keys: Option, methods: Option, - ) -> FutureResult; + ) -> Result; + + /// New runtime version subscription + fn subscribe_runtime_version(&self, sink: PendingSubscription); + + /// New storage subscription + fn subscribe_storage(&self, sink: PendingSubscription, keys: Option>); } /// Create new state API that works on full node. pub fn new_full( client: Arc, - subscriptions: SubscriptionManager, + executor: SubscriptionTaskExecutor, deny_unsafe: DenyUnsafe, rpc_max_payload: Option, -) -> (State, ChildState) +) -> (StateApi, ChildState) where Block: BlockT + 'static, Block::Hash: Unpin, @@ -207,168 +189,127 @@ where { let child_backend = Box::new(self::state_full::FullState::new( client.clone(), - subscriptions.clone(), + executor.clone(), rpc_max_payload, )); - let backend = - Box::new(self::state_full::FullState::new(client, subscriptions, rpc_max_payload)); - (State { backend, deny_unsafe }, ChildState { backend: child_backend }) + let backend = Box::new(self::state_full::FullState::new(client, executor, rpc_max_payload)); + (StateApi { backend, deny_unsafe }, ChildState { backend: child_backend }) } /// State API with subscriptions support. 
-pub struct State { +pub struct StateApi { backend: Box>, /// Whether to deny unsafe calls deny_unsafe: DenyUnsafe, } -impl StateApi for State +#[async_trait] +impl StateApiServer for StateApi where Block: BlockT + 'static, Client: Send + Sync + 'static, { - type Metadata = crate::Metadata; - - fn call(&self, method: String, data: Bytes, block: Option) -> FutureResult { - self.backend.call(block, method, data) + async fn call( + &self, + method: String, + data: Bytes, + block: Option, + ) -> RpcResult { + self.backend.call(block, method, data).await.map_err(Into::into) } - fn storage_keys( + async fn storage_keys( &self, key_prefix: StorageKey, block: Option, - ) -> FutureResult> { - self.backend.storage_keys(block, key_prefix) + ) -> RpcResult> { + self.backend.storage_keys(block, key_prefix).await.map_err(Into::into) } - fn storage_pairs( + async fn storage_pairs( &self, key_prefix: StorageKey, block: Option, - ) -> FutureResult> { - if let Err(err) = self.deny_unsafe.check_if_safe() { - return async move { Err(err.into()) }.boxed() - } - - self.backend.storage_pairs(block, key_prefix) + ) -> RpcResult> { + self.deny_unsafe.check_if_safe()?; + self.backend.storage_pairs(block, key_prefix).await.map_err(Into::into) } - fn storage_keys_paged( + async fn storage_keys_paged( &self, prefix: Option, count: u32, start_key: Option, block: Option, - ) -> FutureResult> { + ) -> RpcResult> { if count > STORAGE_KEYS_PAGED_MAX_COUNT { - return async move { - Err(Error::InvalidCount { value: count, max: STORAGE_KEYS_PAGED_MAX_COUNT }) - } - .boxed() + return Err(JsonRpseeError::from(Error::InvalidCount { + value: count, + max: STORAGE_KEYS_PAGED_MAX_COUNT, + })) } - self.backend.storage_keys_paged(block, prefix, count, start_key) + self.backend + .storage_keys_paged(block, prefix, count, start_key) + .await + .map_err(Into::into) } - fn storage( + async fn storage( &self, key: StorageKey, block: Option, - ) -> FutureResult> { - self.backend.storage(block, key) + ) -> RpcResult> { + self.backend.storage(block, key).await.map_err(Into::into) } - fn storage_hash( + async fn storage_hash( &self, key: StorageKey, block: Option, - ) -> FutureResult> { - self.backend.storage_hash(block, key) + ) -> RpcResult> { + self.backend.storage_hash(block, key).await.map_err(Into::into) } - fn storage_size( + async fn storage_size( &self, key: StorageKey, block: Option, - ) -> FutureResult> { - self.backend.storage_size(block, key) + ) -> RpcResult> { + self.backend.storage_size(block, key).await.map_err(Into::into) + } + + async fn metadata(&self, block: Option) -> RpcResult { + self.backend.metadata(block).await.map_err(Into::into) } - fn metadata(&self, block: Option) -> FutureResult { - self.backend.metadata(block) + async fn runtime_version(&self, at: Option) -> RpcResult { + self.backend.runtime_version(at).await.map_err(Into::into) } - fn query_storage( + async fn query_storage( &self, keys: Vec, from: Block::Hash, to: Option, - ) -> FutureResult>> { - if let Err(err) = self.deny_unsafe.check_if_safe() { - return async move { Err(err.into()) }.boxed() - } - - self.backend.query_storage(from, to, keys) + ) -> RpcResult>> { + self.deny_unsafe.check_if_safe()?; + self.backend.query_storage(from, to, keys).await.map_err(Into::into) } - fn query_storage_at( + async fn query_storage_at( &self, keys: Vec, at: Option, - ) -> FutureResult>> { - self.backend.query_storage_at(keys, at) + ) -> RpcResult>> { + self.backend.query_storage_at(keys, at).await.map_err(Into::into) } - fn read_proof( + async fn read_proof( &self, 
keys: Vec, block: Option, - ) -> FutureResult> { - self.backend.read_proof(block, keys) - } - - fn subscribe_storage( - &self, - meta: Self::Metadata, - subscriber: Subscriber>, - keys: Option>, - ) { - if keys.is_none() { - if let Err(err) = self.deny_unsafe.check_if_safe() { - subscriber.reject(err.into()) - .expect("subscription rejection can only fail if it's been already rejected, and we're rejecting it for the first time; qed"); - return - } - } - - self.backend.subscribe_storage(meta, subscriber, keys) - } - - fn unsubscribe_storage( - &self, - meta: Option, - id: SubscriptionId, - ) -> RpcResult { - self.backend.unsubscribe_storage(meta, id) - } - - fn runtime_version(&self, at: Option) -> FutureResult { - self.backend.runtime_version(at) - } - - fn subscribe_runtime_version( - &self, - meta: Self::Metadata, - subscriber: Subscriber, - ) { - self.backend.subscribe_runtime_version(meta, subscriber); - } - - fn unsubscribe_runtime_version( - &self, - meta: Option, - id: SubscriptionId, - ) -> RpcResult { - self.backend.unsubscribe_runtime_version(meta, id) + ) -> RpcResult> { + self.backend.read_proof(block, keys).await.map_err(Into::into) } /// Re-execute the given block with the tracing targets given in `targets` @@ -376,88 +317,102 @@ where /// /// Note: requires the node to run with `--rpc-methods=Unsafe`. /// Note: requires runtimes compiled with wasm tracing support, `--features with-tracing`. - fn trace_block( + async fn trace_block( &self, block: Block::Hash, targets: Option, storage_keys: Option, methods: Option, - ) -> FutureResult { - if let Err(err) = self.deny_unsafe.check_if_safe() { - return async move { Err(err.into()) }.boxed() + ) -> RpcResult { + self.deny_unsafe.check_if_safe()?; + self.backend + .trace_block(block, targets, storage_keys, methods) + .await + .map_err(Into::into) + } + + fn subscribe_runtime_version(&self, sink: PendingSubscription) { + self.backend.subscribe_runtime_version(sink) + } + + fn subscribe_storage(&self, sink: PendingSubscription, keys: Option>) { + if keys.is_none() { + if let Err(err) = self.deny_unsafe.check_if_safe() { + let _ = sink.reject(JsonRpseeError::from(err)); + return + } } - self.backend.trace_block(block, targets, storage_keys, methods) + self.backend.subscribe_storage(sink, keys) } } /// Child state backend API. +#[async_trait] pub trait ChildStateBackend: Send + Sync + 'static where Block: BlockT + 'static, Client: Send + Sync + 'static, { /// Returns proof of storage for a child key entries at a specific block's state. - fn read_child_proof( + async fn read_child_proof( &self, block: Option, storage_key: PrefixedStorageKey, keys: Vec, - ) -> FutureResult>; + ) -> Result, Error>; /// Returns the keys with prefix from a child storage, /// leave prefix empty to get all the keys. - fn storage_keys( + async fn storage_keys( &self, block: Option, storage_key: PrefixedStorageKey, prefix: StorageKey, - ) -> FutureResult>; + ) -> Result, Error>; /// Returns the keys with prefix from a child storage with pagination support. - fn storage_keys_paged( + async fn storage_keys_paged( &self, block: Option, storage_key: PrefixedStorageKey, prefix: Option, count: u32, start_key: Option, - ) -> FutureResult>; + ) -> Result, Error>; /// Returns a child storage entry at a specific block's state. - fn storage( + async fn storage( &self, block: Option, storage_key: PrefixedStorageKey, key: StorageKey, - ) -> FutureResult>; + ) -> Result, Error>; /// Returns child storage entries at a specific block's state. 
- fn storage_entries( + async fn storage_entries( &self, block: Option, storage_key: PrefixedStorageKey, keys: Vec, - ) -> FutureResult>>; + ) -> Result>, Error>; /// Returns the hash of a child storage entry at a block's state. - fn storage_hash( + async fn storage_hash( &self, block: Option, storage_key: PrefixedStorageKey, key: StorageKey, - ) -> FutureResult>; + ) -> Result, Error>; /// Returns the size of a child storage entry at a block's state. - fn storage_size( + async fn storage_size( &self, block: Option, storage_key: PrefixedStorageKey, key: StorageKey, - ) -> FutureResult> { - self.storage(block, storage_key, key) - .map(|x| x.map(|r| r.map(|v| v.0.len() as u64))) - .boxed() + ) -> Result, Error> { + self.storage(block, storage_key, key).await.map(|x| x.map(|x| x.0.len() as u64)) } } @@ -466,76 +421,84 @@ pub struct ChildState { backend: Box>, } -impl ChildStateApi for ChildState +#[async_trait] +impl ChildStateApiServer for ChildState where Block: BlockT + 'static, Client: Send + Sync + 'static, { - type Metadata = crate::Metadata; - - fn read_child_proof( + async fn storage_keys( &self, - child_storage_key: PrefixedStorageKey, - keys: Vec, + storage_key: PrefixedStorageKey, + key_prefix: StorageKey, block: Option, - ) -> FutureResult> { - self.backend.read_child_proof(block, child_storage_key, keys) + ) -> RpcResult> { + self.backend + .storage_keys(block, storage_key, key_prefix) + .await + .map_err(Into::into) } - fn storage( + async fn storage_keys_paged( &self, storage_key: PrefixedStorageKey, - key: StorageKey, + prefix: Option, + count: u32, + start_key: Option, block: Option, - ) -> FutureResult> { - self.backend.storage(block, storage_key, key) + ) -> RpcResult> { + self.backend + .storage_keys_paged(block, storage_key, prefix, count, start_key) + .await + .map_err(Into::into) } - fn storage_entries( + async fn storage( &self, storage_key: PrefixedStorageKey, - keys: Vec, + key: StorageKey, block: Option, - ) -> FutureResult>> { - self.backend.storage_entries(block, storage_key, keys) + ) -> RpcResult> { + self.backend.storage(block, storage_key, key).await.map_err(Into::into) } - fn storage_keys( + async fn storage_entries( &self, storage_key: PrefixedStorageKey, - key_prefix: StorageKey, + keys: Vec, block: Option, - ) -> FutureResult> { - self.backend.storage_keys(block, storage_key, key_prefix) + ) -> RpcResult>> { + self.backend.storage_entries(block, storage_key, keys).await.map_err(Into::into) } - fn storage_keys_paged( + async fn storage_hash( &self, storage_key: PrefixedStorageKey, - prefix: Option, - count: u32, - start_key: Option, + key: StorageKey, block: Option, - ) -> FutureResult> { - self.backend.storage_keys_paged(block, storage_key, prefix, count, start_key) + ) -> RpcResult> { + self.backend.storage_hash(block, storage_key, key).await.map_err(Into::into) } - fn storage_hash( + async fn storage_size( &self, storage_key: PrefixedStorageKey, key: StorageKey, block: Option, - ) -> FutureResult> { - self.backend.storage_hash(block, storage_key, key) + ) -> RpcResult> { + self.backend.storage_size(block, storage_key, key).await.map_err(Into::into) } - fn storage_size( + async fn read_child_proof( &self, - storage_key: PrefixedStorageKey, - key: StorageKey, + child_storage_key: PrefixedStorageKey, + keys: Vec, block: Option, - ) -> FutureResult> { - self.backend.storage_size(block, storage_key, key) + ) -> RpcResult> { + self.backend + .read_child_proof(block, child_storage_key, keys) + .await + .map_err(Into::into) } } diff --git 
a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 38f9b078d87a7..48165e912b03a 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -18,17 +18,26 @@ //! State API backend for full nodes. -use futures::{ - future, - future::{err, try_join_all}, - stream, FutureExt, SinkExt, StreamExt, +use std::{collections::HashMap, marker::PhantomData, sync::Arc}; + +use super::{ + client_err, + error::{Error, Result}, + ChildStateBackend, StateBackend, }; -use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId}; -use log::warn; -use rpc::Result as RpcResult; -use std::{collections::HashMap, sync::Arc}; +use crate::SubscriptionTaskExecutor; +use futures::{future, stream, FutureExt, StreamExt}; +use jsonrpsee::{ + core::{async_trait, Error as JsonRpseeError}, + PendingSubscription, +}; +use sc_client_api::{ + Backend, BlockBackend, BlockchainEvents, CallExecutor, ExecutorProvider, ProofProvider, + StorageProvider, +}; use sc_rpc_api::state::ReadProof; +use sp_api::{CallApiAt, Metadata, ProvideRuntimeApi}; use sp_blockchain::{ CachedHeaderMetadata, Error as ClientError, HeaderBackend, HeaderMetadata, Result as ClientResult, @@ -42,19 +51,6 @@ use sp_core::{ use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use sp_version::RuntimeVersion; -use sp_api::{CallApiAt, Metadata, ProvideRuntimeApi}; - -use super::{ - client_err, - error::{Error, FutureResult, Result}, - ChildStateBackend, StateBackend, -}; -use sc_client_api::{ - Backend, BlockBackend, BlockchainEvents, CallExecutor, ExecutorProvider, ProofProvider, - StorageNotification, StorageProvider, -}; -use std::marker::PhantomData; - /// Ranges to query in state_queryStorage. struct QueryStorageRange { /// Hashes of all the blocks in the range. @@ -64,7 +60,7 @@ struct QueryStorageRange { /// State API backend for full nodes. pub struct FullState { client: Arc, - subscriptions: SubscriptionManager, + executor: SubscriptionTaskExecutor, _phantom: PhantomData<(BE, Block)>, rpc_max_payload: Option, } @@ -81,10 +77,10 @@ where /// Create new state API backend for full nodes. pub fn new( client: Arc, - subscriptions: SubscriptionManager, + executor: SubscriptionTaskExecutor, rpc_max_payload: Option, ) -> Self { - Self { client, subscriptions, _phantom: PhantomData, rpc_max_payload } + Self { client, executor, _phantom: PhantomData, rpc_max_payload } } /// Returns given block hash or best block hash if None is passed. 
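Because `SubscriptionTaskExecutor` is now just an `Arc` around a `SpawnNamed` spawner (see the `client/rpc/src/lib.rs` change above), constructing the state backend no longer involves a subscription manager. A sketch of what the `test_executor()` helper used by the ported tests presumably boils down to; assumption: it wraps `sp_core::testing::TaskExecutor`, while a real node would hand over its task manager's spawn handle instead:

    use std::sync::Arc;

    use sp_core::testing::TaskExecutor;

    fn make_subscription_executor() -> SubscriptionTaskExecutor {
        // Any `SpawnNamed` implementation works; the alias erases the concrete type.
        Arc::new(TaskExecutor::new())
    }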
@@ -174,6 +170,7 @@ where } } +#[async_trait] impl StateBackend for FullState where Block: BlockT + 'static, @@ -193,14 +190,13 @@ where + 'static, Client::Api: Metadata, { - fn call( + async fn call( &self, block: Option, method: String, call_data: Bytes, - ) -> FutureResult { - let r = self - .block_or_best(block) + ) -> std::result::Result { + self.block_or_best(block) .and_then(|block| { self.client .executor() @@ -213,43 +209,37 @@ where ) .map(Into::into) }) - .map_err(client_err); - async move { r }.boxed() + .map_err(client_err) } - fn storage_keys( + async fn storage_keys( &self, block: Option, prefix: StorageKey, - ) -> FutureResult> { - let r = self - .block_or_best(block) + ) -> std::result::Result, Error> { + self.block_or_best(block) .and_then(|block| self.client.storage_keys(&BlockId::Hash(block), &prefix)) - .map_err(client_err); - async move { r }.boxed() + .map_err(client_err) } - fn storage_pairs( + async fn storage_pairs( &self, block: Option, prefix: StorageKey, - ) -> FutureResult> { - let r = self - .block_or_best(block) + ) -> std::result::Result, Error> { + self.block_or_best(block) .and_then(|block| self.client.storage_pairs(&BlockId::Hash(block), &prefix)) - .map_err(client_err); - async move { r }.boxed() + .map_err(client_err) } - fn storage_keys_paged( + async fn storage_keys_paged( &self, block: Option, prefix: Option, count: u32, start_key: Option, - ) -> FutureResult> { - let r = self - .block_or_best(block) + ) -> std::result::Result, Error> { + self.block_or_best(block) .and_then(|block| { self.client.storage_keys_iter( &BlockId::Hash(block), @@ -258,40 +248,36 @@ where ) }) .map(|iter| iter.take(count as usize).collect()) - .map_err(client_err); - async move { r }.boxed() + .map_err(client_err) } - fn storage( + async fn storage( &self, block: Option, key: StorageKey, - ) -> FutureResult> { - let r = self - .block_or_best(block) + ) -> std::result::Result, Error> { + self.block_or_best(block) .and_then(|block| self.client.storage(&BlockId::Hash(block), &key)) - .map_err(client_err); - async move { r }.boxed() + .map_err(client_err) } - fn storage_size( + async fn storage_size( &self, block: Option, key: StorageKey, - ) -> FutureResult> { + ) -> std::result::Result, Error> { let block = match self.block_or_best(block) { Ok(b) => b, - Err(e) => return async move { Err(client_err(e)) }.boxed(), + Err(e) => return Err(client_err(e)), }; match self.client.storage(&BlockId::Hash(block), &key) { - Ok(Some(d)) => return async move { Ok(Some(d.0.len() as u64)) }.boxed(), - Err(e) => return async move { Err(client_err(e)) }.boxed(), + Ok(Some(d)) => return Ok(Some(d.0.len() as u64)), + Err(e) => return Err(client_err(e)), Ok(None) => {}, } - let r = self - .client + self.client .storage_pairs(&BlockId::Hash(block), &key) .map(|kv| { let item_sum = kv.iter().map(|(_, v)| v.0.len() as u64).sum::(); @@ -301,48 +287,46 @@ where None } }) - .map_err(client_err); - async move { r }.boxed() + .map_err(client_err) } - fn storage_hash( + async fn storage_hash( &self, block: Option, key: StorageKey, - ) -> FutureResult> { - let r = self - .block_or_best(block) + ) -> std::result::Result, Error> { + self.block_or_best(block) .and_then(|block| self.client.storage_hash(&BlockId::Hash(block), &key)) - .map_err(client_err); - async move { r }.boxed() + .map_err(client_err) } - fn metadata(&self, block: Option) -> FutureResult { - let r = self.block_or_best(block).map_err(client_err).and_then(|block| { + async fn metadata(&self, block: Option) -> std::result::Result { + 
self.block_or_best(block).map_err(client_err).and_then(|block| { self.client .runtime_api() .metadata(&BlockId::Hash(block)) .map(Into::into) .map_err(|e| Error::Client(Box::new(e))) - }); - async move { r }.boxed() + }) } - fn runtime_version(&self, block: Option) -> FutureResult { - let r = self.block_or_best(block).map_err(client_err).and_then(|block| { + async fn runtime_version( + &self, + block: Option, + ) -> std::result::Result { + self.block_or_best(block).map_err(client_err).and_then(|block| { self.client .runtime_version_at(&BlockId::Hash(block)) .map_err(|e| Error::Client(Box::new(e))) - }); - async move { r }.boxed() + }) } - fn query_storage( + async fn query_storage( &self, from: Block::Hash, to: Option, keys: Vec, - ) -> FutureResult>> { + ) -> std::result::Result>, Error> { let call_fn = move || { let range = self.query_storage_range(from, to)?; let mut changes = Vec::new(); @@ -350,168 +334,151 @@ where self.query_storage_unfiltered(&range, &keys, &mut last_values, &mut changes)?; Ok(changes) }; - - let r = call_fn(); - async move { r }.boxed() + call_fn() } - fn query_storage_at( + async fn query_storage_at( &self, keys: Vec, at: Option, - ) -> FutureResult>> { + ) -> std::result::Result>, Error> { let at = at.unwrap_or_else(|| self.client.info().best_hash); - self.query_storage(at, Some(at), keys) + self.query_storage(at, Some(at), keys).await } - fn read_proof( + async fn read_proof( &self, block: Option, keys: Vec, - ) -> FutureResult> { - let r = self - .block_or_best(block) + ) -> std::result::Result, Error> { + self.block_or_best(block) .and_then(|block| { self.client .read_proof(&BlockId::Hash(block), &mut keys.iter().map(|key| key.0.as_ref())) .map(|proof| proof.iter_nodes().map(|node| node.into()).collect()) .map(|proof| ReadProof { at: block, proof }) }) - .map_err(client_err); - async move { r }.boxed() + .map_err(client_err) } - fn subscribe_runtime_version( - &self, - _meta: crate::Metadata, - subscriber: Subscriber, - ) { - self.subscriptions.add(subscriber, |sink| { - let version = self - .block_or_best(None) - .and_then(|block| { - self.client.runtime_version_at(&BlockId::Hash(block)).map_err(Into::into) - }) - .map_err(client_err) - .map_err(Into::into); + fn subscribe_runtime_version(&self, pending: PendingSubscription) { + let client = self.client.clone(); - let client = self.client.clone(); - let mut previous_version = version.clone(); + let initial = match self + .block_or_best(None) + .and_then(|block| { + self.client.runtime_version_at(&BlockId::Hash(block)).map_err(Into::into) + }) + .map_err(|e| Error::Client(Box::new(e))) + { + Ok(initial) => initial, + Err(e) => { + pending.reject(JsonRpseeError::from(e)); + return + }, + }; - // A stream of all best blocks. 
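The reworked `subscribe_runtime_version` below reduces to a small stream recipe: resolve the current version up front (rejecting the pending subscription if that fails), then emit it followed by only the versions that actually differ from the last one sent. The same idea as a standalone sketch (names and the generic item type are illustrative; the real code operates on `RuntimeVersion` and the client's import notifications):

    use futures::{future, stream, Stream, StreamExt};

    fn initial_then_changes<T, S>(initial: T, updates: S) -> impl Stream<Item = T>
    where
        T: Clone + PartialEq,
        S: Stream<Item = T>,
    {
        let mut previous = initial.clone();
        // Drop updates equal to the last emitted value, mirroring the version comparison below.
        let changed = updates.filter_map(move |next| {
            let out = if next != previous {
                previous = next.clone();
                Some(next)
            } else {
                None
            };
            future::ready(out)
        });
        // Current value first, then the deduplicated changes.
        stream::once(future::ready(initial)).chain(changed)
    }

The resulting stream is handed to `pipe_from_stream` on the accepted sink, exactly as the storage subscription further down does.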
- let stream = - client.import_notification_stream().filter(|n| future::ready(n.is_new_best)); + let mut previous_version = initial.clone(); - let stream = stream.filter_map(move |n| { + // A stream of new versions + let version_stream = client + .import_notification_stream() + .filter(|n| future::ready(n.is_new_best)) + .filter_map(move |n| { let version = client .runtime_version_at(&BlockId::hash(n.hash)) - .map_err(|e| Error::Client(Box::new(e))) - .map_err(Into::into); - - if previous_version != version { - previous_version = version.clone(); - future::ready(Some(Ok::<_, ()>(version))) - } else { - future::ready(None) + .map_err(|e| Error::Client(Box::new(e))); + + match version { + Ok(version) if version != previous_version => { + previous_version = version.clone(); + future::ready(Some(version)) + }, + _ => future::ready(None), } }); - stream::iter(vec![Ok(version)]) - .chain(stream) - .forward(sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e))) - // we ignore the resulting Stream (if the first stream is over we are unsubscribed) - .map(|_| ()) - }); - } + let stream = futures::stream::once(future::ready(initial)).chain(version_stream); - fn unsubscribe_runtime_version( - &self, - _meta: Option, - id: SubscriptionId, - ) -> RpcResult { - Ok(self.subscriptions.cancel(id)) + let fut = async move { + if let Some(mut sink) = pending.accept() { + sink.pipe_from_stream(stream).await; + } + } + .boxed(); + + self.executor + .spawn("substrate-rpc-subscription", Some("rpc"), fut.map(drop).boxed()); } - fn subscribe_storage( - &self, - _meta: crate::Metadata, - subscriber: Subscriber>, - keys: Option>, - ) { - let keys = Into::>>::into(keys); + fn subscribe_storage(&self, pending: PendingSubscription, keys: Option>) { let stream = match self.client.storage_changes_notification_stream(keys.as_deref(), None) { Ok(stream) => stream, - Err(err) => { - let _ = subscriber.reject(client_err(err).into()); + Err(blockchain_err) => { + pending.reject(JsonRpseeError::from(Error::Client(Box::new(blockchain_err)))); return }, }; // initial values - let initial = stream::iter( - keys.map(|keys| { - let block = self.client.info().best_hash; - let changes = keys - .into_iter() - .map(|key| { - let v = self.client.storage(&BlockId::Hash(block), &key).ok().flatten(); - (key, v) - }) - .collect(); - vec![Ok(Ok(StorageChangeSet { block, changes }))] - }) - .unwrap_or_default(), - ); - - self.subscriptions.add(subscriber, |sink| { - let stream = stream.map(|StorageNotification { block, changes }| { - Ok(Ok::<_, rpc::Error>(StorageChangeSet { - block, - changes: changes - .iter() - .filter_map(|(o_sk, k, v)| o_sk.is_none().then(|| (k.clone(), v.cloned()))) - .collect(), - })) - }); - - initial - .chain(stream) - .forward(sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e))) - // we ignore the resulting Stream (if the first stream is over we are unsubscribed) - .map(|_| ()) + let initial = stream::iter(keys.map(|keys| { + let block = self.client.info().best_hash; + let changes = keys + .into_iter() + .map(|key| { + let v = self.client.storage(&BlockId::Hash(block), &key).ok().flatten(); + (key, v) + }) + .collect(); + StorageChangeSet { block, changes } + })); + + // let storage_stream = stream.map(|(block, changes)| StorageChangeSet { + let storage_stream = stream.map(|storage_notif| StorageChangeSet { + block: storage_notif.block, + changes: storage_notif + .changes + .iter() + .filter_map(|(o_sk, k, v)| o_sk.is_none().then(|| (k.clone(), v.cloned()))) + .collect(), }); - } - fn 
unsubscribe_storage( - &self, - _meta: Option, - id: SubscriptionId, - ) -> RpcResult { - Ok(self.subscriptions.cancel(id)) + let stream = initial + .chain(storage_stream) + .filter(|storage| future::ready(!storage.changes.is_empty())); + + let fut = async move { + if let Some(mut sink) = pending.accept() { + sink.pipe_from_stream(stream).await; + } + } + .boxed(); + + self.executor + .spawn("substrate-rpc-subscription", Some("rpc"), fut.map(drop).boxed()); } - fn trace_block( + async fn trace_block( &self, block: Block::Hash, targets: Option, storage_keys: Option, methods: Option, - ) -> FutureResult { - let block_executor = sc_tracing::block::BlockExecutor::new( + ) -> std::result::Result { + sc_tracing::block::BlockExecutor::new( self.client.clone(), block, targets, storage_keys, methods, self.rpc_max_payload, - ); - let r = block_executor - .trace_block() - .map_err(|e| invalid_block::(block, None, e.to_string())); - async move { r }.boxed() + ) + .trace_block() + .map_err(|e| invalid_block::(block, None, e.to_string())) } } +#[async_trait] impl ChildStateBackend for FullState where Block: BlockT + 'static, @@ -530,14 +497,13 @@ where + 'static, Client::Api: Metadata, { - fn read_child_proof( + async fn read_child_proof( &self, block: Option, storage_key: PrefixedStorageKey, keys: Vec, - ) -> FutureResult> { - let r = self - .block_or_best(block) + ) -> std::result::Result, Error> { + self.block_or_best(block) .and_then(|block| { let child_info = match ChildType::from_prefixed_key(&storage_key) { Some((ChildType::ParentKeyId, storage_key)) => @@ -553,19 +519,16 @@ where .map(|proof| proof.iter_nodes().map(|node| node.into()).collect()) .map(|proof| ReadProof { at: block, proof }) }) - .map_err(client_err); - - async move { r }.boxed() + .map_err(client_err) } - fn storage_keys( + async fn storage_keys( &self, block: Option, storage_key: PrefixedStorageKey, prefix: StorageKey, - ) -> FutureResult> { - let r = self - .block_or_best(block) + ) -> std::result::Result, Error> { + self.block_or_best(block) .and_then(|block| { let child_info = match ChildType::from_prefixed_key(&storage_key) { Some((ChildType::ParentKeyId, storage_key)) => @@ -574,21 +537,18 @@ where }; self.client.child_storage_keys(&BlockId::Hash(block), &child_info, &prefix) }) - .map_err(client_err); - - async move { r }.boxed() + .map_err(client_err) } - fn storage_keys_paged( + async fn storage_keys_paged( &self, block: Option, storage_key: PrefixedStorageKey, prefix: Option, count: u32, start_key: Option, - ) -> FutureResult> { - let r = self - .block_or_best(block) + ) -> std::result::Result, Error> { + self.block_or_best(block) .and_then(|block| { let child_info = match ChildType::from_prefixed_key(&storage_key) { Some((ChildType::ParentKeyId, storage_key)) => @@ -603,19 +563,16 @@ where ) }) .map(|iter| iter.take(count as usize).collect()) - .map_err(client_err); - - async move { r }.boxed() + .map_err(client_err) } - fn storage( + async fn storage( &self, block: Option, storage_key: PrefixedStorageKey, key: StorageKey, - ) -> FutureResult> { - let r = self - .block_or_best(block) + ) -> std::result::Result, Error> { + self.block_or_best(block) .and_then(|block| { let child_info = match ChildType::from_prefixed_key(&storage_key) { Some((ChildType::ParentKeyId, storage_key)) => @@ -624,28 +581,25 @@ where }; self.client.child_storage(&BlockId::Hash(block), &child_info, &key) }) - .map_err(client_err); - - async move { r }.boxed() + .map_err(client_err) } - fn storage_entries( + async fn storage_entries( &self, block: 
Option, storage_key: PrefixedStorageKey, keys: Vec, - ) -> FutureResult>> { - let child_info = match ChildType::from_prefixed_key(&storage_key) { - Some((ChildType::ParentKeyId, storage_key)) => - Arc::new(ChildInfo::new_default(storage_key)), - None => return err(client_err(sp_blockchain::Error::InvalidChildStorageKey)).boxed(), - }; - let block = match self.block_or_best(block) { - Ok(b) => b, - Err(e) => return err(client_err(e)).boxed(), + ) -> std::result::Result>, Error> { + let child_info = if let Some((ChildType::ParentKeyId, storage_key)) = + ChildType::from_prefixed_key(&storage_key) + { + Arc::new(ChildInfo::new_default(storage_key)) + } else { + return Err(client_err(sp_blockchain::Error::InvalidChildStorageKey)) }; + let block = self.block_or_best(block).map_err(client_err)?; let client = self.client.clone(); - try_join_all(keys.into_iter().map(move |key| { + future::try_join_all(keys.into_iter().map(move |key| { let res = client .clone() .child_storage(&BlockId::Hash(block), &child_info, &key) @@ -653,17 +607,16 @@ where async move { res } })) - .boxed() + .await } - fn storage_hash( + async fn storage_hash( &self, block: Option, storage_key: PrefixedStorageKey, key: StorageKey, - ) -> FutureResult> { - let r = self - .block_or_best(block) + ) -> std::result::Result, Error> { + self.block_or_best(block) .and_then(|block| { let child_info = match ChildType::from_prefixed_key(&storage_key) { Some((ChildType::ParentKeyId, storage_key)) => @@ -672,9 +625,7 @@ where }; self.client.child_storage_hash(&BlockId::Hash(block), &child_info, &key) }) - .map_err(client_err); - - async move { r }.boxed() + .map_err(client_err) } } diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index 19e474ee6459a..a375a30d2c1a2 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -18,9 +18,13 @@ use self::error::Error; use super::*; -use crate::testing::TaskExecutor; +use crate::testing::{test_executor, timeout_secs}; use assert_matches::assert_matches; -use futures::{executor, StreamExt}; +use futures::executor; +use jsonrpsee::{ + core::Error as RpcError, + types::{error::CallError as RpcCallError, EmptyParams, ErrorObject}, +}; use sc_block_builder::BlockBuilderProvider; use sc_rpc_api::DenyUnsafe; use sp_consensus::BlockOrigin; @@ -36,8 +40,8 @@ fn prefixed_storage_key() -> PrefixedStorageKey { child_info.prefixed_storage_key() } -#[test] -fn should_return_storage() { +#[tokio::test] +async fn should_return_storage() { const KEY: &[u8] = b":mock"; const VALUE: &[u8] = b"hello world"; const CHILD_VALUE: &[u8] = b"hello world !"; @@ -51,49 +55,46 @@ fn should_return_storage() { .add_extra_storage(b":map:acc2".to_vec(), vec![1, 2, 3]) .build(); let genesis_hash = client.genesis_hash(); - let (client, child) = new_full( - Arc::new(client), - SubscriptionManager::new(Arc::new(TaskExecutor)), - DenyUnsafe::No, - None, - ); + let (client, child) = new_full(Arc::new(client), test_executor(), DenyUnsafe::No, None); let key = StorageKey(KEY.to_vec()); assert_eq!( - executor::block_on(client.storage(key.clone(), Some(genesis_hash).into())) + client + .storage(key.clone(), Some(genesis_hash).into()) + .await .map(|x| x.map(|x| x.0.len())) .unwrap() .unwrap() as usize, VALUE.len(), ); assert_matches!( - executor::block_on(client.storage_hash(key.clone(), Some(genesis_hash).into())) + client + .storage_hash(key.clone(), Some(genesis_hash).into()) + .await .map(|x| x.is_some()), Ok(true) ); assert_eq!( - executor::block_on(client.storage_size(key.clone(), 
None)).unwrap().unwrap() as usize, + client.storage_size(key.clone(), None).await.unwrap().unwrap() as usize, VALUE.len(), ); assert_eq!( - executor::block_on(client.storage_size(StorageKey(b":map".to_vec()), None)) - .unwrap() - .unwrap() as usize, + client.storage_size(StorageKey(b":map".to_vec()), None).await.unwrap().unwrap() as usize, 2 + 3, ); assert_eq!( - executor::block_on( - child - .storage(prefixed_storage_key(), key, Some(genesis_hash).into()) - .map(|x| x.map(|x| x.unwrap().0.len())) - ) - .unwrap() as usize, + child + .storage(prefixed_storage_key(), key, Some(genesis_hash).into()) + .await + .map(|x| x.map(|x| x.0.len())) + .unwrap() + .unwrap() as usize, CHILD_VALUE.len(), ); } -#[test] -fn should_return_storage_entries() { +#[tokio::test] +async fn should_return_storage_entries() { const KEY1: &[u8] = b":mock"; const KEY2: &[u8] = b":turtle"; const VALUE: &[u8] = b"hello world"; @@ -107,22 +108,15 @@ fn should_return_storage_entries() { .add_extra_child_storage(&child_info, KEY2.to_vec(), CHILD_VALUE2.to_vec()) .build(); let genesis_hash = client.genesis_hash(); - let (_client, child) = new_full( - Arc::new(client), - SubscriptionManager::new(Arc::new(TaskExecutor)), - DenyUnsafe::No, - None, - ); + let (_client, child) = new_full(Arc::new(client), test_executor(), DenyUnsafe::No, None); let keys = &[StorageKey(KEY1.to_vec()), StorageKey(KEY2.to_vec())]; assert_eq!( - executor::block_on(child.storage_entries( - prefixed_storage_key(), - keys.to_vec(), - Some(genesis_hash).into() - )) - .map(|x| x.into_iter().map(|x| x.map(|x| x.0.len()).unwrap()).sum::()) - .unwrap(), + child + .storage_entries(prefixed_storage_key(), keys.to_vec(), Some(genesis_hash).into()) + .await + .map(|x| x.into_iter().map(|x| x.map(|x| x.0.len()).unwrap()).sum::()) + .unwrap(), CHILD_VALUE1.len() + CHILD_VALUE2.len() ); @@ -130,18 +124,16 @@ fn should_return_storage_entries() { let mut failing_keys = vec![StorageKey(b":soup".to_vec())]; failing_keys.extend_from_slice(keys); assert_matches!( - executor::block_on(child.storage_entries( - prefixed_storage_key(), - failing_keys, - Some(genesis_hash).into() - )) - .map(|x| x.iter().all(|x| x.is_some())), + child + .storage_entries(prefixed_storage_key(), failing_keys, Some(genesis_hash).into()) + .await + .map(|x| x.iter().all(|x| x.is_some())), Ok(false) ); } -#[test] -fn should_return_child_storage() { +#[tokio::test] +async fn should_return_child_storage() { let child_info = ChildInfo::new_default(STORAGE_KEY); let client = Arc::new( substrate_test_runtime_client::TestClientBuilder::new() @@ -149,49 +141,30 @@ fn should_return_child_storage() { .build(), ); let genesis_hash = client.genesis_hash(); - let (_client, child) = - new_full(client, SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, None); + let (_client, child) = new_full(client, test_executor(), DenyUnsafe::No, None); let child_key = prefixed_storage_key(); let key = StorageKey(b"key".to_vec()); assert_matches!( - executor::block_on(child.storage( + child.storage( child_key.clone(), key.clone(), Some(genesis_hash).into(), - )), + ).await, Ok(Some(StorageData(ref d))) if d[0] == 42 && d.len() == 1 ); - - // should fail if key does not exist. 
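Worth noting: the removal just below drops the old "missing key" assertion without a replacement. If that coverage is still wanted, it ports to the async API in one call, reusing `child`, `prefixed_storage_key()` and `genesis_hash` from this test (sketch only):

    assert_matches!(
        child
            .storage(prefixed_storage_key(), StorageKey(b":soup".to_vec()), Some(genesis_hash).into())
            .await,
        Ok(None)
    );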
- let failing_key = StorageKey(b":soup".to_vec()); assert_matches!( - executor::block_on(child.storage( - prefixed_storage_key(), - failing_key, - Some(genesis_hash).into() - )) - .map(|x| x.is_some()), - Ok(false) - ); - - assert_matches!( - executor::block_on(child.storage_hash( - child_key.clone(), - key.clone(), - Some(genesis_hash).into(), - )) - .map(|x| x.is_some()), + child + .storage_hash(child_key.clone(), key.clone(), Some(genesis_hash).into(),) + .await + .map(|x| x.is_some()), Ok(true) ); - assert_matches!( - executor::block_on(child.storage_size(child_key.clone(), key.clone(), None)), - Ok(Some(1)) - ); + assert_matches!(child.storage_size(child_key.clone(), key.clone(), None).await, Ok(Some(1))); } -#[test] -fn should_return_child_storage_entries() { +#[tokio::test] +async fn should_return_child_storage_entries() { let child_info = ChildInfo::new_default(STORAGE_KEY); let client = Arc::new( substrate_test_runtime_client::TestClientBuilder::new() @@ -200,17 +173,14 @@ fn should_return_child_storage_entries() { .build(), ); let genesis_hash = client.genesis_hash(); - let (_client, child) = - new_full(client, SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, None); + let (_client, child) = new_full(client, test_executor(), DenyUnsafe::No, None); let child_key = prefixed_storage_key(); let keys = vec![StorageKey(b"key1".to_vec()), StorageKey(b"key2".to_vec())]; - let res = executor::block_on(child.storage_entries( - child_key.clone(), - keys.clone(), - Some(genesis_hash).into(), - )) - .unwrap(); + let res = child + .storage_entries(child_key.clone(), keys.clone(), Some(genesis_hash).into()) + .await + .unwrap(); assert_matches!( res[0], @@ -232,46 +202,37 @@ fn should_return_child_storage_entries() { Ok(true) ); assert_matches!( - executor::block_on(child.storage_size(child_key.clone(), keys[0].clone(), None)), + child.storage_size(child_key.clone(), keys[0].clone(), None).await, Ok(Some(1)) ); } -#[test] -fn should_call_contract() { +#[tokio::test] +async fn should_call_contract() { let client = Arc::new(substrate_test_runtime_client::new()); let genesis_hash = client.genesis_hash(); - let (client, _child) = - new_full(client, SubscriptionManager::new(Arc::new(TaskExecutor)), DenyUnsafe::No, None); + let (client, _child) = new_full(client, test_executor(), DenyUnsafe::No, None); + + use jsonrpsee::{core::Error, types::error::CallError}; assert_matches!( - executor::block_on(client.call( - "balanceOf".into(), - Bytes(vec![1, 2, 3]), - Some(genesis_hash).into() - )), - Err(Error::Client(_)) + client + .call("balanceOf".into(), Bytes(vec![1, 2, 3]), Some(genesis_hash).into()) + .await, + Err(Error::Call(CallError::Failed(_))) ) } -#[test] -fn should_notify_about_storage_changes() { - let (subscriber, id, mut transport) = Subscriber::new_test("test"); - - { +#[tokio::test] +async fn should_notify_about_storage_changes() { + let mut sub = { let mut client = Arc::new(substrate_test_runtime_client::new()); - let (api, _child) = new_full( - client.clone(), - SubscriptionManager::new(Arc::new(TaskExecutor)), - DenyUnsafe::No, - None, - ); - - api.subscribe_storage(Default::default(), subscriber, None.into()); + let (api, _child) = new_full(client.clone(), test_executor(), DenyUnsafe::No, None); - // assert id assigned - assert!(matches!(executor::block_on(id), Ok(Ok(SubscriptionId::String(_))))); + let api_rpc = api.into_rpc(); + let sub = api_rpc.subscribe("state_subscribeStorage", EmptyParams::new()).await.unwrap(); + // Cause a change: let mut builder = 
client.new_block(Default::default()).unwrap(); builder .push_transfer(runtime::Transfer { @@ -282,38 +243,32 @@ fn should_notify_about_storage_changes() { }) .unwrap(); let block = builder.build().unwrap().block; - executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); - } + client.import(BlockOrigin::Own, block).await.unwrap(); - // Check notification sent to transport - executor::block_on((&mut transport).take(2).collect::>()); - assert!(executor::block_on(transport.next()).is_none()); -} + sub + }; -#[test] -fn should_send_initial_storage_changes_and_notifications() { - let (subscriber, id, mut transport) = Subscriber::new_test("test"); + // We should get a message back on our subscription about the storage change: + // NOTE: previous versions of the subscription code used to return an empty value for the + // "initial" storage change here + assert_matches!(timeout_secs(1, sub.next::>()).await, Ok(Some(_))); + assert_matches!(timeout_secs(1, sub.next::>()).await, Ok(None)); +} - { +#[tokio::test] +async fn should_send_initial_storage_changes_and_notifications() { + let mut sub = { let mut client = Arc::new(substrate_test_runtime_client::new()); - let (api, _child) = new_full( - client.clone(), - SubscriptionManager::new(Arc::new(TaskExecutor)), - DenyUnsafe::No, - None, - ); + let (api, _child) = new_full(client.clone(), test_executor(), DenyUnsafe::No, None); let alice_balance_key = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Alice.into())); - api.subscribe_storage( - Default::default(), - subscriber, - Some(vec![StorageKey(alice_balance_key.to_vec())]).into(), - ); - - // assert id assigned - assert!(matches!(executor::block_on(id), Ok(Ok(SubscriptionId::String(_))))); + let api_rpc = api.into_rpc(); + let sub = api_rpc + .subscribe("state_subscribeStorage", [[StorageKey(alice_balance_key.to_vec())]]) + .await + .unwrap(); let mut builder = client.new_block(Default::default()).unwrap(); builder @@ -325,23 +280,22 @@ fn should_send_initial_storage_changes_and_notifications() { }) .unwrap(); let block = builder.build().unwrap().block; - executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); - } + client.import(BlockOrigin::Own, block).await.unwrap(); + + sub + }; + + assert_matches!(timeout_secs(1, sub.next::>()).await, Ok(Some(_))); + assert_matches!(timeout_secs(1, sub.next::>()).await, Ok(Some(_))); - // Check for the correct number of notifications - executor::block_on((&mut transport).take(2).collect::>()); - assert!(executor::block_on(transport.next()).is_none()); + // No more messages to follow + assert_matches!(timeout_secs(1, sub.next::>()).await, Ok(None)); } -#[test] -fn should_query_storage() { - fn run_tests(mut client: Arc) { - let (api, _child) = new_full( - client.clone(), - SubscriptionManager::new(Arc::new(TaskExecutor)), - DenyUnsafe::No, - None, - ); +#[tokio::test] +async fn should_query_storage() { + async fn run_tests(mut client: Arc) { + let (api, _child) = new_full(client.clone(), test_executor(), DenyUnsafe::No, None); let mut add_block = |nonce| { let mut builder = client.new_block(Default::default()).unwrap(); @@ -393,7 +347,7 @@ fn should_query_storage() { let keys = (1..6).map(|k| StorageKey(vec![k])).collect::>(); let result = api.query_storage(keys.clone(), genesis_hash, Some(block1_hash).into()); - assert_eq!(executor::block_on(result).unwrap(), expected); + assert_eq!(result.await.unwrap(), expected); // Query all changes let result = api.query_storage(keys.clone(), genesis_hash, None.into()); @@ -406,23 
+360,28 @@ fn should_query_storage() { (StorageKey(vec![5]), Some(StorageData(vec![1]))), ], }); - assert_eq!(executor::block_on(result).unwrap(), expected); + assert_eq!(result.await.unwrap(), expected); // Query changes up to block2. let result = api.query_storage(keys.clone(), genesis_hash, Some(block2_hash)); - assert_eq!(executor::block_on(result).unwrap(), expected); + assert_eq!(result.await.unwrap(), expected); // Inverted range. let result = api.query_storage(keys.clone(), block1_hash, Some(genesis_hash)); assert_eq!( - executor::block_on(result).map_err(|e| e.to_string()), - Err(Error::InvalidBlockRange { - from: format!("1 ({:?})", block1_hash), - to: format!("0 ({:?})", genesis_hash), - details: "from number > to number".to_owned(), - }) + result.await.map_err(|e| e.to_string()), + Err(RpcError::Call(RpcCallError::Custom(ErrorObject::owned( + 4001, + Error::InvalidBlockRange { + from: format!("1 ({:?})", block1_hash), + to: format!("0 ({:?})", genesis_hash), + details: "from number > to number".to_owned(), + } + .to_string(), + None::<()>, + )))) .map_err(|e| e.to_string()) ); @@ -433,15 +392,20 @@ fn should_query_storage() { let result = api.query_storage(keys.clone(), genesis_hash, Some(random_hash1)); assert_eq!( - executor::block_on(result).map_err(|e| e.to_string()), - Err(Error::InvalidBlockRange { - from: format!("{:?}", genesis_hash), - to: format!("{:?}", Some(random_hash1)), - details: format!( - "UnknownBlock: Header was not found in the database: {:?}", - random_hash1 - ), - }) + result.await.map_err(|e| e.to_string()), + Err(RpcError::Call(RpcCallError::Custom(ErrorObject::owned( + 4001, + Error::InvalidBlockRange { + from: format!("{:?}", genesis_hash), + to: format!("{:?}", Some(random_hash1)), + details: format!( + "UnknownBlock: Header was not found in the database: {:?}", + random_hash1 + ), + } + .to_string(), + None::<()>, + )))) .map_err(|e| e.to_string()) ); @@ -449,15 +413,20 @@ fn should_query_storage() { let result = api.query_storage(keys.clone(), random_hash1, Some(genesis_hash)); assert_eq!( - executor::block_on(result).map_err(|e| e.to_string()), - Err(Error::InvalidBlockRange { - from: format!("{:?}", random_hash1), - to: format!("{:?}", Some(genesis_hash)), - details: format!( - "UnknownBlock: Header was not found in the database: {:?}", - random_hash1 - ), - }) + result.await.map_err(|e| e.to_string()), + Err(RpcError::Call(RpcCallError::Custom(ErrorObject::owned( + 4001, + Error::InvalidBlockRange { + from: format!("{:?}", random_hash1), + to: format!("{:?}", Some(genesis_hash)), + details: format!( + "UnknownBlock: Header was not found in the database: {:?}", + random_hash1 + ), + } + .to_string(), + None::<()>, + )))) .map_err(|e| e.to_string()), ); @@ -465,15 +434,20 @@ fn should_query_storage() { let result = api.query_storage(keys.clone(), random_hash1, None); assert_eq!( - executor::block_on(result).map_err(|e| e.to_string()), - Err(Error::InvalidBlockRange { - from: format!("{:?}", random_hash1), - to: format!("{:?}", Some(block2_hash)), // Best block hash. - details: format!( - "UnknownBlock: Header was not found in the database: {:?}", - random_hash1 - ), - }) + result.await.map_err(|e| e.to_string()), + Err(RpcError::Call(RpcCallError::Custom(ErrorObject::owned( + 4001, + Error::InvalidBlockRange { + from: format!("{:?}", random_hash1), + to: format!("{:?}", Some(block2_hash)), // Best block hash. 
+ details: format!( + "UnknownBlock: Header was not found in the database: {:?}", + random_hash1 + ), + } + .to_string(), + None::<()>, + )))) .map_err(|e| e.to_string()), ); @@ -481,15 +455,20 @@ fn should_query_storage() { let result = api.query_storage(keys.clone(), random_hash1, Some(random_hash2)); assert_eq!( - executor::block_on(result).map_err(|e| e.to_string()), - Err(Error::InvalidBlockRange { - from: format!("{:?}", random_hash1), // First hash not found. - to: format!("{:?}", Some(random_hash2)), - details: format!( - "UnknownBlock: Header was not found in the database: {:?}", - random_hash1 - ), - }) + result.await.map_err(|e| e.to_string()), + Err(RpcError::Call(RpcCallError::Custom(ErrorObject::owned( + 4001, + Error::InvalidBlockRange { + from: format!("{:?}", random_hash1), // First hash not found. + to: format!("{:?}", Some(random_hash2)), + details: format!( + "UnknownBlock: Header was not found in the database: {:?}", + random_hash1 + ), + } + .to_string(), + None::<()> + )))) .map_err(|e| e.to_string()), ); @@ -497,7 +476,7 @@ fn should_query_storage() { let result = api.query_storage_at(keys.clone(), Some(block1_hash)); assert_eq!( - executor::block_on(result).unwrap(), + result.await.unwrap(), vec![StorageChangeSet { block: block1_hash, changes: vec![ @@ -511,19 +490,14 @@ fn should_query_storage() { ); } - run_tests(Arc::new(substrate_test_runtime_client::new())); - run_tests(Arc::new(TestClientBuilder::new().build())); + run_tests(Arc::new(substrate_test_runtime_client::new())).await; + run_tests(Arc::new(TestClientBuilder::new().build())).await; } -#[test] -fn should_return_runtime_version() { +#[tokio::test] +async fn should_return_runtime_version() { let client = Arc::new(substrate_test_runtime_client::new()); - let (api, _child) = new_full( - client.clone(), - SubscriptionManager::new(Arc::new(TaskExecutor)), - DenyUnsafe::No, - None, - ); + let (api, _child) = new_full(client.clone(), test_executor(), DenyUnsafe::No, None); let result = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":1,\ \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",4],\ @@ -532,7 +506,7 @@ fn should_return_runtime_version() { [\"0xf78b278be53f454c\",2],[\"0xab3c0572291feb8b\",1],[\"0xbc9d89904f5b923f\",1]],\ \"transactionVersion\":1,\"stateVersion\":1}"; - let runtime_version = executor::block_on(api.runtime_version(None.into())).unwrap(); + let runtime_version = api.runtime_version(None.into()).await.unwrap(); let serialized = serde_json::to_string(&runtime_version).unwrap(); assert_eq!(serialized, result); @@ -540,28 +514,26 @@ fn should_return_runtime_version() { assert_eq!(deserialized, runtime_version); } -#[test] -fn should_notify_on_runtime_version_initially() { - let (subscriber, id, mut transport) = Subscriber::new_test("test"); - - { +#[tokio::test] +async fn should_notify_on_runtime_version_initially() { + let mut sub = { let client = Arc::new(substrate_test_runtime_client::new()); - let (api, _child) = new_full( - client.clone(), - SubscriptionManager::new(Arc::new(TaskExecutor)), - DenyUnsafe::No, - None, - ); + let (api, _child) = new_full(client, test_executor(), DenyUnsafe::No, None); - api.subscribe_runtime_version(Default::default(), subscriber); + let api_rpc = api.into_rpc(); + let sub = api_rpc + .subscribe("state_subscribeRuntimeVersion", EmptyParams::new()) + .await + .unwrap(); - // assert id assigned - assert!(matches!(executor::block_on(id), Ok(Ok(SubscriptionId::String(_))))); - } + sub + }; // assert initial version 
sent. - executor::block_on((&mut transport).take(1).collect::>()); - assert!(executor::block_on(transport.next()).is_none()); + assert_matches!(timeout_secs(10, sub.next::()).await, Ok(Some(_))); + + sub.close(); + assert_matches!(timeout_secs(10, sub.next::()).await, Ok(None)); } #[test] @@ -572,38 +544,24 @@ fn should_deserialize_storage_key() { assert_eq!(k.0.len(), 32); } -#[test] -fn wildcard_storage_subscriptions_are_rpc_unsafe() { - let (subscriber, id, _) = Subscriber::new_test("test"); - +#[tokio::test] +async fn wildcard_storage_subscriptions_are_rpc_unsafe() { let client = Arc::new(substrate_test_runtime_client::new()); - let (api, _child) = new_full( - client.clone(), - SubscriptionManager::new(Arc::new(TaskExecutor)), - DenyUnsafe::Yes, - None, - ); + let (api, _child) = new_full(client, test_executor(), DenyUnsafe::Yes, None); - api.subscribe_storage(Default::default(), subscriber, None.into()); - - let error = executor::block_on(id).unwrap().unwrap_err(); - assert_eq!(error.to_string(), "Method not found: RPC call is unsafe to be called externally"); + let api_rpc = api.into_rpc(); + let err = api_rpc.subscribe("state_subscribeStorage", EmptyParams::new()).await; + assert_matches!(err, Err(RpcError::Call(RpcCallError::Custom(e))) if e.message() == "RPC call is unsafe to be called externally"); } -#[test] -fn concrete_storage_subscriptions_are_rpc_safe() { - let (subscriber, id, _) = Subscriber::new_test("test"); - +#[tokio::test] +async fn concrete_storage_subscriptions_are_rpc_safe() { let client = Arc::new(substrate_test_runtime_client::new()); - let (api, _child) = new_full( - client.clone(), - SubscriptionManager::new(Arc::new(TaskExecutor)), - DenyUnsafe::Yes, - None, - ); + let (api, _child) = new_full(client, test_executor(), DenyUnsafe::Yes, None); + let api_rpc = api.into_rpc(); let key = StorageKey(STORAGE_KEY.to_vec()); - api.subscribe_storage(Default::default(), subscriber, Some(vec![key])); + let sub = api_rpc.subscribe("state_subscribeStorage", [[key]]).await; - assert!(matches!(executor::block_on(id), Ok(Ok(SubscriptionId::String(_))))); + assert!(sub.is_ok()); } diff --git a/client/rpc/src/system/mod.rs b/client/rpc/src/system/mod.rs index 534e446e140ad..ea24524cd2ea9 100644 --- a/client/rpc/src/system/mod.rs +++ b/client/rpc/src/system/mod.rs @@ -18,30 +18,23 @@ //! Substrate system API. -use self::error::Result; -use futures::{channel::oneshot, FutureExt}; -use sc_rpc_api::{DenyUnsafe, Receiver}; +#[cfg(test)] +mod tests; + +use futures::channel::oneshot; +use jsonrpsee::{ + core::{async_trait, error::Error as JsonRpseeError, JsonValue, RpcResult}, + types::error::{CallError, ErrorCode, ErrorObject}, +}; +use sc_rpc_api::DenyUnsafe; use sc_tracing::logging; use sc_utils::mpsc::TracingUnboundedSender; use sp_runtime::traits::{self, Header as HeaderT}; -pub use self::{ - gen_client::Client as SystemClient, - helpers::{Health, NodeRole, PeerInfo, SyncState, SystemInfo}, -}; -pub use sc_rpc_api::system::*; - -#[cfg(test)] -mod tests; +use self::error::Result; -/// Early exit for RPCs that require `--rpc-methods=Unsafe` to be enabled -macro_rules! bail_if_unsafe { - ($value: expr) => { - if let Err(err) = $value.check_if_safe() { - return async move { Err(err.into()) }.boxed() - } - }; -} +pub use self::helpers::{Health, NodeRole, PeerInfo, SyncState, SystemInfo}; +pub use sc_rpc_api::system::*; /// System API implementation pub struct System { @@ -62,7 +55,7 @@ pub enum Request { /// Must return information about the peers we are connected to. 
Peers(oneshot::Sender::Number>>>), /// Must return the state of the network. - NetworkState(oneshot::Sender), + NetworkState(oneshot::Sender), /// Must return any potential parse error. NetworkAddReservedPeer(String, oneshot::Sender>), /// Must return any potential parse error. @@ -89,121 +82,123 @@ impl System { } } -impl SystemApi::Number> for System { - fn system_name(&self) -> Result { +#[async_trait] +impl SystemApiServer::Number> for System { + fn system_name(&self) -> RpcResult { Ok(self.info.impl_name.clone()) } - fn system_version(&self) -> Result { + fn system_version(&self) -> RpcResult { Ok(self.info.impl_version.clone()) } - fn system_chain(&self) -> Result { + fn system_chain(&self) -> RpcResult { Ok(self.info.chain_name.clone()) } - fn system_type(&self) -> Result { + fn system_type(&self) -> RpcResult { Ok(self.info.chain_type.clone()) } - fn system_properties(&self) -> Result { + fn system_properties(&self) -> RpcResult { Ok(self.info.properties.clone()) } - fn system_health(&self) -> Receiver { + async fn system_health(&self) -> RpcResult { let (tx, rx) = oneshot::channel(); let _ = self.send_back.unbounded_send(Request::Health(tx)); - Receiver(rx) + rx.await.map_err(|e| JsonRpseeError::to_call_error(e)) } - fn system_local_peer_id(&self) -> Receiver { + async fn system_local_peer_id(&self) -> RpcResult { let (tx, rx) = oneshot::channel(); let _ = self.send_back.unbounded_send(Request::LocalPeerId(tx)); - Receiver(rx) + rx.await.map_err(|e| JsonRpseeError::to_call_error(e)) } - fn system_local_listen_addresses(&self) -> Receiver> { + async fn system_local_listen_addresses(&self) -> RpcResult> { let (tx, rx) = oneshot::channel(); let _ = self.send_back.unbounded_send(Request::LocalListenAddresses(tx)); - Receiver(rx) + rx.await.map_err(|e| JsonRpseeError::to_call_error(e)) } - fn system_peers( + async fn system_peers( &self, - ) -> rpc::BoxFuture::Number>>>> { - bail_if_unsafe!(self.deny_unsafe); - + ) -> RpcResult::Number>>> { + self.deny_unsafe.check_if_safe()?; let (tx, rx) = oneshot::channel(); let _ = self.send_back.unbounded_send(Request::Peers(tx)); - - async move { rx.await.map_err(|_| rpc::Error::internal_error()) }.boxed() + rx.await.map_err(|e| JsonRpseeError::to_call_error(e)) } - fn system_network_state(&self) -> rpc::BoxFuture> { - bail_if_unsafe!(self.deny_unsafe); - + async fn system_network_state(&self) -> RpcResult { + self.deny_unsafe.check_if_safe()?; let (tx, rx) = oneshot::channel(); let _ = self.send_back.unbounded_send(Request::NetworkState(tx)); - - async move { rx.await.map_err(|_| rpc::Error::internal_error()) }.boxed() + rx.await.map_err(|e| JsonRpseeError::to_call_error(e)) } - fn system_add_reserved_peer(&self, peer: String) -> rpc::BoxFuture> { - bail_if_unsafe!(self.deny_unsafe); - + async fn system_add_reserved_peer(&self, peer: String) -> RpcResult<()> { + self.deny_unsafe.check_if_safe()?; let (tx, rx) = oneshot::channel(); let _ = self.send_back.unbounded_send(Request::NetworkAddReservedPeer(peer, tx)); - async move { - match rx.await { - Ok(Ok(())) => Ok(()), - Ok(Err(e)) => Err(rpc::Error::from(e)), - Err(_) => Err(rpc::Error::internal_error()), - } + match rx.await { + Ok(Ok(())) => Ok(()), + Ok(Err(e)) => Err(JsonRpseeError::from(e)), + Err(e) => Err(JsonRpseeError::to_call_error(e)), } - .boxed() } - fn system_remove_reserved_peer(&self, peer: String) -> rpc::BoxFuture> { - bail_if_unsafe!(self.deny_unsafe); - + async fn system_remove_reserved_peer(&self, peer: String) -> RpcResult<()> { + self.deny_unsafe.check_if_safe()?; let (tx, 
rx) = oneshot::channel(); let _ = self.send_back.unbounded_send(Request::NetworkRemoveReservedPeer(peer, tx)); - async move { - match rx.await { - Ok(Ok(())) => Ok(()), - Ok(Err(e)) => Err(rpc::Error::from(e)), - Err(_) => Err(rpc::Error::internal_error()), - } + match rx.await { + Ok(Ok(())) => Ok(()), + Ok(Err(e)) => Err(JsonRpseeError::from(e)), + Err(e) => Err(JsonRpseeError::to_call_error(e)), } - .boxed() } - fn system_reserved_peers(&self) -> Receiver> { + async fn system_reserved_peers(&self) -> RpcResult> { let (tx, rx) = oneshot::channel(); let _ = self.send_back.unbounded_send(Request::NetworkReservedPeers(tx)); - Receiver(rx) + rx.await.map_err(|e| JsonRpseeError::to_call_error(e)) } - fn system_node_roles(&self) -> Receiver> { + async fn system_node_roles(&self) -> RpcResult> { let (tx, rx) = oneshot::channel(); let _ = self.send_back.unbounded_send(Request::NodeRoles(tx)); - Receiver(rx) + rx.await.map_err(|e| JsonRpseeError::to_call_error(e)) } - fn system_sync_state(&self) -> Receiver::Number>> { + async fn system_sync_state(&self) -> RpcResult::Number>> { let (tx, rx) = oneshot::channel(); let _ = self.send_back.unbounded_send(Request::SyncState(tx)); - Receiver(rx) + rx.await.map_err(|e| JsonRpseeError::to_call_error(e)) } - fn system_add_log_filter(&self, directives: String) -> rpc::Result<()> { + fn system_add_log_filter(&self, directives: String) -> RpcResult<()> { self.deny_unsafe.check_if_safe()?; + logging::add_directives(&directives); - logging::reload_filter().map_err(|_e| rpc::Error::internal_error()) + logging::reload_filter().map_err(|e| { + JsonRpseeError::Call(CallError::Custom(ErrorObject::owned( + ErrorCode::InternalError.code(), + e, + None::<()>, + ))) + }) } - fn system_reset_log_filter(&self) -> rpc::Result<()> { + fn system_reset_log_filter(&self) -> RpcResult<()> { self.deny_unsafe.check_if_safe()?; - logging::reset_log_filter().map_err(|_e| rpc::Error::internal_error()) + logging::reset_log_filter().map_err(|e| { + JsonRpseeError::Call(CallError::Custom(ErrorObject::owned( + ErrorCode::InternalError.code(), + e, + None::<()>, + ))) + }) } } diff --git a/client/rpc/src/system/tests.rs b/client/rpc/src/system/tests.rs index 5d6945b714200..77acdf8418ccc 100644 --- a/client/rpc/src/system/tests.rs +++ b/client/rpc/src/system/tests.rs @@ -16,12 +16,18 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
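With `System` now implementing `SystemApiServer`, the rewritten tests below drive it through an `RpcModule` by method name instead of calling trait methods directly. A condensed sketch of that calling convention, assuming the `api` helper defined further down in this file (it builds a `System` with a default status and converts it via `into_rpc()`):

```rust
// Sketch of the `RpcModule`-based test style used below; grounded in the
// `system_health` assertions from this diff.
#[tokio::test]
async fn health_is_exposed_by_method_name() {
	let health: Health = api(None)
		.call("system_health", EmptyParams::new())
		.await
		.unwrap();
	assert_eq!(health.peers, 0);
}
```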
-use super::*; - +use super::{helpers::SyncState, *}; use assert_matches::assert_matches; -use futures::{executor, prelude::*}; +use futures::prelude::*; +use jsonrpsee::{ + core::Error as RpcError, + types::{error::CallError, EmptyParams}, + RpcModule, +}; use sc_network::{self, config::Role, PeerId}; +use sc_rpc_api::system::helpers::PeerInfo; use sc_utils::mpsc::tracing_unbounded; +use sp_core::H256; use std::{ env, io::{BufRead, BufReader, Write}, @@ -43,7 +49,7 @@ impl Default for Status { } } -fn api>>(sync: T) -> System { +fn api>>(sync: T) -> RpcModule> { let status = sync.into().unwrap_or_default(); let should_have_peers = !status.is_dev; let (tx, rx) = tracing_unbounded("rpc_system_tests"); @@ -136,98 +142,122 @@ fn api>>(sync: T) -> System { tx, sc_rpc_api::DenyUnsafe::No, ) + .into_rpc() } -fn wait_receiver(rx: Receiver) -> T { - futures::executor::block_on(rx).unwrap() +#[tokio::test] +async fn system_name_works() { + assert_eq!( + api(None).call::<_, String>("system_name", EmptyParams::new()).await.unwrap(), + "testclient".to_string(), + ); } -#[test] -fn system_name_works() { - assert_eq!(api(None).system_name().unwrap(), "testclient".to_owned()); +#[tokio::test] +async fn system_version_works() { + assert_eq!( + api(None).call::<_, String>("system_version", EmptyParams::new()).await.unwrap(), + "0.2.0".to_string(), + ); } -#[test] -fn system_version_works() { - assert_eq!(api(None).system_version().unwrap(), "0.2.0".to_owned()); +#[tokio::test] +async fn system_chain_works() { + assert_eq!( + api(None).call::<_, String>("system_chain", EmptyParams::new()).await.unwrap(), + "testchain".to_string(), + ); } -#[test] -fn system_chain_works() { - assert_eq!(api(None).system_chain().unwrap(), "testchain".to_owned()); -} +#[tokio::test] +async fn system_properties_works() { + type Map = serde_json::map::Map; -#[test] -fn system_properties_works() { - assert_eq!(api(None).system_properties().unwrap(), serde_json::map::Map::new()); + assert_eq!( + api(None).call::<_, Map>("system_properties", EmptyParams::new()).await.unwrap(), + Map::new() + ); } -#[test] -fn system_type_works() { - assert_eq!(api(None).system_type().unwrap(), Default::default()); +#[tokio::test] +async fn system_type_works() { + assert_eq!( + api(None) + .call::<_, String>("system_chainType", EmptyParams::new()) + .await + .unwrap(), + "Live".to_owned(), + ); } -#[test] -fn system_health() { - assert_matches!( - wait_receiver(api(None).system_health()), - Health { peers: 0, is_syncing: false, should_have_peers: true } +#[tokio::test] +async fn system_health() { + assert_eq!( + api(None).call::<_, Health>("system_health", EmptyParams::new()).await.unwrap(), + Health { peers: 0, is_syncing: false, should_have_peers: true }, ); - assert_matches!( - wait_receiver( - api(Status { peer_id: PeerId::random(), peers: 5, is_syncing: true, is_dev: true }) - .system_health() - ), - Health { peers: 5, is_syncing: true, should_have_peers: false } + assert_eq!( + api(Status { peer_id: PeerId::random(), peers: 5, is_syncing: true, is_dev: true }) + .call::<_, Health>("system_health", EmptyParams::new()) + .await + .unwrap(), + Health { peers: 5, is_syncing: true, should_have_peers: false }, ); assert_eq!( - wait_receiver( - api(Status { peer_id: PeerId::random(), peers: 5, is_syncing: false, is_dev: false }) - .system_health() - ), - Health { peers: 5, is_syncing: false, should_have_peers: true } + api(Status { peer_id: PeerId::random(), peers: 5, is_syncing: false, is_dev: false }) + .call::<_, Health>("system_health", 
EmptyParams::new()) + .await + .unwrap(), + Health { peers: 5, is_syncing: false, should_have_peers: true }, ); assert_eq!( - wait_receiver( - api(Status { peer_id: PeerId::random(), peers: 0, is_syncing: false, is_dev: true }) - .system_health() - ), - Health { peers: 0, is_syncing: false, should_have_peers: false } + api(Status { peer_id: PeerId::random(), peers: 0, is_syncing: false, is_dev: true }) + .call::<_, Health>("system_health", EmptyParams::new()) + .await + .unwrap(), + Health { peers: 0, is_syncing: false, should_have_peers: false }, ); } -#[test] -fn system_local_peer_id_works() { +#[tokio::test] +async fn system_local_peer_id_works() { assert_eq!( - wait_receiver(api(None).system_local_peer_id()), - "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_owned(), + api(None) + .call::<_, String>("system_localPeerId", EmptyParams::new()) + .await + .unwrap(), + "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_owned() ); } -#[test] -fn system_local_listen_addresses_works() { +#[tokio::test] +async fn system_local_listen_addresses_works() { assert_eq!( - wait_receiver(api(None).system_local_listen_addresses()), + api(None) + .call::<_, Vec>("system_localListenAddresses", EmptyParams::new()) + .await + .unwrap(), vec![ - "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV" - .to_string(), + "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV", "/ip4/127.0.0.1/tcp/30334/ws/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV" - .to_string(), ] ); } -#[test] -fn system_peers() { +#[tokio::test] +async fn system_peers() { let peer_id = PeerId::random(); - let req = api(Status { peer_id, peers: 1, is_syncing: false, is_dev: true }).system_peers(); - let res = executor::block_on(req).unwrap(); + let peer_info: Vec> = + api(Status { peer_id, peers: 1, is_syncing: false, is_dev: true }) + .call("system_peers", EmptyParams::new()) + .await + .unwrap(); assert_eq!( - res, + peer_info, vec![PeerInfo { peer_id: peer_id.to_base58(), roles: "FULL".into(), @@ -237,14 +267,16 @@ fn system_peers() { ); } -#[test] -fn system_network_state() { - let req = api(None).system_network_state(); - let res = executor::block_on(req).unwrap(); - +#[tokio::test] +async fn system_network_state() { + use sc_network::network_state::NetworkState; + let network_state: NetworkState = api(None) + .call("system_unstable_networkState", EmptyParams::new()) + .await + .unwrap(); assert_eq!( - serde_json::from_value::(res).unwrap(), - sc_network::network_state::NetworkState { + network_state, + NetworkState { peer_id: String::new(), listened_addresses: Default::default(), external_addresses: Default::default(), @@ -255,50 +287,59 @@ fn system_network_state() { ); } -#[test] -fn system_node_roles() { - assert_eq!(wait_receiver(api(None).system_node_roles()), vec![NodeRole::Authority]); +#[tokio::test] +async fn system_node_roles() { + let node_roles: Vec = + api(None).call("system_nodeRoles", EmptyParams::new()).await.unwrap(); + assert_eq!(node_roles, vec![NodeRole::Authority]); } - -#[test] -fn system_sync_state() { +#[tokio::test] +async fn system_sync_state() { + let sync_state: SyncState = + api(None).call("system_syncState", EmptyParams::new()).await.unwrap(); assert_eq!( - wait_receiver(api(None).system_sync_state()), + sync_state, SyncState { starting_block: 1, current_block: 2, highest_block: Some(3) } ); } -#[test] -fn system_network_add_reserved() { +#[tokio::test] +async fn system_network_add_reserved() { let good_peer_id = - 
"/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"; - let bad_peer_id = "/ip4/198.51.100.19/tcp/30333"; + ["/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"]; + let _good: () = api(None) + .call("system_addReservedPeer", good_peer_id) + .await + .expect("good peer id works"); - let good_fut = api(None).system_add_reserved_peer(good_peer_id.into()); - let bad_fut = api(None).system_add_reserved_peer(bad_peer_id.into()); - assert_eq!(executor::block_on(good_fut), Ok(())); - assert!(executor::block_on(bad_fut).is_err()); + let bad_peer_id = ["/ip4/198.51.100.19/tcp/30333"]; + assert_matches!( + api(None).call::<_, ()>("system_addReservedPeer", bad_peer_id).await, + Err(RpcError::Call(CallError::Custom(err))) if err.message().contains("Peer id is missing from the address") + ); } -#[test] -fn system_network_remove_reserved() { - let good_peer_id = "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"; - let bad_peer_id = - "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"; +#[tokio::test] +async fn system_network_remove_reserved() { + let _good_peer: () = api(None) + .call("system_removeReservedPeer", ["QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"]) + .await + .expect("call with good peer id works"); - let good_fut = api(None).system_remove_reserved_peer(good_peer_id.into()); - let bad_fut = api(None).system_remove_reserved_peer(bad_peer_id.into()); - assert_eq!(executor::block_on(good_fut), Ok(())); - assert!(executor::block_on(bad_fut).is_err()); -} + let bad_peer_id = + ["/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"]; -#[test] -fn system_network_reserved_peers() { - assert_eq!( - wait_receiver(api(None).system_reserved_peers()), - vec!["QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_string()] + assert_matches!( + api(None).call::<_, String>("system_removeReservedPeer", bad_peer_id).await, + Err(RpcError::Call(CallError::Custom(err))) if err.message().contains("base-58 decode error: provided string contained invalid character '/' at byte 0") ); } +#[tokio::test] +async fn system_network_reserved_peers() { + let reserved_peers: Vec = + api(None).call("system_reservedPeers", EmptyParams::new()).await.unwrap(); + assert_eq!(reserved_peers, vec!["QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_string()],); +} #[test] fn test_add_reset_log_filter() { @@ -315,15 +356,20 @@ fn test_add_reset_log_filter() { for line in std::io::stdin().lock().lines() { let line = line.expect("Failed to read bytes"); if line.contains("add_reload") { - api(None) - .system_add_log_filter("test_after_add".into()) - .expect("`system_add_log_filter` failed"); + let filter = "test_after_add"; + let fut = + async move { api(None).call::<_, ()>("system_addLogFilter", [filter]).await }; + futures::executor::block_on(fut).expect("`system_addLogFilter` failed"); } else if line.contains("add_trace") { - api(None) - .system_add_log_filter("test_before_add=trace".into()) - .expect("`system_add_log_filter` failed"); + let filter = "test_before_add=trace"; + let fut = + async move { api(None).call::<_, ()>("system_addLogFilter", [filter]).await }; + futures::executor::block_on(fut).expect("`system_addLogFilter (trace)` failed"); } else if line.contains("reset") { - api(None).system_reset_log_filter().expect("`system_reset_log_filter` failed"); + let fut = async move { + api(None).call::<_, ()>("system_resetLogFilter", EmptyParams::new()).await + }; + 
futures::executor::block_on(fut).expect("`system_resetLogFilter` failed"); } else if line.contains("exit") { return } diff --git a/client/rpc/src/testing.rs b/client/rpc/src/testing.rs index bfb91adb81d31..584e4a9901eab 100644 --- a/client/rpc/src/testing.rs +++ b/client/rpc/src/testing.rs @@ -18,29 +18,16 @@ //! Testing utils used by the RPC tests. -use futures::{ - executor, - task::{FutureObj, Spawn, SpawnError}, -}; - -// Executor shared by all tests. -// -// This shared executor is used to prevent `Too many open files` errors -// on systems with a lot of cores. -lazy_static::lazy_static! { - static ref EXECUTOR: executor::ThreadPool = executor::ThreadPool::new() - .expect("Failed to create thread pool executor for tests"); +use std::{future::Future, sync::Arc}; + +use sp_core::testing::TaskExecutor; + +/// Executor for testing. +pub fn test_executor() -> Arc { + Arc::new(TaskExecutor::default()) } -/// Executor for use in testing -pub struct TaskExecutor; -impl Spawn for TaskExecutor { - fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError> { - EXECUTOR.spawn_ok(future); - Ok(()) - } - - fn status(&self) -> Result<(), SpawnError> { - Ok(()) - } +/// Wrap a future in a timeout a little more concisely +pub fn timeout_secs>(s: u64, f: F) -> tokio::time::Timeout { + tokio::time::timeout(std::time::Duration::from_secs(s), f) } diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 7138d9d384eeb..a62298a260aa4 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -22,10 +22,9 @@ wasmtime = ["sc-executor/wasmtime"] test-helpers = [] [dependencies] +jsonrpsee = { version = "0.12.0", features = ["server"] } thiserror = "1.0.30" futures = "0.3.21" -jsonrpc-pubsub = "18.0" -jsonrpc-core = "18.0" rand = "0.7.3" parking_lot = "0.12.0" log = "0.4.16" diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index cabf004b2f707..5319bf24d5e72 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -25,7 +25,7 @@ use crate::{ start_rpc_servers, RpcHandlers, SpawnTaskHandle, TaskManager, TransactionPoolAdapter, }; use futures::{channel::oneshot, future::ready, FutureExt, StreamExt}; -use jsonrpc_pubsub::manager::SubscriptionManager; +use jsonrpsee::RpcModule; use log::info; use prometheus_endpoint::Registry; use sc_chain_spec::get_extension; @@ -45,6 +45,14 @@ use sc_network::{ warp_request_handler::{self, RequestHandler as WarpSyncRequestHandler, WarpSyncProvider}, NetworkService, }; +use sc_rpc::{ + author::AuthorApiServer, + chain::ChainApiServer, + offchain::OffchainApiServer, + state::{ChildStateApiServer, StateApiServer}, + system::SystemApiServer, + DenyUnsafe, SubscriptionTaskExecutor, +}; use sc_telemetry::{telemetry, ConnectionMessage, Telemetry, TelemetryHandle, SUBSTRATE_INFO}; use sc_transaction_pool_api::MaintainedTransactionPool; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; @@ -62,69 +70,6 @@ use sp_runtime::{ }; use std::{str::FromStr, sync::Arc, time::SystemTime}; -/// A utility trait for building an RPC extension given a `DenyUnsafe` instance. -/// This is useful since at service definition time we don't know whether the -/// specific interface where the RPC extension will be exposed is safe or not. -/// This trait allows us to lazily build the RPC extension whenever we bind the -/// service to an interface. -pub trait RpcExtensionBuilder { - /// The type of the RPC extension that will be built. 
- type Output: sc_rpc::RpcExtension; - - /// Returns an instance of the RPC extension for a particular `DenyUnsafe` - /// value, e.g. the RPC extension might not expose some unsafe methods. - fn build( - &self, - deny: sc_rpc::DenyUnsafe, - subscription_executor: sc_rpc::SubscriptionTaskExecutor, - ) -> Result; -} - -impl RpcExtensionBuilder for F -where - F: Fn(sc_rpc::DenyUnsafe, sc_rpc::SubscriptionTaskExecutor) -> Result, - R: sc_rpc::RpcExtension, -{ - type Output = R; - - fn build( - &self, - deny: sc_rpc::DenyUnsafe, - subscription_executor: sc_rpc::SubscriptionTaskExecutor, - ) -> Result { - (*self)(deny, subscription_executor) - } -} - -/// A utility struct for implementing an `RpcExtensionBuilder` given a cloneable -/// `RpcExtension`, the resulting builder will simply ignore the provided -/// `DenyUnsafe` instance and return a static `RpcExtension` instance. -pub struct NoopRpcExtensionBuilder(pub R); - -impl RpcExtensionBuilder for NoopRpcExtensionBuilder -where - R: Clone + sc_rpc::RpcExtension, -{ - type Output = R; - - fn build( - &self, - _deny: sc_rpc::DenyUnsafe, - _subscription_executor: sc_rpc::SubscriptionTaskExecutor, - ) -> Result { - Ok(self.0.clone()) - } -} - -impl From for NoopRpcExtensionBuilder -where - R: sc_rpc::RpcExtension, -{ - fn from(e: R) -> NoopRpcExtensionBuilder { - NoopRpcExtensionBuilder(e) - } -} - /// Full client type. pub type TFullClient = Client, TFullCallExecutor, TBl, TRtApi>; @@ -389,9 +334,9 @@ pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> { pub keystore: SyncCryptoStorePtr, /// A shared transaction pool. pub transaction_pool: Arc, - /// A RPC extension builder. Use `NoopRpcExtensionBuilder` if you just want to pass in the - /// extensions directly. - pub rpc_extensions_builder: Box + Send>, + /// Builds additional [`RpcModule`]s that should be added to the server + pub rpc_builder: + Box Result, Error>>, /// A shared network instance. pub network: Arc::Hash>>, /// A Sender for RPC requests. 
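The removed `RpcExtensionBuilder` machinery is replaced by the plain `rpc_builder` closure on `SpawnTasksParams`. A hedged sketch of what a node might now hand in; `NodeCustomRpc` is hypothetical and stands in for whatever chain-specific RPC servers the node defines, while the closure signature and the `merge`/`Error::Application` handling follow this diff:

```rust
// Hypothetical node-side builder for `SpawnTasksParams::rpc_builder`.
let rpc_builder = Box::new(
	move |deny_unsafe: DenyUnsafe, subscription_executor: SubscriptionTaskExecutor| {
		let mut module = RpcModule::new(());
		module
			// `NodeCustomRpc` is a placeholder for the node's own RPC server(s).
			.merge(NodeCustomRpc::new(deny_unsafe, subscription_executor).into_rpc())
			.map_err(|e| sc_service::Error::Application(e.into()))?;
		Ok(module)
	},
);
```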
@@ -463,7 +408,6 @@ where TExPool: MaintainedTransactionPool::Hash> + parity_util_mem::MallocSizeOf + 'static, - TRpc: sc_rpc::RpcExtension, { let SpawnTasksParams { mut config, @@ -472,7 +416,7 @@ where backend, keystore, transaction_pool, - rpc_extensions_builder, + rpc_builder, network, system_rpc_tx, telemetry, @@ -536,35 +480,25 @@ where metrics_service.run(client.clone(), transaction_pool.clone(), network.clone()), ); - // RPC - let gen_handler = |deny_unsafe: sc_rpc::DenyUnsafe, - rpc_middleware: sc_rpc_server::RpcMiddleware| { - gen_handler( + let rpc_id_provider = config.rpc_id_provider.take(); + + // jsonrpsee RPC + let gen_rpc_module = |deny_unsafe: DenyUnsafe| { + gen_rpc_module( deny_unsafe, - rpc_middleware, - &config, task_manager.spawn_handle(), client.clone(), transaction_pool.clone(), keystore.clone(), - &*rpc_extensions_builder, - backend.offchain_storage(), system_rpc_tx.clone(), + &config, + backend.offchain_storage(), + &*rpc_builder, ) }; - let rpc_metrics = sc_rpc_server::RpcMetrics::new(config.prometheus_registry())?; - let server_metrics = sc_rpc_server::ServerMetrics::new(config.prometheus_registry())?; - let rpc = start_rpc_servers(&config, gen_handler, rpc_metrics.clone(), server_metrics)?; - // This is used internally, so don't restrict access to unsafe RPC - let known_rpc_method_names = - sc_rpc_server::method_names(|m| gen_handler(sc_rpc::DenyUnsafe::No, m))?; - let rpc_handlers = RpcHandlers(Arc::new( - gen_handler( - sc_rpc::DenyUnsafe::No, - sc_rpc_server::RpcMiddleware::new(rpc_metrics, known_rpc_method_names, "inbrowser"), - )? - .into(), - )); + + let rpc = start_rpc_servers(&config, gen_rpc_module, rpc_id_provider)?; + let rpc_handlers = RpcHandlers(Arc::new(gen_rpc_module(sc_rpc::DenyUnsafe::No)?.into())); // Spawn informant task spawn_handle.spawn( @@ -578,7 +512,7 @@ where ), ); - task_manager.keep_alive((config.base_path, rpc, rpc_handlers.clone())); + task_manager.keep_alive((config.base_path, rpc)); Ok(rpc_handlers) } @@ -642,18 +576,17 @@ fn init_telemetry>( Ok(telemetry.handle()) } -fn gen_handler( - deny_unsafe: sc_rpc::DenyUnsafe, - rpc_middleware: sc_rpc_server::RpcMiddleware, - config: &Configuration, +fn gen_rpc_module( + deny_unsafe: DenyUnsafe, spawn_handle: SpawnTaskHandle, client: Arc, transaction_pool: Arc, keystore: SyncCryptoStorePtr, - rpc_extensions_builder: &(dyn RpcExtensionBuilder + Send), - offchain_storage: Option<>::OffchainStorage>, system_rpc_tx: TracingUnboundedSender>, -) -> Result, Error> + config: &Configuration, + offchain_storage: Option<>::OffchainStorage>, + rpc_builder: &(dyn Fn(DenyUnsafe, SubscriptionTaskExecutor) -> Result, Error>), +) -> Result, Error> where TBl: BlockT, TCl: ProvideRuntimeApi @@ -668,15 +601,12 @@ where + Send + Sync + 'static, - TExPool: MaintainedTransactionPool::Hash> + 'static, TBackend: sc_client_api::backend::Backend + 'static, - TRpc: sc_rpc::RpcExtension, >::Api: sp_session::SessionKeys + sp_api::Metadata, + TExPool: MaintainedTransactionPool::Hash> + 'static, TBl::Hash: Unpin, TBl::Header: Unpin, { - use sc_rpc::{author, chain, offchain, state, system}; - let system_info = sc_rpc::system::SystemInfo { chain_name: config.chain_spec.name().into(), impl_name: config.impl_name.clone(), @@ -685,42 +615,50 @@ where chain_type: config.chain_spec.chain_type(), }; - let task_executor = sc_rpc::SubscriptionTaskExecutor::new(spawn_handle); - let subscriptions = SubscriptionManager::new(Arc::new(task_executor.clone())); + let mut rpc_api = RpcModule::new(()); + let task_executor = 
Arc::new(spawn_handle); let (chain, state, child_state) = { - // Full nodes - let chain = sc_rpc::chain::new_full(client.clone(), subscriptions.clone()); + let chain = sc_rpc::chain::new_full(client.clone(), task_executor.clone()).into_rpc(); let (state, child_state) = sc_rpc::state::new_full( client.clone(), - subscriptions.clone(), + task_executor.clone(), deny_unsafe, config.rpc_max_payload, ); + let state = state.into_rpc(); + let child_state = child_state.into_rpc(); + (chain, state, child_state) }; - let author = - sc_rpc::author::Author::new(client, transaction_pool, subscriptions, keystore, deny_unsafe); - let system = system::System::new(system_info, system_rpc_tx, deny_unsafe); + let author = sc_rpc::author::Author::new( + client.clone(), + transaction_pool, + keystore, + deny_unsafe, + task_executor.clone(), + ) + .into_rpc(); - let maybe_offchain_rpc = offchain_storage.map(|storage| { - let offchain = sc_rpc::offchain::Offchain::new(storage, deny_unsafe); - offchain::OffchainApi::to_delegate(offchain) - }); + let system = sc_rpc::system::System::new(system_info, system_rpc_tx, deny_unsafe).into_rpc(); - Ok(sc_rpc_server::rpc_handler( - ( - state::StateApi::to_delegate(state), - state::ChildStateApi::to_delegate(child_state), - chain::ChainApi::to_delegate(chain), - maybe_offchain_rpc, - author::AuthorApi::to_delegate(author), - system::SystemApi::to_delegate(system), - rpc_extensions_builder.build(deny_unsafe, task_executor)?, - ), - rpc_middleware, - )) + if let Some(storage) = offchain_storage { + let offchain = sc_rpc::offchain::Offchain::new(storage, deny_unsafe).into_rpc(); + + rpc_api.merge(offchain).map_err(|e| Error::Application(e.into()))?; + } + + rpc_api.merge(chain).map_err(|e| Error::Application(e.into()))?; + rpc_api.merge(author).map_err(|e| Error::Application(e.into()))?; + rpc_api.merge(system).map_err(|e| Error::Application(e.into()))?; + rpc_api.merge(state).map_err(|e| Error::Application(e.into()))?; + rpc_api.merge(child_state).map_err(|e| Error::Application(e.into()))?; + // Additional [`RpcModule`]s defined in the node to fit the specific blockchain + let extra_rpcs = rpc_builder(deny_unsafe, task_executor.clone())?; + rpc_api.merge(extra_rpcs).map_err(|e| Error::Application(e.into()))?; + + Ok(rpc_api) } /// Parameters to pass into `build_network`. diff --git a/client/service/src/config.rs b/client/service/src/config.rs index e49e8b40a7b1a..35380da11fc71 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -100,6 +100,18 @@ pub struct Configuration { pub rpc_methods: RpcMethods, /// Maximum payload of rpc request/responses. pub rpc_max_payload: Option, + /// Maximum payload of a rpc request + pub rpc_max_request_size: Option, + /// Maximum payload of a rpc request + pub rpc_max_response_size: Option, + /// Custom JSON-RPC subscription ID provider. + /// + /// Default: [`crate::RandomStringSubscriptionId`]. + pub rpc_id_provider: Option>, + /// Maximum allowed subscriptions per rpc connection + /// + /// Default: 1024. + pub rpc_max_subs_per_conn: Option, /// Maximum size of the output buffer capacity for websocket connections. pub ws_max_out_buffer_capacity: Option, /// Prometheus endpoint configuration. `None` if disabled. 
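The `Configuration` additions above expose the new server limits directly. A sketch of how an embedder might set them, assuming an already-populated `base_config` to update; the size fields are interpreted like the old `rpc_max_payload`, and the defaults named in the doc comments apply when the options are left as `None`:

```rust
// Illustrative only: `base_config` is an existing `Configuration` value.
let config = Configuration {
	rpc_max_request_size: Some(10),    // same units as the old `rpc_max_payload`
	rpc_max_response_size: Some(10),
	rpc_id_provider: None,             // keep the default random-string subscription IDs
	rpc_max_subs_per_conn: Some(1024), // the documented default
	..base_config
};
```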
diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 0d2461376d961..027b704789635 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -34,13 +34,15 @@ mod client; mod metrics; mod task_manager; -use std::{collections::HashMap, io, net::SocketAddr, pin::Pin}; +use std::{collections::HashMap, net::SocketAddr}; use codec::{Decode, Encode}; -use futures::{Future, FutureExt, StreamExt}; +use futures::{channel::mpsc, FutureExt, StreamExt}; +use jsonrpsee::{core::Error as JsonRpseeError, RpcModule}; use log::{debug, error, warn}; -use sc_client_api::{BlockBackend, ProofProvider}; +use sc_client_api::{blockchain::HeaderBackend, BlockBackend, BlockchainEvents, ProofProvider}; use sc_network::PeerId; +use sc_rpc_server::WsConfig; use sc_utils::mpsc::TracingUnboundedReceiver; use sp_blockchain::HeaderMetadata; use sp_runtime::{ @@ -52,8 +54,7 @@ pub use self::{ builder::{ build_network, build_offchain_workers, new_client, new_db_backend, new_full_client, new_full_parts, spawn_tasks, BuildNetworkParams, KeystoreContainer, NetworkStarter, - NoopRpcExtensionBuilder, RpcExtensionBuilder, SpawnTasksParams, TFullBackend, - TFullCallExecutor, TFullClient, + SpawnTasksParams, TFullBackend, TFullCallExecutor, TFullClient, }, client::{ClientConfig, LocalCallExecutor}, error::Error, @@ -65,12 +66,14 @@ pub use sc_chain_spec::{ ChainSpec, ChainType, Extension as ChainSpecExtension, GenericChainSpec, NoExtension, Properties, RuntimeGenesis, }; -use sc_client_api::{blockchain::HeaderBackend, BlockchainEvents}; + pub use sc_consensus::ImportQueue; pub use sc_executor::NativeExecutionDispatch; #[doc(hidden)] pub use sc_network::config::{TransactionImport, TransactionImportFuture}; -pub use sc_rpc::Metadata as RpcMetadata; +pub use sc_rpc::{ + RandomIntegerSubscriptionId, RandomStringSubscriptionId, RpcSubscriptionIdProvider, +}; pub use sc_tracing::TracingReceiver; pub use sc_transaction_pool::Options as TransactionPoolOptions; pub use sc_transaction_pool_api::{error::IntoPoolError, InPoolTransaction, TransactionPool}; @@ -82,32 +85,27 @@ const DEFAULT_PROTOCOL_ID: &str = "sup"; /// RPC handlers that can perform RPC queries. #[derive(Clone)] -pub struct RpcHandlers( - Arc>, -); +pub struct RpcHandlers(Arc>); impl RpcHandlers { /// Starts an RPC query. /// - /// The query is passed as a string and must be a JSON text similar to what an HTTP client - /// would for example send. + /// The query is passed as a string and must be valid JSON-RPC request object. /// - /// Returns a `Future` that contains the optional response. + /// Returns a response and a stream if the call successful, fails if the + /// query could not be decoded as a JSON-RPC request object. /// - /// If the request subscribes you to events, the `Sender` in the `RpcSession` object is used to - /// send back spontaneous events. - pub fn rpc_query( + /// If the request subscribes you to events, the `stream` can be used to + /// retrieve the events. 
+ pub async fn rpc_query( &self, - mem: &RpcSession, - request: &str, - ) -> Pin> + Send>> { - self.0.handle_request(request, mem.metadata.clone()).boxed() + json_query: &str, + ) -> Result<(String, mpsc::UnboundedReceiver), JsonRpseeError> { + self.0.raw_json_request(json_query).await } - /// Provides access to the underlying `MetaIoHandler` - pub fn io_handler( - &self, - ) -> Arc> { + /// Provides access to the underlying `RpcModule` + pub fn handle(&self) -> Arc> { self.0.clone() } } @@ -284,74 +282,41 @@ async fn build_network_future< // Wrapper for HTTP and WS servers that makes sure they are properly shut down. mod waiting { pub struct HttpServer(pub Option); - impl Drop for HttpServer { - fn drop(&mut self) { - if let Some(server) = self.0.take() { - server.close_handle().close(); - server.wait(); - } - } - } - pub struct IpcServer(pub Option); - impl Drop for IpcServer { + impl Drop for HttpServer { fn drop(&mut self) { if let Some(server) = self.0.take() { - server.close_handle().close(); - let _ = server.wait(); + // This doesn't not wait for the server to be stopped but fires the signal. + let _ = server.stop(); } } } pub struct WsServer(pub Option); + impl Drop for WsServer { fn drop(&mut self) { if let Some(server) = self.0.take() { - server.close_handle().close(); - let _ = server.wait(); + // This doesn't not wait for the server to be stopped but fires the signal. + let _ = server.stop(); } } } } -/// Starts RPC servers that run in their own thread, and returns an opaque object that keeps them -/// alive. -fn start_rpc_servers< - H: FnMut( - sc_rpc::DenyUnsafe, - sc_rpc_server::RpcMiddleware, - ) -> Result, Error>, ->( +/// Starts RPC servers. +fn start_rpc_servers( config: &Configuration, - mut gen_handler: H, - rpc_metrics: Option, - server_metrics: sc_rpc_server::ServerMetrics, -) -> Result, Error> { - fn maybe_start_server( - address: Option, - mut start: F, - ) -> Result, Error> - where - F: FnMut(&SocketAddr) -> Result, - { - address - .map(|mut address| { - start(&address).or_else(|e| match e { - Error::Io(e) => match e.kind() { - io::ErrorKind::AddrInUse | io::ErrorKind::PermissionDenied => { - warn!("Unable to bind RPC server to {}. 
Trying random port.", address); - address.set_port(0); - start(&address) - }, - _ => Err(e.into()), - }, - e => Err(e), - }) - }) - .transpose() - } + gen_rpc_module: R, + rpc_id_provider: Option>, +) -> Result, error::Error> +where + R: Fn(sc_rpc::DenyUnsafe) -> Result, Error>, +{ + let (max_request_size, ws_max_response_size, http_max_response_size) = + legacy_cli_parsing(config); - fn deny_unsafe(addr: &SocketAddr, methods: &RpcMethods) -> sc_rpc::DenyUnsafe { + fn deny_unsafe(addr: SocketAddr, methods: &RpcMethods) -> sc_rpc::DenyUnsafe { let is_exposed_addr = !addr.ip().is_loopback(); match (is_exposed_addr, methods) { | (_, RpcMethods::Unsafe) | (false, RpcMethods::Auto) => sc_rpc::DenyUnsafe::No, @@ -359,85 +324,54 @@ fn start_rpc_servers< } } - let rpc_method_names = sc_rpc_server::method_names(|m| gen_handler(sc_rpc::DenyUnsafe::No, m))?; - Ok(Box::new(( - config - .rpc_ipc - .as_ref() - .map(|path| { - sc_rpc_server::start_ipc( - &*path, - gen_handler( - sc_rpc::DenyUnsafe::No, - sc_rpc_server::RpcMiddleware::new( - rpc_metrics.clone(), - rpc_method_names.clone(), - "ipc", - ), - )?, - server_metrics.clone(), - ) - .map_err(Error::from) - }) - .transpose()?, - maybe_start_server(config.rpc_http, |address| { - sc_rpc_server::start_http( - address, - config.rpc_cors.as_ref(), - gen_handler( - deny_unsafe(address, &config.rpc_methods), - sc_rpc_server::RpcMiddleware::new( - rpc_metrics.clone(), - rpc_method_names.clone(), - "http", - ), - )?, - config.rpc_max_payload, - config.tokio_handle.clone(), - ) - .map_err(Error::from) - })? - .map(|s| waiting::HttpServer(Some(s))), - maybe_start_server(config.rpc_ws, |address| { - sc_rpc_server::start_ws( - address, - config.rpc_ws_max_connections, - config.rpc_cors.as_ref(), - gen_handler( - deny_unsafe(address, &config.rpc_methods), - sc_rpc_server::RpcMiddleware::new( - rpc_metrics.clone(), - rpc_method_names.clone(), - "ws", - ), - )?, - config.rpc_max_payload, - config.ws_max_out_buffer_capacity, - server_metrics.clone(), - config.tokio_handle.clone(), - ) - .map_err(Error::from) - })? - .map(|s| waiting::WsServer(Some(s))), - ))) -} + let random_port = |mut addr: SocketAddr| { + addr.set_port(0); + addr + }; -/// An RPC session. Used to perform in-memory RPC queries (ie. RPC queries that don't go through -/// the HTTP or WebSockets server). -#[derive(Clone)] -pub struct RpcSession { - metadata: sc_rpc::Metadata, -} + let ws_addr = config + .rpc_ws + .unwrap_or_else(|| "127.0.0.1:9944".parse().expect("valid sockaddr; qed")); + let ws_addr2 = random_port(ws_addr); + let http_addr = config + .rpc_http + .unwrap_or_else(|| "127.0.0.1:9933".parse().expect("valid sockaddr; qed")); + let http_addr2 = random_port(http_addr); + + let metrics = sc_rpc_server::RpcMetrics::new(config.prometheus_registry())?; + + let http_fut = sc_rpc_server::start_http( + [http_addr, http_addr2], + config.rpc_cors.as_ref(), + max_request_size, + http_max_response_size, + metrics.clone(), + gen_rpc_module(deny_unsafe(ws_addr, &config.rpc_methods))?, + config.tokio_handle.clone(), + ); + + let ws_config = WsConfig { + max_connections: config.rpc_ws_max_connections, + max_payload_in_mb: max_request_size, + max_payload_out_mb: ws_max_response_size, + max_subs_per_conn: config.rpc_max_subs_per_conn, + }; -impl RpcSession { - /// Creates an RPC session. - /// - /// The `sender` is stored inside the `RpcSession` and is used to communicate spontaneous JSON - /// messages. - /// - /// The `RpcSession` must be kept alive in order to receive messages on the sender. 
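Because `rpc_query` now forwards to `RpcModule::raw_json_request`, in-memory queries take a raw JSON-RPC request string and return the raw response together with a receiver for any subscription notifications, with no `RpcSession` involved. A hedged usage sketch (the request body is just an example method call):

```rust
// Illustrative helper around the reworked `rpc_query`; the `RpcHandlers` value
// would come from `spawn_tasks`.
async fn query_node_name(
	handlers: &sc_service::RpcHandlers,
) -> Result<(), jsonrpsee::core::Error> {
	let request = r#"{"jsonrpc":"2.0","id":1,"method":"system_name","params":[]}"#;
	let (response, mut _notifications) = handlers.rpc_query(request).await?;
	log::debug!("raw JSON-RPC response: {}", response);
	Ok(())
}
```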
- pub fn new(sender: futures::channel::mpsc::UnboundedSender) -> RpcSession { - RpcSession { metadata: sender.into() } + let ws_fut = sc_rpc_server::start_ws( + [ws_addr, ws_addr2], + config.rpc_cors.as_ref(), + ws_config, + metrics, + gen_rpc_module(deny_unsafe(http_addr, &config.rpc_methods))?, + config.tokio_handle.clone(), + rpc_id_provider, + ); + + match tokio::task::block_in_place(|| { + config.tokio_handle.block_on(futures::future::try_join(http_fut, ws_fut)) + }) { + Ok((http, ws)) => Ok(Box::new((http, ws))), + Err(e) => Err(Error::Application(e)), } } @@ -545,6 +479,44 @@ where } } +fn legacy_cli_parsing(config: &Configuration) -> (Option, Option, Option) { + let ws_max_response_size = config.ws_max_out_buffer_capacity.map(|max| { + eprintln!("DEPRECATED: `--ws_max_out_buffer_capacity` has been removed use `rpc-max-response-size or rpc-max-request-size` instead"); + eprintln!("Setting WS `rpc-max-response-size` to `max(ws_max_out_buffer_capacity, rpc_max_response_size)`"); + std::cmp::max(max, config.rpc_max_response_size.unwrap_or(0)) + }); + + let max_request_size = match (config.rpc_max_payload, config.rpc_max_request_size) { + (Some(legacy_max), max) => { + eprintln!("DEPRECATED: `--rpc_max_payload` has been removed use `rpc-max-response-size or rpc-max-request-size` instead"); + eprintln!( + "Setting `rpc-max-response-size` to `max(rpc_max_payload, rpc_max_request_size)`" + ); + Some(std::cmp::max(legacy_max, max.unwrap_or(0))) + }, + (None, Some(max)) => Some(max), + (None, None) => None, + }; + + let http_max_response_size = match (config.rpc_max_payload, config.rpc_max_request_size) { + (Some(legacy_max), max) => { + eprintln!("DEPRECATED: `--rpc_max_payload` has been removed use `rpc-max-response-size or rpc-max-request-size` instead"); + eprintln!( + "Setting HTTP `rpc-max-response-size` to `max(rpc_max_payload, rpc_max_response_size)`" + ); + Some(std::cmp::max(legacy_max, max.unwrap_or(0))) + }, + (None, Some(max)) => Some(max), + (None, None) => None, + }; + + if config.rpc_ipc.is_some() { + eprintln!("DEPRECATED: `--ipc-path` has no effect anymore IPC support has been removed"); + } + + (max_request_size, ws_max_response_size, http_max_response_size) +} + #[cfg(test)] mod tests { use super::*; diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index ef04e16a65d26..749c83c6eeac7 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -246,6 +246,10 @@ fn node_config< rpc_cors: None, rpc_methods: Default::default(), rpc_max_payload: None, + rpc_max_request_size: None, + rpc_max_response_size: None, + rpc_id_provider: None, + rpc_max_subs_per_conn: None, ws_max_out_buffer_capacity: None, prometheus_config: None, telemetry_endpoints: None, diff --git a/client/sync-state-rpc/Cargo.toml b/client/sync-state-rpc/Cargo.toml index a32849dc0e964..f42c307ffa84c 100644 --- a/client/sync-state-rpc/Cargo.toml +++ b/client/sync-state-rpc/Cargo.toml @@ -14,9 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } -jsonrpc-core = "18.0.0" -jsonrpc-core-client = "18.0.0" -jsonrpc-derive = "18.0.0" +jsonrpsee = { version = "0.12.0", features = ["server", "macros"] } serde = { version = "1.0.136", features = ["derive"] } serde_json = "1.0.79" thiserror = "1.0.30" diff --git a/client/sync-state-rpc/src/lib.rs b/client/sync-state-rpc/src/lib.rs index d7696c662e856..a0a5b66cb86fc 100644 --- a/client/sync-state-rpc/src/lib.rs +++ b/client/sync-state-rpc/src/lib.rs @@ 
-37,10 +37,15 @@ //! ``` //! //! If the [`LightSyncStateExtension`] is not added as an extension to the chain spec, -//! the [`SyncStateRpcHandler`] will fail at instantiation. +//! the [`SyncStateRpc`] will fail at instantiation. #![deny(unused_crate_dependencies)] +use jsonrpsee::{ + core::{Error as JsonRpseeError, RpcResult}, + proc_macros::rpc, + types::{error::CallError, ErrorObject}, +}; use sc_client_api::StorageData; use sp_blockchain::HeaderBackend; use sp_runtime::{ @@ -49,8 +54,6 @@ use sp_runtime::{ }; use std::sync::Arc; -use jsonrpc_derive::rpc; - type SharedAuthoritySet = sc_finality_grandpa::SharedAuthoritySet<::Hash, NumberFor>; type SharedEpochChanges = @@ -76,13 +79,13 @@ pub enum Error { LightSyncStateExtensionNotFound, } -impl From> for jsonrpc_core::Error { +impl From> for JsonRpseeError { fn from(error: Error) -> Self { let message = match error { Error::JsonRpc(s) => s, _ => error.to_string(), }; - jsonrpc_core::Error { message, code: jsonrpc_core::ErrorCode::ServerError(1), data: None } + CallError::Custom(ErrorObject::owned(1, message, None::<()>)).into() } } @@ -101,7 +104,7 @@ fn serialize_encoded( /// chain-spec as an extension. pub type LightSyncStateExtension = Option; -/// Hardcoded infomation that allows light clients to sync quickly. +/// Hardcoded information that allows light clients to sync quickly. #[derive(serde::Serialize, Clone)] #[serde(rename_all = "camelCase")] #[serde(deny_unknown_fields)] @@ -121,30 +124,30 @@ pub struct LightSyncState { } /// An api for sync state RPC calls. -#[rpc] +#[rpc(client, server)] pub trait SyncStateRpcApi { - /// Returns the json-serialized chainspec running the node, with a sync state. - #[rpc(name = "sync_state_genSyncSpec", returns = "jsonrpc_core::Value")] - fn system_gen_sync_spec(&self, raw: bool) -> jsonrpc_core::Result; + /// Returns the JSON serialized chainspec running the node, with a sync state. + #[method(name = "sync_state_genSyncSpec")] + fn system_gen_sync_spec(&self, raw: bool) -> RpcResult; } -/// The handler for sync state RPC calls. -pub struct SyncStateRpcHandler { +/// An api for sync state RPC calls. +pub struct SyncStateRpc { chain_spec: Box, - client: Arc, + client: Arc, shared_authority_set: SharedAuthoritySet, shared_epoch_changes: SharedEpochChanges, } -impl SyncStateRpcHandler +impl SyncStateRpc where Block: BlockT, - Backend: HeaderBackend + sc_client_api::AuxStore + 'static, + Client: HeaderBackend + sc_client_api::AuxStore + 'static, { - /// Create a new handler. + /// Create a new sync state RPC helper. 
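A standalone sketch of the error-mapping pattern introduced above, assuming jsonrpsee 0.12 as pinned in the updated Cargo.toml; `SyncError` is a stand-in, not the crate's actual error type:

    use jsonrpsee::{
        core::Error as JsonRpseeError,
        types::{error::CallError, ErrorObject},
    };

    // Stand-in domain error; the real crate derives its error with `thiserror`.
    #[derive(Debug)]
    enum SyncError {
        ExtensionNotFound,
    }

    impl std::fmt::Display for SyncError {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            write!(f, "`LightSyncState` chain-spec extension not found")
        }
    }

    // Map the domain error into a JSON-RPC call error with a custom code,
    // mirroring the `From<Error> for JsonRpseeError` impl in this hunk.
    impl From<SyncError> for JsonRpseeError {
        fn from(error: SyncError) -> Self {
            CallError::Custom(ErrorObject::owned(1, error.to_string(), None::<()>)).into()
        }
    }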
pub fn new( chain_spec: Box, - client: Arc, + client: Arc, shared_authority_set: SharedAuthoritySet, shared_epoch_changes: SharedEpochChanges, ) -> Result> { @@ -177,32 +180,23 @@ where } } -impl SyncStateRpcApi for SyncStateRpcHandler +impl SyncStateRpcApiServer for SyncStateRpc where Block: BlockT, Backend: HeaderBackend + sc_client_api::AuxStore + 'static, { - fn system_gen_sync_spec(&self, raw: bool) -> jsonrpc_core::Result { + fn system_gen_sync_spec(&self, raw: bool) -> RpcResult { + let current_sync_state = self.build_sync_state()?; let mut chain_spec = self.chain_spec.cloned_box(); - let sync_state = self.build_sync_state().map_err(map_error::>)?; - let extension = sc_chain_spec::get_extension_mut::( chain_spec.extensions_mut(), ) - .ok_or_else(|| { - Error::::JsonRpc("Could not find `LightSyncState` chain-spec extension!".into()) - })?; - - *extension = - Some(serde_json::to_value(&sync_state).map_err(|err| map_error::(err))?); + .ok_or(Error::::LightSyncStateExtensionNotFound)?; - let json_string = chain_spec.as_json(raw).map_err(map_error::)?; + let val = serde_json::to_value(¤t_sync_state)?; + *extension = Some(val); - serde_json::from_str(&json_string).map_err(|err| map_error::(err)) + chain_spec.as_json(raw).map_err(|e| Error::::JsonRpc(e).into()) } } - -fn map_error(error: S) -> jsonrpc_core::Error { - Error::::JsonRpc(error.to_string()).into() -} diff --git a/client/tracing/src/block/mod.rs b/client/tracing/src/block/mod.rs index 6de3a0220e15e..2a034f06ce8a8 100644 --- a/client/tracing/src/block/mod.rs +++ b/client/tracing/src/block/mod.rs @@ -53,7 +53,7 @@ const AVG_SPAN: usize = 100 * 8; // are used for the RPC Id this may need to be adjusted. Note: The base payload // does not include the RPC result. // -// The estimate is based on the JSONRPC response message which has the following format: +// The estimate is based on the JSON-RPC response message which has the following format: // `{"jsonrpc":"2.0","result":[],"id":18446744073709551615}`. // // We care about the total size of the payload because jsonrpc-server will simply ignore diff --git a/client/transaction-pool/api/src/error.rs b/client/transaction-pool/api/src/error.rs index b093657f739b1..aada44734d053 100644 --- a/client/transaction-pool/api/src/error.rs +++ b/client/transaction-pool/api/src/error.rs @@ -46,7 +46,7 @@ pub enum Error { TemporarilyBanned, #[error("[{0:?}] Already imported")] - AlreadyImported(Box), + AlreadyImported(Box), #[error("Too low priority ({} > {})", old, new)] TooLowPriority { @@ -72,7 +72,7 @@ pub enum Error { } /// Transaction pool error conversion. 
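The proc-macro API used above generates companion traits from the trait name (here, `SyncStateRpcApi` yields `SyncStateRpcApiServer` and `SyncStateRpcApiClient`). A minimal sketch of that flow, assuming jsonrpsee 0.12 with the server, client and macros features; the trait and type names are illustrative:

    use jsonrpsee::{core::RpcResult, proc_macros::rpc};

    #[rpc(client, server)]
    pub trait HelloApi {
        /// Returns a static greeting.
        #[method(name = "hello_say")]
        fn say(&self) -> RpcResult<String>;
    }

    pub struct Hello;

    // Implement the generated `HelloApiServer` trait for the concrete type.
    impl HelloApiServer for Hello {
        fn say(&self) -> RpcResult<String> {
            Ok("hello".to_string())
        }
    }

    fn main() {
        // `into_rpc()` is provided by the generated server trait and turns the
        // implementation into an `RpcModule` the server can host.
        let _module = Hello.into_rpc();
    }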
-pub trait IntoPoolError: std::error::Error + Send + Sized { +pub trait IntoPoolError: std::error::Error + Send + Sized + Sync { /// Try to extract original `Error` /// /// This implementation is optional and used only to diff --git a/client/transaction-pool/api/src/lib.rs b/client/transaction-pool/api/src/lib.rs index 448578b327b03..0ebb8f9d4cd9c 100644 --- a/client/transaction-pool/api/src/lib.rs +++ b/client/transaction-pool/api/src/lib.rs @@ -22,16 +22,17 @@ pub mod error; use futures::{Future, Stream}; -use serde::{Deserialize, Serialize}; -pub use sp_runtime::transaction_validity::{ - TransactionLongevity, TransactionPriority, TransactionSource, TransactionTag, -}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, Member, NumberFor}, }; use std::{collections::HashMap, hash::Hash, pin::Pin, sync::Arc}; +pub use sp_runtime::transaction_validity::{ + TransactionLongevity, TransactionPriority, TransactionSource, TransactionTag, +}; + /// Transaction pool status. #[derive(Debug)] pub struct PoolStatus { @@ -177,7 +178,7 @@ pub trait TransactionPool: Send + Sync { /// Block type. type Block: BlockT; /// Transaction hash type. - type Hash: Hash + Eq + Member + Serialize; + type Hash: Hash + Eq + Member + Serialize + DeserializeOwned; /// In-pool transaction type. type InPoolTransaction: InPoolTransaction< Transaction = TransactionFor, diff --git a/frame/bags-list/remote-tests/Cargo.toml b/frame/bags-list/remote-tests/Cargo.toml index 05675741ae51b..760115917c6d4 100644 --- a/frame/bags-list/remote-tests/Cargo.toml +++ b/frame/bags-list/remote-tests/Cargo.toml @@ -32,4 +32,3 @@ remote-externalities = { path = "../../../utils/frame/remote-externalities", ver # others log = "0.4.16" -tokio = { version = "1", features = ["macros"] } diff --git a/frame/contracts/rpc/Cargo.toml b/frame/contracts/rpc/Cargo.toml index e506e78a2fbdc..36f6c06328501 100644 --- a/frame/contracts/rpc/Cargo.toml +++ b/frame/contracts/rpc/Cargo.toml @@ -14,9 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } -jsonrpc-core = "18.0.0" -jsonrpc-core-client = "18.0.0" -jsonrpc-derive = "18.0.0" +jsonrpsee = { version = "0.12.0", features = ["server", "macros"] } serde = { version = "1", features = ["derive"] } # Substrate Dependencies diff --git a/frame/contracts/rpc/src/lib.rs b/frame/contracts/rpc/src/lib.rs index e83e4e6249b92..599e80676cb19 100644 --- a/frame/contracts/rpc/src/lib.rs +++ b/frame/contracts/rpc/src/lib.rs @@ -17,11 +17,16 @@ //! Node-specific RPC methods for interaction with contracts. -use std::sync::Arc; +#![warn(unused_crate_dependencies)] + +use std::{marker::PhantomData, sync::Arc}; use codec::Codec; -use jsonrpc_core::{Error, ErrorCode, Result}; -use jsonrpc_derive::rpc; +use jsonrpsee::{ + core::{async_trait, Error as JsonRpseeError, RpcResult}, + proc_macros::rpc, + types::error::{CallError, ErrorCode, ErrorObject}, +}; use pallet_contracts_primitives::{ Code, CodeUploadResult, ContractExecResult, ContractInstantiateResult, }; @@ -37,8 +42,8 @@ use sp_runtime::{ pub use pallet_contracts_rpc_runtime_api::ContractsApi as ContractsRuntimeApi; -const RUNTIME_ERROR: i64 = 1; -const CONTRACT_DOESNT_EXIST: i64 = 2; +const RUNTIME_ERROR: i32 = 1; +const CONTRACT_DOESNT_EXIST: i32 = 2; pub type Weight = u64; @@ -58,15 +63,17 @@ const GAS_LIMIT: Weight = 5 * GAS_PER_SECOND; /// A private newtype for converting `ContractAccessError` into an RPC error. 
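A short sketch of why the extra `Sync` bound appears on `IntoPoolError` and the boxed payload just above: errors forwarded through the async RPC layer are likely boxed as `dyn Error + Send + Sync`, and that boxing is only possible when the source error satisfies both auto traits. Only std is assumed here:

    use std::error::Error;

    // An error can only be boxed into `dyn Error + Send + Sync` (the shape
    // shared between async tasks) if it is itself Send + Sync.
    fn box_for_rpc<E>(err: E) -> Box<dyn Error + Send + Sync>
    where
        E: Error + Send + Sync + 'static,
    {
        Box::new(err)
    }

    fn main() {
        let io_err = std::io::Error::new(std::io::ErrorKind::Other, "pool error");
        let boxed = box_for_rpc(io_err);
        println!("{}", boxed);
    }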
struct ContractAccessError(pallet_contracts_primitives::ContractAccessError); -impl From for Error { - fn from(e: ContractAccessError) -> Error { + +impl From for JsonRpseeError { + fn from(e: ContractAccessError) -> Self { use pallet_contracts_primitives::ContractAccessError::*; match e.0 { - DoesntExist => Error { - code: ErrorCode::ServerError(CONTRACT_DOESNT_EXIST), - message: "The specified contract doesn't exist.".into(), - data: None, - }, + DoesntExist => CallError::Custom(ErrorObject::owned( + CONTRACT_DOESNT_EXIST, + "The specified contract doesn't exist.", + None::<()>, + )) + .into(), } } } @@ -109,7 +116,7 @@ pub struct CodeUploadRequest { } /// Contracts RPC methods. -#[rpc] +#[rpc(client, server)] pub trait ContractsApi where Balance: Copy + TryFrom + Into, @@ -121,12 +128,12 @@ where /// /// This method is useful for calling getter-like methods on contracts or to dry-run a /// a contract call in order to determine the `gas_limit`. - #[rpc(name = "contracts_call")] + #[method(name = "contracts_call")] fn call( &self, call_request: CallRequest, at: Option, - ) -> Result>; + ) -> RpcResult>; /// Instantiate a new contract. /// @@ -134,12 +141,12 @@ where /// is not actually created. /// /// This method is useful for UIs to dry-run contract instantiations. - #[rpc(name = "contracts_instantiate")] + #[method(name = "contracts_instantiate")] fn instantiate( &self, instantiate_request: InstantiateRequest, at: Option, - ) -> Result>; + ) -> RpcResult>; /// Upload new code without instantiating a contract from it. /// @@ -147,48 +154,50 @@ where /// won't change any state. /// /// This method is useful for UIs to dry-run code upload. - #[rpc(name = "contracts_upload_code")] + #[method(name = "contracts_upload_code")] fn upload_code( &self, upload_request: CodeUploadRequest, at: Option, - ) -> Result>; + ) -> RpcResult>; /// Returns the value under a specified storage `key` in a contract given by `address` param, /// or `None` if it is not set. - #[rpc(name = "contracts_getStorage")] + #[method(name = "contracts_getStorage")] fn get_storage( &self, address: AccountId, key: H256, at: Option, - ) -> Result>; + ) -> RpcResult>; } -/// An implementation of contract specific RPC methods. -pub struct Contracts { - client: Arc, - _marker: std::marker::PhantomData, +/// Contracts RPC methods. +pub struct ContractsRpc { + client: Arc, + _marker: PhantomData, } -impl Contracts { +impl ContractsRpc { /// Create new `Contracts` with the given reference to the client. - pub fn new(client: Arc) -> Self { - Contracts { client, _marker: Default::default() } + pub fn new(client: Arc) -> Self { + Self { client, _marker: Default::default() } } } -impl - ContractsApi< + +#[async_trait] +impl + ContractsApiServer< ::Hash, <::Header as HeaderT>::Number, AccountId, Balance, Hash, - > for Contracts + > for ContractsRpc where Block: BlockT, - C: Send + Sync + 'static + ProvideRuntimeApi + HeaderBackend, - C::Api: ContractsRuntimeApi< + Client: Send + Sync + 'static + ProvideRuntimeApi + HeaderBackend, + Client::Api: ContractsRuntimeApi< Block, AccountId, Balance, @@ -203,7 +212,7 @@ where &self, call_request: CallRequest, at: Option<::Hash>, - ) -> Result> { + ) -> RpcResult> { let api = self.client.runtime_api(); let at = BlockId::hash(at.unwrap_or_else(|| // If the block hash is not supplied assume the best block. 
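The `at.unwrap_or_else(|| self.client.info().best_hash)` pattern recurs throughout these RPC hunks. A generic sketch of it, assuming the usual Substrate `HeaderBackend` bound; the helper name is illustrative:

    use sp_blockchain::HeaderBackend;
    use sp_runtime::{generic::BlockId, traits::Block as BlockT};

    // Resolve an optional block hash to a BlockId, defaulting to the best
    // block known to the client when the caller did not supply one.
    fn block_id_or_best<Block, Client>(client: &Client, at: Option<Block::Hash>) -> BlockId<Block>
    where
        Block: BlockT,
        Client: HeaderBackend<Block>,
    {
        BlockId::hash(at.unwrap_or_else(|| client.info().best_hash))
    }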
@@ -226,7 +235,7 @@ where &self, instantiate_request: InstantiateRequest, at: Option<::Hash>, - ) -> Result> { + ) -> RpcResult> { let api = self.client.runtime_api(); let at = BlockId::hash(at.unwrap_or_else(|| // If the block hash is not supplied assume the best block. @@ -265,7 +274,7 @@ where &self, upload_request: CodeUploadRequest, at: Option<::Hash>, - ) -> Result> { + ) -> RpcResult> { let api = self.client.runtime_api(); let at = BlockId::hash(at.unwrap_or_else(|| // If the block hash is not supplied assume the best block. @@ -285,12 +294,9 @@ where address: AccountId, key: H256, at: Option<::Hash>, - ) -> Result> { + ) -> RpcResult> { let api = self.client.runtime_api(); - let at = BlockId::hash(at.unwrap_or_else(|| - // If the block hash is not supplied assume the best block. - self.client.info().best_hash)); - + let at = BlockId::hash(at.unwrap_or_else(|| self.client.info().best_hash)); let result = api .get_storage(&at, address, key.into()) .map_err(runtime_error_into_rpc_err)? @@ -302,32 +308,35 @@ where } /// Converts a runtime trap into an RPC error. -fn runtime_error_into_rpc_err(err: impl std::fmt::Display) -> Error { - Error { - code: ErrorCode::ServerError(RUNTIME_ERROR), - message: "Runtime error".into(), - data: Some(err.to_string().into()), - } +fn runtime_error_into_rpc_err(err: impl std::fmt::Debug) -> JsonRpseeError { + CallError::Custom(ErrorObject::owned( + RUNTIME_ERROR, + "Runtime error", + Some(format!("{:?}", err)), + )) + .into() } -fn decode_hex>(from: H, name: &str) -> Result { - from.try_into().map_err(|_| Error { - code: ErrorCode::InvalidParams, - message: format!("{:?} does not fit into the {} type", from, name), - data: None, +fn decode_hex>(from: H, name: &str) -> RpcResult { + from.try_into().map_err(|_| { + JsonRpseeError::Call(CallError::Custom(ErrorObject::owned( + ErrorCode::InvalidParams.code(), + format!("{:?} does not fit into the {} type", from, name), + None::<()>, + ))) }) } -fn limit_gas(gas_limit: Weight) -> Result<()> { +fn limit_gas(gas_limit: Weight) -> RpcResult<()> { if gas_limit > GAS_LIMIT { - Err(Error { - code: ErrorCode::InvalidParams, - message: format!( + Err(JsonRpseeError::Call(CallError::Custom(ErrorObject::owned( + ErrorCode::InvalidParams.code(), + format!( "Requested gas limit is greater than maximum allowed: {} > {}", gas_limit, GAS_LIMIT ), - data: None, - }) + None::<()>, + )))) } else { Ok(()) } @@ -336,6 +345,7 @@ fn limit_gas(gas_limit: Weight) -> Result<()> { #[cfg(test)] mod tests { use super::*; + use pallet_contracts_primitives::{ContractExecResult, ContractInstantiateResult}; use sp_core::U256; fn trim(json: &str) -> String { diff --git a/frame/merkle-mountain-range/rpc/Cargo.toml b/frame/merkle-mountain-range/rpc/Cargo.toml index 9b3d3a43c6045..2d3bfebc6633f 100644 --- a/frame/merkle-mountain-range/rpc/Cargo.toml +++ b/frame/merkle-mountain-range/rpc/Cargo.toml @@ -14,9 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } -jsonrpc-core = "18.0.0" -jsonrpc-core-client = "18.0.0" -jsonrpc-derive = "18.0.0" +jsonrpsee = { version = "0.12.0", features = ["server", "macros"] } serde = { version = "1.0.136", features = ["derive"] } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } diff --git a/frame/merkle-mountain-range/rpc/src/lib.rs b/frame/merkle-mountain-range/rpc/src/lib.rs index be1a74450d1f4..12e4e11f88256 100644 --- 
a/frame/merkle-mountain-range/rpc/src/lib.rs +++ b/frame/merkle-mountain-range/rpc/src/lib.rs @@ -16,14 +16,18 @@ // limitations under the License. #![warn(missing_docs)] +#![warn(unused_crate_dependencies)] //! Node-specific RPC methods for interaction with Merkle Mountain Range pallet. -use std::sync::Arc; +use std::{marker::PhantomData, sync::Arc}; use codec::{Codec, Encode}; -use jsonrpc_core::{Error, ErrorCode, Result}; -use jsonrpc_derive::rpc; +use jsonrpsee::{ + core::{async_trait, RpcResult}, + proc_macros::rpc, + types::error::{CallError, ErrorObject}, +}; use serde::{Deserialize, Serialize}; use sp_api::ProvideRuntimeApi; @@ -34,6 +38,11 @@ use sp_runtime::{generic::BlockId, traits::Block as BlockT}; pub use sp_mmr_primitives::MmrApi as MmrRuntimeApi; +const RUNTIME_ERROR: i32 = 8000; +const MMR_ERROR: i32 = 8010; +const LEAF_NOT_FOUND_ERROR: i32 = MMR_ERROR + 1; +const GENERATE_PROOF_ERROR: i32 = MMR_ERROR + 2; + /// Retrieved MMR leaf and its proof. #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] #[serde(rename_all = "camelCase")] @@ -86,7 +95,7 @@ impl LeafBatchProof { } /// MMR RPC methods. -#[rpc] +#[rpc(client, server)] pub trait MmrApi { /// Generate MMR proof for given leaf index. /// @@ -96,12 +105,12 @@ pub trait MmrApi { /// /// Returns the (full) leaf itself and a proof for this leaf (compact encoding, i.e. hash of /// the leaf). Both parameters are SCALE-encoded. - #[rpc(name = "mmr_generateProof")] + #[method(name = "mmr_generateProof")] fn generate_proof( &self, leaf_index: LeafIndex, at: Option, - ) -> Result>; + ) -> RpcResult>; /// Generate MMR proof for the given leaf indices. /// @@ -113,43 +122,43 @@ pub trait MmrApi { /// the leaves). Both parameters are SCALE-encoded. /// The order of entries in the `leaves` field of the returned struct /// is the same as the order of the entries in `leaf_indices` supplied - #[rpc(name = "mmr_generateBatchProof")] + #[method(name = "mmr_generateBatchProof")] fn generate_batch_proof( &self, leaf_indices: Vec, at: Option, - ) -> Result>; + ) -> RpcResult>; } -/// An implementation of MMR specific RPC methods. -pub struct Mmr { - client: Arc, - _marker: std::marker::PhantomData, +/// MMR RPC methods. +pub struct MmrRpc { + client: Arc, + _marker: PhantomData, } -impl Mmr { +impl MmrRpc { /// Create new `Mmr` with the given reference to the client. pub fn new(client: Arc) -> Self { Self { client, _marker: Default::default() } } } -impl MmrApi<::Hash> for Mmr +#[async_trait] +impl MmrApiServer<::Hash> + for MmrRpc where Block: BlockT, - C: Send + Sync + 'static + ProvideRuntimeApi + HeaderBackend, - C::Api: MmrRuntimeApi, + Client: Send + Sync + 'static + ProvideRuntimeApi + HeaderBackend, + Client::Api: MmrRuntimeApi, MmrHash: Codec + Send + Sync + 'static, { fn generate_proof( &self, leaf_index: LeafIndex, at: Option<::Hash>, - ) -> Result::Hash>> { + ) -> RpcResult> { let api = self.client.runtime_api(); - let block_hash = at.unwrap_or_else(|| - // If the block hash is not supplied assume the best block. - self.client.info().best_hash); + let block_hash = at.unwrap_or_else(|| self.client.info().best_hash); let (leaf, proof) = api .generate_proof_with_context( @@ -167,7 +176,7 @@ where &self, leaf_indices: Vec, at: Option<::Hash>, - ) -> Result::Hash>> { + ) -> RpcResult::Hash>> { let api = self.client.runtime_api(); let block_hash = at.unwrap_or_else(|| // If the block hash is not supplied assume the best block. 
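A compact sketch of the error-code scheme this MMR hunk introduces: related codes are derived from a shared base so clients can group MMR failures, and the debug detail travels in the error's data field. The constants mirror the ones added above; jsonrpsee 0.12 is assumed:

    use jsonrpsee::types::error::{CallError, ErrorObject};

    const MMR_ERROR: i32 = 8010;
    const LEAF_NOT_FOUND_ERROR: i32 = MMR_ERROR + 1;
    const GENERATE_PROOF_ERROR: i32 = MMR_ERROR + 2;

    // Build a call error for a missing leaf, keeping the debug detail as data.
    fn leaf_not_found(detail: impl std::fmt::Debug) -> CallError {
        CallError::Custom(ErrorObject::owned(
            LEAF_NOT_FOUND_ERROR,
            "Leaf was not found",
            Some(format!("{:?}", detail)),
        ))
    }

    fn main() {
        assert_eq!(LEAF_NOT_FOUND_ERROR, 8011);
        assert_eq!(GENERATE_PROOF_ERROR, 8012);
        let _err = leaf_not_found("missing leaf index 42");
    }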
@@ -186,37 +195,31 @@ where } } -const RUNTIME_ERROR: i64 = 8000; -const MMR_ERROR: i64 = 8010; - -/// Converts a mmr-specific error into an RPC error. -fn mmr_error_into_rpc_error(err: MmrError) -> Error { +/// Converts a mmr-specific error into a [`CallError`]. +fn mmr_error_into_rpc_error(err: MmrError) -> CallError { + let data = format!("{:?}", err); match err { - MmrError::LeafNotFound => Error { - code: ErrorCode::ServerError(MMR_ERROR + 1), - message: "Leaf was not found".into(), - data: Some(format!("{:?}", err).into()), - }, - MmrError::GenerateProof => Error { - code: ErrorCode::ServerError(MMR_ERROR + 2), - message: "Error while generating the proof".into(), - data: Some(format!("{:?}", err).into()), - }, - _ => Error { - code: ErrorCode::ServerError(MMR_ERROR), - message: "Unexpected MMR error".into(), - data: Some(format!("{:?}", err).into()), - }, + MmrError::LeafNotFound => CallError::Custom(ErrorObject::owned( + LEAF_NOT_FOUND_ERROR, + "Leaf was not found", + Some(data), + )), + MmrError::GenerateProof => CallError::Custom(ErrorObject::owned( + GENERATE_PROOF_ERROR, + "Error while generating the proof", + Some(data), + )), + _ => CallError::Custom(ErrorObject::owned(MMR_ERROR, "Unexpected MMR error", Some(data))), } } -/// Converts a runtime trap into an RPC error. -fn runtime_error_into_rpc_error(err: impl std::fmt::Display) -> Error { - Error { - code: ErrorCode::ServerError(RUNTIME_ERROR), - message: "Runtime trapped".into(), - data: Some(err.to_string().into()), - } +/// Converts a runtime trap into a [`CallError`]. +fn runtime_error_into_rpc_error(err: impl std::fmt::Debug) -> CallError { + CallError::Custom(ErrorObject::owned( + RUNTIME_ERROR, + "Runtime trapped", + Some(format!("{:?}", err)), + )) } #[cfg(test)] diff --git a/frame/state-trie-migration/Cargo.toml b/frame/state-trie-migration/Cargo.toml index cae3bb1a9f975..958ab50315427 100644 --- a/frame/state-trie-migration/Cargo.toml +++ b/frame/state-trie-migration/Cargo.toml @@ -31,7 +31,7 @@ substrate-state-trie-migration-rpc = { optional = true, path = "../../utils/fram [dev-dependencies] parking_lot = "0.12.0" -tokio = { version = "1.10", features = ["macros"] } +tokio = { version = "1.17.0", features = ["macros"] } pallet-balances = { path = "../balances" } sp-tracing = { path = "../../primitives/tracing" } diff --git a/frame/transaction-payment/rpc/Cargo.toml b/frame/transaction-payment/rpc/Cargo.toml index b7a353916efbc..6133d3a4b6da1 100644 --- a/frame/transaction-payment/rpc/Cargo.toml +++ b/frame/transaction-payment/rpc/Cargo.toml @@ -14,9 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } -jsonrpc-core = "18.0.0" -jsonrpc-core-client = "18.0.0" -jsonrpc-derive = "18.0.0" +jsonrpsee = { version = "0.12.0", features = ["server", "macros"] } pallet-transaction-payment-rpc-runtime-api = { version = "4.0.0-dev", path = "./runtime-api" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } diff --git a/frame/transaction-payment/rpc/src/lib.rs b/frame/transaction-payment/rpc/src/lib.rs index 29d94fa260105..b0be19fdb22a9 100644 --- a/frame/transaction-payment/rpc/src/lib.rs +++ b/frame/transaction-payment/rpc/src/lib.rs @@ -17,11 +17,14 @@ //! RPC interface for the transaction payment pallet. 
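A standalone sketch of the decode-and-map pattern used by `query_info` and `query_fee_details` below: the extrinsic arrives SCALE-encoded and a decode failure becomes a custom call error. `parity-scale-codec` and jsonrpsee 0.12 are assumed; the code value follows the `Error::DecodeError => 2` mapping in this file:

    use codec::Decode;
    use jsonrpsee::{
        core::RpcResult,
        types::error::{CallError, ErrorObject},
    };

    const DECODE_ERROR: i32 = 2;

    // Decode a SCALE-encoded payload, turning decode failures into a JSON-RPC
    // call error that carries the decoder's debug output as data.
    fn decode_scale<T: Decode>(encoded: &[u8]) -> RpcResult<T> {
        Decode::decode(&mut &*encoded).map_err(|e| {
            CallError::Custom(ErrorObject::owned(
                DECODE_ERROR,
                "Unable to decode payload.",
                Some(format!("{:?}", e)),
            ))
            .into()
        })
    }

    fn main() {
        // SCALE encodes a u32 as 4 little-endian bytes.
        let encoded = 42u32.to_le_bytes();
        let decoded: u32 = decode_scale(&encoded).expect("u32 decodes from 4 LE bytes");
        assert_eq!(decoded, 42);
    }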
-pub use self::gen_client::Client as TransactionPaymentClient; +use std::{convert::TryInto, sync::Arc}; + use codec::{Codec, Decode}; -use jsonrpc_core::{Error as RpcError, ErrorCode, Result}; -use jsonrpc_derive::rpc; -pub use pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi as TransactionPaymentRuntimeApi; +use jsonrpsee::{ + core::{async_trait, Error as JsonRpseeError, RpcResult}, + proc_macros::rpc, + types::error::{CallError, ErrorCode, ErrorObject}, +}; use pallet_transaction_payment_rpc_runtime_api::{FeeDetails, InclusionFee, RuntimeDispatchInfo}; use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; @@ -31,28 +34,31 @@ use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, MaybeDisplay}, }; -use std::sync::Arc; -#[rpc] +pub use pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi as TransactionPaymentRuntimeApi; + +#[rpc(client, server)] pub trait TransactionPaymentApi { - #[rpc(name = "payment_queryInfo")] - fn query_info(&self, encoded_xt: Bytes, at: Option) -> Result; - #[rpc(name = "payment_queryFeeDetails")] + #[method(name = "payment_queryInfo")] + fn query_info(&self, encoded_xt: Bytes, at: Option) -> RpcResult; + + #[method(name = "payment_queryFeeDetails")] fn query_fee_details( &self, encoded_xt: Bytes, at: Option, - ) -> Result>; + ) -> RpcResult>; } -/// A struct that implements the [`TransactionPaymentApi`]. -pub struct TransactionPayment { +/// Provides RPC methods to query a dispatchable's class, weight and fee. +pub struct TransactionPaymentRpc { + /// Shared reference to the client. client: Arc, _marker: std::marker::PhantomData
<P>
, } -impl TransactionPayment { - /// Create new `TransactionPayment` with the given reference to the client. +impl TransactionPaymentRpc { + /// Creates a new instance of the TransactionPaymentRpc helper. pub fn new(client: Arc) -> Self { Self { client, _marker: Default::default() } } @@ -66,8 +72,8 @@ pub enum Error { RuntimeError, } -impl From for i64 { - fn from(e: Error) -> i64 { +impl From for i32 { + fn from(e: Error) -> i32 { match e { Error::RuntimeError => 1, Error::DecodeError => 2, @@ -75,66 +81,75 @@ impl From for i64 { } } -impl TransactionPaymentApi<::Hash, RuntimeDispatchInfo> - for TransactionPayment +#[async_trait] +impl + TransactionPaymentApiServer<::Hash, RuntimeDispatchInfo> + for TransactionPaymentRpc where Block: BlockT, - C: 'static + ProvideRuntimeApi + HeaderBackend, + C: ProvideRuntimeApi + HeaderBackend + Send + Sync + 'static, C::Api: TransactionPaymentRuntimeApi, - Balance: Codec + MaybeDisplay + Copy + TryInto, + Balance: Codec + MaybeDisplay + Copy + TryInto + Send + Sync + 'static, { fn query_info( &self, encoded_xt: Bytes, - at: Option<::Hash>, - ) -> Result> { + at: Option, + ) -> RpcResult> { let api = self.client.runtime_api(); - let at = BlockId::hash(at.unwrap_or_else(|| - // If the block hash is not supplied assume the best block. - self.client.info().best_hash)); + let at = BlockId::hash(at.unwrap_or_else(|| self.client.info().best_hash)); let encoded_len = encoded_xt.len() as u32; - let uxt: Block::Extrinsic = Decode::decode(&mut &*encoded_xt).map_err(|e| RpcError { - code: ErrorCode::ServerError(Error::DecodeError.into()), - message: "Unable to query dispatch info.".into(), - data: Some(format!("{:?}", e).into()), + let uxt: Block::Extrinsic = Decode::decode(&mut &*encoded_xt).map_err(|e| { + CallError::Custom(ErrorObject::owned( + Error::DecodeError.into(), + "Unable to query dispatch info.", + Some(format!("{:?}", e)), + )) })?; - api.query_info(&at, uxt, encoded_len).map_err(|e| RpcError { - code: ErrorCode::ServerError(Error::RuntimeError.into()), - message: "Unable to query dispatch info.".into(), - data: Some(e.to_string().into()), + api.query_info(&at, uxt, encoded_len).map_err(|e| { + CallError::Custom(ErrorObject::owned( + Error::RuntimeError.into(), + "Unable to query dispatch info.", + Some(e.to_string()), + )) + .into() }) } fn query_fee_details( &self, encoded_xt: Bytes, - at: Option<::Hash>, - ) -> Result> { + at: Option, + ) -> RpcResult> { let api = self.client.runtime_api(); - let at = BlockId::hash(at.unwrap_or_else(|| - // If the block hash is not supplied assume the best block. 
- self.client.info().best_hash)); + let at = BlockId::hash(at.unwrap_or_else(|| self.client.info().best_hash)); let encoded_len = encoded_xt.len() as u32; - let uxt: Block::Extrinsic = Decode::decode(&mut &*encoded_xt).map_err(|e| RpcError { - code: ErrorCode::ServerError(Error::DecodeError.into()), - message: "Unable to query fee details.".into(), - data: Some(format!("{:?}", e).into()), + let uxt: Block::Extrinsic = Decode::decode(&mut &*encoded_xt).map_err(|e| { + CallError::Custom(ErrorObject::owned( + Error::DecodeError.into(), + "Unable to query fee details.", + Some(format!("{:?}", e)), + )) })?; - let fee_details = api.query_fee_details(&at, uxt, encoded_len).map_err(|e| RpcError { - code: ErrorCode::ServerError(Error::RuntimeError.into()), - message: "Unable to query fee details.".into(), - data: Some(e.to_string().into()), + let fee_details = api.query_fee_details(&at, uxt, encoded_len).map_err(|e| { + CallError::Custom(ErrorObject::owned( + Error::RuntimeError.into(), + "Unable to query fee details.", + Some(e.to_string()), + )) })?; let try_into_rpc_balance = |value: Balance| { - value.try_into().map_err(|_| RpcError { - code: ErrorCode::InvalidParams, - message: format!("{} doesn't fit in NumberOrHex representation", value), - data: None, + value.try_into().map_err(|_| { + JsonRpseeError::Call(CallError::Custom(ErrorObject::owned( + ErrorCode::InvalidParams.code(), + format!("{} doesn't fit in NumberOrHex representation", value), + None::<()>, + ))) }) }; diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index cdadfb0f10f03..148f34246044d 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -28,7 +28,7 @@ pub use sc_client_api::{ }; pub use sc_client_db::{self, Backend}; pub use sc_executor::{self, NativeElseWasmExecutor, WasmExecutionMethod}; -pub use sc_service::{client, RpcHandlers, RpcSession}; +pub use sc_service::{client, RpcHandlers}; pub use sp_consensus; pub use sp_keyring::{ ed25519::Keyring as Ed25519Keyring, sr25519::Keyring as Sr25519Keyring, AccountKeyring, @@ -37,10 +37,7 @@ pub use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; pub use sp_runtime::{Storage, StorageChild}; pub use sp_state_machine::ExecutionStrategy; -use futures::{ - future::{Future, FutureExt}, - stream::StreamExt, -}; +use futures::{future::Future, stream::StreamExt}; use sc_client_api::BlockchainEvents; use sc_service::client::{ClientConfig, LocalCallExecutor}; use serde::Deserialize; @@ -297,16 +294,14 @@ impl /// The output of an RPC transaction. pub struct RpcTransactionOutput { /// The output string of the transaction if any. - pub result: Option, - /// The session object. - pub session: RpcSession, + pub result: String, /// An async receiver if data will be returned via a callback. pub receiver: futures::channel::mpsc::UnboundedReceiver, } impl std::fmt::Debug for RpcTransactionOutput { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "RpcTransactionOutput {{ result: {:?}, session, receiver }}", self.result) + write!(f, "RpcTransactionOutput {{ result: {:?}, receiver }}", self.result) } } @@ -328,56 +323,51 @@ impl std::fmt::Display for RpcTransactionError { } /// An extension trait for `RpcHandlers`. +#[async_trait::async_trait] pub trait RpcHandlersExt { /// Send a transaction through the RpcHandlers. 
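The extension trait rewritten here moves from hand-rolled boxed futures to `#[async_trait]`. A minimal sketch of that shape, with illustrative names, assuming the `async-trait` crate:

    // A test-helper style extension trait exposing an async method directly
    // instead of returning a boxed future.
    #[async_trait::async_trait]
    pub trait SendJsonRpc {
        async fn send_json_rpc(&self, request: String) -> Result<String, String>;
    }

    pub struct EchoHandler;

    #[async_trait::async_trait]
    impl SendJsonRpc for EchoHandler {
        async fn send_json_rpc(&self, request: String) -> Result<String, String> {
            // A real implementation would forward to `RpcHandlers::rpc_query`.
            Ok(request)
        }
    }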
- fn send_transaction( + async fn send_transaction( &self, extrinsic: OpaqueExtrinsic, - ) -> Pin> + Send>>; + ) -> Result; } +#[async_trait::async_trait] impl RpcHandlersExt for RpcHandlers { - fn send_transaction( + async fn send_transaction( &self, extrinsic: OpaqueExtrinsic, - ) -> Pin> + Send>> { - let (tx, rx) = futures::channel::mpsc::unbounded(); - let mem = RpcSession::new(tx); - Box::pin( - self.rpc_query( - &mem, - &format!( - r#"{{ + ) -> Result { + let (result, rx) = self + .rpc_query(&format!( + r#"{{ "jsonrpc": "2.0", "method": "author_submitExtrinsic", "params": ["0x{}"], "id": 0 }}"#, - hex::encode(extrinsic.encode()) - ), - ) - .map(move |result| parse_rpc_result(result, mem, rx)), - ) + hex::encode(extrinsic.encode()) + )) + .await + .expect("valid JSON-RPC request object; qed"); + parse_rpc_result(result, rx) } } pub(crate) fn parse_rpc_result( - result: Option, - session: RpcSession, + result: String, receiver: futures::channel::mpsc::UnboundedReceiver, ) -> Result { - if let Some(ref result) = result { - let json: serde_json::Value = - serde_json::from_str(result).expect("the result can only be a JSONRPC string; qed"); - let error = json.as_object().expect("JSON result is always an object; qed").get("error"); - - if let Some(error) = error { - return Err(serde_json::from_value(error.clone()) - .expect("the JSONRPC result's error is always valid; qed")) - } + let json: serde_json::Value = + serde_json::from_str(&result).expect("the result can only be a JSONRPC string; qed"); + let error = json.as_object().expect("JSON result is always an object; qed").get("error"); + + if let Some(error) = error { + return Err(serde_json::from_value(error.clone()) + .expect("the JSONRPC result's error is always valid; qed")) } - Ok(RpcTransactionOutput { result, session, receiver }) + Ok(RpcTransactionOutput { result, receiver }) } /// An extension trait for `BlockchainEvents`. 
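A standalone sketch of the check `parse_rpc_result` performs above: a JSON-RPC response is an object, and the presence of an `error` member marks a failure. Only `serde_json` is assumed; the helper name is illustrative:

    fn has_rpc_error(response: &str) -> bool {
        let json: serde_json::Value =
            serde_json::from_str(response).expect("response is valid JSON; qed");
        json.as_object().map_or(false, |obj| obj.contains_key("error"))
    }

    fn main() {
        assert!(!has_rpc_error(r#"{"jsonrpc":"2.0","result":19,"id":1}"#));
        assert!(has_rpc_error(
            r#"{"jsonrpc":"2.0","error":{"code":-32601,"message":"Method not found"},"id":1}"#
        ));
    }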
@@ -418,40 +408,23 @@ where #[cfg(test)] mod tests { - use sc_service::RpcSession; - - fn create_session_and_receiver( - ) -> (RpcSession, futures::channel::mpsc::UnboundedReceiver) { - let (tx, rx) = futures::channel::mpsc::unbounded(); - let mem = RpcSession::new(tx.into()); - - (mem, rx) - } - #[test] fn parses_error_properly() { - let (mem, rx) = create_session_and_receiver(); - assert!(super::parse_rpc_result(None, mem, rx).is_ok()); - - let (mem, rx) = create_session_and_receiver(); + let (_, rx) = futures::channel::mpsc::unbounded(); assert!(super::parse_rpc_result( - Some( - r#"{ + r#"{ "jsonrpc": "2.0", "result": 19, "id": 1 }"# - .to_string() - ), - mem, + .to_string(), rx ) - .is_ok(),); + .is_ok()); - let (mem, rx) = create_session_and_receiver(); + let (_, rx) = futures::channel::mpsc::unbounded(); let error = super::parse_rpc_result( - Some( - r#"{ + r#"{ "jsonrpc": "2.0", "error": { "code": -32601, @@ -459,9 +432,7 @@ mod tests { }, "id": 1 }"# - .to_string(), - ), - mem, + .to_string(), rx, ) .unwrap_err(); @@ -469,10 +440,9 @@ mod tests { assert_eq!(error.message, "Method not found"); assert!(error.data.is_none()); - let (mem, rx) = create_session_and_receiver(); + let (_, rx) = futures::channel::mpsc::unbounded(); let error = super::parse_rpc_result( - Some( - r#"{ + r#"{ "jsonrpc": "2.0", "error": { "code": -32601, @@ -481,9 +451,7 @@ mod tests { }, "id": 1 }"# - .to_string(), - ), - mem, + .to_string(), rx, ) .unwrap_err(); diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 94325ae9c1ab1..aef061d952a96 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -176,6 +176,19 @@ impl serde::Serialize for Extrinsic { } } +// rustc can't deduce this trait bound https://github.com/rust-lang/rust/issues/48214 +#[cfg(feature = "std")] +impl<'a> serde::Deserialize<'a> for Extrinsic { + fn deserialize(de: D) -> Result + where + D: serde::Deserializer<'a>, + { + let r = sp_core::bytes::deserialize(de)?; + Decode::decode(&mut &r[..]) + .map_err(|e| serde::de::Error::custom(format!("Decode error: {}", e))) + } +} + impl BlindCheckable for Extrinsic { type Checked = Self; diff --git a/utils/frame/remote-externalities/Cargo.toml b/utils/frame/remote-externalities/Cargo.toml index 49c004c3c074d..4a931470eafac 100644 --- a/utils/frame/remote-externalities/Cargo.toml +++ b/utils/frame/remote-externalities/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } env_logger = "0.9" -jsonrpsee = { version = "0.10.1", features = ["ws-client", "macros"] } +jsonrpsee = { version = "0.12.0", features = ["ws-client", "macros"] } log = "0.4.16" serde = "1.0.136" serde_json = "1.0" diff --git a/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml b/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml index 3913bae425757..726cc9f989ced 100644 --- a/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml +++ b/utils/frame/rpc/state-trie-migration-rpc/Cargo.toml @@ -25,9 +25,7 @@ sp-state-machine = { path = "../../../../primitives/state-machine" } sp-trie = { path = "../../../../primitives/trie" } trie-db = { version = "0.23.1" } -jsonrpc-core = "18.0.0" -jsonrpc-core-client = "18.0.0" -jsonrpc-derive = "18.0.0" +jsonrpsee = { version = "0.12.0", features = ["server", "macros"] } # Substrate Dependencies sc-client-api = { version = "4.0.0-dev", path = "../../../../client/api" } diff --git a/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs 
b/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs index 2e3dd08a7db7e..531bf463f6523 100644 --- a/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs +++ b/utils/frame/rpc/state-trie-migration-rpc/src/lib.rs @@ -17,8 +17,11 @@ //! Rpc for state migration. -use jsonrpc_core::{Error, ErrorCode, Result}; -use jsonrpc_derive::rpc; +use jsonrpsee::{ + core::{Error as JsonRpseeError, RpcResult}, + proc_macros::rpc, + types::error::{CallError, ErrorCode, ErrorObject}, +}; use sc_rpc_api::DenyUnsafe; use serde::{Deserialize, Serialize}; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; @@ -107,15 +110,15 @@ pub struct MigrationStatusResult { } /// Migration RPC methods. -#[rpc] +#[rpc(server)] pub trait StateMigrationApi { /// Check current migration state. /// /// This call is performed locally without submitting any transactions. Thus executing this /// won't change any state. Nonetheless it is a VERY costy call that should be /// only exposed to trusted peers. - #[rpc(name = "state_trieMigrationStatus")] - fn call(&self, at: Option) -> Result; + #[method(name = "state_trieMigrationStatus")] + fn call(&self, at: Option) -> RpcResult; } /// An implementation of state migration specific RPC methods. @@ -133,16 +136,14 @@ impl MigrationRpc { } } -impl StateMigrationApi<::Hash> for MigrationRpc +impl StateMigrationApiServer<::Hash> for MigrationRpc where B: BlockT, C: Send + Sync + 'static + sc_client_api::HeaderBackend, BA: 'static + sc_client_api::backend::Backend, { - fn call(&self, at: Option<::Hash>) -> Result { - if let Err(err) = self.deny_unsafe.check_if_safe() { - return Err(err.into()) - } + fn call(&self, at: Option<::Hash>) -> RpcResult { + self.deny_unsafe.check_if_safe()?; let block_id = BlockId::hash(at.unwrap_or_else(|| self.client.info().best_hash)); let state = self.backend.state_at(block_id).map_err(error_into_rpc_err)?; @@ -155,10 +156,10 @@ where } } -fn error_into_rpc_err(err: impl std::fmt::Display) -> Error { - Error { - code: ErrorCode::InternalError, - message: "Error while checking migration state".into(), - data: Some(err.to_string().into()), - } +fn error_into_rpc_err(err: impl std::fmt::Display) -> JsonRpseeError { + JsonRpseeError::Call(CallError::Custom(ErrorObject::owned( + ErrorCode::InternalError.code(), + "Error while checking migration state", + Some(err.to_string()), + ))) } diff --git a/utils/frame/rpc/support/Cargo.toml b/utils/frame/rpc/support/Cargo.toml index f9967758928e8..0c6d082406421 100644 --- a/utils/frame/rpc/support/Cargo.toml +++ b/utils/frame/rpc/support/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "3.0.0" } futures = "0.3.21" -jsonrpc-client-transports = { version = "18.0.0", features = ["http"] } +jsonrpsee = { version = "0.12.0", features = ["jsonrpsee-types"] } serde = "1" frame-support = { version = "4.0.0-dev", path = "../../../../frame/support" } sc-rpc-api = { version = "0.10.0-dev", path = "../../../../client/rpc-api" } @@ -25,5 +25,6 @@ sp-storage = { version = "6.0.0", path = "../../../../primitives/storage" } [dev-dependencies] scale-info = "2.0.1" +jsonrpsee = { version = "0.12.0", features = ["ws-client", "jsonrpsee-types"] } tokio = "1.17.0" frame-system = { version = "4.0.0-dev", path = "../../../../frame/system" } diff --git a/utils/frame/rpc/support/src/lib.rs b/utils/frame/rpc/support/src/lib.rs index 5d7cba19f643c..2ee007c84f0aa 100644 --- a/utils/frame/rpc/support/src/lib.rs +++ b/utils/frame/rpc/support/src/lib.rs @@ -15,7 
+15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Combines [sc_rpc_api::state::StateClient] with [frame_support::storage::generator] traits +//! Combines [sc_rpc_api::state::StateApiClient] with [frame_support::storage::generator] traits //! to provide strongly typed chain state queries over rpc. #![warn(missing_docs)] @@ -23,29 +23,26 @@ use codec::{DecodeAll, FullCodec, FullEncode}; use core::marker::PhantomData; use frame_support::storage::generator::{StorageDoubleMap, StorageMap, StorageValue}; -use jsonrpc_client_transports::RpcError; -use sc_rpc_api::state::StateClient; +use jsonrpsee::core::Error as RpcError; +use sc_rpc_api::state::StateApiClient; use serde::{de::DeserializeOwned, Serialize}; use sp_storage::{StorageData, StorageKey}; /// A typed query on chain state usable from an RPC client. /// /// ```no_run -/// # use jsonrpc_client_transports::RpcError; -/// # use jsonrpc_client_transports::transports::http; +/// # use jsonrpsee::core::Error as RpcError; +/// # use jsonrpsee::ws_client::WsClientBuilder; /// # use codec::Encode; /// # use frame_support::{decl_storage, decl_module}; /// # use substrate_frame_rpc_support::StorageQuery; /// # use frame_system::Config; -/// # use sc_rpc_api::state::StateClient; +/// # use sc_rpc_api::state::StateApiClient; /// # /// # // Hash would normally be ::Hash, but we don't have /// # // frame_system::Config implemented for TestRuntime. Here we just pretend. /// # type Hash = (); /// # -/// # fn main() -> Result<(), RpcError> { -/// # tokio::runtime::Runtime::new().unwrap().block_on(test()) -/// # } /// # /// # struct TestRuntime; /// # @@ -66,24 +63,25 @@ use sp_storage::{StorageData, StorageKey}; /// } /// } /// -/// # async fn test() -> Result<(), RpcError> { -/// let conn = http::connect("http://[::1]:9933").await?; -/// let cl = StateClient::::new(conn); +/// #[tokio::main] +/// async fn main() -> Result<(), RpcError> { +/// let cl = WsClientBuilder::default().build("ws://[::1]:9944").await?; /// -/// let q = StorageQuery::value::(); -/// let _: Option = q.get(&cl, None).await?; +/// let q = StorageQuery::value::(); +/// let hash = None::; +/// let _: Option = q.get(&cl, hash).await?; /// -/// let q = StorageQuery::map::((0, 0, 0)); -/// let _: Option = q.get(&cl, None).await?; +/// let q = StorageQuery::map::((0, 0, 0)); +/// let _: Option = q.get(&cl, hash).await?; /// -/// let q = StorageQuery::map::(12); -/// let _: Option = q.get(&cl, None).await?; +/// let q = StorageQuery::map::(12); +/// let _: Option = q.get(&cl, hash).await?; /// -/// let q = StorageQuery::double_map::(3, (0, 0, 0)); -/// let _: Option = q.get(&cl, None).await?; -/// # -/// # Ok(()) -/// # } +/// let q = StorageQuery::double_map::(3, (0, 0, 0)); +/// let _: Option = q.get(&cl, hash).await?; +/// +/// Ok(()) +/// } /// ``` #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)] pub struct StorageQuery { @@ -120,14 +118,18 @@ impl StorageQuery { /// /// block_index indicates the block for which state will be queried. A value of None indicates /// the latest block. 
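The updated `StorageQuery::get` below is generic over the generated `StateApiClient` trait rather than a concrete client type. A sketch of that bound, mirroring the trait bounds introduced in this hunk; the helper name is illustrative:

    use sc_rpc_api::state::StateApiClient;
    use serde::{de::DeserializeOwned, Serialize};
    use sp_storage::{StorageData, StorageKey};

    // Fetch raw storage through any client that implements the generated
    // `StateApiClient` trait (e.g. a jsonrpsee WebSocket client).
    async fn raw_storage<Hash, Client>(
        client: &Client,
        key: StorageKey,
        at: Option<Hash>,
    ) -> Result<Option<StorageData>, jsonrpsee::core::Error>
    where
        Hash: Send + Sync + 'static + DeserializeOwned + Serialize,
        Client: StateApiClient<Hash> + Sync,
    {
        client.storage(key, at).await
    }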
- pub async fn get( + pub async fn get( self, - state_client: &StateClient, + state_client: &StateClient, block_index: Option, - ) -> Result, RpcError> { + ) -> Result, RpcError> + where + Hash: Send + Sync + 'static + DeserializeOwned + Serialize, + StateClient: StateApiClient + Sync, + { let opt: Option = state_client.storage(self.key, block_index).await?; opt.map(|encoded| V::decode_all(&mut &encoded.0[..])) .transpose() - .map_err(|decode_err| RpcError::Other(Box::new(decode_err))) + .map_err(|decode_err| RpcError::Custom(decode_err.to_string())) } } diff --git a/utils/frame/rpc/system/Cargo.toml b/utils/frame/rpc/system/Cargo.toml index 5252d96af3f75..c95ae4793ca6a 100644 --- a/utils/frame/rpc/system/Cargo.toml +++ b/utils/frame/rpc/system/Cargo.toml @@ -13,11 +13,10 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] +serde_json = "1" codec = { package = "parity-scale-codec", version = "3.0.0" } +jsonrpsee = { version = "0.12.0", features = ["server"] } futures = "0.3.21" -jsonrpc-core = "18.0.0" -jsonrpc-core-client = "18.0.0" -jsonrpc-derive = "18.0.0" log = "0.4.16" frame-system-rpc-runtime-api = { version = "4.0.0-dev", path = "../../../../frame/system/rpc/runtime-api" } sc-client-api = { version = "4.0.0-dev", path = "../../../../client/api" } @@ -31,5 +30,7 @@ sp-runtime = { version = "6.0.0", path = "../../../../primitives/runtime" } [dev-dependencies] sc-transaction-pool = { version = "4.0.0-dev", path = "../../../../client/transaction-pool" } +tokio = "1.17.0" +assert_matches = "1.3.0" sp-tracing = { version = "5.0.0", path = "../../../../primitives/tracing" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../../test-utils/runtime/client" } diff --git a/utils/frame/rpc/system/src/lib.rs b/utils/frame/rpc/system/src/lib.rs index b7da7730f0920..b044035c8120e 100644 --- a/utils/frame/rpc/system/src/lib.rs +++ b/utils/frame/rpc/system/src/lib.rs @@ -17,12 +17,15 @@ //! System FRAME specific RPC methods. -use std::sync::Arc; +use std::{fmt::Display, sync::Arc}; + +use codec::{self, Codec, Decode, Encode}; +use jsonrpsee::{ + core::{async_trait, RpcResult}, + proc_macros::rpc, + types::error::{CallError, ErrorObject}, +}; -use codec::{Codec, Decode, Encode}; -use futures::FutureExt; -use jsonrpc_core::{Error as RpcError, ErrorCode}; -use jsonrpc_derive::rpc; use sc_rpc_api::DenyUnsafe; use sc_transaction_pool_api::{InPoolTransaction, TransactionPool}; use sp_api::ApiExt; @@ -31,26 +34,22 @@ use sp_blockchain::HeaderBackend; use sp_core::{hexdisplay::HexDisplay, Bytes}; use sp_runtime::{generic::BlockId, legacy, traits}; -pub use self::gen_client::Client as SystemClient; pub use frame_system_rpc_runtime_api::AccountNonceApi; -/// Future that resolves to account nonce. -type FutureResult = jsonrpc_core::BoxFuture>; - /// System RPC methods. -#[rpc] +#[rpc(client, server)] pub trait SystemApi { /// Returns the next valid index (aka nonce) for given account. /// /// This method takes into consideration all pending transactions /// currently in the pool and if no transactions are found in the pool /// it fallbacks to query the index from the runtime (aka. state nonce). - #[rpc(name = "system_accountNextIndex", alias("account_nextIndex"))] - fn nonce(&self, account: AccountId) -> FutureResult; + #[method(name = "system_accountNextIndex", aliases = ["account_nextIndex"])] + async fn nonce(&self, account: AccountId) -> RpcResult; /// Dry run an extrinsic at a given block. Return SCALE encoded ApplyExtrinsicResult. 
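The system RPC trait above also shows the new alias syntax: `alias("...")` becomes `aliases = [...]`, and the methods are declared `async`. A minimal sketch, assuming jsonrpsee 0.12 with the server and macros features; the trait name and account type are illustrative:

    use jsonrpsee::{core::RpcResult, proc_macros::rpc};

    #[rpc(server)]
    pub trait NonceApi {
        /// Next valid nonce for the given account, also reachable under the
        /// legacy `account_nextIndex` method name.
        #[method(name = "system_accountNextIndex", aliases = ["account_nextIndex"])]
        async fn nonce(&self, account: String) -> RpcResult<u64>;
    }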
- #[rpc(name = "system_dryRun", alias("system_dryRunAt"))] - fn dry_run(&self, extrinsic: Bytes, at: Option) -> FutureResult; + #[method(name = "system_dryRun", aliases = ["system_dryRunAt"])] + async fn dry_run(&self, extrinsic: Bytes, at: Option) -> RpcResult; } /// Error type of this RPC api. @@ -61,8 +60,8 @@ pub enum Error { RuntimeError, } -impl From for i64 { - fn from(e: Error) -> i64 { +impl From for i32 { + fn from(e: Error) -> i32 { match e { Error::RuntimeError => 1, Error::DecodeError => 2, @@ -71,22 +70,23 @@ impl From for i64 { } /// An implementation of System-specific RPC methods on full client. -pub struct FullSystem { +pub struct SystemRpc { client: Arc, pool: Arc
<P>
, deny_unsafe: DenyUnsafe, _marker: std::marker::PhantomData, } -impl FullSystem { +impl SystemRpc { /// Create new `FullSystem` given client and transaction pool. pub fn new(client: Arc, pool: Arc
<P>
, deny_unsafe: DenyUnsafe) -> Self { - FullSystem { client, pool, deny_unsafe, _marker: Default::default() } + Self { client, pool, deny_unsafe, _marker: Default::default() } } } -impl SystemApi<::Hash, AccountId, Index> - for FullSystem +#[async_trait] +impl + SystemApiServer<::Hash, AccountId, Index> for SystemRpc where C: sp_api::ProvideRuntimeApi, C: HeaderBackend, @@ -95,88 +95,83 @@ where C::Api: BlockBuilder, P: TransactionPool + 'static, Block: traits::Block, - AccountId: Clone + std::fmt::Display + Codec, - Index: Clone + std::fmt::Display + Codec + Send + traits::AtLeast32Bit + 'static, + AccountId: Clone + Display + Codec + Send + 'static, + Index: Clone + Display + Codec + Send + traits::AtLeast32Bit + 'static, { - fn nonce(&self, account: AccountId) -> FutureResult { - let get_nonce = || { - let api = self.client.runtime_api(); - let best = self.client.info().best_hash; - let at = BlockId::hash(best); - - let nonce = api.account_nonce(&at, account.clone()).map_err(|e| RpcError { - code: ErrorCode::ServerError(Error::RuntimeError.into()), - message: "Unable to query nonce.".into(), - data: Some(e.to_string().into()), - })?; - - Ok(adjust_nonce(&*self.pool, account, nonce)) - }; - - let res = get_nonce(); - async move { res }.boxed() + async fn nonce(&self, account: AccountId) -> RpcResult { + let api = self.client.runtime_api(); + let best = self.client.info().best_hash; + let at = BlockId::hash(best); + + let nonce = api.account_nonce(&at, account.clone()).map_err(|e| { + CallError::Custom(ErrorObject::owned( + Error::RuntimeError.into(), + "Unable to query nonce.", + Some(e.to_string()), + )) + })?; + Ok(adjust_nonce(&*self.pool, account, nonce)) } - fn dry_run( + async fn dry_run( &self, extrinsic: Bytes, at: Option<::Hash>, - ) -> FutureResult { - if let Err(err) = self.deny_unsafe.check_if_safe() { - return async move { Err(err.into()) }.boxed() - } + ) -> RpcResult { + self.deny_unsafe.check_if_safe()?; + let api = self.client.runtime_api(); + let at = BlockId::::hash(at.unwrap_or_else(|| + // If the block hash is not supplied assume the best block. + self.client.info().best_hash)); + + let uxt: ::Extrinsic = + Decode::decode(&mut &*extrinsic).map_err(|e| { + CallError::Custom(ErrorObject::owned( + Error::DecodeError.into(), + "Unable to dry run extrinsic", + Some(e.to_string()), + )) + })?; - let dry_run = || { - let api = self.client.runtime_api(); - let at = BlockId::::hash(at.unwrap_or_else(|| - // If the block hash is not supplied assume the best block. - self.client.info().best_hash)); - - let uxt: ::Extrinsic = Decode::decode(&mut &*extrinsic) - .map_err(|e| RpcError { - code: ErrorCode::ServerError(Error::DecodeError.into()), - message: "Unable to dry run extrinsic.".into(), - data: Some(e.to_string().into()), - })?; - - let api_version = api - .api_version::>(&at) - .map_err(|e| RpcError { - code: ErrorCode::ServerError(Error::RuntimeError.into()), - message: "Unable to dry run extrinsic.".into(), - data: Some(e.to_string().into()), - })? 
- .ok_or_else(|| RpcError { - code: ErrorCode::ServerError(Error::RuntimeError.into()), - message: "Unable to dry run extrinsic.".into(), - data: Some( - format!("Could not find `BlockBuilder` api for block `{:?}`.", at).into(), - ), - })?; - - let result = if api_version < 6 { - #[allow(deprecated)] - api.apply_extrinsic_before_version_6(&at, uxt) - .map(legacy::byte_sized_error::convert_to_latest) - .map_err(|e| RpcError { - code: ErrorCode::ServerError(Error::RuntimeError.into()), - message: "Unable to dry run extrinsic.".into(), - data: Some(e.to_string().into()), - })? - } else { - api.apply_extrinsic(&at, uxt).map_err(|e| RpcError { - code: ErrorCode::ServerError(Error::RuntimeError.into()), - message: "Unable to dry run extrinsic.".into(), - data: Some(e.to_string().into()), - })? - }; + let api_version = api + .api_version::>(&at) + .map_err(|e| { + CallError::Custom(ErrorObject::owned( + Error::RuntimeError.into(), + "Unable to dry run extrinsic.", + Some(e.to_string()), + )) + })? + .ok_or_else(|| { + CallError::Custom(ErrorObject::owned( + Error::RuntimeError.into(), + "Unable to dry run extrinsic.", + Some(format!("Could not find `BlockBuilder` api for block `{:?}`.", at)), + )) + })?; - Ok(Encode::encode(&result).into()) + let result = if api_version < 6 { + #[allow(deprecated)] + api.apply_extrinsic_before_version_6(&at, uxt) + .map(legacy::byte_sized_error::convert_to_latest) + .map_err(|e| { + CallError::Custom(ErrorObject::owned( + Error::RuntimeError.into(), + "Unable to dry run extrinsic.", + Some(e.to_string()), + )) + })? + } else { + api.apply_extrinsic(&at, uxt).map_err(|e| { + CallError::Custom(ErrorObject::owned( + Error::RuntimeError.into(), + "Unable to dry run extrinsic.", + Some(e.to_string()), + )) + })? }; - let res = dry_run(); - - async move { res }.boxed() + Ok(Encode::encode(&result).into()) } } @@ -220,7 +215,9 @@ where mod tests { use super::*; + use assert_matches::assert_matches; use futures::executor::block_on; + use jsonrpsee::{core::Error as JsonRpseeError, types::error::CallError}; use sc_transaction_pool::BasicPool; use sp_runtime::{ transaction_validity::{InvalidTransaction, TransactionValidityError}, @@ -228,8 +225,8 @@ mod tests { }; use substrate_test_runtime_client::{runtime::Transfer, AccountKeyring}; - #[test] - fn should_return_next_nonce_for_some_account() { + #[tokio::test] + async fn should_return_next_nonce_for_some_account() { sp_tracing::try_init_simple(); // given @@ -254,17 +251,17 @@ mod tests { let ext1 = new_transaction(1); block_on(pool.submit_one(&BlockId::number(0), source, ext1)).unwrap(); - let accounts = FullSystem::new(client, pool, DenyUnsafe::Yes); + let accounts = SystemRpc::new(client, pool, DenyUnsafe::Yes); // when - let nonce = accounts.nonce(AccountKeyring::Alice.into()); + let nonce = accounts.nonce(AccountKeyring::Alice.into()).await; // then - assert_eq!(block_on(nonce).unwrap(), 2); + assert_eq!(nonce.unwrap(), 2); } - #[test] - fn dry_run_should_deny_unsafe() { + #[tokio::test] + async fn dry_run_should_deny_unsafe() { sp_tracing::try_init_simple(); // given @@ -273,17 +270,17 @@ mod tests { let pool = BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); - let accounts = FullSystem::new(client, pool, DenyUnsafe::Yes); + let accounts = SystemRpc::new(client, pool, DenyUnsafe::Yes); // when - let res = accounts.dry_run(vec![].into(), None); - - // then - assert_eq!(block_on(res), Err(sc_rpc_api::UnsafeRpcError.into())); + let res = accounts.dry_run(vec![].into(), None).await; + 
assert_matches!(res, Err(JsonRpseeError::Call(CallError::Custom(e))) => { + assert!(e.message().contains("RPC call is unsafe to be called externally")); + }); } - #[test] - fn dry_run_should_work() { + #[tokio::test] + async fn dry_run_should_work() { sp_tracing::try_init_simple(); // given @@ -292,7 +289,7 @@ mod tests { let pool = BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); - let accounts = FullSystem::new(client, pool, DenyUnsafe::No); + let accounts = SystemRpc::new(client, pool, DenyUnsafe::No); let tx = Transfer { from: AccountKeyring::Alice.into(), @@ -303,16 +300,15 @@ mod tests { .into_signed_tx(); // when - let res = accounts.dry_run(tx.encode().into(), None); + let bytes = accounts.dry_run(tx.encode().into(), None).await.expect("Call is successful"); // then - let bytes = block_on(res).unwrap().0; - let apply_res: ApplyExtrinsicResult = Decode::decode(&mut bytes.as_slice()).unwrap(); + let apply_res: ApplyExtrinsicResult = Decode::decode(&mut bytes.as_ref()).unwrap(); assert_eq!(apply_res, Ok(Ok(()))); } - #[test] - fn dry_run_should_indicate_error() { + #[tokio::test] + async fn dry_run_should_indicate_error() { sp_tracing::try_init_simple(); // given @@ -321,7 +317,7 @@ mod tests { let pool = BasicPool::new_full(Default::default(), true.into(), None, spawner, client.clone()); - let accounts = FullSystem::new(client, pool, DenyUnsafe::No); + let accounts = SystemRpc::new(client, pool, DenyUnsafe::No); let tx = Transfer { from: AccountKeyring::Alice.into(), @@ -332,11 +328,10 @@ mod tests { .into_signed_tx(); // when - let res = accounts.dry_run(tx.encode().into(), None); + let bytes = accounts.dry_run(tx.encode().into(), None).await.expect("Call is successful"); // then - let bytes = block_on(res).unwrap().0; - let apply_res: ApplyExtrinsicResult = Decode::decode(&mut bytes.as_slice()).unwrap(); + let apply_res: ApplyExtrinsicResult = Decode::decode(&mut bytes.as_ref()).unwrap(); assert_eq!(apply_res, Err(TransactionValidityError::Invalid(InvalidTransaction::Stale))); } } diff --git a/utils/frame/try-runtime/cli/Cargo.toml b/utils/frame/try-runtime/cli/Cargo.toml index 2c0a2787b1dac..a5e658fc68476 100644 --- a/utils/frame/try-runtime/cli/Cargo.toml +++ b/utils/frame/try-runtime/cli/Cargo.toml @@ -14,12 +14,12 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] clap = { version = "3.1.6", features = ["derive"] } -jsonrpsee = { version = "0.10.1", default-features = false, features = ["ws-client"] } log = "0.4.16" parity-scale-codec = "3.0.0" serde = "1.0.136" zstd = { version = "0.10.0", default-features = false } remote-externalities = { version = "0.10.0-dev", path = "../../remote-externalities" } +jsonrpsee = { version = "0.12.0", default-features = false, features = ["ws-client"] } sc-chain-spec = { version = "4.0.0-dev", path = "../../../../client/chain-spec" } sc-cli = { version = "0.10.0-dev", path = "../../../../client/cli" } sc-executor = { version = "0.10.0-dev", path = "../../../../client/executor" }
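With remote-externalities and try-runtime now on jsonrpsee 0.12 as well, they talk to nodes through jsonrpsee's WebSocket client. A minimal connection sketch, assuming the ws-client feature and tokio with the macros feature; the endpoint is illustrative:

    use jsonrpsee::ws_client::WsClientBuilder;

    #[tokio::main]
    async fn main() -> Result<(), jsonrpsee::core::Error> {
        // Connect to a local node's WebSocket RPC endpoint.
        let _client = WsClientBuilder::default().build("ws://127.0.0.1:9944").await?;
        Ok(())
    }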