diff --git a/Cargo.lock b/Cargo.lock index 2d6263f7ab4e..d5abe5c3b151 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -778,9 +778,9 @@ dependencies = [ [[package]] name = "build_html" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3108fe6fe7ac796fb7625bdde8fa2b67b5a7731496251ca57c7b8cadd78a16a1" +checksum = "225eb82ce9e70dcc0cfa6e404d0f353326b6e163bf500ec4711cec317d11935c" [[package]] name = "bumpalo" @@ -5575,9 +5575,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.20" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" dependencies = [ "serde", ] @@ -5704,9 +5704,9 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4" [[package]] name = "serde" -version = "1.0.189" +version = "1.0.208" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" +checksum = "cff085d2cb684faa248efb494c39b68e522822ac0de72ccf08109abde717cfb2" dependencies = [ "serde_derive", ] @@ -5723,9 +5723,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.189" +version = "1.0.208" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" +checksum = "24008e81ff7613ed8e5ba0cfaf24e2c2f1e5b8a0495711e44fcd4882fca62bcf" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.36", @@ -8188,9 +8188,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0e31a9fc9a390b440cd12bbe040330dc64f64697a8a8ecbc3beb98cd0747909" +checksum = "a49ad68bfaf6fb8542c68894b68b28be31514786549855aaa8a46b36defbb100" dependencies = [ "anyhow", "once_cell", @@ -8225,9 +8225,9 @@ dependencies = [ [[package]] name = "zksync_consensus_bft" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e22e3bfe96fa30a57313e774a5e8c74ffee884abff57ecacc10e8832315ee8a2" +checksum = "b1dcab481683131c093271c19602bd495b1d682f7a94f764f2227111a0a104f0" dependencies = [ "anyhow", "async-trait", @@ -8247,9 +8247,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efb7ff3ec44b7b92fd4e28d9d92b83d61dc74125ccfc90bcfb27a5750d8a8580" +checksum = "ace39bdf50b8421c4d546381fe1ecc5212f953ce61cf93d4fa69172078dbe4af" dependencies = [ "anyhow", "blst", @@ -8271,13 +8271,14 @@ dependencies = [ [[package]] name = "zksync_consensus_executor" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7fcde1275970a6b8a33ea2ade5cc994d6392f95509ce374e0e7a26cde4cd6db" +checksum = "216e3d9f3df8c119e037e44c41db12fa6448dafbf1eaf5015d13b22400866980" dependencies = [ "anyhow", "async-trait", "rand 0.8.5", + "semver", "tracing", "vise", "zksync_concurrency", @@ -8292,9 +8293,9 @@ dependencies = [ [[package]] name = "zksync_consensus_network" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ee48bee7dae8adb2769c7315adde1780832d05ecb6a77c08cdda53a315992a" +checksum = "19d7dd832b1bbcd0a2b977b2d85986437105fd5e1e82bd4becb2e6a9db112655" dependencies = [ "anyhow", "async-trait", @@ -8309,6 +8310,7 @@ dependencies = [ "pin-project", "prost 0.12.1", "rand 0.8.5", + "semver", "snow", "thiserror", "tls-listener", @@ -8327,9 +8329,9 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72223c0b20621775db51bcc4b043addafeaf784d444af2ad4bc8bcdee477367c" +checksum = "06277266e31efdc1465f6a27ce96c7435392a270978a91956b8a848732df2cfa" dependencies = [ "anyhow", "bit-vec", @@ -8349,9 +8351,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d1750ad93f7e3a0c2f5880f9bcc1244a3b46d3e6c124c4f65f545032b87464" +checksum = "9099b2295f550176d824b5287f2f31b7739c4d10247faec1132f1c6e9d18059c" dependencies = [ "anyhow", "async-trait", @@ -8369,9 +8371,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ff679f8b5f671d887a750b8107f3b5c01fd6085f68eef37ab01de8d2bd0736b" +checksum = "4d624f55e2449f43b2c85588b5dd2a28b3c5ea629effc89df76e3254f8d9d2fb" dependencies = [ "anyhow", "rand 0.8.5", @@ -9052,13 +9054,17 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "hex", + "jsonrpsee", "rand 0.8.5", "secrecy", + "semver", "tempfile", "test-casing", "thiserror", "tokio", "tracing", + "zksync_basic_types", "zksync_concurrency", "zksync_config", "zksync_consensus_bft", @@ -9073,16 +9079,20 @@ dependencies = [ "zksync_l1_contract_interface", "zksync_merkle_tree", "zksync_metadata_calculator", + "zksync_multivm", "zksync_node_api_server", "zksync_node_genesis", "zksync_node_sync", "zksync_node_test_utils", "zksync_protobuf", + "zksync_state", "zksync_state_keeper", + "zksync_storage", "zksync_system_constants", "zksync_test_account", "zksync_types", "zksync_utils", + "zksync_vm_interface", "zksync_web3_decl", ] @@ -9139,6 +9149,7 @@ dependencies = [ "ctrlc", "futures 0.3.28", "pin-project-lite", + "semver", "thiserror", "tokio", "tracing", @@ -9341,9 +9352,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f6ba3bf0aac20de18b4ae18a22d8c81b83f8f72e8fdec1c879525ecdacd2f5" +checksum = "d26fb2beb3aeafb5e9babf1acf6494662cc7157b893fa248bd151494f931d07f" dependencies = [ "anyhow", "bit-vec", @@ -9362,9 +9373,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7798c248b9a64505f0586bd5fadad6b26c999be4a8dec6b1a86b10b3888169c5" +checksum = "58e86c198e056d921b4f3f1d2755c23d090e942b5a70b03bcb7e7c02445aa491" dependencies = [ "anyhow", "heck 0.5.0", diff --git a/Cargo.toml b/Cargo.toml index d244d436b9f5..075f5007be4c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -218,16 +218,16 @@ zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.4" } vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = 
"4ef15d46410ffc11744771a3a6c7c09dd9470c90" } # Consensus dependencies. -zksync_concurrency = "=0.1.0-rc.11" -zksync_consensus_bft = "=0.1.0-rc.11" -zksync_consensus_crypto = "=0.1.0-rc.11" -zksync_consensus_executor = "=0.1.0-rc.11" -zksync_consensus_network = "=0.1.0-rc.11" -zksync_consensus_roles = "=0.1.0-rc.11" -zksync_consensus_storage = "=0.1.0-rc.11" -zksync_consensus_utils = "=0.1.0-rc.11" -zksync_protobuf = "=0.1.0-rc.11" -zksync_protobuf_build = "=0.1.0-rc.11" +zksync_concurrency = "=0.1.0-rc.12" +zksync_consensus_bft = "=0.1.0-rc.12" +zksync_consensus_crypto = "=0.1.0-rc.12" +zksync_consensus_executor = "=0.1.0-rc.12" +zksync_consensus_network = "=0.1.0-rc.12" +zksync_consensus_roles = "=0.1.0-rc.12" +zksync_consensus_storage = "=0.1.0-rc.12" +zksync_consensus_utils = "=0.1.0-rc.12" +zksync_protobuf = "=0.1.0-rc.12" +zksync_protobuf_build = "=0.1.0-rc.12" # "Local" dependencies zksync_multivm = { version = "0.1.0", path = "core/lib/multivm" } diff --git a/core/bin/external_node/src/node_builder.rs b/core/bin/external_node/src/node_builder.rs index c30cc1a432bb..7b94ca7a0c2a 100644 --- a/core/bin/external_node/src/node_builder.rs +++ b/core/bin/external_node/src/node_builder.rs @@ -242,7 +242,13 @@ impl ExternalNodeBuilder { let config = self.config.consensus.clone(); let secrets = config::read_consensus_secrets().context("config::read_consensus_secrets()")?; - let layer = ExternalNodeConsensusLayer { config, secrets }; + let layer = ExternalNodeConsensusLayer { + build_version: crate::metadata::SERVER_VERSION + .parse() + .context("CRATE_VERSION.parse()")?, + config, + secrets, + }; self.node.add_layer(layer); Ok(self) } diff --git a/core/lib/config/src/configs/consensus.rs b/core/lib/config/src/configs/consensus.rs index 50885a6ec6fe..e5e01f880feb 100644 --- a/core/lib/config/src/configs/consensus.rs +++ b/core/lib/config/src/configs/consensus.rs @@ -1,7 +1,7 @@ use std::collections::{BTreeMap, BTreeSet}; use secrecy::{ExposeSecret as _, Secret}; -use zksync_basic_types::L2ChainId; +use zksync_basic_types::{ethabi, L2ChainId}; use zksync_concurrency::{limiter, time}; /// `zksync_consensus_crypto::TextFmt` representation of `zksync_consensus_roles::validator::PublicKey`. @@ -89,6 +89,8 @@ pub struct GenesisSpec { /// Leader of the committee. Represents /// `zksync_consensus_roles::validator::LeaderSelectionMode::Sticky`. pub leader: ValidatorPublicKey, + /// Address of the registry contract. 
+ pub registry_address: Option, } #[derive(Clone, Debug, PartialEq, Default)] diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 028b5e38055f..bc3b6025b15a 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -243,17 +243,17 @@ impl Distribution for EncodeDist { default_upgrade_addr: rng.gen(), diamond_proxy_addr: rng.gen(), validator_timelock_addr: rng.gen(), - l1_erc20_bridge_proxy_addr: rng.gen(), - l2_erc20_bridge_addr: rng.gen(), - l1_shared_bridge_proxy_addr: rng.gen(), - l2_shared_bridge_addr: rng.gen(), - l1_weth_bridge_proxy_addr: rng.gen(), - l2_weth_bridge_addr: rng.gen(), - l2_testnet_paymaster_addr: rng.gen(), + l1_erc20_bridge_proxy_addr: self.sample_opt(|| rng.gen()), + l2_erc20_bridge_addr: self.sample_opt(|| rng.gen()), + l1_shared_bridge_proxy_addr: self.sample_opt(|| rng.gen()), + l2_shared_bridge_addr: self.sample_opt(|| rng.gen()), + l1_weth_bridge_proxy_addr: self.sample_opt(|| rng.gen()), + l2_weth_bridge_addr: self.sample_opt(|| rng.gen()), + l2_testnet_paymaster_addr: self.sample_opt(|| rng.gen()), l1_multicall3_addr: rng.gen(), - base_token_addr: rng.gen(), - chain_admin_addr: rng.gen(), ecosystem_contracts: self.sample(rng), + base_token_addr: self.sample_opt(|| rng.gen()), + chain_admin_addr: self.sample_opt(|| rng.gen()), } } } @@ -777,6 +777,7 @@ impl Distribution for EncodeDist { validators: self.sample_collect(rng), attesters: self.sample_collect(rng), leader: ValidatorPublicKey(self.sample(rng)), + registry_address: self.sample_opt(|| rng.gen()), } } } diff --git a/core/lib/dal/.sqlx/query-14c0caee921199f799400dbea719ed36420c15081ff5f60da0a1c769c2dbc542.json b/core/lib/dal/.sqlx/query-14c0caee921199f799400dbea719ed36420c15081ff5f60da0a1c769c2dbc542.json deleted file mode 100644 index 3baa610d7d78..000000000000 --- a/core/lib/dal/.sqlx/query-14c0caee921199f799400dbea719ed36420c15081ff5f60da0a1c769c2dbc542.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n genesis\n FROM\n consensus_replica_state\n WHERE\n fake_key\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "genesis", - "type_info": "Jsonb" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - true - ] - }, - "hash": "14c0caee921199f799400dbea719ed36420c15081ff5f60da0a1c769c2dbc542" -} diff --git a/core/lib/dal/.sqlx/query-17c760825deaa18fc8862c950dc38ff77f5a0d5dfcc7c3f1519f882d2fa60634.json b/core/lib/dal/.sqlx/query-17c760825deaa18fc8862c950dc38ff77f5a0d5dfcc7c3f1519f882d2fa60634.json new file mode 100644 index 000000000000..28a1e54230d8 --- /dev/null +++ b/core/lib/dal/.sqlx/query-17c760825deaa18fc8862c950dc38ff77f5a0d5dfcc7c3f1519f882d2fa60634.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n genesis,\n global_config\n FROM\n consensus_replica_state\n WHERE\n fake_key\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "genesis", + "type_info": "Jsonb" + }, + { + "ordinal": 1, + "name": "global_config", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + true, + true + ] + }, + "hash": "17c760825deaa18fc8862c950dc38ff77f5a0d5dfcc7c3f1519f882d2fa60634" +} diff --git a/core/lib/dal/.sqlx/query-f87c50d37f78d6b3c5a752ea88799a1f6ee5a046ece2ef949aee7ab3d2549975.json b/core/lib/dal/.sqlx/query-1f43731fa0736a91ba8da41f52b6b22abb03e2a9b2d3b53dc858e5c5a08bfb85.json similarity index 51% rename from core/lib/dal/.sqlx/query-f87c50d37f78d6b3c5a752ea88799a1f6ee5a046ece2ef949aee7ab3d2549975.json rename to 
core/lib/dal/.sqlx/query-1f43731fa0736a91ba8da41f52b6b22abb03e2a9b2d3b53dc858e5c5a08bfb85.json index 38b88c316eef..3817369ecc16 100644 --- a/core/lib/dal/.sqlx/query-f87c50d37f78d6b3c5a752ea88799a1f6ee5a046ece2ef949aee7ab3d2549975.json +++ b/core/lib/dal/.sqlx/query-1f43731fa0736a91ba8da41f52b6b22abb03e2a9b2d3b53dc858e5c5a08bfb85.json @@ -1,15 +1,16 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n consensus_replica_state (fake_key, genesis, state)\n VALUES\n (TRUE, $1, $2)\n ", + "query": "\n INSERT INTO\n consensus_replica_state (fake_key, global_config, genesis, state)\n VALUES\n (TRUE, $1, $2, $3)\n ", "describe": { "columns": [], "parameters": { "Left": [ + "Jsonb", "Jsonb", "Jsonb" ] }, "nullable": [] }, - "hash": "f87c50d37f78d6b3c5a752ea88799a1f6ee5a046ece2ef949aee7ab3d2549975" + "hash": "1f43731fa0736a91ba8da41f52b6b22abb03e2a9b2d3b53dc858e5c5a08bfb85" } diff --git a/core/lib/dal/.sqlx/query-d9d71913a116abf390c71f5229426306b02e328d7b1b69c495443bd2ca7f7510.json b/core/lib/dal/.sqlx/query-311d0357c22163b893dc91f2b080f2ede5e22b0bbd8bc910cb36a91ed992bde1.json similarity index 58% rename from core/lib/dal/.sqlx/query-d9d71913a116abf390c71f5229426306b02e328d7b1b69c495443bd2ca7f7510.json rename to core/lib/dal/.sqlx/query-311d0357c22163b893dc91f2b080f2ede5e22b0bbd8bc910cb36a91ed992bde1.json index a42fbe98ff2f..cabe0a3dc557 100644 --- a/core/lib/dal/.sqlx/query-d9d71913a116abf390c71f5229426306b02e328d7b1b69c495443bd2ca7f7510.json +++ b/core/lib/dal/.sqlx/query-311d0357c22163b893dc91f2b080f2ede5e22b0bbd8bc910cb36a91ed992bde1.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n l1_batches_consensus (l1_batch_number, certificate, created_at, updated_at)\n VALUES\n ($1, $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", + "query": "\n INSERT INTO\n l1_batches_consensus (l1_batch_number, certificate, updated_at, created_at)\n VALUES\n ($1, $2, NOW(), NOW())\n ", "describe": { "columns": [], "parameters": { @@ -11,5 +11,5 @@ }, "nullable": [] }, - "hash": "d9d71913a116abf390c71f5229426306b02e328d7b1b69c495443bd2ca7f7510" + "hash": "311d0357c22163b893dc91f2b080f2ede5e22b0bbd8bc910cb36a91ed992bde1" } diff --git a/core/lib/dal/.sqlx/query-65bbf852d677e1f00a04785374148aa4e4a804519bcf68e14c5bbb0f58939da1.json b/core/lib/dal/.sqlx/query-65bbf852d677e1f00a04785374148aa4e4a804519bcf68e14c5bbb0f58939da1.json new file mode 100644 index 000000000000..ec17f2e0b61b --- /dev/null +++ b/core/lib/dal/.sqlx/query-65bbf852d677e1f00a04785374148aa4e4a804519bcf68e14c5bbb0f58939da1.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n attesters\n FROM\n l1_batches_consensus_committees\n WHERE\n l1_batch_number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "attesters", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "65bbf852d677e1f00a04785374148aa4e4a804519bcf68e14c5bbb0f58939da1" +} diff --git a/core/lib/dal/.sqlx/query-849d54b4cf9212010fb4e41ce8137978579ba22eec525912c4aeeb235c3b984c.json b/core/lib/dal/.sqlx/query-849d54b4cf9212010fb4e41ce8137978579ba22eec525912c4aeeb235c3b984c.json deleted file mode 100644 index 5130763af73c..000000000000 --- a/core/lib/dal/.sqlx/query-849d54b4cf9212010fb4e41ce8137978579ba22eec525912c4aeeb235c3b984c.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n MAX(l1_batch_number) AS \"number\"\n FROM\n l1_batches_consensus\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - 
"name": "number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - null - ] - }, - "hash": "849d54b4cf9212010fb4e41ce8137978579ba22eec525912c4aeeb235c3b984c" -} diff --git a/core/lib/dal/.sqlx/query-883be3789994eac050df85056e4987e056c2bf423054e40236aba60f4d3b8a97.json b/core/lib/dal/.sqlx/query-883be3789994eac050df85056e4987e056c2bf423054e40236aba60f4d3b8a97.json new file mode 100644 index 000000000000..a59468bd516c --- /dev/null +++ b/core/lib/dal/.sqlx/query-883be3789994eac050df85056e4987e056c2bf423054e40236aba60f4d3b8a97.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n l1_batch_number\n FROM\n l1_batches_consensus\n ORDER BY\n l1_batch_number DESC\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "883be3789994eac050df85056e4987e056c2bf423054e40236aba60f4d3b8a97" +} diff --git a/core/lib/dal/.sqlx/query-b313ab2b1e0a83136a202ea758c6d2b2e3f2497e6b5f26c72e220397cc0e62f7.json b/core/lib/dal/.sqlx/query-b313ab2b1e0a83136a202ea758c6d2b2e3f2497e6b5f26c72e220397cc0e62f7.json new file mode 100644 index 000000000000..356fd8e9d999 --- /dev/null +++ b/core/lib/dal/.sqlx/query-b313ab2b1e0a83136a202ea758c6d2b2e3f2497e6b5f26c72e220397cc0e62f7.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n l1_batches_consensus_committees (l1_batch_number, attesters, updated_at)\n VALUES\n ($1, $2, NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n l1_batch_number = $1,\n attesters = $2,\n updated_at = NOW()\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "b313ab2b1e0a83136a202ea758c6d2b2e3f2497e6b5f26c72e220397cc0e62f7" +} diff --git a/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.down.sql b/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.down.sql new file mode 100644 index 000000000000..fee0b42079f3 --- /dev/null +++ b/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.down.sql @@ -0,0 +1,3 @@ +ALTER TABLE consensus_replica_state DROP COLUMN global_config; + +DROP TABLE l1_batches_consensus_committees; diff --git a/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.up.sql b/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.up.sql new file mode 100644 index 000000000000..c31952b96465 --- /dev/null +++ b/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.up.sql @@ -0,0 +1,8 @@ +ALTER TABLE consensus_replica_state + ADD COLUMN global_config JSONB NULL; + +CREATE TABLE l1_batches_consensus_committees ( + l1_batch_number BIGINT PRIMARY KEY REFERENCES l1_batches (number) ON DELETE CASCADE, + attesters JSONB NOT NULL, + updated_at TIMESTAMP NOT NULL +); diff --git a/core/lib/dal/src/consensus/mod.rs b/core/lib/dal/src/consensus/mod.rs index 658da6c76821..f0ef336bc543 100644 --- a/core/lib/dal/src/consensus/mod.rs +++ b/core/lib/dal/src/consensus/mod.rs @@ -22,6 +22,36 @@ use zksync_utils::{h256_to_u256, u256_to_h256}; use crate::models::{parse_h160, parse_h256}; +/// Global config of the consensus. 
+#[derive(Debug, PartialEq, Clone)] +pub struct GlobalConfig { + pub genesis: validator::Genesis, + pub registry_address: Option<ethabi::Address>, +} + +impl ProtoFmt for GlobalConfig { + type Proto = proto::GlobalConfig; + + fn read(r: &Self::Proto) -> anyhow::Result<Self> { + Ok(Self { + genesis: read_required(&r.genesis).context("genesis")?, + registry_address: r + .registry_address + .as_ref() + .map(|a| parse_h160(a)) + .transpose() + .context("registry_address")?, + }) + } + + fn build(&self) -> Self::Proto { + Self::Proto { + genesis: Some(self.genesis.build()), + registry_address: self.registry_address.map(|a| a.as_bytes().to_vec()), + } + } +} + /// Global attestation status served by /// `attestationStatus` RPC. #[derive(Debug, PartialEq, Clone)] @@ -469,3 +499,24 @@ impl ProtoRepr for proto::Transaction { } } } + +impl ProtoRepr for proto::AttesterCommittee { + type Type = attester::Committee; + + fn read(&self) -> anyhow::Result<Self::Type> { + let members: Vec<_> = self + .members + .iter() + .enumerate() + .map(|(i, m)| attester::WeightedAttester::read(m).context(i)) + .collect::<Result<_, _>>() + .context("members")?; + Self::Type::new(members) + } + + fn build(this: &Self::Type) -> Self { + Self { + members: this.iter().map(|x| x.build()).collect(), + } + } +} diff --git a/core/lib/dal/src/consensus/proto/mod.proto b/core/lib/dal/src/consensus/proto/mod.proto index ea0c12f1b5f3..da9151f10f4d 100644 --- a/core/lib/dal/src/consensus/proto/mod.proto +++ b/core/lib/dal/src/consensus/proto/mod.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package zksync.dal; import "zksync/roles/validator.proto"; +import "zksync/roles/attester.proto"; message Payload { // zksync-era ProtocolVersionId @@ -117,6 +118,15 @@ message PaymasterParams { optional bytes paymaster_input = 2; // required } +message AttesterCommittee { + repeated roles.attester.WeightedAttester members = 1; // required +} + +message GlobalConfig { + optional roles.validator.Genesis genesis = 1; // required + optional bytes registry_address = 2; // optional; H160 +} + message AttestationStatus { optional roles.validator.GenesisHash genesis = 1; // required optional uint64 next_batch_to_attest = 2; // required diff --git a/core/lib/dal/src/consensus_dal.rs b/core/lib/dal/src/consensus_dal.rs index 8f05cb381777..2dca58e2a6a6 100644 --- a/core/lib/dal/src/consensus_dal.rs +++ b/core/lib/dal/src/consensus_dal.rs @@ -1,5 +1,4 @@ use anyhow::Context as _; -use bigdecimal::Zero as _; use zksync_consensus_roles::{attester, validator}; use zksync_consensus_storage::{BlockStoreState, ReplicaState}; use zksync_db_connection::{ @@ -7,10 +6,10 @@ use zksync_db_connection::{ error::{DalError, DalResult, SqlxContext}, instrument::{InstrumentExt, Instrumented}, }; -use zksync_protobuf::ProtoFmt as _; +use zksync_protobuf::ProtoRepr as _; use zksync_types::L2BlockNumber; -pub use crate::consensus::{AttestationStatus, Payload}; +pub use crate::consensus::{proto, AttestationStatus, GlobalConfig, Payload}; use crate::{Core, CoreDal}; /// Storage access methods for `zksync_core::consensus` module. @@ -33,72 +32,77 @@ pub enum InsertCertificateError { } impl ConsensusDal<'_, '_> { - /// Fetches genesis. - pub async fn genesis(&mut self) -> DalResult<Option<validator::Genesis>> { - Ok(sqlx::query!( + /// Fetch consensus global config. + pub async fn global_config(&mut self) -> anyhow::Result<Option<GlobalConfig>> { + // global_config contains a superset of genesis information. + // genesis column is deprecated and will be removed once the main node + // is fully upgraded. + // For now we keep the information between both columns in sync.
+ let Some(row) = sqlx::query!( r#" SELECT - genesis + genesis, + global_config FROM consensus_replica_state WHERE fake_key "# ) - .try_map(|row| { - let Some(genesis) = row.genesis else { - return Ok(None); - }; - // Deserialize the json, but don't allow for unknown fields. - // We might encounter an unknown fields here in case if support for the previous - // consensus protocol version is removed before the migration to a new version - // is performed. The node should NOT operate in such a state. - Ok(Some( - validator::GenesisRaw::read( - &zksync_protobuf::serde::deserialize_proto_with_options( - &genesis, /*deny_unknown_fields=*/ true, - ) - .decode_column("genesis")?, - ) - .decode_column("genesis")? - .with_hash(), - )) - }) - .instrument("genesis") + .instrument("global_config") .fetch_optional(self.storage) .await? - .flatten()) + else { + return Ok(None); + }; + if let Some(global_config) = row.global_config { + return Ok(Some( + zksync_protobuf::serde::deserialize(&global_config).context("global_config")?, + )); + } + if let Some(genesis) = row.genesis { + let genesis: validator::Genesis = + zksync_protobuf::serde::deserialize(&genesis).context("genesis")?; + return Ok(Some(GlobalConfig { + genesis, + registry_address: None, + })); + } + Ok(None) } - /// Attempts to update the genesis. + /// Attempts to update the global config. /// Fails if the new genesis is invalid. /// Fails if the new genesis has different `chain_id`. /// Fails if the storage contains a newer genesis (higher fork number). - /// Noop if the new genesis is the same as the current one. + /// Noop if the new global config is the same as the current one. /// Resets the stored consensus state otherwise and purges all certificates. - pub async fn try_update_genesis(&mut self, genesis: &validator::Genesis) -> anyhow::Result<()> { + pub async fn try_update_global_config(&mut self, want: &GlobalConfig) -> anyhow::Result<()> { let mut txn = self.storage.start_transaction().await?; - if let Some(got) = txn.consensus_dal().genesis().await? { + if let Some(got) = txn.consensus_dal().global_config().await? { // Exit if the genesis didn't change. 
- if &got == genesis { + if &got == want { return Ok(()); } anyhow::ensure!( - got.chain_id == genesis.chain_id, + got.genesis.chain_id == want.genesis.chain_id, "changing chain_id is not allowed: old = {:?}, new = {:?}", - got.chain_id, - genesis.chain_id, + got.genesis.chain_id, + want.genesis.chain_id, ); anyhow::ensure!( - got.fork_number < genesis.fork_number, + got.genesis.fork_number < want.genesis.fork_number, "transition to a past fork is not allowed: old = {:?}, new = {:?}", - got.fork_number, - genesis.fork_number, + got.genesis.fork_number, + want.genesis.fork_number, ); - genesis.verify().context("genesis.verify()")?; + want.genesis.verify().context("genesis.verify()")?; } let genesis = - zksync_protobuf::serde::serialize(genesis, serde_json::value::Serializer).unwrap(); + zksync_protobuf::serde::serialize(&want.genesis, serde_json::value::Serializer) + .unwrap(); + let global_config = + zksync_protobuf::serde::serialize(want, serde_json::value::Serializer).unwrap(); let state = zksync_protobuf::serde::serialize( &ReplicaState::default(), serde_json::value::Serializer, @@ -131,14 +135,15 @@ impl ConsensusDal<'_, '_> { sqlx::query!( r#" INSERT INTO - consensus_replica_state (fake_key, genesis, state) + consensus_replica_state (fake_key, global_config, genesis, state) VALUES - (TRUE, $1, $2) + (TRUE, $1, $2, $3) "#, + global_config, genesis, state, ) - .instrument("try_update_genesis#INSERT INTO consenuss_replica_state") + .instrument("try_update_global_config#INSERT INTO consensus_replica_state") .execute(&mut txn) .await?; txn.commit().await?; @@ -154,25 +159,33 @@ impl ConsensusDal<'_, '_> { .start_transaction() .await .context("start_transaction")?; - let Some(old) = txn.consensus_dal().genesis().await.context("genesis()")? else { + let Some(old) = txn + .consensus_dal() + .global_config() + .await + .context("global_config()")? + else { return Ok(()); }; - let new = validator::GenesisRaw { - chain_id: old.chain_id, - fork_number: old.fork_number.next(), - first_block: txn - .consensus_dal() - .next_block() - .await - .context("next_block()")?, - - protocol_version: old.protocol_version, - validators: old.validators.clone(), - attesters: old.attesters.clone(), - leader_selection: old.leader_selection.clone(), - } - .with_hash(); - txn.consensus_dal().try_update_genesis(&new).await?; + let new = GlobalConfig { + genesis: validator::GenesisRaw { + chain_id: old.genesis.chain_id, + fork_number: old.genesis.fork_number.next(), + first_block: txn + .consensus_dal() + .next_block() + .await + .context("next_block()")?, + + protocol_version: old.genesis.protocol_version, + validators: old.genesis.validators.clone(), + attesters: old.genesis.attesters.clone(), + leader_selection: old.genesis.leader_selection.clone(), + } + .with_hash(), + registry_address: old.registry_address, + }; + txn.consensus_dal().try_update_global_config(&new).await?; txn.commit().await?; Ok(()) } @@ -259,7 +272,12 @@ impl ConsensusDal<'_, '_> { /// so it might NOT be the certificate for the last L2 block. pub async fn block_certificates_range(&mut self) -> anyhow::Result { // It cannot be older than genesis first block. - let mut start = self.genesis().await?.context("genesis()")?.first_block; + let mut start = self + .global_config() + .await? + .context("genesis()")? + .genesis + .first_block; start = start.max(self.first_block().await.context("first_block()")?); let row = sqlx::query!( r#" @@ -422,21 +440,96 @@ impl ConsensusDal<'_, '_> { Ok(()) } + /// Persist the attester committee for the given batch. 
+ pub async fn upsert_attester_committee( + &mut self, + number: attester::BatchNumber, + committee: &attester::Committee, + ) -> anyhow::Result<()> { + let committee = proto::AttesterCommittee::build(committee); + let committee = + zksync_protobuf::serde::serialize_proto(&committee, serde_json::value::Serializer) + .unwrap(); + sqlx::query!( + r#" + INSERT INTO + l1_batches_consensus_committees (l1_batch_number, attesters, updated_at) + VALUES + ($1, $2, NOW()) + ON CONFLICT (l1_batch_number) DO + UPDATE + SET + l1_batch_number = $1, + attesters = $2, + updated_at = NOW() + "#, + i64::try_from(number.0).context("overflow")?, + committee + ) + .instrument("upsert_attester_committee") + .report_latency() + .execute(self.storage) + .await?; + Ok(()) + } + + /// Fetches the attester committee for the L1 batch with the given number. + pub async fn attester_committee( + &mut self, + n: attester::BatchNumber, + ) -> anyhow::Result<Option<attester::Committee>> { + let Some(row) = sqlx::query!( + r#" + SELECT + attesters + FROM + l1_batches_consensus_committees + WHERE + l1_batch_number = $1 + "#, + i64::try_from(n.0)? + ) + .instrument("attester_committee") + .report_latency() + .fetch_optional(self.storage) + .await? + else { + return Ok(None); + }; + let raw = zksync_protobuf::serde::deserialize_proto(&row.attesters) + .context("deserialize_proto()")?; + Ok(Some( + proto::AttesterCommittee::read(&raw).context("read()")?, + )) + } + /// Inserts a certificate for the L1 batch. /// Noop if a certificate for the same L1 batch is already present. - /// No verification is performed - it cannot be performed due to circular dependency on + /// Verification against previously stored attester committee is performed. + /// Batch hash is not verified - it cannot be performed due to circular dependency on /// `zksync_l1_contract_interface`. pub async fn insert_batch_certificate( &mut self, cert: &attester::BatchQC, ) -> anyhow::Result<()> { - let res = sqlx::query!( + let cfg = self + .global_config() + .await + .context("global_config()")? + .context("genesis is missing")?; + let committee = self + .attester_committee(cert.message.number) + .await + .context("attester_committee()")? + .context("attester committee is missing")?; + cert.verify(cfg.genesis.hash(), &committee) + .context("cert.verify()")?; + sqlx::query!( r#" INSERT INTO - l1_batches_consensus (l1_batch_number, certificate, created_at, updated_at) + l1_batches_consensus (l1_batch_number, certificate, updated_at, created_at) VALUES ($1, $2, NOW(), NOW()) - ON CONFLICT (l1_batch_number) DO NOTHING "#, i64::try_from(cert.message.number.0).context("overflow")?, // Unwrap is ok, because serialization should always succeed. @@ -446,9 +539,6 @@ impl ConsensusDal<'_, '_> { .report_latency() .execute(self.storage) .await?; - if res.rows_affected().is_zero() { - tracing::debug!(l1_batch_number = ?cert.message.number, "duplicate batch certificate"); - } Ok(()) } @@ -457,24 +547,28 @@ impl ConsensusDal<'_, '_> { pub async fn last_batch_certificate_number( &mut self, ) -> anyhow::Result<Option<attester::BatchNumber>> { - let row = sqlx::query!( + let Some(row) = sqlx::query!( r#" SELECT - MAX(l1_batch_number) AS "number" + l1_batch_number FROM l1_batches_consensus + ORDER BY + l1_batch_number DESC + LIMIT + 1 "# ) .instrument("last_batch_certificate_number") .report_latency() - .fetch_one(self.storage) - .await?; - - let Some(n) = row.number else { + .fetch_optional(self.storage) + .await?
+ else { return Ok(None); }; + Ok(Some(attester::BatchNumber( - n.try_into().context("overflow")?, + row.l1_batch_number.try_into().context("overflow")?, ))) } @@ -529,7 +623,7 @@ impl ConsensusDal<'_, '_> { /// This is a main node only query. /// ENs should call the attestation_status RPC of the main node. pub async fn attestation_status(&mut self) -> anyhow::Result> { - let Some(genesis) = self.genesis().await.context("genesis()")? else { + let Some(cfg) = self.global_config().await.context("genesis()")? else { return Ok(None); }; let Some(next_batch_to_attest) = async { @@ -542,18 +636,21 @@ impl ConsensusDal<'_, '_> { return Ok(Some(last + 1)); } // Otherwise start with the batch containing the first block of the fork. - self.batch_of_block(genesis.first_block) + self.batch_of_block(cfg.genesis.first_block) .await .context("batch_of_block()") } .await? else { - tracing::info!(%genesis.first_block, "genesis block not found"); + tracing::info!(%cfg.genesis.first_block, "genesis block not found"); return Ok(None); }; Ok(Some(AttestationStatus { - genesis: genesis.hash(), - next_batch_to_attest, + genesis: cfg.genesis.hash(), + // We never attest batch 0 for technical reasons: + // * it is not supported to read state before batch 0. + // * the registry contract needs to be deployed before we can start operating on it + next_batch_to_attest: next_batch_to_attest.max(attester::BatchNumber(1)), })) } } @@ -563,8 +660,9 @@ mod tests { use rand::Rng as _; use zksync_consensus_roles::{attester, validator}; use zksync_consensus_storage::ReplicaState; - use zksync_types::{L1BatchNumber, ProtocolVersion}; + use zksync_types::ProtocolVersion; + use super::GlobalConfig; use crate::{ tests::{create_l1_batch_header, create_l2_block_header}, ConnectionPool, Core, CoreDal, @@ -575,19 +673,22 @@ mod tests { let rng = &mut rand::thread_rng(); let pool = ConnectionPool::::test_pool().await; let mut conn = pool.connection().await.unwrap(); - assert_eq!(None, conn.consensus_dal().genesis().await.unwrap()); + assert_eq!(None, conn.consensus_dal().global_config().await.unwrap()); for n in 0..3 { let setup = validator::testonly::Setup::new(rng, 3); let mut genesis = (*setup.genesis).clone(); genesis.fork_number = validator::ForkNumber(n); - let genesis = genesis.with_hash(); + let cfg = GlobalConfig { + genesis: genesis.with_hash(), + registry_address: Some(rng.gen()), + }; conn.consensus_dal() - .try_update_genesis(&genesis) + .try_update_global_config(&cfg) .await .unwrap(); assert_eq!( - genesis, - conn.consensus_dal().genesis().await.unwrap().unwrap() + cfg, + conn.consensus_dal().global_config().await.unwrap().unwrap() ); assert_eq!( ReplicaState::default(), @@ -597,8 +698,8 @@ mod tests { let want: ReplicaState = rng.gen(); conn.consensus_dal().set_replica_state(&want).await.unwrap(); assert_eq!( - genesis, - conn.consensus_dal().genesis().await.unwrap().unwrap() + cfg, + conn.consensus_dal().global_config().await.unwrap().unwrap() ); assert_eq!(want, conn.consensus_dal().replica_state().await.unwrap()); } @@ -608,14 +709,32 @@ mod tests { #[tokio::test] async fn test_batch_certificate() { let rng = &mut rand::thread_rng(); + let setup = validator::testonly::Setup::new(rng, 3); let pool = ConnectionPool::::test_pool().await; let mut conn = pool.connection().await.unwrap(); + let cfg = GlobalConfig { + genesis: setup.genesis.clone(), + registry_address: Some(rng.gen()), + }; + conn.consensus_dal() + .try_update_global_config(&cfg) + .await + .unwrap(); - let mut mock_batch_qc = |number: L1BatchNumber| { - 
let mut cert: attester::BatchQC = rng.gen(); - cert.message.number.0 = u64::from(number.0); - cert.signatures.add(rng.gen(), rng.gen()); - cert + let mut make_cert = |number: attester::BatchNumber| { + let m = attester::Batch { + genesis: setup.genesis.hash(), + hash: rng.gen(), + number, + }; + let mut sigs = attester::MultiSig::default(); + for k in &setup.attester_keys { + sigs.add(k.public(), k.sign_msg(m.clone()).sig); + } + attester::BatchQC { + message: m, + signatures: sigs, + } }; // Required for inserting l2 blocks @@ -627,8 +746,7 @@ mod tests { // Insert some mock L2 blocks and L1 batches let mut block_number = 0; let mut batch_number = 0; - let num_batches = 3; - for _ in 0..num_batches { + for _ in 0..3 { for _ in 0..3 { block_number += 1; let l2_block = create_l2_block_header(block_number); @@ -636,64 +754,56 @@ mod tests { } batch_number += 1; let l1_batch = create_l1_batch_header(batch_number); - conn.blocks_dal() .insert_mock_l1_batch(&l1_batch) .await .unwrap(); - conn.blocks_dal() .mark_l2_blocks_as_executed_in_l1_batch(l1_batch.number) .await .unwrap(); } - let l1_batch_number = L1BatchNumber(batch_number); + let n = attester::BatchNumber(batch_number.into()); // Insert a batch certificate for the last L1 batch. - let cert1 = mock_batch_qc(l1_batch_number); - + let want = make_cert(n); conn.consensus_dal() - .insert_batch_certificate(&cert1) + .upsert_attester_committee(n, setup.genesis.attesters.as_ref().unwrap()) .await .unwrap(); - - // Try insert duplicate batch certificate for the same batch. - let cert2 = mock_batch_qc(l1_batch_number); - conn.consensus_dal() - .insert_batch_certificate(&cert2) + .insert_batch_certificate(&want) .await .unwrap(); + // Reinserting a cert should fail. + assert!(conn + .consensus_dal() + .insert_batch_certificate(&make_cert(n)) + .await + .is_err()); + // Retrieve the latest certificate. - let number = conn + let got_n = conn .consensus_dal() .last_batch_certificate_number() .await .unwrap() .unwrap(); - - let cert = conn + let got = conn .consensus_dal() - .batch_certificate(number) + .batch_certificate(got_n) .await .unwrap() .unwrap(); - - assert_eq!(cert, cert1, "duplicates are ignored"); + assert_eq!(got, want); // Try insert batch certificate for non-existing batch - let cert3 = mock_batch_qc(l1_batch_number.next()); - conn.consensus_dal() - .insert_batch_certificate(&cert3) - .await - .expect_err("missing payload"); - - // Insert one more L1 batch without a certificate. 
- conn.blocks_dal() - .insert_mock_l1_batch(&create_l1_batch_header(batch_number + 1)) + assert!(conn + .consensus_dal() + .insert_batch_certificate(&make_cert(n.next())) .await - .unwrap(); + .is_err()); } } diff --git a/core/lib/env_config/src/contracts.rs b/core/lib/env_config/src/contracts.rs index 3365f56add77..298c43b80ccd 100644 --- a/core/lib/env_config/src/contracts.rs +++ b/core/lib/env_config/src/contracts.rs @@ -89,6 +89,7 @@ CONTRACTS_L2_ERC20_BRIDGE_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L1_WETH_BRIDGE_PROXY_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L2_WETH_BRIDGE_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L2_TESTNET_PAYMASTER_ADDR="FC073319977e314F251EAE6ae6bE76B0B3BAeeCF" +CONTRACTS_L2_CONSENSUS_REGISTRY_ADDR="D64e136566a9E04eb05B30184fF577F52682D182" CONTRACTS_L1_MULTICALL3_ADDR="0xcA11bde05977b3631167028862bE2a173976CA11" CONTRACTS_L1_SHARED_BRIDGE_PROXY_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L2_SHARED_BRIDGE_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" diff --git a/core/lib/protobuf_config/src/consensus.rs b/core/lib/protobuf_config/src/consensus.rs index b57f033d0d22..f5eb5c5b2f10 100644 --- a/core/lib/protobuf_config/src/consensus.rs +++ b/core/lib/protobuf_config/src/consensus.rs @@ -6,7 +6,7 @@ use zksync_config::configs::consensus::{ }; use zksync_protobuf::{kB, read_optional, repr::ProtoRepr, required, ProtoFmt}; -use crate::{proto::consensus as proto, read_optional_repr}; +use crate::{parse_h160, proto::consensus as proto, read_optional_repr}; impl ProtoRepr for proto::WeightedValidator { type Type = WeightedValidator; @@ -65,6 +65,12 @@ impl ProtoRepr for proto::GenesisSpec { .collect::>() .context("attesters")?, leader: ValidatorPublicKey(required(&self.leader).context("leader")?.clone()), + registry_address: self + .registry_address + .as_ref() + .map(|x| parse_h160(x)) + .transpose() + .context("registry_address")?, }) } fn build(this: &Self::Type) -> Self { @@ -74,6 +80,7 @@ impl ProtoRepr for proto::GenesisSpec { validators: this.validators.iter().map(ProtoRepr::build).collect(), attesters: this.attesters.iter().map(ProtoRepr::build).collect(), leader: Some(this.leader.0.clone()), + registry_address: this.registry_address.map(|a| format!("{:?}", a)), } } } diff --git a/core/lib/protobuf_config/src/proto/core/consensus.proto b/core/lib/protobuf_config/src/proto/core/consensus.proto index c64c993be7c8..835ead1ab65c 100644 --- a/core/lib/protobuf_config/src/proto/core/consensus.proto +++ b/core/lib/protobuf_config/src/proto/core/consensus.proto @@ -56,6 +56,8 @@ message GenesisSpec { repeated WeightedValidator validators = 3; // must be non-empty; validator committee. optional string leader = 4; // required; ValidatorPublicKey repeated WeightedAttester attesters = 5; // can be empty; attester committee. + // Currently not in consensus genesis, but still a part of the global configuration. + optional string registry_address = 6; // optional; H160 } // Per peer connection RPC rate limits. diff --git a/core/lib/types/src/api/en.rs b/core/lib/types/src/api/en.rs index bf26caddd07b..9391c8627573 100644 --- a/core/lib/types/src/api/en.rs +++ b/core/lib/types/src/api/en.rs @@ -44,10 +44,23 @@ pub struct SyncBlock { pub protocol_version: ProtocolVersionId, } +/// Global configuration of the consensus served by the main node to the external nodes. +/// In particular, it contains consensus genesis. +/// +/// The wrapped JSON value corresponds to `zksync_dal::consensus::GlobalConfig`. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ConsensusGlobalConfig(pub serde_json::Value); + +/// [DEPRECATED] Genesis served by the main node to the external nodes. +/// This type is deprecated since ConsensusGlobalConfig also contains genesis and is extensible. +/// +/// The wrapped JSON value corresponds to `zksync_consensus_roles::validator::Genesis`. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct ConsensusGenesis(pub serde_json::Value); /// AttestationStatus maintained by the main node. /// Used for testing L1 batch signing by consensus attesters. +/// +/// The wrapped JSON value corresponds to `zksync_dal::consensus::AttestationStatus`. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct AttestationStatus(pub serde_json::Value); diff --git a/core/lib/web3_decl/src/error.rs b/core/lib/web3_decl/src/error.rs index f42fe8de59d5..3aa16a9ab77c 100644 --- a/core/lib/web3_decl/src/error.rs +++ b/core/lib/web3_decl/src/error.rs @@ -60,6 +60,19 @@ pub struct EnrichedClientError { args: HashMap<&'static str, String>, } +/// Whether the error should be considered retriable. +pub fn is_retriable(err: &ClientError) -> bool { + match err { + ClientError::Transport(_) | ClientError::RequestTimeout => true, + ClientError::Call(err) => { + // At least some RPC providers use "internal error" in case of the server being overloaded + err.code() == ErrorCode::ServerIsBusy.code() + || err.code() == ErrorCode::InternalError.code() + } + _ => false, + } +} + /// Alias for a result with enriched client RPC error. pub type EnrichedClientResult = Result; @@ -87,15 +100,7 @@ impl EnrichedClientError { /// Whether the error should be considered retriable. pub fn is_retriable(&self) -> bool { - match self.as_ref() { - ClientError::Transport(_) | ClientError::RequestTimeout => true, - ClientError::Call(err) => { - // At least some RPC providers use "internal error" in case of the server being overloaded - err.code() == ErrorCode::ServerIsBusy.code() - || err.code() == ErrorCode::InternalError.code() - } - _ => false, - } + is_retriable(&self.inner_error) } } diff --git a/core/lib/web3_decl/src/namespaces/en.rs b/core/lib/web3_decl/src/namespaces/en.rs index dac774dd7bdf..8a4d2db8c6fe 100644 --- a/core/lib/web3_decl/src/namespaces/en.rs +++ b/core/lib/web3_decl/src/namespaces/en.rs @@ -25,6 +25,9 @@ pub trait EnNamespace { #[method(name = "consensusGenesis")] async fn consensus_genesis(&self) -> RpcResult>; + #[method(name = "consensusGlobalConfig")] + async fn consensus_global_config(&self) -> RpcResult>; + /// Lists all tokens created at or before the specified `block_number`. /// /// This method is used by EN after snapshot recovery in order to recover token records. diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index 086a75c81de9..f247313db2b1 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -25,7 +25,7 @@ use super::{ /// Please don't add fields that duplicate `SystemEnv` or `L1BatchEnv` information, since both of these /// are also provided to an executor. #[derive(Debug)] -pub(crate) struct TxExecutionArgs { +pub struct TxExecutionArgs { /// Transaction / call itself. pub transaction: Transaction, /// Nonce override for the initiator account. @@ -80,7 +80,7 @@ impl TxExecutionArgs { } #[derive(Debug, Clone)] -pub(crate) struct TransactionExecutionOutput { +pub struct TransactionExecutionOutput { /// Output of the VM. 
pub vm: VmExecutionResultAndLogs, /// Execution metrics. @@ -91,7 +91,7 @@ pub(crate) struct TransactionExecutionOutput { /// Executor of transactions. #[derive(Debug)] -pub(crate) enum TransactionExecutor { +pub enum TransactionExecutor { Real(MainOneshotExecutor), #[doc(hidden)] // Intended for tests only Mock(MockOneshotExecutor), diff --git a/core/node/api_server/src/execution_sandbox/mod.rs b/core/node/api_server/src/execution_sandbox/mod.rs index f2a3f0e5f8c3..faaccf03c96a 100644 --- a/core/node/api_server/src/execution_sandbox/mod.rs +++ b/core/node/api_server/src/execution_sandbox/mod.rs @@ -16,10 +16,10 @@ use zksync_types::{ api, fee_model::BatchFeeInput, AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId, }; +pub use self::execute::{TransactionExecutor, TxExecutionArgs}; use self::vm_metrics::SandboxStage; pub(super) use self::{ error::SandboxExecutionError, - execute::{TransactionExecutor, TxExecutionArgs}, tracers::ApiTracer, validate::ValidationError, vm_metrics::{SubmitTxStage, SANDBOX_METRICS}, @@ -158,7 +158,7 @@ async fn get_pending_state( /// Arguments for VM execution necessary to set up storage and environment. #[derive(Debug, Clone)] -pub(crate) struct TxSetupArgs { +pub struct TxSetupArgs { pub execution_mode: TxExecutionMode, pub operator_account: AccountTreeId, pub fee_input: BatchFeeInput, @@ -215,7 +215,7 @@ impl BlockStartInfoInner { /// Information about first L1 batch / L2 block in the node storage. #[derive(Debug, Clone)] -pub(crate) struct BlockStartInfo { +pub struct BlockStartInfo { cached_pruning_info: Arc>, max_cache_age: Duration, } @@ -331,7 +331,7 @@ impl BlockStartInfo { } #[derive(Debug, thiserror::Error)] -pub(crate) enum BlockArgsError { +pub enum BlockArgsError { #[error("Block is pruned; first retained block is {0}")] Pruned(L2BlockNumber), #[error("Block is missing, but can appear in the future")] @@ -342,7 +342,7 @@ pub(crate) enum BlockArgsError { /// Information about a block provided to VM. #[derive(Debug, Clone, Copy)] -pub(crate) struct BlockArgs { +pub struct BlockArgs { block_id: api::BlockId, resolved_block_number: L2BlockNumber, l1_batch_timestamp_s: Option, diff --git a/core/node/api_server/src/execution_sandbox/tracers.rs b/core/node/api_server/src/execution_sandbox/tracers.rs index 31384b7a0898..6fdc3dbc7b62 100644 --- a/core/node/api_server/src/execution_sandbox/tracers.rs +++ b/core/node/api_server/src/execution_sandbox/tracers.rs @@ -11,7 +11,7 @@ use zksync_types::ProtocolVersionId; /// Custom tracers supported by the API sandbox. 
#[derive(Debug)] -pub(crate) enum ApiTracer { +pub enum ApiTracer { CallTracer(Arc>>), Validation { params: ValidationTracerParams, diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index 5f913e305cd0..f0d96118638b 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -140,6 +140,38 @@ impl MultiVMBaseSystemContracts { } } } + + pub fn load_estimate_gas_blocking() -> Self { + Self { + pre_virtual_blocks: BaseSystemContracts::estimate_gas_pre_virtual_blocks(), + post_virtual_blocks: BaseSystemContracts::estimate_gas_post_virtual_blocks(), + post_virtual_blocks_finish_upgrade_fix: + BaseSystemContracts::estimate_gas_post_virtual_blocks_finish_upgrade_fix(), + post_boojum: BaseSystemContracts::estimate_gas_post_boojum(), + post_allowlist_removal: BaseSystemContracts::estimate_gas_post_allowlist_removal(), + post_1_4_1: BaseSystemContracts::estimate_gas_post_1_4_1(), + post_1_4_2: BaseSystemContracts::estimate_gas_post_1_4_2(), + vm_1_5_0_small_memory: BaseSystemContracts::estimate_gas_1_5_0_small_memory(), + vm_1_5_0_increased_memory: + BaseSystemContracts::estimate_gas_post_1_5_0_increased_memory(), + } + } + + pub fn load_eth_call_blocking() -> Self { + Self { + pre_virtual_blocks: BaseSystemContracts::playground_pre_virtual_blocks(), + post_virtual_blocks: BaseSystemContracts::playground_post_virtual_blocks(), + post_virtual_blocks_finish_upgrade_fix: + BaseSystemContracts::playground_post_virtual_blocks_finish_upgrade_fix(), + post_boojum: BaseSystemContracts::playground_post_boojum(), + post_allowlist_removal: BaseSystemContracts::playground_post_allowlist_removal(), + post_1_4_1: BaseSystemContracts::playground_post_1_4_1(), + post_1_4_2: BaseSystemContracts::playground_post_1_4_2(), + vm_1_5_0_small_memory: BaseSystemContracts::playground_1_5_0_small_memory(), + vm_1_5_0_increased_memory: BaseSystemContracts::playground_post_1_5_0_increased_memory( + ), + } + } } /// Smart contracts to be used in the API sandbox requests, e.g. for estimating gas and @@ -169,32 +201,8 @@ impl ApiContracts { /// Blocking version of [`Self::load_from_disk()`]. 
pub fn load_from_disk_blocking() -> Self { Self { - estimate_gas: MultiVMBaseSystemContracts { - pre_virtual_blocks: BaseSystemContracts::estimate_gas_pre_virtual_blocks(), - post_virtual_blocks: BaseSystemContracts::estimate_gas_post_virtual_blocks(), - post_virtual_blocks_finish_upgrade_fix: - BaseSystemContracts::estimate_gas_post_virtual_blocks_finish_upgrade_fix(), - post_boojum: BaseSystemContracts::estimate_gas_post_boojum(), - post_allowlist_removal: BaseSystemContracts::estimate_gas_post_allowlist_removal(), - post_1_4_1: BaseSystemContracts::estimate_gas_post_1_4_1(), - post_1_4_2: BaseSystemContracts::estimate_gas_post_1_4_2(), - vm_1_5_0_small_memory: BaseSystemContracts::estimate_gas_1_5_0_small_memory(), - vm_1_5_0_increased_memory: - BaseSystemContracts::estimate_gas_post_1_5_0_increased_memory(), - }, - eth_call: MultiVMBaseSystemContracts { - pre_virtual_blocks: BaseSystemContracts::playground_pre_virtual_blocks(), - post_virtual_blocks: BaseSystemContracts::playground_post_virtual_blocks(), - post_virtual_blocks_finish_upgrade_fix: - BaseSystemContracts::playground_post_virtual_blocks_finish_upgrade_fix(), - post_boojum: BaseSystemContracts::playground_post_boojum(), - post_allowlist_removal: BaseSystemContracts::playground_post_allowlist_removal(), - post_1_4_1: BaseSystemContracts::playground_post_1_4_1(), - post_1_4_2: BaseSystemContracts::playground_post_1_4_2(), - vm_1_5_0_small_memory: BaseSystemContracts::playground_1_5_0_small_memory(), - vm_1_5_0_increased_memory: - BaseSystemContracts::playground_post_1_5_0_increased_memory(), - }, + estimate_gas: MultiVMBaseSystemContracts::load_estimate_gas_blocking(), + eth_call: MultiVMBaseSystemContracts::load_eth_call_blocking(), } } } @@ -1003,7 +1011,7 @@ impl TxSender { .await } - pub(super) async fn eth_call( + pub async fn eth_call( &self, block_args: BlockArgs, call_overrides: CallOverrides, diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs index c3e116d39928..de7635263735 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs @@ -19,6 +19,12 @@ impl EnNamespaceServer for EnNamespace { .map_err(|err| self.current_method().map_err(err)) } + async fn consensus_global_config(&self) -> RpcResult<Option<en::ConsensusGlobalConfig>> { + self.consensus_global_config_impl() + .await + .map_err(|err| self.current_method().map_err(err)) + } + async fn consensus_genesis(&self) -> RpcResult<Option<en::ConsensusGenesis>> { self.consensus_genesis_impl() .await diff --git a/core/node/api_server/src/web3/namespaces/en.rs b/core/node/api_server/src/web3/namespaces/en.rs index ca15352fd1ac..26f4aa2b0b5f 100644 --- a/core/node/api_server/src/web3/namespaces/en.rs +++ b/core/node/api_server/src/web3/namespaces/en.rs @@ -21,18 +21,35 @@ impl EnNamespace { Self { state } } + pub async fn consensus_global_config_impl( + &self, + ) -> Result<Option<en::ConsensusGlobalConfig>, Web3Error> { + let mut conn = self.state.acquire_connection().await?; + let Some(cfg) = conn + .consensus_dal() + .global_config() + .await + .context("global_config()")?
+ else { + return Ok(None); + }; + Ok(Some(en::ConsensusGlobalConfig( + zksync_protobuf::serde::serialize(&cfg, serde_json::value::Serializer).unwrap(), + ))) + } + pub async fn consensus_genesis_impl(&self) -> Result, Web3Error> { let mut conn = self.state.acquire_connection().await?; - let Some(genesis) = conn + let Some(cfg) = conn .consensus_dal() - .genesis() + .global_config() .await - .map_err(DalError::generalize)? + .context("global_config()")? else { return Ok(None); }; Ok(Some(en::ConsensusGenesis( - zksync_protobuf::serde::serialize(&genesis, serde_json::value::Serializer).unwrap(), + zksync_protobuf::serde::serialize(&cfg.genesis, serde_json::value::Serializer).unwrap(), ))) } @@ -40,7 +57,7 @@ impl EnNamespace { pub async fn attestation_status_impl( &self, ) -> Result, Web3Error> { - let status = self + let Some(status) = self .state .acquire_connection() .await? @@ -54,13 +71,13 @@ impl EnNamespace { .context("TransactionBuilder::build()")? .consensus_dal() .attestation_status() - .await?; - - Ok(status.map(|s| { - en::AttestationStatus( - zksync_protobuf::serde::serialize(&s, serde_json::value::Serializer).unwrap(), - ) - })) + .await? + else { + return Ok(None); + }; + Ok(Some(en::AttestationStatus( + zksync_protobuf::serde::serialize(&status, serde_json::value::Serializer).unwrap(), + ))) } pub(crate) fn current_method(&self) -> &MethodTracer { diff --git a/core/node/consensus/Cargo.toml b/core/node/consensus/Cargo.toml index e82969dae6c6..ba52892584d2 100644 --- a/core/node/consensus/Cargo.toml +++ b/core/node/consensus/Cargo.toml @@ -11,6 +11,7 @@ keywords.workspace = true categories.workspace = true [dependencies] +zksync_basic_types.workspace = true zksync_config.workspace = true zksync_concurrency.workspace = true zksync_consensus_crypto.workspace = true @@ -20,6 +21,7 @@ zksync_consensus_storage.workspace = true zksync_consensus_executor.workspace = true zksync_consensus_bft.workspace = true zksync_consensus_utils.workspace = true +zksync_contracts.workspace = true zksync_protobuf.workspace = true zksync_dal.workspace = true zksync_l1_contract_interface.workspace = true @@ -31,22 +33,27 @@ zksync_system_constants.workspace = true zksync_types.workspace = true zksync_utils.workspace = true zksync_web3_decl.workspace = true - +zksync_node_api_server.workspace = true +zksync_state.workspace = true +zksync_storage.workspace = true +zksync_vm_interface.workspace = true +zksync_multivm.workspace = true anyhow.workspace = true async-trait.workspace = true secrecy.workspace = true tempfile.workspace = true thiserror.workspace = true tracing.workspace = true +hex.workspace = true tokio.workspace = true +jsonrpsee.workspace = true +semver.workspace = true [dev-dependencies] zksync_node_genesis.workspace = true zksync_node_test_utils.workspace = true zksync_node_api_server.workspace = true zksync_test_account.workspace = true -zksync_contracts.workspace = true -tokio.workspace = true test-casing.workspace = true rand.workspace = true diff --git a/core/node/consensus/src/abi.rs b/core/node/consensus/src/abi.rs new file mode 100644 index 000000000000..0e2200e28038 --- /dev/null +++ b/core/node/consensus/src/abi.rs @@ -0,0 +1,133 @@ +//! Strongly-typed API for Consensus-related solidity contracts. +//! Placeholder until we can depend on alloy_sol_types. +use anyhow::Context as _; +use zksync_types::{ethabi, ethabi::Token}; + +/// Strongly typed representation of a contract function. +/// It also represents the inputs of the function. 
+pub trait Function { + /// Name of the solidity function. + const NAME: &'static str; + /// Type representing contract this function belongs to. + type Contract: AsRef; + /// Typ representing outputs of this function. + type Outputs; + /// Encodes this struct to inputs of this function. + fn encode(&self) -> Vec; + /// Decodes outputs of this function. + fn decode_outputs(outputs: Vec) -> anyhow::Result; +} + +/// Address of contract C. It is just a wrapper of ethabi::Address, +/// just additionally indicating what contract is deployed under this address. +#[derive(Debug)] +pub struct Address(ethabi::Address, std::marker::PhantomData); + +impl Clone for Address { + fn clone(&self) -> Self { + *self + } +} + +impl Copy for Address {} + +impl PartialEq for Address { + fn eq(&self, other: &Self) -> bool { + self.0.eq(&other.0) + } +} + +impl Eq for Address {} + +impl Address { + pub fn new(address: ethabi::Address) -> Self { + Self(address, std::marker::PhantomData) + } +} + +impl std::ops::Deref for Address { + type Target = ethabi::Address; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +/// Represents a call to the function F. +#[derive(Debug)] +pub struct Call { + /// Contract of the function. + pub contract: F::Contract, + /// Inputs to the function. + pub inputs: F, +} + +impl Call { + pub(super) fn function(&self) -> ðabi::Function { + self.contract.as_ref().function(F::NAME).unwrap() + } + /// Converts the call to raw calldata. + pub fn calldata(&self) -> ethabi::Result { + self.function().encode_input(&self.inputs.encode()) + } + /// Parses the outputs of the call. + pub fn decode_outputs(&self, outputs: &[u8]) -> anyhow::Result { + F::decode_outputs( + self.function() + .decode_output(outputs) + .context("decode_output()")?, + ) + } +} + +pub(crate) fn into_fixed_bytes(t: Token) -> anyhow::Result<[u8; N]> { + match t { + Token::FixedBytes(b) => b.try_into().ok().context("bad size"), + bad => anyhow::bail!("want fixed_bytes, got {bad:?}"), + } +} + +pub(crate) fn into_tuple(t: Token) -> anyhow::Result<[Token; N]> { + match t { + Token::Tuple(ts) => ts.try_into().ok().context("bad size"), + bad => anyhow::bail!("want tuple, got {bad:?}"), + } +} + +pub(crate) fn into_uint>(t: Token) -> anyhow::Result { + match t { + Token::Uint(i) => i.try_into().ok().context("overflow"), + bad => anyhow::bail!("want uint, got {bad:?}"), + } +} + +#[cfg(test)] +fn example(t: ðabi::ParamType) -> Token { + use ethabi::ParamType as T; + match t { + T::Address => Token::Address(ethabi::Address::default()), + T::Bytes => Token::Bytes(ethabi::Bytes::default()), + T::Int(_) => Token::Int(ethabi::Int::default()), + T::Uint(_) => Token::Uint(ethabi::Uint::default()), + T::Bool => Token::Bool(bool::default()), + T::String => Token::String(String::default()), + T::Array(t) => Token::Array(vec![example(t)]), + T::FixedBytes(n) => Token::FixedBytes(vec![0; *n]), + T::FixedArray(t, n) => Token::FixedArray(vec![example(t); *n]), + T::Tuple(ts) => Token::Tuple(ts.iter().map(example).collect()), + } +} + +#[cfg(test)] +impl Call { + pub(crate) fn test(&self) -> anyhow::Result<()> { + self.calldata().context("calldata()")?; + F::decode_outputs( + self.function() + .outputs + .iter() + .map(|p| example(&p.kind)) + .collect(), + )?; + Ok(()) + } +} diff --git a/core/node/consensus/src/config.rs b/core/node/consensus/src/config.rs index c2fa13472066..22f8fc01192f 100644 --- a/core/node/consensus/src/config.rs +++ b/core/node/consensus/src/config.rs @@ -11,6 +11,8 @@ use zksync_config::{ use 
zksync_consensus_crypto::{Text, TextFmt}; use zksync_consensus_executor as executor; use zksync_consensus_roles::{attester, node, validator}; +use zksync_dal::consensus_dal; +use zksync_types::ethabi; fn read_secret_text(text: Option<&Secret>) -> anyhow::Result> { text.map(|text| Text::new(text.expose_secret()).decode()) @@ -41,16 +43,18 @@ pub(super) struct GenesisSpec { pub(super) validators: validator::Committee, pub(super) attesters: Option, pub(super) leader_selection: validator::LeaderSelectionMode, + pub(super) registry_address: Option, } impl GenesisSpec { - pub(super) fn from_genesis(g: &validator::Genesis) -> Self { + pub(super) fn from_global_config(cfg: &consensus_dal::GlobalConfig) -> Self { Self { - chain_id: g.chain_id, - protocol_version: g.protocol_version, - validators: g.validators.clone(), - attesters: g.attesters.clone(), - leader_selection: g.leader_selection.clone(), + chain_id: cfg.genesis.chain_id, + protocol_version: cfg.genesis.protocol_version, + validators: cfg.genesis.validators.clone(), + attesters: cfg.genesis.attesters.clone(), + leader_selection: cfg.genesis.leader_selection.clone(), + registry_address: cfg.registry_address, } } @@ -93,6 +97,7 @@ impl GenesisSpec { } else { Some(attester::Committee::new(attesters).context("attesters")?) }, + registry_address: x.registry_address, }) } } @@ -104,6 +109,7 @@ pub(super) fn node_key(secrets: &ConsensusSecrets) -> anyhow::Result, ) -> anyhow::Result { let mut gossip_static_outbound = HashMap::new(); { @@ -128,6 +134,7 @@ pub(super) fn executor( }; Ok(executor::Config { + build_version, server_addr: cfg.server_addr, public_addr: net::Host(cfg.public_addr.0.clone()), max_payload_size: cfg.max_payload_size, diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index 259cac5d074a..e1f10b8e4e50 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -1,20 +1,25 @@ use std::sync::Arc; use anyhow::Context as _; +use jsonrpsee::{core::ClientError, types::error::ErrorCode}; use zksync_concurrency::{ctx, error::Wrap as _, scope, time}; use zksync_consensus_executor::{self as executor, attestation}; use zksync_consensus_roles::{attester, validator}; use zksync_consensus_storage::{BatchStore, BlockStore}; use zksync_dal::consensus_dal; -use zksync_node_sync::{ - fetcher::FetchedBlock, sync_action::ActionQueueSender, MainNodeClient, SyncState, -}; -use zksync_protobuf::ProtoFmt as _; +use zksync_node_sync::{fetcher::FetchedBlock, sync_action::ActionQueueSender, SyncState}; use zksync_types::L2BlockNumber; -use zksync_web3_decl::client::{DynClient, L2}; +use zksync_web3_decl::{ + client::{DynClient, L2}, + error::is_retriable, + namespaces::{EnNamespaceClient as _, EthNamespaceClient as _}, +}; use super::{config, storage::Store, ConsensusConfig, ConsensusSecrets}; -use crate::storage::{self, ConnectionPool}; +use crate::{ + registry, + storage::{self, ConnectionPool}, +}; /// External node. pub(super) struct EN { @@ -27,7 +32,7 @@ impl EN { /// Task running a consensus node for the external node. /// It may be a validator, but it cannot be a leader (cannot propose blocks). /// - /// NOTE: Before starting the consensus node if fetches all the blocks + /// NOTE: Before starting the consensus node it fetches all the blocks /// older than consensus genesis from the main node using json RPC. 
pub async fn run( self, @@ -35,6 +40,7 @@ impl EN { actions: ActionQueueSender, cfg: ConsensusConfig, secrets: ConsensusSecrets, + build_version: Option, ) -> anyhow::Result<()> { let attester = config::attester_key(&secrets).context("attester_key")?; @@ -47,13 +53,16 @@ impl EN { // Update sync state in the background. s.spawn_bg(self.fetch_state_loop(ctx)); - // Initialize genesis. - let genesis = self.fetch_genesis(ctx).await.wrap("fetch_genesis()")?; + // Initialize global config. + let global_config = self + .fetch_global_config(ctx) + .await + .wrap("fetch_genesis()")?; let mut conn = self.pool.connection(ctx).await.wrap("connection()")?; - conn.try_update_genesis(ctx, &genesis) + conn.try_update_global_config(ctx, &global_config) .await - .wrap("set_genesis()")?; + .wrap("try_update_global_config()")?; let mut payload_queue = conn .new_payload_queue(ctx, actions, self.sync_state.clone()) @@ -63,18 +72,22 @@ impl EN { drop(conn); // Fetch blocks before the genesis. - self.fetch_blocks(ctx, &mut payload_queue, Some(genesis.first_block)) - .await - .wrap("fetch_blocks()")?; + self.fetch_blocks( + ctx, + &mut payload_queue, + Some(global_config.genesis.first_block), + ) + .await + .wrap("fetch_blocks()")?; // Monitor the genesis of the main node. // If it changes, it means that a hard fork occurred and we need to reset the consensus state. s.spawn_bg::<()>({ - let old = genesis.clone(); + let old = global_config.clone(); async { let old = old; loop { - if let Ok(new) = self.fetch_genesis(ctx).await { + if let Ok(new) = self.fetch_global_config(ctx).await { if new != old { return Err(anyhow::format_err!( "genesis changed: old {old:?}, new {new:?}" @@ -105,10 +118,14 @@ impl EN { s.spawn_bg(async { Ok(runner.run(ctx).await?) }); let attestation = Arc::new(attestation::Controller::new(attester)); - s.spawn_bg(self.run_attestation_updater(ctx, genesis.clone(), attestation.clone())); + s.spawn_bg(self.run_attestation_controller( + ctx, + global_config.clone(), + attestation.clone(), + )); let executor = executor::Executor { - config: config::executor(&cfg, &secrets)?, + config: config::executor(&cfg, &secrets, build_version)?, block_store, batch_store, validator: config::validator_key(&secrets) @@ -164,24 +181,21 @@ impl EN { /// Monitors the `AttestationStatus` on the main node, /// and updates the attestation config accordingly. 
- async fn run_attestation_updater( + async fn run_attestation_controller( &self, ctx: &ctx::Ctx, - genesis: validator::Genesis, + cfg: consensus_dal::GlobalConfig, attestation: Arc, ) -> ctx::Result<()> { const POLL_INTERVAL: time::Duration = time::Duration::seconds(5); - let Some(committee) = &genesis.attesters else { - return Ok(()); - }; - let committee = Arc::new(committee.clone()); + let registry = registry::Registry::new(cfg.genesis.clone(), self.pool.clone()).await; let mut next = attester::BatchNumber(0); loop { let status = loop { match self.fetch_attestation_status(ctx).await { Err(err) => tracing::warn!("{err:#}"), Ok(status) => { - if status.genesis != genesis.hash() { + if status.genesis != cfg.genesis.hash() { return Err(anyhow::format_err!("genesis mismatch").into()); } if status.next_batch_to_attest >= next { @@ -191,6 +205,7 @@ impl EN { } ctx.sleep(POLL_INTERVAL).await?; }; + next = status.next_batch_to_attest.next(); tracing::info!( "waiting for hash of batch {:?}", status.next_batch_to_attest @@ -199,6 +214,27 @@ impl EN { .pool .wait_for_batch_hash(ctx, status.next_batch_to_attest) .await?; + let Some(committee) = registry + .attester_committee_for( + ctx, + cfg.registry_address.map(registry::Address::new), + status.next_batch_to_attest, + ) + .await + .wrap("attester_committee_for()")? + else { + tracing::info!("attestation not required"); + continue; + }; + let committee = Arc::new(committee); + // Persist the derived committee. + self.pool + .connection(ctx) + .await + .wrap("connection")? + .upsert_attester_committee(ctx, status.next_batch_to_attest, &committee) + .await + .wrap("upsert_attester_committee()")?; tracing::info!( "attesting batch {:?} with hash {hash:?}", status.next_batch_to_attest @@ -214,7 +250,6 @@ impl EN { })) .await .context("start_attestation()")?; - next = status.next_batch_to_attest.next(); } } @@ -224,37 +259,52 @@ impl EN { const DELAY_INTERVAL: time::Duration = time::Duration::milliseconds(500); const RETRY_INTERVAL: time::Duration = time::Duration::seconds(5); loop { - match ctx.wait(self.client.fetch_l2_block_number()).await? { + match ctx.wait(self.client.get_block_number()).await? { Ok(head) => { + let head = L2BlockNumber(head.try_into().ok().context("overflow")?); self.sync_state.set_main_node_block(head); ctx.sleep(DELAY_INTERVAL).await?; } Err(err) => { - tracing::warn!("main_node_client.fetch_l2_block_number(): {err}"); + tracing::warn!("get_block_number(): {err}"); ctx.sleep(RETRY_INTERVAL).await?; } } } } - /// Fetches genesis from the main node. + /// Fetches consensus global configuration from the main node. #[tracing::instrument(skip_all)] - async fn fetch_genesis(&self, ctx: &ctx::Ctx) -> ctx::Result { - let genesis = ctx - .wait(self.client.fetch_consensus_genesis()) - .await? - .context("fetch_consensus_genesis()")? - .context("main node is not running consensus component")?; - // Deserialize the json, but don't allow for unknown fields. - // We need to compute the hash of the Genesis, so simply ignoring the unknown fields won't - // do. - Ok(validator::GenesisRaw::read( - &zksync_protobuf::serde::deserialize_proto_with_options( - &genesis.0, /*deny_unknown_fields=*/ true, - ) - .context("deserialize")?, - )? - .with_hash()) + async fn fetch_global_config( + &self, + ctx: &ctx::Ctx, + ) -> ctx::Result { + match ctx.wait(self.client.consensus_global_config()).await? 
{ + Ok(cfg) => { + let cfg = cfg.context("main node is not running consensus component")?; + Ok(zksync_protobuf::serde::deserialize(&cfg.0).context("deserialize()")?) + } + Err(ClientError::Call(err)) if err.code() == ErrorCode::MethodNotFound.code() => { + tracing::info!( + "consensus_global_config() not found, calling consensus_genesis() instead" + ); + let genesis = ctx + .wait(self.client.consensus_genesis()) + .await? + .context("consensus_genesis()")? + .context("main node is not running consensus component")?; + Ok(consensus_dal::GlobalConfig { + genesis: zksync_protobuf::serde::deserialize(&genesis.0) + .context("deserialize()")?, + registry_address: None, + }) + } + Err(err) => { + return Err(err) + .context("consensus_global_config()") + .map_err(|err| err.into()) + } + } } #[tracing::instrument(skip_all)] @@ -262,15 +312,12 @@ impl EN { &self, ctx: &ctx::Ctx, ) -> ctx::Result { - match ctx.wait(self.client.fetch_attestation_status()).await? { - Ok(Some(status)) => Ok(zksync_protobuf::serde::deserialize(&status.0) - .context("deserialize(AttestationStatus")?), - Ok(None) => Err(anyhow::format_err!("empty response").into()), - Err(err) => Err(anyhow::format_err!( - "AttestationStatus call to main node HTTP RPC failed: {err:#}" - ) - .into()), - } + let status = ctx + .wait(self.client.attestation_status()) + .await? + .context("attestation_status()")? + .context("main node is not runnign consensus component")?; + Ok(zksync_protobuf::serde::deserialize(&status.0).context("deserialize()")?) } /// Fetches (with retries) the given block from the main node. @@ -278,14 +325,11 @@ impl EN { const RETRY_INTERVAL: time::Duration = time::Duration::seconds(5); loop { - let res = ctx.wait(self.client.fetch_l2_block(n, true)).await?; - match res { + match ctx.wait(self.client.sync_l2_block(n, true)).await? { Ok(Some(block)) => return Ok(block.try_into()?), Ok(None) => {} - Err(err) if err.is_retriable() => {} - Err(err) => { - return Err(anyhow::format_err!("client.fetch_l2_block({}): {err}", n).into()); - } + Err(err) if is_retriable(&err) => {} + Err(err) => Err(err).with_context(|| format!("client.sync_l2_block({n})"))?, } ctx.sleep(RETRY_INTERVAL).await?; } diff --git a/core/node/consensus/src/era.rs b/core/node/consensus/src/era.rs index 574e496f4d11..3150f839680e 100644 --- a/core/node/consensus/src/era.rs +++ b/core/node/consensus/src/era.rs @@ -45,6 +45,7 @@ pub async fn run_external_node( sync_state: SyncState, main_node_client: Box>, actions: ActionQueueSender, + build_version: semver::Version, ) -> anyhow::Result<()> { let en = en::EN { pool: ConnectionPool(pool), @@ -58,7 +59,8 @@ pub async fn run_external_node( is_validator = secrets.validator_key.is_some(), "running external node" ); - en.run(ctx, actions, cfg, secrets).await + en.run(ctx, actions, cfg, secrets, Some(build_version)) + .await } None => { tracing::info!("running fetcher"); diff --git a/core/node/consensus/src/lib.rs b/core/node/consensus/src/lib.rs index 13d918b5b6ee..ff9cdf865281 100644 --- a/core/node/consensus/src/lib.rs +++ b/core/node/consensus/src/lib.rs @@ -5,6 +5,7 @@ use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; +mod abi; // Currently `batch` module is only used in tests, // but will be used in production once batch syncing is implemented in consensus. 
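The fetch_global_config fallback above is worth calling out: the external node first calls the new consensus_global_config RPC and only falls back to the legacy consensus_genesis call when the main node reports that the method does not exist. A condensed sketch of that shape, assuming the same jsonrpsee error types the code above imports; the two closures are illustrative placeholders, not the crate's actual client calls.

use jsonrpsee::{core::ClientError, types::error::ErrorCode};

/// Try the new RPC method first; fall back to the legacy one only on
/// "method not found" (-32601). Any other error is propagated unchanged.
async fn with_legacy_fallback<T, FutNew, FutLegacy>(
    fetch_new: impl FnOnce() -> FutNew,
    fetch_legacy: impl FnOnce() -> FutLegacy,
) -> Result<T, ClientError>
where
    FutNew: std::future::Future<Output = Result<T, ClientError>>,
    FutLegacy: std::future::Future<Output = Result<T, ClientError>>,
{
    match fetch_new().await {
        Ok(value) => Ok(value),
        Err(ClientError::Call(err)) if err.code() == ErrorCode::MethodNotFound.code() => {
            // Older main node: it does not expose the new endpoint yet.
            fetch_legacy().await
        }
        Err(err) => Err(err),
    }
}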
#[allow(unused)] @@ -13,8 +14,10 @@ mod config; mod en; pub mod era; mod mn; +mod registry; mod storage; #[cfg(test)] pub(crate) mod testonly; #[cfg(test)] mod tests; +mod vm; diff --git a/core/node/consensus/src/mn.rs b/core/node/consensus/src/mn.rs index 7de86b4d8ba1..4d428346ebe4 100644 --- a/core/node/consensus/src/mn.rs +++ b/core/node/consensus/src/mn.rs @@ -6,9 +6,10 @@ use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; use zksync_consensus_executor::{self as executor, attestation}; use zksync_consensus_roles::{attester, validator}; use zksync_consensus_storage::{BatchStore, BlockStore}; +use zksync_dal::consensus_dal; use crate::{ - config, + config, registry, storage::{ConnectionPool, InsertCertificateError, Store}, }; @@ -36,9 +37,9 @@ pub async fn run_main_node( pool.connection(ctx) .await .wrap("connection()")? - .adjust_genesis(ctx, &spec) + .adjust_global_config(ctx, &spec) .await - .wrap("adjust_genesis()")?; + .wrap("adjust_global_config()")?; } // The main node doesn't have a payload queue as it produces all the L2 blocks itself. @@ -47,33 +48,40 @@ pub async fn run_main_node( .wrap("Store::new()")?; s.spawn_bg(runner.run(ctx)); - let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())) + let global_config = pool + .connection(ctx) .await - .wrap("BlockStore::new()")?; - s.spawn_bg(runner.run(ctx)); - - let genesis = block_store.genesis().clone(); + .wrap("connection()")? + .global_config(ctx) + .await + .wrap("global_config()")? + .context("global_config() disappeared")?; anyhow::ensure!( - genesis.leader_selection + global_config.genesis.leader_selection == validator::LeaderSelectionMode::Sticky(validator_key.public()), "unsupported leader selection mode - main node has to be the leader" ); + let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())) + .await + .wrap("BlockStore::new()")?; + s.spawn_bg(runner.run(ctx)); + let (batch_store, runner) = BatchStore::new(ctx, Box::new(store.clone())) .await .wrap("BatchStore::new()")?; s.spawn_bg(runner.run(ctx)); let attestation = Arc::new(attestation::Controller::new(attester)); - s.spawn_bg(run_attestation_updater( + s.spawn_bg(run_attestation_controller( ctx, &pool, - genesis, + global_config, attestation.clone(), )); let executor = executor::Executor { - config: config::executor(&cfg, &secrets)?, + config: config::executor(&cfg, &secrets, None)?, block_store, batch_store, validator: Some(executor::Validator { @@ -93,18 +101,17 @@ pub async fn run_main_node( /// Manages attestation state by configuring the /// next batch to attest and storing the collected /// certificates. -async fn run_attestation_updater( +async fn run_attestation_controller( ctx: &ctx::Ctx, pool: &ConnectionPool, - genesis: validator::Genesis, + cfg: consensus_dal::GlobalConfig, attestation: Arc, ) -> anyhow::Result<()> { const POLL_INTERVAL: time::Duration = time::Duration::seconds(5); + let registry = registry::Registry::new(cfg.genesis, pool.clone()).await; + let registry_addr = cfg.registry_address.map(registry::Address::new); + let mut next = attester::BatchNumber(0); let res = async { - let Some(committee) = &genesis.attesters else { - return Ok(()); - }; - let committee = Arc::new(committee.clone()); loop { // After regenesis it might happen that the batch number for the first block // is not immediately known (the first block was not produced yet), @@ -118,10 +125,12 @@ async fn run_attestation_updater( .await .wrap("attestation_status()")? 
{ - Some(status) => break status, - None => ctx.sleep(POLL_INTERVAL).await?, + Some(status) if status.next_batch_to_attest >= next => break status, + _ => {} } + ctx.sleep(POLL_INTERVAL).await?; }; + next = status.next_batch_to_attest.next(); tracing::info!( "waiting for hash of batch {:?}", status.next_batch_to_attest @@ -129,6 +138,22 @@ async fn run_attestation_updater( let hash = pool .wait_for_batch_hash(ctx, status.next_batch_to_attest) .await?; + let Some(committee) = registry + .attester_committee_for(ctx, registry_addr, status.next_batch_to_attest) + .await + .wrap("attester_committee_for()")? + else { + tracing::info!("attestation not required"); + continue; + }; + let committee = Arc::new(committee); + // Persist the derived committee. + pool.connection(ctx) + .await + .wrap("connection")? + .upsert_attester_committee(ctx, status.next_batch_to_attest, &committee) + .await + .wrap("upsert_attester_committee()")?; tracing::info!( "attesting batch {:?} with hash {hash:?}", status.next_batch_to_attest @@ -140,7 +165,7 @@ async fn run_attestation_updater( number: status.next_batch_to_attest, genesis: status.genesis, }, - committee: committee.clone(), + committee, })) .await .context("start_attestation()")?; diff --git a/core/node/consensus/src/registry/abi.rs b/core/node/consensus/src/registry/abi.rs new file mode 100644 index 000000000000..55cc7f9264fb --- /dev/null +++ b/core/node/consensus/src/registry/abi.rs @@ -0,0 +1,225 @@ +//! Strongly-typed API for ConsensusRegistry contract. +#![allow(dead_code)] + +use std::sync::Arc; + +use anyhow::Context as _; +use zksync_types::{ethabi, ethabi::Token}; + +use crate::abi; + +/// Reprents ConsensusRegistry contract. +#[derive(Debug, Clone)] +pub(crate) struct ConsensusRegistry(Arc); + +impl AsRef for ConsensusRegistry { + fn as_ref(&self) -> ðabi::Contract { + &self.0 + } +} + +impl ConsensusRegistry { + const FILE: &'static str = "contracts/l2-contracts/artifacts-zk/contracts/ConsensusRegistry.sol/ConsensusRegistry.json"; + + /// Loads bytecode of the contract. + #[cfg(test)] + pub(crate) fn bytecode() -> Vec { + zksync_contracts::read_bytecode(Self::FILE) + } + + /// Loads the `ethabi` representation of the contract. + pub(crate) fn load() -> Self { + Self(zksync_contracts::load_contract(ConsensusRegistry::FILE).into()) + } + + /// Constructs a call to function `F` of this contract. + pub(crate) fn call>(&self, inputs: F) -> abi::Call { + abi::Call { + contract: self.clone(), + inputs, + } + } +} + +/// ConsensusRegistry.getAttesterCommittee function. +#[derive(Debug, Default)] +pub(crate) struct GetAttesterCommittee; + +impl abi::Function for GetAttesterCommittee { + type Contract = ConsensusRegistry; + const NAME: &'static str = "getAttesterCommittee"; + + fn encode(&self) -> Vec { + vec![] + } + + type Outputs = Vec; + fn decode_outputs(tokens: Vec) -> anyhow::Result { + let [attesters] = tokens.try_into().ok().context("bad size")?; + let mut res = vec![]; + for token in attesters.into_array().context("not array")? { + res.push(Attester::from_token(token).context("attesters")?); + } + Ok(res) + } +} + +/// ConsensusRegistry.add function. 
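For context on the typed wrappers that follow: ConsensusRegistry::load() plus call(GetAttesterCommittee) ultimately reduce to a plain ethabi lookup-and-encode. A standalone sketch of that underlying step, assuming the ABI array (the `abi` field of the compiled artifact) is already at hand; the function name matches the contract, everything else is simplified.

use anyhow::Context as _;
use ethabi::Contract;

/// Builds calldata for the no-argument `getAttesterCommittee` getter.
fn get_attester_committee_calldata(abi_array_json: &[u8]) -> anyhow::Result<Vec<u8>> {
    let contract = Contract::load(abi_array_json).context("Contract::load()")?;
    let function = contract
        .function("getAttesterCommittee")
        .context("function()")?;
    // No inputs, so the calldata is just the 4-byte function selector.
    function.encode_input(&[]).context("encode_input()")
}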
+#[derive(Debug, Default)] +pub(crate) struct Add { + pub(crate) node_owner: ethabi::Address, + pub(crate) validator_weight: u32, + pub(crate) validator_pub_key: BLS12_381PublicKey, + pub(crate) validator_pop: BLS12_381Signature, + pub(crate) attester_weight: u32, + pub(crate) attester_pub_key: Secp256k1PublicKey, +} + +impl abi::Function for Add { + type Contract = ConsensusRegistry; + const NAME: &'static str = "add"; + fn encode(&self) -> Vec { + vec![ + Token::Address(self.node_owner), + Token::Uint(self.validator_weight.into()), + self.validator_pub_key.to_token(), + self.validator_pop.to_token(), + Token::Uint(self.attester_weight.into()), + self.attester_pub_key.to_token(), + ] + } + type Outputs = (); + fn decode_outputs(tokens: Vec) -> anyhow::Result<()> { + let [] = tokens.try_into().ok().context("bad size")?; + Ok(()) + } +} + +/// ConsensusRegistry.initialize function. +#[derive(Debug, Default)] +pub(crate) struct Initialize { + pub(crate) initial_owner: ethabi::Address, +} + +impl abi::Function for Initialize { + type Contract = ConsensusRegistry; + const NAME: &'static str = "initialize"; + fn encode(&self) -> Vec { + vec![Token::Address(self.initial_owner)] + } + type Outputs = (); + fn decode_outputs(tokens: Vec) -> anyhow::Result<()> { + let [] = tokens.try_into().ok().context("bad size")?; + Ok(()) + } +} + +/// ConsensusRegistry.commitAttesterCommittee function. +#[derive(Debug, Default)] +pub(crate) struct CommitAttesterCommittee; + +impl abi::Function for CommitAttesterCommittee { + type Contract = ConsensusRegistry; + const NAME: &'static str = "commitAttesterCommittee"; + fn encode(&self) -> Vec { + vec![] + } + type Outputs = (); + fn decode_outputs(tokens: Vec) -> anyhow::Result<()> { + let [] = tokens.try_into().ok().context("bad size")?; + Ok(()) + } +} + +/// ConsensusRegistry.owner function. +#[derive(Debug, Default)] +pub(crate) struct Owner; + +impl abi::Function for Owner { + type Contract = ConsensusRegistry; + const NAME: &'static str = "owner"; + fn encode(&self) -> Vec { + vec![] + } + type Outputs = ethabi::Address; + fn decode_outputs(tokens: Vec) -> anyhow::Result { + let [owner] = tokens.try_into().ok().context("bad size")?; + owner.into_address().context("not an address") + } +} + +// Auxiliary structs. + +/// Raw representation of a secp256k1 public key. +#[derive(Debug, Default)] +pub(crate) struct Secp256k1PublicKey { + pub(crate) tag: [u8; 1], + pub(crate) x: [u8; 32], +} + +impl Secp256k1PublicKey { + fn from_token(token: Token) -> anyhow::Result { + let [tag, x] = abi::into_tuple(token)?; + Ok(Self { + tag: abi::into_fixed_bytes(tag).context("tag")?, + x: abi::into_fixed_bytes(x).context("x")?, + }) + } + + fn to_token(&self) -> Token { + Token::Tuple(vec![ + Token::FixedBytes(self.tag.into()), + Token::FixedBytes(self.x.into()), + ]) + } +} + +/// Raw representation of an attester committee member. +#[derive(Debug)] +pub(crate) struct Attester { + pub(crate) weight: u32, + pub(crate) pub_key: Secp256k1PublicKey, +} + +impl Attester { + fn from_token(token: Token) -> anyhow::Result { + let [weight, pub_key] = abi::into_tuple(token)?; + Ok(Self { + weight: abi::into_uint(weight).context("weight")?, + pub_key: Secp256k1PublicKey::from_token(pub_key).context("pub_key")?, + }) + } +} + +/// Raw representation of a BLS12_381 public key. 
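The Secp256k1PublicKey layout above (a 1-byte tag plus a 32-byte x coordinate) matches how the test helpers later split a 33-byte encoded attester key, which is consistent with a SEC1 compressed secp256k1 point. A tiny sketch of that packing, pure byte manipulation with illustrative helper names.

/// Split a 33-byte compressed key into the `(tag, x)` pair stored on-chain.
fn split_compressed_key(key: [u8; 33]) -> ([u8; 1], [u8; 32]) {
    let tag: [u8; 1] = key[0..1].try_into().unwrap();
    let x: [u8; 32] = key[1..33].try_into().unwrap();
    (tag, x)
}

/// Re-concatenate the pair when reading the key back from the contract.
fn join_compressed_key(tag: [u8; 1], x: [u8; 32]) -> [u8; 33] {
    let mut out = [0u8; 33];
    out[0] = tag[0];
    out[1..].copy_from_slice(&x);
    out
}

fn main() {
    let mut key = [0u8; 33];
    key[0] = 0x02; // tag of a compressed point with even y
    let (tag, x) = split_compressed_key(key);
    assert_eq!(join_compressed_key(tag, x), key);
}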
+#[derive(Debug, Default)] +pub(crate) struct BLS12_381PublicKey { + pub(crate) a: [u8; 32], + pub(crate) b: [u8; 32], + pub(crate) c: [u8; 32], +} + +impl BLS12_381PublicKey { + fn to_token(&self) -> Token { + Token::Tuple(vec![ + Token::FixedBytes(self.a.into()), + Token::FixedBytes(self.b.into()), + Token::FixedBytes(self.c.into()), + ]) + } +} + +#[derive(Debug, Default)] +pub(crate) struct BLS12_381Signature { + pub(crate) a: [u8; 32], + pub(crate) b: [u8; 16], +} + +impl BLS12_381Signature { + fn to_token(&self) -> Token { + Token::Tuple(vec![ + Token::FixedBytes(self.a.into()), + Token::FixedBytes(self.b.into()), + ]) + } +} diff --git a/core/node/consensus/src/registry/mod.rs b/core/node/consensus/src/registry/mod.rs new file mode 100644 index 000000000000..74da41309573 --- /dev/null +++ b/core/node/consensus/src/registry/mod.rs @@ -0,0 +1,80 @@ +use anyhow::Context as _; +use zksync_concurrency::{ctx, error::Wrap as _}; +use zksync_consensus_crypto::ByteFmt; +use zksync_consensus_roles::{attester, validator}; + +use crate::{storage::ConnectionPool, vm::VM}; + +mod abi; +#[cfg(test)] +pub(crate) mod testonly; +#[cfg(test)] +mod tests; + +fn decode_attester_key(k: &abi::Secp256k1PublicKey) -> anyhow::Result { + let mut x = vec![]; + x.extend(k.tag); + x.extend(k.x); + ByteFmt::decode(&x) +} + +fn decode_weighted_attester(a: &abi::Attester) -> anyhow::Result { + Ok(attester::WeightedAttester { + weight: a.weight.into(), + key: decode_attester_key(&a.pub_key).context("key")?, + }) +} + +pub type Address = crate::abi::Address; + +#[derive(Debug)] +pub(crate) struct Registry { + contract: abi::ConsensusRegistry, + genesis: validator::Genesis, + vm: VM, +} + +impl Registry { + pub async fn new(genesis: validator::Genesis, pool: ConnectionPool) -> Self { + Self { + contract: abi::ConsensusRegistry::load(), + genesis, + vm: VM::new(pool).await, + } + } + + /// Attester committee for the given batch. + /// It reads committee from the contract. + /// Falls back to committee specified in the genesis. + pub async fn attester_committee_for( + &self, + ctx: &ctx::Ctx, + address: Option
, + attested_batch: attester::BatchNumber, + ) -> ctx::Result> { + let Some(batch_defining_committee) = attested_batch.prev() else { + // Batch 0 doesn't need attestation. + return Ok(None); + }; + let Some(address) = address else { + return Ok(self.genesis.attesters.clone()); + }; + let raw = self + .vm + .call( + ctx, + batch_defining_committee, + address, + self.contract.call(abi::GetAttesterCommittee), + ) + .await + .wrap("vm.call()")?; + let mut attesters = vec![]; + for a in raw { + attesters.push(decode_weighted_attester(&a).context("decode_weighted_attester()")?); + } + Ok(Some( + attester::Committee::new(attesters.into_iter()).context("Committee::new()")?, + )) + } +} diff --git a/core/node/consensus/src/registry/testonly.rs b/core/node/consensus/src/registry/testonly.rs new file mode 100644 index 000000000000..a0c55a557feb --- /dev/null +++ b/core/node/consensus/src/registry/testonly.rs @@ -0,0 +1,118 @@ +use rand::Rng; +use zksync_consensus_crypto::ByteFmt; +use zksync_consensus_roles::{attester, validator}; +use zksync_test_account::Account; +use zksync_types::{ethabi, Execute, Transaction, U256}; + +use super::*; + +pub(crate) fn make_tx( + account: &mut Account, + address: crate::abi::Address, + call: crate::abi::Call, +) -> Transaction { + account.get_l2_tx_for_execute( + Execute { + contract_address: *address, + calldata: call.calldata().unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ) +} + +pub(crate) struct WeightedValidator { + weight: validator::Weight, + key: validator::PublicKey, + pop: validator::ProofOfPossession, +} + +fn encode_attester_key(k: &attester::PublicKey) -> abi::Secp256k1PublicKey { + let b: [u8; 33] = ByteFmt::encode(k).try_into().unwrap(); + abi::Secp256k1PublicKey { + tag: b[0..1].try_into().unwrap(), + x: b[1..33].try_into().unwrap(), + } +} + +fn encode_validator_key(k: &validator::PublicKey) -> abi::BLS12_381PublicKey { + let b: [u8; 96] = ByteFmt::encode(k).try_into().unwrap(); + abi::BLS12_381PublicKey { + a: b[0..32].try_into().unwrap(), + b: b[32..64].try_into().unwrap(), + c: b[64..96].try_into().unwrap(), + } +} + +fn encode_validator_pop(pop: &validator::ProofOfPossession) -> abi::BLS12_381Signature { + let b: [u8; 48] = ByteFmt::encode(pop).try_into().unwrap(); + abi::BLS12_381Signature { + a: b[0..32].try_into().unwrap(), + b: b[32..48].try_into().unwrap(), + } +} + +pub(crate) fn gen_validator(rng: &mut impl Rng) -> WeightedValidator { + let k: validator::SecretKey = rng.gen(); + WeightedValidator { + key: k.public(), + weight: rng.gen_range(1..100), + pop: k.sign_pop(), + } +} + +pub(crate) fn gen_attester(rng: &mut impl Rng) -> attester::WeightedAttester { + attester::WeightedAttester { + key: rng.gen(), + weight: rng.gen_range(1..100), + } +} + +impl Registry { + pub(crate) fn deploy(&self, account: &mut Account) -> (Address, Transaction) { + let tx = account.get_deploy_tx( + &abi::ConsensusRegistry::bytecode(), + None, + zksync_test_account::TxType::L2, + ); + (Address::new(tx.address), tx.tx) + } + + pub(crate) fn add( + &self, + node_owner: ethabi::Address, + validator: WeightedValidator, + attester: attester::WeightedAttester, + ) -> anyhow::Result> { + Ok(self.contract.call(abi::Add { + node_owner, + validator_pub_key: encode_validator_key(&validator.key), + validator_weight: validator + .weight + .try_into() + .context("overflow") + .context("validator_weight")?, + validator_pop: encode_validator_pop(&validator.pop), + attester_pub_key: encode_attester_key(&attester.key), + attester_weight: attester + 
.weight + .try_into() + .context("overflow") + .context("attester_weight")?, + })) + } + + pub(crate) fn initialize( + &self, + initial_owner: ethabi::Address, + ) -> crate::abi::Call { + self.contract.call(abi::Initialize { initial_owner }) + } + + pub(crate) fn commit_attester_committee( + &self, + ) -> crate::abi::Call { + self.contract.call(abi::CommitAttesterCommittee) + } +} diff --git a/core/node/consensus/src/registry/tests.rs b/core/node/consensus/src/registry/tests.rs new file mode 100644 index 000000000000..935cd6738918 --- /dev/null +++ b/core/node/consensus/src/registry/tests.rs @@ -0,0 +1,91 @@ +use rand::Rng as _; +use zksync_concurrency::{ctx, scope}; +use zksync_consensus_roles::{attester, validator::testonly::Setup}; +use zksync_test_account::Account; +use zksync_types::ProtocolVersionId; + +use super::*; +use crate::storage::ConnectionPool; + +/// Test checking that parsing logic matches the abi specified in the json file. +#[test] +fn test_consensus_registry_abi() { + zksync_concurrency::testonly::abort_on_panic(); + let c = abi::ConsensusRegistry::load(); + c.call(abi::GetAttesterCommittee).test().unwrap(); + c.call(abi::Add::default()).test().unwrap(); + c.call(abi::Initialize::default()).test().unwrap(); + c.call(abi::CommitAttesterCommittee).test().unwrap(); + c.call(abi::Owner).test().unwrap(); +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_attester_committee() { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + let setup = Setup::new(rng, 10); + let account = &mut Account::random(); + let to_fund = &[account.address]; + + scope::run!(ctx, |ctx, s| async { + let pool = ConnectionPool::test(false, ProtocolVersionId::latest()).await; + let registry = Registry::new(setup.genesis.clone(), pool.clone()).await; + + // If the registry contract address is not specified, + // then the committee from genesis should be returned. + let got = registry + .attester_committee_for(ctx, None, attester::BatchNumber(10)) + .await + .unwrap(); + assert_eq!(setup.genesis.attesters, got); + + let (mut node, runner) = crate::testonly::StateKeeper::new(ctx, pool.clone()).await?; + s.spawn_bg(runner.run_real(ctx, to_fund)); + + // Deploy registry contract and initialize it. + let committee = + attester::Committee::new((0..5).map(|_| testonly::gen_attester(rng))).unwrap(); + let (registry_addr, tx) = registry.deploy(account); + let mut txs = vec![tx]; + let account_addr = account.address(); + txs.push(testonly::make_tx( + account, + registry_addr, + registry.initialize(account_addr), + )); + // Add attesters. + for a in committee.iter() { + txs.push(testonly::make_tx( + account, + registry_addr, + registry + .add(rng.gen(), testonly::gen_validator(rng), a.clone()) + .unwrap(), + )); + } + // Commit the update. + txs.push(testonly::make_tx( + account, + registry_addr, + registry.commit_attester_committee(), + )); + + node.push_block(&txs).await; + node.seal_batch().await; + pool.wait_for_batch(ctx, node.last_batch()).await?; + + // Read the attester committee using the vm. 
+ let batch = attester::BatchNumber(node.last_batch().0.into()); + assert_eq!( + Some(committee), + registry + .attester_committee_for(ctx, Some(registry_addr), batch + 1) + .await + .unwrap() + ); + Ok(()) + }) + .await + .unwrap(); +} diff --git a/core/node/consensus/src/storage/connection.rs b/core/node/consensus/src/storage/connection.rs index 6ff2fb1ce0a0..512b37e81a11 100644 --- a/core/node/consensus/src/storage/connection.rs +++ b/core/node/consensus/src/storage/connection.rs @@ -1,13 +1,14 @@ use anyhow::Context as _; use zksync_concurrency::{ctx, error::Wrap as _, time}; use zksync_consensus_crypto::keccak256::Keccak256; -use zksync_consensus_roles::{attester, validator}; +use zksync_consensus_roles::{attester, attester::BatchNumber, validator}; use zksync_consensus_storage::{self as storage, BatchStoreState}; use zksync_dal::{consensus_dal, consensus_dal::Payload, Core, CoreDal, DalError}; use zksync_l1_contract_interface::i_executor::structures::StoredBatchInfo; +use zksync_node_api_server::execution_sandbox::{BlockArgs, BlockStartInfo}; use zksync_node_sync::{fetcher::IoCursorExt as _, ActionQueueSender, SyncState}; use zksync_state_keeper::io::common::IoCursor; -use zksync_types::{commitment::L1BatchWithMetadata, L1BatchNumber}; +use zksync_types::{api, commitment::L1BatchWithMetadata, L1BatchNumber}; use super::{InsertCertificateError, PayloadQueue}; use crate::config; @@ -18,7 +19,7 @@ pub(crate) struct ConnectionPool(pub(crate) zksync_dal::ConnectionPool); impl ConnectionPool { /// Wrapper for `connection_tagged()`. - pub(crate) async fn connection<'a>(&'a self, ctx: &ctx::Ctx) -> ctx::Result> { + pub(crate) async fn connection(&self, ctx: &ctx::Ctx) -> ctx::Result> { Ok(Connection( ctx.wait(self.0.connection_tagged("consensus")) .await? @@ -164,6 +165,22 @@ impl<'a> Connection<'a> { .map_err(E::Other)?) } + /// Wrapper for `consensus_dal().upsert_attester_committee()`. + pub async fn upsert_attester_committee( + &mut self, + ctx: &ctx::Ctx, + number: BatchNumber, + committee: &attester::Committee, + ) -> ctx::Result<()> { + ctx.wait( + self.0 + .consensus_dal() + .upsert_attester_committee(number, committee), + ) + .await??; + Ok(()) + } + /// Wrapper for `consensus_dal().replica_state()`. pub async fn replica_state(&mut self, ctx: &ctx::Ctx) -> ctx::Result { Ok(ctx @@ -229,22 +246,22 @@ impl<'a> Connection<'a> { }) } - /// Wrapper for `consensus_dal().genesis()`. - pub async fn genesis(&mut self, ctx: &ctx::Ctx) -> ctx::Result> { - Ok(ctx - .wait(self.0.consensus_dal().genesis()) - .await? - .map_err(DalError::generalize)?) + /// Wrapper for `consensus_dal().global_config()`. + pub async fn global_config( + &mut self, + ctx: &ctx::Ctx, + ) -> ctx::Result> { + Ok(ctx.wait(self.0.consensus_dal().global_config()).await??) } - /// Wrapper for `consensus_dal().try_update_genesis()`. - pub async fn try_update_genesis( + /// Wrapper for `consensus_dal().try_update_global_config()`. + pub async fn try_update_global_config( &mut self, ctx: &ctx::Ctx, - genesis: &validator::Genesis, + cfg: &consensus_dal::GlobalConfig, ) -> ctx::Result<()> { Ok(ctx - .wait(self.0.consensus_dal().try_update_genesis(genesis)) + .wait(self.0.consensus_dal().try_update_global_config(cfg)) .await??) } @@ -267,7 +284,7 @@ impl<'a> Connection<'a> { /// (Re)initializes consensus genesis to start at the last L2 block in storage. /// Noop if `spec` matches the current genesis. 
- pub(crate) async fn adjust_genesis( + pub(crate) async fn adjust_global_config( &mut self, ctx: &ctx::Ctx, spec: &config::GenesisSpec, @@ -277,31 +294,34 @@ impl<'a> Connection<'a> { .await .wrap("start_transaction()")?; - let old = txn.genesis(ctx).await.wrap("genesis()")?; + let old = txn.global_config(ctx).await.wrap("genesis()")?; if let Some(old) = &old { - if &config::GenesisSpec::from_genesis(old) == spec { + if &config::GenesisSpec::from_global_config(old) == spec { // Hard fork is not needed. return Ok(()); } } tracing::info!("Performing a hard fork of consensus."); - let genesis = validator::GenesisRaw { - chain_id: spec.chain_id, - fork_number: old - .as_ref() - .map_or(validator::ForkNumber(0), |old| old.fork_number.next()), - first_block: txn.next_block(ctx).await.context("next_block()")?, - protocol_version: spec.protocol_version, - validators: spec.validators.clone(), - attesters: spec.attesters.clone(), - leader_selection: spec.leader_selection.clone(), - } - .with_hash(); + let new = consensus_dal::GlobalConfig { + genesis: validator::GenesisRaw { + chain_id: spec.chain_id, + fork_number: old.as_ref().map_or(validator::ForkNumber(0), |old| { + old.genesis.fork_number.next() + }), + first_block: txn.next_block(ctx).await.context("next_block()")?, + protocol_version: spec.protocol_version, + validators: spec.validators.clone(), + attesters: spec.attesters.clone(), + leader_selection: spec.leader_selection.clone(), + } + .with_hash(), + registry_address: spec.registry_address, + }; - txn.try_update_genesis(ctx, &genesis) + txn.try_update_global_config(ctx, &new) .await - .wrap("try_update_genesis()")?; + .wrap("try_update_global_config()")?; txn.commit(ctx).await.wrap("commit()")?; Ok(()) } @@ -447,4 +467,29 @@ impl<'a> Connection<'a> { .await? .context("attestation_status()")?) } + + /// Constructs `BlockArgs` for the last block of the batch. + pub async fn vm_block_args( + &mut self, + ctx: &ctx::Ctx, + batch: attester::BatchNumber, + ) -> ctx::Result { + let (_, block) = self + .get_l2_block_range_of_l1_batch(ctx, batch) + .await + .wrap("get_l2_block_range_of_l1_batch()")? + .context("batch not sealed")?; + let block = api::BlockId::Number(api::BlockNumber::Number(block.0.into())); + let start_info = ctx + .wait(BlockStartInfo::new( + &mut self.0, + /*max_cache_age=*/ std::time::Duration::from_secs(10), + )) + .await? + .context("BlockStartInfo::new()")?; + Ok(ctx + .wait(BlockArgs::new(&mut self.0, block, &start_info)) + .await? + .context("BlockArgs::new")?) + } } diff --git a/core/node/consensus/src/storage/store.rs b/core/node/consensus/src/storage/store.rs index 6a96812ae408..cb8e039d7d01 100644 --- a/core/node/consensus/src/storage/store.rs +++ b/core/node/consensus/src/storage/store.rs @@ -325,9 +325,10 @@ impl storage::PersistentBlockStore for Store { Ok(self .conn(ctx) .await? - .genesis(ctx) + .global_config(ctx) .await? - .context("not found")?) + .context("not found")? + .genesis) } fn persisted(&self) -> sync::watch::Receiver { diff --git a/core/node/consensus/src/storage/testonly.rs b/core/node/consensus/src/storage/testonly.rs index 5d1279afbbfd..65c464d98b93 100644 --- a/core/node/consensus/src/storage/testonly.rs +++ b/core/node/consensus/src/storage/testonly.rs @@ -1,5 +1,4 @@ //! Storage test helpers. 
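The fork handling in adjust_global_config above follows a simple rule: keep the existing config if it still matches the requested spec, otherwise create a new one whose fork number is the successor of the old one (or 0 for a fresh chain) and whose first block is the next block to be produced. A schematic sketch of just that decision, with illustrative stand-in types.

#[derive(Clone, Debug, PartialEq, Eq)]
struct GlobalConfig {
    fork_number: u64,
    first_block: u64,
}

/// Returns `None` when no hard fork is needed, otherwise the new config.
fn adjust(
    old: Option<&GlobalConfig>,
    old_matches_spec: bool,
    next_block: u64,
) -> Option<GlobalConfig> {
    if old.is_some() && old_matches_spec {
        return None; // Hard fork is not needed.
    }
    Some(GlobalConfig {
        fork_number: old.map_or(0, |old| old.fork_number + 1),
        first_block: next_block,
    })
}

fn main() {
    let cur = GlobalConfig { fork_number: 3, first_block: 100 };
    // Spec unchanged: nothing to do.
    assert_eq!(adjust(Some(&cur), true, 250), None);
    // Spec changed: bump the fork number and start from the next block.
    assert_eq!(
        adjust(Some(&cur), false, 250),
        Some(GlobalConfig { fork_number: 4, first_block: 250 })
    );
}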
- use anyhow::Context as _; use zksync_concurrency::{ctx, error::Wrap as _, time}; use zksync_consensus_roles::{attester, validator}; @@ -13,6 +12,7 @@ use zksync_types::{ }; use super::{Connection, ConnectionPool}; +use crate::registry; impl Connection<'_> { /// Wrapper for `consensus_dal().batch_of_block()`. @@ -181,16 +181,16 @@ impl ConnectionPool { want_last: validator::BlockNumber, ) -> ctx::Result> { let blocks = self.wait_for_block_certificates(ctx, want_last).await?; - let genesis = self + let cfg = self .connection(ctx) .await .wrap("connection()")? - .genesis(ctx) + .global_config(ctx) .await .wrap("genesis()")? .context("genesis is missing")?; for block in &blocks { - block.verify(&genesis).context(block.number())?; + block.verify(&cfg.genesis).context(block.number())?; } Ok(blocks) } @@ -199,6 +199,7 @@ impl ConnectionPool { &self, ctx: &ctx::Ctx, want_last: attester::BatchNumber, + registry_addr: Option, ) -> ctx::Result<()> { // Wait for the last batch to be attested. const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(100); @@ -214,17 +215,17 @@ impl ConnectionPool { ctx.sleep(POLL_INTERVAL).await?; } let mut conn = self.connection(ctx).await.wrap("connection()")?; - let genesis = conn - .genesis(ctx) + let cfg = conn + .global_config(ctx) .await - .wrap("genesis()")? - .context("genesis is missing")?; + .wrap("global_config()")? + .context("global config is missing")?; let first = conn - .batch_of_block(ctx, genesis.first_block) + .batch_of_block(ctx, cfg.genesis.first_block) .await .wrap("batch_of_block()")? .context("batch of first_block is missing")?; - let committee = genesis.attesters.as_ref().unwrap(); + let registry = registry::Registry::new(cfg.genesis.clone(), self.clone()).await; for i in first.0..want_last.0 { let i = attester::BatchNumber(i); let hash = conn @@ -240,8 +241,13 @@ impl ConnectionPool { if cert.message.hash != hash { return Err(anyhow::format_err!("cert[{i:?}]: hash mismatch").into()); } - cert.verify(genesis.hash(), committee) - .context("cert[{i:?}].verify()")?; + let committee = registry + .attester_committee_for(ctx, registry_addr, i) + .await + .context("attester_committee_for()")? + .context("committee not specified")?; + cert.verify(cfg.genesis.hash(), &committee) + .with_context(|| format!("cert[{i:?}].verify()"))?; } Ok(()) } diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index 90063772da92..241998f26928 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -42,8 +42,9 @@ use zksync_state_keeper::{ }; use zksync_test_account::Account; use zksync_types::{ + ethabi, fee_model::{BatchFeeInput, L1PeggedBatchFeeModelInput}, - Address, L1BatchNumber, L2BlockNumber, L2ChainId, PriorityOpId, ProtocolVersionId, + L1BatchNumber, L2BlockNumber, L2ChainId, PriorityOpId, ProtocolVersionId, Transaction, }; use zksync_web3_decl::client::{Client, DynClient, L2}; @@ -54,6 +55,7 @@ use crate::{ }; /// Fake StateKeeper for tests. +#[derive(Debug)] pub(super) struct StateKeeper { protocol_version: ProtocolVersionId, // Batch of the `last_block`. @@ -62,8 +64,6 @@ pub(super) struct StateKeeper { // timestamp of the last block. 
last_timestamp: u64, batch_sealed: bool, - // test L2 account - account: Account, next_priority_op: PriorityOpId, actions_sender: ActionQueueSender, @@ -116,6 +116,7 @@ pub(super) fn new_configs( }) .collect(), leader: config::ValidatorPublicKey(setup.validator_keys[0].public().encode()), + registry_address: None, }; network::testonly::new_configs(rng, setup, gossip_peers) .into_iter() @@ -183,7 +184,6 @@ pub(super) struct StateKeeperRunner { addr: sync::watch::Sender>, rocksdb_dir: tempfile::TempDir, metadata_calculator: MetadataCalculator, - account: Account, } impl StateKeeper { @@ -242,7 +242,6 @@ impl StateKeeper { .await .context("MetadataCalculator::new()")?; let tree_reader = metadata_calculator.tree_reader(); - let account = Account::random(); Ok(( Self { protocol_version, @@ -256,7 +255,6 @@ impl StateKeeper { addr: addr.subscribe(), pool: pool.clone(), tree_reader, - account: account.clone(), }, StateKeeperRunner { actions_queue, @@ -265,7 +263,6 @@ impl StateKeeper { addr, rocksdb_dir, metadata_calculator, - account, }, )) } @@ -306,22 +303,29 @@ impl StateKeeper { } } - /// Pushes a new L2 block with `transactions` transactions to the `StateKeeper`. - pub async fn push_random_block(&mut self, rng: &mut impl Rng) { + pub async fn push_block(&mut self, txs: &[Transaction]) { let mut actions = vec![self.open_block()]; - for _ in 0..rng.gen_range(3..8) { - let tx = match rng.gen() { - true => l2_transaction(&mut self.account, 1_000_000), + actions.extend( + txs.iter() + .map(|tx| FetchedTransaction::new(tx.clone()).into()), + ); + actions.push(SyncAction::SealL2Block); + self.actions_sender.push_actions(actions).await.unwrap(); + } + + /// Pushes a new L2 block with `transactions` transactions to the `StateKeeper`. + pub async fn push_random_block(&mut self, rng: &mut impl Rng, account: &mut Account) { + let txs: Vec<_> = (0..rng.gen_range(3..8)) + .map(|_| match rng.gen() { + true => l2_transaction(account, 1_000_000), false => { - let tx = l1_transaction(&mut self.account, self.next_priority_op); + let tx = l1_transaction(account, self.next_priority_op); self.next_priority_op += 1; tx } - }; - actions.push(FetchedTransaction::new(tx).into()); - } - actions.push(SyncAction::SealL2Block); - self.actions_sender.push_actions(actions).await.unwrap(); + }) + .collect(); + self.push_block(&txs).await; } /// Pushes `SealBatch` command to the `StateKeeper`. @@ -334,14 +338,19 @@ impl StateKeeper { } /// Pushes `count` random L2 blocks to the StateKeeper. - pub async fn push_random_blocks(&mut self, rng: &mut impl Rng, count: usize) { + pub async fn push_random_blocks( + &mut self, + rng: &mut impl Rng, + account: &mut Account, + count: usize, + ) { for _ in 0..count { // 20% chance to seal an L1 batch. // `seal_batch()` also produces a (fictive) block. if rng.gen_range(0..100) < 20 { self.seal_batch().await; } else { - self.push_random_block(rng).await; + self.push_random_block(rng, account).await; } } } @@ -451,7 +460,13 @@ impl StateKeeper { client, sync_state: self.sync_state.clone(), } - .run(ctx, self.actions_sender, cfgs.config, cfgs.secrets) + .run( + ctx, + self.actions_sender, + cfgs.config, + cfgs.secrets, + cfgs.net.build_version, + ) .await } } @@ -534,14 +549,21 @@ async fn mock_metadata_calculator_step(ctx: &ctx::Ctx, pool: &ConnectionPool) -> impl StateKeeperRunner { // Executes the state keeper task with real metadata calculator task // and fake commitment generator (because real one is too slow). 
- pub async fn run_real(self, ctx: &ctx::Ctx) -> anyhow::Result<()> { + pub async fn run_real( + self, + ctx: &ctx::Ctx, + addrs_to_fund: &[ethabi::Address], + ) -> anyhow::Result<()> { let res = scope::run!(ctx, |ctx, s| async { - // Fund the test account. Required for L2 transactions to succeed. - fund(&self.pool.0, &[self.account.address]).await; + // Fund the test accounts. Required for L2 transactions to succeed. + fund(&self.pool.0, addrs_to_fund).await; let (stop_send, stop_recv) = sync::watch::channel(false); - let (persistence, l2_block_sealer) = - StateKeeperPersistence::new(self.pool.0.clone(), Address::repeat_byte(11), 5); + let (persistence, l2_block_sealer) = StateKeeperPersistence::new( + self.pool.0.clone(), + ethabi::Address::repeat_byte(11), + 5, + ); let io = ExternalIO::new( self.pool.0.clone(), @@ -649,8 +671,11 @@ impl StateKeeperRunner { pub async fn run(self, ctx: &ctx::Ctx) -> anyhow::Result<()> { let res = scope::run!(ctx, |ctx, s| async { let (stop_send, stop_recv) = sync::watch::channel(false); - let (persistence, l2_block_sealer) = - StateKeeperPersistence::new(self.pool.0.clone(), Address::repeat_byte(11), 5); + let (persistence, l2_block_sealer) = StateKeeperPersistence::new( + self.pool.0.clone(), + ethabi::Address::repeat_byte(11), + 5, + ); let tree_writes_persistence = TreeWritesPersistence::new(self.pool.0.clone()); let io = ExternalIO::new( diff --git a/core/node/consensus/src/tests/attestation.rs b/core/node/consensus/src/tests/attestation.rs index b245d0524aa9..abd35508c7f7 100644 --- a/core/node/consensus/src/tests/attestation.rs +++ b/core/node/consensus/src/tests/attestation.rs @@ -1,17 +1,24 @@ use anyhow::Context as _; -use test_casing::{test_casing, Product}; +use rand::Rng as _; +use test_casing::test_casing; use tracing::Instrument as _; use zksync_concurrency::{ctx, error::Wrap, scope}; use zksync_consensus_roles::{ attester, validator::testonly::{Setup, SetupSpec}, }; -use zksync_dal::consensus_dal::AttestationStatus; -use zksync_node_sync::MainNodeClient; +use zksync_dal::consensus_dal; +use zksync_test_account::Account; use zksync_types::{L1BatchNumber, ProtocolVersionId}; +use zksync_web3_decl::namespaces::EnNamespaceClient as _; -use super::{FROM_SNAPSHOT, VERSIONS}; -use crate::{mn::run_main_node, storage::ConnectionPool, testonly}; +use super::VERSIONS; +use crate::{ + mn::run_main_node, + registry::{testonly, Registry}, + storage::ConnectionPool, + testonly::{new_configs, StateKeeper}, +}; #[test_casing(2, VERSIONS)] #[tokio::test] @@ -19,24 +26,31 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); + let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { let pool = ConnectionPool::test(false, version).await; - let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; + let (mut sk, runner) = StateKeeper::new(ctx, pool.clone()).await?; s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("validator"))); // Setup nontrivial genesis. 
while sk.last_sealed_batch() < L1BatchNumber(3) { - sk.push_random_blocks(rng, 10).await; + sk.push_random_blocks(rng, account, 10).await; } let mut setup = SetupSpec::new(rng, 3); setup.first_block = sk.last_block(); let first_batch = sk.last_batch(); let setup = Setup::from(setup); let mut conn = pool.connection(ctx).await.wrap("connection()")?; - conn.try_update_genesis(ctx, &setup.genesis) - .await - .wrap("try_update_genesis()")?; + conn.try_update_global_config( + ctx, + &consensus_dal::GlobalConfig { + genesis: setup.genesis.clone(), + registry_address: None, + }, + ) + .await + .wrap("try_update_global_config()")?; // Make sure that the first_batch is actually sealed. sk.seal_batch().await; pool.wait_for_batch(ctx, first_batch).await?; @@ -44,11 +58,11 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { // Connect to API endpoint. let api = sk.connect(ctx).await?; let fetch_status = || async { - let s = api - .fetch_attestation_status() - .await? + let s = ctx + .wait(api.attestation_status()) + .await?? .context("no attestation_status")?; - let s: AttestationStatus = + let s: consensus_dal::AttestationStatus = zksync_protobuf::serde::deserialize(&s.0).context("deserialize()")?; anyhow::ensure!(s.genesis == setup.genesis.hash(), "genesis hash mismatch"); Ok(s) @@ -62,24 +76,37 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { attester::BatchNumber(first_batch.0.into()) ); - // Insert a (fake) cert, then check again. + tracing::info!("Insert a cert"); { let mut conn = pool.connection(ctx).await?; let number = status.next_batch_to_attest; let hash = conn.batch_hash(ctx, number).await?.unwrap(); - let genesis = conn.genesis(ctx).await?.unwrap().hash(); + let gcfg = conn.global_config(ctx).await?.unwrap(); + let m = attester::Batch { + number, + hash, + genesis: gcfg.genesis.hash(), + }; + let mut sigs = attester::MultiSig::default(); + for k in &setup.attester_keys { + sigs.add(k.public(), k.sign_msg(m.clone()).sig); + } let cert = attester::BatchQC { - signatures: attester::MultiSig::default(), - message: attester::Batch { - number, - hash, - genesis, - }, + signatures: sigs, + message: m, }; + conn.upsert_attester_committee( + ctx, + cert.message.number, + setup.genesis.attesters.as_ref().unwrap(), + ) + .await + .context("upsert_attester_committee")?; conn.insert_batch_certificate(ctx, &cert) .await .context("insert_batch_certificate()")?; } + tracing::info!("Check again."); let want = status.next_batch_to_attest.next(); let got = fetch_status().await?; assert_eq!(want, got.next_batch_to_attest); @@ -93,34 +120,65 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { // Test running a couple of attesters (which are also validators). // Main node is expected to collect all certificates. // External nodes are expected to just vote for the batch. -// -// TODO: it would be nice to use `StateKeeperRunner::run_real()` in this test, -// however as of now it doesn't work with ENs and it doesn't work with -// `ConnectionPool::from_snapshot`. 
-#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[test_casing(2, VERSIONS)] #[tokio::test] -async fn test_multiple_attesters(from_snapshot: bool, version: ProtocolVersionId) { +async fn test_multiple_attesters(version: ProtocolVersionId) { const NODES: usize = 4; zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); + let account = &mut Account::random(); + let to_fund = &[account.address]; let setup = Setup::new(rng, 4); - let cfgs = testonly::new_configs(rng, &setup, NODES); - + let mut cfgs = new_configs(rng, &setup, NODES); scope::run!(ctx, |ctx, s| async { - let validator_pool = ConnectionPool::test(from_snapshot, version).await; - let (mut validator, runner) = - testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; + let validator_pool = ConnectionPool::test(false, version).await; + let (mut validator, runner) = StateKeeper::new(ctx, validator_pool.clone()).await?; s.spawn_bg(async { runner - .run(ctx) + .run_real(ctx, to_fund) .instrument(tracing::info_span!("validator")) .await .context("validator") }); - // API server needs at least 1 L1 batch to start. + + tracing::info!("deploy registry with 1 attester"); + let attesters: Vec<_> = setup.genesis.attesters.as_ref().unwrap().iter().collect(); + let registry = Registry::new(setup.genesis.clone(), validator_pool.clone()).await; + let (registry_addr, tx) = registry.deploy(account); + cfgs[0] + .config + .genesis_spec + .as_mut() + .unwrap() + .registry_address = Some(*registry_addr); + let mut txs = vec![tx]; + txs.push(testonly::make_tx( + account, + registry_addr, + registry.initialize(account.address), + )); + txs.push(testonly::make_tx( + account, + registry_addr, + registry + .add( + rng.gen(), + testonly::gen_validator(rng), + attesters[0].clone(), + ) + .unwrap(), + )); + txs.push(testonly::make_tx( + account, + registry_addr, + registry.commit_attester_committee(), + )); + validator.push_block(&txs).await; validator.seal_batch().await; + + tracing::info!("wait for the batch to be processed before starting consensus"); validator_pool .wait_for_payload(ctx, validator.last_block()) .await?; @@ -137,13 +195,13 @@ async fn test_multiple_attesters(from_snapshot: bool, version: ProtocolVersionId let mut node_pools = vec![]; for (i, cfg) in cfgs[1..].iter().enumerate() { let i = ctx::NoCopy(i); - let pool = ConnectionPool::test(from_snapshot, version).await; - let (node, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; + let pool = ConnectionPool::test(false, version).await; + let (node, runner) = StateKeeper::new(ctx, pool.clone()).await?; node_pools.push(pool.clone()); s.spawn_bg(async { let i = i; runner - .run(ctx) + .run_real(ctx, to_fund) .instrument(tracing::info_span!("node", i = *i)) .await .with_context(|| format!("node{}", *i)) @@ -151,13 +209,31 @@ async fn test_multiple_attesters(from_snapshot: bool, version: ProtocolVersionId s.spawn_bg(node.run_consensus(ctx, validator.connect(ctx).await?, cfg.clone())); } - tracing::info!("Create some batches"); - validator.push_random_blocks(rng, 20).await; - validator.seal_batch().await; + tracing::info!("add attesters one by one"); + #[allow(clippy::needless_range_loop)] + for i in 1..attesters.len() { + let txs = vec![ + testonly::make_tx( + account, + registry_addr, + registry + .add( + rng.gen(), + testonly::gen_validator(rng), + attesters[i].clone(), + ) + .unwrap(), + ), + testonly::make_tx(account, registry_addr, registry.commit_attester_committee()), + ]; + 
validator.push_block(&txs).await; + validator.seal_batch().await; + } + tracing::info!("Wait for the batches to be attested"); let want_last = attester::BatchNumber(validator.last_sealed_batch().0.into()); validator_pool - .wait_for_batch_certificates_and_verify(ctx, want_last) + .wait_for_batch_certificates_and_verify(ctx, want_last, Some(registry_addr)) .await?; Ok(()) }) diff --git a/core/node/consensus/src/tests/batch.rs b/core/node/consensus/src/tests/batch.rs index 41d73fdb87c6..f0cae7f2c02e 100644 --- a/core/node/consensus/src/tests/batch.rs +++ b/core/node/consensus/src/tests/batch.rs @@ -1,6 +1,7 @@ use test_casing::{test_casing, Product}; use zksync_concurrency::{ctx, scope}; use zksync_consensus_roles::validator; +use zksync_test_account::Account; use zksync_types::{L1BatchNumber, ProtocolVersionId}; use super::{FROM_SNAPSHOT, VERSIONS}; @@ -13,6 +14,7 @@ async fn test_connection_get_batch(from_snapshot: bool, version: ProtocolVersion let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); let pool = ConnectionPool::test(from_snapshot, version).await; + let account = &mut Account::random(); // Fill storage with unsigned L2 blocks and L1 batches in a way that the // last L1 batch is guaranteed to have some L2 blocks executed in it. @@ -23,11 +25,11 @@ async fn test_connection_get_batch(from_snapshot: bool, version: ProtocolVersion for _ in 0..3 { for _ in 0..2 { - sk.push_random_block(rng).await; + sk.push_random_block(rng, account).await; } sk.seal_batch().await; } - sk.push_random_block(rng).await; + sk.push_random_block(rng, account).await; pool.wait_for_payload(ctx, sk.last_block()).await?; @@ -84,11 +86,13 @@ async fn test_batch_witness(version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); + let account = &mut Account::random(); + let to_fund = &[account.address]; scope::run!(ctx, |ctx, s| async { let pool = ConnectionPool::from_genesis(version).await; let (mut node, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; - s.spawn_bg(runner.run_real(ctx)); + s.spawn_bg(runner.run_real(ctx, to_fund)); tracing::info!("analyzing storage"); { @@ -101,7 +105,7 @@ async fn test_batch_witness(version: ProtocolVersionId) { } // Seal a bunch of batches. - node.push_random_blocks(rng, 10).await; + node.push_random_blocks(rng, account, 10).await; node.seal_batch().await; pool.wait_for_batch(ctx, node.last_sealed_batch()).await?; // We can verify only 2nd batch onward, because diff --git a/core/node/consensus/src/tests/mod.rs b/core/node/consensus/src/tests/mod.rs index 0b611d55f06a..91f01f865a2b 100644 --- a/core/node/consensus/src/tests/mod.rs +++ b/core/node/consensus/src/tests/mod.rs @@ -7,6 +7,8 @@ use zksync_consensus_roles::{ validator::testonly::{Setup, SetupSpec}, }; use zksync_consensus_storage::BlockStore; +use zksync_dal::consensus_dal; +use zksync_test_account::Account; use zksync_types::ProtocolVersionId; use crate::{ @@ -28,6 +30,7 @@ async fn test_validator_block_store(version: ProtocolVersionId) { let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); let pool = ConnectionPool::test(false, version).await; + let account = &mut Account::random(); // Fill storage with unsigned L2 blocks. // Fetch a suffix of blocks that we will generate (fake) certs for. @@ -35,15 +38,21 @@ async fn test_validator_block_store(version: ProtocolVersionId) { // Start state keeper. 
let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; s.spawn_bg(runner.run(ctx)); - sk.push_random_blocks(rng, 10).await; + sk.push_random_blocks(rng, account, 10).await; pool.wait_for_payload(ctx, sk.last_block()).await?; let mut setup = SetupSpec::new(rng, 3); setup.first_block = validator::BlockNumber(4); let mut setup = Setup::from(setup); let mut conn = pool.connection(ctx).await.wrap("connection()")?; - conn.try_update_genesis(ctx, &setup.genesis) - .await - .wrap("try_update_genesis()")?; + conn.try_update_global_config( + ctx, + &consensus_dal::GlobalConfig { + genesis: setup.genesis.clone(), + registry_address: None, + }, + ) + .await + .wrap("try_update_global_config()")?; for i in setup.genesis.first_block.0..sk.last_block().next().0 { let i = validator::BlockNumber(i); let payload = conn @@ -95,6 +104,7 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); let cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { tracing::info!("Start state keeper."); @@ -103,7 +113,7 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { s.spawn_bg(runner.run(ctx)); tracing::info!("Populate storage with a bunch of blocks."); - sk.push_random_blocks(rng, 5).await; + sk.push_random_blocks(rng, account, 5).await; pool .wait_for_payload(ctx, sk.last_block()) .await @@ -118,7 +128,7 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { s.spawn_bg(run_main_node(ctx, cfg.config.clone(), cfg.secrets.clone(), pool.clone())); tracing::info!("Generate couple more blocks and wait for consensus to catch up."); - sk.push_random_blocks(rng, 3).await; + sk.push_random_blocks(rng, account, 3).await; pool .wait_for_block_certificate(ctx, sk.last_block()) .await @@ -126,7 +136,7 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { tracing::info!("Synchronously produce blocks one by one, and wait for consensus."); for _ in 0..2 { - sk.push_random_blocks(rng, 1).await; + sk.push_random_blocks(rng, account, 1).await; pool .wait_for_block_certificate(ctx, sk.last_block()) .await @@ -158,6 +168,7 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { tracing::info!("spawn validator"); @@ -173,7 +184,7 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { )); tracing::info!("produce some batches"); - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; validator.seal_batch().await; validator_pool .wait_for_block_certificate(ctx, validator.last_block()) @@ -191,7 +202,7 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { }); tracing::info!("produce more batches"); - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; validator.seal_batch().await; node_pool .wait_for_block_certificate(ctx, validator.last_block()) @@ -209,7 +220,7 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { }); tracing::info!("produce more blocks and compare storages"); - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; let want = validator_pool 
.wait_for_block_certificates_and_verify(ctx, validator.last_block()) .await?; @@ -243,6 +254,7 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let account = &mut Account::random(); // topology: // validator <-> node <-> node <-> ... @@ -264,7 +276,7 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { .context("validator") }); tracing::info!("Generate a couple of blocks, before initializing consensus genesis."); - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; // API server needs at least 1 L1 batch to start. validator.seal_batch().await; validator_pool @@ -299,7 +311,7 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { tracing::info!("Make validator produce blocks and wait for fetchers to get them."); // Note that block from before and after genesis have to be fetched. - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; let want_last = validator.last_block(); let want = validator_pool .wait_for_block_certificates_and_verify(ctx, want_last) @@ -328,6 +340,7 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { let rng = &mut ctx.rng(); let setup = Setup::new(rng, NODES); let cfgs = testonly::new_configs(rng, &setup, 1); + let account = &mut Account::random(); // Run all nodes in parallel. scope::run!(ctx, |ctx, s| async { @@ -342,7 +355,7 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { .context("main_node") }); tracing::info!("Generate a couple of blocks, before initializing consensus genesis."); - main_node.push_random_blocks(rng, 5).await; + main_node.push_random_blocks(rng, account, 5).await; // API server needs at least 1 L1 batch to start. 
main_node.seal_batch().await; main_node_pool @@ -381,7 +394,7 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { } tracing::info!("Make the main node produce blocks and wait for consensus to finalize them"); - main_node.push_random_blocks(rng, 5).await; + main_node.push_random_blocks(rng, account, 5).await; let want_last = main_node.last_block(); let want = main_node_pool .wait_for_block_certificates_and_verify(ctx, want_last) @@ -409,6 +422,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV let setup = Setup::new(rng, 1); let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); + let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { tracing::info!("Spawn validator."); @@ -433,7 +447,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; s.spawn_bg(runner.run(ctx)); s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg.clone())); - validator.push_random_blocks(rng, 3).await; + validator.push_random_blocks(rng, account, 3).await; node_pool .wait_for_block_certificate(ctx, validator.last_block()) .await?; @@ -447,7 +461,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; s.spawn_bg(runner.run(ctx)); s.spawn_bg(node.run_fetcher(ctx, client.clone())); - validator.push_random_blocks(rng, 3).await; + validator.push_random_blocks(rng, account, 3).await; node_pool .wait_for_payload(ctx, validator.last_block()) .await?; @@ -461,7 +475,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; s.spawn_bg(runner.run(ctx)); s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg)); - validator.push_random_blocks(rng, 3).await; + validator.push_random_blocks(rng, account, 3).await; let want = validator_pool .wait_for_block_certificates_and_verify(ctx, validator.last_block()) .await?; @@ -488,6 +502,7 @@ async fn test_with_pruning(version: ProtocolVersionId) { let setup = Setup::new(rng, 1); let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); + let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { let validator_pool = ConnectionPool::test(false, version).await; @@ -535,7 +550,7 @@ async fn test_with_pruning(version: ProtocolVersionId) { }); tracing::info!("Sync some blocks"); - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; validator.seal_batch().await; let to_prune = validator.last_sealed_batch(); tracing::info!( @@ -546,7 +561,7 @@ async fn test_with_pruning(version: ProtocolVersionId) { tracing::info!( "Seal another batch to make sure that there is at least 1 sealed batch after pruning." 
); - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; validator.seal_batch().await; validator_pool .wait_for_batch(ctx, validator.last_sealed_batch()) @@ -565,7 +580,7 @@ async fn test_with_pruning(version: ProtocolVersionId) { .prune_batches(ctx, to_prune) .await .context("prune_batches")?; - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; node_pool .wait_for_block_certificates(ctx, validator.last_block()) .await @@ -582,6 +597,7 @@ async fn test_centralized_fetcher(from_snapshot: bool, version: ProtocolVersionI zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); + let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { tracing::info!("Spawn a validator."); @@ -601,7 +617,7 @@ async fn test_centralized_fetcher(from_snapshot: bool, version: ProtocolVersionI s.spawn_bg(node.run_fetcher(ctx, validator.connect(ctx).await?)); tracing::info!("Produce some blocks and wait for node to fetch them"); - validator.push_random_blocks(rng, 10).await; + validator.push_random_blocks(rng, account, 10).await; let want = validator_pool .wait_for_payload(ctx, validator.last_block()) .await?; diff --git a/core/node/consensus/src/vm.rs b/core/node/consensus/src/vm.rs new file mode 100644 index 000000000000..f7f14ad8fe0a --- /dev/null +++ b/core/node/consensus/src/vm.rs @@ -0,0 +1,96 @@ +use anyhow::Context as _; +use zksync_concurrency::{ctx, error::Wrap as _, scope}; +use zksync_consensus_roles::attester; +use zksync_multivm::interface::TxExecutionMode; +use zksync_node_api_server::{ + execution_sandbox::{TransactionExecutor, TxExecutionArgs, TxSetupArgs, VmConcurrencyLimiter}, + tx_sender::MultiVMBaseSystemContracts, +}; +use zksync_state::PostgresStorageCaches; +use zksync_system_constants::DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE; +use zksync_types::{ + ethabi, fee::Fee, fee_model::BatchFeeInput, l2::L2Tx, AccountTreeId, L2ChainId, Nonce, U256, +}; +use zksync_vm_interface::ExecutionResult; + +use crate::{abi, storage::ConnectionPool}; + +/// VM executes eth_calls on the db. +#[derive(Debug)] +pub(crate) struct VM { + pool: ConnectionPool, + setup_args: TxSetupArgs, + limiter: VmConcurrencyLimiter, +} + +impl VM { + /// Constructs a new `VM` instance. 
+    pub async fn new(pool: ConnectionPool) -> Self {
+        Self {
+            pool,
+            setup_args: TxSetupArgs {
+                execution_mode: TxExecutionMode::EthCall,
+                operator_account: AccountTreeId::default(),
+                fee_input: BatchFeeInput::sensible_l1_pegged_default(),
+                base_system_contracts: scope::wait_blocking(
+                    MultiVMBaseSystemContracts::load_eth_call_blocking,
+                )
+                .await,
+                caches: PostgresStorageCaches::new(1, 1),
+                validation_computational_gas_limit: u32::MAX,
+                chain_id: L2ChainId::default(),
+                whitelisted_tokens_for_aa: vec![],
+                enforced_base_fee: None,
+            },
+            limiter: VmConcurrencyLimiter::new(1).0,
+        }
+    }
+
+    pub async fn call(
+        &self,
+        ctx: &ctx::Ctx,
+        batch: attester::BatchNumber,
+        address: abi::Address,
+        call: abi::Call,
+    ) -> ctx::Result {
+        let tx = L2Tx::new(
+            *address,
+            call.calldata().context("call.calldata()")?,
+            Nonce(0),
+            Fee {
+                gas_limit: U256::from(2000000000u32),
+                max_fee_per_gas: U256::zero(),
+                max_priority_fee_per_gas: U256::zero(),
+                gas_per_pubdata_limit: U256::from(DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE),
+            },
+            ethabi::Address::zero(),
+            U256::zero(),
+            vec![],
+            Default::default(),
+        );
+        let permit = ctx.wait(self.limiter.acquire()).await?.unwrap();
+        let mut conn = self.pool.connection(ctx).await.wrap("connection()")?;
+        let args = conn
+            .vm_block_args(ctx, batch)
+            .await
+            .wrap("vm_block_args()")?;
+        let output = ctx
+            .wait(TransactionExecutor::real(usize::MAX).execute_tx_in_sandbox(
+                permit,
+                self.setup_args.clone(),
+                TxExecutionArgs::for_eth_call(tx.clone()),
+                conn.0,
+                args,
+                None,
+                vec![],
+            ))
+            .await?
+            .context("execute_tx_in_sandbox()")?;
+        match output.vm.result {
+            ExecutionResult::Success { output } => {
+                Ok(call.decode_outputs(&output).context("decode_output()")?)
+            }
+            other => Err(anyhow::format_err!("unsuccessful execution: {other:?}").into()),
+        }
+    }
+}
diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml
index fe4889225675..d5b19a1d4b01 100644
--- a/core/node/node_framework/Cargo.toml
+++ b/core/node/node_framework/Cargo.toml
@@ -64,6 +64,7 @@ futures.workspace = true
 anyhow.workspace = true
 tokio = { workspace = true, features = ["rt"] }
 ctrlc.workspace = true
+semver.workspace = true
 
 [dev-dependencies]
 zksync_env_config.workspace = true
diff --git a/core/node/node_framework/src/implementations/layers/consensus/external_node.rs b/core/node/node_framework/src/implementations/layers/consensus/external_node.rs
index 14365384c1a4..5acdab568e74 100644
--- a/core/node/node_framework/src/implementations/layers/consensus/external_node.rs
+++ b/core/node/node_framework/src/implementations/layers/consensus/external_node.rs
@@ -23,6 +23,7 @@ use crate::{
 /// Wiring layer for external node consensus component.
#[derive(Debug)] pub struct ExternalNodeConsensusLayer { + pub build_version: semver::Version, pub config: Option, pub secrets: Option, } @@ -78,6 +79,7 @@ impl WiringLayer for ExternalNodeConsensusLayer { }; let consensus_task = ExternalNodeTask { + build_version: self.build_version, config, pool, main_node_client, @@ -90,6 +92,7 @@ impl WiringLayer for ExternalNodeConsensusLayer { #[derive(Debug)] pub struct ExternalNodeTask { + build_version: semver::Version, config: Option<(ConsensusConfig, ConsensusSecrets)>, pool: ConnectionPool, main_node_client: Box>, @@ -118,6 +121,7 @@ impl Task for ExternalNodeTask { self.sync_state, self.main_node_client, self.action_queue_sender, + self.build_version, )); // `run_external_node` might return an error or panic, // in which case we need to return immediately, diff --git a/core/node/node_sync/src/client.rs b/core/node/node_sync/src/client.rs index d064803eab59..ee89db10ddd1 100644 --- a/core/node/node_sync/src/client.rs +++ b/core/node/node_sync/src/client.rs @@ -42,12 +42,7 @@ pub trait MainNodeClient: 'static + Send + Sync + fmt::Debug { with_transactions: bool, ) -> EnrichedClientResult>; - async fn fetch_consensus_genesis(&self) -> EnrichedClientResult>; - async fn fetch_genesis_config(&self) -> EnrichedClientResult; - - async fn fetch_attestation_status(&self) - -> EnrichedClientResult>; } #[async_trait] @@ -133,20 +128,6 @@ impl MainNodeClient for Box> { .with_arg("with_transactions", &with_transactions) .await } - - async fn fetch_consensus_genesis(&self) -> EnrichedClientResult> { - self.consensus_genesis() - .rpc_context("consensus_genesis") - .await - } - - async fn fetch_attestation_status( - &self, - ) -> EnrichedClientResult> { - self.attestation_status() - .rpc_context("attestation_status") - .await - } } /// Main node health check. 
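// A minimal sketch of how the new `build_version` could be supplied when wiring
// `ExternalNodeConsensusLayer`, assuming the node binary derives it from its own
// crate version; the helper below is illustrative and not taken from this diff.
use anyhow::Context as _;

fn node_build_version() -> anyhow::Result<semver::Version> {
    // The `semver` dependency added above lets consensus components compare and
    // advertise node versions; `CARGO_PKG_VERSION` is one plausible source.
    semver::Version::parse(env!("CARGO_PKG_VERSION")).context("invalid crate version")
}

// Usage when constructing the layer (field types as declared above):
// ExternalNodeConsensusLayer { build_version: node_build_version()?, config, secrets }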
diff --git a/core/node/node_sync/src/testonly.rs b/core/node/node_sync/src/testonly.rs index b9e1adc995af..16027a71a251 100644 --- a/core/node/node_sync/src/testonly.rs +++ b/core/node/node_sync/src/testonly.rs @@ -71,18 +71,6 @@ impl MainNodeClient for MockMainNodeClient { Ok(Some(block)) } - async fn fetch_consensus_genesis( - &self, - ) -> EnrichedClientResult> { - unimplemented!() - } - - async fn fetch_attestation_status( - &self, - ) -> EnrichedClientResult> { - unimplemented!() - } - async fn fetch_genesis_config(&self) -> EnrichedClientResult { Ok(mock_genesis_config()) } diff --git a/core/node/state_keeper/src/testonly/mod.rs b/core/node/state_keeper/src/testonly/mod.rs index 0ce8c06be0e7..23aec8af49fb 100644 --- a/core/node/state_keeper/src/testonly/mod.rs +++ b/core/node/state_keeper/src/testonly/mod.rs @@ -117,7 +117,7 @@ pub async fn fund(pool: &ConnectionPool, addresses: &[Address]) { pub(crate) const DEFAULT_GAS_PER_PUBDATA: u32 = 10000; -pub(crate) fn fee(gas_limit: u32) -> Fee { +pub fn fee(gas_limit: u32) -> Fee { Fee { gas_limit: U256::from(gas_limit), max_fee_per_gas: SYSTEM_CONTEXT_MINIMAL_BASE_FEE.into(), diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 24e8638876bf..c3cfada3a1a9 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -7620,9 +7620,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0e31a9fc9a390b440cd12bbe040330dc64f64697a8a8ecbc3beb98cd0747909" +checksum = "a49ad68bfaf6fb8542c68894b68b28be31514786549855aaa8a46b36defbb100" dependencies = [ "anyhow", "once_cell", @@ -7656,9 +7656,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efb7ff3ec44b7b92fd4e28d9d92b83d61dc74125ccfc90bcfb27a5750d8a8580" +checksum = "ace39bdf50b8421c4d546381fe1ecc5212f953ce61cf93d4fa69172078dbe4af" dependencies = [ "anyhow", "blst", @@ -7680,9 +7680,9 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72223c0b20621775db51bcc4b043addafeaf784d444af2ad4bc8bcdee477367c" +checksum = "06277266e31efdc1465f6a27ce96c7435392a270978a91956b8a848732df2cfa" dependencies = [ "anyhow", "bit-vec", @@ -7702,9 +7702,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d1750ad93f7e3a0c2f5880f9bcc1244a3b46d3e6c124c4f65f545032b87464" +checksum = "9099b2295f550176d824b5287f2f31b7739c4d10247faec1132f1c6e9d18059c" dependencies = [ "anyhow", "async-trait", @@ -7722,9 +7722,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ff679f8b5f671d887a750b8107f3b5c01fd6085f68eef37ab01de8d2bd0736b" +checksum = "4d624f55e2449f43b2c85588b5dd2a28b3c5ea629effc89df76e3254f8d9d2fb" dependencies = [ "anyhow", "rand 0.8.5", @@ -8034,9 +8034,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f4f6ba3bf0aac20de18b4ae18a22d8c81b83f8f72e8fdec1c879525ecdacd2f5" +checksum = "d26fb2beb3aeafb5e9babf1acf6494662cc7157b893fa248bd151494f931d07f" dependencies = [ "anyhow", "bit-vec", @@ -8055,9 +8055,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7798c248b9a64505f0586bd5fadad6b26c999be4a8dec6b1a86b10b3888169c5" +checksum = "58e86c198e056d921b4f3f1d2755c23d090e942b5a70b03bcb7e7c02445aa491" dependencies = [ "anyhow", "heck 0.5.0", diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock index cd5d6a0b280e..75859021979f 100644 --- a/zk_toolbox/Cargo.lock +++ b/zk_toolbox/Cargo.lock @@ -6349,9 +6349,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0e31a9fc9a390b440cd12bbe040330dc64f64697a8a8ecbc3beb98cd0747909" +checksum = "a49ad68bfaf6fb8542c68894b68b28be31514786549855aaa8a46b36defbb100" dependencies = [ "anyhow", "once_cell", @@ -6383,9 +6383,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ff679f8b5f671d887a750b8107f3b5c01fd6085f68eef37ab01de8d2bd0736b" +checksum = "4d624f55e2449f43b2c85588b5dd2a28b3c5ea629effc89df76e3254f8d9d2fb" dependencies = [ "anyhow", "rand", @@ -6434,9 +6434,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f6ba3bf0aac20de18b4ae18a22d8c81b83f8f72e8fdec1c879525ecdacd2f5" +checksum = "d26fb2beb3aeafb5e9babf1acf6494662cc7157b893fa248bd151494f931d07f" dependencies = [ "anyhow", "bit-vec", @@ -6455,9 +6455,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7798c248b9a64505f0586bd5fadad6b26c999be4a8dec6b1a86b10b3888169c5" +checksum = "58e86c198e056d921b4f3f1d2755c23d090e942b5a70b03bcb7e7c02445aa491" dependencies = [ "anyhow", "heck", diff --git a/zk_toolbox/Cargo.toml b/zk_toolbox/Cargo.toml index 4a08776558ed..e1ad63136af1 100644 --- a/zk_toolbox/Cargo.toml +++ b/zk_toolbox/Cargo.toml @@ -30,7 +30,7 @@ types = { path = "crates/types" } zksync_config = { path = "../core/lib/config" } zksync_protobuf_config = { path = "../core/lib/protobuf_config" } zksync_basic_types = { path = "../core/lib/basic_types" } -zksync_protobuf = "=0.1.0-rc.11" +zksync_protobuf = "=0.1.0-rc.12" # External dependencies anyhow = "1.0.82"