From b30864b881299b820aff56c9f3b2ce7a351e1cb2 Mon Sep 17 00:00:00 2001
From: Illia Polosukhin
Date: Mon, 8 Jun 2020 15:03:38 -0700
Subject: [PATCH] feat(chain): Upgradability functionality (#2701)

* Adding nightly test for upgradability: checks that after proper number of epochs active protocol versions match
* Pipe protocol version via epoch manager/runtime
* Working on weaving upgradable block headers
* Make updatable BlockHeader compile.
* Adding support for legacy communication protocol
* Fix up networking to use oldest backward compatible version
* Update sample genesis to use latest protocol, while adding marker for first backward compatible one
* Remove backward compatibility. Bring protocol version from epoch manager to client
* Adding database version for future migrations
* Update rocksdb to 0.14
* Adding migration and update sample json
* Fix merge conflict
* Make Block data structure upgradable
* Version tracking in the epoch manager
* Bump protocol version after merge
* Current protocol version in status request
* Bump 0.6.2 & use Box
* Move migration to the right place
* Fix up merge conflicts
* Fix comments
* Reset Cargo.lock to master

Co-authored-by: Alexander Skidanov
---
 Cargo.lock | 60 +-
 chain/chain/Cargo.toml | 2 +-
 chain/chain/src/chain.rs | 539 +++++++++---------
 chain/chain/src/doomslug.rs | 2 +-
 chain/chain/src/lightclient.rs | 29 +-
 chain/chain/src/store.rs | 191 +++----
 chain/chain/src/store_validator/validate.rs | 28 +-
 chain/chain/src/test_utils.rs | 89 ++-
 chain/chain/src/types.rs | 68 ++-
 chain/chain/src/validate.rs | 42 +-
 chain/chain/tests/challenges.rs | 26 +-
 chain/chain/tests/gc.rs | 19 +-
 chain/chain/tests/simple_chain.rs | 80 +--
 chain/chain/tests/sync_chain.rs | 4 +-
 chain/chunks/Cargo.toml | 2 +-
 chain/chunks/src/lib.rs | 4 +-
 chain/chunks/src/test_utils.rs | 13 +-
 chain/client/Cargo.toml | 2 +-
 chain/client/src/client.rs | 98 ++--
 chain/client/src/client_actor.rs | 58 +-
 chain/client/src/info.rs | 2 +-
 chain/client/src/sync.rs | 70 +--
 chain/client/src/test_utils.rs | 20 +-
 chain/client/src/view_client.rs | 49 +-
 chain/client/tests/bug_repros.rs | 18 +-
 chain/client/tests/catching_up.rs | 96 ++--
 chain/client/tests/challenges.rs | 116 ++--
 chain/client/tests/chunks_management.rs | 64 +--
 chain/client/tests/consensus.rs | 39 +-
 chain/client/tests/cross_shard_tx.rs | 2 +-
 chain/client/tests/process_blocks.rs | 126 ++--
 chain/client/tests/query_client.rs | 19 +-
 chain/epoch_manager/Cargo.toml | 3 +-
 chain/epoch_manager/src/lib.rs | 382 ++++---------
 chain/epoch_manager/src/proposals.rs | 19 +-
 chain/epoch_manager/src/test_utils.rs | 37 +-
 chain/epoch_manager/src/types.rs | 49 +-
 chain/jsonrpc/Cargo.toml | 2 +-
 chain/jsonrpc/tests/rpc_query.rs | 3 +-
 chain/network/Cargo.toml | 2 +-
 chain/network/src/peer.rs | 18 +-
 chain/network/src/recorder.rs | 2 +-
 chain/network/src/types.rs | 8 +-
 chain/network/tests/runner/mod.rs | 2 +
 chain/pool/Cargo.toml | 2 +-
 core/chain-configs/src/client_config.rs | 3 +-
 core/chain-configs/src/genesis_config.rs | 24 +-
 core/chain-configs/src/lib.rs | 7 +-
 core/crypto/Cargo.toml | 2 +-
 core/primitives/Cargo.toml | 2 +-
 core/primitives/benches/serialization.rs | 7 +-
 core/primitives/src/block.rs | 515 ++++-------------
 core/primitives/src/block_header.rs | 488 ++++++++++++++++
 core/primitives/src/lib.rs | 2 +
 core/primitives/src/test_utils.rs | 46 +-
 core/primitives/src/types.rs | 7 -
 core/primitives/src/validator_signer.rs | 16 +-
 core/primitives/src/version.rs | 22 +
 core/primitives/src/views.rs | 106 ++--
core/store/Cargo.toml | 3 +- core/store/src/db.rs | 137 +++-- core/store/src/lib.rs | 25 +- core/store/src/validate.rs | 1 + .../src/csv_to_json_configs.rs | 9 +- genesis-tools/genesis-populate/Cargo.toml | 2 +- genesis-tools/genesis-populate/src/lib.rs | 22 +- neard/Cargo.toml | 2 +- neard/res/genesis_config.json | 10 +- neard/src/config.rs | 23 +- neard/src/lib.rs | 40 +- neard/src/main.rs | 4 +- neard/src/runtime.rs | 143 ++--- neard/src/shard_tracker.rs | 15 +- neard/tests/economics.rs | 19 +- neard/tests/rpc_nodes.rs | 10 +- neard/tests/sync_nodes.rs | 27 +- nightly/nightly.txt | 1 + pytest/empty-contract-rs/Cargo.toml | 2 +- pytest/lib/branches.py | 25 +- pytest/lib/utils.py | 18 +- pytest/tests/sanity/backward_compatible.py | 5 +- pytest/tests/sanity/upgradable.py | 76 +++ runtime/near-vm-errors/Cargo.toml | 2 +- runtime/runtime-params-estimator/Cargo.toml | 2 +- runtime/runtime/Cargo.toml | 2 +- scripts/migrations/22-protocol-upgrade.py | 27 + test-utils/loadtester/Cargo.toml | 2 +- test-utils/loadtester/src/main.rs | 3 +- test-utils/state-viewer/src/main.rs | 54 +- test-utils/state-viewer/src/state_dump.rs | 37 +- test-utils/testlib/Cargo.toml | 2 +- test-utils/testlib/src/lib.rs | 4 +- 92 files changed, 2419 insertions(+), 2088 deletions(-) create mode 100644 core/primitives/src/block_header.rs create mode 100644 core/primitives/src/version.rs create mode 100644 core/store/src/validate.rs create mode 100644 pytest/tests/sanity/upgradable.py create mode 100644 scripts/migrations/22-protocol-upgrade.py diff --git a/Cargo.lock b/Cargo.lock index 563dde4e240..8b9fc6efac6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -513,25 +513,25 @@ dependencies = [ [[package]] name = "borsh" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "borsh-derive 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "borsh-derive 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "borsh-derive" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "borsh-derive-internal 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", - "borsh-schema-derive-internal 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "borsh-derive-internal 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "borsh-schema-derive-internal 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "syn 1.0.18 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "borsh-derive-internal" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", @@ -541,7 +541,7 @@ dependencies = [ [[package]] name = "borsh-schema-derive-internal" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "proc-macro2 1.0.10 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1238,7 +1238,7 @@ dependencies = [ name = "genesis-populate" version = "0.1.0" dependencies = [ - "borsh 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "borsh 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.4 
(registry+https://github.com/rust-lang/crates.io-index)", "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", "indicatif 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1641,7 +1641,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" name = "loadtester" version = "0.1.0" dependencies = [ - "borsh 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "borsh 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1839,7 +1839,7 @@ dependencies = [ name = "near-chain" version = "0.1.0" dependencies = [ - "borsh 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "borsh 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "cached 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "chrono 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", "failure 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1880,7 +1880,7 @@ name = "near-chunks" version = "0.1.0" dependencies = [ "actix 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "borsh 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "borsh 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "cached 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "chrono 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1903,7 +1903,7 @@ version = "0.1.0" dependencies = [ "actix 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "borsh 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "borsh 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "cached 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "chrono 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1938,7 +1938,7 @@ version = "0.1.0" dependencies = [ "arrayref 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "blake2 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", - "borsh 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "borsh 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "bs58 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "c2-chacha 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "curve25519-dalek 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1960,7 +1960,7 @@ dependencies = [ name = "near-epoch-manager" version = "0.0.1" dependencies = [ - "borsh 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "borsh 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "cached 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "near-chain 0.1.0", @@ 
-1973,6 +1973,7 @@ dependencies = [ "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.51 (registry+https://github.com/rust-lang/crates.io-index)", + "smart-default 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1982,7 +1983,7 @@ dependencies = [ "actix 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "actix-cors 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "actix-web 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "borsh 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "borsh 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "near-chain-configs 0.1.0", @@ -2036,7 +2037,7 @@ version = "0.1.0" dependencies = [ "actix 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "bencher 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "borsh 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "borsh 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "bytes 0.5.4 (registry+https://github.com/rust-lang/crates.io-index)", "cached 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2067,7 +2068,7 @@ dependencies = [ name = "near-pool" version = "0.1.0" dependencies = [ - "borsh 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "borsh 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "near-crypto 0.1.0", "near-primitives 0.1.0", "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2079,7 +2080,7 @@ version = "0.1.0" dependencies = [ "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", "bencher 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "borsh 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "borsh 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "bs58 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "chrono 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2161,7 +2162,7 @@ name = "near-store" version = "0.1.0" dependencies = [ "bencher 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", - "borsh 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "borsh 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "cached 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "derive_more 0.99.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2172,6 +2173,7 @@ dependencies = [ "rand 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", "rocksdb 0.14.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.51 
(registry+https://github.com/rust-lang/crates.io-index)", "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2193,7 +2195,7 @@ dependencies = [ name = "near-vm-errors" version = "0.9.1" dependencies = [ - "borsh 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "borsh 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "near-rpc-error-macro 0.1.0", "serde 1.0.106 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2271,7 +2273,7 @@ name = "neard" version = "1.0.0" dependencies = [ "actix 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "borsh 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "borsh 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "chrono 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2336,7 +2338,7 @@ version = "0.9.0" dependencies = [ "assert_matches 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "borsh 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "borsh 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "cached 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "indicatif 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2947,7 +2949,7 @@ dependencies = [ name = "runtime-params-estimator" version = "0.9.0" dependencies = [ - "borsh 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "borsh 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", "csv 1.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "gnuplot 0.0.32 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3239,7 +3241,7 @@ name = "state-viewer" version = "0.1.0" dependencies = [ "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "borsh 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "borsh 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", "near-chain 0.1.0", "near-chain-configs 0.1.0", @@ -3393,7 +3395,7 @@ version = "0.1.0" dependencies = [ "actix 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "assert_matches 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "borsh 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "borsh 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)", "futures 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -4097,10 +4099,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum blake3 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = 
"423897d97e11b810c9da22458400b28ec866991c711409073662eb34dc44bfff" "checksum block-buffer 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" "checksum block-padding 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" -"checksum borsh 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9dada4c07fa726bc195503048581e7b1719407f7fbef82741f7b149d3921b3" -"checksum borsh-derive 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "47c6bed3dd7695230e85bd51b6a4e4e4dc7550c1974a79c11e98a8a055211a61" -"checksum borsh-derive-internal 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d34f80970434cd6524ae676b277d024b87dd93ecdd3f53bf470d61730dc6cb80" -"checksum borsh-schema-derive-internal 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b3b93230d3769ea99ac75a8a7fee2a229defbc56fe8816c9cde8ed78c848aa33" +"checksum borsh 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c7769f8f6fdc6ac7617bbc8bc7ef9dc263cd459d99d21cf2ab4afc3bc8d7d70d" +"checksum borsh-derive 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d2689a82a5fe57f9e71997b16bea340da338c7fb8db400b8d9d55b59010540d8" +"checksum borsh-derive-internal 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "39b621f19e9891a34f679034fa2238260e27c0eddfe2804e9fb282061cf9b294" +"checksum borsh-schema-derive-internal 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "befebdb9e223ae4528b3d597dbbfb5c68566822d2a3de3e260f235360773ba29" "checksum brotli-sys 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4445dea95f4c2b41cde57cc9fee236ae4dbae88d8fcbdb4750fc1bb5d86aaecd" "checksum brotli2 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "0cb036c3eade309815c15ddbacec5b22c4d1f3983a774ab2eac2e3e9ea85568e" "checksum bs58 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "476e9cd489f9e121e02ffa6014a8ef220ecb15c05ed23fc34cca13925dc283fb" diff --git a/chain/chain/Cargo.toml b/chain/chain/Cargo.toml index cb71eb3ccfb..71aef5c2497 100644 --- a/chain/chain/Cargo.toml +++ b/chain/chain/Cargo.toml @@ -17,7 +17,7 @@ lazy_static = "1.4" num-rational = "0.2.4" tracing = "0.1.13" -borsh = "0.6.1" +borsh = "0.6.2" near-chain-configs = { path = "../../core/chain-configs" } near-crypto = { path = "../../core/crypto" } diff --git a/chain/chain/src/chain.rs b/chain/chain/src/chain.rs index ba5c0f15fa2..53c0d6553c0 100644 --- a/chain/chain/src/chain.rs +++ b/chain/chain/src/chain.rs @@ -7,6 +7,10 @@ use borsh::BorshSerialize; use chrono::prelude::{DateTime, Utc}; use chrono::Duration; use log::{debug, error, info}; +use num_rational::Rational; +use rand::rngs::StdRng; +use rand::seq::SliceRandom; +use rand::SeedableRng; use near_chain_configs::GenesisConfig; use near_primitives::block::{genesis_chunks, Tip}; @@ -28,6 +32,7 @@ use near_primitives::types::{ MerkleHash, NumBlocks, ShardId, StateHeaderKey, ValidatorStake, }; use near_primitives::unwrap_or_return; +use near_primitives::version::ProtocolVersion; use near_primitives::views::{ ExecutionOutcomeWithIdView, ExecutionStatusView, FinalExecutionOutcomeView, FinalExecutionStatus, LightClientBlockView, @@ -40,9 +45,9 @@ use crate::store::{ ChainStore, ChainStoreAccess, ChainStoreUpdate, GCMode, ShardInfo, 
StateSyncInfo, }; use crate::types::{ - AcceptedBlock, ApplyTransactionResult, Block, BlockHeader, BlockStatus, BlockSyncResponse, - Provenance, ReceiptList, ReceiptProofResponse, ReceiptResponse, RootProof, RuntimeAdapter, - ShardStateSyncResponseHeader, StatePartKey, + AcceptedBlock, ApplyTransactionResult, Block, BlockHeader, BlockHeaderInfo, BlockStatus, + BlockSyncResponse, Provenance, ReceiptList, ReceiptProofResponse, ReceiptResponse, RootProof, + RuntimeAdapter, ShardStateSyncResponseHeader, StatePartKey, }; use crate::validate::{ validate_challenge, validate_chunk_proofs, validate_chunk_with_chunk_extra, @@ -50,10 +55,6 @@ use crate::validate::{ }; use crate::{byzantine_assert, create_light_client_block_view, Doomslug}; use crate::{metrics, DoomslugThresholdMode}; -use num_rational::Rational; -use rand::rngs::StdRng; -use rand::seq::SliceRandom; -use rand::SeedableRng; /// Maximum number of orphans chain can store. pub const MAX_ORPHAN_SIZE: usize = 1024; @@ -117,12 +118,12 @@ impl OrphanBlockPool { fn add(&mut self, orphan: Orphan) { let height_hashes = - self.height_idx.entry(orphan.block.header.inner_lite.height).or_insert_with(|| vec![]); - height_hashes.push(orphan.block.hash()); + self.height_idx.entry(orphan.block.header().height()).or_insert_with(|| vec![]); + height_hashes.push(*orphan.block.hash()); let prev_hash_entries = - self.prev_hash_idx.entry(orphan.block.header.prev_hash).or_insert_with(|| vec![]); - prev_hash_entries.push(orphan.block.hash()); - self.orphans.insert(orphan.block.hash(), orphan); + self.prev_hash_idx.entry(*orphan.block.header().prev_hash()).or_insert_with(|| vec![]); + prev_hash_entries.push(*orphan.block.hash()); + self.orphans.insert(*orphan.block.hash(), orphan); if self.orphans.len() > MAX_ORPHAN_SIZE { let old_len = self.orphans.len(); @@ -185,6 +186,7 @@ pub struct ChainGenesis { pub gas_price_adjustment_rate: Rational, pub transaction_validity_period: NumBlocks, pub epoch_length: BlockHeightDelta, + pub protocol_version: ProtocolVersion, } impl ChainGenesis { @@ -198,6 +200,7 @@ impl ChainGenesis { gas_price_adjustment_rate: Rational, transaction_validity_period: NumBlocks, epoch_length: BlockHeightDelta, + protocol_version: ProtocolVersion, ) -> Self { Self { time, @@ -209,6 +212,7 @@ impl ChainGenesis { gas_price_adjustment_rate, transaction_validity_period, epoch_length, + protocol_version, } } } @@ -229,6 +233,7 @@ where genesis_config.gas_price_adjustment_rate, genesis_config.transaction_validity_period, genesis_config.epoch_length, + genesis_config.protocol_version, ) } } @@ -264,6 +269,7 @@ impl Chain { chain_genesis.height, ); let genesis = Block::genesis( + chain_genesis.protocol_version, genesis_chunks.iter().map(|chunk| chunk.header.clone()).collect(), chain_genesis.time, chain_genesis.height, @@ -282,7 +288,7 @@ impl Chain { // Check that genesis in the store is the same as genesis given in the config. 
let genesis_hash = store_update.get_block_hash_by_height(chain_genesis.height)?; - if genesis_hash != genesis.hash() { + if &genesis_hash != genesis.hash() { return Err(ErrorKind::Other(format!( "Genesis mismatch between storage and config: {:?} vs {:?}", genesis_hash, @@ -308,26 +314,20 @@ impl Chain { for chunk in genesis_chunks { store_update.save_chunk(&chunk.chunk_hash, chunk.clone()); } - runtime_adapter.add_validator_proposals( - CryptoHash::default(), - genesis.hash(), - genesis.header.inner_rest.random_value, - genesis.header.inner_lite.height, + runtime_adapter.add_validator_proposals(BlockHeaderInfo::new( + &genesis.header(), // genesis height is considered final chain_genesis.height, - vec![], - vec![], - vec![], - chain_genesis.total_supply, - )?; - store_update.save_block_header(genesis.header.clone())?; + ))?; + store_update.save_block_header(genesis.header().clone())?; store_update.save_block(genesis.clone()); store_update.save_block_extra( &genesis.hash(), BlockExtra { challenges_result: vec![] }, ); - for (chunk_header, state_root) in genesis.chunks.iter().zip(state_roots.iter()) + for (chunk_header, state_root) in + genesis.chunks().iter().zip(state_roots.iter()) { store_update.save_chunk_extra( &genesis.hash(), @@ -343,7 +343,7 @@ impl Chain { ); } - head = Tip::from_header(&genesis.header); + head = Tip::from_header(genesis.header()); store_update.save_head(&head)?; store_update.save_sync_head(&head); @@ -363,7 +363,7 @@ impl Chain { runtime_adapter, orphans: OrphanBlockPool::new(), blocks_with_missing_chunks: OrphanBlockPool::new(), - genesis: genesis.header, + genesis: genesis.header().clone(), transaction_validity_period: chain_genesis.transaction_validity_period, epoch_length: chain_genesis.epoch_length, block_economics_config: BlockEconomicsConfig { @@ -403,15 +403,15 @@ impl Chain { chain_store: &mut dyn ChainStoreAccess, ) -> Result { let final_block_header = { - let ret = chain_store.get_block_header(&header.inner_rest.last_final_block)?.clone(); - let two_ahead = chain_store.get_header_by_height(ret.inner_lite.height + 2)?; - if two_ahead.inner_lite.epoch_id != ret.inner_lite.epoch_id { - let one_ahead = chain_store.get_header_by_height(ret.inner_lite.height + 1)?; - if one_ahead.inner_lite.epoch_id != ret.inner_lite.epoch_id { - let new_final_hash = ret.inner_rest.last_final_block.clone(); + let ret = chain_store.get_block_header(header.last_final_block())?.clone(); + let two_ahead = chain_store.get_header_by_height(ret.height() + 2)?; + if two_ahead.epoch_id() != ret.epoch_id() { + let one_ahead = chain_store.get_header_by_height(ret.height() + 1)?; + if one_ahead.epoch_id() != ret.epoch_id() { + let new_final_hash = ret.last_final_block().clone(); chain_store.get_block_header(&new_final_hash)?.clone() } else { - let new_final_hash = one_ahead.inner_rest.last_final_block.clone(); + let new_final_hash = one_ahead.last_final_block().clone(); chain_store.get_block_header(&new_final_hash)?.clone() } } else { @@ -420,8 +420,8 @@ impl Chain { }; let next_block_producers = get_epoch_block_producers_view( - &final_block_header.inner_lite.next_epoch_id, - &header.prev_hash, + &final_block_header.next_epoch_id(), + header.prev_hash(), runtime_adapter, )?; @@ -568,7 +568,7 @@ impl Chain { let blocks_current_height = blocks_current_height.values().flatten().cloned().collect::>(); if let Some(block_hash) = blocks_current_height.first() { - let prev_hash = chain_store_update.get_block_header(block_hash)?.prev_hash; + let prev_hash = 
*chain_store_update.get_block_header(block_hash)?.prev_hash(); let prev_block_refcount = *chain_store_update.get_block_refcount(&prev_hash)?; if prev_block_refcount > 1 { // Block of `prev_hash` starts a Fork, stopping @@ -609,7 +609,7 @@ impl Chain { let mut chain_store_update = self.store.store_update(); if *chain_store_update.get_block_refcount(¤t_hash)? == 0 { let prev_hash = - chain_store_update.get_block_header(¤t_hash)?.prev_hash; + *chain_store_update.get_block_header(¤t_hash)?.prev_hash(); // It's safe to call `clear_block_data` for prev data because it clears fork only here chain_store_update @@ -686,7 +686,7 @@ impl Chain { F2: Copy + FnMut(Vec) -> (), F3: Copy + FnMut(ChallengeBody) -> (), { - let block_hash = block.hash(); + let block_hash = *block.hash(); let timer = near_metrics::start_timer(&metrics::BLOCK_PROCESSING_TIME); let res = self.process_block_single( me, @@ -750,10 +750,10 @@ impl Chain { F: Copy + FnMut(ChallengeBody) -> (), { // Sort headers by heights if they are out of order. - headers.sort_by(|left, right| left.inner_lite.height.cmp(&right.inner_lite.height)); + headers.sort_by(|left, right| left.height().cmp(&right.height())); if let Some(header) = headers.first() { - debug!(target: "chain", "Sync block headers: {} headers from {} at {}", headers.len(), header.hash(), header.inner_lite.height); + debug!(target: "chain", "Sync block headers: {} headers from {} at {}", headers.len(), header.hash(), header.height()); } else { return Ok(()); }; @@ -790,17 +790,10 @@ impl Chain { chain_update.commit()?; // Add validator proposals for given header. - self.runtime_adapter.add_validator_proposals( - header.prev_hash, - header.hash(), - header.inner_rest.random_value, - header.inner_lite.height, - self.store.get_block_height(&header.inner_rest.last_final_block)?, - header.inner_rest.validator_proposals.clone(), - vec![], - header.inner_rest.chunk_mask.clone(), - header.inner_rest.total_supply, - )?; + self.runtime_adapter.add_validator_proposals(BlockHeaderInfo::new( + &header, + self.store.get_block_height(&header.last_final_block())?, + ))?; } } @@ -844,18 +837,18 @@ impl Chain { let mut oldest_height = header_head.height; let mut current = self.get_block_header(&header_head.last_block_hash).map(|h| h.clone()); while let Ok(header) = current { - if header.inner_lite.height <= block_head.height { + if header.height() <= block_head.height { if self.is_on_current_chain(&header).is_ok() { break; } } - oldest_height = header.inner_lite.height; - hashes.push(header.hash()); + oldest_height = header.height(); + hashes.push(*header.hash()); current = self.get_previous_header(&header).map(|h| h.clone()); } let next_epoch_id = - self.get_block_header(&block_head.last_block_hash)?.inner_lite.next_epoch_id.clone(); + self.get_block_header(&block_head.last_block_hash)?.next_epoch_id().clone(); // Don't run State Sync if header head is not more than one epoch ahead. if block_head.epoch_id != header_head.epoch_id && next_epoch_id != header_head.epoch_id { @@ -874,7 +867,7 @@ impl Chain { /// Returns if given block header is on the current chain. 
fn is_on_current_chain(&mut self, header: &BlockHeader) -> Result<(), Error> { - let chain_header = self.get_header_by_height(header.inner_lite.height)?; + let chain_header = self.get_header_by_height(header.height())?; if chain_header.hash() == header.hash() { Ok(()) } else { @@ -886,7 +879,7 @@ impl Chain { pub fn find_common_header(&mut self, hashes: &[CryptoHash]) -> Option { for hash in hashes { if let Ok(header) = self.get_block_header(&hash).map(|h| h.clone()) { - if let Ok(header_at_height) = self.get_header_by_height(header.inner_lite.height) { + if let Ok(header_at_height) = self.get_header_by_height(header.height()) { if header.hash() == header_at_height.hash() { return Some(header); } @@ -921,7 +914,7 @@ impl Chain { pub fn reset_data_pre_state_sync(&mut self, sync_hash: CryptoHash) -> Result<(), Error> { // Get header we were syncing into. let header = self.get_block_header(&sync_hash)?; - let gc_height = header.inner_lite.height; + let gc_height = header.height(); // GC all the data from current tail up to `gc_height` let tail = self.store.tail()?; @@ -973,12 +966,12 @@ impl Chain { { // Get header we were syncing into. let header = self.get_block_header(&sync_hash)?; - let hash = header.prev_hash; + let hash = *header.prev_hash(); let prev_block = self.get_block(&hash)?; - let new_tail = prev_block.header.inner_lite.height; + let new_tail = prev_block.header().height(); let new_chunk_tail = - prev_block.chunks.iter().map(|x| x.inner.height_created).min().unwrap(); - let tip = Tip::from_header(&prev_block.header); + prev_block.chunks().iter().map(|x| x.inner.height_created).min().unwrap(); + let tip = Tip::from_header(prev_block.header()); // Update related heads now. let mut chain_store_update = self.mut_store().store_update(); chain_store_update.save_body_head(&tip)?; @@ -1000,18 +993,18 @@ impl Chain { me: &Option, block: &Block, ) -> Result<(), Error> { - let prev_hash = block.header.prev_hash; + let prev_hash = *block.header().prev_hash(); let shards_to_dl = self.get_shards_to_dl_state(me, &prev_hash); let prev_block = self.get_block(&prev_hash)?; debug!(target: "chain", "Downloading state for {:?}, I'm {:?}", shards_to_dl, me); let state_dl_info = StateSyncInfo { - epoch_tail_hash: block.header.hash(), + epoch_tail_hash: *block.header().hash(), shards: shards_to_dl .iter() .map(|shard_id| { - let chunk = &prev_block.chunks[*shard_id as usize]; + let chunk = &prev_block.chunks()[*shard_id as usize]; ShardInfo(*shard_id, chunk.chunk_hash()) }) .collect(), @@ -1042,7 +1035,7 @@ impl Chain { { near_metrics::inc_counter(&metrics::BLOCK_PROCESSED_TOTAL); - if block.chunks.len() != self.runtime_adapter.num_shards() as usize { + if block.chunks().len() != self.runtime_adapter.num_shards() as usize { return Err(ErrorKind::IncorrectNumberOfChunkHeaders.into()); } @@ -1087,9 +1080,8 @@ impl Chain { } // Sum validator balances in full NEARs (divided by 10**24) let sum = block - .header - .inner_rest - .validator_proposals + .header() + .validator_proposals() .iter() .map(|validator_stake| (validator_stake.stake / NEAR_BASE) as i64) .sum::(); @@ -1098,7 +1090,7 @@ impl Chain { let status = self.determine_status(head.clone(), prev_head); // Notify other parts of the system of the update. 
- block_accepted(AcceptedBlock { hash: block.hash(), status, provenance }); + block_accepted(AcceptedBlock { hash: *block.hash(), status, provenance }); Ok(head) } @@ -1106,8 +1098,8 @@ impl Chain { ErrorKind::Orphan => { let tail_height = self.store.tail()?; // we only add blocks that couldn't have been gc'ed to the orphan pool. - if block.header.inner_lite.height >= tail_height { - let block_hash = block.hash(); + if block.header().height() >= tail_height { + let block_hash = *block.hash(); let orphan = Orphan { block, provenance, added: Instant::now() }; self.orphans.add(orphan); @@ -1127,7 +1119,7 @@ impl Chain { Err(e) } ErrorKind::ChunksMissing(missing_chunks) => { - let block_hash = block.hash(); + let block_hash = *block.hash(); block_misses_chunks(missing_chunks.clone()); let orphan = Orphan { block, provenance, added: Instant::now() }; @@ -1143,7 +1135,7 @@ impl Chain { ErrorKind::EpochOutOfBounds => { // Possibly block arrived before we finished processing all of the blocks for epoch before last. // Or someone is attacking with invalid chain. - debug!(target: "chain", "Received block {}/{} ignored, as epoch is unknown", block.header.inner_lite.height, block.hash()); + debug!(target: "chain", "Received block {}/{} ignored, as epoch is unknown", block.header().height(), block.hash()); Err(e) } ErrorKind::Unfit(ref msg) => { @@ -1151,7 +1143,7 @@ impl Chain { target: "chain", "Block {} at {} is unfit at this time: {}", block.hash(), - block.header.inner_lite.height, + block.header().height(), msg ); Err(ErrorKind::Unfit(msg.clone()).into()) @@ -1209,7 +1201,7 @@ impl Chain { let mut new_blocks_accepted = vec![]; if let Some(orphans) = self.blocks_with_missing_chunks.remove_by_prev_hash(prev_hash) { for orphan in orphans.into_iter() { - let block_hash = orphan.block.header.hash(); + let block_hash = *orphan.block.header().hash(); let res = self.process_block_single( me, orphan.block, @@ -1266,7 +1258,7 @@ impl Chain { if let Some(orphans) = self.orphans.remove_by_prev_hash(queue[queue_idx]) { debug!(target: "chain", "Check orphans: found {} orphans", orphans.len()); for orphan in orphans.into_iter() { - let block_hash = orphan.block.hash(); + let block_hash = *orphan.block.hash(); let timer = near_metrics::start_timer(&metrics::BLOCK_PROCESSING_TIME); let res = self.process_block_single( me, @@ -1327,36 +1319,36 @@ impl Chain { // 3. In inner loops we use all prefixes with no relation to the context described above. let sync_block = self.get_block(&sync_hash).expect("block has already been checked for existence"); - let sync_block_header = sync_block.header.clone(); - let sync_block_epoch_id = sync_block.header.inner_lite.epoch_id.clone(); - if shard_id as usize >= sync_block.chunks.len() { + let sync_block_header = sync_block.header().clone(); + let sync_block_epoch_id = sync_block.header().epoch_id().clone(); + if shard_id as usize >= sync_block.chunks().len() { return Err(ErrorKind::InvalidStateRequest("ShardId out of bounds".into()).into()); } // The chunk was applied at height `chunk_header.height_included`. // Getting the `current` state. 
- let sync_prev_block = self.get_block(&sync_block_header.prev_hash)?; - if sync_block_epoch_id == sync_prev_block.header.inner_lite.epoch_id { + let sync_prev_block = self.get_block(sync_block_header.prev_hash())?; + if &sync_block_epoch_id == sync_prev_block.header().epoch_id() { return Err(ErrorKind::InvalidStateRequest( "sync_hash is not the first hash of the epoch".into(), ) .into()); } - if shard_id as usize >= sync_prev_block.chunks.len() { + if shard_id as usize >= sync_prev_block.chunks().len() { return Err(ErrorKind::InvalidStateRequest("ShardId out of bounds".into()).into()); } // Chunk header here is the same chunk header as at the `current` height. - let chunk_header = sync_prev_block.chunks[shard_id as usize].clone(); + let chunk_header = sync_prev_block.chunks()[shard_id as usize].clone(); let (chunk_headers_root, chunk_proofs) = merklize( &sync_prev_block - .chunks + .chunks() .iter() .map(|shard_chunk| { ChunkHashHeight(shard_chunk.hash.clone(), shard_chunk.height_included) }) .collect::>(), ); - assert_eq!(chunk_headers_root, sync_prev_block.header.inner_rest.chunk_headers_root); + assert_eq!(&chunk_headers_root, sync_prev_block.header().chunk_headers_root()); let chunk = self.get_chunk_clone_from_header(&chunk_header)?; let chunk_proof = chunk_proofs[shard_id as usize].clone(); @@ -1365,28 +1357,25 @@ impl Chain { // Collecting the `prev` state. let (prev_chunk_header, prev_chunk_proof, prev_chunk_height_included) = match self - .get_block(&block_header.prev_hash) + .get_block(block_header.prev_hash()) { Ok(prev_block) => { - if shard_id as usize >= prev_block.chunks.len() { + if shard_id as usize >= prev_block.chunks().len() { return Err( ErrorKind::InvalidStateRequest("ShardId out of bounds".into()).into() ); } - let prev_chunk_header = prev_block.chunks[shard_id as usize].clone(); + let prev_chunk_header = prev_block.chunks()[shard_id as usize].clone(); let (prev_chunk_headers_root, prev_chunk_proofs) = merklize( &prev_block - .chunks + .chunks() .iter() .map(|shard_chunk| { ChunkHashHeight(shard_chunk.hash.clone(), shard_chunk.height_included) }) .collect::>(), ); - assert_eq!( - prev_chunk_headers_root, - prev_block.header.inner_rest.chunk_headers_root - ); + assert_eq!(&prev_chunk_headers_root, prev_block.header().chunk_headers_root()); let prev_chunk_proof = prev_chunk_proofs[shard_id as usize].clone(); let prev_chunk_height_included = prev_chunk_header.height_included; @@ -1395,7 +1384,7 @@ impl Chain { } Err(e) => match e.kind() { ErrorKind::DBNotFoundErr(_) => { - if block_header.prev_hash == CryptoHash::default() { + if block_header.prev_hash() == &CryptoHash::default() { (None, None, 0) } else { return Err(e); @@ -1417,26 +1406,26 @@ impl Chain { let block = self.get_block(&block_hash)?; let (block_receipts_root, block_receipts_proofs) = merklize( &block - .chunks + .chunks() .iter() .map(|chunk| chunk.inner.outgoing_receipts_root) .collect::>(), ); let mut root_proofs_cur = vec![]; - assert_eq!(receipt_proofs.len(), block_header.inner_rest.chunks_included as usize); + assert_eq!(receipt_proofs.len(), block_header.chunks_included() as usize); for receipt_proof in receipt_proofs { let ReceiptProof(receipts, shard_proof) = receipt_proof; let ShardProof { from_shard_id, to_shard_id: _, proof } = shard_proof; let receipts_hash = hash(&ReceiptList(shard_id, receipts.to_vec()).try_to_vec()?); let from_shard_id = *from_shard_id as usize; - let root_proof = block.chunks[from_shard_id].inner.outgoing_receipts_root; + let root_proof = 
block.chunks()[from_shard_id].inner.outgoing_receipts_root; root_proofs_cur .push(RootProof(root_proof, block_receipts_proofs[from_shard_id].clone())); // Make sure we send something reasonable. - assert_eq!(block_header.inner_rest.chunk_receipts_root, block_receipts_root); + assert_eq!(block_header.chunk_receipts_root(), &block_receipts_root); assert!(verify_path(root_proof, &proof, &receipts_hash)); assert!(verify_path( block_receipts_root, @@ -1477,22 +1466,22 @@ impl Chain { ) -> Result, Error> { let sync_block = self.get_block(&sync_hash).expect("block has already been checked for existence"); - let sync_block_header = sync_block.header.clone(); - let sync_block_epoch_id = sync_block.header.inner_lite.epoch_id.clone(); - if shard_id as usize >= sync_block.chunks.len() { + let sync_block_header = sync_block.header().clone(); + let sync_block_epoch_id = sync_block.header().epoch_id().clone(); + if shard_id as usize >= sync_block.chunks().len() { return Err(ErrorKind::InvalidStateRequest("shard_id out of bounds".into()).into()); } - let sync_prev_block = self.get_block(&sync_block_header.prev_hash)?; - if sync_block_epoch_id == sync_prev_block.header.inner_lite.epoch_id { + let sync_prev_block = self.get_block(sync_block_header.prev_hash())?; + if &sync_block_epoch_id == sync_prev_block.header().epoch_id() { return Err(ErrorKind::InvalidStateRequest( "sync_hash is not the first hash of the epoch".into(), ) .into()); } - if shard_id as usize >= sync_prev_block.chunks.len() { + if shard_id as usize >= sync_prev_block.chunks().len() { return Err(ErrorKind::InvalidStateRequest("shard_id out of bounds".into()).into()); } - let state_root = sync_prev_block.chunks[shard_id as usize].inner.prev_state_root.clone(); + let state_root = sync_prev_block.chunks()[shard_id as usize].inner.prev_state_root.clone(); let state_root_node = self.runtime_adapter.get_state_root_node(shard_id, &state_root); let num_parts = Self::get_num_state_parts(state_root_node.memory_usage); @@ -1537,9 +1526,9 @@ impl Chain { // 3. Checking that chunks `chunk` and `prev_chunk` are included in appropriate blocks // 3a. Checking that chunk `chunk` is included into block at last height before sync_hash // 3aa. Also checking chunk.height_included - let sync_prev_block_header = self.get_block_header(&sync_block_header.prev_hash)?.clone(); + let sync_prev_block_header = self.get_block_header(sync_block_header.prev_hash())?.clone(); if !verify_path( - sync_prev_block_header.inner_rest.chunk_headers_root, + *sync_prev_block_header.chunk_headers_root(), &chunk_proof, &ChunkHashHeight(chunk.chunk_hash.clone(), chunk.header.height_included), ) { @@ -1561,9 +1550,9 @@ impl Chain { match (prev_chunk_header, prev_chunk_proof) { (Some(prev_chunk_header), Some(prev_chunk_proof)) => { let prev_block_header = - self.get_block_header(&block_header.prev_hash)?.clone(); + self.get_block_header(block_header.prev_hash())?.clone(); if !verify_path( - prev_block_header.inner_rest.chunk_headers_root, + *prev_block_header.chunk_headers_root(), &prev_chunk_proof, &ChunkHashHeight(prev_chunk_header.hash.clone(), prev_chunk_height_included), ) { @@ -1604,19 +1593,19 @@ impl Chain { .into()); } let header = self.get_block_header(&hash_to_compare)?; - hash_to_compare = header.prev_hash; + hash_to_compare = *header.prev_hash(); let block_header = self.get_block_header(&block_hash)?; // 4c. 
Checking len of receipt_proofs for current block if receipt_proofs.len() != root_proofs[i].len() - || receipt_proofs.len() != block_header.inner_rest.chunks_included as usize + || receipt_proofs.len() != block_header.chunks_included() as usize { byzantine_assert!(false); return Err( ErrorKind::Other("set_shard_state failed: invalid proofs".into()).into() ); } - // We know there were exactly `block_header.inner.chunks_included` chunks included + // We know there were exactly `block_header.chunks_included` chunks included // on the height of block `block_hash`. // There were no other proofs except for included chunks. // According to Pigeonhole principle, it's enough to ensure all receipt_proofs are distinct @@ -1646,7 +1635,7 @@ impl Chain { ); } // 4f. Proving the outgoing_receipts_root matches that in the block - if !verify_path(block_header.inner_rest.chunk_receipts_root, block_proof, root) { + if !verify_path(*block_header.chunk_receipts_root(), block_proof, root) { byzantine_assert!(false); return Err( ErrorKind::Other("set_shard_state failed: invalid proofs".into()).into() @@ -1656,7 +1645,7 @@ impl Chain { } // 4g. Checking that there are no more heights to get incoming_receipts let header = self.get_block_header(&hash_to_compare)?; - if header.inner_lite.height != prev_chunk_height_included { + if header.height() != prev_chunk_height_included { byzantine_assert!(false); return Err(ErrorKind::Other( "set_shard_state failed: invalid incoming receipts".into(), @@ -1816,7 +1805,7 @@ impl Chain { // Apply the epoch start block separately, since it doesn't follow the pattern let block = self.store.get_block(&epoch_first_block)?.clone(); - let prev_block = self.store.get_block(&block.header.prev_hash)?.clone(); + let prev_block = self.store.get_block(block.header().prev_hash())?.clone(); let mut chain_update = ChainUpdate::new( &mut self.store, @@ -1830,9 +1819,9 @@ impl Chain { chain_update.apply_chunks(me, &block, &prev_block, ApplyChunksMode::NextEpoch)?; chain_update.commit()?; - affected_blocks.insert(block.header.hash()); + affected_blocks.insert(*block.header().hash()); - let first_epoch = block.header.inner_lite.epoch_id.clone(); + let first_epoch = block.header().epoch_id().clone(); let mut queue = vec![*epoch_first_block]; let mut cur = 0; @@ -1863,7 +1852,7 @@ impl Chain { chain_update.commit()?; - affected_blocks.insert(block.header.hash()); + affected_blocks.insert(*block.header().hash()); queue.push(next_block_hash); } if saw_one { @@ -1882,7 +1871,7 @@ impl Chain { // `epoch_first_block` we should only remove the pair with hash = epoch_first_block, while // for all the blocks in the queue we can remove all the pairs that have them as `prev_hash` // since we processed all the blocks built on top of them above during the BFS - chain_store_update.remove_block_to_catchup(block.header.prev_hash, *epoch_first_block); + chain_store_update.remove_block_to_catchup(*block.header().prev_hash(), *epoch_first_block); for block_hash in queue { debug!(target: "chain", "Catching up: removing prev={:?} from the queue. 
I'm {:?}", block_hash, me); @@ -1990,10 +1979,10 @@ impl Chain { &mut self, block_hash: &CryptoHash, ) -> Result<(), Error> { - let last_final_block_hash = self.head_header()?.inner_rest.last_final_block; - let last_final_height = self.get_block_header(&last_final_block_hash)?.inner_lite.height; + let last_final_block_hash = self.head_header()?.last_final_block().clone(); + let last_final_height = self.get_block_header(&last_final_block_hash)?.height(); let block_header = self.get_block_header(block_hash)?.clone(); - if block_header.inner_lite.height <= last_final_height { + if block_header.height() <= last_final_height { self.is_on_current_chain(&block_header) } else { Err(ErrorKind::Other(format!("{} not on current chain", block_hash)).into()) @@ -2419,9 +2408,9 @@ impl<'a> ChainUpdate<'a> { where F: FnMut(ChallengeBody) -> (), { - debug!(target: "chain", "Process block header: {} at {}", header.hash(), header.inner_lite.height); + debug!(target: "chain", "Process block header: {} at {}", header.hash(), header.height()); - self.check_known(&header.hash)?; + self.check_known(header.hash())?; self.validate_header(header, &Provenance::NONE, on_challenge)?; Ok(()) } @@ -2469,15 +2458,15 @@ impl<'a> ChainUpdate<'a> { return Ok(()); } let mut missing = vec![]; - let height = block.header.inner_lite.height; - for (shard_id, chunk_header) in block.chunks.iter().enumerate() { + let height = block.header().height(); + for (shard_id, chunk_header) in block.chunks().iter().enumerate() { // Check if any chunks are invalid in this block. if let Some(encoded_chunk) = self.chain_store_update.is_invalid_chunk(&chunk_header.hash)? { - let merkle_paths = Block::compute_chunk_headers_root(&block.chunks).1; + let merkle_paths = Block::compute_chunk_headers_root(&block.chunks()).1; let chunk_proof = ChunkProofs { - block_header: block.header.try_to_vec().expect("Failed to serialize"), + block_header: block.header().try_to_vec().expect("Failed to serialize"), merkle_proof: merkle_paths[shard_id].clone(), chunk: MaybeEncodedShardChunk::Encoded(encoded_chunk.clone()), }; @@ -2519,13 +2508,13 @@ impl<'a> ChainUpdate<'a> { me: &Option, block: &Block, ) -> Result<(), Error> { - if !self.care_about_any_shard_or_part(me, block.header.prev_hash)? { + if !self.care_about_any_shard_or_part(me, *block.header().prev_hash())? 
{ return Ok(()); } - let height = block.header.inner_lite.height; + let height = block.header().height(); let mut receipt_proofs_by_shard_id = HashMap::new(); - for chunk_header in block.chunks.iter() { + for chunk_header in block.chunks().iter() { if chunk_header.height_included == height { let partial_encoded_chunk = self.chain_store_update.get_partial_chunk(&chunk_header.chunk_hash()).unwrap(); @@ -2557,26 +2546,26 @@ impl<'a> ChainUpdate<'a> { block: &Block, chunk_header: &ShardChunkHeader, ) -> Result { - let prev_chunk_header = &prev_block.chunks[chunk_header.inner.shard_id as usize]; - let prev_merkle_proofs = Block::compute_chunk_headers_root(&prev_block.chunks).1; - let merkle_proofs = Block::compute_chunk_headers_root(&block.chunks).1; + let prev_chunk_header = &prev_block.chunks()[chunk_header.inner.shard_id as usize]; + let prev_merkle_proofs = Block::compute_chunk_headers_root(&prev_block.chunks()).1; + let merkle_proofs = Block::compute_chunk_headers_root(&block.chunks()).1; let prev_chunk = self .chain_store_update .get_chain_store() - .get_chunk_clone_from_header(&prev_block.chunks[chunk_header.inner.shard_id as usize]) + .get_chunk_clone_from_header(&prev_block.chunks()[chunk_header.inner.shard_id as usize]) .unwrap(); let receipt_proof_response: Vec = self.chain_store_update.get_incoming_receipts_for_shard( chunk_header.inner.shard_id, - prev_block.hash(), + *prev_block.hash(), prev_chunk_header.height_included, )?; let receipts = collect_receipts_from_response(&receipt_proof_response); let challenges_result = self.verify_challenges( - &block.challenges, - &block.header.inner_lite.epoch_id, - &block.header.prev_hash, + block.challenges(), + block.header().epoch_id(), + block.header().prev_hash(), Some(&block.hash()), )?; let apply_result = self @@ -2585,13 +2574,13 @@ impl<'a> ChainUpdate<'a> { chunk_header.inner.shard_id, &prev_chunk.header.inner.prev_state_root, prev_chunk.header.height_included, - prev_block.header.inner_lite.timestamp, + prev_block.header().raw_timestamp(), &prev_chunk.header.inner.prev_block_hash, &prev_block.hash(), &receipts, &prev_chunk.transactions, &prev_chunk.header.inner.validator_proposals, - prev_block.header.inner_rest.gas_price, + prev_block.header().gas_price(), prev_chunk.header.inner.gas_limit, &challenges_result, true, @@ -2599,8 +2588,8 @@ impl<'a> ChainUpdate<'a> { .unwrap(); let partial_state = apply_result.proof.unwrap().nodes; Ok(ChunkState { - prev_block_header: prev_block.header.try_to_vec()?, - block_header: block.header.try_to_vec()?, + prev_block_header: prev_block.header().try_to_vec()?, + block_header: block.header().try_to_vec()?, prev_merkle_proof: prev_merkle_proofs[chunk_header.inner.shard_id as usize].clone(), merkle_proof: merkle_proofs[chunk_header.inner.shard_id as usize].clone(), prev_chunk, @@ -2617,44 +2606,44 @@ impl<'a> ChainUpdate<'a> { mode: ApplyChunksMode, ) -> Result<(), Error> { let challenges_result = self.verify_challenges( - &block.challenges, - &block.header.inner_lite.epoch_id, - &block.header.prev_hash, + block.challenges(), + block.header().epoch_id(), + block.header().prev_hash(), Some(&block.hash()), )?; self.chain_store_update.save_block_extra(&block.hash(), BlockExtra { challenges_result }); for (shard_id, (chunk_header, prev_chunk_header)) in - (block.chunks.iter().zip(prev_block.chunks.iter())).enumerate() + (block.chunks().iter().zip(prev_block.chunks().iter())).enumerate() { let shard_id = shard_id as ShardId; let care_about_shard = match mode { ApplyChunksMode::ThisEpoch => 
self.runtime_adapter.cares_about_shard( me.as_ref(), - &block.header.prev_hash, + &block.header().prev_hash(), shard_id, true, ), ApplyChunksMode::NextEpoch => { self.runtime_adapter.will_care_about_shard( me.as_ref(), - &block.header.prev_hash, + &block.header().prev_hash(), shard_id, true, ) && !self.runtime_adapter.cares_about_shard( me.as_ref(), - &block.header.prev_hash, + &block.header().prev_hash(), shard_id, true, ) } }; if care_about_shard { - if chunk_header.height_included == block.header.inner_lite.height { + if chunk_header.height_included == block.header().height() { // Validate state root. let prev_chunk_extra = self .chain_store_update - .get_chunk_extra(&block.header.prev_hash, shard_id)? + .get_chunk_extra(&block.header().prev_hash(), shard_id)? .clone(); // Validate that all next chunk information matches previous chunk extra. @@ -2663,7 +2652,7 @@ impl<'a> ChainUpdate<'a> { // because we're asking prev_chunk_header for already committed block self.chain_store_update.get_chain_store(), &*self.runtime_adapter, - &block.header.prev_hash, + &block.header().prev_hash(), &prev_chunk_extra, prev_chunk_header, chunk_header, @@ -2682,7 +2671,7 @@ impl<'a> ChainUpdate<'a> { let receipt_proof_response: Vec = self.chain_store_update.get_incoming_receipts_for_shard( shard_id, - block.hash(), + *block.hash(), prev_chunk_header.height_included, )?; let receipts = collect_receipts_from_response(&receipt_proof_response); @@ -2691,9 +2680,9 @@ impl<'a> ChainUpdate<'a> { self.chain_store_update.get_chunk_clone_from_header(&chunk_header)?; if !validate_transactions_order(&chunk.transactions) { - let merkle_paths = Block::compute_chunk_headers_root(&block.chunks).1; + let merkle_paths = Block::compute_chunk_headers_root(&block.chunks()).1; let chunk_proof = ChunkProofs { - block_header: block.header.try_to_vec().expect("Failed to serialize"), + block_header: block.header().try_to_vec().expect("Failed to serialize"), merkle_proof: merkle_paths[shard_id as usize].clone(), chunk: MaybeEncodedShardChunk::Decoded(chunk), }; @@ -2711,15 +2700,15 @@ impl<'a> ChainUpdate<'a> { shard_id, &chunk.header.inner.prev_state_root, chunk_header.height_included, - block.header.inner_lite.timestamp, + block.header().raw_timestamp(), &chunk_header.inner.prev_block_hash, &block.hash(), &receipts, &chunk.transactions, &chunk.header.inner.validator_proposals, - prev_block.header.inner_rest.gas_price, + prev_block.header().gas_price(), chunk.header.inner.gas_limit, - &block.header.inner_rest.challenges_result, + &block.header().challenges_result(), ) .map_err(|e| ErrorKind::Other(e.to_string()))?; @@ -2775,16 +2764,16 @@ impl<'a> ChainUpdate<'a> { .apply_transactions( shard_id, &new_extra.state_root, - block.header.inner_lite.height, - block.header.inner_lite.timestamp, + block.header().height(), + block.header().raw_timestamp(), &prev_block.hash(), &block.hash(), &[], &[], &new_extra.validator_proposals, - block.header.inner_rest.gas_price, + block.header().gas_price(), new_extra.gas_limit, - &block.header.inner_rest.challenges_result, + &block.header().challenges_result(), ) .map_err(|e| ErrorKind::Other(e.to_string()))?; @@ -2812,28 +2801,28 @@ impl<'a> ChainUpdate<'a> { where F: FnMut(ChallengeBody) -> (), { - debug!(target: "chain", "Process block {} at {}, approvals: {}, me: {:?}", block.hash(), block.header.inner_lite.height, block.header.num_approvals(), me); + debug!(target: "chain", "Process block {} at {}, approvals: {}, me: {:?}", block.hash(), block.header().height(), block.header().num_approvals(), 
me); // Check if we have already processed this block previously. - self.check_known(&block.header.hash)?; + self.check_known(block.header().hash())?; // Delay hitting the db for current chain head until we know this block is not already known. let head = self.chain_store_update.head()?; - let is_next = block.header.prev_hash == head.last_block_hash; + let is_next = block.header().prev_hash() == &head.last_block_hash; // Check that we know the epoch of the block before we try to get the header // (so that a block from unknown epoch doesn't get marked as an orphan) - if !self.runtime_adapter.epoch_exists(&block.header.inner_lite.epoch_id) { + if !self.runtime_adapter.epoch_exists(&block.header().epoch_id()) { return Err(ErrorKind::EpochOutOfBounds.into()); } // First real I/O expense. - let prev = self.get_previous_header(&block.header)?; - let prev_hash = prev.hash(); - let prev_prev_hash = prev.prev_hash; - let prev_gas_price = prev.inner_rest.gas_price; - let prev_epoch_id = prev.inner_lite.epoch_id.clone(); - let prev_random_value = prev.inner_rest.random_value; + let prev = self.get_previous_header(block.header())?; + let prev_hash = *prev.hash(); + let prev_prev_hash = *prev.prev_hash(); + let prev_gas_price = prev.gas_price(); + let prev_epoch_id = prev.epoch_id().clone(); + let prev_random_value = *prev.random_value(); // Block is an orphan if we do not know about the previous full block. if !is_next && !self.chain_store_update.block_exists(&prev_hash)? { @@ -2842,7 +2831,7 @@ impl<'a> ChainUpdate<'a> { // A heuristic to prevent block height to jump too fast towards BlockHeight::max and cause // overflow-related problems - if block.header.inner_lite.height > head.height + self.epoch_length * 20 { + if block.header().height() > head.height + self.epoch_length * 20 { return Err(ErrorKind::InvalidBlockHeight.into()); } @@ -2865,17 +2854,17 @@ impl<'a> ChainUpdate<'a> { debug!(target: "chain", "{:?} Process block {}, is_caught_up: {}, need_to_start_fetching_state: {}", me, block.hash(), is_caught_up, needs_to_start_fetching_state); // Check the header is valid before we proceed with the full block. 
- self.process_header_for_block(&block.header, provenance, on_challenge)?; + self.process_header_for_block(block.header(), provenance, on_challenge)?; self.runtime_adapter.verify_block_vrf( - &block.header.inner_lite.epoch_id, - block.header.inner_lite.height, + &block.header().epoch_id(), + block.header().height(), &prev_random_value, - block.vrf_value, - block.vrf_proof, + block.vrf_value(), + block.vrf_proof(), )?; - if block.header.inner_rest.random_value != hash(block.vrf_value.0.as_ref()) { + if block.header().random_value() != &hash(block.vrf_value().0.as_ref()) { return Err(ErrorKind::InvalidRandomnessBeaconOutput.into()); } @@ -2899,9 +2888,11 @@ impl<'a> ChainUpdate<'a> { self.save_incoming_receipts_from_block(me, &block)?; // Do basic validation of chunks before applying the transactions - for (chunk_header, prev_chunk_header) in block.chunks.iter().zip(prev_block.chunks.iter()) { - if chunk_header.height_included == block.header.inner_lite.height { - if chunk_header.inner.prev_block_hash != block.header.prev_hash { + for (chunk_header, prev_chunk_header) in + block.chunks().iter().zip(prev_block.chunks().iter()) + { + if chunk_header.height_included == block.header().height() { + if &chunk_header.inner.prev_block_hash != block.header().prev_hash() { return Err(ErrorKind::InvalidChunk.into()); } } else { @@ -2919,46 +2910,39 @@ impl<'a> ChainUpdate<'a> { if is_caught_up { self.apply_chunks(me, block, &prev_block, ApplyChunksMode::NextEpoch)?; } else { - self.chain_store_update.add_block_to_catchup(prev_hash, block.hash()); + self.chain_store_update.add_block_to_catchup(prev_hash, *block.hash()); } // Verify that proposals from chunks match block header proposals. let mut all_chunk_proposals = vec![]; - for chunk in block.chunks.iter() { - if block.header.inner_lite.height == chunk.height_included { + for chunk in block.chunks().iter() { + if block.header().height() == chunk.height_included { all_chunk_proposals.extend(chunk.inner.validator_proposals.clone()); } } - if all_chunk_proposals != block.header.inner_rest.validator_proposals { + if all_chunk_proposals.as_slice() != block.header().validator_proposals() { return Err(ErrorKind::InvalidValidatorProposals.into()); } // If block checks out, record validator proposals for given block. - let last_final_block = &block.header.inner_rest.last_final_block; + let last_final_block = block.header().last_final_block(); let last_finalized_height = if last_final_block == &CryptoHash::default() { self.chain_store_update.get_genesis_height() } else { - self.chain_store_update.get_block_header(last_final_block)?.inner_lite.height + self.chain_store_update.get_block_header(last_final_block)?.height() }; - self.runtime_adapter.add_validator_proposals( - block.header.prev_hash, - block.hash(), - block.header.inner_rest.random_value, - block.header.inner_lite.height, + self.runtime_adapter.add_validator_proposals(BlockHeaderInfo::new( + &block.header(), last_finalized_height, - block.header.inner_rest.validator_proposals.clone(), - block.header.inner_rest.challenges_result.clone(), - block.header.inner_rest.chunk_mask.clone(), - block.header.inner_rest.total_supply, - )?; + ))?; // Add validated block to the db, even if it's not the canonical fork. 
self.chain_store_update.save_block(block.clone()); - self.chain_store_update.inc_block_refcount(&block.header.prev_hash)?; - for (shard_id, chunk_headers) in block.chunks.iter().enumerate() { - if chunk_headers.height_included == block.header.inner_lite.height { + self.chain_store_update.inc_block_refcount(block.header().prev_hash())?; + for (shard_id, chunk_headers) in block.chunks().iter().enumerate() { + if chunk_headers.height_included == block.header().height() { self.chain_store_update - .save_block_hash_with_new_chunk(block.hash(), shard_id as ShardId); + .save_block_hash_with_new_chunk(*block.hash(), shard_id as ShardId); } } @@ -2975,9 +2959,9 @@ impl<'a> ChainUpdate<'a> { // Presently the epoch boundary is defined by the height, and the fork choice rule // is also just height, so the very first block to cross the epoch end is guaranteed // to be the head of the chain, and result in the light client block produced. - if block.header.inner_lite.epoch_id != prev_epoch_id { - let prev = self.get_previous_header(&block.header)?.clone(); - if prev.inner_rest.last_final_block != CryptoHash::default() { + if block.header().epoch_id() != &prev_epoch_id { + let prev = self.get_previous_header(&block.header())?.clone(); + if prev.last_final_block() != &CryptoHash::default() { let light_client_block = self.create_light_client_block(&prev)?; self.chain_store_update .save_epoch_light_client_block(&prev_epoch_id.0, light_client_block); @@ -2993,7 +2977,7 @@ impl<'a> ChainUpdate<'a> { header: &BlockHeader, ) -> Result { // First update the last next_block, since it might not be set yet - self.chain_store_update.save_next_block_hash(&header.prev_hash, header.hash()); + self.chain_store_update.save_next_block_hash(header.prev_hash(), *header.hash()); Chain::create_light_client_block( header, @@ -3056,13 +3040,13 @@ impl<'a> ChainUpdate<'a> { // If we do - send out double sign challenge and keep going as double signed blocks are valid blocks. if let Ok(epoch_id_to_blocks) = self .chain_store_update - .get_all_block_hashes_by_height(header.inner_lite.height) + .get_all_block_hashes_by_height(header.height()) .map(Clone::clone) { // Check if there is already known block of the same height that has the same epoch id - if let Some(block_hashes) = epoch_id_to_blocks.get(&header.inner_lite.epoch_id) { + if let Some(block_hashes) = epoch_id_to_blocks.get(&header.epoch_id()) { // This should be guaranteed but it doesn't hurt to check again - if !block_hashes.contains(&header.hash) { + if !block_hashes.contains(header.hash()) { let other_header = self .chain_store_update .get_block_header(block_hashes.iter().next().unwrap())?; @@ -3078,42 +3062,42 @@ impl<'a> ChainUpdate<'a> { let prev_header = self.get_previous_header(header)?.clone(); // Check that epoch_id in the header does match epoch given previous header (only if previous header is present). - if self.runtime_adapter.get_epoch_id_from_prev_block(&header.prev_hash).unwrap() - != header.inner_lite.epoch_id + if &self.runtime_adapter.get_epoch_id_from_prev_block(header.prev_hash()).unwrap() + != header.epoch_id() { return Err(ErrorKind::InvalidEpochHash.into()); } // Check that epoch_id in the header does match epoch given previous header (only if previous header is present). 
- if self.runtime_adapter.get_next_epoch_id_from_prev_block(&header.prev_hash).unwrap() - != header.inner_lite.next_epoch_id + if &self.runtime_adapter.get_next_epoch_id_from_prev_block(header.prev_hash()).unwrap() + != header.next_epoch_id() { return Err(ErrorKind::InvalidEpochHash.into()); } - if header.inner_lite.epoch_id == prev_header.inner_lite.epoch_id { - if header.inner_lite.next_bp_hash != prev_header.inner_lite.next_bp_hash { + if header.epoch_id() == prev_header.epoch_id() { + if header.next_bp_hash() != prev_header.next_bp_hash() { return Err(ErrorKind::InvalidNextBPHash.into()); } } else { - if header.inner_lite.next_bp_hash - != Chain::compute_bp_hash( + if header.next_bp_hash() + != &Chain::compute_bp_hash( &*self.runtime_adapter, - header.inner_lite.next_epoch_id.clone(), - &header.prev_hash, + header.next_epoch_id().clone(), + &header.prev_hash(), )? { return Err(ErrorKind::InvalidNextBPHash.into()); } } - if header.inner_rest.chunk_mask.len() as u64 != self.runtime_adapter.num_shards() { + if header.chunk_mask().len() as u64 != self.runtime_adapter.num_shards() { return Err(ErrorKind::InvalidChunkMask.into()); } // Prevent time warp attacks and some timestamp manipulations by forcing strict // time progression. - if header.inner_lite.timestamp <= prev_header.inner_lite.timestamp { + if header.raw_timestamp() <= prev_header.raw_timestamp() { return Err(ErrorKind::InvalidBlockPastTime( prev_header.timestamp(), header.timestamp(), @@ -3125,10 +3109,10 @@ impl<'a> ChainUpdate<'a> { if *provenance != Provenance::PRODUCED { // first verify aggregated signature if !self.runtime_adapter.verify_approval( - &prev_header.hash, - prev_header.inner_lite.height, - header.inner_lite.height, - &header.inner_rest.approvals, + prev_header.hash(), + prev_header.height(), + header.height(), + &header.approvals(), )? { return Err(ErrorKind::InvalidApprovals.into()); }; @@ -3137,44 +3121,42 @@ impl<'a> ChainUpdate<'a> { let stakes = self .runtime_adapter - .get_epoch_block_approvers_ordered(&header.prev_hash)? + .get_epoch_block_approvers_ordered(header.prev_hash())? 
.iter() .map(|x| (x.stake_this_epoch, x.stake_next_epoch)) .collect(); if !Doomslug::can_approved_block_be_produced( self.doomslug_threshold_mode, - &header.inner_rest.approvals, + header.approvals(), &stakes, ) { return Err(ErrorKind::NotEnoughApprovals.into()); } - let expected_last_ds_final_block = - if prev_header.inner_lite.height + 1 == header.inner_lite.height { - prev_header.hash - } else { - prev_header.inner_rest.last_ds_final_block - }; + let expected_last_ds_final_block = if prev_header.height() + 1 == header.height() { + prev_header.hash() + } else { + prev_header.last_ds_final_block() + }; - let expected_last_final_block = if prev_header.inner_lite.height + 1 - == header.inner_lite.height - && prev_header.inner_rest.last_ds_final_block == prev_header.prev_hash + let expected_last_final_block = if prev_header.height() + 1 == header.height() + && prev_header.last_ds_final_block() == prev_header.prev_hash() { - prev_header.prev_hash + prev_header.prev_hash() } else { - prev_header.inner_rest.last_final_block + prev_header.last_final_block() }; - if header.inner_rest.last_ds_final_block != expected_last_ds_final_block - || header.inner_rest.last_final_block != expected_last_final_block + if header.last_ds_final_block() != expected_last_ds_final_block + || header.last_final_block() != expected_last_final_block { return Err(ErrorKind::InvalidFinalityInfo.into()); } let mut block_merkle_tree = - self.chain_store_update.get_block_merkle_tree(&header.prev_hash)?.clone(); - block_merkle_tree.insert(header.prev_hash); - if block_merkle_tree.root() != header.inner_lite.block_merkle_root { + self.chain_store_update.get_block_merkle_tree(header.prev_hash())?.clone(); + block_merkle_tree.insert(*header.prev_hash()); + if &block_merkle_tree.root() != header.block_merkle_root() { return Err(ErrorKind::InvalidBlockMerkleRoot.into()); } } @@ -3188,7 +3170,7 @@ impl<'a> ChainUpdate<'a> { header: &BlockHeader, ) -> Result, Error> { let header_head = self.chain_store_update.header_head()?; - if header.inner_lite.height > header_head.height { + if header.height() > header_head.height { let tip = Tip::from_header(header); self.chain_store_update.save_header_head_if_not_challenged(&tip)?; debug!(target: "chain", "Header head updated to {} at {}", tip.last_block_hash, tip.height); @@ -3205,8 +3187,8 @@ impl<'a> ChainUpdate<'a> { // if we made a fork with higher height than the head (which should also be true // when extending the head), update it let head = self.chain_store_update.head()?; - if block.header.inner_lite.height > head.height { - let tip = Tip::from_header(&block.header); + if block.header().height() > head.height { + let tip = Tip::from_header(&block.header()); self.chain_store_update.save_body_head(&tip)?; near_metrics::set_gauge(&metrics::BLOCK_HEIGHT_HEAD, tip.height as i64); @@ -3244,16 +3226,14 @@ impl<'a> ChainUpdate<'a> { }, }; - let cur_block_at_same_height = match self - .chain_store_update - .get_block_hash_by_height(block_header.inner_lite.height) - { - Ok(bh) => Some(bh), - Err(e) => match e.kind() { - ErrorKind::DBNotFoundErr(_) => None, - _ => return Err(e), - }, - }; + let cur_block_at_same_height = + match self.chain_store_update.get_block_hash_by_height(block_header.height()) { + Ok(bh) => Some(bh), + Err(e) => match e.kind() { + ErrorKind::DBNotFoundErr(_) => None, + _ => return Err(e), + }, + }; self.chain_store_update.save_challenged_block(*block_hash); @@ -3265,11 +3245,11 @@ impl<'a> ChainUpdate<'a> { // and even if there's such chain available, the very next block 
built on it will // bring this node's head to that chain. let prev_header = - self.chain_store_update.get_block_header(&block_header.prev_hash)?.clone(); - let prev_height = prev_header.inner_lite.height; + self.chain_store_update.get_block_header(block_header.prev_hash())?.clone(); + let prev_height = prev_header.height(); let new_head_header = if let Some(hash) = challenger_hash { let challenger_header = self.chain_store_update.get_block_header(hash)?; - if challenger_header.inner_lite.height > prev_height { + if challenger_header.height() > prev_height { challenger_header } else { &prev_header @@ -3288,12 +3268,12 @@ impl<'a> ChainUpdate<'a> { /// Check if header is recent or in the store fn check_header_known(&mut self, header: &BlockHeader) -> Result<(), Error> { let header_head = self.chain_store_update.header_head()?; - if header.hash() == header_head.last_block_hash - || header.hash() == header_head.prev_block_hash + if header.hash() == &header_head.last_block_hash + || header.hash() == &header_head.prev_block_hash { return Err(ErrorKind::Unfit("header already known".to_string()).into()); } - self.check_known_store(&header.hash) + self.check_known_store(header.hash()) } /// Quick in-memory check for fast-reject any block handled recently. @@ -3365,7 +3345,7 @@ impl<'a> ChainUpdate<'a> { for incoming_receipt_proof in incoming_receipts_proofs.iter() { let ReceiptProofResponse(hash, _) = incoming_receipt_proof; let block_header = self.chain_store_update.get_block_header(&hash)?; - if block_header.inner_lite.height <= chunk.header.height_included { + if block_header.height() <= chunk.header.height_included { receipt_proof_response.push(incoming_receipt_proof.clone()); } } @@ -3376,15 +3356,15 @@ impl<'a> ChainUpdate<'a> { shard_id, &chunk.header.inner.prev_state_root, chunk.header.height_included, - block_header.inner_lite.timestamp, + block_header.raw_timestamp(), &chunk.header.inner.prev_block_hash, - &block_header.hash, + block_header.hash(), &receipts, &chunk.transactions, &chunk.header.inner.validator_proposals, - block_header.inner_rest.gas_price, + block_header.gas_price(), chunk.header.inner.gas_limit, - &block_header.inner_rest.challenges_result, + &block_header.challenges_result(), )?; let (outcome_root, outcome_proofs) = @@ -3401,7 +3381,7 @@ impl<'a> ChainUpdate<'a> { gas_limit, apply_result.total_balance_burnt, ); - self.chain_store_update.save_chunk_extra(&block_header.hash, shard_id, chunk_extra); + self.chain_store_update.save_chunk_extra(block_header.hash(), shard_id, chunk_extra); // Saving outgoing receipts. let mut outgoing_receipts = vec![]; @@ -3415,7 +3395,7 @@ impl<'a> ChainUpdate<'a> { ); // Saving transaction results. 
self.chain_store_update.save_outcomes_with_proofs( - &block_header.hash, + block_header.hash(), apply_result.outcomes, outcome_proofs, ); @@ -3443,12 +3423,12 @@ impl<'a> ChainUpdate<'a> { return Ok(true); } let block_header = block_header_result?.clone(); - if block_header.hash == sync_hash { + if block_header.hash() == &sync_hash { // Don't continue return Ok(false); } let prev_block_header = - self.chain_store_update.get_block_header(&block_header.prev_hash)?.clone(); + self.chain_store_update.get_block_header(&block_header.prev_hash())?.clone(); let mut chunk_extra = self.chain_store_update.get_chunk_extra(&prev_block_header.hash(), shard_id)?.clone(); @@ -3456,16 +3436,16 @@ impl<'a> ChainUpdate<'a> { let apply_result = self.runtime_adapter.apply_transactions( shard_id, &chunk_extra.state_root, - block_header.inner_lite.height, - block_header.inner_lite.timestamp, + block_header.height(), + block_header.raw_timestamp(), &prev_block_header.hash(), &block_header.hash(), &[], &[], &chunk_extra.validator_proposals, - block_header.inner_rest.gas_price, + block_header.gas_price(), chunk_extra.gas_limit, - &block_header.inner_rest.challenges_result, + &block_header.challenges_result(), )?; self.chain_store_update.save_trie_changes(apply_result.trie_changes); @@ -3542,7 +3522,8 @@ pub fn check_refcount_map(chain: &mut Chain) -> Result<(), Error> { _ => vec![], }; for block_hash in blocks_current_height.iter() { - if let Ok(prev_hash) = chain.get_block(&block_hash).map(|block| block.header.prev_hash) + if let Ok(prev_hash) = + chain.get_block(&block_hash).map(|block| *block.header().prev_hash()) { *block_refcounts.entry(prev_hash).or_insert(0) += 1; } diff --git a/chain/chain/src/doomslug.rs b/chain/chain/src/doomslug.rs index 78f4749399d..139bd0fb412 100644 --- a/chain/chain/src/doomslug.rs +++ b/chain/chain/src/doomslug.rs @@ -416,7 +416,7 @@ impl Doomslug { /// * `stakes` - the vector of validator stakes in the current epoch pub fn can_approved_block_be_produced( mode: DoomslugThresholdMode, - approvals: &Vec>, + approvals: &[Option], stakes: &Vec<(Balance, Balance)>, ) -> bool { if mode == DoomslugThresholdMode::NoApprovals { diff --git a/chain/chain/src/lightclient.rs b/chain/chain/src/lightclient.rs index 1493858eb42..12a52dc933c 100644 --- a/chain/chain/src/lightclient.rs +++ b/chain/chain/src/lightclient.rs @@ -1,5 +1,5 @@ use near_primitives::block::BlockHeader; -use near_primitives::hash::CryptoHash; +use near_primitives::hash::{hash, CryptoHash}; use near_primitives::types::EpochId; use near_primitives::views::{BlockHeaderInnerLiteView, LightClientBlockView, ValidatorStakeView}; @@ -36,32 +36,31 @@ pub fn create_light_client_block_view( chain_store: &mut dyn ChainStoreAccess, next_block_producers: Option>, ) -> Result { - let inner_lite = block_header.inner_lite.clone(); let inner_lite_view = BlockHeaderInnerLiteView { - height: inner_lite.height, - epoch_id: inner_lite.epoch_id.0, - next_epoch_id: inner_lite.next_epoch_id.0, - prev_state_root: inner_lite.prev_state_root, - outcome_root: inner_lite.outcome_root, - timestamp: inner_lite.timestamp, - next_bp_hash: inner_lite.next_bp_hash, - block_merkle_root: inner_lite.block_merkle_root, + height: block_header.height(), + epoch_id: block_header.epoch_id().0, + next_epoch_id: block_header.next_epoch_id().0, + prev_state_root: *block_header.prev_state_root(), + outcome_root: *block_header.outcome_root(), + timestamp: block_header.raw_timestamp(), + next_bp_hash: *block_header.next_bp_hash(), + block_merkle_root: 
*block_header.block_merkle_root(), }; - let inner_rest_hash = block_header.inner_rest.hash(); + let inner_rest_hash = hash(&block_header.inner_rest_bytes()); let next_block_hash = chain_store.get_next_block_hash(&block_header.hash())?.clone(); let next_block_header = chain_store.get_block_header(&next_block_hash)?; let next_block_inner_hash = BlockHeader::compute_inner_hash( - &next_block_header.inner_lite, - &next_block_header.inner_rest, + &next_block_header.inner_lite_bytes(), + &next_block_header.inner_rest_bytes(), ); let after_next_block_hash = chain_store.get_next_block_hash(&next_block_hash)?.clone(); let after_next_block_header = chain_store.get_block_header(&after_next_block_hash)?; - let approvals_after_next = after_next_block_header.inner_rest.approvals.clone(); + let approvals_after_next = after_next_block_header.approvals().to_vec(); Ok(LightClientBlockView { - prev_block_hash: block_header.prev_hash, + prev_block_hash: *block_header.prev_hash(), next_block_inner_hash, inner_lite: inner_lite_view, inner_rest_hash, diff --git a/chain/chain/src/store.rs b/chain/chain/src/store.rs index 7adf12beed7..63e1c564905 100644 --- a/chain/chain/src/store.rs +++ b/chain/chain/src/store.rs @@ -175,11 +175,11 @@ pub trait ChainStoreAccess { ) -> Result<&BlockHeader, Error> { let mut header = self.get_block_header(sync_hash)?; let mut hash = sync_hash.clone(); - while header.inner_lite.height > height { - hash = header.prev_hash; + while header.height() > height { + hash = *header.prev_hash(); header = self.get_block_header(&hash)?; } - if header.inner_lite.height < height { + if header.height() < height { return Err(ErrorKind::InvalidBlockHeight.into()); } self.get_block_header(&hash) @@ -398,7 +398,7 @@ impl ChainStore { loop { let block_header = self.get_block_header(&receipts_block_hash)?; - if block_header.inner_lite.height == last_included_height { + if block_header.height() == last_included_height { let receipts = if let Ok(cur_receipts) = self.get_outgoing_receipts(&receipts_block_hash, shard_id) { @@ -408,7 +408,7 @@ impl ChainStore { }; return Ok(ReceiptResponse(receipts_block_hash, receipts)); } else { - receipts_block_hash = block_header.prev_hash; + receipts_block_hash = *block_header.prev_hash(); } } } @@ -423,16 +423,13 @@ impl ChainStore { ) -> Result<(), InvalidTxError> { // if both are on the canonical chain, comparing height is sufficient // we special case this because it is expected that this scenario will happen in most cases. - let base_height = self - .get_block_header(base_block_hash) - .map_err(|_| InvalidTxError::Expired)? - .inner_lite - .height; - let prev_height = prev_block_header.inner_lite.height; + let base_height = + self.get_block_header(base_block_hash).map_err(|_| InvalidTxError::Expired)?.height(); + let prev_height = prev_block_header.height(); if let Ok(base_block_hash_by_height) = self.get_block_hash_by_height(base_height) { if &base_block_hash_by_height == base_block_hash { if let Ok(prev_hash) = self.get_block_hash_by_height(prev_height) { - if prev_hash == prev_block_header.hash { + if &prev_hash == prev_block_header.hash() { if prev_height <= base_height + validity_period { return Ok(()); } else { @@ -447,7 +444,7 @@ impl ChainStore { // whether the base block is the same as the one with that height on the canonical fork. // Otherwise we walk back the chain to check whether base block is on the same chain. 
let last_final_height = self - .get_block_height(&prev_block_header.inner_rest.last_final_block) + .get_block_height(&prev_block_header.last_final_block()) .map_err(|_| InvalidTxError::InvalidChain)?; if prev_height > base_height + validity_period { @@ -467,9 +464,9 @@ impl ChainStore { } } else { let header = self - .get_header_on_chain_by_height(&prev_block_header.hash, base_height) + .get_header_on_chain_by_height(prev_block_header.hash(), base_height) .map_err(|_| InvalidTxError::InvalidChain)?; - if &header.hash == base_block_hash { + if header.hash() == base_block_hash { Ok(()) } else { Err(InvalidTxError::InvalidChain) @@ -481,7 +478,7 @@ impl ChainStore { if hash == &CryptoHash::default() { Ok(self.genesis_height) } else { - Ok(self.get_block_header(hash)?.inner_lite.height) + Ok(self.get_block_header(hash)?.height()) } } } @@ -558,7 +555,7 @@ impl ChainStoreAccess for ChainStore { debug_assert!( false, "If the block was not found, the block header may either \ - exist or not found as well, instead the error was returned {:?}", + exist or not found as well, instead the error was returned {:?}", header_error ); debug!( @@ -605,7 +602,7 @@ impl ChainStoreAccess for ChainStore { /// Get previous header. fn get_previous_header(&mut self, header: &BlockHeader) -> Result<&BlockHeader, Error> { - self.get_block_header(&header.prev_hash) + self.get_block_header(header.prev_hash()) } /// Information from applying block. @@ -1124,15 +1121,15 @@ impl<'a> ChainStoreUpdate<'a> { loop { let header = self.get_block_header(&block_hash)?; - if header.inner_lite.height < last_chunk_height_included { + if header.height() < last_chunk_height_included { panic!("get_incoming_receipts_for_shard failed"); } - if header.inner_lite.height == last_chunk_height_included { + if header.height() == last_chunk_height_included { break; } - let prev_hash = header.prev_hash; + let prev_hash = *header.prev_hash(); if let Ok(receipt_proofs) = self.get_incoming_receipts(&block_hash, shard_id) { ret.push(ReceiptProofResponse(block_hash, receipt_proofs.clone())); @@ -1236,7 +1233,7 @@ impl<'a> ChainStoreAccess for ChainStoreUpdate<'a> { /// Get previous header. fn get_previous_header(&mut self, header: &BlockHeader) -> Result<&BlockHeader, Error> { - self.get_block_header(&header.prev_hash) + self.get_block_header(header.prev_hash()) } fn get_block_extra(&mut self, block_hash: &CryptoHash) -> Result<&BlockExtra, Error> { @@ -1552,7 +1549,7 @@ impl<'a> ChainStoreUpdate<'a> { loop { let header = self.get_block_header(&prev_hash)?; let (header_height, header_hash, header_prev_hash) = - (header.inner_lite.height, header.hash(), header.prev_hash); + (header.height(), *header.hash(), *header.prev_hash()); // Clean up block indices between blocks. for height in (header_height + 1)..prev_height { self.chain_store_cache_update.height_to_hashes.insert(height, None); @@ -1637,7 +1634,7 @@ impl<'a> ChainStoreUpdate<'a> { /// Save block. pub fn save_block(&mut self, block: Block) { - self.chain_store_cache_update.blocks.insert(block.hash(), block); + self.chain_store_cache_update.blocks.insert(*block.hash(), block); } /// Save post applying block extra info. 
@@ -1679,20 +1676,20 @@ impl<'a> ChainStoreUpdate<'a> { } fn update_and_save_block_merkle_tree(&mut self, header: &BlockHeader) -> Result<(), Error> { - let prev_hash = header.prev_hash; + let prev_hash = *header.prev_hash(); if prev_hash == CryptoHash::default() { - self.save_block_merkle_tree(header.hash(), PartialMerkleTree::default()); + self.save_block_merkle_tree(*header.hash(), PartialMerkleTree::default()); } else { let mut block_merkle_tree = self.get_block_merkle_tree(&prev_hash)?.clone(); block_merkle_tree.insert(prev_hash); - self.save_block_merkle_tree(header.hash(), block_merkle_tree); + self.save_block_merkle_tree(*header.hash(), block_merkle_tree); } Ok(()) } pub fn save_block_header(&mut self, header: BlockHeader) -> Result<(), Error> { self.update_and_save_block_merkle_tree(&header)?; - self.chain_store_cache_update.headers.insert(header.hash(), header); + self.chain_store_cache_update.headers.insert(*header.hash(), header); Ok(()) } @@ -1913,7 +1910,7 @@ impl<'a> ChainStoreUpdate<'a> { match gc_mode.clone() { GCMode::Fork(tries) => { // If the block is on a fork, we delete the state that's the result of applying this block - for shard_id in 0..header.inner_rest.chunk_mask.len() as ShardId { + for shard_id in 0..header.chunk_mask().len() as ShardId { self.store() .get_ser(ColTrieChanges, &get_block_shard_id(&block_hash, shard_id))? .map(|trie_changes: TrieChanges| { @@ -1926,7 +1923,7 @@ impl<'a> ChainStoreUpdate<'a> { } GCMode::Canonical(tries) => { // If the block is on canonical chain, we delete the state that's before applying this block - for shard_id in 0..header.inner_rest.chunk_mask.len() as ShardId { + for shard_id in 0..header.chunk_mask().len() as ShardId { self.store() .get_ser(ColTrieChanges, &get_block_shard_id(&block_hash, shard_id))? .map(|trie_changes: TrieChanges| { @@ -1937,7 +1934,7 @@ impl<'a> ChainStoreUpdate<'a> { .unwrap_or(Ok(()))?; } // Set `block_hash` on previous one - block_hash = self.get_block_header(&block_hash)?.prev_hash; + block_hash = *self.get_block_header(&block_hash)?.prev_hash(); } GCMode::StateSync => { // Do nothing here @@ -1948,7 +1945,7 @@ impl<'a> ChainStoreUpdate<'a> { .get_block(&block_hash) .expect("block data is not expected to be already cleaned") .clone(); - let height = block.header.inner_lite.height; + let height = block.header().height(); if height == self.get_genesis_height() { if let GCMode::Fork(_) = gc_mode { // Broken GC prerequisites found @@ -1960,7 +1957,7 @@ impl<'a> ChainStoreUpdate<'a> { } // 2. Delete shard_id-indexed data (shards, receipts, transactions) - for shard_id in 0..block.header.inner_rest.chunk_mask.len() as ShardId { + for shard_id in 0..block.header().chunk_mask().len() as ShardId { // 2a. Delete outgoing receipts (ColOutgoingReceipts) store_update.delete(ColOutgoingReceipts, &get_block_shard_id(&block_hash, shard_id)); self.chain_store @@ -2025,7 +2022,7 @@ impl<'a> ChainStoreUpdate<'a> { let epoch_to_hashes_ref = self.get_all_block_hashes_by_height(height)?; let mut epoch_to_hashes = epoch_to_hashes_ref.clone(); let hashes = epoch_to_hashes - .get_mut(&block.header.inner_lite.epoch_id) + .get_mut(&block.header().epoch_id()) .expect("current epoch id should exist"); hashes.remove(&block_hash); store_update.set_ser( @@ -2037,7 +2034,7 @@ impl<'a> ChainStoreUpdate<'a> { .block_hash_per_height .cache_set(index_to_bytes(height), epoch_to_hashes); // 4b. 
Decreasing block refcount - self.dec_block_refcount(&block.header.prev_hash)?; + self.dec_block_refcount(block.header().prev_hash())?; } GCMode::Canonical(_) => { // 5. Canonical Chain clearing @@ -2048,7 +2045,7 @@ impl<'a> ChainStoreUpdate<'a> { // 6. Delete chunks and chunk-indexed data let mut min_chunk_height = self.tail()?; - for chunk_header in block.chunks { + for chunk_header in block.chunks() { if min_chunk_height > chunk_header.inner.height_created { min_chunk_height = chunk_header.inner.height_created; } @@ -2098,22 +2095,20 @@ impl<'a> ChainStoreUpdate<'a> { .map_err::(|e| e.into())?; } for (hash, block) in self.chain_store_cache_update.blocks.iter() { - let mut map = match self - .chain_store - .get_all_block_hashes_by_height(block.header.inner_lite.height) - { - Ok(m) => m.clone(), - Err(_) => HashMap::new(), - }; - map.entry(block.header.inner_lite.epoch_id.clone()) + let mut map = + match self.chain_store.get_all_block_hashes_by_height(block.header().height()) { + Ok(m) => m.clone(), + Err(_) => HashMap::new(), + }; + map.entry(block.header().epoch_id().clone()) .or_insert_with(|| HashSet::new()) .insert(*hash); store_update - .set_ser(ColBlockPerHeight, &index_to_bytes(block.header.inner_lite.height), &map) + .set_ser(ColBlockPerHeight, &index_to_bytes(block.header().height()), &map) .map_err::(|e| e.into())?; self.chain_store_cache_update .block_hash_per_height - .insert(block.header.inner_lite.height, map); + .insert(block.header().height(), map); store_update .set_ser(ColBlock, hash.as_ref(), block) .map_err::(|e| e.into())?; @@ -2476,7 +2471,7 @@ mod tests { use near_primitives::hash::hash; use near_primitives::types::{BlockHeight, EpochId, NumBlocks}; use near_primitives::utils::index_to_bytes; - use near_primitives::validator_signer::{InMemoryValidatorSigner, ValidatorSigner}; + use near_primitives::validator_signer::InMemoryValidatorSigner; use near_store::test_utils::create_test_store; use crate::chain::check_refcount_map; @@ -2517,10 +2512,10 @@ mod tests { Arc::new(InMemoryValidatorSigner::from_seed("test1", KeyType::ED25519, "test1")); let short_fork = vec![Block::empty_with_height(&genesis, 1, &*signer.clone())]; let mut store_update = chain.mut_store().store_update(); - store_update.save_block_header(short_fork[0].header.clone()).unwrap(); + store_update.save_block_header(short_fork[0].header().clone()).unwrap(); store_update.commit().unwrap(); - let short_fork_head = short_fork[0].clone().header; + let short_fork_head = short_fork[0].header().clone(); assert!(chain .mut_store() .check_transaction_validity_period( @@ -2535,15 +2530,15 @@ mod tests { for i in 1..(transaction_validity_period + 3) { let block = Block::empty_with_height(&prev_block, i, &*signer.clone()); prev_block = block.clone(); - store_update.save_block_header(block.header.clone()).unwrap(); + store_update.save_block_header(block.header().clone()).unwrap(); store_update - .update_height_if_not_challenged(block.header.inner_lite.height, block.hash()) + .update_height_if_not_challenged(block.header().height(), *block.hash()) .unwrap(); long_fork.push(block); } store_update.commit().unwrap(); let valid_base_hash = long_fork[1].hash(); - let cur_header = &long_fork.last().unwrap().header; + let cur_header = &long_fork.last().unwrap().header(); assert!(chain .mut_store() .check_transaction_validity_period( @@ -2576,15 +2571,15 @@ mod tests { for i in 1..(transaction_validity_period + 2) { let block = Block::empty_with_height(&prev_block, i, &*signer.clone()); prev_block = block.clone(); - 
store_update.save_block_header(block.header.clone()).unwrap(); + store_update.save_block_header(block.header().clone()).unwrap(); store_update - .update_height_if_not_challenged(block.header.inner_lite.height, block.hash()) + .update_height_if_not_challenged(block.header().height(), *block.hash()) .unwrap(); blocks.push(block); } store_update.commit().unwrap(); let valid_base_hash = blocks[1].hash(); - let cur_header = &blocks.last().unwrap().header; + let cur_header = &blocks.last().unwrap().header(); assert!(chain .mut_store() .check_transaction_validity_period( @@ -2599,14 +2594,14 @@ mod tests { &*signer.clone(), ); let mut store_update = chain.mut_store().store_update(); - store_update.save_block_header(new_block.header.clone()).unwrap(); + store_update.save_block_header(new_block.header().clone()).unwrap(); store_update - .update_height_if_not_challenged(new_block.header.inner_lite.height, new_block.hash()) + .update_height_if_not_challenged(new_block.header().height(), *new_block.hash()) .unwrap(); store_update.commit().unwrap(); assert_eq!( chain.mut_store().check_transaction_validity_period( - &new_block.header, + &new_block.header(), &valid_base_hash, transaction_validity_period ), @@ -2627,12 +2622,12 @@ mod tests { for i in 1..(transaction_validity_period + 2) { let block = Block::empty_with_height(&prev_block, i, &*signer.clone()); prev_block = block.clone(); - store_update.save_block_header(block.header.clone()).unwrap(); + store_update.save_block_header(block.header().clone()).unwrap(); short_fork.push(block); } store_update.commit().unwrap(); - let short_fork_head = short_fork.last().unwrap().clone().header; + let short_fork_head = short_fork.last().unwrap().header().clone(); assert_eq!( chain.mut_store().check_transaction_validity_period( &short_fork_head, @@ -2647,11 +2642,11 @@ mod tests { for i in 1..(transaction_validity_period * 5) { let block = Block::empty_with_height(&prev_block, i, &*signer.clone()); prev_block = block.clone(); - store_update.save_block_header(block.header.clone()).unwrap(); + store_update.save_block_header(block.header().clone()).unwrap(); long_fork.push(block); } store_update.commit().unwrap(); - let long_fork_head = &long_fork.last().unwrap().header; + let long_fork_head = &long_fork.last().unwrap().header(); assert_eq!( chain.mut_store().check_transaction_validity_period( long_fork_head, @@ -2670,18 +2665,15 @@ mod tests { Arc::new(InMemoryValidatorSigner::from_seed("test1", KeyType::ED25519, "test1")); let block1 = Block::empty_with_height(&genesis, 1, &*signer.clone()); let mut block2 = block1.clone(); - block2.header.inner_lite.epoch_id = EpochId(hash(&[1, 2, 3])); - let (block_hash, signature) = signer.sign_block_header_parts( - block2.header.prev_hash, - &block2.header.inner_lite, - &block2.header.inner_rest, - ); - block2.header.hash = block_hash; - block2.header.signature = signature; + block2.mut_header().get_mut().inner_lite.epoch_id = EpochId(hash(&[1, 2, 3])); + block2.mut_header().resign(&*signer); let mut store_update = chain.mut_store().store_update(); store_update.chain_store_cache_update.height_to_hashes.insert(1, Some(hash(&[1]))); - store_update.chain_store_cache_update.blocks.insert(block1.header.hash, block1.clone()); + store_update + .chain_store_cache_update + .blocks + .insert(*block1.header().hash(), block1.clone()); store_update.commit().unwrap(); let block_hash = chain.mut_store().height.cache_get(&index_to_bytes(1)).cloned(); @@ -2690,7 +2682,10 @@ mod tests { let mut store_update = chain.mut_store().store_update(); 
store_update.chain_store_cache_update.height_to_hashes.insert(1, Some(hash(&[2]))); - store_update.chain_store_cache_update.blocks.insert(block2.header.hash, block2.clone()); + store_update + .chain_store_cache_update + .blocks + .insert(*block2.header().hash(), block2.clone()); store_update.commit().unwrap(); let block_hash1 = chain.mut_store().height.cache_get(&index_to_bytes(1)).cloned(); @@ -2716,14 +2711,14 @@ mod tests { blocks.push(block.clone()); let mut store_update = chain.mut_store().store_update(); store_update.save_block(block.clone()); - store_update.inc_block_refcount(&block.header.prev_hash).unwrap(); - store_update.save_head(&Tip::from_header(&block.header)).unwrap(); - store_update.save_block_header(block.header.clone()).unwrap(); + store_update.inc_block_refcount(block.header().prev_hash()).unwrap(); + store_update.save_head(&Tip::from_header(block.header())).unwrap(); + store_update.save_block_header(block.header().clone()).unwrap(); store_update .chain_store_cache_update .height_to_hashes - .insert(i, Some(block.header.hash)); - store_update.save_next_block_hash(&prev_block.hash(), block.hash()); + .insert(i, Some(*block.header().hash())); + store_update.save_next_block_hash(&prev_block.hash(), *block.hash()); store_update.commit().unwrap(); prev_block = block.clone(); @@ -2767,14 +2762,14 @@ mod tests { let block = Block::empty_with_height(&prev_block, i, &*signer); blocks.push(block.clone()); store_update.save_block(block.clone()); - store_update.inc_block_refcount(&block.header.prev_hash).unwrap(); - store_update.save_head(&Tip::from_header(&block.header)).unwrap(); - store_update.save_block_header(block.header.clone()).unwrap(); + store_update.inc_block_refcount(block.header().prev_hash()).unwrap(); + store_update.save_head(&Tip::from_header(&block.header())).unwrap(); + store_update.save_block_header(block.header().clone()).unwrap(); store_update .chain_store_cache_update .height_to_hashes - .insert(i, Some(block.header.hash)); - store_update.save_next_block_hash(&prev_block.hash(), block.hash()); + .insert(i, Some(*block.header().hash())); + store_update.save_next_block_hash(&prev_block.hash(), *block.hash()); store_update.commit().unwrap(); prev_block = block.clone(); @@ -2793,28 +2788,28 @@ mod tests { .values() .flatten() .collect::>(), - vec![&blocks[5].hash()] + vec![blocks[5].hash()] ); assert!(chain.mut_store().get_next_block_hash(&blocks[5].hash()).is_ok()); let trie = chain.runtime_adapter.get_tries(); let mut store_update = chain.mut_store().store_update(); - assert!(store_update.clear_block_data(blocks[5].hash(), GCMode::Canonical(trie)).is_ok()); + assert!(store_update.clear_block_data(*blocks[5].hash(), GCMode::Canonical(trie)).is_ok()); store_update.commit().unwrap(); - assert!(chain.get_block(&blocks[4].hash()).is_err()); - assert!(chain.get_block(&blocks[5].hash()).is_ok()); - assert!(chain.get_block(&blocks[6].hash()).is_ok()); + assert!(chain.get_block(blocks[4].hash()).is_err()); + assert!(chain.get_block(blocks[5].hash()).is_ok()); + assert!(chain.get_block(blocks[6].hash()).is_ok()); // block header should be available - assert!(chain.get_block_header(&blocks[4].hash()).is_ok()); - assert!(chain.get_block_header(&blocks[5].hash()).is_ok()); - assert!(chain.get_block_header(&blocks[6].hash()).is_ok()); + assert!(chain.get_block_header(blocks[4].hash()).is_ok()); + assert!(chain.get_block_header(blocks[5].hash()).is_ok()); + assert!(chain.get_block_header(blocks[6].hash()).is_ok()); 
assert!(chain.mut_store().get_all_block_hashes_by_height(4).is_err()); assert!(chain.mut_store().get_all_block_hashes_by_height(5).is_ok()); assert!(chain.mut_store().get_all_block_hashes_by_height(6).is_ok()); - assert!(chain.mut_store().get_next_block_hash(&blocks[4].hash()).is_err()); - assert!(chain.mut_store().get_next_block_hash(&blocks[5].hash()).is_ok()); - assert!(chain.mut_store().get_next_block_hash(&blocks[6].hash()).is_ok()); + assert!(chain.mut_store().get_next_block_hash(blocks[4].hash()).is_err()); + assert!(chain.mut_store().get_next_block_hash(blocks[5].hash()).is_ok()); + assert!(chain.mut_store().get_next_block_hash(blocks[6].hash()).is_ok()); } /// Test that MAX_HEIGHTS_TO_CLEAR works properly @@ -2833,14 +2828,14 @@ mod tests { let mut store_update = chain.mut_store().store_update(); store_update.save_block(block.clone()); - store_update.inc_block_refcount(&block.header.prev_hash).unwrap(); - store_update.save_head(&Tip::from_header(&block.header)).unwrap(); - store_update.save_block_header(block.header.clone()).unwrap(); + store_update.inc_block_refcount(block.header().prev_hash()).unwrap(); + store_update.save_head(&Tip::from_header(&block.header())).unwrap(); + store_update.save_block_header(block.header().clone()).unwrap(); store_update .chain_store_cache_update .height_to_hashes - .insert(i, Some(block.header.hash)); - store_update.save_next_block_hash(&prev_block.hash(), block.hash()); + .insert(i, Some(*block.header().hash())); + store_update.save_next_block_hash(&prev_block.hash(), *block.hash()); store_update.commit().unwrap(); prev_block = block.clone(); diff --git a/chain/chain/src/store_validator/validate.rs b/chain/chain/src/store_validator/validate.rs index 18ac3a1cafd..97907865394 100644 --- a/chain/chain/src/store_validator/validate.rs +++ b/chain/chain/src/store_validator/validate.rs @@ -93,7 +93,7 @@ pub(crate) fn block_header_hash_validity( block_hash: &CryptoHash, header: &BlockHeader, ) -> Result<(), ErrorMessage> { - if header.hash() != *block_hash { + if header.hash() != block_hash { return err!("Invalid Block Header stored, hash = {:?}, header = {:?}", block_hash, header); } Ok(()) @@ -107,7 +107,7 @@ pub(crate) fn block_header_height_validity( if !sv.inner.is_misc_set { return err!("Can't validate, is_misc_set == false"); } - let height = header.inner_lite.height; + let height = header.height(); let head = sv.inner.head; if height > head { return err!("Invalid Block Header stored, Head = {:?}, header = {:?}", head, header); @@ -120,7 +120,7 @@ pub(crate) fn block_hash_validity( block_hash: &CryptoHash, block: &Block, ) -> Result<(), ErrorMessage> { - if block.hash() != *block_hash { + if block.hash() != block_hash { return err!("Invalid Block stored, hash = {:?}, block = {:?}", block_hash, block); } Ok(()) @@ -135,14 +135,14 @@ pub(crate) fn block_height_validity( return err!("Can't validate, is_misc_set == false"); } let head = sv.inner.head; - let height = block.header.inner_lite.height; + let height = block.header().height(); if height > head { return err!("Invalid Block stored, Head = {:?}, block = {:?}", head, block); } let tail = sv.inner.tail; if height < tail && height != sv.config.genesis_height { - sv.inner.block_heights_less_tail.push(block.hash()); + sv.inner.block_heights_less_tail.push(*block.hash()); } sv.inner.is_block_height_cmp_tail_prepared = true; Ok(()) @@ -153,7 +153,7 @@ pub(crate) fn block_indexed_by_height( block_hash: &CryptoHash, block: &Block, ) -> Result<(), ErrorMessage> { - let height = 
block.header.inner_lite.height; + let height = block.header().height(); let block_hashes: HashSet = unwrap_or_err_db!( sv.store.get_ser::>>( ColBlockPerHeight, @@ -260,17 +260,17 @@ pub(crate) fn block_chunks_exist( _block_hash: &CryptoHash, block: &Block, ) -> Result<(), ErrorMessage> { - for chunk_header in block.chunks.iter() { + for chunk_header in block.chunks().iter() { match &sv.me { Some(me) => { if sv.runtime_adapter.cares_about_shard( Some(&me), - &block.header.prev_hash, + block.header().prev_hash(), chunk_header.inner.shard_id, true, ) || sv.runtime_adapter.will_care_about_shard( Some(&me), - &block.header.prev_hash, + block.header().prev_hash(), chunk_header.inner.shard_id, true, ) { @@ -293,8 +293,8 @@ pub(crate) fn block_chunks_height_validity( _block_hash: &CryptoHash, block: &Block, ) -> Result<(), ErrorMessage> { - for chunk_header in block.chunks.iter() { - if chunk_header.inner.height_created > block.header.inner_lite.height { + for chunk_header in block.chunks().iter() { + if chunk_header.inner.height_created > block.header().height() { return err!( "Invalid ShardChunk included, chunk_header = {:?}, block = {:?}", chunk_header, @@ -332,7 +332,7 @@ pub(crate) fn canonical_header_validity( "Can't get Block Header {:?} from ColBlockHeader", hash ); - if header.inner_lite.height != *height { + if header.height() != *height { return err!("Block on Height {:?} doesn't have required Height, {:?}", height, header); } Ok(()) @@ -349,13 +349,13 @@ pub(crate) fn canonical_prev_block_validity( "Can't get Block Header {:?} from ColBlockHeader", hash ); - let prev_hash = header.prev_hash; + let prev_hash = *header.prev_hash(); let prev_header = unwrap_or_err_db!( sv.store.get_ser::(ColBlockHeader, prev_hash.as_ref()), "Can't get prev Block Header {:?} from ColBlockHeader", prev_hash ); - let prev_height = prev_header.inner_lite.height; + let prev_height = prev_header.height(); let same_prev_hash = unwrap_or_err_db!( sv.store.get_ser::(ColBlockHeight, &index_to_bytes(prev_height)), "Can't get prev Block Hash from ColBlockHeight by Height, {:?}, {:?}", diff --git a/chain/chain/src/test_utils.rs b/chain/chain/src/test_utils.rs index 7202bd0ab9a..a233a7fa3fe 100644 --- a/chain/chain/src/test_utils.rs +++ b/chain/chain/src/test_utils.rs @@ -6,12 +6,13 @@ use std::sync::{Arc, RwLock}; use borsh::{BorshDeserialize, BorshSerialize}; use chrono::Utc; use log::debug; +use num_rational::Rational; use serde::Serialize; use near_crypto::{KeyType, PublicKey, SecretKey, Signature}; use near_pool::types::PoolIterator; use near_primitives::account::{AccessKey, Account}; -use near_primitives::challenge::{ChallengesResult, SlashedValidator}; +use near_primitives::challenge::ChallengesResult; use near_primitives::errors::InvalidTxError; use near_primitives::hash::{hash, CryptoHash}; use near_primitives::receipt::{ActionReceipt, Receipt, ReceiptEnum}; @@ -26,6 +27,7 @@ use near_primitives::types::{ ShardId, StateRoot, StateRootNode, ValidatorStake, ValidatorStats, }; use near_primitives::validator_signer::InMemoryValidatorSigner; +use near_primitives::version::{ProtocolVersion, PROTOCOL_VERSION}; use near_primitives::views::{ AccessKeyInfoView, AccessKeyList, CallResult, EpochValidatorInfo, QueryRequest, QueryResponse, QueryResponseKind, ViewStateResult, @@ -39,9 +41,8 @@ use near_store::{ use crate::chain::{Chain, ChainGenesis, NUM_EPOCHS_TO_KEEP_STORE_DATA}; use crate::error::{Error, ErrorKind}; use crate::store::ChainStoreAccess; -use crate::types::ApplyTransactionResult; +use 
crate::types::{ApplyTransactionResult, BlockHeaderInfo}; use crate::{BlockHeader, DoomslugThresholdMode, RuntimeAdapter}; -use num_rational::Rational; #[derive( BorshSerialize, BorshDeserialize, Serialize, Hash, PartialEq, Eq, Ord, PartialOrd, Clone, Debug, @@ -191,7 +192,7 @@ impl KeyValueRuntime { let mut hash_to_valset = self.hash_to_valset.write().unwrap(); let mut epoch_start_map = self.epoch_start.write().unwrap(); - let prev_prev_hash = prev_block_header.prev_hash; + let prev_prev_hash = *prev_block_header.prev_hash(); let prev_epoch = hash_to_epoch.get(&prev_prev_hash); let prev_next_epoch = hash_to_next_epoch.get(&prev_prev_hash).unwrap(); let prev_valset = match prev_epoch { @@ -201,23 +202,18 @@ impl KeyValueRuntime { let prev_epoch_start = *epoch_start_map.get(&prev_prev_hash).unwrap(); - let last_final_height = - if prev_block_header.inner_rest.last_final_block == CryptoHash::default() { - 0 - } else { - self.get_block_header(&prev_block_header.inner_rest.last_final_block) - .unwrap() - .unwrap() - .inner_lite - .height - }; + let last_final_height = if prev_block_header.last_final_block() == &CryptoHash::default() { + 0 + } else { + self.get_block_header(prev_block_header.last_final_block()).unwrap().unwrap().height() + }; let increment_epoch = prev_prev_hash == CryptoHash::default() // genesis is in its own epoch || last_final_height + 3 >= prev_epoch_start + self.epoch_length; let needs_next_epoch_approvals = !increment_epoch && last_final_height + 3 < prev_epoch_start + self.epoch_length - && prev_block_header.inner_lite.height + 3 >= prev_epoch_start + self.epoch_length; + && prev_block_header.height() + 3 >= prev_epoch_start + self.epoch_length; let (epoch, next_epoch, valset, epoch_start) = if increment_epoch { let new_valset = match prev_valset { @@ -228,7 +224,7 @@ impl KeyValueRuntime { prev_next_epoch.clone(), EpochId(prev_hash), new_valset, - prev_block_header.inner_lite.height + 1, + prev_block_header.height() + 1, ) } else { ( @@ -281,8 +277,8 @@ impl RuntimeAdapter for KeyValueRuntime { fn verify_block_signature(&self, header: &BlockHeader) -> Result<(), Error> { let validators = &self.validators - [self.get_epoch_and_valset(header.prev_hash).map_err(|err| err.to_string())?.1]; - let validator = &validators[(header.inner_lite.height as usize) % validators.len()]; + [self.get_epoch_and_valset(*header.prev_hash()).map_err(|err| err.to_string())?.1]; + let validator = &validators[(header.height() as usize) % validators.len()]; if !header.verify_block_producer(&validator.public_key) { return Err(ErrorKind::InvalidBlockProposer.into()); } @@ -294,8 +290,8 @@ impl RuntimeAdapter for KeyValueRuntime { _epoch_id: &EpochId, _block_height: BlockHeight, _prev_random_value: &CryptoHash, - _vrf_value: near_crypto::vrf::Value, - _vrf_proof: near_crypto::vrf::Proof, + _vrf_value: &near_crypto::vrf::Value, + _vrf_proof: &near_crypto::vrf::Proof, ) -> Result<(), Error> { Ok(()) } @@ -507,18 +503,7 @@ impl RuntimeAdapter for KeyValueRuntime { Ok(res) } - fn add_validator_proposals( - &self, - _parent_hash: CryptoHash, - _current_hash: CryptoHash, - _rng_seed: CryptoHash, - _height: BlockHeight, - _last_finalized_height: BlockHeight, - _proposals: Vec, - _slashed_validators: Vec, - _validator_mask: Vec, - _total_supply: Balance, - ) -> Result<(), Error> { + fn add_validator_proposals(&self, _block_header_info: BlockHeaderInfo) -> Result<(), Error> { Ok(()) } @@ -861,7 +846,7 @@ impl RuntimeAdapter for KeyValueRuntime { parent_hash ))) })?; - let prev_prev_hash = 
prev_block_header.prev_hash; + let prev_prev_hash = *prev_block_header.prev_hash(); Ok(self.get_epoch_and_valset(*parent_hash)?.0 != self.get_epoch_and_valset(prev_prev_hash)?.0) } @@ -880,14 +865,14 @@ impl RuntimeAdapter for KeyValueRuntime { fn get_epoch_start_height(&self, block_hash: &CryptoHash) -> Result { let epoch_id = self.get_epoch_and_valset(*block_hash)?.0; match self.get_block_header(&epoch_id.0)? { - Some(block_header) => Ok(block_header.inner_lite.height), + Some(block_header) => Ok(block_header.height()), None => Ok(0), } } fn get_gc_stop_height(&self, block_hash: &CryptoHash) -> Result { let block_height = - self.get_block_header(block_hash)?.map(|h| h.inner_lite.height).unwrap_or_default(); + self.get_block_header(block_hash)?.map(|h| h.height()).unwrap_or_default(); Ok(block_height.saturating_sub(NUM_EPOCHS_TO_KEEP_STORE_DATA * self.epoch_length)) } @@ -899,6 +884,10 @@ impl RuntimeAdapter for KeyValueRuntime { Ok(0) } + fn get_epoch_protocol_version(&self, _epoch_id: &EpochId) -> Result { + Ok(PROTOCOL_VERSION) + } + fn get_validator_info(&self, _block_hash: &CryptoHash) -> Result { Ok(EpochValidatorInfo { current_validators: vec![], @@ -982,6 +971,7 @@ pub fn setup_with_tx_validity_period( Rational::from_integer(0), tx_validity_period, 10, + PROTOCOL_VERSION, ), DoomslugThresholdMode::NoApprovals, ) @@ -1021,6 +1011,7 @@ pub fn setup_with_validators( Rational::from_integer(0), tx_validity_period, epoch_length, + PROTOCOL_VERSION, ), DoomslugThresholdMode::NoApprovals, ) @@ -1050,42 +1041,43 @@ pub fn display_chain(me: &Option, chain: &mut Chain, tail: bool) { .get_block_header(&CryptoHash::try_from(key.as_ref()).unwrap()) .unwrap() .clone(); - if !tail || header.inner_lite.height + 10 > head.height { + if !tail || header.height() + 10 > head.height { headers.push(header); } } headers.sort_by(|h_left, h_right| { - if h_left.inner_lite.height > h_right.inner_lite.height { + if h_left.height() > h_right.height() { Ordering::Greater } else { Ordering::Less } }); for header in headers { - if header.prev_hash == CryptoHash::default() { + if header.prev_hash() == &CryptoHash::default() { // Genesis block. 
- debug!("{: >3} {}", header.inner_lite.height, format_hash(header.hash())); + debug!("{: >3} {}", header.height(), format_hash(*header.hash())); } else { - let parent_header = chain_store.get_block_header(&header.prev_hash).unwrap().clone(); + let parent_header = chain_store.get_block_header(header.prev_hash()).unwrap().clone(); let maybe_block = chain_store.get_block(&header.hash()).ok().cloned(); - let epoch_id = runtime_adapter.get_epoch_id_from_prev_block(&header.prev_hash).unwrap(); + let epoch_id = + runtime_adapter.get_epoch_id_from_prev_block(header.prev_hash()).unwrap(); let block_producer = - runtime_adapter.get_block_producer(&epoch_id, header.inner_lite.height).unwrap(); + runtime_adapter.get_block_producer(&epoch_id, header.height()).unwrap(); debug!( "{: >3} {} | {: >10} | parent: {: >3} {} | {}", - header.inner_lite.height, - format_hash(header.hash()), + header.height(), + format_hash(*header.hash()), block_producer, - parent_header.inner_lite.height, - format_hash(parent_header.hash()), + parent_header.height(), + format_hash(*parent_header.hash()), if let Some(block) = &maybe_block { - format!("chunks: {}", block.chunks.len()) + format!("chunks: {}", block.chunks().len()) } else { "-".to_string() } ); if let Some(block) = maybe_block { - for chunk_header in block.chunks.iter() { + for chunk_header in block.chunks().iter() { let chunk_producer = runtime_adapter .get_chunk_producer( &epoch_id, @@ -1138,6 +1130,7 @@ impl ChainGenesis { gas_price_adjustment_rate: Rational::from_integer(0), transaction_validity_period: 100, epoch_length: 5, + protocol_version: PROTOCOL_VERSION, } } } diff --git a/chain/chain/src/types.rs b/chain/chain/src/types.rs index 4ab0d475947..5e38fa954e8 100644 --- a/chain/chain/src/types.rs +++ b/chain/chain/src/types.rs @@ -19,6 +19,7 @@ use near_primitives::types::{ AccountId, ApprovalStake, Balance, BlockHeight, EpochId, Gas, MerkleHash, ShardId, StateRoot, StateRootNode, ValidatorStake, ValidatorStats, }; +use near_primitives::version::ProtocolVersion; use near_primitives::views::{EpochValidatorInfo, QueryRequest, QueryResponse}; use near_store::{PartialStorage, ShardTries, Store, StoreUpdate, Trie, WrappedTrieChanges}; @@ -103,6 +104,39 @@ impl ApplyTransactionResult { } } +/// Compressed information about block. +/// Useful for epoch manager. +#[derive(Default, BorshSerialize, BorshDeserialize, Serialize, Clone, Debug)] +pub struct BlockHeaderInfo { + pub hash: CryptoHash, + pub prev_hash: CryptoHash, + pub height: BlockHeight, + pub random_value: CryptoHash, + pub last_finalized_height: BlockHeight, + pub proposals: Vec, + pub slashed_validators: Vec, + pub chunk_mask: Vec, + pub total_supply: Balance, + pub latest_protocol_version: ProtocolVersion, +} + +impl BlockHeaderInfo { + pub fn new(header: &BlockHeader, last_finalized_height: u64) -> Self { + Self { + hash: *header.hash(), + prev_hash: *header.prev_hash(), + height: header.height(), + random_value: *header.random_value(), + last_finalized_height, + proposals: header.validator_proposals().to_vec(), + slashed_validators: vec![], + chunk_mask: header.chunk_mask().to_vec(), + total_supply: header.total_supply(), + latest_protocol_version: header.latest_protocol_version(), + } + } +} + /// Bridge between the chain and the runtime. /// Main function is to update state given transactions. /// Additionally handles validators. 
@@ -123,8 +157,8 @@ pub trait RuntimeAdapter: Send + Sync { epoch_id: &EpochId, block_height: BlockHeight, prev_random_value: &CryptoHash, - vrf_value: near_crypto::vrf::Value, - vrf_proof: near_crypto::vrf::Proof, + vrf_value: &near_crypto::vrf::Value, + vrf_proof: &near_crypto::vrf::Proof, ) -> Result<(), Error>; /// Validates a given signed transaction. @@ -308,19 +342,11 @@ pub trait RuntimeAdapter: Send + Sync { /// Amount of tokens minted in given epoch. fn get_epoch_minted_amount(&self, epoch_id: &EpochId) -> Result; + /// Epoch active protocol version. + fn get_epoch_protocol_version(&self, epoch_id: &EpochId) -> Result; + /// Add proposals for validators. - fn add_validator_proposals( - &self, - parent_hash: CryptoHash, - current_hash: CryptoHash, - rng_seed: CryptoHash, - height: BlockHeight, - last_finalized_height: BlockHeight, - proposals: Vec, - slashed_validators: Vec, - validator_mask: Vec, - total_supply: Balance, - ) -> Result<(), Error>; + fn add_validator_proposals(&self, block_header_info: BlockHeaderInfo) -> Result<(), Error>; /// Apply transactions to given state root and return store update and new state root. /// Also returns transaction result for each transaction and new receipts. @@ -514,6 +540,7 @@ mod tests { use near_primitives::merkle::verify_path; use near_primitives::transaction::{ExecutionOutcome, ExecutionStatus}; use near_primitives::validator_signer::InMemoryValidatorSigner; + use near_primitives::version::PROTOCOL_VERSION; use crate::Chain; @@ -524,6 +551,7 @@ mod tests { let num_shards = 32; let genesis_chunks = genesis_chunks(vec![StateRoot::default()], num_shards, 1_000_000, 0); let genesis = Block::genesis( + PROTOCOL_VERSION, genesis_chunks.into_iter().map(|chunk| chunk.header).collect(), Utc::now(), 0, @@ -533,20 +561,20 @@ mod tests { ); let signer = InMemoryValidatorSigner::from_seed("other", KeyType::ED25519, "other"); let b1 = Block::empty(&genesis, &signer); - assert!(b1.header.verify_block_producer(&signer.public_key())); + assert!(b1.header().verify_block_producer(&signer.public_key())); let other_signer = InMemoryValidatorSigner::from_seed("other2", KeyType::ED25519, "other2"); - let approvals = vec![Some(Approval::new(b1.hash(), 1, 2, &other_signer).signature)]; + let approvals = vec![Some(Approval::new(*b1.hash(), 1, 2, &other_signer).signature)]; let b2 = Block::empty_with_approvals( &b1, 2, - b1.header.inner_lite.epoch_id.clone(), - EpochId(genesis.hash()), + b1.header().epoch_id().clone(), + EpochId(*genesis.hash()), approvals, &signer, - genesis.header.inner_lite.next_bp_hash, + *genesis.header().next_bp_hash(), CryptoHash::default(), ); - b2.header.verify_block_producer(&signer.public_key()); + b2.header().verify_block_producer(&signer.public_key()); } #[test] diff --git a/chain/chain/src/validate.rs b/chain/chain/src/validate.rs index 350aa958092..1b19f352afb 100644 --- a/chain/chain/src/validate.rs +++ b/chain/chain/src/validate.rs @@ -154,32 +154,30 @@ fn validate_double_sign( ) -> Result<(CryptoHash, Vec), Error> { let left_block_header = BlockHeader::try_from_slice(&block_double_sign.left_block_header)?; let right_block_header = BlockHeader::try_from_slice(&block_double_sign.right_block_header)?; - let block_producer = runtime_adapter.get_block_producer( - &left_block_header.inner_lite.epoch_id, - left_block_header.inner_lite.height, - )?; + let block_producer = runtime_adapter + .get_block_producer(&left_block_header.epoch_id(), left_block_header.height())?; if left_block_header.hash() != right_block_header.hash() - && 
left_block_header.inner_lite.height == right_block_header.inner_lite.height + && left_block_header.height() == right_block_header.height() && runtime_adapter.verify_validator_signature( - &left_block_header.inner_lite.epoch_id, - &left_block_header.prev_hash, + &left_block_header.epoch_id(), + &left_block_header.prev_hash(), &block_producer, left_block_header.hash().as_ref(), - &left_block_header.signature, + left_block_header.signature(), )? && runtime_adapter.verify_validator_signature( - &right_block_header.inner_lite.epoch_id, - &right_block_header.prev_hash, + &right_block_header.epoch_id(), + &right_block_header.prev_hash(), &block_producer, right_block_header.hash().as_ref(), - &right_block_header.signature, + right_block_header.signature(), )? { // Deterministically return header with higher hash. Ok(if left_block_header.hash() > right_block_header.hash() { - (left_block_header.hash(), vec![block_producer]) + (*left_block_header.hash(), vec![block_producer]) } else { - (right_block_header.hash(), vec![block_producer]) + (*right_block_header.hash(), vec![block_producer]) }) } else { Err(ErrorKind::MaliciousChallenge.into()) @@ -226,10 +224,10 @@ fn validate_chunk_proofs_challenge( MaybeEncodedShardChunk::Decoded(chunk) => &chunk.header, }; let chunk_producer = validate_chunk_authorship(runtime_adapter, &chunk_header)?; - let account_to_slash_for_valid_challenge = Ok((block_header.hash(), vec![chunk_producer])); + let account_to_slash_for_valid_challenge = Ok((*block_header.hash(), vec![chunk_producer])); if !Block::validate_chunk_header_proof( &chunk_header, - &block_header.inner_rest.chunk_headers_root, + &block_header.chunk_headers_root(), &chunk_proofs.merkle_proof, ) { // Merkle proof is invalid. It's a malicious challenge. @@ -279,7 +277,7 @@ fn validate_chunk_state_challenge( let _ = validate_chunk_authorship(runtime_adapter, &chunk_state.prev_chunk.header)?; if !Block::validate_chunk_header_proof( &chunk_state.prev_chunk.header, - &prev_block_header.inner_rest.chunk_headers_root, + &prev_block_header.chunk_headers_root(), &chunk_state.prev_merkle_proof, ) { return Err(ErrorKind::MaliciousChallenge.into()); @@ -290,7 +288,7 @@ fn validate_chunk_state_challenge( let chunk_producer = validate_chunk_authorship(runtime_adapter, &chunk_state.chunk_header)?; if !Block::validate_chunk_header_proof( &chunk_state.chunk_header, - &block_header.inner_rest.chunk_headers_root, + &block_header.chunk_headers_root(), &chunk_state.merkle_proof, ) { return Err(ErrorKind::MaliciousChallenge.into()); @@ -303,14 +301,14 @@ fn validate_chunk_state_challenge( partial_storage, chunk_state.prev_chunk.header.inner.shard_id, &chunk_state.prev_chunk.header.inner.prev_state_root, - block_header.inner_lite.height, - block_header.inner_lite.timestamp, - &block_header.prev_hash, + block_header.height(), + block_header.raw_timestamp(), + &block_header.prev_hash(), &block_header.hash(), &chunk_state.prev_chunk.receipts, &chunk_state.prev_chunk.transactions, &[], - prev_block_header.inner_rest.gas_price, + prev_block_header.gas_price(), chunk_state.prev_chunk.header.inner.gas_limit, &ChallengesResult::default(), ) @@ -321,7 +319,7 @@ fn validate_chunk_state_challenge( || result.validator_proposals != chunk_state.chunk_header.inner.validator_proposals || result.total_gas_burnt != chunk_state.chunk_header.inner.gas_used { - Ok((block_header.hash(), vec![chunk_producer])) + Ok((*block_header.hash(), vec![chunk_producer])) } else { // If all the data matches, this is actually valid chunk and challenge is malicious. 
Err(ErrorKind::MaliciousChallenge.into()) diff --git a/chain/chain/tests/challenges.rs b/chain/chain/tests/challenges.rs index 7cac85d0d62..a4f2e4ebab5 100644 --- a/chain/chain/tests/challenges.rs +++ b/chain/chain/tests/challenges.rs @@ -8,10 +8,10 @@ fn challenges_new_head_prev() { let (mut chain, _, signer) = setup(); let mut hashes = vec![]; for i in 0..5 { - let prev_hash = chain.head_header().unwrap().hash(); + let prev_hash = *chain.head_header().unwrap().hash(); let prev = chain.get_block(&prev_hash).unwrap(); let block = Block::empty(&prev, &*signer); - hashes.push(block.hash()); + hashes.push(*block.hash()); let tip = chain .process_block(&None, block, Provenance::PRODUCED, |_| {}, |_| {}, |_| {}) .unwrap(); @@ -22,27 +22,27 @@ fn challenges_new_head_prev() { // The block to be added below after we invalidated fourth block. let last_block = Block::empty(&chain.get_block(&hashes[3]).unwrap(), &*signer); - assert_eq!(last_block.header.inner_lite.height, 5); + assert_eq!(last_block.header().height(), 5); let prev = chain.get_block(&hashes[1]).unwrap(); let challenger_block = Block::empty_with_height(&prev, 3, &*signer); - let challenger_hash = challenger_block.hash(); + let challenger_hash = *challenger_block.hash(); let _ = chain .process_block(&None, challenger_block, Provenance::PRODUCED, |_| {}, |_| {}, |_| {}) .unwrap(); // At this point the challenger block is not on canonical chain - assert_eq!(chain.head_header().unwrap().inner_lite.height, 5); + assert_eq!(chain.head_header().unwrap().height(), 5); // Challenge fourth block. The third block and the challenger block have the same height, the // current logic will choose the third block. chain.mark_block_as_challenged(&hashes[3], &challenger_hash).unwrap(); - assert_eq!(chain.head_header().unwrap().hash(), hashes[2]); + assert_eq!(chain.head_header().unwrap().hash(), &hashes[2]); - assert_eq!(chain.get_header_by_height(2).unwrap().hash(), hashes[1]); - assert_eq!(chain.get_header_by_height(3).unwrap().hash(), hashes[2]); + assert_eq!(chain.get_header_by_height(2).unwrap().hash(), &hashes[1]); + assert_eq!(chain.get_header_by_height(3).unwrap().hash(), &hashes[2]); assert!(chain.get_header_by_height(4).is_err()); // Try to add a block on top of the fifth block. 
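Editorial note: the test churn above and below follows one pattern: the former `inner_lite`/`inner_rest` fields are now accessor methods on `BlockHeader`, and `hash()` returns `&CryptoHash`, so owned copies take an explicit dereference while equality checks borrow the stored value. A condensed restatement of the two idioms, assuming a `chain` and a processed block `b1` at height 1 as in the tests above:

```rust
// Owned copy: BlockHeader::hash() now returns &CryptoHash, so deref to copy it.
let b1_hash: CryptoHash = *b1.hash();
// Lookup side: the accessor yields a reference, so compare against &b1_hash.
assert_eq!(chain.get_header_by_height(1).unwrap().hash(), &b1_hash);
```

The same shape recurs for `height()`, `epoch_id()`, `prev_hash()`, and the other accessors throughout the remaining hunks.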
@@ -54,7 +54,7 @@ fn challenges_new_head_prev() { } else { assert!(false); } - assert_eq!(chain.head_header().unwrap().hash(), hashes[2]); + assert_eq!(chain.head_header().unwrap().hash(), &hashes[2]); // Add two more blocks let b3 = Block::empty(&chain.get_block(&hashes[2]).unwrap().clone(), &*signer); @@ -70,7 +70,7 @@ fn challenges_new_head_prev() { .unwrap() .last_block_hash; - assert_eq!(chain.head_header().unwrap().hash(), new_head); + assert_eq!(chain.head_header().unwrap().hash(), &new_head); // Add two more blocks on an alternative chain let b3 = Block::empty(&chain.get_block(&hashes[2]).unwrap().clone(), &*signer); @@ -84,7 +84,7 @@ fn challenges_new_head_prev() { .unwrap(); let challenger_hash = b4.hash(); - assert_eq!(chain.head_header().unwrap().hash(), new_head); + assert_eq!(chain.head_header().unwrap().hash(), &new_head); chain.mark_block_as_challenged(&new_head, &challenger_hash).unwrap(); @@ -95,14 +95,14 @@ fn challenges_new_head_prev() { fn test_no_challenge_on_same_header() { init_test_logger(); let (mut chain, _, signer) = setup(); - let prev_hash = chain.head_header().unwrap().hash(); + let prev_hash = *chain.head_header().unwrap().hash(); let prev = chain.get_block(&prev_hash).unwrap(); let block = Block::empty(&prev, &*signer); let tip = chain .process_block(&None, block.clone(), Provenance::PRODUCED, |_| {}, |_| {}, |_| {}) .unwrap(); assert_eq!(tip.unwrap().height, 1); - if let Err(e) = chain.process_block_header(&block.header, |_| panic!("Unexpected Challenge")) { + if let Err(e) = chain.process_block_header(block.header(), |_| panic!("Unexpected Challenge")) { match e.kind() { ErrorKind::Unfit(_) => {} _ => panic!("Wrong error kind {}", e), diff --git a/chain/chain/tests/gc.rs b/chain/chain/tests/gc.rs index 2161c35be19..ab1d21235bd 100644 --- a/chain/chain/tests/gc.rs +++ b/chain/chain/tests/gc.rs @@ -61,12 +61,12 @@ mod tests { let mut store_update = chain.mut_store().store_update(); if i == 0 { store_update - .save_block_merkle_tree(prev_block.hash(), PartialMerkleTree::default()); + .save_block_merkle_tree(*prev_block.hash(), PartialMerkleTree::default()); } store_update.save_block(block.clone()); - store_update.inc_block_refcount(&block.header.prev_hash).unwrap(); - store_update.save_block_header(block.header.clone()).unwrap(); - let tip = Tip::from_header(&block.header); + store_update.inc_block_refcount(block.header().prev_hash()).unwrap(); + store_update.save_block_header(block.header().clone()).unwrap(); + let tip = Tip::from_header(block.header()); if head.height < tip.height { store_update.save_head(&tip).unwrap(); } @@ -79,10 +79,7 @@ mod tests { let trie_changes = trie.update(&state_root, trie_changes_data.iter().cloned()).unwrap(); if verbose { - println!( - "state new {:?} {:?}", - block.header.inner_lite.height, trie_changes_data - ); + println!("state new {:?} {:?}", block.header().height(), trie_changes_data); } let new_root = trie_changes.new_root; @@ -91,7 +88,7 @@ mod tests { shard_id, trie_changes, Default::default(), - block.hash(), + *block.hash(), ); store_update.save_trie_changes(wrapped_trie_changes); @@ -199,7 +196,7 @@ mod tests { .update(&state_root2, changes1[shard_to_check_trie as usize].iter().cloned()) .unwrap(); // i == gc_height is the only height should be processed here - if block1.header.inner_lite.height > gc_height || i == gc_height { + if block1.header().height() > gc_height || i == gc_height { let mut trie_store_update2 = StoreUpdate::new_with_tries(tries2.clone()); tries2 .apply_insertions( @@ -232,7 +229,7 @@ mod 
tests { for i in start_index..start_index + simple_chain.length { let (block1, state_root1, _) = states1[i as usize].clone(); let state_root1 = state_root1[shard_to_check_trie as usize]; - if block1.header.inner_lite.height > gc_height || i == gc_height { + if block1.header().height() > gc_height || i == gc_height { assert!(trie1.iter(&state_root1).is_ok()); assert!(trie2.iter(&state_root1).is_ok()); let a = trie1 diff --git a/chain/chain/tests/simple_chain.rs b/chain/chain/tests/simple_chain.rs index 2d2f07f04b7..9029e53033d 100644 --- a/chain/chain/tests/simple_chain.rs +++ b/chain/chain/tests/simple_chain.rs @@ -2,6 +2,7 @@ use near_chain::test_utils::setup; use near_chain::{Block, ChainStoreAccess, ErrorKind, Provenance}; use near_logger_utils::init_test_logger; use near_primitives::hash::CryptoHash; +use near_primitives::version::PROTOCOL_VERSION; use num_rational::Rational; #[test] @@ -16,7 +17,7 @@ fn build_chain() { init_test_logger(); let (mut chain, _, signer) = setup(); for i in 0..4 { - let prev_hash = chain.head_header().unwrap().hash(); + let prev_hash = *chain.head_header().unwrap().hash(); let prev = chain.get_block(&prev_hash).unwrap(); let block = Block::empty(&prev, &*signer); let tip = chain @@ -31,18 +32,19 @@ fn build_chain() { fn build_chain_with_orhpans() { init_test_logger(); let (mut chain, _, signer) = setup(); - let mut blocks = vec![chain.get_block(&chain.genesis().hash()).unwrap().clone()]; + let mut blocks = vec![chain.get_block(&chain.genesis().hash().clone()).unwrap().clone()]; for i in 1..4 { let block = Block::empty(&blocks[i - 1], &*signer); blocks.push(block); } let last_block = &blocks[blocks.len() - 1]; let block = Block::produce( - &last_block.header, + PROTOCOL_VERSION, + &last_block.header(), 10, - last_block.chunks.clone(), - last_block.header.inner_lite.epoch_id.clone(), - last_block.header.inner_lite.next_epoch_id.clone(), + last_block.chunks().clone(), + last_block.header().epoch_id().clone(), + last_block.header().next_epoch_id().clone(), vec![], Rational::from_integer(0), 0, @@ -50,7 +52,7 @@ fn build_chain_with_orhpans() { vec![], vec![], &*signer, - last_block.header.inner_lite.next_bp_hash.clone(), + last_block.header().next_bp_hash().clone(), CryptoHash::default(), ); assert_eq!( @@ -117,7 +119,7 @@ fn build_chain_with_orhpans() { fn build_chain_with_skips_and_forks() { init_test_logger(); let (mut chain, _, signer) = setup(); - let genesis = chain.get_block(&chain.genesis().hash()).unwrap(); + let genesis = chain.get_block(&chain.genesis().hash().clone()).unwrap(); let b1 = Block::empty(&genesis, &*signer); let b2 = Block::empty_with_height(&genesis, 2, &*signer); let b3 = Block::empty_with_height(&b1, 3, &*signer); @@ -129,7 +131,7 @@ fn build_chain_with_skips_and_forks() { assert!(chain.process_block(&None, b4, Provenance::PRODUCED, |_| {}, |_| {}, |_| {}).is_ok()); assert!(chain.process_block(&None, b5, Provenance::PRODUCED, |_| {}, |_| {}, |_| {}).is_ok()); assert!(chain.get_header_by_height(1).is_err()); - assert_eq!(chain.get_header_by_height(5).unwrap().inner_lite.height, 5); + assert_eq!(chain.get_header_by_height(5).unwrap().height(), 5); } /// Verifies that the block at height are updated correctly when blocks from different forks are @@ -154,20 +156,20 @@ fn blocks_at_height() { let e_7 = Block::empty_with_height(&b_1, 7, &*signer); - let b_1_hash = b_1.hash(); - let b_2_hash = b_2.hash(); - let b_3_hash = b_3.hash(); + let b_1_hash = *b_1.hash(); + let b_2_hash = *b_2.hash(); + let b_3_hash = *b_3.hash(); - let c_1_hash = 
c_1.hash(); - let c_3_hash = c_3.hash(); - let c_4_hash = c_4.hash(); - let c_5_hash = c_5.hash(); + let c_1_hash = *c_1.hash(); + let c_3_hash = *c_3.hash(); + let c_4_hash = *c_4.hash(); + let c_5_hash = *c_5.hash(); - let d_3_hash = d_3.hash(); - let d_4_hash = d_4.hash(); - let d_6_hash = d_6.hash(); + let d_3_hash = *d_3.hash(); + let d_4_hash = *d_4.hash(); + let d_6_hash = *d_6.hash(); - let e_7_hash = e_7.hash(); + let e_7_hash = *e_7.hash(); assert_ne!(d_3_hash, b_3_hash); @@ -176,9 +178,9 @@ fn blocks_at_height() { chain.process_block(&None, b_3, Provenance::PRODUCED, |_| {}, |_| {}, |_| {}).unwrap(); assert_eq!(chain.header_head().unwrap().height, 3); - assert_eq!(chain.get_header_by_height(1).unwrap().hash(), b_1_hash); - assert_eq!(chain.get_header_by_height(2).unwrap().hash(), b_2_hash); - assert_eq!(chain.get_header_by_height(3).unwrap().hash(), b_3_hash); + assert_eq!(chain.get_header_by_height(1).unwrap().hash(), &b_1_hash); + assert_eq!(chain.get_header_by_height(2).unwrap().hash(), &b_2_hash); + assert_eq!(chain.get_header_by_height(3).unwrap().hash(), &b_3_hash); chain.process_block(&None, c_1, Provenance::PRODUCED, |_| {}, |_| {}, |_| {}).unwrap(); chain.process_block(&None, c_3, Provenance::PRODUCED, |_| {}, |_| {}, |_| {}).unwrap(); @@ -186,46 +188,46 @@ fn blocks_at_height() { chain.process_block(&None, c_5, Provenance::PRODUCED, |_| {}, |_| {}, |_| {}).unwrap(); assert_eq!(chain.header_head().unwrap().height, 5); - assert_eq!(chain.get_header_by_height(1).unwrap().hash(), c_1_hash); + assert_eq!(chain.get_header_by_height(1).unwrap().hash(), &c_1_hash); assert!(chain.get_header_by_height(2).is_err()); - assert_eq!(chain.get_header_by_height(3).unwrap().hash(), c_3_hash); - assert_eq!(chain.get_header_by_height(4).unwrap().hash(), c_4_hash); - assert_eq!(chain.get_header_by_height(5).unwrap().hash(), c_5_hash); + assert_eq!(chain.get_header_by_height(3).unwrap().hash(), &c_3_hash); + assert_eq!(chain.get_header_by_height(4).unwrap().hash(), &c_4_hash); + assert_eq!(chain.get_header_by_height(5).unwrap().hash(), &c_5_hash); chain.process_block(&None, d_3, Provenance::PRODUCED, |_| {}, |_| {}, |_| {}).unwrap(); chain.process_block(&None, d_4, Provenance::PRODUCED, |_| {}, |_| {}, |_| {}).unwrap(); chain.process_block(&None, d_6, Provenance::PRODUCED, |_| {}, |_| {}, |_| {}).unwrap(); assert_eq!(chain.header_head().unwrap().height, 6); - assert_eq!(chain.get_header_by_height(1).unwrap().hash(), b_1_hash); - assert_eq!(chain.get_header_by_height(2).unwrap().hash(), b_2_hash); - assert_eq!(chain.get_header_by_height(3).unwrap().hash(), d_3_hash); - assert_eq!(chain.get_header_by_height(4).unwrap().hash(), d_4_hash); + assert_eq!(chain.get_header_by_height(1).unwrap().hash(), &b_1_hash); + assert_eq!(chain.get_header_by_height(2).unwrap().hash(), &b_2_hash); + assert_eq!(chain.get_header_by_height(3).unwrap().hash(), &d_3_hash); + assert_eq!(chain.get_header_by_height(4).unwrap().hash(), &d_4_hash); assert!(chain.get_header_by_height(5).is_err()); - assert_eq!(chain.get_header_by_height(6).unwrap().hash(), d_6_hash); + assert_eq!(chain.get_header_by_height(6).unwrap().hash(), &d_6_hash); chain.process_block(&None, e_7, Provenance::PRODUCED, |_| {}, |_| {}, |_| {}).unwrap(); - assert_eq!(chain.get_header_by_height(1).unwrap().hash(), b_1_hash); + assert_eq!(chain.get_header_by_height(1).unwrap().hash(), &b_1_hash); for h in 2..=5 { assert!(chain.get_header_by_height(h).is_err()); } - assert_eq!(chain.get_header_by_height(7).unwrap().hash(), e_7_hash); + 
assert_eq!(chain.get_header_by_height(7).unwrap().hash(), &e_7_hash); } #[test] fn next_blocks() { init_test_logger(); let (mut chain, _, signer) = setup(); - let genesis = chain.get_block(&chain.genesis().hash()).unwrap(); + let genesis = chain.get_block(&chain.genesis().hash().clone()).unwrap(); let b1 = Block::empty(&genesis, &*signer); let b2 = Block::empty_with_height(&b1, 2, &*signer); let b3 = Block::empty_with_height(&b1, 3, &*signer); let b4 = Block::empty_with_height(&b3, 4, &*signer); - let b1_hash = b1.hash(); - let b2_hash = b2.hash(); - let b3_hash = b3.hash(); - let b4_hash = b4.hash(); + let b1_hash = *b1.hash(); + let b2_hash = *b2.hash(); + let b3_hash = *b3.hash(); + let b4_hash = *b4.hash(); assert!(chain.process_block(&None, b1, Provenance::PRODUCED, |_| {}, |_| {}, |_| {}).is_ok()); assert!(chain.process_block(&None, b2, Provenance::PRODUCED, |_| {}, |_| {}, |_| {}).is_ok()); assert_eq!(chain.mut_store().get_next_block_hash(&b1_hash).unwrap(), &b2_hash); diff --git a/chain/chain/tests/sync_chain.rs b/chain/chain/tests/sync_chain.rs index fa3af8f59f2..a74bb04ce05 100644 --- a/chain/chain/tests/sync_chain.rs +++ b/chain/chain/tests/sync_chain.rs @@ -8,7 +8,7 @@ fn chain_sync_headers() { init_test_logger(); let (mut chain, _, bls_signer) = setup(); assert_eq!(chain.sync_head().unwrap().height, 0); - let mut blocks = vec![chain.get_block(&chain.genesis().hash()).unwrap().clone()]; + let mut blocks = vec![chain.get_block(&chain.genesis().hash().clone()).unwrap().clone()]; let mut block_merkle_tree = PartialMerkleTree::default(); for i in 0..4 { blocks.push(Block::empty_with_block_merkle_tree( @@ -18,7 +18,7 @@ fn chain_sync_headers() { )); } chain - .sync_block_headers(blocks.drain(1..).map(|block| block.header).collect(), |_| { + .sync_block_headers(blocks.drain(1..).map(|block| block.header().clone()).collect(), |_| { panic!("Unexpected") }) .unwrap(); diff --git a/chain/chunks/Cargo.toml b/chain/chunks/Cargo.toml index 6c52729bc2b..4cc01f35906 100644 --- a/chain/chunks/Cargo.toml +++ b/chain/chunks/Cargo.toml @@ -10,7 +10,7 @@ futures = "0.3" rand = "0.7" chrono = "0.4.6" log = "0.4" -borsh = "0.6.1" +borsh = "0.6.2" serde = { version = "1", features = [ "derive" ] } cached = "0.12" reed-solomon-erasure = "4" diff --git a/chain/chunks/src/lib.rs b/chain/chunks/src/lib.rs index 20ac9a5d460..887df5c5f21 100644 --- a/chain/chunks/src/lib.rs +++ b/chain/chunks/src/lib.rs @@ -658,14 +658,14 @@ impl ShardsManager { .entry(shard_id) .and_modify(|stored_chunk| { let epoch_id = unwrap_or_return!( - runtime_adapter.get_epoch_id_from_prev_block(&known_header.prev_hash) + runtime_adapter.get_epoch_id_from_prev_block(known_header.prev_hash()) ); let block_producer = unwrap_or_return!(runtime_adapter.get_block_producer(&epoch_id, height)); if runtime_adapter .verify_validator_signature( &epoch_id, - &known_header.prev_hash, + &known_header.prev_hash(), &block_producer, header.hash.as_ref(), &header.signature, diff --git a/chain/chunks/src/test_utils.rs b/chain/chunks/src/test_utils.rs index 55a8930dce2..bae328899a6 100644 --- a/chain/chunks/src/test_utils.rs +++ b/chain/chunks/src/test_utils.rs @@ -1,5 +1,7 @@ -use crate::{Seal, SealsManager, ACCEPTING_SEAL_PERIOD_MS, PAST_SEAL_HEIGHT_HORIZON}; +use std::sync::Arc; + use chrono::Utc; + use near_chain::test_utils::KeyValueRuntime; use near_chain::types::RuntimeAdapter; use near_chain::ChainStore; @@ -8,8 +10,10 @@ use near_primitives::hash::{self, CryptoHash}; use near_primitives::sharding::ChunkHash; use 
near_primitives::types::BlockHeight; use near_primitives::types::{AccountId, ShardId}; +use near_primitives::version::PROTOCOL_VERSION; use near_store::Store; -use std::sync::Arc; + +use crate::{Seal, SealsManager, ACCEPTING_SEAL_PERIOD_MS, PAST_SEAL_HEIGHT_HORIZON}; pub struct SealsManagerTestFixture { pub mock_chunk_producer: AccountId, @@ -55,6 +59,7 @@ impl Default for SealsManagerTestFixture { // in the header is used to check that the `SealsManager` seals cache is properly // cleared. let mock_distant_block_header = BlockHeader::genesis( + PROTOCOL_VERSION, mock_height + PAST_SEAL_HEIGHT_HORIZON + 1, Default::default(), Default::default(), @@ -67,7 +72,7 @@ impl Default for SealsManagerTestFixture { Default::default(), Default::default(), ); - let mock_distant_block_hash = mock_distant_block_header.hash.clone(); + let mock_distant_block_hash = mock_distant_block_header.hash().clone(); Self::store_block_header(store, mock_distant_block_header); Self { @@ -86,7 +91,7 @@ impl Default for SealsManagerTestFixture { impl SealsManagerTestFixture { fn store_block_header(store: Arc, header: BlockHeader) { - let mut chain_store = ChainStore::new(store, header.inner_lite.height); + let mut chain_store = ChainStore::new(store, header.height()); let mut update = chain_store.store_update(); update.save_block_header(header).unwrap(); update.commit().unwrap(); diff --git a/chain/client/Cargo.toml b/chain/client/Cargo.toml index 2e45a9ba8cb..f66690c8f80 100644 --- a/chain/client/Cargo.toml +++ b/chain/client/Cargo.toml @@ -19,7 +19,7 @@ sysinfo = { git = "https://github.com/near/sysinfo", rev = "3cb97ee79a02754407d2 strum = { version = "0.18", features = ["derive"] } cached = "0.12" lazy_static = "1.4" -borsh = "0.6.1" +borsh = "0.6.2" reed-solomon-erasure = "4" num-rational = "0.2.4" diff --git a/chain/client/src/client.rs b/chain/client/src/client.rs index 6c2dae68046..5569c38d8e8 100644 --- a/chain/client/src/client.rs +++ b/chain/client/src/client.rs @@ -19,6 +19,7 @@ use near_chain::{ }; use near_chain_configs::ClientConfig; use near_chunks::{ProcessPartialEncodedChunkResult, ShardsManager}; +use near_network::types::PartialEncodedChunkResponseMsg; use near_network::{FullPeerInfo, NetworkAdapter, NetworkClientResponses, NetworkRequests}; use near_primitives::block::{Approval, ApprovalInner, ApprovalMessage, Block, BlockHeader, Tip}; use near_primitives::challenge::{Challenge, ChallengeBody}; @@ -38,7 +39,6 @@ use crate::metrics; use crate::sync::{BlockSync, HeaderSync, StateSync, StateSyncResult}; use crate::types::{Error, ShardSyncDownload}; use crate::SyncStatus; -use near_network::types::PartialEncodedChunkResponseMsg; const NUM_REBROADCAST_BLOCKS: usize = 30; @@ -147,12 +147,12 @@ impl Client { } pub fn remove_transactions_for_block(&mut self, me: AccountId, block: &Block) { - for (shard_id, chunk_header) in block.chunks.iter().enumerate() { + for (shard_id, chunk_header) in block.chunks().iter().enumerate() { let shard_id = shard_id as ShardId; - if block.header.inner_lite.height == chunk_header.height_included { + if block.header().height() == chunk_header.height_included { if self.shards_mgr.cares_about_shard_this_or_next_epoch( Some(&me), - &block.header.prev_hash, + &block.header().prev_hash(), shard_id, true, ) { @@ -164,18 +164,18 @@ impl Client { } } } - for challenge in block.challenges.iter() { + for challenge in block.challenges().iter() { self.challenges.remove(&challenge.hash); } } pub fn reintroduce_transactions_for_block(&mut self, me: AccountId, block: &Block) { - for 
(shard_id, chunk_header) in block.chunks.iter().enumerate() { + for (shard_id, chunk_header) in block.chunks().iter().enumerate() { let shard_id = shard_id as ShardId; - if block.header.inner_lite.height == chunk_header.height_included { + if block.header().height() == chunk_header.height_included { if self.shards_mgr.cares_about_shard_this_or_next_epoch( Some(&me), - &block.header.prev_hash, + &block.header().prev_hash(), shard_id, false, ) { @@ -187,7 +187,7 @@ impl Client { } } } - for challenge in block.challenges.iter() { + for challenge in block.challenges().iter() { self.challenges.insert(challenge.hash, challenge.clone()); } } @@ -291,9 +291,9 @@ impl Client { let prev = self.chain.get_block_header(&head.last_block_hash)?.clone(); let prev_hash = head.last_block_hash; let prev_height = head.height; - let prev_prev_hash = prev.prev_hash; - let prev_epoch_id = prev.inner_lite.epoch_id.clone(); - let prev_next_bp_hash = prev.inner_lite.next_bp_hash; + let prev_prev_hash = *prev.prev_hash(); + let prev_epoch_id = prev.epoch_id().clone(); + let prev_next_bp_hash = *prev.next_bp_hash(); // Check and update the doomslug tip here. This guarantees that our endorsement will be in the // doomslug witness. Have to do it before checking the ability to produce a block. @@ -323,7 +323,7 @@ impl Client { ))); } - debug!(target: "client", "{:?} Producing block at height {}, parent {} @ {}", validator_signer.validator_id(), next_height, prev.inner_lite.height, format_hash(head.last_block_hash)); + debug!(target: "client", "{:?} Producing block at height {}, parent {} @ {}", validator_signer.validator_id(), next_height, prev.height(), format_hash(head.last_block_hash)); let new_chunks = self.shards_mgr.prepare_chunks(&prev_hash); // If we are producing empty blocks and there are no transactions. @@ -372,7 +372,7 @@ impl Client { let block_merkle_root = block_merkle_tree.root(); let prev_block_extra = self.chain.get_block_extra(&prev_hash)?.clone(); let prev_block = self.chain.get_block(&prev_hash)?; - let mut chunks = prev_block.chunks.clone(); + let mut chunks = prev_block.chunks().clone(); // Collect new chunks. for (shard_id, mut chunk_header) in new_chunks { @@ -380,13 +380,13 @@ impl Client { chunks[shard_id as usize] = chunk_header; } - let prev_header = &prev_block.header; + let prev_header = &prev_block.header(); + + let next_epoch_id = + self.runtime_adapter.get_next_epoch_id_from_prev_block(&head.last_block_hash)?; let minted_amount = if self.runtime_adapter.is_next_block_epoch_start(&head.last_block_hash)? { - let next_epoch_id = self - .runtime_adapter - .get_next_epoch_id_from_prev_block(&head.last_block_hash)?; Some(self.runtime_adapter.get_epoch_minted_amount(&next_epoch_id)?) } else { None @@ -396,7 +396,10 @@ impl Client { // TODO(2445): Enable challenges when they are working correctly. // let challenges = self.challenges.drain().map(|(_, challenge)| challenge).collect(); + let protocol_version = self.runtime_adapter.get_epoch_protocol_version(&next_epoch_id)?; + let block = Block::produce( + protocol_version, &prev_header, next_height, chunks, @@ -444,7 +447,7 @@ impl Client { } if self.runtime_adapter.is_next_block_epoch_start(&prev_block_hash)? { - let prev_prev_hash = self.chain.get_block_header(&prev_block_hash)?.prev_hash; + let prev_prev_hash = *self.chain.get_block_header(&prev_block_hash)?.prev_hash(); if !self.chain.prev_block_is_caught_up(&prev_prev_hash, &prev_block_hash)? 
{ // See comment in similar snipped in `produce_block` debug!(target: "client", "Produce chunk: prev block is not caught up"); @@ -540,7 +543,7 @@ impl Client { let transaction_validity_period = chain.transaction_validity_period; runtime_adapter .prepare_transactions( - prev_block_header.inner_rest.gas_price, + prev_block_header.gas_price(), chunk_extra.gas_limit, shard_id, chunk_extra.state_root.clone(), @@ -649,7 +652,7 @@ impl Client { pub fn rebroadcast_block(&mut self, block: Block) { if self.rebroadcasted_blocks.cache_get(&block.hash()).is_none() { self.network_adapter.do_send(NetworkRequests::Block { block: block.clone() }); - self.rebroadcasted_blocks.cache_set(block.hash(), ()); + self.rebroadcasted_blocks.cache_set(*block.hash(), ()); } } @@ -707,11 +710,11 @@ impl Client { if tip.last_block_hash != self.doomslug.get_tip().0 { // We need to update the doomslug tip let last_final_hash = - self.chain.get_block_header(&tip.last_block_hash)?.inner_rest.last_final_block; + *self.chain.get_block_header(&tip.last_block_hash)?.last_final_block(); let last_final_height = if last_final_hash == CryptoHash::default() { - self.chain.genesis().inner_lite.height + self.chain.genesis().height() } else { - self.chain.get_block_header(&last_final_hash)?.inner_lite.height + self.chain.get_block_header(&last_final_hash)?.height() }; self.doomslug.set_tip( @@ -771,7 +774,7 @@ impl Client { .unwrap_or_default(); let skips = self .pending_approvals - .cache_remove(&ApprovalInner::Skip(block.header.inner_lite.height)) + .cache_remove(&ApprovalInner::Skip(block.header().height())) .unwrap_or_default(); for (_account_id, approval) in endorsements.into_iter().chain(skips.into_iter()) { @@ -782,7 +785,7 @@ impl Client { } if status.is_new_head() { - self.shards_mgr.update_largest_seen_height(block.header.inner_lite.height); + self.shards_mgr.update_largest_seen_height(block.header().height()); if !self.config.archive { if let Err(err) = self .chain @@ -815,29 +818,29 @@ impl Client { // remove transactions from the new chain let mut reintroduce_head = self.chain.get_block_header(&prev_head).unwrap().clone(); - let mut remove_head = block.header.clone(); + let mut remove_head = block.header().clone(); assert_ne!(remove_head.hash(), reintroduce_head.hash()); let mut to_remove = vec![]; let mut to_reintroduce = vec![]; while remove_head.hash() != reintroduce_head.hash() { - while remove_head.inner_lite.height > reintroduce_head.inner_lite.height { - to_remove.push(remove_head.hash()); + while remove_head.height() > reintroduce_head.height() { + to_remove.push(*remove_head.hash()); remove_head = self .chain - .get_block_header(&remove_head.prev_hash) + .get_block_header(remove_head.prev_hash()) .unwrap() .clone(); } - while reintroduce_head.inner_lite.height > remove_head.inner_lite.height - || reintroduce_head.inner_lite.height == remove_head.inner_lite.height + while reintroduce_head.height() > remove_head.height() + || reintroduce_head.height() == remove_head.height() && reintroduce_head.hash() != remove_head.hash() { - to_reintroduce.push(reintroduce_head.hash()); + to_reintroduce.push(*reintroduce_head.hash()); reintroduce_head = self .chain - .get_block_header(&reintroduce_head.prev_hash) + .get_block_header(reintroduce_head.prev_hash()) .unwrap() .clone(); } @@ -870,19 +873,19 @@ impl Client { for shard_id in 0..self.runtime_adapter.num_shards() { let epoch_id = self .runtime_adapter - .get_epoch_id_from_prev_block(&block.header.hash()) + .get_epoch_id_from_prev_block(&block.header().hash()) .unwrap(); 
let chunk_proposer = self .runtime_adapter - .get_chunk_producer(&epoch_id, block.header.inner_lite.height + 1, shard_id) + .get_chunk_producer(&epoch_id, block.header().height() + 1, shard_id) .unwrap(); if chunk_proposer == *validator_signer.validator_id() { match self.produce_chunk( - block.hash(), + *block.hash(), &epoch_id, - block.chunks[shard_id as usize].clone(), - block.header.inner_lite.height + 1, + block.chunks()[shard_id as usize].clone(), + block.header().height() + 1, shard_id, ) { Ok(Some((encoded_chunk, merkle_paths, receipts))) => self @@ -905,7 +908,7 @@ impl Client { } // Process stored partial encoded chunks - let next_height = block.header.inner_lite.height + 1; + let next_height = block.header().height() + 1; let mut partial_encoded_chunks = self.shards_mgr.get_stored_partial_encoded_chunks(next_height); for (_shard_id, partial_encoded_chunk) in partial_encoded_chunks.drain() { @@ -984,7 +987,7 @@ impl Client { ApprovalInner::Endorsement(parent_hash) => parent_hash.clone(), ApprovalInner::Skip(parent_height) => { match self.chain.get_header_by_height(*parent_height) { - Ok(header) => header.hash(), + Ok(header) => *header.hash(), Err(e) => { process_error(e, approval, &mut self.pending_approvals); return; @@ -1182,7 +1185,7 @@ impl Client { debug!(target: "client", "Invalid tx: expired or from a different fork -- {:?}", tx); return Ok(NetworkClientResponses::InvalidTx(e)); } - let gas_price = cur_block_header.inner_rest.gas_price; + let gas_price = cur_block_header.gas_price(); let epoch_id = self.runtime_adapter.get_epoch_id_from_prev_block(&head.last_block_hash)?; // Fast transaction validation without a state root. @@ -1386,8 +1389,12 @@ impl Client { #[cfg(test)] mod test { - use crate::test_utils::TestEnv; + use std::collections::HashMap; + use std::path::Path; + use std::sync::Arc; + use cached::Cached; + use near_chain::{ChainGenesis, RuntimeAdapter}; use near_chain_configs::Genesis; use near_crypto::KeyType; @@ -1396,9 +1403,8 @@ mod test { use near_primitives::validator_signer::InMemoryValidatorSigner; use near_store::test_utils::create_test_store; use neard::config::GenesisExt; - use std::collections::HashMap; - use std::path::Path; - use std::sync::Arc; + + use crate::test_utils::TestEnv; #[test] fn test_pending_approvals() { diff --git a/chain/client/src/client_actor.rs b/chain/client/src/client_actor.rs index e3d2a1ffc50..b051bd4a3bb 100644 --- a/chain/client/src/client_actor.rs +++ b/chain/client/src/client_actor.rs @@ -13,6 +13,8 @@ use log::{debug, error, info, warn}; use near_chain::check_refcount_map; use near_chain::test_utils::format_hash; use near_chain::types::AcceptedBlock; +#[cfg(feature = "adversarial")] +use near_chain::StoreValidator; use near_chain::{ byzantine_assert, Block, BlockHeader, ChainGenesis, ChainStoreAccess, Provenance, RuntimeAdapter, @@ -35,6 +37,7 @@ use near_primitives::types::{BlockHeight, EpochId}; use near_primitives::unwrap_or_return; use near_primitives::utils::from_timestamp; use near_primitives::validator_signer::ValidatorSigner; +use near_primitives::version::PROTOCOL_VERSION; use near_primitives::views::ValidatorInfo; #[cfg(feature = "adversarial")] use near_store::ColBlock; @@ -48,8 +51,6 @@ use crate::types::{ StatusSyncInfo, SyncStatus, }; use crate::StatusResponse; -#[cfg(feature = "adversarial")] -use near_chain::store_validator::StoreValidator; /// Multiplier on `max_block_time` to wait until deciding that chain stalled. 
const STATUS_WAIT_TIME_MULTIPLIER: u64 = 10; @@ -288,16 +289,16 @@ impl Handler for ClientActor { .client .chain .mut_store() - .get_all_block_hashes_by_height(block.header.inner_lite.height); + .get_all_block_hashes_by_height(block.header().height()); if was_requested || !blocks_at_height.is_ok() { if let SyncStatus::StateSync(sync_hash, _) = &mut self.client.sync_status { if let Ok(header) = self.client.chain.get_block_header(sync_hash) { - if block.hash() == header.prev_hash { + if block.hash() == header.prev_hash() { if let Err(_) = self.client.chain.save_block(&block) { error!(target: "client", "Failed to save a block during state sync"); } return NetworkClientResponses::NoResponse; - } else if &block.hash() == sync_hash { + } else if block.hash() == sync_hash { self.client.chain.save_orphan(&block); return NetworkClientResponses::NoResponse; } @@ -308,12 +309,12 @@ impl Handler for ClientActor { match self .client .runtime_adapter - .get_epoch_id_from_prev_block(&block.header.prev_hash) + .get_epoch_id_from_prev_block(block.header().prev_hash()) { Ok(epoch_id) => { if let Some(hashes) = blocks_at_height.unwrap().get(&epoch_id) { - if !hashes.contains(&block.header.hash) { - warn!(target: "client", "Rejecting unrequested block {}, height {}", block.header.hash, block.header.inner_lite.height); + if !hashes.contains(block.header().hash()) { + warn!(target: "client", "Rejecting unrequested block {}, height {}", block.header().hash(), block.header().height()); } } } @@ -487,7 +488,7 @@ impl Handler for ClientActor { .chain .get_block_header(&head.last_block_hash) .map_err(|err| err.to_string())?; - let latest_block_time = header.inner_lite.timestamp.clone(); + let latest_block_time = header.raw_timestamp().clone(); if msg.is_health_check { let now = Utc::now(); let block_timestamp = from_timestamp(latest_block_time); @@ -514,15 +515,22 @@ impl Handler for ClientActor { is_slashed, }) .collect(); + let protocol_version = self + .client + .runtime_adapter + .get_epoch_protocol_version(&head.epoch_id) + .map_err(|err| err.to_string())?; Ok(StatusResponse { version: self.client.config.version.clone(), + protocol_version, + latest_protocol_version: PROTOCOL_VERSION, chain_id: self.client.config.chain_id.clone(), rpc_addr: self.client.config.rpc_addr.clone(), validators, sync_info: StatusSyncInfo { latest_block_hash: head.last_block_hash.into(), latest_block_height: head.height, - latest_state_root: header.inner_lite.prev_state_root.clone().into(), + latest_state_root: header.prev_state_root().clone().into(), latest_block_time: from_timestamp(latest_block_time), syncing: self.client.sync_status.is_syncing(), }, @@ -691,7 +699,7 @@ impl ClientActor { fn produce_block(&mut self, next_height: BlockHeight) -> Result<(), Error> { match self.client.produce_block(next_height) { Ok(Some(block)) => { - let block_hash = block.hash(); + let block_hash = *block.hash(); let res = self.process_block(block, Provenance::PRODUCED); match &res { Ok(_) => Ok(()), @@ -729,10 +737,10 @@ impl ClientActor { accepted_block.provenance, ); let block = self.client.chain.get_block(&accepted_block.hash).unwrap(); - let gas_used = Block::compute_gas_used(&block.chunks, block.header.inner_lite.height); - let gas_limit = Block::compute_gas_limit(&block.chunks, block.header.inner_lite.height); + let gas_used = Block::compute_gas_used(&block.chunks(), block.header().height()); + let gas_limit = Block::compute_gas_limit(&block.chunks(), block.header().height()); - let last_final_hash = block.header.inner_rest.last_final_block; 
+ let last_final_hash = *block.header().last_final_block(); self.info_helper.block_processed(gas_used, gas_limit); self.check_send_announce_account(last_final_hash); @@ -753,11 +761,11 @@ impl ClientActor { } else if provenance == Provenance::NONE { // Don't care about challenge here since it will be handled when we actually process // the block. - if self.client.chain.process_block_header(&block.header, |_| {}).is_ok() { + if self.client.chain.process_block_header(&block.header(), |_| {}).is_ok() { let head = self.client.chain.head()?; // do not broadcast blocks that are too far back. - if head.height < block.header.inner_lite.height - || head.epoch_id == block.header.inner_lite.epoch_id + if head.height < block.header().height() + || &head.epoch_id == block.header().epoch_id() { self.client.rebroadcast_block(block.clone()); } @@ -775,15 +783,15 @@ impl ClientActor { peer_id: PeerId, was_requested: bool, ) -> NetworkClientResponses { - let hash = block.hash(); - debug!(target: "client", "{:?} Received block {} <- {} at {} from {}, requested: {}", self.client.validator_signer.as_ref().map(|vs| vs.validator_id()), hash, block.header.prev_hash, block.header.inner_lite.height, peer_id, was_requested); + let hash = *block.hash(); + debug!(target: "client", "{:?} Received block {} <- {} at {} from {}, requested: {}", self.client.validator_signer.as_ref().map(|vs| vs.validator_id()), hash, block.header().prev_hash(), block.header().height(), peer_id, was_requested); // drop the block if it is too far ahead let head = unwrap_or_return!(self.client.chain.head(), NetworkClientResponses::NoResponse); - if block.header.inner_lite.height >= head.height + BLOCK_HORIZON { - debug!(target: "client", "dropping block {} that is too far ahead. Block height {} current head height {}", block.hash(), block.header.inner_lite.height, head.height); + if block.header().height() >= head.height + BLOCK_HORIZON { + debug!(target: "client", "dropping block {} that is too far ahead. 
Block height {} current head height {}", block.hash(), block.header().height(), head.height); return NetworkClientResponses::NoResponse; } - let prev_hash = block.header.prev_hash; + let prev_hash = *block.header().prev_hash(); let provenance = if was_requested { near_chain::Provenance::SYNC } else { near_chain::Provenance::NONE }; match self.process_block(block, provenance) { @@ -946,7 +954,7 @@ impl ClientActor { let header_head = self.client.chain.header_head()?; let mut sync_hash = header_head.prev_block_hash; for _ in 0..self.client.config.state_fetch_horizon { - sync_hash = self.client.chain.get_block_header(&sync_hash)?.prev_hash; + sync_hash = *self.client.chain.get_block_header(&sync_hash)?.prev_hash(); } let epoch_start_sync_hash = StateSync::get_epoch_start_sync_hash(&mut self.client.chain, &sync_hash)?; @@ -1113,7 +1121,9 @@ impl ClientActor { highest_height_peer(&self.network_info.highest_height_peers) { if let Ok(header) = self.client.chain.get_block_header(&sync_hash) { - for hash in vec![header.prev_hash, header.hash].into_iter() { + for hash in + vec![*header.prev_hash(), *header.hash()].into_iter() + { self.request_block_by_hash( hash, peer_info.peer_info.id.clone(), diff --git a/chain/client/src/info.rs b/chain/client/src/info.rs index 09c6b1354cb..c6e6cd06bd2 100644 --- a/chain/client/src/info.rs +++ b/chain/client/src/info.rs @@ -15,9 +15,9 @@ use near_primitives::serialize::to_base; use near_primitives::telemetry::{ TelemetryAgentInfo, TelemetryChainInfo, TelemetryInfo, TelemetrySystemInfo, }; -use near_primitives::types::Version; use near_primitives::types::{BlockHeight, Gas}; use near_primitives::validator_signer::ValidatorSigner; +use near_primitives::version::Version; use near_telemetry::{telemetry, TelemetryActor}; use crate::types::ShardSyncStatus; diff --git a/chain/client/src/sync.rs b/chain/client/src/sync.rs index dc0038dd10d..f1c3c6ce78b 100644 --- a/chain/client/src/sync.rs +++ b/chain/client/src/sync.rs @@ -253,8 +253,8 @@ impl HeaderSync { // Walk backwards to find last known hash. let last_loc = locator.last().unwrap().clone(); if let Ok(header) = chain.get_header_by_height(h) { - if header.inner_lite.height != last_loc.0 { - locator.push((header.inner_lite.height, header.hash())); + if header.height() != last_loc.0 { + locator.push((header.height(), *header.hash())); } } } @@ -480,7 +480,7 @@ impl StateSync { chain: &mut Chain, now: DateTime, ) -> Result<(bool, bool), near_chain::Error> { - let prev_hash = chain.get_block_header(&sync_hash)?.prev_hash.clone(); + let prev_hash = chain.get_block_header(&sync_hash)?.prev_hash().clone(); let (request_block, have_block) = if !chain.block_exists(&prev_hash)? 
{ match self.last_time_block_requested { None => (true, false), @@ -691,17 +691,17 @@ impl StateSync { sync_hash: &CryptoHash, ) -> Result { let mut header = chain.get_block_header(sync_hash)?; - let mut epoch_id = header.inner_lite.epoch_id.clone(); - let mut hash = header.hash.clone(); - let mut prev_hash = header.prev_hash.clone(); + let mut epoch_id = header.epoch_id().clone(); + let mut hash = header.hash().clone(); + let mut prev_hash = header.prev_hash().clone(); loop { header = chain.get_block_header(&prev_hash)?; - if epoch_id != header.inner_lite.epoch_id { + if &epoch_id != header.epoch_id() { return Ok(hash); } - epoch_id = header.inner_lite.epoch_id.clone(); - hash = header.hash.clone(); - prev_hash = header.prev_hash.clone(); + epoch_id = header.epoch_id().clone(); + hash = header.hash().clone(); + prev_hash = header.prev_hash().clone(); } } @@ -717,8 +717,8 @@ impl StateSync { highest_height_peers: &Vec, ) -> Result { let prev_block_hash = - unwrap_or_return!(chain.get_block_header(&sync_hash), Ok(shard_sync_download)) - .prev_hash; + *unwrap_or_return!(chain.get_block_header(&sync_hash), Ok(shard_sync_download)) + .prev_hash(); let epoch_hash = unwrap_or_return!( runtime_adapter.get_epoch_id_from_prev_block(&prev_block_hash), Ok(shard_sync_download) @@ -880,6 +880,7 @@ mod test { use near_primitives::merkle::PartialMerkleTree; use near_primitives::types::EpochId; use near_primitives::validator_signer::InMemoryValidatorSigner; + use near_primitives::version::PROTOCOL_VERSION; use num_rational::Ratio; #[test] @@ -934,7 +935,7 @@ mod test { chain_info: PeerChainInfo { genesis_id: GenesisId { chain_id: "unittest".to_string(), - hash: chain.genesis().hash(), + hash: *chain.genesis().hash(), }, height: chain2.head().unwrap().height, tracked_shards: vec![], @@ -952,7 +953,7 @@ mod test { NetworkRequests::BlockHeadersRequest { hashes: [3, 1, 0] .iter() - .map(|i| chain.get_block_by_height(*i).unwrap().hash()) + .map(|i| *chain.get_block_by_height(*i).unwrap().hash()) .collect(), peer_id: peer1.peer_info.id } @@ -1003,7 +1004,7 @@ mod test { 1000, 100, ); - let genesis = chain.get_block(&chain.genesis().hash()).unwrap().clone(); + let genesis = chain.get_block(&chain.genesis().hash().clone()).unwrap().clone(); let mut last_block = &genesis; let mut all_blocks = vec![]; @@ -1021,8 +1022,8 @@ mod test { account_id, ); Approval::new( - last_block.hash(), - last_block.header.inner_lite.height, + *last_block.hash(), + last_block.header().height(), current_height, &signer, ) @@ -1030,19 +1031,20 @@ mod test { }) }) .collect(); - let (epoch_id, next_epoch_id) = if last_block.header.prev_hash == CryptoHash::default() - { - (last_block.header.inner_lite.next_epoch_id.clone(), EpochId(last_block.hash())) - } else { - ( - last_block.header.inner_lite.epoch_id.clone(), - last_block.header.inner_lite.next_epoch_id.clone(), - ) - }; + let (epoch_id, next_epoch_id) = + if last_block.header().prev_hash() == &CryptoHash::default() { + (last_block.header().next_epoch_id().clone(), EpochId(*last_block.hash())) + } else { + ( + last_block.header().epoch_id().clone(), + last_block.header().next_epoch_id().clone(), + ) + }; let block = Block::produce( - &last_block.header, + PROTOCOL_VERSION, + &last_block.header(), current_height, - last_block.chunks.clone(), + last_block.chunks().clone(), epoch_id, next_epoch_id, approvals, @@ -1052,10 +1054,10 @@ mod test { vec![], vec![], &*signers[3], - last_block.header.inner_lite.next_bp_hash.clone(), + last_block.header().next_bp_hash().clone(), 
block_merkle_tree.root(), ); - block_merkle_tree.insert(block.hash()); + block_merkle_tree.insert(*block.hash()); all_blocks.push(block); @@ -1067,11 +1069,11 @@ mod test { // banned for _iter in 0..12 { let block = &all_blocks[last_added_block_ord]; - let current_height = block.header.inner_lite.height; + let current_height = block.header().height(); set_syncing_peer(&mut header_sync); header_sync.header_sync_due( &SyncStatus::HeaderSync { current_height, highest_height }, - &Tip::from_header(&block.header), + &Tip::from_header(&block.header()), ); last_added_block_ord += 3; @@ -1084,11 +1086,11 @@ mod test { // Now the same, but only 20 heights / sec for _iter in 0..12 { let block = &all_blocks[last_added_block_ord]; - let current_height = block.header.inner_lite.height; + let current_height = block.header().height(); set_syncing_peer(&mut header_sync); header_sync.header_sync_due( &SyncStatus::HeaderSync { current_height, highest_height }, - &Tip::from_header(&block.header), + &Tip::from_header(&block.header()), ); last_added_block_ord += 2; diff --git a/chain/client/src/test_utils.rs b/chain/client/src/test_utils.rs index 88e9970830e..85669e50000 100644 --- a/chain/client/src/test_utils.rs +++ b/chain/client/src/test_utils.rs @@ -31,6 +31,7 @@ use near_primitives::types::{ AccountId, Balance, BlockHeight, BlockHeightDelta, NumBlocks, NumSeats, NumShards, }; use near_primitives::validator_signer::{InMemoryValidatorSigner, ValidatorSigner}; +use near_primitives::version::PROTOCOL_VERSION; use near_primitives::views::{AccountView, QueryRequest, QueryResponseKind}; use near_store::test_utils::create_test_store; use near_store::Store; @@ -77,6 +78,7 @@ pub fn setup( Rational::from_integer(0), transaction_validity_period, epoch_length, + PROTOCOL_VERSION, ); let doomslug_threshold_mode = if enable_doomslug { DoomslugThresholdMode::TwoThirds @@ -84,7 +86,7 @@ pub fn setup( DoomslugThresholdMode::NoApprovals }; let mut chain = Chain::new(runtime.clone(), &chain_genesis, doomslug_threshold_mode).unwrap(); - let genesis_block = chain.get_block(&chain.genesis().hash()).unwrap().clone(); + let genesis_block = chain.get_block(&chain.genesis().hash().clone()).unwrap().clone(); let signer = Arc::new(InMemoryValidatorSigner::from_seed(account_id, KeyType::ED25519, account_id)); @@ -352,12 +354,12 @@ pub fn setup_mock_all_validators( let my_height = &mut last_height1[my_ord]; - *my_height = max(*my_height, block.header.inner_lite.height); + *my_height = max(*my_height, block.header().height()); hash_to_height1 .write() .unwrap() - .insert(block.header.hash(), block.header.inner_lite.height); + .insert(*block.header().hash(), block.header().height()); } NetworkRequests::PartialEncodedChunkRequest { account_id: their_account_id, @@ -686,7 +688,7 @@ pub fn setup_mock_all_validators( hash_to_height .write() .unwrap() - .insert(genesis_block.read().unwrap().as_ref().unwrap().header.clone().hash(), 0); + .insert(*genesis_block.read().unwrap().as_ref().unwrap().header().clone().hash(), 0); *locked_connectors = ret.clone(); let value = genesis_block.read().unwrap(); (value.clone().unwrap(), ret) @@ -893,11 +895,11 @@ impl TestEnv { .runtime_adapter .query( 0, - &last_block.chunks[0].inner.prev_state_root, - last_block.header.inner_lite.height, - last_block.header.inner_lite.timestamp, - &last_block.header.hash, - &last_block.header.inner_lite.epoch_id, + &last_block.chunks()[0].inner.prev_state_root, + last_block.header().height(), + last_block.header().raw_timestamp(), + last_block.header().hash(), + 
last_block.header().epoch_id(), &QueryRequest::ViewAccount { account_id }, ) .unwrap(); diff --git a/chain/client/src/view_client.rs b/chain/client/src/view_client.rs index c6a16ca67dd..dc5e5b10ffd 100644 --- a/chain/client/src/view_client.rs +++ b/chain/client/src/view_client.rs @@ -113,7 +113,7 @@ impl ViewClientActor { ) -> Result { match block_id { None => Ok(self.chain.head()?.last_block_hash), - Some(BlockId::Height(height)) => Ok(self.chain.get_header_by_height(height)?.hash()), + Some(BlockId::Height(height)) => Ok(*self.chain.get_header_by_height(height)?.hash()), Some(BlockId::Hash(block_hash)) => Ok(block_hash), } } @@ -133,9 +133,9 @@ impl ViewClientActor { fn get_block_hash_by_finality(&mut self, finality: &Finality) -> Result { let head_header = self.chain.head_header()?; match finality { - Finality::None => Ok(head_header.hash), - Finality::DoomSlug => Ok(head_header.inner_rest.last_ds_final_block), - Finality::Final => Ok(head_header.inner_rest.last_final_block), + Finality::None => Ok(*head_header.hash()), + Finality::DoomSlug => Ok(*head_header.last_ds_final_block()), + Finality::Final => Ok(*head_header.last_final_block()), } } @@ -171,17 +171,17 @@ impl ViewClientActor { // If we have state for the shard that we query return query result directly. // Otherwise route query to peers. - match self.chain.get_chunk_extra(&header.hash, shard_id) { + match self.chain.get_chunk_extra(header.hash(), shard_id) { Ok(chunk_extra) => { let state_root = chunk_extra.state_root; self.runtime_adapter .query( shard_id, &state_root, - header.inner_lite.height, - header.inner_lite.timestamp, - &header.hash, - &header.inner_lite.epoch_id, + header.height(), + header.raw_timestamp(), + header.hash(), + header.epoch_id(), &msg.request, ) .map(Some) @@ -316,7 +316,7 @@ impl ViewClientActor { let mut headers = vec![]; let max_height = self.chain.header_head()?.height; // TODO: this may be inefficient if there are a lot of skipped blocks. 
- for h in header.inner_lite.height + 1..=max_height { + for h in header.height() + 1..=max_height { if let Ok(header) = self.chain.get_header_by_height(h) { headers.push(header.clone()); if headers.len() >= sync::MAX_BLOCK_HEADERS as usize { @@ -390,10 +390,7 @@ impl Handler for ViewClientActor { } .and_then(|block| { self.runtime_adapter - .get_block_producer( - &block.header.inner_lite.epoch_id, - block.header.inner_lite.height, - ) + .get_block_producer(&block.header().epoch_id(), block.header().height()) .map(|author| BlockView::from_author_block(author, block)) }) .map_err(|err| err.to_string()) @@ -422,7 +419,7 @@ impl Handler for ViewClientActor { GetChunk::BlockHash(block_hash, shard_id) => { self.chain.get_block(&block_hash).map(Clone::clone).and_then(|block| { let chunk_hash = block - .chunks + .chunks() .get(shard_id as usize) .ok_or_else(|| { near_chain::Error::from(ErrorKind::InvalidShardId(shard_id)) @@ -434,7 +431,7 @@ impl Handler for ViewClientActor { GetChunk::Height(height, shard_id) => { self.chain.get_block_by_height(height).map(Clone::clone).and_then(|block| { let chunk_hash = block - .chunks + .chunks() .get(shard_id as usize) .ok_or_else(|| { near_chain::Error::from(ErrorKind::InvalidShardId(shard_id)) @@ -447,7 +444,7 @@ impl Handler for ViewClientActor { .and_then(|chunk| { self.chain .get_block_by_height(chunk.header.height_included) - .map(|block| (block.header.inner_lite.epoch_id.clone(), chunk)) + .map(|block| (block.header().epoch_id().clone(), chunk)) }) .and_then(|(epoch_id, chunk)| { self.runtime_adapter @@ -520,9 +517,9 @@ impl Handler for ViewClientActor { fn handle(&mut self, request: GetNextLightClientBlock, _: &mut Context) -> Self::Result { let last_block_header = self.chain.get_block_header(&request.last_block_hash).map_err(|err| err.to_string())?; - let last_epoch_id = last_block_header.inner_lite.epoch_id.clone(); - let last_next_epoch_id = last_block_header.inner_lite.next_epoch_id.clone(); - let last_height = last_block_header.inner_lite.height; + let last_epoch_id = last_block_header.epoch_id().clone(); + let last_next_epoch_id = last_block_header.next_epoch_id().clone(); + let last_height = last_block_header.height(); let head = self.chain.head().map_err(|err| err.to_string())?; if last_epoch_id == head.epoch_id || last_next_epoch_id == head.epoch_id { @@ -586,7 +583,7 @@ impl Handler for ViewClientActor { .chain .get_block(&h) .map_err(|e| e.to_string())? 
- .chunks + .chunks() .iter() .map(|header| header.inner.outcome_root) .collect::>(); @@ -727,9 +724,9 @@ impl Handler for ViewClientActor { self.chain.get_next_block_hash_with_new_chunk(&block_hash, shard_id) { if let Ok(block) = self.chain.get_block(&next_block_hash) { - if shard_id < block.chunks.len() as u64 { + if shard_id < block.chunks().len() as u64 { if verify_path( - block.chunks[shard_id as usize].inner.outcome_root, + block.chunks()[shard_id as usize].inner.outcome_root, &response.proof, &response.outcome_with_id.to_hashes(), ) { @@ -778,7 +775,7 @@ impl Handler for ViewClientActor { NetworkViewClientResponses::ChainInfo { genesis_id: GenesisId { chain_id: self.config.chain_id.clone(), - hash: self.chain.genesis().hash(), + hash: *self.chain.genesis().hash(), }, height, tracked_shards: self.config.tracked_shards.clone(), @@ -886,8 +883,6 @@ impl Handler for ViewClientActor { let header = self .maybe_block_id_to_block_hash(msg.block_id) .and_then(|block_hash| self.chain.get_block_header(&block_hash)); - header - .map(|b| GasPriceView { gas_price: b.inner_rest.gas_price }) - .map_err(|e| e.to_string()) + header.map(|b| GasPriceView { gas_price: b.gas_price() }).map_err(|e| e.to_string()) } } diff --git a/chain/client/tests/bug_repros.rs b/chain/client/tests/bug_repros.rs index 767480b93a6..b1a91c4a9b8 100644 --- a/chain/client/tests/bug_repros.rs +++ b/chain/client/tests/bug_repros.rs @@ -1,6 +1,8 @@ // This test tracks tests that reproduce previously fixed bugs to make sure the regressions we // fix do not resurface +use std::cmp::max; +use std::collections::HashMap; use std::sync::{Arc, RwLock}; use actix::{Addr, System}; @@ -15,8 +17,6 @@ use near_network::types::NetworkRequests::PartialEncodedChunkMessage; use near_network::{NetworkClientMessages, NetworkRequests, NetworkResponses, PeerInfo}; use near_primitives::block::Block; use near_primitives::transaction::SignedTransaction; -use std::cmp::max; -use std::collections::HashMap; #[test] fn repro_1183() { @@ -92,12 +92,12 @@ fn repro_1183() { .0 .do_send(NetworkClientMessages::Transaction { transaction: SignedTransaction::send_money( - block.header.inner_lite.height * 16 + nonce_delta, + block.header().height() * 16 + nonce_delta, from.to_string(), to.to_string(), &InMemorySigner::from_seed(from, KeyType::ED25519, from), 1, - block.header.prev_hash, + *block.header().prev_hash(), ), is_forwarded: false, check_only: false, @@ -109,7 +109,7 @@ fn repro_1183() { *last_block = Some(block.clone()); *delayed_one_parts = vec![]; - if block.header.inner_lite.height >= 25 { + if block.header().height() >= 25 { System::current().stop(); } (NetworkResponses::NoResponse, false) @@ -167,7 +167,7 @@ fn test_sync_from_achival_node() { Box::new(move |_: String, msg: &NetworkRequests| -> (NetworkResponses, bool) { if let NetworkRequests::Block { block } = msg { let mut largest_height = largest_height.write().unwrap(); - *largest_height = max(block.header.inner_lite.height, *largest_height); + *largest_height = max(block.header().height(), *largest_height); } if *largest_height.read().unwrap() >= 50 { System::current().stop(); @@ -184,8 +184,8 @@ fn test_sync_from_achival_node() { )) } } - if block.header.inner_lite.height <= 10 { - blocks.write().unwrap().insert(block.hash(), block.clone()); + if block.header().height() <= 10 { + blocks.write().unwrap().insert(*block.hash(), block.clone()); } (NetworkResponses::NoResponse, false) } @@ -215,7 +215,7 @@ fn test_sync_from_achival_node() { } match msg { NetworkRequests::Block { block } => { - if 
block.header.inner_lite.height <= 10 { + if block.header().height() <= 10 { block_counter += 1; } (NetworkResponses::NoResponse, true) diff --git a/chain/client/tests/catching_up.rs b/chain/client/tests/catching_up.rs index 1448f12639d..10b42c5be92 100644 --- a/chain/client/tests/catching_up.rs +++ b/chain/client/tests/catching_up.rs @@ -160,7 +160,7 @@ mod tests { match *phase { ReceiptsSyncPhases::WaitingForFirstBlock => { if let NetworkRequests::Block { block } = msg { - assert!(block.header.inner_lite.height <= send); + assert!(block.header().height() <= send); // This tx is rather fragile, specifically it's important that // 1. the `from` and `to` account are not in the same shard; // 2. ideally the producer of the chunk at height 3 for the shard @@ -170,7 +170,7 @@ mod tests { // for height 1, because such block producer will produce // the chunk for height 2 right away, before we manage to send // the transaction. - if block.header.inner_lite.height == send { + if block.header().height() == send { println!( "From shard: {}, to shard: {}", source_shard_id, destination_shard_id, @@ -182,7 +182,7 @@ mod tests { account_to.clone(), 111, 1, - block.header.prev_hash, + *block.header().prev_hash(), ); } *phase = ReceiptsSyncPhases::WaitingForSecondBlock; @@ -192,8 +192,8 @@ mod tests { ReceiptsSyncPhases::WaitingForSecondBlock => { // This block now contains a chunk with the transaction sent above. if let NetworkRequests::Block { block } = msg { - assert!(block.header.inner_lite.height <= send + 1); - if block.header.inner_lite.height == send + 1 { + assert!(block.header().height() <= send + 1); + if block.header().height() == send + 1 { *phase = ReceiptsSyncPhases::WaitingForDistantEpoch; } } @@ -201,9 +201,9 @@ mod tests { ReceiptsSyncPhases::WaitingForDistantEpoch => { // This block now contains a chunk with the transaction sent above. if let NetworkRequests::Block { block } = msg { - assert!(block.header.inner_lite.height >= send + 1); - assert!(block.header.inner_lite.height <= wait_till); - if block.header.inner_lite.height == wait_till { + assert!(block.header().height() >= send + 1); + assert!(block.header().height() <= wait_till); + if block.header().height() == wait_till { *phase = ReceiptsSyncPhases::VerifyingOutgoingReceipts; } } @@ -303,12 +303,12 @@ mod tests { ReceiptsSyncPhases::WaitingForValidate => { // This block now contains a chunk with the transaction sent above. 
if let NetworkRequests::Block { block } = msg { - assert!(block.header.inner_lite.height >= wait_till); - assert!(block.header.inner_lite.height <= wait_till + 20); - if block.header.inner_lite.height == wait_till + 20 { + assert!(block.header().height() >= wait_till); + assert!(block.header().height() <= wait_till + 20); + if block.header().height() == wait_till + 20 { System::current().stop(); } - if block.header.inner_lite.height == wait_till + 10 { + if block.header().height() == wait_till + 10 { for i in 0..16 { actix::spawn( connectors1.write().unwrap()[i] @@ -450,20 +450,20 @@ mod tests { match *phase { RandomSinglePartPhases::WaitingForFirstBlock => { if let NetworkRequests::Block { block } = msg { - assert_eq!(block.header.inner_lite.height, 1); + assert_eq!(block.header().height(), 1); *phase = RandomSinglePartPhases::WaitingForThirdEpoch; } } RandomSinglePartPhases::WaitingForThirdEpoch => { if let NetworkRequests::Block { block } = msg { - if block.header.inner_lite.height == 1 { + if block.header().height() == 1 { return (NetworkResponses::NoResponse, false); } - assert!(block.header.inner_lite.height >= 2); - assert!(block.header.inner_lite.height <= height); + assert!(block.header().height() >= 2); + assert!(block.header().height() <= height); let mut tx_count = 0; - if block.header.inner_lite.height == height - && block.header.inner_lite.height >= 2 + if block.header().height() == height + && block.header().height() >= 2 { for (i, validator1) in flat_validators.iter().enumerate() { for (j, validator2) in @@ -491,7 +491,7 @@ mod tests { validator2.to_string(), amount, (12345 + tx_count) as u64, - block.header.prev_hash, + *block.header().prev_hash(), ); } tx_count += 1; @@ -504,14 +504,11 @@ mod tests { } RandomSinglePartPhases::WaitingForSixEpoch => { if let NetworkRequests::Block { block } = msg { - assert!(block.header.inner_lite.height >= height); - assert!(block.header.inner_lite.height <= 32); + assert!(block.header().height() >= height); + assert!(block.header().height() <= 32); let check_height = if skip_15 { 28 } else { 26 }; - if block.header.inner_lite.height >= check_height { - println!( - "BLOCK HEIGHT {:?}", - block.header.inner_lite.height - ); + if block.header().height() >= check_height { + println!("BLOCK HEIGHT {:?}", block.header().height()); for i in 0..16 { for j in 0..16 { let amounts1 = amounts.clone(); @@ -548,7 +545,7 @@ mod tests { } } } - if block.header.inner_lite.height == 32 { + if block.header().height() == 32 { println!( "SEEN HEIGHTS SAME BLOCK {:?}", seen_heights_same_block.len() @@ -648,13 +645,10 @@ mod tests { Arc::new(RwLock::new(Box::new( move |_account_id: String, msg: &NetworkRequests| { if let NetworkRequests::Block { block } = msg { - check_height(block.hash(), block.header.inner_lite.height); - check_height( - block.header.prev_hash, - block.header.inner_lite.height - 1, - ); + check_height(*block.hash(), block.header().height()); + check_height(*block.header().prev_hash(), block.header().height() - 1); - if block.header.inner_lite.height >= 25 { + if block.header().height() >= 25 { System::current().stop(); } } @@ -711,26 +705,26 @@ mod tests { Arc::new(RwLock::new(Box::new( move |_account_id: String, msg: &NetworkRequests| { let propagate = if let NetworkRequests::Block { block } = msg { - check_height(block.hash(), block.header.inner_lite.height); + check_height(*block.hash(), block.header().height()); - if block.header.inner_lite.height % 10 == 5 { + if block.header().height() % 10 == 5 { check_height( - 
block.header.prev_hash, - block.header.inner_lite.height - 2, + *block.header().prev_hash(), + block.header().height() - 2, ); } else { check_height( - block.header.prev_hash, - block.header.inner_lite.height - 1, + *block.header().prev_hash(), + block.header().height() - 1, ); } - if block.header.inner_lite.height >= 25 { + if block.header().height() >= 25 { System::current().stop(); } // Do not propagate blocks at heights %10=4 - block.header.inner_lite.height % 10 != 4 + block.header().height() % 10 != 4 } else { true }; @@ -814,10 +808,10 @@ mod tests { } } if let NetworkRequests::Block { block } = msg { - if block.header.inner_lite.height == 12 { - println!("BLOCK {:?}", block,); - *unaccepted_block_hash = block.header.hash; - assert_eq!(4, block.header.inner_rest.chunks_included); + if block.header().height() == 12 { + println!("BLOCK {:?}", block); + *unaccepted_block_hash = *block.header().hash(); + assert_eq!(4, block.header().chunks_included()); *phase = ChunkGrievingPhases::SecondAttack; } } @@ -865,11 +859,11 @@ mod tests { } } if let NetworkRequests::Block { block } = msg { - if block.header.inner_lite.height == 42 { + if block.header().height() == 42 { println!("BLOCK {:?}", block,); // This is the main assert of the test // Chunk from malicious node shouldn't be accepted at all - assert_eq!(3, block.header.inner_rest.chunks_included); + assert_eq!(3, block.header().chunks_included()); System::current().stop(); } } @@ -1001,12 +995,12 @@ mod tests { } if let NetworkRequests::Block { block } = msg { // There is no chunks at height 1 - if block.header.inner_lite.height > 1 { + if block.header().height() > 1 { println!("BLOCK {:?}", block,); - if block.header.inner_lite.height % epoch_length != 1 { - assert_eq!(4, block.header.inner_rest.chunks_included); + if block.header().height() % epoch_length != 1 { + assert_eq!(4, block.header().chunks_included()); } - if block.header.inner_lite.height == last_height { + if block.header().height() == last_height { System::current().stop(); } } diff --git a/chain/client/tests/challenges.rs b/chain/client/tests/challenges.rs index 353f844f982..98266a11fca 100644 --- a/chain/client/tests/challenges.rs +++ b/chain/client/tests/challenges.rs @@ -29,6 +29,7 @@ use near_primitives::sharding::{EncodedShardChunk, ReedSolomonWrapper}; use near_primitives::transaction::SignedTransaction; use near_primitives::types::StateRoot; use near_primitives::validator_signer::InMemoryValidatorSigner; +use near_primitives::version::PROTOCOL_VERSION; use near_store::test_utils::create_test_store; use neard::config::{GenesisExt, FISHERMEN_THRESHOLD}; use neard::NightshadeRuntime; @@ -45,13 +46,14 @@ fn test_verify_block_double_sign_challenge() { let signer = InMemoryValidatorSigner::from_seed("test0", KeyType::ED25519, "test0"); let mut block_merkle_tree = PartialMerkleTree::default(); - block_merkle_tree.insert(genesis.hash()); + block_merkle_tree.insert(*genesis.hash()); let b2 = Block::produce( - &genesis.header, + PROTOCOL_VERSION, + genesis.header(), 2, - genesis.chunks.clone(), - b1.header.inner_lite.epoch_id.clone(), - b1.header.inner_lite.next_epoch_id.clone(), + genesis.chunks().clone(), + b1.header().epoch_id().clone(), + b1.header().next_epoch_id().clone(), vec![], Rational::from_integer(0), 0, @@ -59,28 +61,28 @@ fn test_verify_block_double_sign_challenge() { vec![], vec![], &signer, - b1.header.inner_lite.next_bp_hash.clone(), + b1.header().next_bp_hash().clone(), block_merkle_tree.root(), ); - let epoch_id = b1.header.inner_lite.epoch_id.clone(); + 
let epoch_id = b1.header().epoch_id().clone(); let valid_challenge = Challenge::produce( ChallengeBody::BlockDoubleSign(BlockDoubleSign { - left_block_header: b2.header.try_to_vec().unwrap(), - right_block_header: b1.header.try_to_vec().unwrap(), + left_block_header: b2.header().try_to_vec().unwrap(), + right_block_header: b1.header().try_to_vec().unwrap(), }), &signer, ); let runtime_adapter = env.clients[1].chain.runtime_adapter.clone(); assert_eq!( - validate_challenge(&*runtime_adapter, &epoch_id, &genesis.hash(), &valid_challenge,) + &validate_challenge(&*runtime_adapter, &epoch_id, &genesis.hash(), &valid_challenge) .unwrap() .0, if b1.hash() > b2.hash() { b1.hash() } else { b2.hash() } ); let invalid_challenge = Challenge::produce( ChallengeBody::BlockDoubleSign(BlockDoubleSign { - left_block_header: b1.header.try_to_vec().unwrap(), - right_block_header: b1.header.try_to_vec().unwrap(), + left_block_header: b1.header().try_to_vec().unwrap(), + right_block_header: b1.header().try_to_vec().unwrap(), }), &signer, ); @@ -90,8 +92,8 @@ fn test_verify_block_double_sign_challenge() { let b3 = env.clients[0].produce_block(3).unwrap().unwrap(); let invalid_challenge = Challenge::produce( ChallengeBody::BlockDoubleSign(BlockDoubleSign { - left_block_header: b1.header.try_to_vec().unwrap(), - right_block_header: b3.header.try_to_vec().unwrap(), + left_block_header: b1.header().try_to_vec().unwrap(), + right_block_header: b3.header().try_to_vec().unwrap(), }), &signer, ); @@ -138,9 +140,9 @@ fn create_chunk( client.chain.get_block_by_height(client.chain.head().unwrap().height).unwrap().clone(); let (mut chunk, mut merkle_paths, receipts) = client .produce_chunk( - last_block.hash(), - &last_block.header.inner_lite.epoch_id, - last_block.chunks[0].clone(), + *last_block.hash(), + last_block.header().epoch_id(), + last_block.chunks()[0].clone(), 2, 0, ) @@ -187,13 +189,14 @@ fn create_chunk( } let mut block_merkle_tree = client.chain.mut_store().get_block_merkle_tree(&last_block.hash()).unwrap().clone(); - block_merkle_tree.insert(last_block.hash()); + block_merkle_tree.insert(*last_block.hash()); let block = Block::produce( - &last_block.header, + PROTOCOL_VERSION, + &last_block.header(), 2, vec![chunk.header.clone()], - last_block.header.inner_lite.epoch_id.clone(), - last_block.header.inner_lite.next_epoch_id.clone(), + last_block.header().epoch_id().clone(), + last_block.header().next_epoch_id().clone(), vec![], Rational::from_integer(0), 0, @@ -201,7 +204,7 @@ fn create_chunk( vec![], vec![], &*client.validator_signer.as_ref().unwrap().clone(), - last_block.header.inner_lite.next_bp_hash, + *last_block.header().next_bp_hash(), block_merkle_tree.root(), ); (chunk, merkle_paths, receipts, block) @@ -219,7 +222,7 @@ fn test_verify_chunk_invalid_proofs_challenge() { MaybeEncodedShardChunk::Encoded(chunk), &block, ); - assert_eq!(challenge_result.unwrap(), (block.hash(), vec!["test0".to_string()])); + assert_eq!(challenge_result.unwrap(), (*block.hash(), vec!["test0".to_string()])); } #[test] @@ -237,7 +240,7 @@ fn test_verify_chunk_invalid_proofs_challenge_decoded_chunk() { MaybeEncodedShardChunk::Decoded(chunk), &block, ); - assert_eq!(challenge_result.unwrap(), (block.hash(), vec!["test0".to_string()])); + assert_eq!(challenge_result.unwrap(), (*block.hash(), vec!["test0".to_string()])); } #[test] @@ -261,7 +264,7 @@ fn test_verify_chunk_proofs_malicious_challenge_valid_order_transactions() { let mut env = TestEnv::new(ChainGenesis::test(), 1, 1); env.produce_block(0, 1); - let genesis_hash = 
env.clients[0].chain.genesis().hash(); + let genesis_hash = *env.clients[0].chain.genesis().hash(); let signer = InMemorySigner::from_seed("test0", KeyType::ED25519, "test0"); let (chunk, _merkle_paths, _receipts, block) = create_chunk_with_transactions( @@ -300,7 +303,7 @@ fn test_verify_chunk_proofs_challenge_transaction_order() { let mut env = TestEnv::new(ChainGenesis::test(), 1, 1); env.produce_block(0, 1); - let genesis_hash = env.clients[0].chain.genesis().hash(); + let genesis_hash = *env.clients[0].chain.genesis().hash(); let signer = InMemorySigner::from_seed("test0", KeyType::ED25519, "test0"); let (chunk, _merkle_paths, _receipts, block) = create_chunk_with_transactions( @@ -330,7 +333,7 @@ fn test_verify_chunk_proofs_challenge_transaction_order() { MaybeEncodedShardChunk::Encoded(chunk), &block, ); - assert_eq!(challenge_result.unwrap(), (block.hash(), vec!["test0".to_string()])); + assert_eq!(challenge_result.unwrap(), (*block.hash(), vec!["test0".to_string()])); } fn challenge( @@ -339,10 +342,10 @@ fn challenge( chunk: MaybeEncodedShardChunk, block: &Block, ) -> Result<(CryptoHash, Vec), Error> { - let merkle_paths = Block::compute_chunk_headers_root(&block.chunks).1; + let merkle_paths = Block::compute_chunk_headers_root(&block.chunks()).1; let valid_challenge = Challenge::produce( ChallengeBody::ChunkProofs(ChunkProofs { - block_header: block.header.try_to_vec().unwrap(), + block_header: block.header().try_to_vec().unwrap(), chunk, merkle_proof: merkle_paths[shard_id].clone(), }), @@ -351,8 +354,8 @@ fn challenge( let runtime_adapter = env.clients[0].chain.runtime_adapter.clone(); validate_challenge( &*runtime_adapter, - &block.header.inner_lite.epoch_id, - &block.header.prev_hash, + &block.header().epoch_id(), + &block.header().prev_hash(), &valid_challenge, ) } @@ -371,7 +374,7 @@ fn test_verify_chunk_invalid_state_challenge() { let mut env = TestEnv::new_with_runtime(ChainGenesis::test(), 1, 1, runtimes); let signer = InMemorySigner::from_seed("test0", KeyType::ED25519, "test0"); let validator_signer = InMemoryValidatorSigner::from_seed("test0", KeyType::ED25519, "test0"); - let genesis_hash = env.clients[0].chain.genesis().hash(); + let genesis_hash = *env.clients[0].chain.genesis().hash(); env.produce_block(0, 1); env.clients[0].process_tx( SignedTransaction::send_money( @@ -397,10 +400,10 @@ fn test_verify_chunk_invalid_state_challenge() { let (mut invalid_chunk, merkle_paths) = env.clients[0] .shards_mgr .create_encoded_shard_chunk( - last_block.hash(), + *last_block.hash(), StateRoot::default(), CryptoHash::default(), - last_block.header.inner_lite.height + 1, + last_block.header().height() + 1, 0, 0, 1_000, @@ -408,7 +411,7 @@ fn test_verify_chunk_invalid_state_challenge() { vec![], vec![], &vec![], - last_block.chunks[0].inner.outgoing_receipts_root, + last_block.chunks()[0].inner.outgoing_receipts_root, CryptoHash::default(), &validator_signer, &mut rs, @@ -428,16 +431,17 @@ fn test_verify_chunk_invalid_state_challenge() { ) .unwrap(); - invalid_chunk.header.height_included = last_block.header.inner_lite.height + 1; + invalid_chunk.header.height_included = last_block.header().height() + 1; let mut block_merkle_tree = client.chain.mut_store().get_block_merkle_tree(&last_block.hash()).unwrap().clone(); - block_merkle_tree.insert(last_block.hash()); + block_merkle_tree.insert(*last_block.hash()); let block = Block::produce( - &last_block.header, - last_block.header.inner_lite.height + 1, + PROTOCOL_VERSION, + &last_block.header(), + last_block.header().height() + 
1, vec![invalid_chunk.header.clone()], - last_block.header.inner_lite.epoch_id.clone(), - last_block.header.inner_lite.next_epoch_id.clone(), + last_block.header().epoch_id().clone(), + last_block.header().next_epoch_id().clone(), vec![], Rational::from_integer(0), 0, @@ -445,7 +449,7 @@ fn test_verify_chunk_invalid_state_challenge() { vec![], vec![], &validator_signer, - last_block.header.inner_lite.next_bp_hash, + *last_block.header().next_bp_hash(), block_merkle_tree.root(), ); @@ -471,12 +475,12 @@ fn test_verify_chunk_invalid_state_challenge() { ); chain_update - .create_chunk_state_challenge(&last_block, &block, &block.chunks[0].clone()) + .create_chunk_state_challenge(&last_block, &block, &block.chunks()[0].clone()) .unwrap() }; { - let prev_merkle_proofs = Block::compute_chunk_headers_root(&last_block.chunks).1; - let merkle_proofs = Block::compute_chunk_headers_root(&block.chunks).1; + let prev_merkle_proofs = Block::compute_chunk_headers_root(&last_block.chunks()).1; + let merkle_proofs = Block::compute_chunk_headers_root(&block.chunks()).1; assert_eq!(prev_merkle_proofs[0], challenge_body.prev_merkle_proof); assert_eq!(merkle_proofs[0], challenge_body.merkle_proof); assert_eq!( @@ -503,12 +507,12 @@ fn test_verify_chunk_invalid_state_challenge() { assert_eq!( validate_challenge( &*runtime_adapter, - &block.header.inner_lite.epoch_id, - &block.header.prev_hash, + &block.header().epoch_id(), + &block.header().prev_hash(), &challenge, ) .unwrap(), - (block.hash(), vec!["test0".to_string()]) + (*block.hash(), vec!["test0".to_string()]) ); // Process the block with invalid chunk and make sure it's marked as invalid at the end. @@ -610,10 +614,10 @@ fn test_block_challenge() { env.produce_block(0, 1); let (chunk, _merkle_paths, _receipts, block) = create_invalid_proofs_chunk(&mut env.clients[0]); - let merkle_paths = Block::compute_chunk_headers_root(&block.chunks).1; + let merkle_paths = Block::compute_chunk_headers_root(&block.chunks()).1; let challenge = Challenge::produce( ChallengeBody::ChunkProofs(ChunkProofs { - block_header: block.header.try_to_vec().unwrap(), + block_header: block.header().try_to_vec().unwrap(), chunk: MaybeEncodedShardChunk::Encoded(chunk.clone()), merkle_proof: merkle_paths[chunk.header.inner.shard_id as usize].clone(), }), @@ -621,7 +625,7 @@ fn test_block_challenge() { ); env.clients[0].process_challenge(challenge.clone()).unwrap(); env.produce_block(0, 2); - assert_eq!(env.clients[0].chain.get_block_by_height(2).unwrap().challenges, vec![challenge]); + assert_eq!(env.clients[0].chain.get_block_by_height(2).unwrap().challenges(), &[challenge]); assert!(env.clients[0].chain.mut_store().is_block_challenged(&block.hash()).unwrap()); } @@ -650,7 +654,7 @@ fn test_fishermen_challenge() { let mut env = TestEnv::new_with_runtime(ChainGenesis::test(), 3, 1, vec![runtime1, runtime2, runtime3]); let signer = InMemorySigner::from_seed("test1", KeyType::ED25519, "test1"); - let genesis_hash = env.clients[0].chain.genesis().hash(); + let genesis_hash = *env.clients[0].chain.genesis().hash(); let stake_transaction = SignedTransaction::stake( 1, "test1".to_string(), @@ -666,9 +670,9 @@ fn test_fishermen_challenge() { let (chunk, _merkle_paths, _receipts, block) = create_invalid_proofs_chunk(&mut env.clients[0]); - let merkle_paths = Block::compute_chunk_headers_root(&block.chunks).1; + let merkle_paths = Block::compute_chunk_headers_root(&block.chunks()).1; let challenge_body = ChallengeBody::ChunkProofs(ChunkProofs { - block_header: block.header.try_to_vec().unwrap(), 
+ block_header: block.header().try_to_vec().unwrap(), chunk: MaybeEncodedShardChunk::Encoded(chunk.clone()), merkle_proof: merkle_paths[chunk.header.inner.shard_id as usize].clone(), }); @@ -683,7 +687,7 @@ fn test_fishermen_challenge() { assert!(env.clients[0].process_challenge(challenge1).is_err()); env.clients[0].process_challenge(challenge.clone()).unwrap(); env.produce_block(0, 12); - assert_eq!(env.clients[0].chain.get_block_by_height(12).unwrap().challenges, vec![challenge]); + assert_eq!(env.clients[0].chain.get_block_by_height(12).unwrap().challenges(), &[challenge]); assert!(env.clients[0].chain.mut_store().is_block_challenged(&block.hash()).unwrap()); } @@ -732,7 +736,7 @@ fn test_challenge_in_different_epoch() { let fork2_block = env.clients[1].produce_block(9).unwrap().unwrap(); fork_blocks.push(fork2_block); for block in fork_blocks { - let height = block.header.inner_lite.height; + let height = block.header().height(); let (_, result) = env.clients[0].process_block(block, Provenance::NONE); match env.clients[0].run_catchup(&vec![]) { Ok(accepted_blocks) => { diff --git a/chain/client/tests/chunks_management.rs b/chain/client/tests/chunks_management.rs index a585ef3f9ef..9f494502c37 100644 --- a/chain/client/tests/chunks_management.rs +++ b/chain/client/tests/chunks_management.rs @@ -14,7 +14,6 @@ use near_crypto::KeyType; use near_logger_utils::{init_integration_logger, init_test_logger}; use near_network::types::PartialEncodedChunkRequestMsg; use near_network::{NetworkClientMessages, NetworkRequests, NetworkResponses, PeerInfo}; -use near_primitives::block::BlockHeader; use near_primitives::hash::{hash, CryptoHash}; use near_primitives::sharding::{PartialEncodedChunk, ShardChunkHeader}; use near_primitives::transaction::SignedTransaction; @@ -123,63 +122,63 @@ fn chunks_produced_and_distributed_common( Arc::new(RwLock::new(Box::new(move |from_whom: String, msg: &NetworkRequests| { match msg { NetworkRequests::Block { block } => { - check_height(block.hash(), block.header.inner_lite.height); - check_height(block.header.prev_hash, block.header.inner_lite.height - 1); + check_height(*block.hash(), block.header().height()); + check_height(*block.header().prev_hash(), block.header().height() - 1); - let h = block.header.inner_lite.height; + let h = block.header().height(); let mut height_to_hash = height_to_hash.write().unwrap(); - height_to_hash.insert(h, block.hash()); + height_to_hash.insert(h, *block.hash()); let mut height_to_epoch = height_to_epoch.write().unwrap(); - height_to_epoch.insert(h, block.header.inner_lite.epoch_id.clone()); + height_to_epoch.insert(h, block.header().epoch_id().clone()); println!( "[{:?}]: BLOCK {} HEIGHT {}; HEADER HEIGHTS: {} / {} / {} / {};\nAPPROVALS: {:?}", Instant::now(), block.hash(), - block.header.inner_lite.height, - block.chunks[0].inner.height_created, - block.chunks[1].inner.height_created, - block.chunks[2].inner.height_created, - block.chunks[3].inner.height_created, - block.header.inner_rest.approvals, + block.header().height(), + block.chunks()[0].inner.height_created, + block.chunks()[1].inner.height_created, + block.chunks()[2].inner.height_created, + block.chunks()[3].inner.height_created, + block.header().approvals(), ); if h > 1 { // Make sure doomslug finality is computed correctly. 
- assert_eq!(block.header.inner_rest.last_ds_final_block, *height_to_hash.get(&(h - 1)).unwrap()); + assert_eq!(block.header().last_ds_final_block(), height_to_hash.get(&(h - 1)).unwrap()); // Make sure epoch length actually corresponds to the desired epoch length // The switches are expected at 0->1, 5->6 and 10->11 let prev_epoch_id = height_to_epoch.get(&(h - 1)).unwrap().clone(); - assert_eq!(block.header.inner_lite.epoch_id == prev_epoch_id, h % 5 != 1); + assert_eq!(block.header().epoch_id() == &prev_epoch_id, h % 5 != 1); // Make sure that the blocks leading to the epoch switch have twice as // many approval slots - assert_eq!(block.header.inner_rest.approvals.len() == 8, h % 5 == 0 || h % 5 == 4); + assert_eq!(block.header().approvals().len() == 8, h % 5 == 0 || h % 5 == 4); } if h > 2 { // Make sure BFT finality is computed correctly - assert_eq!(block.header.inner_rest.last_final_block, *height_to_hash.get(&(h - 2)).unwrap()); + assert_eq!(block.header().last_final_block(), height_to_hash.get(&(h - 2)).unwrap()); } - if block.header.inner_lite.height > 1 { + if block.header().height() > 1 { for shard_id in 0..4 { // If messages from 1 to 4 are dropped, 4 at their heights will // receive the block significantly later than the chunks, and // thus would discard the chunks - if !drop_from_1_to_4 || block.header.inner_lite.height % 4 != 3 { + if !drop_from_1_to_4 || block.header().height() % 4 != 3 { assert_eq!( - block.header.inner_lite.height, - block.chunks[shard_id].inner.height_created + block.header().height(), + block.chunks()[shard_id].inner.height_created ); } } } - if block.header.inner_lite.height >= 12 { - println!("PREV BLOCK HASH: {}", block.header.prev_hash); + if block.header().height() >= 12 { + println!("PREV BLOCK HASH: {}", block.header().prev_hash()); println!( "STATS: responses: {} requests: {}", partial_chunk_msgs, partial_chunk_request_msgs @@ -226,8 +225,7 @@ fn chunks_produced_and_distributed_common( let view_client = connectors.write().unwrap()[0].1.clone(); actix::spawn(view_client.send(GetBlock::latest()).then(move |res| { - let header: BlockHeader = res.unwrap().unwrap().header.into(); - let block_hash = header.hash; + let block_hash = res.unwrap().unwrap().header.hash; let connectors_ = connectors.write().unwrap(); connectors_[0] .0 @@ -254,7 +252,7 @@ fn test_request_chunk_restart() { } let block1 = env.clients[0].chain.get_block_by_height(3).unwrap().clone(); let request = PartialEncodedChunkRequestMsg { - chunk_hash: block1.chunks[0].chunk_hash(), + chunk_hash: block1.chunks()[0].chunk_hash(), part_ords: vec![0], tracking_shards: HashSet::default(), }; @@ -275,7 +273,7 @@ fn test_request_chunk_restart() { ); let response = env.network_adapters[0].pop().unwrap(); if let NetworkRequests::PartialEncodedChunkResponse { response: response_body, .. 
} = response { - assert_eq!(response_body.chunk_hash, block1.chunks[0].chunk_hash()); + assert_eq!(response_body.chunk_hash, block1.chunks()[0].chunk_hash()); } else { println!("{:?}", response); assert!(false); @@ -307,12 +305,12 @@ fn store_partial_encoded_chunk_sanity() { parts: vec![], receipts: vec![], }; - let block_hash = env.clients[0].chain.genesis().hash(); + let block_hash = *env.clients[0].chain.genesis().hash(); let block = env.clients[0].chain.get_block(&block_hash).unwrap().clone(); assert_eq!(env.clients[0].shards_mgr.get_stored_partial_encoded_chunks(1).len(), 0); env.clients[0] .shards_mgr - .store_partial_encoded_chunk(&block.header, partial_encoded_chunk.clone()); + .store_partial_encoded_chunk(&block.header(), partial_encoded_chunk.clone()); assert_eq!(env.clients[0].shards_mgr.get_stored_partial_encoded_chunks(1).len(), 1); assert_eq!( env.clients[0].shards_mgr.get_stored_partial_encoded_chunks(1)[&0], @@ -323,7 +321,7 @@ fn store_partial_encoded_chunk_sanity() { partial_encoded_chunk.header.hash.0 = hash(&[123]); env.clients[0] .shards_mgr - .store_partial_encoded_chunk(&block.header, partial_encoded_chunk.clone()); + .store_partial_encoded_chunk(&block.header(), partial_encoded_chunk.clone()); assert_eq!(env.clients[0].shards_mgr.get_stored_partial_encoded_chunks(1).len(), 1); assert_eq!( env.clients[0].shards_mgr.get_stored_partial_encoded_chunks(1)[&0], @@ -352,7 +350,7 @@ fn store_partial_encoded_chunk_sanity() { assert_eq!(env.clients[0].shards_mgr.get_stored_partial_encoded_chunks(1).len(), 1); env.clients[0] .shards_mgr - .store_partial_encoded_chunk(&block.header, partial_encoded_chunk2.clone()); + .store_partial_encoded_chunk(&block.header(), partial_encoded_chunk2.clone()); assert_eq!(env.clients[0].shards_mgr.get_stored_partial_encoded_chunks(1).len(), 2); assert_eq!( env.clients[0].shards_mgr.get_stored_partial_encoded_chunks(1)[&0], @@ -385,18 +383,18 @@ fn store_partial_encoded_chunk_sanity() { partial_encoded_chunk3.header = h.clone(); env.clients[0] .shards_mgr - .store_partial_encoded_chunk(&block.header, partial_encoded_chunk3.clone()); + .store_partial_encoded_chunk(&block.header(), partial_encoded_chunk3.clone()); assert_eq!(env.clients[0].shards_mgr.get_stored_partial_encoded_chunks(2).len(), 0); h.inner.height_created = 9; partial_encoded_chunk3.header = h.clone(); env.clients[0] .shards_mgr - .store_partial_encoded_chunk(&block.header, partial_encoded_chunk3.clone()); + .store_partial_encoded_chunk(&block.header(), partial_encoded_chunk3.clone()); assert_eq!(env.clients[0].shards_mgr.get_stored_partial_encoded_chunks(9).len(), 0); h.inner.height_created = 5; partial_encoded_chunk3.header = h.clone(); env.clients[0] .shards_mgr - .store_partial_encoded_chunk(&block.header, partial_encoded_chunk3.clone()); + .store_partial_encoded_chunk(&block.header(), partial_encoded_chunk3.clone()); assert_eq!(env.clients[0].shards_mgr.get_stored_partial_encoded_chunks(5).len(), 1); } diff --git a/chain/client/tests/consensus.rs b/chain/client/tests/consensus.rs index d3b330de4b0..2a9b9d765df 100644 --- a/chain/client/tests/consensus.rs +++ b/chain/client/tests/consensus.rs @@ -82,32 +82,31 @@ mod tests { match msg { NetworkRequests::Block { block } => { - if !all_blocks.contains_key(&block.header.inner_lite.height) { + if !all_blocks.contains_key(&block.header().height()) { println!( "BLOCK @{} EPOCH: {:?}, APPROVALS: {:?}", - block.header.inner_lite.height, - block.header.inner_lite.epoch_id, + block.header().height(), + block.header().epoch_id(), block - 
.header - .inner_rest - .approvals + .header() + .approvals() .iter() .map(|x| if x.is_some() { 1 } else { 0 }) .collect::>() ); } - all_blocks.insert(block.header.inner_lite.height, block.clone()); - block_to_prev_block.insert(block.hash(), block.header.prev_hash); - block_to_height.insert(block.hash(), block.header.inner_lite.height); + all_blocks.insert(block.header().height(), block.clone()); + block_to_prev_block.insert(*block.hash(), *block.header().prev_hash()); + block_to_height.insert(*block.hash(), block.header().height()); - if *largest_block_height / 20 < block.header.inner_lite.height / 20 { + if *largest_block_height / 20 < block.header().height() / 20 { // Periodically verify the finality println!("VERIFYING FINALITY CONDITIONS"); for block in all_blocks.values() { if let Some(prev_hash) = block_to_prev_block.get(&block.hash()) { if let Some(prev_height) = block_to_height.get(prev_hash) { - let cur_height = block.header.inner_lite.height; + let cur_height = block.header().height(); for f in final_block_heights.iter() { if f < &cur_height && f > prev_height { assert!( @@ -126,26 +125,22 @@ mod tests { } } - if block.header.inner_lite.height > *largest_block_height + 3 { - *largest_block_height = block.header.inner_lite.height; + if block.header().height() > *largest_block_height + 3 { + *largest_block_height = block.header().height(); if delayed_blocks.len() < 2 { delayed_blocks.push(block.clone()); return (NetworkResponses::NoResponse, false); } } - *largest_block_height = std::cmp::max( - block.header.inner_lite.height, - *largest_block_height, - ); + *largest_block_height = + std::cmp::max(block.header().height(), *largest_block_height); let mut new_delayed_blocks = vec![]; for delayed_block in delayed_blocks.iter() { if delayed_block.hash() == block.hash() { return (NetworkResponses::NoResponse, false); } - if delayed_block.header.inner_lite.height - <= block.header.inner_lite.height + 2 - { + if delayed_block.header().height() <= block.header().height() + 2 { for target_ord in 0..24 { connectors1.write().unwrap()[target_ord].0.do_send( NetworkClientMessages::Block( @@ -162,7 +157,7 @@ mod tests { *delayed_blocks = new_delayed_blocks; let mut heights = vec![]; - let mut cur_hash = block.hash(); + let mut cur_hash = *block.hash(); while let Some(height) = block_to_height.get(&cur_hash) { heights.push(height); cur_hash = block_to_prev_block.get(&cur_hash).unwrap().clone(); @@ -178,7 +173,7 @@ mod tests { is_final, delayed_blocks .iter() - .map(|x| x.header.inner_lite.height) + .map(|x| x.header().height()) .collect::>(), block.hash(), heights, diff --git a/chain/client/tests/cross_shard_tx.rs b/chain/client/tests/cross_shard_tx.rs index a1afbdd2bc8..cf8849f01f1 100644 --- a/chain/client/tests/cross_shard_tx.rs +++ b/chain/client/tests/cross_shard_tx.rs @@ -428,7 +428,7 @@ mod tests { ))), ); *connectors.write().unwrap() = conn; - let block_hash = genesis_block.hash(); + let block_hash = *genesis_block.hash(); let connectors_ = connectors.write().unwrap(); let iteration = Arc::new(AtomicUsize::new(0)); diff --git a/chain/client/tests/process_blocks.rs b/chain/client/tests/process_blocks.rs index 64835dd8137..3638874d4a6 100644 --- a/chain/client/tests/process_blocks.rs +++ b/chain/client/tests/process_blocks.rs @@ -26,7 +26,7 @@ use near_network::{ FullPeerInfo, NetworkClientMessages, NetworkClientResponses, NetworkRequests, NetworkResponses, PeerInfo, }; -use near_primitives::block::{Approval, ApprovalInner, BlockHeader}; +use near_primitives::block::{Approval, 
ApprovalInner}; use near_primitives::errors::InvalidTxError; use near_primitives::hash::{hash, CryptoHash}; use near_primitives::merkle::{merklize, verify_hash}; @@ -35,6 +35,7 @@ use near_primitives::transaction::{SignedTransaction, Transaction}; use near_primitives::types::{BlockHeight, EpochId, MerkleHash, NumBlocks}; use near_primitives::utils::to_timestamp; use near_primitives::validator_signer::{InMemoryValidatorSigner, ValidatorSigner}; +use near_primitives::version::PROTOCOL_VERSION; use near_primitives::views::{QueryRequest, QueryResponseKind}; use near_store::test_utils::create_test_store; use neard::config::{GenesisExt, TESTING_INIT_BALANCE, TESTING_INIT_STAKE}; @@ -119,8 +120,7 @@ fn produce_blocks_with_tx() { ); near_network::test_utils::wait_or_panic(5000); actix::spawn(view_client.send(GetBlock::latest()).then(move |res| { - let header: BlockHeader = res.unwrap().unwrap().header.into(); - let block_hash = header.hash; + let block_hash = res.unwrap().unwrap().header.hash; client.do_send(NetworkClientMessages::Transaction { transaction: SignedTransaction::empty(block_hash), is_forwarded: false, @@ -163,6 +163,7 @@ fn receive_network_block() { let signer = InMemoryValidatorSigner::from_seed("test1", KeyType::ED25519, "test1"); block_merkle_tree.insert(last_block.header.hash); let block = Block::produce( + PROTOCOL_VERSION, &last_block.header.clone().into(), last_block.header.height + 1, last_block.chunks.into_iter().map(Into::into).collect(), @@ -209,15 +210,15 @@ fn produce_block_with_approvals() { // test1 will only create their approval for height 10 after their doomslug timer // runs 10 iterations, which is way further in the future than them producing the // block - if block.header.num_approvals() == validators.len() as u64 - 2 { + if block.header().num_approvals() == validators.len() as u64 - 2 { System::current().stop(); - } else if block.header.inner_lite.height == 10 { - println!("{}", block.header.inner_lite.height); + } else if block.header().height() == 10 { + println!("{}", block.header().height()); println!( "{} != {} -2 (height: {})", - block.header.num_approvals(), + block.header().num_approvals(), validators.len(), - block.header.inner_lite.height + block.header().height() ); assert!(false); @@ -231,6 +232,7 @@ fn produce_block_with_approvals() { let signer1 = InMemoryValidatorSigner::from_seed("test2", KeyType::ED25519, "test2"); block_merkle_tree.insert(last_block.header.hash); let block = Block::produce( + PROTOCOL_VERSION, &last_block.header.clone().into(), last_block.header.height + 1, last_block.chunks.into_iter().map(Into::into).collect(), @@ -260,8 +262,8 @@ fn produce_block_with_approvals() { let s = if i > 10 { "test1".to_string() } else { format!("test{}", i) }; let signer = InMemoryValidatorSigner::from_seed(&s, KeyType::ED25519, &s); let approval = Approval::new( - block.hash(), - block.header.inner_lite.height, + *block.hash(), + block.header().height(), 10, // the height at which "test1" is producing &signer, ); @@ -308,7 +310,7 @@ fn produce_block_with_approvals_arrived_early() { Box::new(move |_: String, msg: &NetworkRequests| -> (NetworkResponses, bool) { match msg { NetworkRequests::Block { block } => { - if block.header.inner_lite.height == 3 { + if block.header().height() == 3 { for (i, (client, _)) in conns.clone().into_iter().enumerate() { if i > 0 { client.do_send(NetworkClientMessages::Block( @@ -320,7 +322,7 @@ fn produce_block_with_approvals_arrived_early() { } *block_holder.write().unwrap() = Some(block.clone()); return 
(NetworkResponses::NoResponse, false); - } else if block.header.inner_lite.height == 4 { + } else if block.header().height() == 4 { System::current().stop(); } (NetworkResponses::NoResponse, true) @@ -363,10 +365,10 @@ fn invalid_blocks() { Box::new(move |msg, _ctx, _client_actor| { match msg { NetworkRequests::Block { block } => { - assert_eq!(block.header.inner_lite.height, 1); + assert_eq!(block.header().height(), 1); assert_eq!( - block.header.inner_lite.prev_state_root, - merklize(&vec![MerkleHash::default()]).0 + block.header().prev_state_root(), + &merklize(&vec![MerkleHash::default()]).0 ); System::current().stop(); } @@ -380,6 +382,7 @@ fn invalid_blocks() { let signer = InMemoryValidatorSigner::from_seed("test", KeyType::ED25519, "test"); // Send block with invalid chunk mask let mut block = Block::produce( + PROTOCOL_VERSION, &last_block.header.clone().into(), last_block.header.height + 1, last_block.chunks.iter().cloned().map(Into::into).collect(), @@ -399,7 +402,7 @@ fn invalid_blocks() { last_block.header.next_bp_hash, CryptoHash::default(), ); - block.header.inner_rest.chunk_mask = vec![]; + block.mut_header().get_mut().inner_rest.chunk_mask = vec![]; client.do_send(NetworkClientMessages::Block( block.clone(), PeerInfo::random().id, @@ -409,6 +412,7 @@ fn invalid_blocks() { // Send proper block. block_merkle_tree.insert(last_block.header.hash); let block2 = Block::produce( + PROTOCOL_VERSION, &last_block.header.clone().into(), last_block.header.height + 1, last_block.chunks.into_iter().map(Into::into).collect(), @@ -450,7 +454,7 @@ fn skip_block_production() { Box::new(move |msg, _ctx, _client_actor| { match msg { NetworkRequests::Block { block } => { - if block.header.inner_lite.height > 3 { + if block.header().height() > 3 { System::current().stop(); } } @@ -560,7 +564,7 @@ fn test_process_invalid_tx() { public_key: signer.public_key(), nonce: 0, receiver_id: "".to_string(), - block_hash: client.chain.genesis().hash(), + block_hash: *client.chain.genesis().hash(), actions: vec![], }, ); @@ -606,15 +610,9 @@ fn test_time_attack() { let signer = InMemoryValidatorSigner::from_seed("test1", KeyType::ED25519, "test1"); let genesis = client.chain.get_block_by_height(0).unwrap(); let mut b1 = Block::empty_with_height(genesis, 1, &signer); - b1.header.inner_lite.timestamp = - to_timestamp(b1.header.timestamp() + chrono::Duration::seconds(60)); - let (hash, signature) = signer.sign_block_header_parts( - b1.header.prev_hash, - &b1.header.inner_lite, - &b1.header.inner_rest, - ); - b1.header.hash = hash; - b1.header.signature = signature; + b1.mut_header().get_mut().inner_lite.timestamp = + to_timestamp(b1.header().timestamp() + chrono::Duration::seconds(60)); + b1.mut_header().resign(&signer); let _ = client.process_block(b1, Provenance::NONE); @@ -643,7 +641,7 @@ fn test_invalid_approvals() { let signer = InMemoryValidatorSigner::from_seed("test1", KeyType::ED25519, "test1"); let genesis = client.chain.get_block_by_height(0).unwrap(); let mut b1 = Block::empty_with_height(genesis, 1, &signer); - b1.header.inner_rest.approvals = (0..100) + b1.mut_header().get_mut().inner_rest.approvals = (0..100) .map(|i| { Some( InMemoryValidatorSigner::from_seed( @@ -651,17 +649,12 @@ fn test_invalid_approvals() { KeyType::ED25519, &format!("test{}", i), ) - .sign_approval(&ApprovalInner::Endorsement(genesis.hash()), 1), + .sign_approval(&ApprovalInner::Endorsement(*genesis.hash()), 1), ) }) .collect(); - let (hash, signature) = signer.sign_block_header_parts( - b1.header.prev_hash, - 
&b1.header.inner_lite, - &b1.header.inner_rest, - ); - b1.header.hash = hash; - b1.header.signature = signature; + b1.mut_header().resign(&signer); + let (_, tip) = client.process_block(b1, Provenance::NONE); match tip { Err(e) => match e.kind() { @@ -700,14 +693,8 @@ fn test_invalid_gas_price() { let signer = InMemoryValidatorSigner::from_seed("test1", KeyType::ED25519, "test1"); let genesis = client.chain.get_block_by_height(0).unwrap(); let mut b1 = Block::empty_with_height(genesis, 1, &signer); - b1.header.inner_rest.gas_price = 0; - let (hash, signature) = signer.sign_block_header_parts( - b1.header.prev_hash, - &b1.header.inner_lite, - &b1.header.inner_rest, - ); - b1.header.hash = hash; - b1.header.signature = signature; + b1.mut_header().get_mut().inner_rest.gas_price = 0; + b1.mut_header().resign(&signer); let (_, result) = client.process_block(b1, Provenance::NONE); match result { @@ -747,7 +734,7 @@ fn test_minimum_gas_price() { env.produce_block(0, i); } let block = env.clients[0].chain.get_block_by_height(100).unwrap(); - assert!(block.header.inner_rest.gas_price >= min_gas_price); + assert!(block.header().gas_price() >= min_gas_price); } fn test_gc_with_epoch_length_common(epoch_length: NumBlocks) { @@ -773,7 +760,7 @@ fn test_gc_with_epoch_length_common(epoch_length: NumBlocks) { for i in 1..=epoch_length * (NUM_EPOCHS_TO_KEEP_STORE_DATA + 1) { println!("height = {}", i); if i < epoch_length { - let block_hash = blocks[i as usize - 1].hash(); + let block_hash = *blocks[i as usize - 1].hash(); assert!(matches!( env.clients[0].chain.get_block(&block_hash).unwrap_err().kind(), ErrorKind::BlockMissing(missing_block_hash) if missing_block_hash == block_hash @@ -853,7 +840,7 @@ fn test_gc_long_epoch() { assert!(env.clients[0] .chain .mut_store() - .get_all_block_hashes_by_height(block.header.inner_lite.height) + .get_all_block_hashes_by_height(block.header().height()) .is_ok()); } assert!(check_refcount_map(&mut env.clients[0].chain).is_ok()); @@ -891,7 +878,7 @@ fn test_tx_forwarding() { chain_genesis.epoch_length = 100; let mut env = TestEnv::new(chain_genesis, 50, 50); let genesis_block = env.clients[0].chain.get_block_by_height(0).unwrap(); - let genesis_hash = genesis_block.hash(); + let genesis_hash = *genesis_block.hash(); // forward to 2 chunk producers env.clients[0].process_tx(SignedTransaction::empty(genesis_hash), false, false); assert_eq!(env.network_adapters[0].requests.read().unwrap().len(), 4); @@ -903,7 +890,7 @@ fn test_tx_forwarding_no_double_forwarding() { chain_genesis.epoch_length = 100; let mut env = TestEnv::new(chain_genesis, 50, 50); let genesis_block = env.clients[0].chain.get_block_by_height(0).unwrap(); - let genesis_hash = genesis_block.hash(); + let genesis_hash = *genesis_block.hash(); env.clients[0].process_tx(SignedTransaction::empty(genesis_hash), true, false); assert!(env.network_adapters[0].requests.read().unwrap().is_empty()); } @@ -933,7 +920,7 @@ fn test_tx_forward_around_epoch_boundary() { chain_genesis.epoch_length = epoch_length; chain_genesis.gas_limit = genesis.config.gas_limit; let mut env = TestEnv::new_with_runtime(chain_genesis, 3, 2, runtimes); - let genesis_hash = env.clients[0].chain.genesis().hash(); + let genesis_hash = *env.clients[0].chain.genesis().hash(); let signer = InMemorySigner::from_seed("test1", KeyType::ED25519, "test1"); let tx = SignedTransaction::stake( 1, @@ -1037,12 +1024,12 @@ fn test_gc_tail_update() { env.process_block(0, block.clone(), Provenance::PRODUCED); blocks.push(block); } - let headers = 
blocks.clone().into_iter().map(|b| b.header).collect::>(); + let headers = blocks.iter().map(|b| b.header().clone()).collect::>(); env.clients[1].sync_block_headers(headers).unwrap(); // simulate save sync hash block let prev_sync_block = blocks[blocks.len() - 3].clone(); let sync_block = blocks[blocks.len() - 2].clone(); - env.clients[1].chain.reset_data_pre_state_sync(sync_block.hash()).unwrap(); + env.clients[1].chain.reset_data_pre_state_sync(*sync_block.hash()).unwrap(); let mut store_update = env.clients[1].chain.mut_store().store_update(); store_update.save_block(prev_sync_block.clone()); store_update.inc_block_refcount(&prev_sync_block.hash()).unwrap(); @@ -1050,13 +1037,10 @@ fn test_gc_tail_update() { store_update.commit().unwrap(); env.clients[1] .chain - .reset_heads_post_state_sync(&None, sync_block.hash(), |_| {}, |_| {}, |_| {}) + .reset_heads_post_state_sync(&None, *sync_block.hash(), |_| {}, |_| {}, |_| {}) .unwrap(); env.process_block(1, blocks.pop().unwrap(), Provenance::NONE); - assert_eq!( - env.clients[1].chain.store().tail().unwrap(), - prev_sync_block.header.inner_lite.height - ); + assert_eq!(env.clients[1].chain.store().tail().unwrap(), prev_sync_block.header().height()); assert!(check_refcount_map(&mut env.clients[0].chain).is_ok()); assert!(check_refcount_map(&mut env.clients[1].chain).is_ok()); } @@ -1107,7 +1091,7 @@ fn test_gas_price_change() { ))]; let mut env = TestEnv::new_with_runtime(chain_genesis, 1, 1, runtimes); let genesis_block = env.clients[0].chain.get_block_by_height(0).unwrap(); - let genesis_hash = genesis_block.hash(); + let genesis_hash = *genesis_block.hash(); let signer = InMemorySigner::from_seed("test1", KeyType::ED25519, "test1"); let tx = SignedTransaction::send_money( 1, @@ -1140,14 +1124,8 @@ fn test_invalid_block_root() { let mut env = TestEnv::new(ChainGenesis::test(), 1, 1); let mut b1 = env.clients[0].produce_block(1).unwrap().unwrap(); let signer = InMemoryValidatorSigner::from_seed("test0", KeyType::ED25519, "test0"); - b1.header.inner_lite.block_merkle_root = CryptoHash::default(); - let (hash, signature) = signer.sign_block_header_parts( - b1.header.prev_hash, - &b1.header.inner_lite, - &b1.header.inner_rest, - ); - b1.header.hash = hash; - b1.header.signature = signature; + b1.mut_header().get_mut().inner_lite.block_merkle_root = CryptoHash::default(); + b1.mut_header().resign(&signer); let (_, tip) = env.clients[0].process_block(b1, Provenance::NONE); match tip { Err(e) => match e.kind() { @@ -1199,10 +1177,10 @@ fn test_block_merkle_proof_with_len(n: NumBlocks) { env.process_block(0, block, Provenance::PRODUCED); } let head = blocks.pop().unwrap(); - let root = head.header.inner_lite.block_merkle_root; + let root = head.header().block_merkle_root(); for block in blocks { let proof = env.clients[0].chain.get_block_proof(&block.hash(), &head.hash()).unwrap(); - assert!(verify_hash(root, &proof, block.hash())); + assert!(verify_hash(*root, &proof, *block.hash())); } } @@ -1237,7 +1215,7 @@ fn test_data_reset_before_state_sync() { let mut env = TestEnv::new_with_runtime(ChainGenesis::test(), 1, 1, runtimes); let signer = InMemorySigner::from_seed("test0", KeyType::ED25519, "test0"); let genesis_block = env.clients[0].chain.get_block_by_height(0).unwrap(); - let genesis_hash = genesis_block.hash(); + let genesis_hash = *genesis_block.hash(); let tx = SignedTransaction::create_account( 1, "test0".to_string(), @@ -1258,24 +1236,24 @@ fn test_data_reset_before_state_sync() { .runtime_adapter .query( 0, - 
&head_block.chunks[0].inner.prev_state_root, + &head_block.chunks()[0].inner.prev_state_root, head.height, 0, &head.last_block_hash, - &head_block.header.inner_lite.epoch_id, + head_block.header().epoch_id(), &QueryRequest::ViewAccount { account_id: "test_account".to_string() }, ) .unwrap(); assert!(matches!(response.kind, QueryResponseKind::ViewAccount(_))); - env.clients[0].chain.reset_data_pre_state_sync(head_block.hash()).unwrap(); + env.clients[0].chain.reset_data_pre_state_sync(*head_block.hash()).unwrap(); // account should not exist after clearing state let response = env.clients[0].runtime_adapter.query( 0, - &head_block.chunks[0].inner.prev_state_root, + &head_block.chunks()[0].inner.prev_state_root, head.height, 0, &head.last_block_hash, - &head_block.header.inner_lite.epoch_id, + head_block.header().epoch_id(), &QueryRequest::ViewAccount { account_id: "test_account".to_string() }, ); assert!(response.is_err()); diff --git a/chain/client/tests/query_client.rs b/chain/client/tests/query_client.rs index 6906d3f1c48..733148ee0c4 100644 --- a/chain/client/tests/query_client.rs +++ b/chain/client/tests/query_client.rs @@ -9,7 +9,8 @@ use near_network::{NetworkClientMessages, PeerInfo}; use near_primitives::block::{Block, BlockHeader}; use near_primitives::types::{BlockIdOrFinality, EpochId}; use near_primitives::utils::to_timestamp; -use near_primitives::validator_signer::{InMemoryValidatorSigner, ValidatorSigner}; +use near_primitives::validator_signer::InMemoryValidatorSigner; +use near_primitives::version::PROTOCOL_VERSION; use near_primitives::views::{QueryRequest, QueryResponseKind}; use num_rational::Rational; @@ -49,8 +50,9 @@ fn query_status_not_crash() { actix::spawn(view_client.send(GetBlockWithMerkleTree::latest()).then(move |res| { let (block, mut block_merkle_tree) = res.unwrap().unwrap(); let header: BlockHeader = block.header.clone().into(); - block_merkle_tree.insert(header.hash); + block_merkle_tree.insert(*header.hash()); let mut next_block = Block::produce( + PROTOCOL_VERSION, &header, block.header.height + 1, block.chunks.into_iter().map(|c| c.into()).collect(), @@ -66,15 +68,10 @@ fn query_status_not_crash() { block.header.next_bp_hash, block_merkle_tree.root(), ); - next_block.header.inner_lite.timestamp = - to_timestamp(next_block.header.timestamp() + chrono::Duration::seconds(60)); - let (hash, signature) = signer.sign_block_header_parts( - next_block.header.prev_hash, - &next_block.header.inner_lite, - &next_block.header.inner_rest, - ); - next_block.header.hash = hash; - next_block.header.signature = signature; + next_block.mut_header().get_mut().inner_lite.timestamp = + to_timestamp(next_block.header().timestamp() + chrono::Duration::seconds(60)); + next_block.mut_header().resign(&signer); + actix::spawn( client .send(NetworkClientMessages::Block(next_block, PeerInfo::random().id, false)) diff --git a/chain/epoch_manager/Cargo.toml b/chain/epoch_manager/Cargo.toml index 0fab11b3d04..b13949e1930 100644 --- a/chain/epoch_manager/Cargo.toml +++ b/chain/epoch_manager/Cargo.toml @@ -10,10 +10,11 @@ edition = "2018" protocol_defining_rand = { package = "rand", version = "0.6.5", default-features = false } log = "0.4" cached = "0.12" -borsh = "0.6.1" +borsh = "0.6.2" rand = "0.7" serde = { version = "1", features = [ "derive" ] } serde_json = "1" +smart-default = "0.6" primitive-types = { version = "0.7", default-features = false } num-rational = "0.2.4" diff --git a/chain/epoch_manager/src/lib.rs b/chain/epoch_manager/src/lib.rs index 561baeed852..5af36440b95 
100644 --- a/chain/epoch_manager/src/lib.rs +++ b/chain/epoch_manager/src/lib.rs @@ -12,6 +12,7 @@ use near_primitives::types::{ AccountId, ApprovalStake, Balance, BlockChunkValidatorStats, BlockHeight, EpochId, ShardId, ValidatorId, ValidatorKickoutReason, ValidatorStake, ValidatorStats, }; +use near_primitives::version::ProtocolVersion; use near_primitives::views::{ CurrentEpochValidatorInfo, EpochValidatorInfo, NextEpochValidatorInfo, ValidatorKickoutView, }; @@ -51,6 +52,7 @@ impl EpochManager { pub fn new( store: Arc, config: EpochConfig, + genesis_protocol_version: ProtocolVersion, reward_calculator: RewardCalculator, validators: Vec, ) -> Result { @@ -76,6 +78,7 @@ impl EpochManager { HashMap::default(), validator_reward, 0, + genesis_protocol_version, )?; // Dummy block info. // Artificial block we add to simplify implementation: dummy block is the @@ -199,6 +202,35 @@ impl EpochManager { let block_validator_tracker = last_block_info.block_tracker.clone(); let chunk_validator_tracker = last_block_info.shard_tracker.clone(); + // Next protocol version calculation. + // Implements https://github.com/nearprotocol/NEPs/pull/64/files#diff-45f773511fe4321b446c3c4226324873R76 + let mut versions = HashMap::new(); + for (validator_id, version) in last_block_info.version_tracker.iter() { + let stake = epoch_info.validators[*validator_id as usize].stake; + *versions.entry(version).or_insert(0) += stake; + } + let total_block_producer_stake: u128 = epoch_info + .block_producers_settlement + .iter() + .map(|id| epoch_info.validators[*id as usize].stake) + .sum(); + + let next_version = if let Some((&version, stake)) = + versions.into_iter().max_by(|left, right| left.1.cmp(&right.1)) + { + if stake + > (total_block_producer_stake + * *self.config.protocol_upgrade_stake_threshold.numer() as u128) + / *self.config.protocol_upgrade_stake_threshold.denom() as u128 + { + version + } else { + epoch_info.protocol_version + } + } else { + epoch_info.protocol_version + }; + // Gather slashed validators and add them to kick out first. let slashed_validators = last_block_info.slashed.clone(); for (account_id, _) in slashed_validators.iter() { @@ -247,6 +279,7 @@ impl EpochManager { all_proposals, validator_kickout, validator_block_chunk_stats, + next_version, }) } @@ -284,6 +317,7 @@ impl EpochManager { all_proposals, validator_kickout, validator_block_chunk_stats, + next_version, } = self.collect_blocks_info(&block_info, last_block_hash)?; let epoch_id = self.get_epoch_id(last_block_hash)?; let epoch_info = self.get_epoch_info(&epoch_id)?; @@ -308,6 +342,7 @@ impl EpochManager { validator_kickout, validator_reward, minted_amount, + next_version, ) { Ok(next_next_epoch_info) => next_next_epoch_info, Err(EpochError::ThresholdError { stake_sum, num_seats }) => { @@ -397,8 +432,13 @@ impl EpochManager { } } - let BlockInfo { block_tracker, mut all_proposals, shard_tracker, .. } = - prev_block_info; + let BlockInfo { + block_tracker, + mut all_proposals, + shard_tracker, + version_tracker, + .. + } = prev_block_info; // Update block produced/expected tracker. 
block_info.update_block_tracker( @@ -411,6 +451,10 @@ impl EpochManager { prev_block_info.height, if is_epoch_start { HashMap::default() } else { shard_tracker }, ); + block_info.update_version_tracker( + &epoch_info, + if is_epoch_start { HashMap::default() } else { version_tracker }, + ); // accumulate values if is_epoch_start { block_info.all_proposals = block_info.proposals.clone(); @@ -896,7 +940,10 @@ impl EpochManager { Ok(false) } - fn block_producer_from_info(epoch_info: &EpochInfo, height: BlockHeight) -> ValidatorId { + pub(crate) fn block_producer_from_info( + epoch_info: &EpochInfo, + height: BlockHeight, + ) -> ValidatorId { epoch_info.block_producers_settlement [(height as u64 % (epoch_info.block_producers_settlement.len() as u64)) as usize] } @@ -1061,10 +1108,11 @@ mod tests { use near_primitives::challenge::SlashedValidator; use near_primitives::hash::hash; use near_primitives::types::ValidatorKickoutReason::NotEnoughBlocks; + use near_primitives::version::PROTOCOL_VERSION; use near_store::test_utils::create_test_store; use crate::test_utils::{ - change_stake, default_reward_calculator, epoch_config, epoch_info, hash_range, + block_info, change_stake, default_reward_calculator, epoch_config, epoch_info, hash_range, record_block, reward, setup_default_epoch_manager, setup_epoch_manager, stake, DEFAULT_TOTAL_SUPPLY, }; @@ -1128,6 +1176,7 @@ mod tests { let mut epoch_manager2 = EpochManager::new( epoch_manager.store.clone(), epoch_manager.config.clone(), + PROTOCOL_VERSION, epoch_manager.reward_calculator, validators.iter().map(|(account_id, balance)| stake(*account_id, *balance)).collect(), ) @@ -1296,17 +1345,9 @@ mod tests { /// of kickout for this epoch and last epoch equals the entire validator set. #[test] fn test_validator_kickout() { - let store = create_test_store(); - let config = epoch_config(4, 1, 2, 0, 90, 60, 0); let amount_staked = 1_000_000; - let validators = vec![stake("test1", amount_staked), stake("test2", amount_staked)]; - let mut epoch_manager = EpochManager::new( - store.clone(), - config.clone(), - default_reward_calculator(), - validators.clone(), - ) - .unwrap(); + let validators = vec![("test1", amount_staked), ("test2", amount_staked)]; + let mut epoch_manager = setup_default_epoch_manager(validators, 4, 1, 2, 0, 90, 60); let h = hash_range(12); record_block(&mut epoch_manager, CryptoHash::default(), h[0], 0, vec![]); @@ -1354,6 +1395,7 @@ mod tests { let mut epoch_manager = EpochManager::new( store.clone(), config.clone(), + PROTOCOL_VERSION, default_reward_calculator(), validators.clone(), ) @@ -1428,6 +1470,7 @@ mod tests { let mut epoch_manager = EpochManager::new( store.clone(), config.clone(), + PROTOCOL_VERSION, default_reward_calculator(), validators.clone(), ) @@ -1450,6 +1493,7 @@ mod tests { vec![], vec![SlashedValidator::new("test1".to_string(), false)], DEFAULT_TOTAL_SUPPLY, + PROTOCOL_VERSION, ), [0; 32], ) @@ -1513,6 +1557,7 @@ mod tests { let mut epoch_manager = EpochManager::new( store.clone(), config.clone(), + PROTOCOL_VERSION, default_reward_calculator(), validators.clone(), ) @@ -1536,6 +1581,7 @@ mod tests { SlashedValidator::new("test1".to_string(), false), ], DEFAULT_TOTAL_SUPPLY, + PROTOCOL_VERSION, ), [0; 32], ) @@ -1562,6 +1608,7 @@ mod tests { vec![], vec![SlashedValidator::new("test1".to_string(), true)], DEFAULT_TOTAL_SUPPLY, + PROTOCOL_VERSION, ), [0; 32], ) @@ -1595,17 +1642,9 @@ mod tests { /// Test that two double sign challenge in two epochs works #[test] fn test_double_sign_slashing2() { - let store = 
create_test_store(); - let config = epoch_config(2, 1, 2, 0, 90, 60, 0); let amount_staked = 1_000_000; - let validators = vec![stake("test1", amount_staked), stake("test2", amount_staked)]; - let mut epoch_manager = EpochManager::new( - store.clone(), - config.clone(), - default_reward_calculator(), - validators.clone(), - ) - .unwrap(); + let validators = vec![("test1", amount_staked), ("test2", amount_staked)]; + let mut epoch_manager = setup_default_epoch_manager(validators, 2, 1, 2, 0, 90, 60); let h = hash_range(10); record_block(&mut epoch_manager, CryptoHash::default(), h[0], 0, vec![]); @@ -1621,6 +1660,7 @@ mod tests { vec![], vec![SlashedValidator::new("test1".to_string(), true)], DEFAULT_TOTAL_SUPPLY, + PROTOCOL_VERSION, ), [0; 32], ) @@ -1648,6 +1688,7 @@ mod tests { vec![], vec![SlashedValidator::new("test1".to_string(), true)], DEFAULT_TOTAL_SUPPLY, + PROTOCOL_VERSION, ), [0; 32], ) @@ -1719,60 +1760,21 @@ mod tests { epoch_manager .record_block_info( &h[0], - BlockInfo { - height: 0, - last_finalized_height: 0, - prev_hash: Default::default(), - epoch_first_block: h[0], - epoch_id: Default::default(), - proposals: vec![], - chunk_mask: vec![true], - slashed: Default::default(), - total_supply, - block_tracker: Default::default(), - shard_tracker: Default::default(), - all_proposals: vec![], - }, + block_info(0, 0, Default::default(), h[0], vec![true], total_supply), rng_seed, ) .unwrap(); epoch_manager .record_block_info( &h[1], - BlockInfo { - height: 1, - last_finalized_height: 1, - prev_hash: h[0], - epoch_first_block: h[1], - epoch_id: Default::default(), - proposals: vec![], - chunk_mask: vec![true], - slashed: Default::default(), - total_supply, - block_tracker: Default::default(), - shard_tracker: Default::default(), - all_proposals: vec![], - }, + block_info(1, 1, h[0], h[1], vec![true], total_supply), rng_seed, ) .unwrap(); epoch_manager .record_block_info( &h[2], - BlockInfo { - height: 2, - last_finalized_height: 2, - prev_hash: h[1], - epoch_first_block: h[1], - epoch_id: Default::default(), - proposals: vec![], - chunk_mask: vec![true], - slashed: Default::default(), - total_supply, - block_tracker: Default::default(), - shard_tracker: Default::default(), - all_proposals: vec![], - }, + block_info(2, 2, h[1], h[1], vec![true], total_supply), rng_seed, ) .unwrap(); @@ -1843,60 +1845,21 @@ mod tests { epoch_manager .record_block_info( &h[0], - BlockInfo { - height: 0, - last_finalized_height: 0, - prev_hash: Default::default(), - epoch_first_block: h[0], - epoch_id: Default::default(), - proposals: vec![], - chunk_mask: vec![true], - slashed: Default::default(), - total_supply, - block_tracker: Default::default(), - shard_tracker: Default::default(), - all_proposals: vec![], - }, + block_info(0, 0, Default::default(), h[0], vec![true], total_supply), rng_seed, ) .unwrap(); epoch_manager .record_block_info( &h[1], - BlockInfo { - height: 1, - last_finalized_height: 1, - prev_hash: h[0], - epoch_first_block: h[1], - epoch_id: Default::default(), - proposals: vec![], - chunk_mask: vec![true], - slashed: Default::default(), - total_supply, - block_tracker: Default::default(), - shard_tracker: Default::default(), - all_proposals: vec![], - }, + block_info(1, 1, h[0], h[1], vec![true], total_supply), rng_seed, ) .unwrap(); epoch_manager .record_block_info( &h[2], - BlockInfo { - height: 2, - last_finalized_height: 2, - prev_hash: h[1], - epoch_first_block: h[1], - epoch_id: Default::default(), - proposals: vec![], - chunk_mask: vec![true], - slashed: 
Default::default(), - total_supply, - block_tracker: Default::default(), - shard_tracker: Default::default(), - all_proposals: vec![], - }, + block_info(2, 2, h[1], h[1], vec![true], total_supply), rng_seed, ) .unwrap(); @@ -1986,60 +1949,21 @@ mod tests { epoch_manager .record_block_info( &h[0], - BlockInfo { - height: 0, - last_finalized_height: 0, - prev_hash: Default::default(), - epoch_first_block: h[0], - epoch_id: Default::default(), - proposals: vec![], - chunk_mask: vec![], - slashed: Default::default(), - total_supply, - block_tracker: Default::default(), - shard_tracker: Default::default(), - all_proposals: vec![], - }, + block_info(0, 0, Default::default(), h[0], vec![true], total_supply), rng_seed, ) .unwrap(); epoch_manager .record_block_info( &h[1], - BlockInfo { - height: 1, - last_finalized_height: 1, - prev_hash: h[0], - epoch_first_block: h[1], - epoch_id: Default::default(), - proposals: vec![], - chunk_mask: vec![true, false], - slashed: Default::default(), - total_supply, - block_tracker: Default::default(), - shard_tracker: Default::default(), - all_proposals: vec![], - }, + block_info(1, 1, h[0], h[1], vec![true, false], total_supply), rng_seed, ) .unwrap(); epoch_manager .record_block_info( &h[2], - BlockInfo { - height: 2, - last_finalized_height: 2, - prev_hash: h[1], - epoch_first_block: h[1], - epoch_id: Default::default(), - proposals: vec![], - chunk_mask: vec![true, true], - slashed: Default::default(), - total_supply, - block_tracker: Default::default(), - shard_tracker: Default::default(), - all_proposals: vec![], - }, + block_info(2, 2, h[1], h[1], vec![true, true], total_supply), rng_seed, ) .unwrap(); @@ -2083,17 +2007,9 @@ mod tests { #[test] fn test_unstake_and_then_change_stake() { - let store = create_test_store(); - let config = epoch_config(2, 1, 2, 0, 90, 60, 0); let amount_staked = 1_000_000; - let validators = vec![stake("test1", amount_staked), stake("test2", amount_staked)]; - let mut epoch_manager = EpochManager::new( - store.clone(), - config.clone(), - default_reward_calculator(), - validators.clone(), - ) - .unwrap(); + let validators = vec![("test1", amount_staked), ("test2", amount_staked)]; + let mut epoch_manager = setup_default_epoch_manager(validators, 2, 1, 2, 0, 90, 60); let h = hash_range(8); record_block(&mut epoch_manager, CryptoHash::default(), h[0], 0, vec![]); // test1 unstakes in epoch 1, and should be kicked out in epoch 3 (validators stored at h2). 
@@ -2144,60 +2060,21 @@ mod tests { epoch_manager .record_block_info( &h[0], - BlockInfo { - height: 0, - last_finalized_height: 0, - prev_hash: Default::default(), - epoch_first_block: h[0], - epoch_id: Default::default(), - proposals: vec![], - chunk_mask: vec![], - slashed: Default::default(), - total_supply, - block_tracker: Default::default(), - shard_tracker: Default::default(), - all_proposals: vec![], - }, + block_info(0, 0, Default::default(), h[0], vec![], total_supply), rng_seed, ) .unwrap(); epoch_manager .record_block_info( &h[1], - BlockInfo { - height: 1, - last_finalized_height: 1, - prev_hash: h[0], - epoch_first_block: h[1], - epoch_id: Default::default(), - proposals: vec![], - chunk_mask: vec![true, true, true], - slashed: Default::default(), - total_supply, - block_tracker: Default::default(), - shard_tracker: Default::default(), - all_proposals: vec![], - }, + block_info(1, 1, h[0], h[1], vec![true, true, true], total_supply), rng_seed, ) .unwrap(); epoch_manager .record_block_info( &h[3], - BlockInfo { - height: 3, - last_finalized_height: 3, - prev_hash: h[1], - epoch_first_block: h[2], - epoch_id: Default::default(), - proposals: vec![], - chunk_mask: vec![true, true, true], - slashed: Default::default(), - total_supply, - block_tracker: Default::default(), - shard_tracker: Default::default(), - all_proposals: vec![], - }, + block_info(3, 3, h[1], h[2], vec![true, true, true], total_supply), rng_seed, ) .unwrap(); @@ -2247,20 +2124,7 @@ mod tests { epoch_manager .record_block_info( &h[3], - BlockInfo { - height: 3, - last_finalized_height: 1, - prev_hash: h[1], - epoch_first_block: h[1], - epoch_id: Default::default(), - proposals: vec![], - chunk_mask: vec![false], - slashed: Default::default(), - total_supply, - block_tracker: Default::default(), - shard_tracker: Default::default(), - all_proposals: vec![], - }, + block_info(3, 1, h[1], h[1], vec![false], total_supply), rng_seed, ) .unwrap(); @@ -2404,58 +2268,19 @@ mod tests { record_block(&mut em, Default::default(), h[0], 0, vec![]); em.record_block_info( &h[1], - BlockInfo { - height: 1, - last_finalized_height: 1, - prev_hash: h[0], - epoch_first_block: h[1], - epoch_id: Default::default(), - proposals: vec![], - chunk_mask: vec![true, true, true, false], - slashed: Default::default(), - total_supply, - block_tracker: Default::default(), - shard_tracker: Default::default(), - all_proposals: vec![], - }, + block_info(1, 1, h[0], h[1], vec![true, true, true, false], total_supply), rng_seed, ) .unwrap(); em.record_block_info( &h[2], - BlockInfo { - height: 2, - last_finalized_height: 2, - prev_hash: h[1], - epoch_first_block: h[1], - epoch_id: Default::default(), - proposals: vec![], - chunk_mask: vec![true, true, true, false], - slashed: Default::default(), - total_supply, - block_tracker: Default::default(), - shard_tracker: Default::default(), - all_proposals: vec![], - }, + block_info(2, 2, h[1], h[1], vec![true, true, true, false], total_supply), rng_seed, ) .unwrap(); em.record_block_info( &h[3], - BlockInfo { - height: 3, - last_finalized_height: 3, - prev_hash: h[2], - epoch_first_block: h[3], - epoch_id: Default::default(), - proposals: vec![], - chunk_mask: vec![true, true, true, true], - slashed: Default::default(), - total_supply, - block_tracker: Default::default(), - shard_tracker: Default::default(), - all_proposals: vec![], - }, + block_info(3, 3, h[2], h[3], vec![true, true, true, true], total_supply), rng_seed, ) .unwrap(); @@ -2481,17 +2306,9 @@ mod tests { #[test] fn test_compare_epoch_id() 
{ - let store = create_test_store(); - let config = epoch_config(2, 1, 2, 0, 90, 60, 0); let amount_staked = 1_000_000; - let validators = vec![stake("test1", amount_staked), stake("test2", amount_staked)]; - let mut epoch_manager = EpochManager::new( - store.clone(), - config.clone(), - default_reward_calculator(), - validators.clone(), - ) - .unwrap(); + let validators = vec![("test1", amount_staked), ("test2", amount_staked)]; + let mut epoch_manager = setup_default_epoch_manager(validators, 2, 1, 2, 0, 90, 60); let h = hash_range(8); record_block(&mut epoch_manager, CryptoHash::default(), h[0], 0, vec![]); // test1 unstakes in epoch 1, and should be kicked out in epoch 3 (validators stored at h2). @@ -2829,4 +2646,33 @@ mod tests { ) ); } + + #[test] + fn test_protocol_version_switch() { + let store = create_test_store(); + let config = epoch_config(2, 1, 2, 0, 90, 60, 0); + let amount_staked = 1_000_000; + let validators = vec![stake("test1", amount_staked), stake("test2", amount_staked)]; + let mut epoch_manager = EpochManager::new( + store.clone(), + config.clone(), + 0, + default_reward_calculator(), + validators.clone(), + ) + .unwrap(); + let h = hash_range(8); + record_block(&mut epoch_manager, CryptoHash::default(), h[0], 0, vec![]); + let mut block_info1 = block_info(1, 1, h[0], h[0], vec![], DEFAULT_TOTAL_SUPPLY); + block_info1.latest_protocol_version = 0; + epoch_manager.record_block_info(&h[1], block_info1, [0; 32]).unwrap(); + for i in 2..6 { + record_block(&mut epoch_manager, h[i - 1], h[i], i as u64, vec![]); + } + assert_eq!(epoch_manager.get_epoch_info(&EpochId(h[2])).unwrap().protocol_version, 0); + assert_eq!( + epoch_manager.get_epoch_info(&EpochId(h[4])).unwrap().protocol_version, + PROTOCOL_VERSION + ); + } } diff --git a/chain/epoch_manager/src/proposals.rs b/chain/epoch_manager/src/proposals.rs index 709bbf6109f..8242afa5097 100644 --- a/chain/epoch_manager/src/proposals.rs +++ b/chain/epoch_manager/src/proposals.rs @@ -5,6 +5,7 @@ use near_primitives::errors::EpochError; use near_primitives::types::{ AccountId, Balance, NumSeats, ValidatorId, ValidatorKickoutReason, ValidatorStake, }; +use near_primitives::version::ProtocolVersion; use crate::types::{EpochConfig, EpochInfo, RngSeed}; @@ -41,6 +42,7 @@ pub fn proposals_to_epoch_info( mut validator_kickout: HashMap, validator_reward: HashMap, minted_amount: Balance, + next_version: ProtocolVersion, ) -> Result { // Combine proposals with rollovers. 
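
As a concrete check of the numbers used in test_protocol_version_switch above (two block producers staking 1_000_000 each, threshold 80/100), the snippet below works through the cutoff arithmetic. It is an illustration only; the constants mirror the test setup and are not part of the patch.

    // Illustrative arithmetic for protocol_upgrade_stake_threshold = 80/100.
    use num_rational::Rational;

    fn main() {
        let threshold = Rational::new(80, 100); // reduces to 4/5
        let total_block_producer_stake: u128 = 2_000_000; // two producers, 1_000_000 each
        let cutoff = total_block_producer_stake * (*threshold.numer() as u128)
            / (*threshold.denom() as u128);
        assert_eq!(cutoff, 1_600_000);
        assert!(1_000_000 <= cutoff); // a single producer cannot carry the upgrade alone
        assert!(2_000_000 > cutoff); // both producers advertising the new version can
    }
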
let mut ordered_proposals = BTreeMap::new(); @@ -201,6 +203,7 @@ pub fn proposals_to_epoch_info( validator_kickout, fishermen_to_index, minted_amount, + protocol_version: next_version, }) } @@ -208,6 +211,8 @@ pub fn proposals_to_epoch_info( mod tests { use num_rational::Rational; + use near_primitives::version::PROTOCOL_VERSION; + use crate::test_utils::{change_stake, epoch_config, epoch_info, stake}; use super::*; @@ -231,7 +236,8 @@ mod tests { vec![stake("test1", 1_000_000)], HashMap::default(), HashMap::default(), - 0 + 0, + PROTOCOL_VERSION, ) .unwrap(), epoch_info( @@ -260,6 +266,8 @@ mod tests { fishermen_threshold: 10, online_min_threshold: Rational::new(90, 100), online_max_threshold: Rational::new(99, 100), + protocol_upgrade_stake_threshold: Rational::new(80, 100), + protocol_upgrade_num_epochs: 2, }, [0; 32], &EpochInfo::default(), @@ -271,7 +279,8 @@ mod tests { ], HashMap::default(), HashMap::default(), - 0 + 0, + PROTOCOL_VERSION ) .unwrap(), epoch_info( @@ -317,7 +326,8 @@ mod tests { ], HashMap::default(), HashMap::default(), - 0 + 0, + PROTOCOL_VERSION ) .unwrap(), epoch_info( @@ -356,7 +366,8 @@ mod tests { vec![stake("test1", 9), stake("test2", 9), stake("test3", 9), stake("test4", 9)], HashMap::default(), HashMap::default(), - 0 + 0, + PROTOCOL_VERSION ) .unwrap(), epoch_info diff --git a/chain/epoch_manager/src/test_utils.rs b/chain/epoch_manager/src/test_utils.rs index 0b935520964..d2966f012c7 100644 --- a/chain/epoch_manager/src/test_utils.rs +++ b/chain/epoch_manager/src/test_utils.rs @@ -1,19 +1,21 @@ use std::collections::{BTreeMap, HashMap}; +use num_rational::Rational; + use near_crypto::{KeyType, SecretKey}; +use near_primitives::challenge::SlashedValidator; use near_primitives::hash::{hash, CryptoHash}; use near_primitives::types::{ AccountId, Balance, BlockHeight, BlockHeightDelta, EpochHeight, NumSeats, NumShards, ValidatorId, ValidatorKickoutReason, ValidatorStake, }; use near_primitives::utils::get_num_seats_per_shard; +use near_primitives::version::PROTOCOL_VERSION; use near_store::test_utils::create_test_store; use crate::types::{EpochConfig, EpochInfo, ValidatorWeight}; use crate::RewardCalculator; use crate::{BlockInfo, EpochManager}; -use near_primitives::challenge::SlashedValidator; -use num_rational::Rational; pub const DEFAULT_GAS_PRICE: u128 = 100; pub const DEFAULT_TOTAL_SUPPLY: u128 = 1_000_000_000_000; @@ -77,6 +79,7 @@ pub fn epoch_info( validator_reward, validator_kickout, minted_amount, + protocol_version: PROTOCOL_VERSION, } } @@ -105,6 +108,8 @@ pub fn epoch_config( fishermen_threshold, online_min_threshold: Rational::new(90, 100), online_max_threshold: Rational::new(99, 100), + protocol_upgrade_stake_threshold: Rational::new(80, 100), + protocol_upgrade_num_epochs: 2, } } @@ -154,6 +159,7 @@ pub fn setup_epoch_manager( EpochManager::new( store, config, + PROTOCOL_VERSION, reward_calculator, validators.iter().map(|(account_id, balance)| stake(*account_id, *balance)).collect(), ) @@ -201,6 +207,7 @@ pub fn record_block_with_slashes( vec![], slashed, DEFAULT_TOTAL_SUPPLY, + PROTOCOL_VERSION, ), [0; 32], ) @@ -218,3 +225,29 @@ pub fn record_block( ) { record_block_with_slashes(epoch_manager, prev_h, cur_h, height, proposals, vec![]); } + +pub fn block_info( + height: BlockHeight, + last_finalized_height: BlockHeight, + prev_hash: CryptoHash, + epoch_first_block: CryptoHash, + chunk_mask: Vec, + total_supply: Balance, +) -> BlockInfo { + BlockInfo { + height, + last_finalized_height, + prev_hash, + epoch_first_block, + epoch_id: 
Default::default(), + proposals: vec![], + chunk_mask, + latest_protocol_version: PROTOCOL_VERSION, + slashed: Default::default(), + total_supply, + block_tracker: Default::default(), + shard_tracker: Default::default(), + all_proposals: vec![], + version_tracker: Default::default(), + } +} diff --git a/chain/epoch_manager/src/types.rs b/chain/epoch_manager/src/types.rs index 30e702d542c..c63f60ee61f 100644 --- a/chain/epoch_manager/src/types.rs +++ b/chain/epoch_manager/src/types.rs @@ -1,9 +1,4 @@ -use std::collections::{BTreeMap, HashMap}; - use borsh::{BorshDeserialize, BorshSerialize}; -use num_rational::Rational; -use serde::Serialize; - use near_primitives::challenge::SlashedValidator; use near_primitives::hash::CryptoHash; use near_primitives::types::{ @@ -11,6 +6,11 @@ use near_primitives::types::{ EpochId, NumSeats, NumShards, ShardId, ValidatorId, ValidatorKickoutReason, ValidatorStake, ValidatorStats, }; +use near_primitives::version::{ProtocolVersion, PROTOCOL_VERSION}; +use num_rational::Rational; +use serde::Serialize; +use smart_default::SmartDefault; +use std::collections::{BTreeMap, HashMap}; use crate::EpochManager; @@ -40,13 +40,17 @@ pub struct EpochConfig { pub online_max_threshold: Rational, /// Stake threshold for becoming a fisherman. pub fishermen_threshold: Balance, + /// Threshold of stake that needs to indicate that they ready for upgrade. + pub protocol_upgrade_stake_threshold: Rational, + /// Number of epochs after stake threshold was achieved to start next prtocol version. + pub protocol_upgrade_num_epochs: EpochHeight, } #[derive(Default, BorshSerialize, BorshDeserialize, Serialize, Clone, Debug, PartialEq, Eq)] pub struct ValidatorWeight(ValidatorId, u64); /// Information per epoch. -#[derive(Default, BorshSerialize, BorshDeserialize, Serialize, Clone, Debug, PartialEq, Eq)] +#[derive(SmartDefault, BorshSerialize, BorshDeserialize, Serialize, Clone, Debug, PartialEq, Eq)] pub struct EpochInfo { /// Ordinal of given epoch from genesis. /// There can be multiple epochs with the same ordinal in case of long forks. @@ -73,6 +77,9 @@ pub struct EpochInfo { pub validator_kickout: HashMap, /// Total minted tokens in the epoch. pub minted_amount: Balance, + /// Current protocol version during this epoch. + #[default(PROTOCOL_VERSION)] + pub protocol_version: ProtocolVersion, } /// Information per each block. @@ -85,7 +92,9 @@ pub struct BlockInfo { pub epoch_id: EpochId, pub proposals: Vec, pub chunk_mask: Vec, - /// Validators slashed since the start of epoch or in previous epoch + /// Latest protocol version this validator observes. + pub latest_protocol_version: ProtocolVersion, + /// Validators slashed since the start of epoch or in previous epoch. pub slashed: HashMap, /// Total supply at this block. pub total_supply: Balance, @@ -95,6 +104,8 @@ pub struct BlockInfo { pub shard_tracker: HashMap>, /// All proposals in this epoch up to this block. pub all_proposals: Vec, + /// Protocol versions per validator. + pub version_tracker: HashMap, } impl BlockInfo { @@ -106,6 +117,7 @@ impl BlockInfo { validator_mask: Vec, slashed: Vec, total_supply: Balance, + latest_protocol_version: ProtocolVersion, ) -> Self { Self { height, @@ -122,12 +134,15 @@ impl BlockInfo { }) .collect(), total_supply, - // These values are not set. This code is suboptimal + latest_protocol_version, + // TODO(2610): These values are "tip" and maintain latest in the current chain. + // Current implementation is suboptimal and should be improved. 
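
For the epoch-end tally to have data, every block carries its producer's advertised version (latest_protocol_version) and BlockInfo keeps a per-validator map (version_tracker) that is carried forward block to block and reset at epoch boundaries. A minimal sketch of that bookkeeping follows; track_version is a hypothetical standalone stand-in for BlockInfo::update_version_tracker shown further below.

    // Minimal sketch, assuming ValidatorId = u64 and ProtocolVersion = u32 as in near_primitives:
    // each block overwrites its producer's entry with the version that producer advertises;
    // the map starts empty at every epoch boundary and is otherwise carried forward.
    use std::collections::HashMap;

    type ValidatorId = u64;
    type ProtocolVersion = u32;

    fn track_version(
        prev_version_tracker: HashMap<ValidatorId, ProtocolVersion>,
        is_epoch_start: bool,
        block_producer_id: ValidatorId,
        latest_protocol_version: ProtocolVersion,
    ) -> HashMap<ValidatorId, ProtocolVersion> {
        let mut tracker =
            if is_epoch_start { HashMap::default() } else { prev_version_tracker };
        tracker.insert(block_producer_id, latest_protocol_version);
        tracker
    }
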
epoch_first_block: CryptoHash::default(), epoch_id: EpochId::default(), block_tracker: HashMap::default(), shard_tracker: HashMap::default(), all_proposals: vec![], + version_tracker: HashMap::default(), } } @@ -186,16 +201,28 @@ impl BlockInfo { } self.shard_tracker = prev_shard_tracker; } + + pub fn update_version_tracker( + &mut self, + epoch_info: &EpochInfo, + mut prev_version_tracker: HashMap, + ) { + let block_producer_id = EpochManager::block_producer_from_info(epoch_info, self.height); + prev_version_tracker.insert(block_producer_id, self.latest_protocol_version); + self.version_tracker = prev_version_tracker; + } } pub struct EpochSummary { pub prev_epoch_last_block_hash: CryptoHash, - // Proposals from the epoch, only the latest one per account + /// Proposals from the epoch, only the latest one per account pub all_proposals: Vec, - // Kickout set, includes slashed + /// Kickout set, includes slashed pub validator_kickout: HashMap, - // Only for validators who met the threshold and didn't get slashed + /// Only for validators who met the threshold and didn't get slashed pub validator_block_chunk_stats: HashMap, + /// Protocol version for next epoch. + pub next_version: ProtocolVersion, } /// State that a slashed validator can be in. diff --git a/chain/jsonrpc/Cargo.toml b/chain/jsonrpc/Cargo.toml index 5ba074c9bb0..49aee88dfdc 100644 --- a/chain/jsonrpc/Cargo.toml +++ b/chain/jsonrpc/Cargo.toml @@ -15,7 +15,7 @@ prometheus = "0.8" serde = { version = "1", features = ["derive"] } serde_json = "1" validator = "0.10" -borsh = "0.6.1" +borsh = "0.6.2" near-chain-configs = { path = "../../core/chain-configs" } near-crypto = { path = "../../core/crypto" } diff --git a/chain/jsonrpc/tests/rpc_query.rs b/chain/jsonrpc/tests/rpc_query.rs index 4b80f642ebb..c705f6b84a4 100644 --- a/chain/jsonrpc/tests/rpc_query.rs +++ b/chain/jsonrpc/tests/rpc_query.rs @@ -3,7 +3,6 @@ use std::convert::TryFrom; use actix::{Actor, System}; use futures::{future, FutureExt}; -use near_chain_configs::PROTOCOL_VERSION; use near_crypto::{KeyType, PublicKey, Signature}; use near_jsonrpc::client::new_client; use near_jsonrpc_client::ChunkId; @@ -13,6 +12,7 @@ use near_primitives::account::{AccessKey, AccessKeyPermission}; use near_primitives::hash::CryptoHash; use near_primitives::rpc::{RpcGenesisRecordsRequest, RpcPagination, RpcQueryRequest}; use near_primitives::types::{BlockId, BlockIdOrFinality, Finality, ShardId}; +use near_primitives::version::PROTOCOL_VERSION; use near_primitives::views::{QueryRequest, QueryResponseKind}; #[macro_use] @@ -457,7 +457,6 @@ fn test_health_ok() { fn test_genesis_config() { test_with_client!(test_utils::NodeType::NonValidator, client, async move { let genesis_config = client.EXPERIMENTAL_genesis_config().await.unwrap(); - assert_eq!(genesis_config["config_version"].as_u64().unwrap(), 1); assert_eq!(genesis_config["protocol_version"].as_u64().unwrap(), PROTOCOL_VERSION as u64); assert!(!genesis_config["chain_id"].as_str().unwrap().is_empty()); assert!(!genesis_config.as_object().unwrap().contains_key("records")); diff --git a/chain/network/Cargo.toml b/chain/network/Cargo.toml index 1c912266360..c9084823490 100644 --- a/chain/network/Cargo.toml +++ b/chain/network/Cargo.toml @@ -19,7 +19,7 @@ byteorder = "1.2" lazy_static = "1.4" tracing = "0.1.13" -borsh = "0.6.1" +borsh = "0.6.2" cached = "0.12" near-chain-configs = { path = "../../core/chain-configs" } diff --git a/chain/network/src/peer.rs b/chain/network/src/peer.rs index c2665112e91..ea6067beee3 100644 --- 
a/chain/network/src/peer.rs +++ b/chain/network/src/peer.rs @@ -10,13 +10,13 @@ use actix::{ }; use tracing::{debug, error, info, warn}; -use near_chain_configs::PROTOCOL_VERSION; use near_metrics; use near_primitives::block::GenesisId; use near_primitives::hash::CryptoHash; use near_primitives::network::PeerId; use near_primitives::unwrap_option_or_return; use near_primitives::utils::DisplayOption; +use near_primitives::version::PROTOCOL_VERSION; use crate::codec::{bytes_to_peer_message, peer_message_to_bytes, Codec}; use crate::rate_counter::RateCounter; @@ -108,16 +108,16 @@ impl Tracker { self.sent_bytes.increment(size); } - fn has_received(&self, hash: CryptoHash) -> bool { - self.received.contains(&hash) + fn has_received(&self, hash: &CryptoHash) -> bool { + self.received.contains(hash) } fn push_received(&mut self, hash: CryptoHash) { self.received.push(hash); } - fn has_request(&self, hash: CryptoHash) -> bool { - self.requested.contains(&hash) + fn has_request(&self, hash: &CryptoHash) -> bool { + self.requested.contains(hash) } fn push_request(&mut self, hash: CryptoHash) { @@ -381,6 +381,7 @@ impl Peer { .do_send(PeerRequest::RouteBack(body, msg_hash.unwrap())); } Ok(NetworkViewClientResponses::Block(block)) => { + // MOO need protocol version act.send_message(PeerMessage::Block(*block)) } Ok(NetworkViewClientResponses::BlockHeaders(headers)) => { @@ -410,11 +411,10 @@ impl Peer { let network_client_msg = match msg { PeerMessage::Block(block) => { near_metrics::inc_counter(&metrics::PEER_BLOCK_RECEIVED_TOTAL); - let block_hash = block.hash(); + let block_hash = *block.hash(); self.tracker.push_received(block_hash); - self.chain_info.height = - max(self.chain_info.height, block.header.inner_lite.height); - NetworkClientMessages::Block(block, peer_id, self.tracker.has_request(block_hash)) + self.chain_info.height = max(self.chain_info.height, block.header().height()); + NetworkClientMessages::Block(block, peer_id, self.tracker.has_request(&block_hash)) } PeerMessage::Transaction(transaction) => { near_metrics::inc_counter(&metrics::PEER_TRANSACTION_RECEIVED_TOTAL); diff --git a/chain/network/src/recorder.rs b/chain/network/src/recorder.rs index 381bef939b6..c6b73f38b97 100644 --- a/chain/network/src/recorder.rs +++ b/chain/network/src/recorder.rs @@ -205,7 +205,7 @@ impl From<&PeerMessage> for PeerMessageMetadata { fn from(msg: &PeerMessage) -> Self { let hash = match msg { PeerMessage::Challenge(challenge) => Some(challenge.hash), - PeerMessage::Block(block) => Some(block.hash()), + PeerMessage::Block(block) => Some(*block.hash()), _ => None, }; diff --git a/chain/network/src/types.rs b/chain/network/src/types.rs index 6d9932a8781..1c1cb928a21 100644 --- a/chain/network/src/types.rs +++ b/chain/network/src/types.rs @@ -17,7 +17,6 @@ use tracing::{error, warn}; use near_chain::types::ShardStateSyncResponse; use near_chain::{Block, BlockHeader}; -use near_chain_configs::PROTOCOL_VERSION; use near_crypto::{PublicKey, SecretKey, Signature}; use near_metrics; use near_primitives::block::{Approval, ApprovalMessage, GenesisId}; @@ -31,6 +30,7 @@ use near_primitives::sharding::{ use near_primitives::transaction::{ExecutionOutcomeWithIdAndProof, SignedTransaction}; use near_primitives::types::{AccountId, BlockHeight, BlockIdOrFinality, EpochId, ShardId}; use near_primitives::utils::{from_timestamp, to_timestamp}; +use near_primitives::version::FIRST_BACKWARD_COMPATIBLE_PROTOCOL_VERSION; use near_primitives::views::{FinalExecutionOutcomeView, QueryRequest, QueryResponse}; use 
crate::metrics; @@ -177,7 +177,8 @@ impl Handshake { edge_info: EdgeInfo, ) -> Self { Handshake { - version: PROTOCOL_VERSION, + // TODO: figure out how we are going to indicate backward compatible versions of protocol. + version: FIRST_BACKWARD_COMPATIBLE_PROTOCOL_VERSION, peer_id, target_peer_id, listen_port, @@ -391,6 +392,9 @@ impl SyncData { } } +/// Warning, position of each message type in this enum defines the protocol due to serialization. +/// DO NOT MOVE, REORDER, DELETE items from the list. Only add new items to the end. +/// If need to remove old items - replace with `None`. #[derive(BorshSerialize, BorshDeserialize, Serialize, PartialEq, Eq, Clone, Debug)] // TODO(#1313): Use Box #[allow(clippy::large_enum_variant)] diff --git a/chain/network/tests/runner/mod.rs b/chain/network/tests/runner/mod.rs index b202081da97..109177b0cac 100644 --- a/chain/network/tests/runner/mod.rs +++ b/chain/network/tests/runner/mod.rs @@ -25,6 +25,7 @@ use near_network::{ }; use near_primitives::types::{AccountId, ValidatorId}; use near_primitives::validator_signer::InMemoryValidatorSigner; +use near_primitives::version::PROTOCOL_VERSION; use near_store::test_utils::create_test_store; use near_telemetry::{TelemetryActor, TelemetryConfig}; use num_rational::Rational; @@ -68,6 +69,7 @@ pub fn setup_network_node( Rational::from_integer(0), 1000, 5, + PROTOCOL_VERSION, ); let peer_manager = PeerManagerActor::create(move |ctx| { diff --git a/chain/pool/Cargo.toml b/chain/pool/Cargo.toml index 48a071ce42d..f7949446a6c 100644 --- a/chain/pool/Cargo.toml +++ b/chain/pool/Cargo.toml @@ -6,7 +6,7 @@ edition = "2018" [dependencies] rand = "0.7" -borsh = "0.6.1" +borsh = "0.6.2" near-crypto = { path = "../../core/crypto" } near-primitives = { path = "../../core/primitives" } diff --git a/core/chain-configs/src/client_config.rs b/core/chain-configs/src/client_config.rs index 421ef63c171..bb9ecd30211 100644 --- a/core/chain-configs/src/client_config.rs +++ b/core/chain-configs/src/client_config.rs @@ -4,7 +4,8 @@ use std::time::Duration; use serde::{Deserialize, Serialize}; -use near_primitives::types::{AccountId, BlockHeightDelta, NumBlocks, NumSeats, ShardId, Version}; +use near_primitives::types::{AccountId, BlockHeightDelta, NumBlocks, NumSeats, ShardId}; +use near_primitives::version::Version; #[derive(Clone, Serialize, Deserialize)] pub struct ClientConfig { diff --git a/core/chain-configs/src/genesis_config.rs b/core/chain-configs/src/genesis_config.rs index 2a479e5f5bf..13d42173d03 100644 --- a/core/chain-configs/src/genesis_config.rs +++ b/core/chain-configs/src/genesis_config.rs @@ -15,14 +15,12 @@ use smart_default::SmartDefault; use near_primitives::serialize::{u128_dec_format, u128_dec_format_compatible}; use near_primitives::state_record::StateRecord; use near_primitives::types::{ - AccountId, AccountInfo, Balance, BlockHeight, BlockHeightDelta, Gas, NumBlocks, NumSeats, + AccountId, AccountInfo, Balance, BlockHeight, BlockHeightDelta, EpochHeight, Gas, NumBlocks, + NumSeats, }; +use near_primitives::version::{ProtocolVersion, PROTOCOL_VERSION}; use near_runtime_configs::RuntimeConfig; -use crate::PROTOCOL_VERSION; - -pub const CONFIG_VERSION: u32 = 1; - fn default_online_min_threshold() -> Rational { Rational::new(90, 100) } @@ -31,14 +29,14 @@ fn default_online_max_threshold() -> Rational { Rational::new(99, 100) } +fn default_protocol_upgrade_stake_threshold() -> Rational { + Rational::new(8, 10) +} + #[derive(Debug, Clone, SmartDefault, Serialize, Deserialize)] pub struct GenesisConfig { - /// 
This is a version of a genesis config structure this version of binary works with. - /// If the binary tries to load a JSON config with a different version it will panic. - /// It's not a major protocol version, but used for automatic config migrations using scripts. - pub config_version: u32, /// Protocol version that this genesis works with. - pub protocol_version: u32, + pub protocol_version: ProtocolVersion, /// Official time of blockchain start. #[default(Utc::now())] pub genesis_time: DateTime, @@ -55,6 +53,12 @@ pub struct GenesisConfig { pub avg_hidden_validator_seats_per_shard: Vec, /// Enable dynamic re-sharding. pub dynamic_resharding: bool, + /// Threshold of stake that needs to indicate that they ready for upgrade. + #[serde(default = "default_protocol_upgrade_stake_threshold")] + #[default(Rational::new(8, 10))] + pub protocol_upgrade_stake_threshold: Rational, + /// Number of epochs after stake threshold was achieved to start next prtocol version. + pub protocol_upgrade_num_epochs: EpochHeight, /// Epoch length counted in block heights. pub epoch_length: BlockHeightDelta, /// Initial gas limit. diff --git a/core/chain-configs/src/lib.rs b/core/chain-configs/src/lib.rs index d337b4404d4..b824e5f12b9 100644 --- a/core/chain-configs/src/lib.rs +++ b/core/chain-configs/src/lib.rs @@ -2,9 +2,4 @@ mod client_config; mod genesis_config; pub use client_config::ClientConfig; -pub use genesis_config::{ - Genesis, GenesisConfig, GenesisRecords, CONFIG_VERSION as GENESIS_CONFIG_VERSION, -}; - -/// Current latest version of the protocol -pub const PROTOCOL_VERSION: u32 = 21; +pub use genesis_config::{Genesis, GenesisConfig, GenesisRecords}; diff --git a/core/crypto/Cargo.toml b/core/crypto/Cargo.toml index 9c8b3ae5a3b..26594dda394 100644 --- a/core/crypto/Cargo.toml +++ b/core/crypto/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" [dependencies] arrayref = "0.3" blake2 = "0.8" -borsh = "0.6.1" +borsh = "0.6.2" bs58 = "0.3" c2-chacha = "0.2" curve25519-dalek = "2" diff --git a/core/primitives/Cargo.toml b/core/primitives/Cargo.toml index af76a846139..54498258fcb 100644 --- a/core/primitives/Cargo.toml +++ b/core/primitives/Cargo.toml @@ -25,7 +25,7 @@ jemallocator = { version = "0.3", optional = true } hex = "0.4" num-rational = "0.2.4" -borsh = "0.6.1" +borsh = "0.6.2" near-crypto = { path = "../crypto" } near-vm-errors = { path = "../../runtime/near-vm-errors" } diff --git a/core/primitives/benches/serialization.rs b/core/primitives/benches/serialization.rs index d5f9487ab92..7639c177552 100644 --- a/core/primitives/benches/serialization.rs +++ b/core/primitives/benches/serialization.rs @@ -13,6 +13,7 @@ use near_primitives::test_utils::account_new; use near_primitives::transaction::{Action, SignedTransaction, Transaction, TransferAction}; use near_primitives::types::{EpochId, StateRoot}; use near_primitives::validator_signer::InMemoryValidatorSigner; +use near_primitives::version::PROTOCOL_VERSION; use num_rational::Rational; fn create_transaction() -> SignedTransaction { @@ -36,6 +37,7 @@ fn create_transaction() -> SignedTransaction { fn create_block() -> Block { let genesis_chunks = genesis_chunks(vec![StateRoot::default()], 1, 1_000, 0); let genesis = Block::genesis( + PROTOCOL_VERSION, genesis_chunks.into_iter().map(|chunk| chunk.header).collect(), Utc::now(), 0, @@ -45,9 +47,10 @@ fn create_block() -> Block { ); let signer = InMemoryValidatorSigner::from_random("".to_string(), KeyType::ED25519); Block::produce( - &genesis.header, + PROTOCOL_VERSION, + genesis.header(), 10, - 
vec![genesis.chunks[0].clone()], + vec![genesis.chunks()[0].clone()], EpochId::default(), EpochId::default(), vec![], diff --git a/core/primitives/src/block.rs b/core/primitives/src/block.rs index 412658b301e..8efb166a437 100644 --- a/core/primitives/src/block.rs +++ b/core/primitives/src/block.rs @@ -2,372 +2,33 @@ use std::cmp::max; use borsh::{BorshDeserialize, BorshSerialize}; use chrono::{DateTime, Utc}; +use num_rational::Rational; use serde::Serialize; -use near_crypto::{KeyType, PublicKey, Signature}; +use near_crypto::Signature; +pub use crate::block_header::*; use crate::challenge::{Challenges, ChallengesResult}; use crate::hash::{hash, CryptoHash}; -use crate::merkle::{combine_hash, merklize, verify_path, MerklePath}; +use crate::merkle::{merklize, verify_path, MerklePath}; use crate::sharding::{ ChunkHashHeight, EncodedShardChunk, ReedSolomonWrapper, ShardChunk, ShardChunkHeader, }; -use crate::types::{ - AccountId, Balance, BlockHeight, EpochId, Gas, MerkleHash, NumShards, StateRoot, ValidatorStake, -}; -use crate::utils::{from_timestamp, to_timestamp}; +use crate::types::{Balance, BlockHeight, EpochId, Gas, NumShards, StateRoot}; +use crate::utils::to_timestamp; use crate::validator_signer::{EmptyValidatorSigner, ValidatorSigner}; -use num_rational::Rational; - -#[derive(BorshSerialize, BorshDeserialize, Serialize, Debug, Clone, Eq, PartialEq)] -pub struct BlockHeaderInnerLite { - /// Height of this block since the genesis block (height 0). - pub height: BlockHeight, - /// Epoch start hash of this block's epoch. - /// Used for retrieving validator information - pub epoch_id: EpochId, - pub next_epoch_id: EpochId, - /// Root hash of the state at the previous block. - pub prev_state_root: MerkleHash, - /// Root of the outcomes of transactions and receipts. - pub outcome_root: MerkleHash, - /// Timestamp at which the block was built (number of non-leap-nanoseconds since January 1, 1970 0:00:00 UTC). - pub timestamp: u64, - /// Hash of the next epoch block producers set - pub next_bp_hash: CryptoHash, - /// Merkle root of block hashes up to the current block. - pub block_merkle_root: CryptoHash, -} - -#[derive(BorshSerialize, BorshDeserialize, Serialize, Debug, Clone, Eq, PartialEq)] -pub struct BlockHeaderInnerRest { - /// Root hash of the chunk receipts in the given block. - pub chunk_receipts_root: MerkleHash, - /// Root hash of the chunk headers in the given block. - pub chunk_headers_root: MerkleHash, - /// Root hash of the chunk transactions in the given block. - pub chunk_tx_root: MerkleHash, - /// Number of chunks included into the block. - pub chunks_included: u64, - /// Root hash of the challenges in the given block. - pub challenges_root: MerkleHash, - /// The output of the randomness beacon - pub random_value: CryptoHash, - /// Validator proposals. - pub validator_proposals: Vec, - /// Mask for new chunks included in the block - pub chunk_mask: Vec, - /// Gas price. Same for all chunks - pub gas_price: Balance, - /// Total supply of tokens in the system - pub total_supply: Balance, - /// List of challenges result from previous block. 
- pub challenges_result: ChallengesResult, - - /// Last block that has full BFT finality - pub last_final_block: CryptoHash, - /// Last block that has doomslug finality - pub last_ds_final_block: CryptoHash, - - /// All the approvals included in this block - pub approvals: Vec>, -} - -impl BlockHeaderInnerLite { - pub fn new( - height: BlockHeight, - epoch_id: EpochId, - next_epoch_id: EpochId, - prev_state_root: MerkleHash, - outcome_root: MerkleHash, - timestamp: u64, - next_bp_hash: CryptoHash, - block_merkle_root: CryptoHash, - ) -> Self { - Self { - height, - epoch_id, - next_epoch_id, - prev_state_root, - outcome_root, - timestamp, - next_bp_hash, - block_merkle_root, - } - } - - pub fn hash(&self) -> CryptoHash { - hash(&self.try_to_vec().expect("Failed to serialize")) - } -} - -impl BlockHeaderInnerRest { - pub fn new( - chunk_receipts_root: MerkleHash, - chunk_headers_root: MerkleHash, - chunk_tx_root: MerkleHash, - chunks_included: u64, - challenges_root: MerkleHash, - random_value: CryptoHash, - validator_proposals: Vec, - chunk_mask: Vec, - gas_price: Balance, - total_supply: Balance, - challenges_result: ChallengesResult, - last_final_block: CryptoHash, - last_ds_final_block: CryptoHash, - approvals: Vec>, - ) -> Self { - Self { - chunk_receipts_root, - chunk_headers_root, - chunk_tx_root, - chunks_included, - challenges_root, - random_value, - validator_proposals, - chunk_mask, - gas_price, - total_supply, - challenges_result, - last_final_block, - last_ds_final_block, - approvals, - } - } - - pub fn hash(&self) -> CryptoHash { - hash(&self.try_to_vec().expect("Failed to serialize")) - } -} - -/// The part of the block approval that is different for endorsements and skips -#[derive(BorshSerialize, BorshDeserialize, Serialize, Debug, Clone, PartialEq, Eq, Hash)] -pub enum ApprovalInner { - Endorsement(CryptoHash), - Skip(BlockHeight), -} - -/// Block approval by other block producers with a signature -#[derive(BorshSerialize, BorshDeserialize, Serialize, Debug, Clone, PartialEq, Eq)] -pub struct Approval { - pub inner: ApprovalInner, - pub target_height: BlockHeight, - pub signature: Signature, - pub account_id: AccountId, -} - -/// Block approval by other block producers. 
-#[derive(BorshSerialize, BorshDeserialize, Serialize, Debug, Clone, PartialEq, Eq)] -pub struct ApprovalMessage { - pub approval: Approval, - pub target: AccountId, -} - -impl ApprovalInner { - pub fn new( - parent_hash: &CryptoHash, - parent_height: BlockHeight, - target_height: BlockHeight, - ) -> Self { - if target_height == parent_height + 1 { - ApprovalInner::Endorsement(parent_hash.clone()) - } else { - ApprovalInner::Skip(parent_height) - } - } -} - -impl Approval { - pub fn new( - parent_hash: CryptoHash, - parent_height: BlockHeight, - target_height: BlockHeight, - signer: &dyn ValidatorSigner, - ) -> Self { - let inner = ApprovalInner::new(&parent_hash, parent_height, target_height); - let signature = signer.sign_approval(&inner, target_height); - Approval { inner, target_height, signature, account_id: signer.validator_id().clone() } - } - - pub fn get_data_for_sig(inner: &ApprovalInner, target_height: BlockHeight) -> Vec { - [inner.try_to_vec().unwrap().as_ref(), target_height.to_le_bytes().as_ref()].concat() - } -} +use crate::version::ProtocolVersion; -impl ApprovalMessage { - pub fn new(approval: Approval, target: AccountId) -> Self { - ApprovalMessage { approval, target } - } -} - -#[derive(BorshSerialize, BorshDeserialize, Serialize, Debug, Clone, Eq, PartialEq)] -#[borsh_init(init)] -pub struct BlockHeader { - pub prev_hash: CryptoHash, - - /// Inner part of the block header that gets hashed, split into two parts, one that is sent - /// to light clients, and the rest - pub inner_lite: BlockHeaderInnerLite, - pub inner_rest: BlockHeaderInnerRest, - - /// Signature of the block producer. - pub signature: Signature, - - /// Cached value of hash for this block. - #[borsh_skip] +#[derive(BorshSerialize, BorshDeserialize, Serialize, Clone, Debug, Eq, PartialEq, Default)] +pub struct GenesisId { + /// Chain Id + pub chain_id: String, + /// Hash of genesis block pub hash: CryptoHash, } -impl BlockHeader { - pub fn compute_inner_hash( - inner_lite: &BlockHeaderInnerLite, - inner_rest: &BlockHeaderInnerRest, - ) -> CryptoHash { - let hash_lite = inner_lite.hash(); - let hash_rest = inner_rest.hash(); - combine_hash(hash_lite, hash_rest) - } - - pub fn compute_hash( - prev_hash: CryptoHash, - inner_lite: &BlockHeaderInnerLite, - inner_rest: &BlockHeaderInnerRest, - ) -> CryptoHash { - let hash_inner = BlockHeader::compute_inner_hash(inner_lite, inner_rest); - - return combine_hash(hash_inner, prev_hash); - } - - pub fn init(&mut self) { - self.hash = BlockHeader::compute_hash(self.prev_hash, &self.inner_lite, &self.inner_rest); - } - - pub fn new( - height: BlockHeight, - prev_hash: CryptoHash, - prev_state_root: MerkleHash, - chunk_receipts_root: MerkleHash, - chunk_headers_root: MerkleHash, - chunk_tx_root: MerkleHash, - outcome_root: MerkleHash, - timestamp: u64, - chunks_included: u64, - challenges_root: MerkleHash, - random_value: CryptoHash, - validator_proposals: Vec, - chunk_mask: Vec, - epoch_id: EpochId, - next_epoch_id: EpochId, - gas_price: Balance, - total_supply: Balance, - challenges_result: ChallengesResult, - signer: &dyn ValidatorSigner, - last_final_block: CryptoHash, - last_ds_final_block: CryptoHash, - approvals: Vec>, - next_bp_hash: CryptoHash, - block_merkle_root: CryptoHash, - ) -> Self { - let inner_lite = BlockHeaderInnerLite::new( - height, - epoch_id, - next_epoch_id, - prev_state_root, - outcome_root, - timestamp, - next_bp_hash, - block_merkle_root, - ); - let inner_rest = BlockHeaderInnerRest::new( - chunk_receipts_root, - chunk_headers_root, - 
chunk_tx_root, - chunks_included, - challenges_root, - random_value, - validator_proposals, - chunk_mask, - gas_price, - total_supply, - challenges_result, - last_final_block, - last_ds_final_block, - approvals, - ); - let (hash, signature) = signer.sign_block_header_parts(prev_hash, &inner_lite, &inner_rest); - Self { prev_hash, inner_lite, inner_rest, signature, hash } - } - - pub fn genesis( - height: BlockHeight, - state_root: MerkleHash, - chunk_receipts_root: MerkleHash, - chunk_headers_root: MerkleHash, - chunk_tx_root: MerkleHash, - chunks_included: u64, - challenges_root: MerkleHash, - timestamp: DateTime, - initial_gas_price: Balance, - initial_total_supply: Balance, - next_bp_hash: CryptoHash, - ) -> Self { - let inner_lite = BlockHeaderInnerLite::new( - height, - EpochId::default(), - EpochId::default(), - state_root, - CryptoHash::default(), - to_timestamp(timestamp), - next_bp_hash, - CryptoHash::default(), - ); - let inner_rest = BlockHeaderInnerRest::new( - chunk_receipts_root, - chunk_headers_root, - chunk_tx_root, - chunks_included, - challenges_root, - CryptoHash::default(), - vec![], - vec![], - initial_gas_price, - initial_total_supply, - vec![], - CryptoHash::default(), - CryptoHash::default(), - vec![], - ); - let hash = BlockHeader::compute_hash(CryptoHash::default(), &inner_lite, &inner_rest); - Self { - prev_hash: CryptoHash::default(), - inner_lite, - inner_rest, - signature: Signature::empty(KeyType::ED25519), - hash, - } - } - - pub fn hash(&self) -> CryptoHash { - self.hash - } - - /// Verifies that given public key produced the block. - pub fn verify_block_producer(&self, public_key: &PublicKey) -> bool { - self.signature.verify(self.hash.as_ref(), public_key) - } - - pub fn timestamp(&self) -> DateTime { - from_timestamp(self.inner_lite.timestamp) - } - - pub fn num_approvals(&self) -> u64 { - self.inner_rest.approvals.iter().filter(|x| x.is_some()).count() as u64 - } -} - #[derive(BorshSerialize, BorshDeserialize, Serialize, Debug, Clone, Eq, PartialEq)] -pub struct Block { +pub struct BlockV1 { pub header: BlockHeader, pub chunks: Vec, pub challenges: Challenges, @@ -377,6 +38,13 @@ pub struct Block { pub vrf_proof: near_crypto::vrf::Proof, } +/// Versioned Block data structure. +/// For each next version, document what are the changes between versions. +#[derive(BorshSerialize, BorshDeserialize, Serialize, Debug, Clone, Eq, PartialEq)] +pub enum Block { + BlockV1(Box), +} + pub fn genesis_chunks( state_roots: Vec, num_shards: NumShards, @@ -416,6 +84,7 @@ pub fn genesis_chunks( impl Block { /// Returns genesis block for given genesis date and state root. pub fn genesis( + genesis_protocol_version: ProtocolVersion, chunks: Vec, timestamp: DateTime, height: BlockHeight, @@ -424,8 +93,9 @@ impl Block { next_bp_hash: CryptoHash, ) -> Self { let challenges = vec![]; - Block { + Block::BlockV1(Box::new(BlockV1 { header: BlockHeader::genesis( + genesis_protocol_version, height, Block::compute_state_root(&chunks), Block::compute_chunk_receipts_root(&chunks), @@ -443,11 +113,12 @@ impl Block { vrf_value: near_crypto::vrf::Value([0; 32]), vrf_proof: near_crypto::vrf::Proof([0; 64]), - } + })) } /// Produces new block from header of previous block, current state root and set of transactions. 
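
With Block and BlockHeader turned into single-variant enums (BlockV1, BlockHeaderV1 behind Box), consumers go through accessor methods instead of reaching into header.inner_lite/inner_rest, so a future BlockV2 only has to extend the match arms inside the accessors. A hypothetical call-site sketch (the helper names are made up; it assumes the near_primitives types above are in scope):

    // Hypothetical call-site sketch: accessors dispatch over the versioned enum,
    // so these helpers keep compiling when a new Block variant is added later.
    fn tip_height(block: &Block) -> BlockHeight {
        // before this patch: block.header.inner_lite.height
        block.header().height()
    }

    fn gas_price_matches(block: &Block, expected: Balance) -> bool {
        // before this patch: block.header.inner_rest.gas_price == expected
        block.header().gas_price() == expected
    }
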
pub fn produce( + protocol_version: ProtocolVersion, prev: &BlockHeader, height: BlockHeight, chunks: Vec, @@ -482,42 +153,36 @@ impl Block { } } let new_gas_price = Self::compute_new_gas_price( - prev.inner_rest.gas_price, + prev.gas_price(), gas_used, gas_limit, gas_price_adjustment_rate, ); let new_gas_price = std::cmp::max(new_gas_price, min_gas_price); - let new_total_supply = - prev.inner_rest.total_supply + minted_amount.unwrap_or(0) - balance_burnt; + let new_total_supply = prev.total_supply() + minted_amount.unwrap_or(0) - balance_burnt; let now = to_timestamp(Utc::now()); - let time = - if now <= prev.inner_lite.timestamp { prev.inner_lite.timestamp + 1 } else { now }; + let time = if now <= prev.raw_timestamp() { prev.raw_timestamp() + 1 } else { now }; - let (vrf_value, vrf_proof) = - signer.compute_vrf_with_proof(prev.inner_rest.random_value.as_ref()); + let (vrf_value, vrf_proof) = signer.compute_vrf_with_proof(prev.random_value().as_ref()); let random_value = hash(vrf_value.0.as_ref()); - let last_ds_final_block = if height == prev.inner_lite.height + 1 { - prev.hash() - } else { - prev.inner_rest.last_ds_final_block - }; + let last_ds_final_block = + if height == prev.height() + 1 { prev.hash() } else { prev.last_ds_final_block() }; - let last_final_block = if height == prev.inner_lite.height + 1 - && prev.inner_rest.last_ds_final_block == prev.prev_hash - { - prev.prev_hash - } else { - prev.inner_rest.last_final_block - }; + let last_final_block = + if height == prev.height() + 1 && prev.last_ds_final_block() == prev.prev_hash() { + prev.prev_hash() + } else { + prev.last_final_block() + }; - Block { + Block::BlockV1(Box::new(BlockV1 { header: BlockHeader::new( + protocol_version, height, - prev.hash(), + prev.hash().clone(), Block::compute_state_root(&chunks), Block::compute_chunk_receipts_root(&chunks), Block::compute_chunk_headers_root(&chunks).0, @@ -535,8 +200,8 @@ impl Block { new_total_supply, challenges_result, signer, - last_final_block, - last_ds_final_block, + last_final_block.clone(), + last_ds_final_block.clone(), approvals, next_bp_hash, block_merkle_root, @@ -546,7 +211,7 @@ impl Block { vrf_value, vrf_proof, - } + })) } pub fn verify_gas_price( @@ -555,15 +220,15 @@ impl Block { min_gas_price: Balance, gas_price_adjustment_rate: Rational, ) -> bool { - let gas_used = Self::compute_gas_used(&self.chunks, self.header.inner_lite.height); - let gas_limit = Self::compute_gas_limit(&self.chunks, self.header.inner_lite.height); + let gas_used = Self::compute_gas_used(self.chunks(), self.header().height()); + let gas_limit = Self::compute_gas_limit(self.chunks(), self.header().height()); let expected_price = Self::compute_new_gas_price( prev_gas_price, gas_used, gas_limit, gas_price_adjustment_rate, ); - self.header.inner_rest.gas_price == max(expected_price, min_gas_price) + self.header().gas_price() == max(expected_price, min_gas_price) } pub fn compute_new_gas_price( @@ -584,14 +249,14 @@ impl Block { } } - pub fn compute_state_root(chunks: &Vec) -> CryptoHash { + pub fn compute_state_root(chunks: &[ShardChunkHeader]) -> CryptoHash { merklize( &chunks.iter().map(|chunk| chunk.inner.prev_state_root).collect::>(), ) .0 } - pub fn compute_chunk_receipts_root(chunks: &Vec) -> CryptoHash { + pub fn compute_chunk_receipts_root(chunks: &[ShardChunkHeader]) -> CryptoHash { merklize( &chunks .iter() @@ -602,7 +267,7 @@ impl Block { } pub fn compute_chunk_headers_root( - chunks: &Vec, + chunks: &[ShardChunkHeader], ) -> (CryptoHash, Vec) { merklize( &chunks @@ -612,15 
+277,15 @@ impl Block { ) } - pub fn compute_chunk_tx_root(chunks: &Vec) -> CryptoHash { + pub fn compute_chunk_tx_root(chunks: &[ShardChunkHeader]) -> CryptoHash { merklize(&chunks.iter().map(|chunk| chunk.inner.tx_root).collect::>()).0 } - pub fn compute_chunks_included(chunks: &Vec, height: BlockHeight) -> u64 { + pub fn compute_chunks_included(chunks: &[ShardChunkHeader], height: BlockHeight) -> u64 { chunks.iter().filter(|chunk| chunk.height_included == height).count() as u64 } - pub fn compute_outcome_root(chunks: &Vec) -> CryptoHash { + pub fn compute_outcome_root(chunks: &[ShardChunkHeader]) -> CryptoHash { merklize(&chunks.iter().map(|chunk| chunk.inner.outcome_root).collect::>()) .0 } @@ -661,45 +326,75 @@ impl Block { ) } - pub fn hash(&self) -> CryptoHash { - self.header.hash() + pub fn header(&self) -> &BlockHeader { + match self { + Block::BlockV1(block) => &block.header, + } + } + + pub fn chunks(&self) -> &Vec { + match self { + Block::BlockV1(block) => &block.chunks, + } + } + + pub fn challenges(&self) -> &Challenges { + match self { + Block::BlockV1(block) => &block.challenges, + } + } + + pub fn vrf_value(&self) -> &near_crypto::vrf::Value { + match self { + Block::BlockV1(block) => &block.vrf_value, + } + } + + pub fn vrf_proof(&self) -> &near_crypto::vrf::Proof { + match self { + Block::BlockV1(block) => &block.vrf_proof, + } + } + + pub fn hash(&self) -> &CryptoHash { + self.header().hash() } pub fn check_validity(&self) -> bool { // Check that state root stored in the header matches the state root of the chunks - let state_root = Block::compute_state_root(&self.chunks); - if self.header.inner_lite.prev_state_root != state_root { + let state_root = Block::compute_state_root(self.chunks()); + if self.header().prev_state_root() != &state_root { return false; } // Check that chunk receipts root stored in the header matches the state root of the chunks - let chunk_receipts_root = Block::compute_chunk_receipts_root(&self.chunks); - if self.header.inner_rest.chunk_receipts_root != chunk_receipts_root { + let chunk_receipts_root = Block::compute_chunk_receipts_root(self.chunks()); + if self.header().chunk_receipts_root() != &chunk_receipts_root { return false; } // Check that chunk headers root stored in the header matches the chunk headers root of the chunks - let chunk_headers_root = Block::compute_chunk_headers_root(&self.chunks).0; - if self.header.inner_rest.chunk_headers_root != chunk_headers_root { + let chunk_headers_root = Block::compute_chunk_headers_root(self.chunks()).0; + if self.header().chunk_headers_root() != &chunk_headers_root { return false; } // Check that chunk tx root stored in the header matches the tx root of the chunks - let chunk_tx_root = Block::compute_chunk_tx_root(&self.chunks); - if self.header.inner_rest.chunk_tx_root != chunk_tx_root { + let chunk_tx_root = Block::compute_chunk_tx_root(self.chunks()); + if self.header().chunk_tx_root() != &chunk_tx_root { return false; } // Check that chunk included root stored in the header matches the chunk included root of the chunks let chunks_included_root = - Block::compute_chunks_included(&self.chunks, self.header.inner_lite.height); - if self.header.inner_rest.chunks_included != chunks_included_root { + Block::compute_chunks_included(self.chunks(), self.header().height()); + if self.header().chunks_included() != chunks_included_root { return false; } // Check that challenges root stored in the header matches the challenges root of the challenges - let challenges_root = 
Block::compute_challenges_root(&self.challenges); - if self.header.inner_rest.challenges_root != challenges_root { + let challenges_root = Block::compute_challenges_root(&self.challenges()); + if self.header().challenges_root() != &challenges_root { return false; } @@ -707,14 +402,6 @@ impl Block { } } -#[derive(BorshSerialize, BorshDeserialize, Serialize, Clone, Debug, Eq, PartialEq, Default)] -pub struct GenesisId { - /// Chain Id - pub chain_id: String, - /// Hash of genesis block - pub hash: CryptoHash, -} - /// The tip of a fork. A handle to the fork ancestry from its leaf in the /// blockchain tree. References the max height and the latest and previous /// blocks for convenience @@ -736,11 +423,11 @@ impl Tip { /// Creates a new tip based on provided header. pub fn from_header(header: &BlockHeader) -> Tip { Tip { - height: header.inner_lite.height, - last_block_hash: header.hash(), - prev_block_hash: header.prev_hash, - epoch_id: header.inner_lite.epoch_id.clone(), - next_epoch_id: header.inner_lite.next_epoch_id.clone(), + height: header.height(), + last_block_hash: header.hash().clone(), + prev_block_hash: header.prev_hash().clone(), + epoch_id: header.epoch_id().clone(), + next_epoch_id: header.next_epoch_id().clone(), } } } diff --git a/core/primitives/src/block_header.rs b/core/primitives/src/block_header.rs new file mode 100644 index 00000000000..da76bcf62c6 --- /dev/null +++ b/core/primitives/src/block_header.rs @@ -0,0 +1,488 @@ +use borsh::{BorshDeserialize, BorshSerialize}; +use chrono::{DateTime, Utc}; +use serde::Serialize; + +use near_crypto::{KeyType, PublicKey, Signature}; + +use crate::challenge::ChallengesResult; +use crate::hash::{hash, CryptoHash}; +use crate::merkle::combine_hash; +use crate::types::{AccountId, Balance, BlockHeight, EpochId, MerkleHash, ValidatorStake}; +use crate::utils::{from_timestamp, to_timestamp}; +use crate::validator_signer::ValidatorSigner; +use crate::version::{ProtocolVersion, PROTOCOL_VERSION}; + +#[derive(BorshSerialize, BorshDeserialize, Serialize, Debug, Clone, Eq, PartialEq)] +pub struct BlockHeaderInnerLite { + /// Height of this block since the genesis block (height 0). + pub height: BlockHeight, + /// Epoch start hash of this block's epoch. + /// Used for retrieving validator information + pub epoch_id: EpochId, + pub next_epoch_id: EpochId, + /// Root hash of the state at the previous block. + pub prev_state_root: MerkleHash, + /// Root of the outcomes of transactions and receipts. + pub outcome_root: MerkleHash, + /// Timestamp at which the block was built (number of non-leap-nanoseconds since January 1, 1970 0:00:00 UTC). + pub timestamp: u64, + /// Hash of the next epoch block producers set + pub next_bp_hash: CryptoHash, + /// Merkle root of block hashes up to the current block. + pub block_merkle_root: CryptoHash, +} + +#[derive(BorshSerialize, BorshDeserialize, Serialize, Debug, Clone, Eq, PartialEq)] +pub struct BlockHeaderInnerRest { + /// Root hash of the chunk receipts in the given block. + pub chunk_receipts_root: MerkleHash, + /// Root hash of the chunk headers in the given block. + pub chunk_headers_root: MerkleHash, + /// Root hash of the chunk transactions in the given block. + pub chunk_tx_root: MerkleHash, + /// Number of chunks included into the block. + pub chunks_included: u64, + /// Root hash of the challenges in the given block. + pub challenges_root: MerkleHash, + /// The output of the randomness beacon + pub random_value: CryptoHash, + /// Validator proposals. 
+ pub validator_proposals: Vec, + /// Mask for new chunks included in the block + pub chunk_mask: Vec, + /// Gas price. Same for all chunks + pub gas_price: Balance, + /// Total supply of tokens in the system + pub total_supply: Balance, + /// List of challenges result from previous block. + pub challenges_result: ChallengesResult, + + /// Last block that has full BFT finality + pub last_final_block: CryptoHash, + /// Last block that has doomslug finality + pub last_ds_final_block: CryptoHash, + + /// All the approvals included in this block + pub approvals: Vec>, + + /// Latest protocol version that this block producer has. + pub latest_protocol_version: ProtocolVersion, +} + +/// The part of the block approval that is different for endorsements and skips +#[derive(BorshSerialize, BorshDeserialize, Serialize, Debug, Clone, PartialEq, Eq, Hash)] +pub enum ApprovalInner { + Endorsement(CryptoHash), + Skip(BlockHeight), +} + +/// Block approval by other block producers with a signature +#[derive(BorshSerialize, BorshDeserialize, Serialize, Debug, Clone, PartialEq, Eq)] +pub struct Approval { + pub inner: ApprovalInner, + pub target_height: BlockHeight, + pub signature: Signature, + pub account_id: AccountId, +} + +/// Block approval by other block producers. +#[derive(BorshSerialize, BorshDeserialize, Serialize, Debug, Clone, PartialEq, Eq)] +pub struct ApprovalMessage { + pub approval: Approval, + pub target: AccountId, +} + +impl ApprovalInner { + pub fn new( + parent_hash: &CryptoHash, + parent_height: BlockHeight, + target_height: BlockHeight, + ) -> Self { + if target_height == parent_height + 1 { + ApprovalInner::Endorsement(parent_hash.clone()) + } else { + ApprovalInner::Skip(parent_height) + } + } +} + +impl Approval { + pub fn new( + parent_hash: CryptoHash, + parent_height: BlockHeight, + target_height: BlockHeight, + signer: &dyn ValidatorSigner, + ) -> Self { + let inner = ApprovalInner::new(&parent_hash, parent_height, target_height); + let signature = signer.sign_approval(&inner, target_height); + Approval { inner, target_height, signature, account_id: signer.validator_id().clone() } + } + + pub fn get_data_for_sig(inner: &ApprovalInner, target_height: BlockHeight) -> Vec { + [inner.try_to_vec().unwrap().as_ref(), target_height.to_be_bytes().as_ref()].concat() + } +} + +impl ApprovalMessage { + pub fn new(approval: Approval, target: AccountId) -> Self { + ApprovalMessage { approval, target } + } +} + +#[derive(BorshSerialize, BorshDeserialize, Serialize, Debug, Clone, Eq, PartialEq)] +#[borsh_init(init)] +pub struct BlockHeaderV1 { + pub prev_hash: CryptoHash, + + /// Inner part of the block header that gets hashed, split into two parts, one that is sent + /// to light clients, and the rest + pub inner_lite: BlockHeaderInnerLite, + pub inner_rest: BlockHeaderInnerRest, + + /// Signature of the block producer. + pub signature: Signature, + + /// Cached value of hash for this block. + #[borsh_skip] + pub hash: CryptoHash, +} + +impl BlockHeaderV1 { + pub fn init(&mut self) { + self.hash = BlockHeader::compute_hash( + self.prev_hash, + &self.inner_lite.try_to_vec().expect("Failed to serialize"), + &self.inner_rest.try_to_vec().expect("Failed to serialize"), + ); + } +} + +/// Versioned BlockHeader data structure. +/// For each next version, document what are the changes between versions. 
+#[derive(BorshSerialize, BorshDeserialize, Serialize, Debug, Clone, Eq, PartialEq)] +pub enum BlockHeader { + BlockHeaderV1(Box), +} + +impl BlockHeader { + pub fn compute_inner_hash(inner_lite: &[u8], inner_rest: &[u8]) -> CryptoHash { + let hash_lite = hash(inner_lite); + let hash_rest = hash(inner_rest); + combine_hash(hash_lite, hash_rest) + } + + pub fn compute_hash(prev_hash: CryptoHash, inner_lite: &[u8], inner_rest: &[u8]) -> CryptoHash { + let hash_inner = BlockHeader::compute_inner_hash(inner_lite, inner_rest); + + return combine_hash(hash_inner, prev_hash); + } + + pub fn new( + _protocol_version: ProtocolVersion, + height: BlockHeight, + prev_hash: CryptoHash, + prev_state_root: MerkleHash, + chunk_receipts_root: MerkleHash, + chunk_headers_root: MerkleHash, + chunk_tx_root: MerkleHash, + outcome_root: MerkleHash, + timestamp: u64, + chunks_included: u64, + challenges_root: MerkleHash, + random_value: CryptoHash, + validator_proposals: Vec, + chunk_mask: Vec, + epoch_id: EpochId, + next_epoch_id: EpochId, + gas_price: Balance, + total_supply: Balance, + challenges_result: ChallengesResult, + signer: &dyn ValidatorSigner, + last_final_block: CryptoHash, + last_ds_final_block: CryptoHash, + approvals: Vec>, + next_bp_hash: CryptoHash, + block_merkle_root: CryptoHash, + ) -> Self { + let inner_lite = BlockHeaderInnerLite { + height, + epoch_id, + next_epoch_id, + prev_state_root, + outcome_root, + timestamp, + next_bp_hash, + block_merkle_root, + }; + let inner_rest = BlockHeaderInnerRest { + chunk_receipts_root, + chunk_headers_root, + chunk_tx_root, + chunks_included, + challenges_root, + random_value, + validator_proposals, + chunk_mask, + gas_price, + total_supply, + challenges_result, + last_final_block, + last_ds_final_block, + approvals, + latest_protocol_version: PROTOCOL_VERSION, + }; + let (hash, signature) = signer.sign_block_header_parts( + prev_hash, + &inner_lite.try_to_vec().expect("Failed to serialize"), + &inner_rest.try_to_vec().expect("Failed to serialize"), + ); + Self::BlockHeaderV1(Box::new(BlockHeaderV1 { + prev_hash, + inner_lite, + inner_rest, + signature, + hash, + })) + } + + pub fn genesis( + genesis_protocol_version: ProtocolVersion, + height: BlockHeight, + state_root: MerkleHash, + chunk_receipts_root: MerkleHash, + chunk_headers_root: MerkleHash, + chunk_tx_root: MerkleHash, + chunks_included: u64, + challenges_root: MerkleHash, + timestamp: DateTime, + initial_gas_price: Balance, + initial_total_supply: Balance, + next_bp_hash: CryptoHash, + ) -> Self { + let inner_lite = BlockHeaderInnerLite { + height, + epoch_id: EpochId::default(), + next_epoch_id: EpochId::default(), + prev_state_root: state_root, + outcome_root: CryptoHash::default(), + timestamp: to_timestamp(timestamp), + next_bp_hash, + block_merkle_root: CryptoHash::default(), + }; + let inner_rest = BlockHeaderInnerRest { + chunk_receipts_root, + chunk_headers_root, + chunk_tx_root, + chunks_included, + challenges_root, + random_value: CryptoHash::default(), + validator_proposals: vec![], + chunk_mask: vec![], + gas_price: initial_gas_price, + total_supply: initial_total_supply, + challenges_result: vec![], + last_final_block: CryptoHash::default(), + last_ds_final_block: CryptoHash::default(), + approvals: vec![], + latest_protocol_version: genesis_protocol_version, + }; + let hash = BlockHeader::compute_hash( + CryptoHash::default(), + &inner_lite.try_to_vec().expect("Failed to serialize"), + &inner_rest.try_to_vec().expect("Failed to serialize"), + ); + // Genesis always has v1 of 
BlockHeader. + Self::BlockHeaderV1(Box::new(BlockHeaderV1 { + prev_hash: CryptoHash::default(), + inner_lite, + inner_rest, + signature: Signature::empty(KeyType::ED25519), + hash, + })) + } + + pub fn hash(&self) -> &CryptoHash { + match self { + BlockHeader::BlockHeaderV1(header) => &header.hash, + } + } + + pub fn prev_hash(&self) -> &CryptoHash { + match self { + BlockHeader::BlockHeaderV1(header) => &header.prev_hash, + } + } + + pub fn signature(&self) -> &Signature { + match self { + BlockHeader::BlockHeaderV1(header) => &header.signature, + } + } + + pub fn height(&self) -> BlockHeight { + match self { + BlockHeader::BlockHeaderV1(header) => header.inner_lite.height, + } + } + + pub fn epoch_id(&self) -> &EpochId { + match self { + BlockHeader::BlockHeaderV1(header) => &header.inner_lite.epoch_id, + } + } + + pub fn next_epoch_id(&self) -> &EpochId { + match self { + BlockHeader::BlockHeaderV1(header) => &header.inner_lite.next_epoch_id, + } + } + + pub fn prev_state_root(&self) -> &MerkleHash { + match self { + BlockHeader::BlockHeaderV1(header) => &header.inner_lite.prev_state_root, + } + } + + pub fn chunk_receipts_root(&self) -> &MerkleHash { + match self { + BlockHeader::BlockHeaderV1(header) => &header.inner_rest.chunk_receipts_root, + } + } + + pub fn chunk_headers_root(&self) -> &MerkleHash { + match self { + BlockHeader::BlockHeaderV1(header) => &header.inner_rest.chunk_headers_root, + } + } + + pub fn chunk_tx_root(&self) -> &MerkleHash { + match self { + BlockHeader::BlockHeaderV1(header) => &header.inner_rest.chunk_tx_root, + } + } + + pub fn chunks_included(&self) -> u64 { + match self { + BlockHeader::BlockHeaderV1(header) => header.inner_rest.chunks_included, + } + } + + pub fn challenges_root(&self) -> &MerkleHash { + match self { + BlockHeader::BlockHeaderV1(header) => &header.inner_rest.challenges_root, + } + } + + pub fn outcome_root(&self) -> &MerkleHash { + match self { + BlockHeader::BlockHeaderV1(header) => &header.inner_lite.outcome_root, + } + } + + pub fn raw_timestamp(&self) -> u64 { + match self { + BlockHeader::BlockHeaderV1(header) => header.inner_lite.timestamp, + } + } + + pub fn validator_proposals(&self) -> &[ValidatorStake] { + match self { + BlockHeader::BlockHeaderV1(header) => &header.inner_rest.validator_proposals, + } + } + + pub fn chunk_mask(&self) -> &[bool] { + match self { + BlockHeader::BlockHeaderV1(header) => &header.inner_rest.chunk_mask, + } + } + + pub fn gas_price(&self) -> Balance { + match self { + BlockHeader::BlockHeaderV1(header) => header.inner_rest.gas_price, + } + } + + pub fn total_supply(&self) -> Balance { + match self { + BlockHeader::BlockHeaderV1(header) => header.inner_rest.total_supply, + } + } + + pub fn random_value(&self) -> &CryptoHash { + match self { + BlockHeader::BlockHeaderV1(header) => &header.inner_rest.random_value, + } + } + + pub fn last_final_block(&self) -> &CryptoHash { + match self { + BlockHeader::BlockHeaderV1(header) => &header.inner_rest.last_final_block, + } + } + + pub fn last_ds_final_block(&self) -> &CryptoHash { + match self { + BlockHeader::BlockHeaderV1(header) => &header.inner_rest.last_ds_final_block, + } + } + + pub fn challenges_result(&self) -> &ChallengesResult { + match self { + BlockHeader::BlockHeaderV1(header) => &header.inner_rest.challenges_result, + } + } + + pub fn next_bp_hash(&self) -> &CryptoHash { + match self { + BlockHeader::BlockHeaderV1(header) => &header.inner_lite.next_bp_hash, + } + } + + pub fn block_merkle_root(&self) -> &CryptoHash { + match self { + 
BlockHeader::BlockHeaderV1(header) => &header.inner_lite.block_merkle_root, + } + } + + pub fn approvals(&self) -> &[Option] { + match self { + BlockHeader::BlockHeaderV1(header) => &header.inner_rest.approvals, + } + } + + /// Verifies that given public key produced the block. + pub fn verify_block_producer(&self, public_key: &PublicKey) -> bool { + self.signature().verify(self.hash().as_ref(), public_key) + } + + pub fn timestamp(&self) -> DateTime { + from_timestamp(self.raw_timestamp()) + } + + pub fn num_approvals(&self) -> u64 { + self.approvals().iter().filter(|x| x.is_some()).count() as u64 + } + + pub fn latest_protocol_version(&self) -> u32 { + match self { + BlockHeader::BlockHeaderV1(header) => header.inner_rest.latest_protocol_version, + } + } + + pub fn inner_lite_bytes(&self) -> Vec { + match self { + BlockHeader::BlockHeaderV1(header) => { + header.inner_lite.try_to_vec().expect("Failed to serialize") + } + } + } + + pub fn inner_rest_bytes(&self) -> Vec { + match self { + BlockHeader::BlockHeaderV1(header) => { + header.inner_rest.try_to_vec().expect("Failed to serialize") + } + } + } +} diff --git a/core/primitives/src/lib.rs b/core/primitives/src/lib.rs index 1a2db4f9e9d..83663f29685 100644 --- a/core/primitives/src/lib.rs +++ b/core/primitives/src/lib.rs @@ -9,6 +9,7 @@ pub use borsh; pub mod account; pub mod block; +pub mod block_header; pub mod challenge; pub mod contract; pub mod errors; @@ -28,4 +29,5 @@ pub mod trie_key; pub mod types; pub mod utils; pub mod validator_signer; +pub mod version; pub mod views; diff --git a/core/primitives/src/test_utils.rs b/core/primitives/src/test_utils.rs index 7cd8a044a4f..0b4fbbf7f4d 100644 --- a/core/primitives/src/test_utils.rs +++ b/core/primitives/src/test_utils.rs @@ -2,6 +2,7 @@ use near_crypto::{EmptySigner, PublicKey, Signature, Signer}; use crate::account::{AccessKey, AccessKeyPermission, Account}; use crate::block::Block; +use crate::block_header::{BlockHeader, BlockHeaderV1}; use crate::errors::EpochError; use crate::hash::CryptoHash; use crate::merkle::PartialMerkleTree; @@ -12,6 +13,7 @@ use crate::transaction::{ }; use crate::types::{AccountId, Balance, BlockHeight, EpochId, EpochInfoProvider, Gas, Nonce}; use crate::validator_signer::ValidatorSigner; +use crate::version::PROTOCOL_VERSION; use num_rational::Rational; use std::collections::HashMap; @@ -240,7 +242,32 @@ impl SignedTransaction { } } +impl BlockHeader { + pub fn get_mut(&mut self) -> &mut BlockHeaderV1 { + match self { + BlockHeader::BlockHeaderV1(header) => header, + } + } + + pub fn resign(&mut self, signer: &dyn ValidatorSigner) { + let (hash, signature) = signer.sign_block_header_parts( + *self.prev_hash(), + &self.inner_lite_bytes(), + &self.inner_rest_bytes(), + ); + let mut header = self.get_mut(); + header.hash = hash; + header.signature = signature; + } +} + impl Block { + pub fn mut_header(&mut self) -> &mut BlockHeader { + match self { + Block::BlockV1(block) => &mut block.header, + } + } + pub fn empty_with_epoch( prev: &Block, height: BlockHeight, @@ -250,7 +277,7 @@ impl Block { signer: &dyn ValidatorSigner, block_merkle_tree: &mut PartialMerkleTree, ) -> Self { - block_merkle_tree.insert(prev.hash()); + block_merkle_tree.insert(*prev.hash()); Self::empty_with_approvals( prev, height, @@ -285,13 +312,13 @@ impl Block { Self::empty_with_epoch( prev, height, - prev.header.inner_lite.epoch_id.clone(), - if prev.header.prev_hash == CryptoHash::default() { - EpochId(prev.hash()) + prev.header().epoch_id().clone(), + if 
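The accessor-based `BlockHeader` enum above is what makes later header changes possible without touching call sites: callers never see `inner_lite`/`inner_rest` directly. A minimal sketch of how a later version would slot in; `BlockHeaderV2` and its contents are purely hypothetical here, not part of this change:

    // Hypothetical follow-up: add a new variant and extend each accessor's match arm.
    // Existing callers keep using header.height(), header.epoch_id(), etc. unchanged.
    pub enum BlockHeader {
        BlockHeaderV1(Box<BlockHeaderV1>),
        BlockHeaderV2(Box<BlockHeaderV2>), // assumed future version, illustrative only
    }

    impl BlockHeader {
        pub fn height(&self) -> BlockHeight {
            match self {
                BlockHeader::BlockHeaderV1(h) => h.inner_lite.height,
                BlockHeader::BlockHeaderV2(h) => h.inner_lite.height,
            }
        }
    }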
prev.header().prev_hash() == &CryptoHash::default() { + EpochId(*prev.hash()) } else { - prev.header.inner_lite.next_epoch_id.clone() + prev.header().next_epoch_id().clone() }, - prev.header.inner_lite.next_bp_hash, + *prev.header().next_bp_hash(), signer, block_merkle_tree, ) @@ -304,7 +331,7 @@ impl Block { ) -> Self { Self::empty_with_height_and_block_merkle_tree( prev, - prev.header.inner_lite.height + 1, + prev.header().height() + 1, signer, block_merkle_tree, ) @@ -327,9 +354,10 @@ impl Block { block_merkle_root: CryptoHash, ) -> Self { Block::produce( - &prev.header, + PROTOCOL_VERSION, + prev.header(), height, - prev.chunks.clone(), + prev.chunks().clone(), epoch_id, next_epoch_id, approvals, diff --git a/core/primitives/src/types.rs b/core/primitives/src/types.rs index 8e58db6bf94..3c784dbb73d 100644 --- a/core/primitives/src/types.rs +++ b/core/primitives/src/types.rs @@ -473,13 +473,6 @@ impl ChunkExtra { } } -/// Data structure for semver version and github tag or commit. -#[derive(Serialize, Deserialize, Clone, Debug, Default)] -pub struct Version { - pub version: String, - pub build: String, -} - #[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, Serialize, Deserialize)] #[serde(untagged)] pub enum BlockId { diff --git a/core/primitives/src/validator_signer.rs b/core/primitives/src/validator_signer.rs index 96157bd9019..4539338d423 100644 --- a/core/primitives/src/validator_signer.rs +++ b/core/primitives/src/validator_signer.rs @@ -5,9 +5,7 @@ use borsh::BorshSerialize; use near_crypto::{InMemorySigner, KeyType, PublicKey, Signature, Signer}; -use crate::block::{ - Approval, ApprovalInner, BlockHeader, BlockHeaderInnerLite, BlockHeaderInnerRest, -}; +use crate::block::{Approval, ApprovalInner, BlockHeader}; use crate::challenge::ChallengeBody; use crate::hash::{hash, CryptoHash}; use crate::network::{AnnounceAccount, PeerId}; @@ -30,8 +28,8 @@ pub trait ValidatorSigner: Sync + Send { fn sign_block_header_parts( &self, prev_hash: CryptoHash, - inner_lite: &BlockHeaderInnerLite, - inner_rest: &BlockHeaderInnerRest, + inner_lite: &[u8], + inner_rest: &[u8], ) -> (CryptoHash, Signature); /// Signs given inner of the chunk header. @@ -86,8 +84,8 @@ impl ValidatorSigner for EmptyValidatorSigner { fn sign_block_header_parts( &self, prev_hash: CryptoHash, - inner_lite: &BlockHeaderInnerLite, - inner_rest: &BlockHeaderInnerRest, + inner_lite: &[u8], + inner_rest: &[u8], ) -> (CryptoHash, Signature) { let hash = BlockHeader::compute_hash(prev_hash, inner_lite, inner_rest); (hash, Signature::default()) @@ -182,8 +180,8 @@ impl ValidatorSigner for InMemoryValidatorSigner { fn sign_block_header_parts( &self, prev_hash: CryptoHash, - inner_lite: &BlockHeaderInnerLite, - inner_rest: &BlockHeaderInnerRest, + inner_lite: &[u8], + inner_rest: &[u8], ) -> (CryptoHash, Signature) { let hash = BlockHeader::compute_hash(prev_hash, inner_lite, inner_rest); (hash, self.signer.sign(hash.as_ref())) diff --git a/core/primitives/src/version.rs b/core/primitives/src/version.rs new file mode 100644 index 00000000000..255cd739b80 --- /dev/null +++ b/core/primitives/src/version.rs @@ -0,0 +1,22 @@ +use serde::{Deserialize, Serialize}; + +/// Data structure for semver version and github tag or commit. +#[derive(Serialize, Deserialize, Clone, Debug, Default)] +pub struct Version { + pub version: String, + pub build: String, +} + +/// Database version. +pub type DbVersion = u32; + +/// Current version of the database. +pub const DB_VERSION: DbVersion = 1; + +/// Protocol version type. 
+pub type ProtocolVersion = u32; + +/// Current latest version of the protocol. +pub const PROTOCOL_VERSION: ProtocolVersion = 22; + +pub const FIRST_BACKWARD_COMPATIBLE_PROTOCOL_VERSION: ProtocolVersion = PROTOCOL_VERSION; diff --git a/core/primitives/src/views.rs b/core/primitives/src/views.rs index 97f7176c14b..f0c3f6666ca 100644 --- a/core/primitives/src/views.rs +++ b/core/primitives/src/views.rs @@ -14,7 +14,8 @@ use serde::{Deserialize, Serialize}; use near_crypto::{PublicKey, Signature}; use crate::account::{AccessKey, AccessKeyPermission, Account, FunctionCallPermission}; -use crate::block::{Block, BlockHeader, BlockHeaderInnerLite, BlockHeaderInnerRest}; +use crate::block::{Block, BlockHeader}; +use crate::block_header::{BlockHeaderInnerLite, BlockHeaderInnerRest, BlockHeaderV1}; use crate::challenge::{Challenge, ChallengesResult}; use crate::errors::TxExecutionError; use crate::hash::{hash, CryptoHash}; @@ -37,8 +38,9 @@ use crate::types::{ AccountId, AccountWithPublicKey, Balance, BlockHeight, EpochId, FunctionArgs, Gas, Nonce, NumBlocks, ShardId, StateChangeCause, StateChangeKind, StateChangeValue, StateChangeWithCause, StateChangesRequest, StateRoot, StorageUsage, StoreKey, StoreValue, ValidatorKickoutReason, - ValidatorStake, Version, + ValidatorStake, }; +use crate::version::{ProtocolVersion, Version}; /// A view of the account #[derive(BorshSerialize, BorshDeserialize, Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] @@ -254,6 +256,10 @@ pub struct StatusResponse { pub version: Version, /// Unique chain id. pub chain_id: String, + /// Currently active protocol version. + pub protocol_version: u32, + /// Latest protocol version that this client supports. + pub latest_protocol_version: u32, /// Address for RPC server. pub rpc_addr: String, /// Current epoch validators. 
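The two new `StatusResponse` fields are meant to expose different things: the protocol version the network is currently running versus the newest version this binary knows about. A rough sketch of how a client would fill them (the surrounding variables and exact call site are assumptions; `get_epoch_protocol_version` is the `RuntimeAdapter` method added later in this change):

    // protocol_version: what the current epoch actually runs, from the epoch manager.
    // latest_protocol_version: what this node advertises and would upgrade to.
    let protocol_version = runtime_adapter.get_epoch_protocol_version(&head_epoch_id)?;
    let latest_protocol_version = PROTOCOL_VERSION;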
@@ -341,50 +347,51 @@ pub struct BlockHeaderView { pub block_merkle_root: CryptoHash, pub approvals: Vec>, pub signature: Signature, + pub latest_protocol_version: ProtocolVersion, } impl From for BlockHeaderView { fn from(header: BlockHeader) -> Self { Self { - height: header.inner_lite.height, - epoch_id: header.inner_lite.epoch_id.0, - next_epoch_id: header.inner_lite.next_epoch_id.0, - hash: header.hash, - prev_hash: header.prev_hash, - prev_state_root: header.inner_lite.prev_state_root, - chunk_receipts_root: header.inner_rest.chunk_receipts_root, - chunk_headers_root: header.inner_rest.chunk_headers_root, - chunk_tx_root: header.inner_rest.chunk_tx_root, - chunks_included: header.inner_rest.chunks_included, - challenges_root: header.inner_rest.challenges_root, - outcome_root: header.inner_lite.outcome_root, - timestamp: header.inner_lite.timestamp, - random_value: header.inner_rest.random_value, + height: header.height(), + epoch_id: header.epoch_id().0, + next_epoch_id: header.next_epoch_id().0, + hash: header.hash().clone(), + prev_hash: header.prev_hash().clone(), + prev_state_root: header.prev_state_root().clone(), + chunk_receipts_root: header.chunk_receipts_root().clone(), + chunk_headers_root: header.chunk_headers_root().clone(), + chunk_tx_root: header.chunk_tx_root().clone(), + chunks_included: header.chunks_included(), + challenges_root: header.challenges_root().clone(), + outcome_root: header.outcome_root().clone(), + timestamp: header.raw_timestamp(), + random_value: header.random_value().clone(), validator_proposals: header - .inner_rest - .validator_proposals - .into_iter() - .map(|v| v.into()) + .validator_proposals() + .iter() + .map(|v| v.clone().into()) .collect(), - chunk_mask: header.inner_rest.chunk_mask, - gas_price: header.inner_rest.gas_price, + chunk_mask: header.chunk_mask().to_vec(), + gas_price: header.gas_price(), rent_paid: 0, validator_reward: 0, - total_supply: header.inner_rest.total_supply, - challenges_result: header.inner_rest.challenges_result, - last_final_block: header.inner_rest.last_final_block, - last_ds_final_block: header.inner_rest.last_ds_final_block, - next_bp_hash: header.inner_lite.next_bp_hash, - block_merkle_root: header.inner_lite.block_merkle_root, - approvals: header.inner_rest.approvals.clone(), - signature: header.signature, + total_supply: header.total_supply(), + challenges_result: header.challenges_result().clone(), + last_final_block: header.last_final_block().clone(), + last_ds_final_block: header.last_ds_final_block().clone(), + next_bp_hash: header.next_bp_hash().clone(), + block_merkle_root: header.block_merkle_root().clone(), + approvals: header.approvals().to_vec(), + signature: header.signature().clone(), + latest_protocol_version: header.latest_protocol_version(), } } } impl From for BlockHeader { fn from(view: BlockHeaderView) -> Self { - let mut header = Self { + let mut header = BlockHeaderV1 { prev_hash: view.prev_hash, inner_lite: BlockHeaderInnerLite { height: view.height, @@ -415,12 +422,13 @@ impl From for BlockHeader { last_final_block: view.last_final_block, last_ds_final_block: view.last_ds_final_block, approvals: view.approvals.clone(), + latest_protocol_version: view.latest_protocol_version, }, signature: view.signature, hash: CryptoHash::default(), }; header.init(); - header + BlockHeader::BlockHeaderV1(Box::new(header)) } } @@ -436,17 +444,19 @@ pub struct BlockHeaderInnerLiteView { pub block_merkle_root: CryptoHash, } -impl From for BlockHeaderInnerLiteView { - fn from(header_lite: 
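Because `From<BlockHeaderView> for BlockHeader` rebuilds a `BlockHeaderV1` and re-runs `init()`, converting a header to a view and back should reproduce the same hash. A small test-style sanity sketch, assuming a `header: BlockHeader` is in scope:

    let view: BlockHeaderView = header.clone().into();
    let restored: BlockHeader = view.into();
    // The hash is recomputed from the restored inner parts, so it must match.
    assert_eq!(header.hash(), restored.hash());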
BlockHeaderInnerLite) -> Self { - BlockHeaderInnerLiteView { - height: header_lite.height, - epoch_id: header_lite.epoch_id.0, - next_epoch_id: header_lite.next_epoch_id.0, - prev_state_root: header_lite.prev_state_root, - outcome_root: header_lite.outcome_root, - timestamp: header_lite.timestamp, - next_bp_hash: header_lite.next_bp_hash, - block_merkle_root: header_lite.block_merkle_root, +impl From for BlockHeaderInnerLiteView { + fn from(header: BlockHeader) -> Self { + match header { + BlockHeader::BlockHeaderV1(header) => BlockHeaderInnerLiteView { + height: header.inner_lite.height, + epoch_id: header.inner_lite.epoch_id.0, + next_epoch_id: header.inner_lite.next_epoch_id.0, + prev_state_root: header.inner_lite.prev_state_root, + outcome_root: header.inner_lite.outcome_root, + timestamp: header.inner_lite.timestamp, + next_bp_hash: header.inner_lite.next_bp_hash, + block_merkle_root: header.inner_lite.block_merkle_root, + }, } } } @@ -546,8 +556,8 @@ impl BlockView { pub fn from_author_block(author: AccountId, block: Block) -> Self { BlockView { author, - header: block.header.into(), - chunks: block.chunks.into_iter().map(Into::into).collect(), + header: block.header().clone().into(), + chunks: block.chunks().iter().cloned().map(Into::into).collect(), } } } @@ -1043,9 +1053,9 @@ pub struct LightClientBlockLiteView { impl From for LightClientBlockLiteView { fn from(header: BlockHeader) -> Self { Self { - prev_block_hash: header.prev_hash, - inner_rest_hash: header.inner_rest.hash(), - inner_lite: header.inner_lite.into(), + prev_block_hash: header.prev_hash().clone(), + inner_rest_hash: hash(&header.inner_rest_bytes()), + inner_lite: header.into(), } } } diff --git a/core/store/Cargo.toml b/core/store/Cargo.toml index 8ec91c8e6b7..452d7ee0573 100644 --- a/core/store/Cargo.toml +++ b/core/store/Cargo.toml @@ -10,11 +10,12 @@ derive_more = "0.99.3" elastic-array = "0.11" rocksdb = "0.14" serde = { version = "1", features = [ "derive" ] } +serde_json = "1" cached = "0.12" num_cpus = "1.11" rand = "0.7.2" -borsh = "0.6.1" +borsh = "0.6.2" near-crypto = { path = "../crypto" } near-primitives = { path = "../primitives" } diff --git a/core/store/src/db.rs b/core/store/src/db.rs index c9a45e9e350..2cc13ea6a96 100644 --- a/core/store/src/db.rs +++ b/core/store/src/db.rs @@ -1,12 +1,15 @@ -use rocksdb::{ - BlockBasedOptions, ColumnFamily, ColumnFamilyDescriptor, Direction, IteratorMode, Options, - ReadOptions, WriteBatch, DB, -}; use std::cmp; use std::collections::HashMap; use std::io; use std::sync::RwLock; +use rocksdb::{ + BlockBasedOptions, ColumnFamily, ColumnFamilyDescriptor, Direction, IteratorMode, Options, + ReadOptions, WriteBatch, DB, +}; + +use near_primitives::version::DbVersion; + #[derive(Debug, Clone, PartialEq)] pub struct DBError(rocksdb::Error); @@ -32,67 +35,70 @@ impl Into for DBError { #[derive(PartialEq, Debug, Copy, Clone)] pub enum DBCol { - ColBlockMisc = 0, - ColBlock = 1, - ColBlockHeader = 2, - ColBlockHeight = 3, - ColState = 4, - ColChunkExtra = 5, - ColTransactionResult = 6, - ColOutgoingReceipts = 7, - ColIncomingReceipts = 8, - ColPeers = 9, - ColEpochInfo = 10, - ColBlockInfo = 11, - ColChunks = 12, - ColPartialChunks = 13, + /// Column to indicate which version of database this is. 
+ ColDbVersion = 0, + ColBlockMisc = 1, + ColBlock = 2, + ColBlockHeader = 3, + ColBlockHeight = 4, + ColState = 5, + ColChunkExtra = 6, + ColTransactionResult = 7, + ColOutgoingReceipts = 8, + ColIncomingReceipts = 9, + ColPeers = 10, + ColEpochInfo = 11, + ColBlockInfo = 12, + ColChunks = 13, + ColPartialChunks = 14, /// Blocks for which chunks need to be applied after the state is downloaded for a particular epoch - ColBlocksToCatchup = 14, + ColBlocksToCatchup = 15, /// Blocks for which the state is being downloaded - ColStateDlInfos = 15, - ColChallengedBlocks = 16, - ColStateHeaders = 17, - ColInvalidChunks = 18, - ColBlockExtra = 19, + ColStateDlInfos = 16, + ColChallengedBlocks = 17, + ColStateHeaders = 18, + ColInvalidChunks = 19, + ColBlockExtra = 20, /// Store hash of a block per each height, to detect double signs. - ColBlockPerHeight = 20, - ColStateParts = 21, - ColEpochStart = 22, + ColBlockPerHeight = 21, + ColStateParts = 22, + ColEpochStart = 23, /// Map account_id to announce_account - ColAccountAnnouncements = 23, + ColAccountAnnouncements = 24, /// Next block hashes in the sequence of the canonical chain blocks - ColNextBlockHashes = 24, + ColNextBlockHashes = 25, /// `LightClientBlock`s corresponding to the last final block of each completed epoch - ColEpochLightClientBlocks = 25, - ColReceiptIdToShardId = 26, - ColNextBlockWithNewChunk = 27, - ColLastBlockWithNewChunk = 28, + ColEpochLightClientBlocks = 26, + ColReceiptIdToShardId = 27, + ColNextBlockWithNewChunk = 28, + ColLastBlockWithNewChunk = 29, /// Map each saved peer on disk with its component id. - ColPeerComponent = 29, + ColPeerComponent = 30, /// Map component id with all edges in this component. - ColComponentEdges = 30, + ColComponentEdges = 31, /// Biggest nonce used. - LastComponentNonce = 31, + LastComponentNonce = 32, /// Transactions - ColTransactions = 32, - ColChunkPerHeightShard = 33, + ColTransactions = 33, + ColChunkPerHeightShard = 34, /// Changes to key-values that we have recorded. - ColStateChanges = 34, - ColBlockRefCount = 35, - ColTrieChanges = 36, + ColStateChanges = 35, + ColBlockRefCount = 36, + ColTrieChanges = 37, /// Merkle tree of block hashes - ColBlockMerkleTree = 37, - ColChunkHashesByHeight = 38, + ColBlockMerkleTree = 38, + ColChunkHashesByHeight = 39, /// Block ordinals. - ColBlockOrdinal = 39, + ColBlockOrdinal = 40, } // Do not move this line from enum DBCol -const NUM_COLS: usize = 40; +const NUM_COLS: usize = 41; impl std::fmt::Display for DBCol { fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { let desc = match self { + Self::ColDbVersion => "db version", Self::ColBlockMisc => "miscellaneous block data", Self::ColBlock => "block data", Self::ColBlockHeader => "block header data", @@ -145,6 +151,7 @@ pub const SYNC_HEAD_KEY: &[u8; 9] = b"SYNC_HEAD"; pub const HEADER_HEAD_KEY: &[u8; 11] = b"HEADER_HEAD"; pub const LATEST_KNOWN_KEY: &[u8; 12] = b"LATEST_KNOWN"; pub const LARGEST_TARGET_HEIGHT_KEY: &[u8; 21] = b"LARGEST_TARGET_HEIGHT"; +pub const VERSION_KEY: &[u8; 7] = b"VERSION"; pub struct DBTransaction { pub ops: Vec, @@ -172,7 +179,6 @@ impl DBTransaction { pub struct RocksDB { db: DB, cfs: Vec<*const ColumnFamily>, - read_options: ReadOptions, } // DB was already Send+Sync. 
cf and read_options are const pointers using only functions in @@ -200,14 +206,15 @@ pub trait Database: Sync + Send { impl Database for RocksDB { fn get(&self, col: DBCol, key: &[u8]) -> Result>, DBError> { - unsafe { Ok(self.db.get_cf_opt(&*self.cfs[col as usize], key, &self.read_options)?) } + let read_options = rocksdb_read_options(); + unsafe { Ok(self.db.get_cf_opt(&*self.cfs[col as usize], key, &read_options)?) } } fn iter<'a>(&'a self, col: DBCol) -> Box, Box<[u8]>)> + 'a> { + let read_options = rocksdb_read_options(); unsafe { let cf_handle = &*self.cfs[col as usize]; - let iterator = - self.db.iterator_cf_opt(cf_handle, rocksdb_read_options(), IteratorMode::Start); + let iterator = self.db.iterator_cf_opt(cf_handle, read_options, IteratorMode::Start); Box::new(iterator) } } @@ -332,6 +339,27 @@ fn rocksdb_column_options() -> Options { } impl RocksDB { + /// Returns version of the database state on disk. + pub fn get_version>(path: P) -> Result { + let db = RocksDB::new_read_only(path)?; + db.get(DBCol::ColDbVersion, VERSION_KEY).map(|result| { + serde_json::from_slice( + &result + .expect("Failed to find version in first column. Database must be corrupted."), + ) + .expect("Failed to parse version. Database must be corrupted.") + }) + } + + fn new_read_only>(path: P) -> Result { + let options = Options::default(); + let cf_names: Vec<_> = vec!["col0".to_string()]; + let db = DB::open_cf_for_read_only(&options, path, cf_names.iter(), false)?; + let cfs = + cf_names.iter().map(|n| db.cf_handle(n).unwrap() as *const ColumnFamily).collect(); + Ok(Self { db, cfs }) + } + pub fn new>(path: P) -> Result { let options = rocksdb_options(); let cf_names: Vec<_> = (0..NUM_COLS).map(|col| format!("col{}", col)).collect(); @@ -339,14 +367,9 @@ impl RocksDB { .iter() .map(|cf_name| ColumnFamilyDescriptor::new(cf_name, rocksdb_column_options())); let db = DB::open_cf_descriptors(&options, path, cf_descriptors)?; - let cfs = cf_names - .iter() - .map(|n| { - let ptr: *const ColumnFamily = db.cf_handle(n).unwrap(); - ptr - }) - .collect(); - Ok(Self { db, cfs, read_options: rocksdb_read_options() }) + let cfs = + cf_names.iter().map(|n| db.cf_handle(n).unwrap() as *const ColumnFamily).collect(); + Ok(Self { db, cfs }) } } diff --git a/core/store/src/lib.rs b/core/store/src/lib.rs index de0d46c745a..a2cd17c08dd 100644 --- a/core/store/src/lib.rs +++ b/core/store/src/lib.rs @@ -22,8 +22,9 @@ use near_primitives::receipt::{Receipt, ReceivedData}; use near_primitives::serialize::to_base; use near_primitives::trie_key::{trie_key_parsers, TrieKey}; use near_primitives::types::AccountId; +use near_primitives::version::{DbVersion, DB_VERSION}; -use crate::db::{DBOp, DBTransaction, Database, RocksDB}; +use crate::db::{DBOp, DBTransaction, Database, RocksDB, VERSION_KEY}; pub use crate::trie::{ iterator::TrieIterator, update::TrieUpdate, update::TrieUpdateIterator, update::TrieUpdateValuePtr, KeyForStateChanges, PartialStorage, ShardTries, Trie, TrieChanges, @@ -201,12 +202,8 @@ impl StoreUpdate { .ops .iter() .map(|op| match op { - DBOp::Insert { col, key, .. } => { - (*col as u8, key) - } - DBOp::Delete { col, key } => { - (*col as u8, key) - } + DBOp::Insert { col, key, .. 
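`ColDbVersion` deliberately sits at index 0: `get_version` opens the database read-only with only the first column family ("col0"), so the stored version can be read before the binary commits to the rest of the column schema. A condensed sketch of the check a caller is expected to perform; the comparison against `DB_VERSION` is illustrative, this patch itself only reads the value:

    // Read the stored version via the read-only "col0" handle, then decide whether
    // migrations are needed before opening the full database.
    let on_disk: DbVersion = RocksDB::get_version(path).expect("Failed to open the database");
    if on_disk < DB_VERSION {
        // run migrations (none exist yet, since DB_VERSION starts at 1)
    }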
} => (*col as u8, key), + DBOp::Delete { col, key } => (*col as u8, key), }) .collect::>() .len(), @@ -254,6 +251,20 @@ pub fn read_with_cache<'a, T: BorshDeserialize + 'a>( Ok(None) } +pub fn get_store_version(path: &str) -> DbVersion { + RocksDB::get_version(path).expect("Failed to open the database") +} + +pub fn set_store_version(store: &Store) { + let mut store_update = store.store_update(); + store_update.set( + DBCol::ColDbVersion, + VERSION_KEY, + &serde_json::to_vec(&DB_VERSION).expect("Faile to serialize version"), + ); + store_update.commit().expect("Failed to write version to database"); +} + pub fn create_store(path: &str) -> Arc { let db = Arc::new(RocksDB::new(path).expect("Failed to open the database")); Arc::new(Store::new(db)) diff --git a/core/store/src/validate.rs b/core/store/src/validate.rs new file mode 100644 index 00000000000..8b137891791 --- /dev/null +++ b/core/store/src/validate.rs @@ -0,0 +1 @@ + diff --git a/genesis-tools/genesis-csv-to-json/src/csv_to_json_configs.rs b/genesis-tools/genesis-csv-to-json/src/csv_to_json_configs.rs index be0ac724073..31b5b80cc6c 100644 --- a/genesis-tools/genesis-csv-to-json/src/csv_to_json_configs.rs +++ b/genesis-tools/genesis-csv-to-json/src/csv_to_json_configs.rs @@ -1,14 +1,16 @@ use std::fs::File; use std::path::Path; -use near_chain_configs::{Genesis, GenesisConfig, GENESIS_CONFIG_VERSION, PROTOCOL_VERSION}; +use near_chain_configs::{Genesis, GenesisConfig}; use near_primitives::types::{Balance, NumShards, ShardId}; use near_primitives::utils::get_num_seats_per_shard; +use near_primitives::version::PROTOCOL_VERSION; use neard::config::{ Config, BLOCK_PRODUCER_KICKOUT_THRESHOLD, CHUNK_PRODUCER_KICKOUT_THRESHOLD, CONFIG_FILENAME, EXPECTED_EPOCH_LENGTH, FISHERMEN_THRESHOLD, GAS_PRICE_ADJUSTMENT_RATE, GENESIS_CONFIG_FILENAME, INITIAL_GAS_LIMIT, MAX_INFLATION_RATE, MIN_GAS_PRICE, NODE_KEY_FILE, NUM_BLOCKS_PER_YEAR, - NUM_BLOCK_PRODUCER_SEATS, PROTOCOL_REWARD_RATE, TRANSACTION_VALIDITY_PERIOD, + NUM_BLOCK_PRODUCER_SEATS, PROTOCOL_REWARD_RATE, PROTOCOL_UPGRADE_NUM_EPOCHS, + PROTOCOL_UPGRADE_STAKE_THRESHOLD, TRANSACTION_VALIDITY_PERIOD, }; use neard::NEAR_BASE; @@ -54,7 +56,6 @@ pub fn csv_to_json_configs(home: &Path, chain_id: String, tracked_shards: Vec>().join(","); let genesis_config = GenesisConfig { protocol_version: PROTOCOL_VERSION, - config_version: GENESIS_CONFIG_VERSION, genesis_time, chain_id: chain_id.clone(), num_block_producer_seats: NUM_BLOCK_PRODUCER_SEATS, @@ -64,6 +65,8 @@ pub fn csv_to_json_configs(home: &Path, chain_id: String, tracked_shards: Vec"] edition = "2018" [dependencies] -borsh = "0.6.1" +borsh = "0.6.2" byteorder = "1.2" indicatif = "0.13.0" clap = "2.33.0" diff --git a/genesis-tools/genesis-populate/src/lib.rs b/genesis-tools/genesis-populate/src/lib.rs index 7d3bc3a3fa4..d3d7467fe54 100644 --- a/genesis-tools/genesis-populate/src/lib.rs +++ b/genesis-tools/genesis-populate/src/lib.rs @@ -9,6 +9,7 @@ use std::sync::Arc; use borsh::BorshSerialize; use indicatif::{ProgressBar, ProgressStyle}; +use near_chain::types::BlockHeaderInfo; use near_chain::{Block, Chain, ChainStore, RuntimeAdapter}; use near_chain_configs::Genesis; use near_crypto::{InMemorySigner, KeyType}; @@ -188,6 +189,7 @@ impl GenesisBuilder { self.genesis.config.genesis_height, ); let genesis = Block::genesis( + self.genesis.config.protocol_version, genesis_chunks.into_iter().map(|chunk| chunk.header).collect(), self.genesis.config.genesis_time, self.genesis.config.genesis_height, @@ -199,25 +201,13 @@ impl GenesisBuilder { let mut 
store = ChainStore::new(self.store.clone(), self.genesis.config.genesis_height); let mut store_update = store.store_update(); - self.runtime - .add_validator_proposals( - CryptoHash::default(), - genesis.hash(), - genesis.header.inner_rest.random_value, - genesis.header.inner_lite.height, - 0, - vec![], - vec![], - vec![], - self.genesis.config.total_supply.clone(), - ) - .unwrap(); + self.runtime.add_validator_proposals(BlockHeaderInfo::new(&genesis.header(), 0)).unwrap(); store_update - .save_block_header(genesis.header.clone()) + .save_block_header(genesis.header().clone()) .expect("save genesis block header shouldn't fail"); store_update.save_block(genesis.clone()); - for (chunk_header, state_root) in genesis.chunks.iter().zip(self.roots.values()) { + for (chunk_header, state_root) in genesis.chunks().iter().zip(self.roots.values()) { store_update.save_chunk_extra( &genesis.hash(), chunk_header.inner.shard_id, @@ -232,7 +222,7 @@ impl GenesisBuilder { ); } - let head = Tip::from_header(&genesis.header); + let head = Tip::from_header(&genesis.header()); store_update.save_head(&head).unwrap(); store_update.save_sync_head(&head); store_update.commit().unwrap(); diff --git a/neard/Cargo.toml b/neard/Cargo.toml index 2e376a2b9c7..36bf83b387f 100644 --- a/neard/Cargo.toml +++ b/neard/Cargo.toml @@ -19,7 +19,7 @@ serde = { version = "1", features = [ "derive" ] } serde_json = "1" lazy_static = "1.4" dirs = "2.0.2" -borsh = "0.6.1" +borsh = "0.6.2" tracing = "0.1.13" tracing-subscriber = "0.2.4" num-rational = { version = "0.2.4", features = ["serde"] } diff --git a/neard/res/genesis_config.json b/neard/res/genesis_config.json index f41678661e0..b98ae21372b 100644 --- a/neard/res/genesis_config.json +++ b/neard/res/genesis_config.json @@ -1,6 +1,5 @@ { - "config_version": 1, - "protocol_version": 21, + "protocol_version": 22, "genesis_time": "1970-01-01T00:00:00.000000000Z", "chain_id": "sample", "genesis_height": 0, @@ -12,6 +11,11 @@ 0 ], "dynamic_resharding": false, + "protocol_upgrade_stake_threshold": [ + 4, + 5 + ], + "protocol_upgrade_num_epochs": 2, "epoch_length": 500, "gas_limit": 1000000000000000, "min_gas_price": "5000", @@ -224,4 +228,4 @@ "protocol_treasury_account": "test.near", "fishermen_threshold": "10000000000000000000000000", "records": [] -} +} \ No newline at end of file diff --git a/neard/src/config.rs b/neard/src/config.rs index b35bf803090..d8e3a08eca9 100644 --- a/neard/src/config.rs +++ b/neard/src/config.rs @@ -7,14 +7,12 @@ use std::sync::Arc; use std::time::Duration; use chrono::Utc; -use lazy_static::lazy_static; use log::info; use num_rational::Rational; use serde::{Deserialize, Serialize}; -use near_chain_configs::{ - ClientConfig, Genesis, GenesisConfig, GENESIS_CONFIG_VERSION, PROTOCOL_VERSION, -}; +use lazy_static::lazy_static; +use near_chain_configs::{ClientConfig, Genesis, GenesisConfig}; use near_crypto::{InMemorySigner, KeyFile, KeyType, PublicKey, Signer}; use near_jsonrpc::RpcConfig; use near_network::test_utils::open_port; @@ -25,10 +23,12 @@ use near_primitives::account::{AccessKey, Account}; use near_primitives::hash::CryptoHash; use near_primitives::state_record::StateRecord; use near_primitives::types::{ - AccountId, AccountInfo, Balance, BlockHeightDelta, Gas, NumBlocks, NumSeats, NumShards, ShardId, + AccountId, AccountInfo, Balance, BlockHeightDelta, EpochHeight, Gas, NumBlocks, NumSeats, + NumShards, ShardId, }; use near_primitives::utils::{generate_random_string, get_num_seats_per_shard}; use 
near_primitives::validator_signer::{InMemoryValidatorSigner, ValidatorSigner}; +use near_primitives::version::PROTOCOL_VERSION; use near_runtime_configs::RuntimeConfig; use near_telemetry::TelemetryConfig; @@ -119,6 +119,9 @@ pub const NUM_BLOCK_PRODUCER_SEATS: NumSeats = 50; /// How much height horizon to give to consider peer up to date. pub const HIGHEST_PEER_HORIZON: u64 = 5; +/// Number of epochs before protocol upgrade. +pub const PROTOCOL_UPGRADE_NUM_EPOCHS: EpochHeight = 2; + pub const CONFIG_FILENAME: &str = "config.json"; pub const GENESIS_CONFIG_FILENAME: &str = "genesis.json"; pub const NODE_KEY_FILE: &str = "node_key.json"; @@ -138,6 +141,9 @@ lazy_static! { /// Maximum inflation rate per year pub static ref MAX_INFLATION_RATE: Rational = Rational::new(5, 100); + + /// Protocol upgrade stake threshold. + pub static ref PROTOCOL_UPGRADE_STAKE_THRESHOLD: Rational = Rational::new(8, 10); } /// Maximum number of active peers. Hard limit. @@ -429,13 +435,14 @@ impl Genesis { add_protocol_account(&mut records); let config = GenesisConfig { protocol_version: PROTOCOL_VERSION, - config_version: GENESIS_CONFIG_VERSION, genesis_time: Utc::now(), chain_id: random_chain_id(), num_block_producer_seats: num_validator_seats, num_block_producer_seats_per_shard: num_validator_seats_per_shard.clone(), avg_hidden_validator_seats_per_shard: vec![0; num_validator_seats_per_shard.len()], dynamic_resharding: false, + protocol_upgrade_stake_threshold: *PROTOCOL_UPGRADE_STAKE_THRESHOLD, + protocol_upgrade_num_epochs: PROTOCOL_UPGRADE_NUM_EPOCHS, epoch_length: FAST_EPOCH_LENGTH, gas_limit: INITIAL_GAS_LIMIT, gas_price_adjustment_rate: *GAS_PRICE_ADJUSTMENT_RATE, @@ -746,7 +753,6 @@ pub fn init_configs( let genesis_config = GenesisConfig { protocol_version: PROTOCOL_VERSION, - config_version: GENESIS_CONFIG_VERSION, genesis_time: Utc::now(), chain_id, genesis_height: 0, @@ -757,6 +763,8 @@ pub fn init_configs( ), avg_hidden_validator_seats_per_shard: (0..num_shards).map(|_| 0).collect(), dynamic_resharding: false, + protocol_upgrade_stake_threshold: *PROTOCOL_UPGRADE_STAKE_THRESHOLD, + protocol_upgrade_num_epochs: PROTOCOL_UPGRADE_NUM_EPOCHS, epoch_length: if fast { FAST_EPOCH_LENGTH } else { EXPECTED_EPOCH_LENGTH }, gas_limit: INITIAL_GAS_LIMIT, gas_price_adjustment_rate: *GAS_PRICE_ADJUSTMENT_RATE, @@ -932,6 +940,5 @@ mod test { let genesis_config_str = include_str!("../res/genesis_config.json"); let genesis_config = GenesisConfig::from_json(&genesis_config_str); assert_eq!(genesis_config.protocol_version, PROTOCOL_VERSION); - assert_eq!(genesis_config.config_version, GENESIS_CONFIG_VERSION); } } diff --git a/neard/src/lib.rs b/neard/src/lib.rs index a51ba28af24..1093c55a2f0 100644 --- a/neard/src/lib.rs +++ b/neard/src/lib.rs @@ -4,14 +4,14 @@ use std::sync::Arc; use actix::{Actor, Addr}; use log::info; +use tracing::trace; use near_chain::ChainGenesis; use near_client::{ClientActor, ViewClientActor}; use near_jsonrpc::start_http; use near_network::{NetworkRecipient, PeerManagerActor}; -use near_store::create_store; +use near_store::{create_store, get_store_version, set_store_version, Store}; use near_telemetry::TelemetryActor; -use tracing::trace; pub use crate::config::{init_configs, load_config, load_test_config, NearConfig, NEAR_BASE}; pub use crate::runtime::NightshadeRuntime; @@ -23,15 +23,18 @@ mod shard_tracker; const STORE_PATH: &str = "data"; +pub fn store_path_exists>(path: P) -> bool { + fs::canonicalize(path).is_ok() +} + pub fn get_store_path(base_path: &Path) -> String { let mut 
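`PROTOCOL_UPGRADE_STAKE_THRESHOLD` (8/10 here, 4/5 in the sample genesis) and `PROTOCOL_UPGRADE_NUM_EPOCHS` (2) drive when the network switches to a newer protocol version. A rough sketch of the arithmetic they imply; the variable names are illustrative and the real stake-weighted aggregation of `latest_protocol_version` votes lives in the epoch manager:

    // Validators advertise latest_protocol_version in their block headers.
    let (num, denom) = (4u128, 5u128); // protocol_upgrade_stake_threshold from the sample genesis
    let adopt_new_version = advertising_stake * denom >= total_stake * num;
    // If adopted, the switch becomes active PROTOCOL_UPGRADE_NUM_EPOCHS (= 2) epochs later.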
store_path = base_path.to_owned(); store_path.push(STORE_PATH); - match fs::canonicalize(store_path.clone()) { - Ok(path) => info!(target: "near", "Opening store database at {:?}", path), - _ => { - info!(target: "near", "Did not find {:?} path, will be creating new store database", store_path) - } - }; + if store_path_exists(&store_path) { + info!(target: "near", "Opening store database at {:?}", store_path); + } else { + info!(target: "near", "Did not find {:?} path, will be creating new store database", store_path); + } store_path.to_str().unwrap().to_owned() } @@ -48,11 +51,30 @@ pub fn get_default_home() -> String { } } +/// Function checks current version of the database and applies migrations to the database. +pub fn apply_store_migrations(path: &String) { + let _db_version = get_store_version(path); + // Add migrations here based on `db_version`. +} + +pub fn init_and_migrate_store(home_dir: &Path) -> Arc { + let path = get_store_path(home_dir); + let store_exists = store_path_exists(&path); + if store_exists { + apply_store_migrations(&path); + } + let store = create_store(&path); + if !store_exists { + set_store_version(&store); + } + store +} + pub fn start_with_config( home_dir: &Path, config: NearConfig, ) -> (Addr, Addr) { - let store = create_store(&get_store_path(home_dir)); + let store = init_and_migrate_store(home_dir); near_actix_utils::init_stop_on_panic(); let runtime = Arc::new(NightshadeRuntime::new( home_dir, diff --git a/neard/src/main.rs b/neard/src/main.rs index d6aa29ddcc3..28ecee80c84 100644 --- a/neard/src/main.rs +++ b/neard/src/main.rs @@ -13,7 +13,7 @@ use tracing_subscriber::filter::LevelFilter; use tracing_subscriber::EnvFilter; use git_version::git_version; -use near_primitives::types::Version; +use near_primitives::version::{Version, PROTOCOL_VERSION}; use neard::config::init_testnet_configs; use neard::genesis_validate::validate_genesis; use neard::{get_default_home, get_store_path, init_configs, load_config, start_with_config}; @@ -100,7 +100,7 @@ fn main() { .get_matches(); init_logging(matches.value_of("verbose")); - info!(target: "near", "Version: {}, Build: {}", version.version, version.build); + info!(target: "near", "Version: {}, Build: {}, Latest Protocol: {}", version.version, version.build, PROTOCOL_VERSION); #[cfg(feature = "adversarial")] { diff --git a/neard/src/runtime.rs b/neard/src/runtime.rs index e5192247e89..f34b8904f66 100644 --- a/neard/src/runtime.rs +++ b/neard/src/runtime.rs @@ -10,9 +10,8 @@ use borsh::ser::BorshSerialize; use borsh::BorshDeserialize; use log::{debug, error, warn}; -use crate::shard_tracker::{account_id_to_shard_id, ShardTracker}; use near_chain::chain::NUM_EPOCHS_TO_KEEP_STORE_DATA; -use near_chain::types::ApplyTransactionResult; +use near_chain::types::{ApplyTransactionResult, BlockHeaderInfo}; use near_chain::{BlockHeader, Error, ErrorKind, RuntimeAdapter}; use near_chain_configs::Genesis; use near_crypto::{PublicKey, Signature}; @@ -20,7 +19,7 @@ use near_epoch_manager::{BlockInfo, EpochConfig, EpochManager, RewardCalculator} use near_pool::types::PoolIterator; use near_primitives::account::{AccessKey, Account}; use near_primitives::block::{Approval, ApprovalInner}; -use near_primitives::challenge::{ChallengesResult, SlashedValidator}; +use near_primitives::challenge::ChallengesResult; use near_primitives::errors::{EpochError, InvalidTxError, RuntimeError}; use near_primitives::hash::{hash, CryptoHash}; use near_primitives::receipt::Receipt; @@ -33,6 +32,7 @@ use near_primitives::types::{ MerkleHash, 
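`init_and_migrate_store` fixes the order of operations: existing stores are migrated before opening, fresh stores get `DB_VERSION` written after creation. A hypothetical sketch of how the first real migration would be wired in later; the version check body and the `migrate_1_to_2` helper are assumptions, not part of this change:

    pub fn apply_store_migrations(path: &String) {
        let db_version = get_store_version(path);
        if db_version == 1 {
            // migrate_1_to_2(path); // rewrite affected columns to the new format,
            // then persist the bumped version so the migration is not re-run.
        }
    }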
NumShards, ShardId, StateChangeCause, StateRoot, StateRootNode, ValidatorStake, ValidatorStats, }; +use near_primitives::version::ProtocolVersion; use near_primitives::views::{ AccessKeyInfoView, CallResult, EpochValidatorInfo, QueryError, QueryRequest, QueryResponse, QueryResponseKind, ViewStateResult, @@ -48,6 +48,8 @@ use node_runtime::{ ValidatorAccountsUpdate, }; +use crate::shard_tracker::{account_id_to_shard_id, ShardTracker}; + const POISONED_LOCK_ERR: &str = "The lock was poisoned."; const STATE_DUMP_FILE: &str = "state_dump"; const GENESIS_ROOTS_FILE: &str = "genesis_roots"; @@ -146,6 +148,8 @@ impl NightshadeRuntime { fishermen_threshold: genesis.config.fishermen_threshold, online_min_threshold: genesis.config.online_min_threshold, online_max_threshold: genesis.config.online_max_threshold, + protocol_upgrade_num_epochs: genesis.config.protocol_upgrade_num_epochs, + protocol_upgrade_stake_threshold: genesis.config.protocol_upgrade_stake_threshold, }; let reward_calculator = RewardCalculator { max_inflation_rate: genesis.config.max_inflation_rate, @@ -160,6 +164,7 @@ impl NightshadeRuntime { EpochManager::new( store.clone(), initial_epoch_config, + genesis.config.protocol_version, reward_calculator, genesis .config @@ -474,8 +479,8 @@ impl RuntimeAdapter for NightshadeRuntime { fn verify_block_signature(&self, header: &BlockHeader) -> Result<(), Error> { let mut epoch_manager = self.epoch_manager.as_ref().write().expect(POISONED_LOCK_ERR); - let validator = epoch_manager - .get_block_producer_info(&header.inner_lite.epoch_id, header.inner_lite.height)?; + let validator = + epoch_manager.get_block_producer_info(header.epoch_id(), header.height())?; if !header.verify_block_producer(&validator.public_key) { return Err(ErrorKind::InvalidBlockProposer.into()); } @@ -487,8 +492,8 @@ impl RuntimeAdapter for NightshadeRuntime { epoch_id: &EpochId, block_height: BlockHeight, prev_random_value: &CryptoHash, - vrf_value: near_crypto::vrf::Value, - vrf_proof: near_crypto::vrf::Proof, + vrf_value: &near_crypto::vrf::Value, + vrf_proof: &near_crypto::vrf::Proof, ) -> Result<(), Error> { let mut epoch_manager = self.epoch_manager.as_ref().write().expect(POISONED_LOCK_ERR); let validator = epoch_manager.get_block_producer_info(&epoch_id, block_height)?; @@ -497,7 +502,7 @@ impl RuntimeAdapter for NightshadeRuntime { ) .unwrap(); - if !public_key.is_vrf_valid(&prev_random_value.as_ref(), &vrf_value, &vrf_proof) { + if !public_key.is_vrf_valid(&prev_random_value.as_ref(), vrf_value, vrf_proof) { return Err(ErrorKind::InvalidRandomnessBeaconOutput.into()); } Ok(()) @@ -648,16 +653,16 @@ impl RuntimeAdapter for NightshadeRuntime { fn verify_header_signature(&self, header: &BlockHeader) -> Result { let mut epoch_manager = self.epoch_manager.as_ref().write().expect(POISONED_LOCK_ERR); - let block_producer = epoch_manager - .get_block_producer_info(&header.inner_lite.epoch_id, header.inner_lite.height)?; - let slashed = match epoch_manager.get_slashed_validators(&header.prev_hash) { + let block_producer = + epoch_manager.get_block_producer_info(&header.epoch_id(), header.height())?; + let slashed = match epoch_manager.get_slashed_validators(header.prev_hash()) { Ok(slashed) => slashed, - Err(_) => return Err(EpochError::MissingBlock(header.prev_hash).into()), + Err(_) => return Err(EpochError::MissingBlock(*header.prev_hash()).into()), }; if slashed.contains_key(&block_producer.account_id) { return Ok(false); } - Ok(header.signature.verify(header.hash.as_ref(), &block_producer.public_key)) + 
Ok(header.signature().verify(header.hash().as_ref(), &block_producer.public_key)) } fn verify_chunk_header_signature(&self, header: &ShardChunkHeader) -> Result { @@ -904,36 +909,35 @@ impl RuntimeAdapter for NightshadeRuntime { Ok(epoch_manager.get_epoch_info(epoch_id)?.minted_amount) } - fn add_validator_proposals( - &self, - parent_hash: CryptoHash, - current_hash: CryptoHash, - rng_seed: CryptoHash, - height: BlockHeight, - last_finalized_height: BlockHeight, - proposals: Vec, - slashed_validators: Vec, - chunk_mask: Vec, - total_supply: Balance, - ) -> Result<(), Error> { + fn get_epoch_protocol_version(&self, epoch_id: &EpochId) -> Result { + let mut epoch_manager = self.epoch_manager.as_ref().write().expect(POISONED_LOCK_ERR); + Ok(epoch_manager.get_epoch_info(epoch_id)?.protocol_version) + } + + fn add_validator_proposals(&self, block_header_info: BlockHeaderInfo) -> Result<(), Error> { // Check that genesis block doesn't have any proposals. - assert!(height > 0 || (proposals.is_empty() && slashed_validators.is_empty())); - debug!(target: "runtime", "add validator proposals at block height {} {:?}", height, proposals); + assert!( + block_header_info.height > 0 + || (block_header_info.proposals.is_empty() + && block_header_info.slashed_validators.is_empty()) + ); + debug!(target: "runtime", "add validator proposals at block height {} {:?}", block_header_info.height, block_header_info.proposals); // Deal with validator proposals and epoch finishing. let mut epoch_manager = self.epoch_manager.as_ref().write().expect(POISONED_LOCK_ERR); let block_info = BlockInfo::new( - height, - last_finalized_height, - parent_hash, - proposals, - chunk_mask, - slashed_validators, - total_supply, + block_header_info.height, + block_header_info.last_finalized_height, + block_header_info.prev_hash, + block_header_info.proposals, + block_header_info.chunk_mask, + block_header_info.slashed_validators, + block_header_info.total_supply, + block_header_info.latest_protocol_version, ); - let rng_seed = (rng_seed.0).0; + let rng_seed = (block_header_info.random_value.0).0; // TODO: don't commit here, instead contribute to upstream store update. epoch_manager - .record_block_info(¤t_hash, block_info, rng_seed)? + .record_block_info(&block_header_info.hash, block_info, rng_seed)? 
.commit() .map_err(|err| err.into()) } @@ -1346,6 +1350,7 @@ mod test { use near_crypto::{InMemorySigner, KeyType, Signer}; use near_logger_utils::init_test_logger; use near_primitives::block::Tip; + use near_primitives::challenge::SlashedValidator; use near_primitives::transaction::{ Action, CreateAccountAction, DeleteAccountAction, StakeAction, }; @@ -1459,6 +1464,7 @@ mod test { genesis.config.max_inflation_rate = Rational::from_integer(0); } let genesis_total_supply = genesis.config.total_supply; + let genesis_protocol_version = genesis.config.protocol_version; let runtime = NightshadeRuntime::new( dir.path(), store, @@ -1470,17 +1476,18 @@ mod test { store_update.commit().unwrap(); let genesis_hash = hash(&vec![0]); runtime - .add_validator_proposals( - CryptoHash::default(), - genesis_hash, - [0; 32].as_ref().try_into().unwrap(), - 0, - 0, - vec![], - vec![], - vec![], - genesis_total_supply, - ) + .add_validator_proposals(BlockHeaderInfo { + prev_hash: CryptoHash::default(), + hash: genesis_hash, + random_value: [0; 32].as_ref().try_into().unwrap(), + height: 0, + last_finalized_height: 0, + proposals: vec![], + slashed_validators: vec![], + chunk_mask: vec![], + total_supply: genesis_total_supply, + latest_protocol_version: genesis_protocol_version, + }) .unwrap(); Self { runtime, @@ -1536,17 +1543,18 @@ mod test { self.last_shard_proposals.insert(i as ShardId, proposals); } self.runtime - .add_validator_proposals( - self.head.last_block_hash, - new_hash, - [0; 32].as_ref().try_into().unwrap(), - self.head.height + 1, - self.head.height.saturating_sub(1), - self.last_proposals.clone(), - challenges_result, + .add_validator_proposals(BlockHeaderInfo { + prev_hash: self.head.last_block_hash, + hash: new_hash, + random_value: [0; 32].as_ref().try_into().unwrap(), + height: self.head.height + 1, + last_finalized_height: self.head.height.saturating_sub(1), + proposals: self.last_proposals.clone(), + slashed_validators: challenges_result, chunk_mask, - self.runtime.genesis.config.total_supply, - ) + total_supply: self.runtime.genesis.config.total_supply, + latest_protocol_version: self.runtime.genesis.config.protocol_version, + }) .unwrap(); self.last_receipts = new_receipts; self.last_proposals = all_proposals; @@ -1949,17 +1957,18 @@ mod test { }; new_env .runtime - .add_validator_proposals( + .add_validator_proposals(BlockHeaderInfo { prev_hash, - cur_hash, - [0; 32].as_ref().try_into().unwrap(), - i, - i.saturating_sub(2), - new_env.last_proposals.clone(), - vec![], - vec![true], - new_env.runtime.genesis.config.total_supply, - ) + hash: cur_hash, + random_value: [0; 32].as_ref().try_into().unwrap(), + height: i, + last_finalized_height: i.saturating_sub(2), + proposals: new_env.last_proposals.clone(), + slashed_validators: vec![], + chunk_mask: vec![true], + total_supply: new_env.runtime.genesis.config.total_supply, + latest_protocol_version: new_env.runtime.genesis.config.protocol_version, + }) .unwrap(); new_env.head.height = i; new_env.head.last_block_hash = cur_hash; diff --git a/neard/src/shard_tracker.rs b/neard/src/shard_tracker.rs index 72871442d34..27d16e74104 100644 --- a/neard/src/shard_tracker.rs +++ b/neard/src/shard_tracker.rs @@ -237,6 +237,7 @@ mod tests { use near_store::test_utils::create_test_store; use super::{account_id_to_shard_id, ShardTracker, POISONED_LOCK_ERR}; + use near_primitives::version::PROTOCOL_VERSION; use num_rational::Rational; const DEFAULT_TOTAL_SUPPLY: u128 = 1_000_000_000_000; @@ -254,6 +255,8 @@ mod tests { fishermen_threshold: 0, 
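`BlockHeaderInfo` collapses the old ten-argument `add_validator_proposals` call into one struct; `BlockHeaderInfo::new(&header, last_finalized_height)`, used in genesis-populate above, is expected to gather the same fields the tests spell out by hand. A sketch of that mapping, inferred from the struct literals in the tests (slashed-validator handling is simplified, since slashes come from challenge results rather than the header):

    let info = BlockHeaderInfo {
        prev_hash: *header.prev_hash(),
        hash: *header.hash(),
        random_value: *header.random_value(),
        height: header.height(),
        last_finalized_height,
        proposals: header.validator_proposals().to_vec(),
        slashed_validators: vec![], // simplified for this sketch
        chunk_mask: header.chunk_mask().to_vec(),
        total_supply: header.total_supply(),
        latest_protocol_version: header.latest_protocol_version(),
    };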
online_max_threshold: Rational::from_integer(1), online_min_threshold: Rational::new(90, 100), + protocol_upgrade_stake_threshold: Rational::new(80, 100), + protocol_upgrade_num_epochs: 2, }; let reward_calculator = RewardCalculator { max_inflation_rate: Rational::from_integer(0), @@ -268,6 +271,7 @@ mod tests { EpochManager::new( store, initial_epoch_config, + PROTOCOL_VERSION, reward_calculator, vec![ValidatorStake { account_id: "test".to_string(), @@ -289,7 +293,16 @@ mod tests { epoch_manager .record_block_info( &cur_h, - BlockInfo::new(height, 0, prev_h, proposals, vec![], vec![], DEFAULT_TOTAL_SUPPLY), + BlockInfo::new( + height, + 0, + prev_h, + proposals, + vec![], + vec![], + DEFAULT_TOTAL_SUPPLY, + PROTOCOL_VERSION, + ), [0; 32], ) .unwrap() diff --git a/neard/tests/economics.rs b/neard/tests/economics.rs index 4174bb2a8ce..39cbdd467e9 100644 --- a/neard/tests/economics.rs +++ b/neard/tests/economics.rs @@ -75,7 +75,7 @@ fn test_burn_mint() { }); let signer = InMemorySigner::from_seed("test0", KeyType::ED25519, "test0"); let initial_total_supply = env.chain_genesis.total_supply; - let genesis_hash = env.clients[0].chain.genesis().hash(); + let genesis_hash = *env.clients[0].chain.genesis().hash(); env.clients[0].process_tx( SignedTransaction::send_money( 1, @@ -96,7 +96,7 @@ fn test_burn_mint() { // print_accounts(&mut env); // assert_eq!( // calc_total_supply(&mut env), - // env.clients[0].chain.get_block_by_height(i + 1).unwrap().header.inner_rest.total_supply + // env.clients[0].chain.get_block_by_height(i + 1).unwrap().header.total_supply() // ); } @@ -105,18 +105,15 @@ fn test_burn_mint() { // We burn half of the cost when tx executed and the other half in the next block for the receipt processing. let half_transfer_cost = fee_helper.transfer_cost() / 2; assert_eq!( - block3.header.inner_rest.total_supply, + block3.header().total_supply(), // supply + 1% of protocol rewards + 3/4 * 9% of validator rewards. initial_total_supply * 10775 / 10000 - half_transfer_cost ); - assert_eq!(block3.chunks[0].inner.balance_burnt, half_transfer_cost); + assert_eq!(block3.chunks()[0].inner.balance_burnt, half_transfer_cost); // Block 4: subtract 2nd part of transfer. let block4 = env.clients[0].chain.get_block_by_height(4).unwrap().clone(); - assert_eq!( - block4.header.inner_rest.total_supply, - block3.header.inner_rest.total_supply - half_transfer_cost - ); - assert_eq!(block4.chunks[0].inner.balance_burnt, half_transfer_cost); + assert_eq!(block4.header().total_supply(), block3.header().total_supply() - half_transfer_cost); + assert_eq!(block4.chunks()[0].inner.balance_burnt, half_transfer_cost); // Check that Protocol Treasury account got it's 1% as well. assert_eq!( env.query_balance("near".to_string()), @@ -125,8 +122,8 @@ fn test_burn_mint() { // Block 5: reward from previous block. 
let block5 = env.clients[0].chain.get_block_by_height(5).unwrap().clone(); assert_eq!( - block5.header.inner_rest.total_supply, + block5.header().total_supply(), // previous supply + 10% - block4.header.inner_rest.total_supply * 110 / 100 + block4.header().total_supply() * 110 / 100 ); } diff --git a/neard/tests/rpc_nodes.rs b/neard/tests/rpc_nodes.rs index e39dc71db4e..5cce64727a7 100644 --- a/neard/tests/rpc_nodes.rs +++ b/neard/tests/rpc_nodes.rs @@ -38,7 +38,7 @@ fn test_tx_propagation() { let (genesis_config, rpc_addrs, clients) = start_nodes(4, &dirs, 2, 2, 10, 0); let view_client = clients[0].1.clone(); - let genesis_hash = genesis_block(genesis_config).hash(); + let genesis_hash = *genesis_block(genesis_config).hash(); let signer = InMemorySigner::from_seed("near.1", KeyType::ED25519, "near.1"); let transaction = SignedTransaction::send_money( 1, @@ -117,7 +117,7 @@ fn test_tx_propagation_through_rpc() { let (genesis_config, rpc_addrs, clients) = start_nodes(4, &dirs, 2, 2, 10, 0); let view_client = clients[0].1.clone(); - let genesis_hash = genesis_block(genesis_config).hash(); + let genesis_hash = *genesis_block(genesis_config).hash(); let signer = InMemorySigner::from_seed("near.1", KeyType::ED25519, "near.1"); let transaction = SignedTransaction::send_money( 1, @@ -182,7 +182,7 @@ fn test_tx_status_with_light_client() { let (genesis_config, rpc_addrs, clients) = start_nodes(4, &dirs, 2, 2, 10, 0); let view_client = clients[0].1.clone(); - let genesis_hash = genesis_block(genesis_config).hash(); + let genesis_hash = *genesis_block(genesis_config).hash(); let signer = InMemorySigner::from_seed("near.1", KeyType::ED25519, "near.1"); let transaction = SignedTransaction::send_money( 1, @@ -255,7 +255,7 @@ fn test_tx_status_with_light_client1() { let (genesis_config, rpc_addrs, clients) = start_nodes(4, &dirs, 2, 2, 10, 0); let view_client = clients[0].1.clone(); - let genesis_hash = genesis_block(genesis_config).hash(); + let genesis_hash = *genesis_block(genesis_config).hash(); let signer = InMemorySigner::from_seed("near.3", KeyType::ED25519, "near.3"); let transaction = SignedTransaction::send_money( 1, @@ -488,7 +488,7 @@ fn test_get_execution_outcome(is_tx_successful: bool) { let (genesis_config, rpc_addrs, clients) = start_nodes(1, &dirs, 1, 1, 1000, 0); let view_client = clients[0].1.clone(); - let genesis_hash = genesis_block(genesis_config).hash(); + let genesis_hash = *genesis_block(genesis_config).hash(); let signer = InMemorySigner::from_seed("near.0", KeyType::ED25519, "near.0"); let transaction = if is_tx_successful { SignedTransaction::send_money( diff --git a/neard/tests/sync_nodes.rs b/neard/tests/sync_nodes.rs index aa81555f824..5441b72deaf 100644 --- a/neard/tests/sync_nodes.rs +++ b/neard/tests/sync_nodes.rs @@ -18,6 +18,7 @@ use near_primitives::merkle::PartialMerkleTree; use near_primitives::transaction::SignedTransaction; use near_primitives::types::{BlockHeightDelta, EpochId, ValidatorStake}; use near_primitives::validator_signer::{InMemoryValidatorSigner, ValidatorSigner}; +use near_primitives::version::PROTOCOL_VERSION; use neard::config::{GenesisExt, TESTING_INIT_STAKE}; use neard::{load_test_config, start_with_config}; use testlib::{genesis_block, test_helpers::heavy_test}; @@ -33,30 +34,30 @@ fn add_blocks( let mut prev = &blocks[blocks.len() - 1]; let mut block_merkle_tree = PartialMerkleTree::default(); for block in blocks.iter() { - block_merkle_tree.insert(block.hash()); + block_merkle_tree.insert(*block.hash()); } for _ in 0..num { - let epoch_id = 
-        let epoch_id = match prev.header.inner_lite.height + 1 {
+        let epoch_id = match prev.header().height() + 1 {
             height if height <= epoch_length => EpochId::default(),
             height => {
-                EpochId(blocks[(((height - 1) / epoch_length - 1) * epoch_length) as usize].hash())
+                EpochId(*blocks[(((height - 1) / epoch_length - 1) * epoch_length) as usize].hash())
             }
         };
         let next_epoch_id = EpochId(
-            blocks[(((prev.header.inner_lite.height) / epoch_length) * epoch_length) as usize]
-                .hash(),
+            *blocks[(((prev.header().height()) / epoch_length) * epoch_length) as usize].hash(),
         );
         let block = Block::produce(
-            &prev.header,
-            prev.header.inner_lite.height + 1,
-            blocks[0].chunks.clone(),
+            PROTOCOL_VERSION,
+            &prev.header(),
+            prev.header().height() + 1,
+            blocks[0].chunks().clone(),
             epoch_id,
             next_epoch_id,
             vec![Some(
                 Approval::new(
-                    prev.hash(),
-                    prev.header.inner_lite.height,
-                    prev.header.inner_lite.height + 1,
+                    *prev.hash(),
+                    prev.header().height(),
+                    prev.header().height() + 1,
                     signer,
                 )
                 .signature,
@@ -75,7 +76,7 @@ fn add_blocks(
             .unwrap(),
             block_merkle_tree.root(),
         );
-        block_merkle_tree.insert(block.hash());
+        block_merkle_tree.insert(*block.hash());
         let _ = client.do_send(NetworkClientMessages::Block(
             block.clone(),
             PeerInfo::random().id,
@@ -234,7 +235,7 @@ fn sync_state_stake_change() {
     let dir2 = tempfile::Builder::new().prefix("sync_state_stake_change_2").tempdir().unwrap();
     let (client1, view_client1) = start_with_config(dir1.path(), near1.clone());
 
-    let genesis_hash = genesis_block(genesis).hash();
+    let genesis_hash = *genesis_block(genesis).hash();
     let signer = Arc::new(InMemorySigner::from_seed("test1", KeyType::ED25519, "test1"));
     let unstake_transaction = SignedTransaction::stake(
         1,
diff --git a/nightly/nightly.txt b/nightly/nightly.txt
index 2b15bfa4acf..4743e41b658 100644
--- a/nightly/nightly.txt
+++ b/nightly/nightly.txt
@@ -35,6 +35,7 @@ pytest --timeout=300 sanity/gc_after_sync.py
 pytest --timeout=300 sanity/gc_sync_after_sync.py
 pytest --timeout=300 sanity/gc_sync_after_sync.py swap_nodes
 pytest --timeout=300 tests/sanity/large_messages.py
+pytest --timeout=300 sanity/upgradable.py
 
 # python tests for smart contract deployment and invocation
 pytest contracts/deploy_call_smart_contract.py
diff --git a/pytest/empty-contract-rs/Cargo.toml b/pytest/empty-contract-rs/Cargo.toml
index 93cad8fc598..cbd27caeeec 100644
--- a/pytest/empty-contract-rs/Cargo.toml
+++ b/pytest/empty-contract-rs/Cargo.toml
@@ -12,7 +12,7 @@ serde = { version = "1", features = ["derive"] }
 serde_json = "1"
 wee_alloc = "0.4.5"
-borsh = "0.6.1"
+borsh = "0.6.2"
 
 near-sdk = { git = "https://github.com/near/near-sdk-rs", branch = "master"}
diff --git a/pytest/lib/branches.py b/pytest/lib/branches.py
index 2d8341eb872..c8fc5bed814 100644
--- a/pytest/lib/branches.py
+++ b/pytest/lib/branches.py
@@ -29,7 +29,7 @@ def escaped(branch):
 
 
 def compile_current():
-    """ Compile current branch """
+    """Compile current branch."""
     branch = current_branch()
     try:
         # Accommodate rename from near to neard
@@ -44,14 +44,16 @@ def compile_current():
     subprocess.check_output(['git', 'checkout', '../Cargo.lock'])
 
 
-def download_binary(branch):
-    url = f'https://s3-us-west-1.amazonaws.com/build.nearprotocol.com/nearcore/Linux/{branch}/near'
+def download_binary(uname, branch):
+    """Download binary for given platform and branch."""
+    url = f'https://s3-us-west-1.amazonaws.com/build.nearprotocol.com/nearcore/{uname}/{branch}/near'
+    print(f'Downloading near & state-viewer for {branch}@{uname}')
     subprocess.check_output([
         'curl', '--proto', '=https', '--tlsv1.2', '-sSfL', url, '-o',
         f'../target/debug/near-{branch}'
     ])
     subprocess.check_output(['chmod', '+x', f'../target/debug/near-{branch}'])
-    url = f'https://s3-us-west-1.amazonaws.com/build.nearprotocol.com/nearcore/Linux/{branch}/state-viewer'
+    url = f'https://s3-us-west-1.amazonaws.com/build.nearprotocol.com/nearcore/{uname}/{branch}/state-viewer'
     subprocess.check_output([
         'curl', '--proto', '=https', '--tlsv1.2', '-sSfL', url, '-o',
         f'../target/debug/state-viewer-{branch}'
@@ -61,11 +63,12 @@ def download_binary(branch):
 
 
 def prepare_ab_test(other_branch):
-    compile_current()
-    if os.environ.get('BUILDKITE') and other_branch in [
-            'master', 'beta', 'stable'
-    ]:
-        download_binary(other_branch)
-    else:
-        compile_binary(other_branch)
+    # Use NEAR_AB_BINARY_EXISTS to avoid rebuild / re-download when testing locally.
+    if not os.environ.get('NEAR_AB_BINARY_EXISTS'):
+        compile_current()
+        uname = os.uname()[0]
+        if other_branch in ['master', 'beta', 'stable'] and uname in ['Linux', 'Darwin']:
+            download_binary(uname, other_branch)
+        else:
+            compile_binary(other_branch)
     return '../target/debug/', [other_branch, escaped(current_branch())]
diff --git a/pytest/lib/utils.py b/pytest/lib/utils.py
index 32fa08b0bd0..f229994a8dd 100644
--- a/pytest/lib/utils.py
+++ b/pytest/lib/utils.py
@@ -8,6 +8,7 @@ import tempfile
 import json
 import hashlib
+import time
 
 
 class TxContext:
@@ -284,7 +285,7 @@ def obj_to_string(obj, extra=' '):
     else:
         return str(obj)
 
-    
+
 def combine_hash(hash1, hash2):
     return hashlib.sha256(hash1 + hash2).digest()
 
@@ -298,3 +299,18 @@ def compute_merkle_root_from_path(path, leaf_hash):
         else:
             res = combine_hash(res, base58.b58decode(node['hash']))
     return res
+
+
+def wait_for_blocks_or_timeout(node, num_blocks, timeout, callback=None, check_sec=1):
+    status = node.get_status()
+    start_height = status['sync_info']['latest_block_height']
+    max_height = 0
+    started = time.time()
+    while max_height < start_height + num_blocks:
+        assert time.time() - started < timeout
+        status = node.get_status()
+        max_height = status['sync_info']['latest_block_height']
+        if callback is not None:
+            if callback():
+                break
+        time.sleep(check_sec)
diff --git a/pytest/tests/sanity/backward_compatible.py b/pytest/tests/sanity/backward_compatible.py
index a717d73ac8f..b7175087294 100755
--- a/pytest/tests/sanity/backward_compatible.py
+++ b/pytest/tests/sanity/backward_compatible.py
@@ -22,8 +22,9 @@ def main():
     shutil.rmtree(node_root)
     subprocess.check_output('mkdir -p /tmp/near', shell=True)
 
-    near_root, (stable_branch,
-                current_branch) = branches.prepare_ab_test("beta")
+    # near_root, (stable_branch,
+    #             current_branch) = branches.prepare_ab_test("beta")
+    (near_root, (stable_branch, current_branch)) = ('../target/debug', ('beta', 'upgradability'))
 
     # Setup local network.
     subprocess.call([
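The `wait_for_blocks_or_timeout` helper added to `pytest/lib/utils.py` above polls a node's status until the chain has advanced by `num_blocks`, asserting if `timeout` seconds pass first; the optional `callback` can stop the wait early. A minimal usage sketch, assuming a `node` object from the pytest `cluster` module as used by the tests in this change (the `reached_goal` callback name is illustrative):

    from utils import wait_for_blocks_or_timeout

    def reached_goal():
        # Returning True here breaks out of the wait loop early.
        return False

    # Wait for roughly 20 new blocks, failing the test after 120 seconds.
    wait_for_blocks_or_timeout(node, 20, 120, callback=reached_goal)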
+""" + +import os +import subprocess +import shutil +import sys + +sys.path.append('lib') + +import branches +import cluster +from utils import wait_for_blocks_or_timeout + + +def main(): + node_root = "/tmp/near/upgradable" + if os.path.exists(node_root): + shutil.rmtree(node_root) + subprocess.check_output('mkdir -p /tmp/near', shell=True) + + near_root, (stable_branch, + current_branch) = branches.prepare_ab_test("beta") + + # Setup local network. + print([ + "%snear-%s" % (near_root, stable_branch), + "--home=%s" % node_root, "testnet", "--v", "4", "--prefix", "test" + ]) + subprocess.call([ + "%snear-%s" % (near_root, stable_branch), + "--home=%s" % node_root, "testnet", "--v", "4", "--prefix", "test" + ]) + genesis_config_changes = [("epoch_length", 10)] + node_dirs = [os.path.join(node_root, 'test%d' % i) for i in range(4)] + for i, node_dir in enumerate(node_dirs): + cluster.apply_genesis_changes(node_dir, genesis_config_changes) + + # Start 3 stable nodes and one current node. + config = { + "local": True, + 'near_root': near_root, + 'binary_name': "near-%s" % stable_branch + } + nodes = [cluster.spin_up_node( + config, near_root, node_dirs[0], 0, None, None)] + for i in range(1, 3): + nodes.append(cluster.spin_up_node( + config, near_root, node_dirs[i], i, nodes[0].node_key.pk, nodes[0].addr())) + config["binary_name"] = "near-%s" % current_branch + nodes.append(cluster.spin_up_node( + config, near_root, node_dirs[3], 3, nodes[0].node_key.pk, nodes[0].addr())) + + wait_for_blocks_or_timeout(nodes[0], 20, 120) + + # Restart stable nodes into new version. + for i in range(3): + nodes[i].kill() + nodes[i].binary_name = config['binary_name'] + nodes[i].start(nodes[0].node_key.pk, nodes[0].addr()) + + wait_for_blocks_or_timeout(nodes[3], 30, 120) + status0 = nodes[0].get_status() + status3 = nodes[3].get_status() + protocol_version = status0.get("protocol_version", 14) + latest_protocol_version = status3["latest_protocol_version"] + assert protocol_version == latest_protocol_version,\ + "Latest protocol version %d should match active protocol version %d" % (latest_protocol_version, protocol_version) + + +if __name__ == "__main__": + main() diff --git a/runtime/near-vm-errors/Cargo.toml b/runtime/near-vm-errors/Cargo.toml index ec9952bbc93..2495b97ee43 100644 --- a/runtime/near-vm-errors/Cargo.toml +++ b/runtime/near-vm-errors/Cargo.toml @@ -15,7 +15,7 @@ Error that can occur inside Near Runtime encapsulated in a separate crate. 
diff --git a/runtime/near-vm-errors/Cargo.toml b/runtime/near-vm-errors/Cargo.toml
index ec9952bbc93..2495b97ee43 100644
--- a/runtime/near-vm-errors/Cargo.toml
+++ b/runtime/near-vm-errors/Cargo.toml
@@ -15,7 +15,7 @@ Error that can occur inside Near Runtime encapsulated in a separate crate. Might
 
 [dependencies]
 serde = { version = "1", features = ["derive"] }
-borsh = "0.6.1"
+borsh = "0.6.2"
 
 near-rpc-error-macro = { path = "../../tools/rpctypegen/macro", version = "0.1.0" }
diff --git a/runtime/runtime-params-estimator/Cargo.toml b/runtime/runtime-params-estimator/Cargo.toml
index 92c3722dc6c..e44b44cdffa 100644
--- a/runtime/runtime-params-estimator/Cargo.toml
+++ b/runtime/runtime-params-estimator/Cargo.toml
@@ -14,7 +14,7 @@ serde_json = "1"
 csv = "1.1.1"
 clap = "2.33"
-borsh = "0.6.1"
+borsh = "0.6.2"
 num-rational = "0.2.4"
 
 near-runtime-fees = { path = "../../runtime/near-runtime-fees" }
diff --git a/runtime/runtime/Cargo.toml b/runtime/runtime/Cargo.toml
index 0a6b1eb6f5c..98137e4b6b7 100644
--- a/runtime/runtime/Cargo.toml
+++ b/runtime/runtime/Cargo.toml
@@ -16,7 +16,7 @@ sha3 = "0.8"
 lazy_static = "1.4"
 num-rational = "0.2.4"
-borsh = "0.6.1"
+borsh = "0.6.2"
 cached = "0.12.0"
 
 near-crypto = { path = "../../core/crypto" }
diff --git a/scripts/migrations/22-protocol-upgrade.py b/scripts/migrations/22-protocol-upgrade.py
new file mode 100644
index 00000000000..c2fb2df30ed
--- /dev/null
+++ b/scripts/migrations/22-protocol-upgrade.py
@@ -0,0 +1,27 @@
+"""
+Protocol upgrade parameters, including:
+* Adding `latest_protocol_version` to BlockHeaderInnerRest.
+* Collecting this information inside EpochManager.
+* Switching protocol version to the next one based on `protocol_upgrade_stake_threshold` and `protocol_upgrade_num_epochs`.
+* Removing `config_version`.
+"""
+
+import sys
+import os
+import json
+from collections import OrderedDict
+
+home = sys.argv[1]
+output_home = sys.argv[2]
+
+config = json.load(open(os.path.join(home, 'output.json')), object_pairs_hook=OrderedDict)
+
+assert config['protocol_version'] == 21
+
+config['protocol_version'] = 22
+
+config.pop('config_version')
+config['protocol_upgrade_stake_threshold'] = [4, 5]
+config['protocol_upgrade_num_epochs'] = 2
+
+json.dump(config, open(os.path.join(output_home, 'output.json'), 'w'), indent=2)
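The two genesis parameters written by this migration control when the network adopts a newer protocol version: `protocol_upgrade_stake_threshold` (here the rational 4/5) is the fraction of stake that must already advertise a newer `latest_protocol_version`, and `protocol_upgrade_num_epochs` delays the actual switch by that many epochs once the threshold is met. A back-of-the-envelope sketch of the threshold check, for illustration only (this is not the EpochManager implementation; the function name and the tie-breaking rule are assumptions):

    from fractions import Fraction

    def next_protocol_version(current_version, stake_by_version, threshold=Fraction(4, 5)):
        """Decide which protocol version upcoming epochs should target.

        stake_by_version maps each validator-advertised latest_protocol_version
        to the total stake behind it, e.g. {21: 30_000, 22: 170_000}.
        """
        total = sum(stake_by_version.values())
        # Stake that already supports something newer than the current version.
        upgraded = sum(stake for v, stake in stake_by_version.items() if v > current_version)
        if total > 0 and Fraction(upgraded, total) >= threshold:
            # Move to the smallest newer version the upgraded stake agrees on.
            return min(v for v in stake_by_version if v > current_version)
        return current_version

    # Example: 85% of stake advertises version 22 while the network runs 21,
    # so the switch would be scheduled protocol_upgrade_num_epochs (2) epochs out.
    assert next_protocol_version(21, {21: 30_000, 22: 170_000}) == 22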
diff --git a/test-utils/loadtester/Cargo.toml b/test-utils/loadtester/Cargo.toml
index 86c2c904270..bd344ce512d 100644
--- a/test-utils/loadtester/Cargo.toml
+++ b/test-utils/loadtester/Cargo.toml
@@ -16,7 +16,7 @@ reqwest = { version = "0.10", features = ["rustls-tls", "blocking", "json"] }
 git-version = "0.3.2"
 byteorder = "1.2"
-borsh = "0.6.1"
+borsh = "0.6.2"
 
 near-crypto = { path = "../../core/crypto" }
 near-primitives = { path = "../../core/primitives" }
diff --git a/test-utils/loadtester/src/main.rs b/test-utils/loadtester/src/main.rs
index 760cf68339f..f3a560bdec2 100644
--- a/test-utils/loadtester/src/main.rs
+++ b/test-utils/loadtester/src/main.rs
@@ -13,8 +13,9 @@ use log::info;
 
 use git_version::git_version;
 use near_crypto::Signer;
-use near_primitives::types::{NumSeats, NumShards, Version};
+use near_primitives::types::{NumSeats, NumShards};
 use near_primitives::validator_signer::ValidatorSigner;
+use near_primitives::version::Version;
 use near_store::{create_store, ColState};
 use neard::config::create_testnet_configs;
 use neard::{get_default_home, get_store_path};
diff --git a/test-utils/state-viewer/src/main.rs b/test-utils/state-viewer/src/main.rs
index dc77a077ef5..249585bde32 100644
--- a/test-utils/state-viewer/src/main.rs
+++ b/test-utils/state-viewer/src/main.rs
@@ -5,6 +5,7 @@ use std::sync::Arc;
 
 use ansi_term::Color::Red;
 use clap::{App, Arg, SubCommand};
+use near_chain::types::BlockHeaderInfo;
 use near_chain::{ChainStore, ChainStoreAccess, RuntimeAdapter};
 use near_logger_utils::init_integration_logger;
 use near_network::peer_store::PeerStore;
@@ -59,13 +60,10 @@ fn load_trie_stop_at_height(
                         continue;
                     }
                 };
-                let last_final_block_hash = chain_store
-                    .get_block_header(&cur_block_hash)
-                    .unwrap()
-                    .inner_rest
-                    .last_final_block;
+                let last_final_block_hash =
+                    *chain_store.get_block_header(&cur_block_hash).unwrap().last_final_block();
                 let last_final_block = chain_store.get_block(&last_final_block_hash).unwrap();
-                if last_final_block.header.inner_lite.height >= height {
+                if last_final_block.header().height() >= height {
                     break last_final_block.clone();
                 } else {
                     cur_height += 1;
@@ -75,8 +73,8 @@ fn load_trie_stop_at_height(
         }
         None => chain_store.get_block(&head.last_block_hash).unwrap().clone(),
     };
-    let state_roots = last_block.chunks.iter().map(|chunk| chunk.inner.prev_state_root).collect();
-    (runtime, state_roots, last_block.header)
+    let state_roots = last_block.chunks().iter().map(|chunk| chunk.inner.prev_state_root).collect();
+    (runtime, state_roots, last_block.header().clone())
 }
 
 pub fn format_hash(h: CryptoHash) -> String {
@@ -104,12 +102,12 @@ fn print_chain(
         if let Ok(block_hash) = chain_store.get_block_hash_by_height(height) {
             let header = chain_store.get_block_header(&block_hash).unwrap().clone();
             if height == 0 {
-                println!("{: >3} {}", header.inner_lite.height, format_hash(header.hash()));
+                println!("{: >3} {}", header.height(), format_hash(*header.hash()));
             } else {
-                let parent_header = chain_store.get_block_header(&header.prev_hash).unwrap();
-                let epoch_id = runtime.get_epoch_id_from_prev_block(&header.prev_hash).unwrap();
+                let parent_header = chain_store.get_block_header(header.prev_hash()).unwrap();
+                let epoch_id = runtime.get_epoch_id_from_prev_block(header.prev_hash()).unwrap();
                 cur_epoch_id = Some(epoch_id.clone());
-                if runtime.is_next_block_epoch_start(&header.prev_hash).unwrap() {
+                if runtime.is_next_block_epoch_start(header.prev_hash()).unwrap() {
                     println!("{:?}", account_id_to_blocks);
                     account_id_to_blocks = HashMap::new();
                     println!(
@@ -121,18 +119,18 @@ fn print_chain(
                     );
                 }
                 let block_producer =
-                    runtime.get_block_producer(&epoch_id, header.inner_lite.height).unwrap();
+                    runtime.get_block_producer(&epoch_id, header.height()).unwrap();
                 account_id_to_blocks
                     .entry(block_producer.clone())
                     .and_modify(|e| *e += 1)
                     .or_insert(1);
                 println!(
                     "{: >3} {} | {: >10} | parent: {: >3} {}",
-                    header.inner_lite.height,
-                    format_hash(header.hash()),
+                    header.height(),
+                    format_hash(*header.hash()),
                     block_producer,
-                    parent_header.inner_lite.height,
-                    format_hash(parent_header.hash()),
+                    parent_header.height(),
+                    format_hash(*parent_header.hash()),
                 );
             }
         } else {
@@ -171,17 +169,10 @@ fn replay_chain(
         if let Ok(block_hash) = chain_store.get_block_hash_by_height(height) {
             let header = chain_store.get_block_header(&block_hash).unwrap().clone();
             runtime
-                .add_validator_proposals(
-                    header.prev_hash,
-                    header.hash(),
-                    header.inner_rest.random_value,
-                    header.inner_lite.height,
-                    chain_store.get_block_height(&header.inner_rest.last_final_block).unwrap(),
-                    header.inner_rest.validator_proposals,
-                    vec![],
-                    header.inner_rest.chunk_mask,
-                    header.inner_rest.total_supply,
-                )
+                .add_validator_proposals(BlockHeaderInfo::new(
+                    &header,
+                    chain_store.get_block_height(&header.last_final_block()).unwrap(),
+                ))
                 .unwrap();
         }
     }
@@ -261,10 +252,7 @@ fn main() {
         }
         ("state", Some(_args)) => {
             let (runtime, state_roots, header) = load_trie(store, &home_dir, &near_config);
-            println!(
-                "Storage roots are {:?}, block height is {}",
-                state_roots, header.inner_lite.height
-            );
+            println!("Storage roots are {:?}, block height is {}", state_roots, header.height());
             for (shard_id, state_root) in state_roots.iter().enumerate() {
                 let trie = runtime.get_trie_for_shard(shard_id as u64);
                 let trie = TrieIterator::new(&trie, &state_root).unwrap();
@@ -280,7 +268,7 @@ fn main() {
             let height = args.value_of("height").map(|s| s.parse::<u64>().unwrap());
             let (runtime, state_roots, header) =
                 load_trie_stop_at_height(store, home_dir, &near_config, height);
-            let height = header.inner_lite.height;
+            let height = header.height();
             let home_dir = PathBuf::from(&home_dir);
 
             let new_genesis =
diff --git a/test-utils/state-viewer/src/state_dump.rs b/test-utils/state-viewer/src/state_dump.rs
index 13d0a1543ef..2db5a60263e 100644
--- a/test-utils/state-viewer/src/state_dump.rs
+++ b/test-utils/state-viewer/src/state_dump.rs
@@ -16,14 +16,12 @@ pub fn state_dump(
 ) -> Genesis {
     println!(
         "Generating genesis from state data of #{} / {}",
-        last_block_header.inner_lite.height, last_block_header.hash
+        last_block_header.height(),
+        last_block_header.hash()
     );
-    let genesis_height = last_block_header.inner_lite.height + 1;
+    let genesis_height = last_block_header.height() + 1;
     let block_producers = runtime
-        .get_epoch_block_producers_ordered(
-            &last_block_header.inner_lite.epoch_id,
-            &last_block_header.hash,
-        )
+        .get_epoch_block_producers_ordered(&last_block_header.epoch_id(), last_block_header.hash())
         .unwrap();
     let validators = block_producers
         .into_iter()
@@ -112,7 +110,7 @@ mod test {
     fn test_dump_state_preserve_validators() {
         let epoch_length = 4;
         let (store, genesis, mut env) = setup(epoch_length);
-        let genesis_hash = env.clients[0].chain.genesis().hash();
+        let genesis_hash = *env.clients[0].chain.genesis().hash();
         let signer = InMemorySigner::from_seed("test1", KeyType::ED25519, "test1");
         let tx = SignedTransaction::stake(
             1,
@@ -139,7 +137,7 @@ mod test {
         );
         let last_block = env.clients[0].chain.get_block(&head.last_block_hash).unwrap().clone();
         let state_roots =
-            last_block.chunks.iter().map(|chunk| chunk.inner.prev_state_root).collect();
+            last_block.chunks().iter().map(|chunk| chunk.inner.prev_state_root).collect();
         let runtime = NightshadeRuntime::new(
             Path::new("."),
             store.clone(),
             vec![],
             vec![],
         );
-        let new_genesis = state_dump(runtime, state_roots, last_block.header, &genesis.config);
+        let new_genesis =
+            state_dump(runtime, state_roots, last_block.header().clone(), &genesis.config);
         assert_eq!(new_genesis.config.validators.len(), 2);
         validate_genesis(&new_genesis);
     }
@@ -157,7 +156,7 @@ mod test {
     fn test_dump_state_return_locked() {
         let epoch_length = 4;
         let (store, genesis, mut env) = setup(epoch_length);
-        let genesis_hash = env.clients[0].chain.genesis().hash();
+        let genesis_hash = *env.clients[0].chain.genesis().hash();
         let signer = InMemorySigner::from_seed("test1", KeyType::ED25519, "test1");
         let tx = SignedTransaction::stake(
             1,
@@ -174,7 +173,7 @@ mod test {
         let head = env.clients[0].chain.head().unwrap();
         let last_block = env.clients[0].chain.get_block(&head.last_block_hash).unwrap().clone();
         let state_roots =
-            last_block.chunks.iter().map(|chunk| chunk.inner.prev_state_root).collect();
+            last_block.chunks().iter().map(|chunk| chunk.inner.prev_state_root).collect();
         let runtime = NightshadeRuntime::new(
             Path::new("."),
             store.clone(),
             vec![],
             vec![],
         );
-        let new_genesis = state_dump(runtime, state_roots, last_block.header, &genesis.config);
+        let new_genesis =
+            state_dump(runtime, state_roots, last_block.header().clone(), &genesis.config);
         assert_eq!(
             new_genesis
                 .config
@@ -218,7 +218,7 @@ mod test {
         chain_genesis.epoch_length = epoch_length;
         chain_genesis.gas_limit = genesis.config.gas_limit;
         let mut env = TestEnv::new_with_runtime(chain_genesis, 2, 1, runtimes);
-        let genesis_hash = env.clients[0].chain.genesis().hash();
+        let genesis_hash = *env.clients[0].chain.genesis().hash();
         let signer = InMemorySigner::from_seed("test1", KeyType::ED25519, "test1");
         let tx = SignedTransaction::send_money(
             1,
@@ -241,11 +241,11 @@ mod test {
         }
         let last_block = blocks.pop().unwrap();
         let state_roots =
-            last_block.chunks.iter().map(|chunk| chunk.inner.prev_state_root).collect::<Vec<_>>();
+            last_block.chunks().iter().map(|chunk| chunk.inner.prev_state_root).collect::<Vec<_>>();
         let runtime2 = create_runtime(store2);
 
         let _ =
-            state_dump(runtime2, state_roots.clone(), last_block.header.clone(), &genesis.config);
+            state_dump(runtime2, state_roots.clone(), last_block.header().clone(), &genesis.config);
     }
 
     #[test]
@@ -267,7 +267,7 @@ mod test {
         let mut chain_genesis = ChainGenesis::test();
         chain_genesis.epoch_length = epoch_length;
         let mut env = TestEnv::new_with_runtime(chain_genesis, 1, 2, runtimes);
-        let genesis_hash = env.clients[0].chain.genesis().hash();
+        let genesis_hash = *env.clients[0].chain.genesis().hash();
         let signer = InMemorySigner::from_seed("test1", KeyType::ED25519, "test1");
         let tx = SignedTransaction::stake(
             1,
@@ -294,7 +294,7 @@ mod test {
         );
         let last_block = env.clients[0].chain.get_block(&head.last_block_hash).unwrap().clone();
         let state_roots =
-            last_block.chunks.iter().map(|chunk| chunk.inner.prev_state_root).collect();
+            last_block.chunks().iter().map(|chunk| chunk.inner.prev_state_root).collect();
         let runtime = NightshadeRuntime::new(
             Path::new("."),
             store.clone(),
             vec![],
             vec![],
         );
-        let new_genesis = state_dump(runtime, state_roots, last_block.header, &genesis.config);
+        let new_genesis =
+            state_dump(runtime, state_roots, last_block.header().clone(), &genesis.config);
         assert_eq!(new_genesis.config.validators.len(), 2);
         validate_genesis(&new_genesis);
     }
diff --git a/test-utils/testlib/Cargo.toml b/test-utils/testlib/Cargo.toml
index 7e2a9ace42b..5925d8060af 100644
--- a/test-utils/testlib/Cargo.toml
+++ b/test-utils/testlib/Cargo.toml
@@ -20,7 +20,7 @@ tempfile = "3"
 assert_matches = "1.3"
 num-rational = "0.2.4"
-borsh = "0.6.1"
+borsh = "0.6.2"
 
 near-logger-utils = { path = "../../test-utils/logger" }
 near-chain-configs = { path = "../../core/chain-configs" }
diff --git a/test-utils/testlib/src/lib.rs b/test-utils/testlib/src/lib.rs
index 4e6eb9a006a..8d706706c3c 100644
--- a/test-utils/testlib/src/lib.rs
+++ b/test-utils/testlib/src/lib.rs
@@ -24,7 +24,7 @@ pub mod user;
 
 /// Compute genesis hash from genesis.
 pub fn genesis_hash(genesis: Arc<Genesis>) -> CryptoHash {
-    genesis_header(genesis).hash
+    *genesis_header(genesis).hash()
 }
 
 /// Utility to generate genesis header from config for testing purposes.
@@ -44,7 +44,7 @@ pub fn genesis_block(genesis: Arc<Genesis>) -> Block {
     let chain_genesis = ChainGenesis::from(&genesis);
     let runtime = Arc::new(NightshadeRuntime::new(dir.path(), store, genesis, vec![], vec![]));
     let mut chain = Chain::new(runtime, &chain_genesis, DoomslugThresholdMode::TwoThirds).unwrap();
-    chain.get_block(&chain.genesis().hash()).unwrap().clone()
+    chain.get_block(&chain.genesis().hash().clone()).unwrap().clone()
 }
 
 pub fn start_nodes(