From 0c8e91eca2e5a984be5e5049d4f137b37197d8ee Mon Sep 17 00:00:00 2001 From: quake Date: Mon, 24 Apr 2023 19:05:32 +0900 Subject: [PATCH] refactor: remove sync struct HeaderView --- resource/ckb.toml | 2 +- rpc/src/module/chain.rs | 2 +- rpc/src/module/stats.rs | 2 +- store/src/data_loader_wrapper.rs | 19 +- sync/src/relayer/compact_block_process.rs | 35 +- .../relayer/tests/compact_block_process.rs | 6 +- sync/src/synchronizer/block_fetcher.rs | 23 +- sync/src/synchronizer/headers_process.rs | 17 +- sync/src/synchronizer/mod.rs | 16 +- sync/src/tests/synchronizer/functions.rs | 5 +- sync/src/tests/types.rs | 10 +- sync/src/types/header_map/backend.rs | 10 +- sync/src/types/header_map/backend_sled.rs | 25 +- sync/src/types/header_map/kernel_lru.rs | 10 +- sync/src/types/header_map/memory.rs | 96 ++++- sync/src/types/header_map/mod.rs | 14 +- sync/src/types/mod.rs | 371 ++++++++++-------- traits/src/header_provider.rs | 46 ++- traits/src/lib.rs | 2 +- util/app-config/src/configs/network.rs | 4 +- util/snapshot/src/lib.rs | 16 +- util/test-chain-utils/src/median_time.rs | 24 +- verification/src/header_verifier.rs | 34 +- verification/src/tests/header_verifier.rs | 28 +- verification/src/transaction_verifier.rs | 35 +- 25 files changed, 515 insertions(+), 337 deletions(-) diff --git a/resource/ckb.toml b/resource/ckb.toml index 7cb1f2321d0..89ec89f6fbe 100644 --- a/resource/ckb.toml +++ b/resource/ckb.toml @@ -101,7 +101,7 @@ bootnode_mode = false support_protocols = ["Ping", "Discovery", "Identify", "Feeler", "DisconnectMessage", "Sync", "Relay", "Time", "Alert", "LightClient", "Filter"] # [network.sync.header_map] -# memory_limit = "600MB" +# memory_limit = "256MB" [rpc] # By default RPC only binds to localhost, thus it only allows accessing from the same machine. diff --git a/rpc/src/module/chain.rs b/rpc/src/module/chain.rs index 0e6d2233692..66d95ac4ade 100644 --- a/rpc/src/module/chain.rs +++ b/rpc/src/module/chain.rs @@ -10,7 +10,7 @@ use ckb_logger::error; use ckb_reward_calculator::RewardCalculator; use ckb_shared::{shared::Shared, Snapshot}; use ckb_store::{data_loader_wrapper::AsDataLoader, ChainStore}; -use ckb_traits::HeaderProvider; +use ckb_traits::HeaderFieldsProvider; use ckb_types::core::tx_pool::TransactionWithStatus; use ckb_types::{ core::{ diff --git a/rpc/src/module/stats.rs b/rpc/src/module/stats.rs index b5d218507f3..bf0441835c3 100644 --- a/rpc/src/module/stats.rs +++ b/rpc/src/module/stats.rs @@ -1,7 +1,7 @@ use ckb_jsonrpc_types::{AlertMessage, ChainInfo, DeploymentInfo, DeploymentPos, DeploymentsInfo}; use ckb_network_alert::notifier::Notifier as AlertNotifier; use ckb_shared::shared::Shared; -use ckb_traits::HeaderProvider; +use ckb_traits::HeaderFieldsProvider; use ckb_types::prelude::Unpack; use ckb_util::Mutex; use jsonrpc_core::Result; diff --git a/store/src/data_loader_wrapper.rs b/store/src/data_loader_wrapper.rs index 77d9e2bb5ef..084ba6b0604 100644 --- a/store/src/data_loader_wrapper.rs +++ b/store/src/data_loader_wrapper.rs @@ -1,6 +1,8 @@ //! 
TODO(doc): @quake use crate::ChainStore; -use ckb_traits::{CellDataProvider, EpochProvider, HeaderProvider}; +use ckb_traits::{ + CellDataProvider, EpochProvider, HeaderFields, HeaderFieldsProvider, HeaderProvider, +}; use ckb_types::{ bytes::Bytes, core::{BlockExt, BlockNumber, EpochExt, HeaderView}, @@ -56,6 +58,21 @@ where } } +impl HeaderFieldsProvider for DataLoaderWrapper +where + T: ChainStore, +{ + fn get_header_fields(&self, hash: &Byte32) -> Option { + self.0.get_block_header(hash).map(|header| HeaderFields { + number: header.number(), + epoch: header.epoch(), + parent_hash: header.data().raw().parent_hash(), + timestamp: header.timestamp(), + hash: header.hash(), + }) + } +} + impl EpochProvider for DataLoaderWrapper where T: ChainStore, diff --git a/sync/src/relayer/compact_block_process.rs b/sync/src/relayer/compact_block_process.rs index 42057821de3..458bb82f505 100644 --- a/sync/src/relayer/compact_block_process.rs +++ b/sync/src/relayer/compact_block_process.rs @@ -9,7 +9,7 @@ use ckb_chain_spec::consensus::Consensus; use ckb_logger::{self, debug_target}; use ckb_network::{CKBProtocolContext, PeerIndex}; use ckb_systemtime::unix_time_as_millis; -use ckb_traits::HeaderProvider; +use ckb_traits::{HeaderFields, HeaderFieldsProvider}; use ckb_types::{ core::{EpochNumberWithFraction, HeaderView}, packed::{self, Byte32, CompactBlock}, @@ -167,11 +167,11 @@ impl<'a> CompactBlockProcess<'a> { } struct CompactBlockMedianTimeView<'a> { - fn_get_pending_header: Box Option + 'a>, + fn_get_pending_header: Box Option + 'a>, } -impl<'a> HeaderProvider for CompactBlockMedianTimeView<'a> { - fn get_header(&self, hash: &packed::Byte32) -> Option { +impl<'a> HeaderFieldsProvider for CompactBlockMedianTimeView<'a> { + fn get_header_fields(&self, hash: &packed::Byte32) -> Option { // Note: don't query store because we already did that in `fn_get_pending_header -> get_header_view`. 
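// The injected closure looks up the relayer's pending compact blocks first and only
// falls back to the shared header index, so the median-time check also covers headers
// that have not been stored yet.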
(self.fn_get_pending_header)(hash.to_owned()) } @@ -233,7 +233,7 @@ fn contextual_check( if status.contains(BlockStatus::BLOCK_STORED) { // update last common header and best known let parent = shared - .get_header_view(&compact_block_header.data().raw().parent_hash(), Some(true)) + .get_header_index_view(&compact_block_header.data().raw().parent_hash(), true) .expect("parent block must exist"); let header_index = HeaderIndex::new( @@ -253,9 +253,9 @@ fn contextual_check( } let store_first = tip.number() + 1 >= compact_block_header.number(); - let parent = shared.get_header_view( + let parent = shared.get_header_index_view( &compact_block_header.data().raw().parent_hash(), - Some(store_first), + store_first, ); if parent.is_none() { debug_target!( @@ -287,11 +287,26 @@ fn contextual_check( |block_hash| { pending_compact_blocks .get(&block_hash) - .map(|(compact_block, _, _)| compact_block.header().into_view()) + .map(|(compact_block, _, _)| { + let header = compact_block.header().into_view(); + HeaderFields { + hash: header.hash(), + number: header.number(), + epoch: header.epoch(), + timestamp: header.timestamp(), + parent_hash: header.parent_hash(), + } + }) .or_else(|| { shared - .get_header_view(&block_hash, None) - .map(|header_view| header_view.into_inner()) + .get_header_index_view(&block_hash, false) + .map(|header| HeaderFields { + hash: header.hash(), + number: header.number(), + epoch: header.epoch(), + timestamp: header.timestamp(), + parent_hash: header.parent_hash(), + }) }) } }; diff --git a/sync/src/relayer/tests/compact_block_process.rs b/sync/src/relayer/tests/compact_block_process.rs index a4d906b4929..6904c649c45 100644 --- a/sync/src/relayer/tests/compact_block_process.rs +++ b/sync/src/relayer/tests/compact_block_process.rs @@ -404,9 +404,9 @@ fn test_accept_block() { fn test_ignore_a_too_old_block() { let (relayer, _) = build_chain(1804); - let active_chain = relayer.shared.active_chain(); - let parent = active_chain.tip_header(); - let parent = active_chain.get_ancestor(&parent.hash(), 2).unwrap(); + let snapshot = relayer.shared.shared().snapshot(); + let parent = snapshot.tip_header(); + let parent = snapshot.get_ancestor(&parent.hash(), 2).unwrap(); let too_old_block = new_header_builder(relayer.shared.shared(), &parent).build(); diff --git a/sync/src/synchronizer/block_fetcher.rs b/sync/src/synchronizer/block_fetcher.rs index 5875993dc72..ec0688f238f 100644 --- a/sync/src/synchronizer/block_fetcher.rs +++ b/sync/src/synchronizer/block_fetcher.rs @@ -1,13 +1,13 @@ use crate::block_status::BlockStatus; use crate::synchronizer::Synchronizer; -use crate::types::{ActiveChain, BlockNumberAndHash, HeaderIndex, IBDState}; +use crate::types::{ActiveChain, BlockNumberAndHash, HeaderIndex, HeaderIndexView, IBDState}; use ckb_constant::sync::{ BLOCK_DOWNLOAD_WINDOW, CHECK_POINT_WINDOW, INIT_BLOCKS_IN_TRANSIT_PER_PEER, }; use ckb_logger::{debug, trace}; use ckb_network::PeerIndex; use ckb_systemtime::unix_time_as_millis; -use ckb_types::{core, packed}; +use ckb_types::packed; use std::cmp::min; pub struct BlockFetcher<'a> { @@ -86,15 +86,10 @@ impl<'a> BlockFetcher<'a> { while let Some(hash) = state.peers().take_unknown_last(self.peer) { // Here we need to first try search from headermap, if not, fallback to search from the db. 
// if not search from db, it can stuck here when the headermap may have been removed just as the block was downloaded - if let Some(header) = self.synchronizer.shared.get_header_view(&hash, None) { - let header_index = HeaderIndex::new( - header.number(), - header.hash(), - header.total_difficulty().clone(), - ); + if let Some(header) = self.synchronizer.shared.get_header_index_view(&hash, false) { state .peers() - .may_set_best_known_header(self.peer, header_index); + .may_set_best_known_header(self.peer, header.as_header_index()); } else { state.peers().insert_unknown_header_hash(self.peer, hash); break; @@ -155,7 +150,7 @@ impl<'a> BlockFetcher<'a> { // So we can skip the search of this space directly self.synchronizer .peers() - .set_last_common_header(self.peer, (&header).into()); + .set_last_common_header(self.peer, header.number_and_hash()); end = min(best_known.number(), header.number() + BLOCK_DOWNLOAD_WINDOW); break; } else if status.contains(BlockStatus::BLOCK_RECEIVED) { @@ -171,11 +166,7 @@ impl<'a> BlockFetcher<'a> { header = self .synchronizer .shared - .get_header_view( - &parent_hash, - Some(status.contains(BlockStatus::BLOCK_STORED)), - )? - .into_inner(); + .get_header_index_view(&parent_hash, false)?; } // Move `start` forward @@ -209,7 +200,7 @@ impl<'a> BlockFetcher<'a> { Some( fetch .chunks(INIT_BLOCKS_IN_TRANSIT_PER_PEER) - .map(|headers| headers.iter().map(core::HeaderView::hash).collect()) + .map(|headers| headers.iter().map(HeaderIndexView::hash).collect()) .collect(), ) } diff --git a/sync/src/synchronizer/headers_process.rs b/sync/src/synchronizer/headers_process.rs index e9198e4e889..ec0b7323352 100644 --- a/sync/src/synchronizer/headers_process.rs +++ b/sync/src/synchronizer/headers_process.rs @@ -6,7 +6,7 @@ use ckb_constant::sync::MAX_HEADERS_LEN; use ckb_error::Error; use ckb_logger::{debug, log_enabled, warn, Level}; use ckb_network::{CKBProtocolContext, PeerIndex}; -use ckb_traits::HeaderProvider; +use ckb_traits::HeaderFieldsProvider; use ckb_types::{core, packed, prelude::*}; use ckb_verification::{HeaderError, HeaderVerifier}; use ckb_verification_traits::Verifier; @@ -209,14 +209,14 @@ impl<'a> HeadersProcess<'a> { } } -pub struct HeaderAcceptor<'a, DL: HeaderProvider> { +pub struct HeaderAcceptor<'a, DL: HeaderFieldsProvider> { header: &'a core::HeaderView, active_chain: ActiveChain, peer: PeerIndex, verifier: HeaderVerifier<'a, DL>, } -impl<'a, DL: HeaderProvider> HeaderAcceptor<'a, DL> { +impl<'a, DL: HeaderFieldsProvider> HeaderAcceptor<'a, DL> { pub fn new( header: &'a core::HeaderView, peer: PeerIndex, @@ -283,15 +283,16 @@ impl<'a, DL: HeaderProvider> HeaderAcceptor<'a, DL> { // type should we return? 
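// A header already marked HEADER_VALID has been verified before, so its cached index is
// reused to refresh the peer's best-known header and the acceptor returns early.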
let status = self.active_chain.get_block_status(&self.header.hash()); if status.contains(BlockStatus::HEADER_VALID) { - let header_view = shared - .get_header_view( + let header_index = shared + .get_header_index_view( &self.header.hash(), - Some(status.contains(BlockStatus::BLOCK_STORED)), + status.contains(BlockStatus::BLOCK_STORED), ) - .expect("header with HEADER_VALID should exist"); + .expect("header with HEADER_VALID should exist") + .as_header_index(); state .peers() - .may_set_best_known_header(self.peer, header_view.as_header_index()); + .may_set_best_known_header(self.peer, header_index); return result; } diff --git a/sync/src/synchronizer/mod.rs b/sync/src/synchronizer/mod.rs index 89b3beb46f9..82f7f9e4888 100644 --- a/sync/src/synchronizer/mod.rs +++ b/sync/src/synchronizer/mod.rs @@ -21,7 +21,7 @@ pub(crate) use self::headers_process::HeadersProcess; pub(crate) use self::in_ibd_process::InIBDProcess; use crate::block_status::BlockStatus; -use crate::types::{HeadersSyncController, IBDState, Peers, SyncShared}; +use crate::types::{HeaderIndexView, HeadersSyncController, IBDState, Peers, SyncShared}; use crate::utils::{metric_ckb_message_bytes, send_message_to, MetricDirection}; use crate::{Status, StatusCode}; @@ -305,7 +305,7 @@ impl Synchronizer { self.shared().state().peers() } - fn better_tip_header(&self) -> core::HeaderView { + fn better_tip_header(&self) -> HeaderIndexView { let (header, total_difficulty) = { let active_chain = self.shared.active_chain(); ( @@ -316,9 +316,9 @@ impl Synchronizer { let best_known = self.shared.state().shared_best_header(); // is_better_chain if total_difficulty > *best_known.total_difficulty() { - header + (header, total_difficulty).into() } else { - best_known.into_inner() + best_known } } @@ -387,7 +387,11 @@ impl Synchronizer { continue; } } else { - active_chain.send_getheaders_to_peer(nc, *peer, (&better_tip_header).into()); + active_chain.send_getheaders_to_peer( + nc, + *peer, + better_tip_header.number_and_hash(), + ); } } @@ -512,7 +516,7 @@ impl Synchronizer { } debug!("start sync peer={}", peer); - active_chain.send_getheaders_to_peer(nc, peer, (&tip).into()); + active_chain.send_getheaders_to_peer(nc, peer, tip.number_and_hash()); } } diff --git a/sync/src/tests/synchronizer/functions.rs b/sync/src/tests/synchronizer/functions.rs index 38b21fd055c..2571934b949 100644 --- a/sync/src/tests/synchronizer/functions.rs +++ b/sync/src/tests/synchronizer/functions.rs @@ -308,13 +308,14 @@ fn test_get_ancestor() { assert!(tip.is_some()); assert!(header.is_some()); assert!(noop.is_none()); - assert_eq!(tip.unwrap(), shared.snapshot().tip_header().to_owned()); + assert_eq!(tip.unwrap().hash(), shared.snapshot().tip_header().hash()); assert_eq!( - header.unwrap(), + header.unwrap().hash(), shared .store() .get_block_header(&shared.store().get_block_hash(100).unwrap()) .unwrap() + .hash() ); } diff --git a/sync/src/tests/types.rs b/sync/src/tests/types.rs index 5a16cded12c..081c95a012b 100644 --- a/sync/src/tests/types.rs +++ b/sync/src/tests/types.rs @@ -10,13 +10,13 @@ use std::{ sync::atomic::{AtomicUsize, Ordering::Relaxed}, }; -use crate::types::{HeaderView, TtlFilter, FILTER_TTL}; +use crate::types::{HeaderIndexView, TtlFilter, FILTER_TTL}; const SKIPLIST_LENGTH: u64 = 10_000; #[test] fn test_get_ancestor_use_skip_list() { - let mut header_map: HashMap = HashMap::default(); + let mut header_map: HashMap = HashMap::default(); let mut hashes: BTreeMap = BTreeMap::default(); let mut parent_hash = None; @@ -31,7 +31,7 @@ fn 
test_get_ancestor_use_skip_list() { hashes.insert(number, header.hash()); parent_hash = Some(header.hash()); - let mut view = HeaderView::new(header, U256::zero()); + let mut view: HeaderIndexView = (header, U256::zero()).into(); view.build_skip(0, |hash, _| header_map.get(hash).cloned(), |_, _| None); header_map.insert(view.hash(), view); } @@ -40,7 +40,7 @@ fn test_get_ancestor_use_skip_list() { if *number > 0 { let skip_view = header_map .get(hash) - .and_then(|view| header_map.get(view.skip_hash.as_ref().unwrap())) + .and_then(|view| header_map.get(view.skip_hash().unwrap())) .unwrap(); assert_eq!( Some(skip_view.hash()).as_ref(), @@ -48,7 +48,7 @@ fn test_get_ancestor_use_skip_list() { ); assert!(skip_view.number() < *number); } else { - assert!(header_map[hash].skip_hash.is_none()); + assert!(header_map[hash].skip_hash().is_none()); } } diff --git a/sync/src/types/header_map/backend.rs b/sync/src/types/header_map/backend.rs index befc1986828..9724e89aa93 100644 --- a/sync/src/types/header_map/backend.rs +++ b/sync/src/types/header_map/backend.rs @@ -2,7 +2,7 @@ use std::path; use ckb_types::packed::Byte32; -use crate::types::HeaderView; +use crate::types::HeaderIndexView; pub(crate) trait KeyValueBackend { fn new

<P>(tmpdir: Option<P>
) -> Self @@ -15,9 +15,9 @@ pub(crate) trait KeyValueBackend { } fn contains_key(&self, key: &Byte32) -> bool; - fn get(&self, key: &Byte32) -> Option; - fn insert(&self, value: &HeaderView) -> Option<()>; - fn insert_batch(&self, values: &[HeaderView]); - fn remove(&self, key: &Byte32) -> Option; + fn get(&self, key: &Byte32) -> Option; + fn insert(&self, value: &HeaderIndexView) -> Option<()>; + fn insert_batch(&self, values: &[HeaderIndexView]); + fn remove(&self, key: &Byte32) -> Option; fn remove_no_return(&self, key: &Byte32); } diff --git a/sync/src/types/header_map/backend_sled.rs b/sync/src/types/header_map/backend_sled.rs index a41523d1864..b3653f84539 100644 --- a/sync/src/types/header_map/backend_sled.rs +++ b/sync/src/types/header_map/backend_sled.rs @@ -1,7 +1,7 @@ use super::KeyValueBackend; -use crate::types::HeaderView; +use crate::types::HeaderIndexView; use ckb_types::{packed::Byte32, prelude::*}; -use sled::Db; +use sled::{Config, Db, Mode}; use std::path; use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering; @@ -27,8 +27,15 @@ impl KeyValueBackend for SledBackend { } .expect("failed to create a tempdir to save header map into disk"); - let db: Db = sled::open(tmpdir.path()) + // use a smaller system page cache here since we are using sled as a temporary storage, + // most of the time we will only read header from memory. + let db: Db = Config::new() + .mode(Mode::HighThroughput) + .cache_capacity(64 * 1024 * 1024) + .path(tmpdir.path()) + .open() .expect("failed to open a key-value database to save header map into disk"); + Self { db, _tmpdir: tmpdir, @@ -46,14 +53,14 @@ impl KeyValueBackend for SledBackend { .expect("sled contains_key") } - fn get(&self, key: &Byte32) -> Option { + fn get(&self, key: &Byte32) -> Option { self.db .get(key.as_slice()) .unwrap_or_else(|err| panic!("read header map from disk should be ok, but {err}")) - .map(|slice| HeaderView::from_slice_should_be_ok(slice.as_ref())) + .map(|slice| HeaderIndexView::from_slice_should_be_ok(key.as_slice(), slice.as_ref())) } - fn insert(&self, value: &HeaderView) -> Option<()> { + fn insert(&self, value: &HeaderIndexView) -> Option<()> { let key = value.hash(); let last_value = self .db @@ -65,7 +72,7 @@ impl KeyValueBackend for SledBackend { last_value.map(|_| ()) } - fn insert_batch(&self, values: &[HeaderView]) { + fn insert_batch(&self, values: &[HeaderIndexView]) { let mut count = 0; for value in values { let key = value.hash(); @@ -80,7 +87,7 @@ impl KeyValueBackend for SledBackend { self.count.fetch_add(count, Ordering::SeqCst); } - fn remove(&self, key: &Byte32) -> Option { + fn remove(&self, key: &Byte32) -> Option { let old_value = self .db .remove(key.as_slice()) @@ -88,7 +95,7 @@ impl KeyValueBackend for SledBackend { old_value.map(|slice| { self.count.fetch_sub(1, Ordering::SeqCst); - HeaderView::from_slice_should_be_ok(&slice) + HeaderIndexView::from_slice_should_be_ok(key.as_slice(), &slice) }) } diff --git a/sync/src/types/header_map/kernel_lru.rs b/sync/src/types/header_map/kernel_lru.rs index 69eb1e8f8c0..245baafe75a 100644 --- a/sync/src/types/header_map/kernel_lru.rs +++ b/sync/src/types/header_map/kernel_lru.rs @@ -8,7 +8,7 @@ use ckb_util::{Mutex, MutexGuard}; use ckb_types::packed::Byte32; use super::{KeyValueBackend, MemoryMap}; -use crate::types::HeaderView; +use crate::types::HeaderIndexView; pub(crate) struct HeaderMapKernel where @@ -88,7 +88,7 @@ where self.backend.contains_key(hash) } - pub(crate) fn get(&self, hash: &Byte32) -> Option { + pub(crate) fn 
get(&self, hash: &Byte32) -> Option { #[cfg(feature = "stats")] { self.stats().tick_primary_select(); @@ -108,20 +108,20 @@ where { self.stats().tick_primary_insert(); } - self.memory.insert(view.hash(), view.clone()); + self.memory.insert(view.clone()); Some(view) } else { None } } - pub(crate) fn insert(&self, view: HeaderView) -> Option<()> { + pub(crate) fn insert(&self, view: HeaderIndexView) -> Option<()> { #[cfg(feature = "stats")] { self.trace(); self.stats().tick_primary_insert(); } - self.memory.insert(view.hash(), view) + self.memory.insert(view) } pub(crate) fn remove(&self, hash: &Byte32) { diff --git a/sync/src/types/header_map/memory.rs b/sync/src/types/header_map/memory.rs index 39d85506c7a..0411e8c671e 100644 --- a/sync/src/types/header_map/memory.rs +++ b/sync/src/types/header_map/memory.rs @@ -1,12 +1,70 @@ -use crate::types::HeaderView; -use crate::types::SHRINK_THRESHOLD; -use ckb_types::packed::Byte32; -use ckb_util::shrink_to_fit; -use ckb_util::LinkedHashMap; -use ckb_util::RwLock; +use crate::types::{HeaderIndexView, SHRINK_THRESHOLD}; +use ckb_types::{ + core::{BlockNumber, EpochNumberWithFraction}, + packed::Byte32, + U256, +}; +use ckb_util::{shrink_to_fit, LinkedHashMap, RwLock}; use std::default; -pub(crate) struct MemoryMap(RwLock>); +#[derive(Clone, Debug, PartialEq, Eq)] +struct HeaderIndexViewInner { + number: BlockNumber, + epoch: EpochNumberWithFraction, + timestamp: u64, + parent_hash: Byte32, + total_difficulty: U256, + skip_hash: Option, +} + +impl From<(Byte32, HeaderIndexViewInner)> for HeaderIndexView { + fn from((hash, inner): (Byte32, HeaderIndexViewInner)) -> Self { + let HeaderIndexViewInner { + number, + epoch, + timestamp, + parent_hash, + total_difficulty, + skip_hash, + } = inner; + Self { + hash, + number, + epoch, + timestamp, + parent_hash, + total_difficulty, + skip_hash, + } + } +} + +impl From for (Byte32, HeaderIndexViewInner) { + fn from(view: HeaderIndexView) -> Self { + let HeaderIndexView { + hash, + number, + epoch, + timestamp, + parent_hash, + total_difficulty, + skip_hash, + } = view; + ( + hash, + HeaderIndexViewInner { + number, + epoch, + timestamp, + parent_hash, + total_difficulty, + skip_hash, + }, + ) + } +} + +pub(crate) struct MemoryMap(RwLock>); impl default::Default for MemoryMap { fn default() -> Self { @@ -24,29 +82,39 @@ impl MemoryMap { self.0.read().contains_key(key) } - pub(crate) fn get_refresh(&self, key: &Byte32) -> Option { + pub(crate) fn get_refresh(&self, key: &Byte32) -> Option { let mut guard = self.0.write(); - guard.get_refresh(key).cloned() + guard + .get_refresh(key) + .cloned() + .map(|inner| (key.clone(), inner).into()) } - pub(crate) fn insert(&self, key: Byte32, value: HeaderView) -> Option<()> { + pub(crate) fn insert(&self, header: HeaderIndexView) -> Option<()> { let mut guard = self.0.write(); + let (key, value) = header.into(); guard.insert(key, value).map(|_| ()) } - pub(crate) fn remove(&self, key: &Byte32) -> Option { + pub(crate) fn remove(&self, key: &Byte32) -> Option { let mut guard = self.0.write(); let ret = guard.remove(key); shrink_to_fit!(guard, SHRINK_THRESHOLD); - ret + ret.map(|inner| (key.clone(), inner).into()) } - pub(crate) fn front_n(&self, size_limit: usize) -> Option> { + pub(crate) fn front_n(&self, size_limit: usize) -> Option> { let guard = self.0.read(); let size = guard.len(); if size > size_limit { let num = size - size_limit; - Some(guard.values().take(num).cloned().collect()) + Some( + guard + .iter() + .take(num) + .map(|(key, value)| (key.clone(), 
value.clone()).into()) + .collect(), + ) } else { None } diff --git a/sync/src/types/header_map/mod.rs b/sync/src/types/header_map/mod.rs index d6dde661f4f..5210e482f6a 100644 --- a/sync/src/types/header_map/mod.rs +++ b/sync/src/types/header_map/mod.rs @@ -1,10 +1,9 @@ -use crate::types::HeaderView; use ckb_async_runtime::Handle; use ckb_stop_handler::{SignalSender, StopHandler}; -use ckb_types::packed::{self, Byte32}; -use std::path; +use ckb_types::packed::Byte32; use std::sync::Arc; use std::time::Duration; +use std::{mem::size_of, path}; use tokio::sync::oneshot; use tokio::time::MissedTickBehavior; @@ -18,6 +17,8 @@ pub(crate) use self::{ memory::MemoryMap, }; +use super::HeaderIndexView; + pub struct HeaderMap { inner: Arc>, stop: StopHandler<()>, @@ -30,8 +31,7 @@ impl Drop for HeaderMap { } const INTERVAL: Duration = Duration::from_millis(500); -// key + total_difficulty + skip_hash -const ITEM_BYTES_SIZE: usize = packed::HeaderView::TOTAL_SIZE + 32 * 3; +const ITEM_BYTES_SIZE: usize = size_of::(); const WARN_THRESHOLD: usize = ITEM_BYTES_SIZE * 100_000; impl HeaderMap { @@ -76,11 +76,11 @@ impl HeaderMap { self.inner.contains_key(hash) } - pub(crate) fn get(&self, hash: &Byte32) -> Option { + pub(crate) fn get(&self, hash: &Byte32) -> Option { self.inner.get(hash) } - pub(crate) fn insert(&self, view: HeaderView) -> Option<()> { + pub(crate) fn insert(&self, view: HeaderIndexView) -> Option<()> { self.inner.insert(view) } diff --git a/sync/src/types/mod.rs b/sync/src/types/mod.rs index 3f2583c4439..d398621da2a 100644 --- a/sync/src/types/mod.rs +++ b/sync/src/types/mod.rs @@ -19,7 +19,7 @@ use ckb_network::{CKBProtocolContext, PeerIndex, SupportProtocols}; use ckb_shared::{shared::Shared, Snapshot}; use ckb_store::{ChainDB, ChainStore}; use ckb_systemtime::unix_time_as_millis; -use ckb_traits::HeaderProvider; +use ckb_traits::{HeaderFields, HeaderFieldsProvider}; use ckb_tx_pool::service::TxVerificationResult; use ckb_types::{ core::{self, BlockNumber, EpochExt}, @@ -43,7 +43,7 @@ use std::{cmp, fmt, iter}; mod header_map; use crate::utils::send_message; -use ckb_types::core::EpochNumber; +use ckb_types::core::{EpochNumber, EpochNumberWithFraction}; pub use header_map::HeaderMap; const GET_HEADERS_CACHE_SIZE: usize = 10000; @@ -174,7 +174,7 @@ impl HeadersSyncController { } } - pub(crate) fn from_header(better_tip_header: &core::HeaderView) -> Self { + pub(crate) fn from_header(better_tip_header: &HeaderIndexView) -> Self { let started_ts = unix_time_as_millis(); let started_tip_ts = better_tip_header.timestamp(); Self { @@ -1011,56 +1011,151 @@ impl Peers { } #[derive(Clone, Debug, PartialEq, Eq)] -pub struct HeaderView { - inner: core::HeaderView, +pub struct HeaderIndex { + number: BlockNumber, + hash: Byte32, total_difficulty: U256, - // pointer to the index of some further predecessor of this block - pub(crate) skip_hash: Option, } -impl HeaderView { - pub fn new(inner: core::HeaderView, total_difficulty: U256) -> Self { - HeaderView { - inner, +impl HeaderIndex { + pub fn new(number: BlockNumber, hash: Byte32, total_difficulty: U256) -> Self { + HeaderIndex { + number, + hash, total_difficulty, - skip_hash: None, } } pub fn number(&self) -> BlockNumber { - self.inner.number() + self.number } pub fn hash(&self) -> Byte32 { - self.inner.hash() + self.hash.clone() } - pub fn parent_hash(&self) -> Byte32 { - self.inner.data().raw().parent_hash() + pub fn total_difficulty(&self) -> &U256 { + &self.total_difficulty + } + + pub fn number_and_hash(&self) -> BlockNumberAndHash { + 
(self.number(), self.hash()).into() + } + + pub fn is_better_chain(&self, other: &Self) -> bool { + self.is_better_than(other.total_difficulty()) + } + + pub fn is_better_than(&self, other_total_difficulty: &U256) -> bool { + self.total_difficulty() > other_total_difficulty + } +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct HeaderIndexView { + hash: Byte32, + number: BlockNumber, + epoch: EpochNumberWithFraction, + timestamp: u64, + parent_hash: Byte32, + total_difficulty: U256, + skip_hash: Option, +} + +impl HeaderIndexView { + pub fn new( + hash: Byte32, + number: BlockNumber, + epoch: EpochNumberWithFraction, + timestamp: u64, + parent_hash: Byte32, + total_difficulty: U256, + ) -> Self { + HeaderIndexView { + hash, + number, + epoch, + timestamp, + parent_hash, + total_difficulty, + skip_hash: None, + } + } + + pub fn hash(&self) -> Byte32 { + self.hash.clone() + } + + pub fn number(&self) -> BlockNumber { + self.number + } + + pub fn epoch(&self) -> EpochNumberWithFraction { + self.epoch } pub fn timestamp(&self) -> u64 { - self.inner.timestamp() + self.timestamp } pub fn total_difficulty(&self) -> &U256 { &self.total_difficulty } - pub fn inner(&self) -> &core::HeaderView { - &self.inner + pub fn parent_hash(&self) -> Byte32 { + self.parent_hash.clone() + } + + pub fn skip_hash(&self) -> Option<&Byte32> { + self.skip_hash.as_ref() + } + + // deserialize from bytes + fn from_slice_should_be_ok(hash: &[u8], slice: &[u8]) -> Self { + let hash = packed::Byte32Reader::from_slice_should_be_ok(hash).to_entity(); + let number = BlockNumber::from_le_bytes(slice[0..8].try_into().expect("stored slice")); + let epoch = EpochNumberWithFraction::from_full_value(u64::from_le_bytes( + slice[8..16].try_into().expect("stored slice"), + )); + let timestamp = u64::from_le_bytes(slice[16..24].try_into().expect("stored slice")); + let parent_hash = packed::Byte32Reader::from_slice_should_be_ok(&slice[24..56]).to_entity(); + let total_difficulty = U256::from_little_endian(&slice[56..88]).expect("stored slice"); + let skip_hash = if slice.len() == 120 { + Some(packed::Byte32Reader::from_slice_should_be_ok(&slice[88..120]).to_entity()) + } else { + None + }; + Self { + hash, + number, + epoch, + timestamp, + parent_hash, + total_difficulty, + skip_hash, + } } - pub fn into_inner(self) -> core::HeaderView { - self.inner + // serialize all fields except `hash` to bytes + fn to_vec(&self) -> Vec { + let mut v = Vec::new(); + v.extend_from_slice(self.number.to_le_bytes().as_slice()); + v.extend_from_slice(self.epoch.full_value().to_le_bytes().as_slice()); + v.extend_from_slice(self.timestamp.to_le_bytes().as_slice()); + v.extend_from_slice(self.parent_hash.as_slice()); + v.extend_from_slice(self.total_difficulty.to_le_bytes().as_slice()); + if let Some(ref skip_hash) = self.skip_hash { + v.extend_from_slice(skip_hash.as_slice()); + } + v } pub fn build_skip(&mut self, tip_number: BlockNumber, get_header_view: F, fast_scanner: G) where - F: Fn(&Byte32, Option) -> Option, - G: Fn(BlockNumber, BlockNumberAndHash) -> Option, + F: Fn(&Byte32, bool) -> Option, + G: Fn(BlockNumber, BlockNumberAndHash) -> Option, { - if self.inner.is_genesis() { + if self.number == 0 { return; } self.skip_hash = self @@ -1079,10 +1174,10 @@ impl HeaderView { number: BlockNumber, get_header_view: F, fast_scanner: G, - ) -> Option + ) -> Option where - F: Fn(&Byte32, Option) -> Option, - G: Fn(BlockNumber, BlockNumberAndHash) -> Option, + F: Fn(&Byte32, bool) -> Option, + G: Fn(BlockNumber, BlockNumberAndHash) -> Option, { if 
number > self.number() { return None; @@ -1102,11 +1197,11 @@ impl HeaderView { && number_skip_prev >= number)) => { // Only follow skip if parent->skip isn't better than skip->parent - current = get_header_view(hash, Some(store_first))?; + current = get_header_view(hash, store_first)?; number_walk = number_skip; } _ => { - current = get_header_view(¤t.parent_hash(), Some(store_first))?; + current = get_header_view(¤t.parent_hash(), store_first)?; number_walk -= 1; } } @@ -1115,109 +1210,33 @@ impl HeaderView { break; } } - Some(current).map(HeaderView::into_inner) - } - - pub fn is_better_than(&self, total_difficulty: &U256) -> bool { - self.total_difficulty() > total_difficulty - } - - fn from_slice_should_be_ok(slice: &[u8]) -> Self { - let len_size = packed::Uint32Reader::TOTAL_SIZE; - if slice.len() < len_size { - panic!("failed to unpack item in header map: header part is broken"); - } - let mut idx = 0; - let inner_len = { - let reader = packed::Uint32Reader::from_slice_should_be_ok(&slice[idx..idx + len_size]); - Unpack::::unpack(&reader) as usize - }; - idx += len_size; - let total_difficulty_len = packed::Uint256Reader::TOTAL_SIZE; - if slice.len() < len_size + inner_len + total_difficulty_len { - panic!("failed to unpack item in header map: body part is broken"); - } - let inner = { - let reader = - packed::HeaderViewReader::from_slice_should_be_ok(&slice[idx..idx + inner_len]); - Unpack::::unpack(&reader) - }; - idx += inner_len; - let total_difficulty = { - let reader = packed::Uint256Reader::from_slice_should_be_ok( - &slice[idx..idx + total_difficulty_len], - ); - Unpack::::unpack(&reader) - }; - idx += total_difficulty_len; - let skip_hash = { - packed::Byte32OptReader::from_slice_should_be_ok(&slice[idx..]) - .to_entity() - .to_opt() - }; - Self { - inner, - total_difficulty, - skip_hash, - } - } - - fn to_vec(&self) -> Vec { - let mut v = Vec::new(); - let inner: packed::HeaderView = self.inner.pack(); - let total_difficulty: packed::Uint256 = self.total_difficulty.pack(); - let skip_hash: packed::Byte32Opt = Pack::pack(&self.skip_hash); - let inner_len: packed::Uint32 = (inner.as_slice().len() as u32).pack(); - v.extend_from_slice(inner_len.as_slice()); - v.extend_from_slice(inner.as_slice()); - v.extend_from_slice(total_difficulty.as_slice()); - v.extend_from_slice(skip_hash.as_slice()); - v + Some(current) } pub fn as_header_index(&self) -> HeaderIndex { HeaderIndex::new(self.number(), self.hash(), self.total_difficulty().clone()) } -} - -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct HeaderIndex { - number: BlockNumber, - hash: Byte32, - total_difficulty: U256, -} - -impl HeaderIndex { - pub fn new(number: BlockNumber, hash: Byte32, total_difficulty: U256) -> Self { - HeaderIndex { - number, - hash, - total_difficulty, - } - } - - pub fn number(&self) -> BlockNumber { - self.number - } - - pub fn hash(&self) -> Byte32 { - self.hash.clone() - } - - pub fn total_difficulty(&self) -> &U256 { - &self.total_difficulty - } pub fn number_and_hash(&self) -> BlockNumberAndHash { (self.number(), self.hash()).into() } - pub fn is_better_chain(&self, other: &Self) -> bool { - self.is_better_than(other.total_difficulty()) + pub fn is_better_than(&self, total_difficulty: &U256) -> bool { + self.total_difficulty() > total_difficulty } +} - pub fn is_better_than(&self, other_total_difficulty: &U256) -> bool { - self.total_difficulty() > other_total_difficulty +impl From<(core::HeaderView, U256)> for HeaderIndexView { + fn from((header, total_difficulty): (core::HeaderView, U256)) 
-> Self { + HeaderIndexView { + hash: header.hash(), + number: header.number(), + epoch: header.epoch(), + timestamp: header.timestamp(), + parent_hash: header.parent_hash(), + total_difficulty, + skip_hash: None, + } } } @@ -1286,7 +1305,7 @@ impl SyncShared { snapshot.tip_header().to_owned(), ) }; - let shared_best_header = RwLock::new(HeaderView::new(header, total_difficulty)); + let shared_best_header = RwLock::new((header, total_difficulty).into()); ckb_logger::info!( "header_map.memory_limit {}", sync_config.header_map.memory_limit @@ -1481,25 +1500,33 @@ impl SyncShared { pub fn insert_valid_header(&self, peer: PeerIndex, header: &core::HeaderView) { let tip_number = self.active_chain().tip_number(); let store_first = tip_number >= header.number(); - let parent_view = self - .get_header_view(&header.data().raw().parent_hash(), Some(store_first)) + // We don't use header#parent_hash clone here because it will hold the arc counter of the SendHeaders message + // which will cause the 2000 headers to be held in memory for a long time + let parent_hash = Byte32::from_slice(header.data().raw().parent_hash().as_slice()) + .expect("checked slice length"); + let parent_header_index = self + .get_header_index_view(&parent_hash, store_first) .expect("parent should be verified"); - let mut header_view = { - let total_difficulty = parent_view.total_difficulty() + header.difficulty(); - HeaderView::new(header.clone(), total_difficulty) - }; + let mut header_view = HeaderIndexView::new( + header.hash(), + header.number(), + header.epoch(), + header.timestamp(), + parent_hash, + parent_header_index.total_difficulty() + header.difficulty(), + ); let snapshot = Arc::clone(&self.shared.snapshot()); header_view.build_skip( tip_number, - |hash, store_first_opt| self.get_header_view(hash, store_first_opt), + |hash, store_first| self.get_header_index_view(hash, store_first), |number, current| { // shortcut to return an ancestor block if current.number <= snapshot.tip_number() && snapshot.is_main_chain(¤t.hash) { snapshot .get_block_hash(number) - .and_then(|hash| self.get_header_view(&hash, Some(true))) + .and_then(|hash| self.get_header_index_view(&hash, true)) } else { None } @@ -1512,20 +1539,19 @@ impl SyncShared { self.state.may_set_shared_best_header(header_view); } - /// Get header view with hash - pub fn get_header_view( + pub(crate) fn get_header_index_view( &self, hash: &Byte32, - store_first_opt: Option, - ) -> Option { + store_first: bool, + ) -> Option { let store = self.store(); - if store_first_opt.unwrap_or(false) { + if store_first { store .get_block_header(hash) .and_then(|header| { store .get_block_ext(hash) - .map(|block_ext| HeaderView::new(header, block_ext.total_difficulty)) + .map(|block_ext| (header, block_ext.total_difficulty).into()) }) .or_else(|| self.state.header_map.get(hash)) } else { @@ -1533,7 +1559,7 @@ impl SyncShared { store.get_block_header(hash).and_then(|header| { store .get_block_ext(hash) - .map(|block_ext| HeaderView::new(header, block_ext.total_difficulty)) + .map(|block_ext| (header, block_ext.total_difficulty).into()) }) }) } @@ -1551,13 +1577,29 @@ impl SyncShared { } } -impl HeaderProvider for SyncShared { - fn get_header(&self, hash: &Byte32) -> Option { +impl HeaderFieldsProvider for SyncShared { + fn get_header_fields(&self, hash: &Byte32) -> Option { self.state .header_map .get(hash) - .map(HeaderView::into_inner) - .or_else(|| self.store().get_block_header(hash)) + .map(|header| HeaderFields { + hash: header.hash(), + number: header.number(), + epoch: 
header.epoch(), + timestamp: header.timestamp(), + parent_hash: header.parent_hash(), + }) + .or_else(|| { + self.store() + .get_block_header(hash) + .map(|header| HeaderFields { + hash: header.hash(), + number: header.number(), + epoch: header.epoch(), + timestamp: header.timestamp(), + parent_hash: header.parent_hash(), + }) + }) } } @@ -1625,7 +1667,7 @@ impl PartialOrd for UnknownTxHashPriority { pub struct SyncState { /* Status irrelevant to peers */ - shared_best_header: RwLock, + shared_best_header: RwLock, header_map: HeaderMap, block_status_map: DashMap, tx_filter: Mutex>, @@ -1701,11 +1743,11 @@ impl SyncState { self.tx_relay_receiver.try_iter().take(limit).collect() } - pub fn shared_best_header(&self) -> HeaderView { + pub fn shared_best_header(&self) -> HeaderIndexView { self.shared_best_header.read().to_owned() } - pub fn shared_best_header_ref(&self) -> RwLockReadGuard { + pub fn shared_best_header_ref(&self) -> RwLockReadGuard { self.shared_best_header.read() } @@ -1713,7 +1755,7 @@ impl SyncState { &self.header_map } - pub fn may_set_shared_best_header(&self, header: HeaderView) { + pub fn may_set_shared_best_header(&self, header: HeaderIndexView) { if !header.is_better_than(self.shared_best_header.read().total_difficulty()) { return; } @@ -2070,22 +2112,25 @@ impl ActiveChain { self.shared.shared().is_initial_block_download() } - pub fn get_ancestor(&self, base: &Byte32, number: BlockNumber) -> Option { + pub fn get_ancestor(&self, base: &Byte32, number: BlockNumber) -> Option { let tip_number = self.tip_number(); - self.shared.get_header_view(base, None)?.get_ancestor( - tip_number, - number, - |hash, store_first_opt| self.shared.get_header_view(hash, store_first_opt), - |number, current| { - // shortcut to return an ancestor block - if current.number <= tip_number && self.snapshot().is_main_chain(¤t.hash) { - self.get_block_hash(number) - .and_then(|hash| self.shared.get_header_view(&hash, Some(true))) - } else { - None - } - }, - ) + self.shared + .get_header_index_view(base, false)? + .get_ancestor( + tip_number, + number, + |hash, store_first| self.shared.get_header_index_view(hash, store_first), + |number, current| { + // shortcut to return an ancestor block + if current.number <= tip_number && self.snapshot().is_main_chain(¤t.hash) + { + self.get_block_hash(number) + .and_then(|hash| self.shared.get_header_index_view(&hash, true)) + } else { + None + } + }, + ) } pub fn get_locator(&self, start: BlockNumberAndHash) -> Vec { @@ -2152,7 +2197,9 @@ impl ActiveChain { (pa.clone(), pb.clone()) }; - m_right = self.get_ancestor(&m_right.hash(), m_left.number())?.into(); + m_right = self + .get_ancestor(&m_right.hash(), m_left.number())? + .number_and_hash(); if m_left == m_right { return Some(m_left); } @@ -2161,10 +2208,10 @@ impl ActiveChain { while m_left != m_right { m_left = self .get_ancestor(&m_left.hash(), m_left.number() - 1)? - .into(); + .number_and_hash(); m_right = self .get_ancestor(&m_right.hash(), m_right.number() - 1)? 
- .into(); + .number_and_hash(); } Some(m_left) } diff --git a/traits/src/header_provider.rs b/traits/src/header_provider.rs index a3f768d8e41..3536ca01eec 100644 --- a/traits/src/header_provider.rs +++ b/traits/src/header_provider.rs @@ -1,5 +1,5 @@ use ckb_types::{ - core::{BlockNumber, HeaderView}, + core::{BlockNumber, EpochNumberWithFraction, HeaderView}, packed::Byte32, }; @@ -7,27 +7,39 @@ use ckb_types::{ pub trait HeaderProvider { /// Get the header of the given block hash fn get_header(&self, hash: &Byte32) -> Option; +} - /// Get timestamp and block_number of the corresponding block_hash, and hash of parent block - fn timestamp_and_parent(&self, block_hash: &Byte32) -> (u64, BlockNumber, Byte32) { - let header = self.get_header(block_hash).expect("parent header exist"); - ( - header.timestamp(), - header.number(), - header.data().raw().parent_hash(), - ) - } +/// A compact representation of header fields, used for header verification and median time calculation +pub struct HeaderFields { + /// Block hash + pub hash: Byte32, + /// Block number + pub number: BlockNumber, + /// Block epoch + pub epoch: EpochNumberWithFraction, + /// Block timestamp + pub timestamp: u64, + /// Block parent hash + pub parent_hash: Byte32, +} + +/// Trait for header fields storage +pub trait HeaderFieldsProvider { + /// Get the header fields of the given block hash + fn get_header_fields(&self, hash: &Byte32) -> Option; /// Get past block median time, **including the timestamp of the given one** fn block_median_time(&self, block_hash: &Byte32, median_block_count: usize) -> u64 { let mut timestamps: Vec = Vec::with_capacity(median_block_count); let mut block_hash = block_hash.clone(); for _ in 0..median_block_count { - let (timestamp, block_number, parent_hash) = self.timestamp_and_parent(&block_hash); - timestamps.push(timestamp); - block_hash = parent_hash; + let header_fields = self + .get_header_fields(&block_hash) + .expect("parent header exist"); + timestamps.push(header_fields.timestamp); + block_hash = header_fields.parent_hash; - if block_number == 0 { + if header_fields.number == 0 { break; } } @@ -37,9 +49,3 @@ pub trait HeaderProvider { timestamps[timestamps.len() >> 1] } } - -impl HeaderProvider for Box Option> { - fn get_header(&self, hash: &Byte32) -> Option { - (self)(hash.to_owned()) - } -} diff --git a/traits/src/lib.rs b/traits/src/lib.rs index 916d0d5defc..d2a79e00ecc 100644 --- a/traits/src/lib.rs +++ b/traits/src/lib.rs @@ -5,4 +5,4 @@ mod header_provider; pub use crate::cell_data_provider::CellDataProvider; pub use crate::epoch_provider::{BlockEpoch, EpochProvider}; -pub use crate::header_provider::HeaderProvider; +pub use crate::header_provider::*; diff --git a/util/app-config/src/configs/network.rs b/util/app-config/src/configs/network.rs index abe5b08b7c9..d7bf9585c2f 100644 --- a/util/app-config/src/configs/network.rs +++ b/util/app-config/src/configs/network.rs @@ -125,13 +125,13 @@ impl Default for HeaderMapConfig { Self { primary_limit: None, backend_close_threshold: None, - memory_limit: ByteUnit::Megabyte(600), + memory_limit: default_memory_limit(), } } } const fn default_memory_limit() -> ByteUnit { - ByteUnit::Megabyte(600) + ByteUnit::Megabyte(256) } #[derive(Clone, Debug, Copy, Eq, PartialEq, Serialize, Deserialize, Hash)] diff --git a/util/snapshot/src/lib.rs b/util/snapshot/src/lib.rs index 344683f7292..b76e467e121 100644 --- a/util/snapshot/src/lib.rs +++ b/util/snapshot/src/lib.rs @@ -16,7 +16,7 @@ use ckb_merkle_mountain_range::{ }; use 
ckb_proposal_table::ProposalView; use ckb_store::{ChainStore, StoreCache, StoreSnapshot}; -use ckb_traits::HeaderProvider; +use ckb_traits::{HeaderFields, HeaderFieldsProvider, HeaderProvider}; use ckb_types::core::error::OutPointError; use ckb_types::{ core::{ @@ -272,6 +272,20 @@ impl HeaderProvider for Snapshot { } } +impl HeaderFieldsProvider for Snapshot { + fn get_header_fields(&self, hash: &Byte32) -> Option { + self.store + .get_block_header(hash) + .map(|header| HeaderFields { + hash: header.hash(), + number: header.number(), + epoch: header.epoch(), + timestamp: header.timestamp(), + parent_hash: header.parent_hash(), + }) + } +} + impl ConsensusProvider for Snapshot { fn get_consensus(&self) -> &Consensus { self.consensus() diff --git a/util/test-chain-utils/src/median_time.rs b/util/test-chain-utils/src/median_time.rs index d73e44b9883..fe90aaee072 100644 --- a/util/test-chain-utils/src/median_time.rs +++ b/util/test-chain-utils/src/median_time.rs @@ -1,4 +1,4 @@ -use ckb_traits::HeaderProvider; +use ckb_traits::{HeaderFields, HeaderFieldsProvider}; use ckb_types::{ core::{BlockNumber, EpochNumberWithFraction, HeaderBuilder, HeaderView, TransactionInfo}, packed::Byte32, @@ -24,16 +24,15 @@ pub const MOCK_MEDIAN_TIME_COUNT: usize = 11; #[doc(hidden)] pub const MOCK_EPOCH_LENGTH: BlockNumber = 1000; -impl HeaderProvider for MockMedianTime { - fn get_header(&self, hash: &Byte32) -> Option { - self.headers.get(hash).cloned() - } - - fn timestamp_and_parent(&self, block_hash: &Byte32) -> (u64, BlockNumber, Byte32) { - self.headers - .get(block_hash) - .map(|header| (header.timestamp(), header.number(), header.hash())) - .unwrap() +impl HeaderFieldsProvider for MockMedianTime { + fn get_header_fields(&self, hash: &Byte32) -> Option { + self.headers.get(hash).cloned().map(|header| HeaderFields { + hash: header.hash(), + number: header.number(), + epoch: header.epoch(), + timestamp: header.timestamp(), + parent_hash: header.parent_hash(), + }) } } @@ -41,6 +40,7 @@ impl MockMedianTime { /// Create a new `MockMedianTime`. 
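/// Mock headers are chained through `parent_hash` so that `block_median_time` can walk back from any block.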
#[doc(hidden)] pub fn new(timestamps: Vec) -> Self { + let mut parent_hash = Byte32::zero(); Self { headers: Arc::new( timestamps @@ -59,7 +59,9 @@ impl MockMedianTime { ) .pack(), ) + .parent_hash(parent_hash.clone()) .build(); + parent_hash = header.hash(); (header.hash(), header) }) .collect(), diff --git a/verification/src/header_verifier.rs b/verification/src/header_verifier.rs index eacc6e8f7ba..779db05703b 100644 --- a/verification/src/header_verifier.rs +++ b/verification/src/header_verifier.rs @@ -6,8 +6,8 @@ use ckb_chain_spec::consensus::Consensus; use ckb_error::Error; use ckb_pow::PowEngine; use ckb_systemtime::unix_time_as_millis; -use ckb_traits::HeaderProvider; -use ckb_types::core::{HeaderView, Version}; +use ckb_traits::HeaderFieldsProvider; +use ckb_types::core::{BlockNumber, EpochNumberWithFraction, HeaderView, Version}; use ckb_verification_traits::Verifier; /// Context-dependent verification checks for block header @@ -18,7 +18,7 @@ pub struct HeaderVerifier<'a, DL> { consensus: &'a Consensus, } -impl<'a, DL: HeaderProvider> HeaderVerifier<'a, DL> { +impl<'a, DL: HeaderFieldsProvider> HeaderVerifier<'a, DL> { /// Crate new HeaderVerifier pub fn new(data_loader: &'a DL, consensus: &'a Consensus) -> Self { HeaderVerifier { @@ -28,20 +28,20 @@ impl<'a, DL: HeaderProvider> HeaderVerifier<'a, DL> { } } -impl<'a, DL: HeaderProvider> Verifier for HeaderVerifier<'a, DL> { +impl<'a, DL: HeaderFieldsProvider> Verifier for HeaderVerifier<'a, DL> { type Target = HeaderView; fn verify(&self, header: &Self::Target) -> Result<(), Error> { VersionVerifier::new(header, self.consensus.block_version()).verify()?; // POW check first PowVerifier::new(header, self.consensus.pow_engine().as_ref()).verify()?; - let parent = self + let parent_fields = self .data_loader - .get_header(&header.parent_hash()) + .get_header_fields(&header.parent_hash()) .ok_or_else(|| UnknownParentError { parent_hash: header.parent_hash(), })?; - NumberVerifier::new(&parent, header).verify()?; - EpochVerifier::new(&parent, header).verify()?; + NumberVerifier::new(parent_fields.number, header).verify()?; + EpochVerifier::new(parent_fields.epoch, header).verify()?; TimestampVerifier::new( self.data_loader, header, @@ -84,7 +84,7 @@ pub struct TimestampVerifier<'a, DL> { now: u64, } -impl<'a, DL: HeaderProvider> TimestampVerifier<'a, DL> { +impl<'a, DL: HeaderFieldsProvider> TimestampVerifier<'a, DL> { pub fn new(data_loader: &'a DL, header: &'a HeaderView, median_block_count: usize) -> Self { TimestampVerifier { data_loader, @@ -126,19 +126,19 @@ impl<'a, DL: HeaderProvider> TimestampVerifier<'a, DL> { /// Checks if the block number of the given header matches the expected number, /// which is the parent block's number + 1. 
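/// The verifier takes the parent block's number directly rather than a full parent header.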
pub struct NumberVerifier<'a> { - parent: &'a HeaderView, + parent: BlockNumber, header: &'a HeaderView, } impl<'a> NumberVerifier<'a> { - pub fn new(parent: &'a HeaderView, header: &'a HeaderView) -> Self { + pub fn new(parent: BlockNumber, header: &'a HeaderView) -> Self { NumberVerifier { parent, header } } pub fn verify(&self) -> Result<(), Error> { - if self.header.number() != self.parent.number() + 1 { + if self.header.number() != self.parent + 1 { return Err(NumberError { - expected: self.parent.number() + 1, + expected: self.parent + 1, actual: self.header.number(), } .into()); @@ -148,12 +148,12 @@ impl<'a> NumberVerifier<'a> { } pub struct EpochVerifier<'a> { - parent: &'a HeaderView, + parent: EpochNumberWithFraction, header: &'a HeaderView, } impl<'a> EpochVerifier<'a> { - pub fn new(parent: &'a HeaderView, header: &'a HeaderView) -> Self { + pub fn new(parent: EpochNumberWithFraction, header: &'a HeaderView) -> Self { EpochVerifier { parent, header } } @@ -164,10 +164,10 @@ impl<'a> EpochVerifier<'a> { } .into()); } - if !self.parent.is_genesis() && !self.header.epoch().is_successor_of(self.parent.epoch()) { + if !self.parent.is_genesis() && !self.header.epoch().is_successor_of(self.parent) { return Err(EpochError::NonContinuous { current: self.header.epoch(), - parent: self.parent.epoch(), + parent: self.parent, } .into()); } diff --git a/verification/src/tests/header_verifier.rs b/verification/src/tests/header_verifier.rs index 5eff93b02ea..c4cd8287d03 100644 --- a/verification/src/tests/header_verifier.rs +++ b/verification/src/tests/header_verifier.rs @@ -119,10 +119,9 @@ fn test_timestamp_too_new() { #[test] fn test_number() { - let parent = HeaderBuilder::new_with_number(10).build(); let header = HeaderBuilder::new_with_number(10).build(); - let verifier = NumberVerifier::new(&parent, &header); + let verifier = NumberVerifier::new(10, &header); assert_error_eq!( verifier.verify().unwrap_err(), NumberError { @@ -135,10 +134,7 @@ fn test_number() { #[test] fn test_epoch() { { - let parent = HeaderBuilder::default() - .number(1u64.pack()) - .epoch(EpochNumberWithFraction::new(1, 1, 10).pack()) - .build(); + let parent = EpochNumberWithFraction::new(1, 1, 10); let epochs_malformed = vec![ EpochNumberWithFraction::new_unchecked(1, 0, 0), EpochNumberWithFraction::new_unchecked(1, 10, 0), @@ -150,7 +146,7 @@ fn test_epoch() { let malformed = HeaderBuilder::default() .epoch(epoch_malformed.pack()) .build(); - let result = EpochVerifier::new(&parent, &malformed).verify(); + let result = EpochVerifier::new(parent, &malformed).verify(); assert!(result.is_err(), "input: {epoch_malformed:#}"); assert_error_eq!( result.unwrap_err(), @@ -192,20 +188,16 @@ fn test_epoch() { ), ]; - for (epoch_parent, epoch_current) in epochs { - let parent = HeaderBuilder::default() - .number(1u64.pack()) - .epoch(epoch_parent.pack()) - .build(); + for (parent, epoch_current) in epochs { let current = HeaderBuilder::default().epoch(epoch_current.pack()).build(); - let result = EpochVerifier::new(&parent, ¤t).verify(); + let result = EpochVerifier::new(parent, ¤t).verify(); assert!(result.is_err(), "current: {current:#}, parent: {parent:#}"); assert_error_eq!( result.unwrap_err(), EpochError::NonContinuous { current: epoch_current, - parent: epoch_parent, + parent, }, ); } @@ -225,14 +217,10 @@ fn test_epoch() { EpochNumberWithFraction::new_unchecked(2, 0, 11), ), ]; - for (epoch_parent, epoch_current) in epochs { - let parent = HeaderBuilder::default() - .number(1u64.pack()) - 
.epoch(epoch_parent.pack()) - .build(); + for (parent, epoch_current) in epochs { let current = HeaderBuilder::default().epoch(epoch_current.pack()).build(); - let result = EpochVerifier::new(&parent, ¤t).verify(); + let result = EpochVerifier::new(parent, ¤t).verify(); assert!(result.is_ok(), "current: {current:#}, parent: {parent:#}"); } } diff --git a/verification/src/transaction_verifier.rs b/verification/src/transaction_verifier.rs index 0acab87d310..63c21f28534 100644 --- a/verification/src/transaction_verifier.rs +++ b/verification/src/transaction_verifier.rs @@ -6,7 +6,7 @@ use ckb_dao::DaoCalculator; use ckb_dao_utils::DaoError; use ckb_error::Error; use ckb_script::{TransactionScriptsVerifier, TransactionSnapshot, TransactionState, VerifyResult}; -use ckb_traits::{CellDataProvider, EpochProvider, HeaderProvider}; +use ckb_traits::{CellDataProvider, EpochProvider, HeaderFieldsProvider, HeaderProvider}; use ckb_types::{ core::{ cell::{CellMeta, ResolvedTransaction}, @@ -28,7 +28,7 @@ pub struct TimeRelativeTransactionVerifier<'a, M> { pub(crate) since: SinceVerifier<'a, M>, } -impl<'a, DL: HeaderProvider> TimeRelativeTransactionVerifier<'a, DL> { +impl<'a, DL: HeaderFieldsProvider> TimeRelativeTransactionVerifier<'a, DL> { /// Creates a new TimeRelativeTransactionVerifier pub fn new( rtx: Arc, @@ -110,7 +110,14 @@ pub struct ContextualTransactionVerifier<'a, DL> { impl<'a, DL> ContextualTransactionVerifier<'a, DL> where - DL: CellDataProvider + HeaderProvider + EpochProvider + Send + Sync + Clone + 'static, + DL: CellDataProvider + + HeaderProvider + + HeaderFieldsProvider + + EpochProvider + + Send + + Sync + + Clone + + 'static, { /// Creates a new ContextualTransactionVerifier pub fn new( @@ -628,7 +635,7 @@ pub struct SinceVerifier<'a, DL> { tx_env: &'a TxVerifyEnv, } -impl<'a, DL: HeaderProvider> SinceVerifier<'a, DL> { +impl<'a, DL: HeaderFieldsProvider> SinceVerifier<'a, DL> { pub fn new( rtx: Arc, consensus: &'a Consensus, @@ -644,8 +651,11 @@ impl<'a, DL: HeaderProvider> SinceVerifier<'a, DL> { } fn parent_median_time(&self, block_hash: &Byte32) -> u64 { - let (_, _, parent_hash) = self.data_loader.timestamp_and_parent(block_hash); - self.block_median_time(&parent_hash) + let header_fields = self + .data_loader + .get_header_fields(block_hash) + .expect("parent block exist"); + self.block_median_time(&header_fields.parent_hash) } fn block_median_time(&self, block_hash: &Byte32) -> u64 { @@ -731,9 +741,9 @@ impl<'a, DL: HeaderProvider> SinceVerifier<'a, DL> { .is_block_ts_as_relative_since_start_enabled(epoch_number) { self.data_loader - .get_header(&info.block_hash) + .get_header_fields(&info.block_hash) .expect("header exist") - .timestamp() + .timestamp } else { self.parent_median_time(&info.block_hash) }; @@ -814,7 +824,14 @@ pub struct ContextualWithoutScriptTransactionVerifier<'a, DL> { impl<'a, DL> ContextualWithoutScriptTransactionVerifier<'a, DL> where - DL: CellDataProvider + HeaderProvider + EpochProvider + Send + Sync + Clone + 'static, + DL: CellDataProvider + + HeaderProvider + + HeaderFieldsProvider + + EpochProvider + + Send + + Sync + + Clone + + 'static, { /// Creates a new ContextualWithoutScriptTransactionVerifier pub fn new(
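
A minimal usage sketch of the new `HeaderFieldsProvider` trait introduced in traits/src/header_provider.rs, assuming the `HeaderFields` struct and the default `block_median_time` method exactly as defined in this patch; `MockChain`, `mock_hash`, and the sample numbers and timestamps below are invented for illustration and are not part of the CKB codebase.

use std::collections::HashMap;

use ckb_traits::{HeaderFields, HeaderFieldsProvider};
use ckb_types::{core::EpochNumberWithFraction, packed::Byte32};

/// Illustrative in-memory chain keyed by block hash.
struct MockChain {
    headers: HashMap<Byte32, HeaderFields>,
}

impl HeaderFieldsProvider for MockChain {
    fn get_header_fields(&self, hash: &Byte32) -> Option<HeaderFields> {
        // Implementors only supply the lookup; `block_median_time` comes for free
        // as a default method. Rebuild the struct field by field so the example
        // does not assume `HeaderFields: Clone`.
        self.headers.get(hash).map(|fields| HeaderFields {
            hash: fields.hash.clone(),
            number: fields.number,
            epoch: fields.epoch,
            timestamp: fields.timestamp,
            parent_hash: fields.parent_hash.clone(),
        })
    }
}

/// Hypothetical helper: a 32-byte hash filled with a single byte value.
fn mock_hash(byte: u8) -> Byte32 {
    Byte32::from_slice(&[byte; 32]).expect("32 bytes")
}

fn main() {
    let epoch = EpochNumberWithFraction::new(0, 0, 1);
    let hashes: Vec<Byte32> = (1u8..=3).map(mock_hash).collect();
    let timestamps = [1_000u64, 3_000, 2_000];

    let mut headers = HashMap::new();
    for i in 0..3usize {
        let parent_hash = if i == 0 {
            // The genesis parent is never looked up: `block_median_time`
            // stops walking once it reaches block number 0.
            Byte32::zero()
        } else {
            hashes[i - 1].clone()
        };
        headers.insert(
            hashes[i].clone(),
            HeaderFields {
                hash: hashes[i].clone(),
                number: i as u64,
                epoch,
                timestamp: timestamps[i],
                parent_hash,
            },
        );
    }

    let chain = MockChain { headers };
    // Walks back from block 2 over timestamps [2000, 3000, 1000],
    // sorts them and picks the middle one.
    assert_eq!(chain.block_median_time(&hashes[2], 3), 2_000);
}

Splitting the lookup (`get_header_fields`) from the policy (`block_median_time`) is the design point of this refactor: it lets the store-backed `Snapshot`, `SyncShared`, and test mocks such as `MockMedianTime` share a single median-time implementation without materializing full `HeaderView`s.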