From 29700c3d9aa4698742c0c9cd5e313fd3d0727626 Mon Sep 17 00:00:00 2001
From: Stan Bondi
Date: Fri, 11 Aug 2023 16:22:01 +0400
Subject: [PATCH] fix(core): always pass the correct timestamp window to header
 validator (#5624)

Description
---
fix(core): sanity check that the timestamp window has the correct length
fix(core): always pass the correct timestamp window to header validation
tests(core): add a reorg test larger than the median timestamp window
chore: remove allocations when logging hashes in blockchain database

Motivation and Context
---
`sanity_check_timestamp_count` (previously `check_timestamp_count`) allowed
timestamp windows of `min(median_window_size, header.height - 1)` or greater.
However, the window must always be exactly the correct size for a given block;
if it is not, the median timestamp calculation will be incorrect.

This check also subtracted 1 from the header height BEFORE checking that the
height is correct, which would cause a panic/underflow if a header of height 0
was passed in.

In the reorg logic, we passed in the same timestamp window for the candidate
block when validating orphan chains, which could cause correct reorgs to fail.

This PR also ensures that timestamps are sorted, as this is required for a
correct median calculation.

How Has This Been Tested?
---
Adds a new unit test `it_does_a_sanity_check_on_the_number_of_timestamps_provided`.
Adds a blockchain unit test
`it_links_many_orphan_branches_to_main_chain_with_greater_reorg_than_median_timestamp_window`.
The existing test `it_links_many_orphan_branches_to_main_chain` failed after the
fix to `check_timestamp_count` and is fixed in this PR.
Manually caused an approximately 90-block reorg on localnet nodes.

What process can a PR reviewer use to test or verify this change?
---
Create a reorg using localnet

Breaking Changes
---

- [x] None
- [ ] Requires data directory on base node to be deleted
- [ ] Requires hard fork
- [ ] Other - Please specify
---
 .../src/conversions/historical_block.rs       |   2 +-
 .../src/commands/command/period_stats.rs      |   2 +-
 .../base_node/sync/header_sync/validator.rs   |   8 +-
 .../core/src/blocks/historical_block.rs       |  12 +-
 .../src/chain_storage/blockchain_database.rs  | 395 ++++++++++--------
 .../tests/blockchain_database.rs              |   2 +-
 base_layer/core/src/common/rolling_vec.rs     |  22 +-
 base_layer/core/src/validation/error.rs       |   7 +-
 .../header/header_full_validator.rs           |  32 +-
 base_layer/core/src/validation/helpers.rs     |   1 +
 base_layer/core/src/validation/mocks.rs       |  20 +-
 base_layer/core/src/validation/test.rs        |  49 ++-
 12 files changed, 341 insertions(+), 211 deletions(-)

diff --git a/applications/minotari_app_grpc/src/conversions/historical_block.rs b/applications/minotari_app_grpc/src/conversions/historical_block.rs
index e391eb75bd..f01078f22f 100644
--- a/applications/minotari_app_grpc/src/conversions/historical_block.rs
+++ b/applications/minotari_app_grpc/src/conversions/historical_block.rs
@@ -31,7 +31,7 @@ impl TryFrom<HistoricalBlock> for grpc::HistoricalBlock {
     fn try_from(hb: HistoricalBlock) -> Result<Self, Self::Error> {
         Ok(Self {
-            confirmations: hb.confirmations,
+            confirmations: hb.confirmations(),
             block: Some(
                 hb.try_into_block()?
.try_into() diff --git a/applications/minotari_node/src/commands/command/period_stats.rs b/applications/minotari_node/src/commands/command/period_stats.rs index d0c7739b15..2870989a5b 100644 --- a/applications/minotari_node/src/commands/command/period_stats.rs +++ b/applications/minotari_node/src/commands/command/period_stats.rs @@ -113,7 +113,7 @@ impl CommandContext { } else { block.header().timestamp.as_u64() - prev_block.header().timestamp.as_u64() }; - let diff = block.accumulated_data.target_difficulty.as_u64(); + let diff = block.accumulated_data().target_difficulty.as_u64(); period_difficulty += diff; period_solvetime += st; period_hash += diff as f64 / st as f64 / 1_000_000.0; diff --git a/base_layer/core/src/base_node/sync/header_sync/validator.rs b/base_layer/core/src/base_node/sync/header_sync/validator.rs index 56f1601d47..f9ece9fe39 100644 --- a/base_layer/core/src/base_node/sync/header_sync/validator.rs +++ b/base_layer/core/src/base_node/sync/header_sync/validator.rs @@ -138,7 +138,7 @@ impl BlockHeaderSyncValidator { // We dont want to mark a block as bad for internal failures Err( e @ ValidationError::FatalStorageError(_) | - e @ ValidationError::NotEnoughTimestamps { .. } | + e @ ValidationError::IncorrectNumberOfTimestampsProvided { .. } | e @ ValidationError::AsyncTaskFailed(_), ) => return Err(e.into()), // We dont have to mark the block twice @@ -159,12 +159,14 @@ impl BlockHeaderSyncValidator { state.previous_header = header.clone(); // Ensure that timestamps are inserted in sorted order - let maybe_index = state.timestamps.iter().position(|ts| ts >= &header.timestamp()); + let maybe_index = state.timestamps.iter().position(|ts| *ts >= header.timestamp()); match maybe_index { Some(pos) => { state.timestamps.insert(pos, header.timestamp()); }, - None => state.timestamps.push(header.timestamp()), + None => { + state.timestamps.push(header.timestamp()); + }, } state.current_height = header.height; diff --git a/base_layer/core/src/blocks/historical_block.rs b/base_layer/core/src/blocks/historical_block.rs index c0bce89579..18847da486 100644 --- a/base_layer/core/src/blocks/historical_block.rs +++ b/base_layer/core/src/blocks/historical_block.rs @@ -34,11 +34,11 @@ use crate::blocks::{error::BlockError, Block, BlockHeader, BlockHeaderAccumulate pub struct HistoricalBlock { /// The number of blocks that have been mined since this block, including this one. The current tip will have one /// confirmation. - pub confirmations: u64, + confirmations: u64, /// The underlying block block: Block, /// Accumulated data in the block header - pub accumulated_data: BlockHeaderAccumulatedData, + accumulated_data: BlockHeaderAccumulatedData, pruned_outputs: Vec, pruned_input_count: u64, } @@ -73,6 +73,14 @@ impl HistoricalBlock { &self.block } + pub fn into_block(self) -> Block { + self.block + } + + pub fn accumulated_data(&self) -> &BlockHeaderAccumulatedData { + &self.accumulated_data + } + pub fn hash(&self) -> &HashOutput { &self.accumulated_data.hash } diff --git a/base_layer/core/src/chain_storage/blockchain_database.rs b/base_layer/core/src/chain_storage/blockchain_database.rs index 8a18837cee..cda5e310ad 100644 --- a/base_layer/core/src/chain_storage/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/blockchain_database.rs @@ -619,8 +619,7 @@ where B: BlockchainBackend details: format!( "Mismatch between header and accumulated data for header {} ({}). 
This indicates an \ inconsistency in the blockchain database", - hash.to_hex(), - height + hash, height ), } })?; @@ -791,8 +790,8 @@ where B: BlockchainBackend arg: "template", message: format!( "Expected new block template previous hash to be set to the current tip hash ({}) but was {}", - tip_header.hash().to_hex(), - header.prev_hash.to_hex() + tip_header.hash(), + header.prev_hash, ), }); } @@ -880,18 +879,18 @@ where B: BlockchainBackend /// * `ChainReorg`: The block was added, which resulted in a chain-reorg. /// /// If an error does occur while writing the new block parts, all changes are reverted before returning. - pub fn add_block(&self, block: Arc) -> Result { + pub fn add_block(&self, candidate_block: Arc) -> Result { if self.is_add_block_disabled() { warn!( target: LOG_TARGET, "add_block is disabled, node busy syncing. Ignoring candidate block #{} ({})", - block.header.height, - block.hash().to_hex() + candidate_block.header.height, + candidate_block.hash(), ); return Err(ChainStorageError::AddBlockOperationLocked); } - let new_height = block.header.height; + let new_height = candidate_block.header.height; // This is important, we ask for a write lock to disable all read access to the db. The sync process sets the // add_block disable flag, but we can have a race condition between the two especially since the orphan // validation can take some time during big blocks as it does Rangeproof and metadata signature validation. @@ -911,7 +910,7 @@ where B: BlockchainBackend new_height, timer.elapsed() ); - let block_hash = block.hash(); + let block_hash = candidate_block.hash(); if db.contains(&DbKey::BlockHash(block_hash))? { return Ok(BlockAddResult::BlockExists); } @@ -933,8 +932,7 @@ where B: BlockchainBackend &*self.validators.block, &*self.validators.header, self.consensus_manager.chain_strength_comparer(), - &self.difficulty_calculator, - block, + candidate_block, )?; if block_add_result.was_chain_modified() { @@ -1281,9 +1279,9 @@ pub fn calculate_mmr_roots( return Err(ChainStorageError::CannotCalculateNonTipMmr(format!( "Block (#{}) is not building on tip, previous hash is {} but the current tip is #{} {}", header.height, - header.prev_hash.to_hex(), + header.prev_hash, metadata.height_of_longest_chain(), - metadata.best_block().to_hex() + metadata.best_block(), ))); } let deleted = db.fetch_deleted_bitmap()?.into_bitmap(); @@ -1306,16 +1304,18 @@ pub fn calculate_mmr_roots( } for output in body.outputs().iter() { - let output_hash = output.hash().to_vec(); - output_mmr.push(output_hash.clone())?; + let output_hash = output.hash(); + let output_mmr_hash = output_hash.to_vec(); + output_mmr.push(output_mmr_hash.clone())?; if output.is_burned() { - let index = output_mmr - .find_leaf_index(&output_hash)? - .ok_or_else(|| ChainStorageError::ValueNotFound { - entity: "UTXO", - field: "hash", - value: output_hash.to_hex(), - })?; + let index = + output_mmr + .find_leaf_index(&output_mmr_hash)? 
+ .ok_or_else(|| ChainStorageError::ValueNotFound { + entity: "UTXO", + field: "hash", + value: output_hash.to_hex(), + })?; deleted_outputs.push((index, output_hash)); } } @@ -1338,14 +1338,12 @@ pub fn calculate_mmr_roots( })?; debug!( target: LOG_TARGET, - "0-conf spend detected when calculating MMR roots for UTXO index {} ({})", - index, - output_hash.to_hex() + "0-conf spend detected when calculating MMR roots for UTXO index {} ({})", index, output_hash, ); index }, }; - deleted_outputs.push((index, output_hash.to_vec())); + deleted_outputs.push((index, output_hash)); } for (index, output_hash) in deleted_outputs { if !output_mmr.delete(index) { @@ -1354,7 +1352,7 @@ pub fn calculate_mmr_roots( if index < num_leaves && output_mmr.deleted().contains(index) { return Err(ChainStorageError::InvalidOperation(format!( "UTXO {} was already marked as deleted.", - output_hash.to_hex() + output_hash, ))); } return Err(ChainStorageError::InvalidOperation(format!( @@ -1487,8 +1485,7 @@ fn add_block( block_validator: &dyn CandidateBlockValidator, header_validator: &dyn HeaderChainLinkedValidator, chain_strength_comparer: &dyn ChainStrengthComparer, - difficulty_calculator: &DifficultyCalculator, - block: Arc, + candidate_block: Arc, ) -> Result { handle_possible_reorg( db, @@ -1496,9 +1493,8 @@ fn add_block( consensus_manager, block_validator, header_validator, - difficulty_calculator, chain_strength_comparer, - block, + candidate_block, ) } @@ -1509,7 +1505,7 @@ fn insert_best_block(txn: &mut DbTransaction, block: Arc) -> Result< target: LOG_TARGET, "Storing new block #{} `{}`", block.header().height, - block_hash.to_hex() + block_hash, ); if block.header().pow_algo() == PowAlgorithm::RandomX { let monero_header = @@ -1837,11 +1833,7 @@ fn rewind_to_height(db: &mut T, height: u64) -> Result( consensus_manager: &ConsensusManager, block_validator: &dyn CandidateBlockValidator, header_validator: &dyn HeaderChainLinkedValidator, - difficulty_calculator: &DifficultyCalculator, chain_strength_comparer: &dyn ChainStrengthComparer, - new_block: Arc, + candidate_block: Arc, ) -> Result { - insert_orphan_and_find_new_tips( - db, - new_block, - header_validator, - difficulty_calculator, - consensus_manager, - )?; + insert_orphan_and_find_new_tips(db, candidate_block, header_validator, consensus_manager)?; swap_to_highest_pow_chain(db, config, block_validator, chain_strength_comparer) } @@ -1923,7 +1908,7 @@ fn reorganize_chain( target: LOG_TARGET, "Validate and add {} chain block(s) from block {}. 
Rewound blocks: [{}]", chain.len(), - fork_hash.to_hex(), + fork_hash, removed_blocks .iter() .map(|b| b.height().to_string()) @@ -1941,7 +1926,7 @@ fn reorganize_chain( target: LOG_TARGET, "Orphan block {} ({}) failed validation during chain reorg: {:?}", block.header().height, - block_hash.to_hex(), + block_hash, e ); txn.insert_bad_block(block.header().hash(), block.header().height); @@ -2004,9 +1989,9 @@ fn swap_to_highest_pow_chain( target: LOG_TARGET, "Fork chain (accum_diff:{}, hash:{}) is stronger than the current tip (#{} ({})).", best_fork_header.accumulated_data().total_accumulated_difficulty, - best_fork_header.accumulated_data().hash.to_hex(), + best_fork_header.accumulated_data().hash, tip_header.height(), - tip_header.hash().to_hex() + tip_header.hash(), ); }, Ordering::Less | Ordering::Equal => { @@ -2014,9 +1999,9 @@ fn swap_to_highest_pow_chain( target: LOG_TARGET, "Fork chain (accum_diff:{}, hash:{}) with block {} ({}) has a weaker difficulty.", best_fork_header.accumulated_data().total_accumulated_difficulty, - best_fork_header.accumulated_data().hash.to_hex(), + best_fork_header.accumulated_data().hash, tip_header.header().height, - tip_header.hash().to_hex(), + tip_header.hash(), ); return Ok(BlockAddResult::OrphanBlock); }, @@ -2056,9 +2041,9 @@ fn swap_to_highest_pow_chain( tip_header.header().height, best_fork_header.header().height, tip_header.accumulated_data().total_accumulated_difficulty, - tip_header.accumulated_data().hash.to_hex(), + tip_header.accumulated_data().hash, best_fork_header.accumulated_data().total_accumulated_difficulty, - best_fork_header.accumulated_data().hash.to_hex(), + best_fork_header.accumulated_data().hash, num_removed_blocks, num_added_blocks, ); @@ -2090,7 +2075,7 @@ fn restore_reorged_chain( invalid_chain.len(), invalid_chain .iter() - .map(|block| block.accumulated_data().hash.to_hex()) + .map(|block| block.accumulated_data().hash) .collect::>(), ); let mut txn = DbTransaction::new(); @@ -2107,22 +2092,21 @@ fn restore_reorged_chain( #[allow(clippy::too_many_lines)] fn insert_orphan_and_find_new_tips( db: &mut T, - block: Arc, + candidate_block: Arc, validator: &dyn HeaderChainLinkedValidator, - difficulty_calculator: &DifficultyCalculator, rules: &ConsensusManager, ) -> Result<(), ChainStorageError> { - let hash = block.hash(); + let hash = candidate_block.hash(); // There cannot be any _new_ tips if we've seen this orphan block before if db.contains(&DbKey::OrphanBlock(hash))? { return Ok(()); } - let parent = match db.fetch_orphan_chain_tip_by_hash(&block.header.prev_hash)? { + let parent = match db.fetch_orphan_chain_tip_by_hash(&candidate_block.header.prev_hash)? { Some(curr_parent) => { let mut txn = DbTransaction::new(); - txn.remove_orphan_chain_tip(block.header.prev_hash); + txn.remove_orphan_chain_tip(candidate_block.header.prev_hash); db.write(txn)?; info!( target: LOG_TARGET, @@ -2131,36 +2115,35 @@ fn insert_orphan_and_find_new_tips( curr_parent }, None => match db - .fetch_chain_header_in_all_chains(&block.header.prev_hash) + .fetch_chain_header_in_all_chains(&candidate_block.header.prev_hash) .optional()? { Some(curr_parent) => { debug!( target: LOG_TARGET, "New orphan #{} ({}) does not have a parent in the current tip set. Parent is {}", - block.header.height, - hash.to_hex(), - curr_parent.hash().to_hex() + candidate_block.header.height, + hash, + curr_parent.hash(), ); curr_parent }, None => { - let hash_hex = hash.to_hex(); if db.contains(&DbKey::OrphanBlock(hash))? 
{ info!( target: LOG_TARGET, - "Orphan #{} ({}) already found in orphan database", block.header.height, hash_hex + "Orphan #{} ({}) already found in orphan database", candidate_block.header.height, hash ); } else { info!( target: LOG_TARGET, "Orphan #{} ({}) was not connected to any previous headers. Inserting as true orphan", - block.header.height, - hash_hex + candidate_block.header.height, + hash ); let mut txn = DbTransaction::new(); - txn.insert_orphan(block); + txn.insert_orphan(candidate_block); db.write(txn)?; } return Ok(()); @@ -2169,23 +2152,11 @@ fn insert_orphan_and_find_new_tips( }; // validate the block header - let prev_timestamps_count = cmp::min( - rules.consensus_constants(block.header.height).median_timestamp_count(), - usize::try_from(block.header.height - 1) - .map_err(|_| ChainStorageError::ConversionError("u64 to usize".to_string()))?, - ); - let mut prev_timestamps = Vec::with_capacity(prev_timestamps_count); - prev_timestamps.push(block.header.timestamp()); - let mut curr_header = block.header.prev_hash; - for _ in 0..prev_timestamps_count { - let h = db.fetch_chain_header_in_all_chains(&curr_header)?; - curr_header = h.header().prev_hash; - let timestamp = EpochTime::from(h.timestamp()); - prev_timestamps.push(timestamp); - } - let result = validator.validate(db, &block.header, parent.header(), &prev_timestamps, None); - match result { - Ok(_) => {}, + let mut prev_timestamps = get_previous_timestamps(db, &candidate_block.header, rules)?; + let result = validator.validate(db, &candidate_block.header, parent.header(), &prev_timestamps, None); + + let achieved_target_diff = match result { + Ok(achieved_target_diff) => achieved_target_diff, // future timelimit validation can succeed at a later time. As the block is not yet valid, we discard it // for now and ban the peer, but wont blacklist the block. Err(e @ ValidationError::BlockHeaderError(BlockHeaderValidationError::InvalidTimestampFutureTimeLimit)) => { @@ -2194,7 +2165,7 @@ fn insert_orphan_and_find_new_tips( // We dont want to mark a block as bad for internal failures Err( e @ ValidationError::FatalStorageError(_) | - e @ ValidationError::NotEnoughTimestamps { .. } | + e @ ValidationError::IncorrectNumberOfTimestampsProvided { .. } | e @ ValidationError::AsyncTaskFailed(_), ) => return Err(e.into()), // We dont have to mark the block twice @@ -2202,21 +2173,23 @@ fn insert_orphan_and_find_new_tips( Err(e) => { let mut txn = DbTransaction::new(); - txn.insert_bad_block(block.header.hash(), block.header.height); + txn.insert_bad_block(candidate_block.header.hash(), candidate_block.header.height); db.write(txn)?; return Err(e.into()); }, }; - let achieved_target_diff = difficulty_calculator.check_achieved_and_target_difficulty(db, &block.header)?; + + // Include the current block timestamp in the median window + prev_timestamps.push(candidate_block.header.timestamp); let accumulated_data = BlockHeaderAccumulatedData::builder(parent.accumulated_data()) .with_hash(hash) .with_achieved_target_difficulty(achieved_target_diff) - .with_total_kernel_offset(block.header.total_kernel_offset.clone()) + .with_total_kernel_offset(candidate_block.header.total_kernel_offset.clone()) .build()?; // NOTE: Panic is impossible, accumulated data constructed from block - let chain_block = ChainBlock::try_construct(block, accumulated_data).unwrap(); + let chain_block = ChainBlock::try_construct(candidate_block, accumulated_data).unwrap(); let chain_header = chain_block.to_chain_header(); // Extend orphan chain tip. 
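// Illustrative sketch (not part of this patch): the median rule that the timestamp
// window above feeds into. `median_timestamp` is a hypothetical helper named here
// for illustration only; it assumes the window is already sorted, which is why
// `get_previous_timestamps` below ends with `sort_unstable()`.
fn median_timestamp(sorted_window: &[u64]) -> Option<u64> {
    if sorted_window.is_empty() {
        return None; // rejected earlier by sanity_check_timestamp_count
    }
    let mid = sorted_window.len() / 2;
    if sorted_window.len() % 2 == 0 {
        // Even-sized window: take the mean of the two middle values.
        Some((sorted_window[mid - 1] + sorted_window[mid]) / 2)
    } else {
        Some(sorted_window[mid])
    }
}
// If the window has the wrong length (the bug this PR fixes), `mid` indexes the
// wrong element and the computed median silently shifts.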
@@ -2227,7 +2200,7 @@ fn insert_orphan_and_find_new_tips( txn.set_accumulated_data_for_orphan(chain_block.accumulated_data().clone()); db.write(txn)?; - let tips = find_orphan_descendant_tips_of(db, chain_header, &prev_timestamps, validator)?; + let tips = find_orphan_descendant_tips_of(db, chain_header, prev_timestamps, validator)?; debug!(target: LOG_TARGET, "Found {} new orphan tips", tips.len()); let mut txn = DbTransaction::new(); for new_tip in &tips { @@ -2242,7 +2215,7 @@ fn insert_orphan_and_find_new_tips( fn find_orphan_descendant_tips_of( db: &mut T, prev_chain_header: ChainHeader, - prev_timestamps: &[EpochTime], + prev_timestamps: RollingVec, validator: &dyn HeaderChainLinkedValidator, ) -> Result, ChainStorageError> { let children = db.fetch_orphan_children_of(*prev_chain_header.hash())?; @@ -2251,30 +2224,38 @@ fn find_orphan_descendant_tips_of( target: LOG_TARGET, "Found new orphan tip {} ({})", &prev_chain_header.height(), - &prev_chain_header.hash().to_hex() + &prev_chain_header.hash(), ); return Ok(vec![prev_chain_header]); } + debug!( + target: LOG_TARGET, + "Found {} children of orphan {} ({})", + children.len(), + &prev_chain_header.height(), + &prev_chain_header.hash() + ); + let mut res = vec![]; for child in children { debug!( target: LOG_TARGET, "Validating header #{} ({}), descendant of #{} ({})", child.header.height, - child.hash().to_hex(), + child.hash(), prev_chain_header.height(), - prev_chain_header.hash().to_hex() + prev_chain_header.hash(), ); + // we need to validate the header here because it may never have been validated. - match validator.validate( - db, - &child.header, - &prev_chain_header.clone().into_header(), - prev_timestamps, - None, - ) { + match validator.validate(db, &child.header, prev_chain_header.header(), &prev_timestamps, None) { Ok(achieved_target) => { + // Append the child timestamp - a RollingVec ensures that the number of timestamps can never be more + // than the median timestamp window size. 
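// Cloning per child below means sibling orphan branches each extend their own
// window copy and cannot pollute each other's median timestamp calculation.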
+ let mut prev_timestamps_for_children = prev_timestamps.clone(); + prev_timestamps_for_children.push(child.header.timestamp); + let child_hash = child.hash(); let accum_data = BlockHeaderAccumulatedData::builder(prev_chain_header.accumulated_data()) .with_hash(child_hash) @@ -2285,7 +2266,7 @@ fn find_orphan_descendant_tips_of( let chain_header = ChainHeader::try_construct(child.header, accum_data).ok_or_else(|| { ChainStorageError::InvalidOperation(format!( "Attempt to create mismatched ChainHeader with hash {}", - child_hash.to_hex() + child_hash, )) })?; @@ -2293,10 +2274,8 @@ fn find_orphan_descendant_tips_of( let mut txn = DbTransaction::new(); txn.set_accumulated_data_for_orphan(chain_header.accumulated_data().clone()); db.write(txn)?; - let curr_timestamp = chain_header.header().timestamp(); - let new_prev_timestamps = [&[curr_timestamp], prev_timestamps].concat(); let children = - find_orphan_descendant_tips_of(db, chain_header.clone(), &new_prev_timestamps, validator)?; + find_orphan_descendant_tips_of(db, chain_header, prev_timestamps_for_children, validator)?; res.extend(children); }, Err(e) => { @@ -2304,7 +2283,7 @@ fn find_orphan_descendant_tips_of( warn!( target: LOG_TARGET, "Discarding orphan {} because it has an invalid header: {:?}", - child.hash().to_hex(), + child.hash(), e ); let mut txn = DbTransaction::new(); @@ -2315,6 +2294,30 @@ fn find_orphan_descendant_tips_of( } Ok(res) } +fn get_previous_timestamps( + db: &mut T, + header: &BlockHeader, + rules: &ConsensusManager, +) -> Result, ChainStorageError> { + let median_timestamp_window_size = rules.consensus_constants(header.height).median_timestamp_count(); + let prev_height = usize::try_from(header.height) + .map_err(|_| ChainStorageError::ConversionError("Block height overflowed usize".to_string()))?; + + let prev_timestamps_count = cmp::min(median_timestamp_window_size, prev_height); + + let mut timestamps = RollingVec::new(median_timestamp_window_size); + let mut curr_header = header.prev_hash; + for _ in 0..prev_timestamps_count { + let h = db.fetch_chain_header_in_all_chains(&curr_header)?; + curr_header = h.header().prev_hash; + timestamps.push(EpochTime::from(h.timestamp())); + } + + // median calculation requires timestamps to be sorted + timestamps.sort_unstable(); + + Ok(timestamps) +} // Discard the the orphan block from the orphan pool that corresponds to the provided block hash. 
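// Illustrative sketch (not part of this patch): the window-length rule enforced
// by get_previous_timestamps() above. For a header at `height`, exactly
// min(median_window_size, height) previous timestamps exist; using `height`
// rather than the old `height - 1` avoids the underflow on a height-0 header.
fn expected_window_len(median_window_size: u64, height: u64) -> u64 {
    std::cmp::min(median_window_size, height)
}
// expected_window_len(11, 0)   == 0   (genesis: no previous timestamps)
// expected_window_len(11, 5)   == 5   (young chain: window still growing)
// expected_window_len(11, 100) == 11  (steady state: full median window)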
fn remove_orphan(db: &mut T, hash: HashOutput) -> Result<(), ChainStorageError> { @@ -2335,7 +2338,7 @@ fn get_orphan_link_main_chain( let curr_block = db.fetch_orphan_chain_block(curr_hash)?.ok_or_else(|| { ChainStorageError::InvalidOperation(format!( "get_orphan_link_main_chain: Failed to fetch orphan chain block by hash {}", - curr_hash.to_hex() + curr_hash, )) })?; curr_hash = curr_block.header().prev_hash; @@ -2522,6 +2525,7 @@ fn convert_to_option_bounds>(bounds: T) -> (Option, Opt mod test { use std::{collections::HashMap, sync}; + use rand::seq::SliceRandom; use tari_common::configuration::Network; use tari_test_utils::unpack_enum; @@ -2661,14 +2665,8 @@ mod test { let (_, chain) = create_chained_blocks(&[("A->GB", 1u64, 120u64)], genesis_block).await; let block = chain.get("A").unwrap().clone(); let mut access = db.db_write_access().unwrap(); - insert_orphan_and_find_new_tips( - &mut *access, - block.to_arc_block(), - &validator, - &db.difficulty_calculator, - &db.consensus_manager, - ) - .unwrap(); + insert_orphan_and_find_new_tips(&mut *access, block.to_arc_block(), &validator, &db.consensus_manager) + .unwrap(); let maybe_block = access.fetch_orphan_chain_tip_by_hash(block.hash()).unwrap(); assert_eq!(maybe_block.unwrap().header(), block.header()); @@ -2686,24 +2684,12 @@ mod test { let mut access = db.db_write_access().unwrap(); let block_d2 = orphan_chain.get("D2").unwrap().clone(); - insert_orphan_and_find_new_tips( - &mut *access, - block_d2.to_arc_block(), - &validator, - &db.difficulty_calculator, - &db.consensus_manager, - ) - .unwrap(); + insert_orphan_and_find_new_tips(&mut *access, block_d2.to_arc_block(), &validator, &db.consensus_manager) + .unwrap(); let block_e2 = orphan_chain.get("E2").unwrap().clone(); - insert_orphan_and_find_new_tips( - &mut *access, - block_e2.to_arc_block(), - &validator, - &db.difficulty_calculator, - &db.consensus_manager, - ) - .unwrap(); + insert_orphan_and_find_new_tips(&mut *access, block_e2.to_arc_block(), &validator, &db.consensus_manager) + .unwrap(); let maybe_block = access.fetch_orphan_children_of(*block_d2.hash()).unwrap(); assert_eq!(maybe_block[0], *block_e2.to_arc_block()); @@ -2720,14 +2706,8 @@ mod test { let mut access = db.db_write_access().unwrap(); let block = orphan_chain.get("B2").unwrap().clone(); - insert_orphan_and_find_new_tips( - &mut *access, - block.to_arc_block(), - &validator, - &db.difficulty_calculator, - &db.consensus_manager, - ) - .unwrap(); + insert_orphan_and_find_new_tips(&mut *access, block.to_arc_block(), &validator, &db.consensus_manager) + .unwrap(); let fork_tip = access.fetch_orphan_chain_tip_by_hash(block.hash()).unwrap().unwrap(); assert_eq!(fork_tip, block.to_chain_header()); assert_eq!(fork_tip.accumulated_data().total_accumulated_difficulty, 3); @@ -2735,22 +2715,16 @@ mod test { assert_eq!(all_tips, 1); // Insert again (block was received more than once), no new tips - insert_orphan_and_find_new_tips( - &mut *access, - block.to_arc_block(), - &validator, - &db.difficulty_calculator, - &db.consensus_manager, - ) - .unwrap(); + insert_orphan_and_find_new_tips(&mut *access, block.to_arc_block(), &validator, &db.consensus_manager) + .unwrap(); let all_tips = access.fetch_all_orphan_chain_tips().unwrap().len(); assert_eq!(all_tips, 1); } } mod handle_possible_reorg { + use super::*; - use crate::proof_of_work::Difficulty; #[tokio::test] async fn it_links_many_orphan_branches_to_main_chain() { @@ -2769,7 +2743,7 @@ mod test { // Add orphans out of height order for name in ["5b", "3b", "4b", 
"6b"] { - let block = orphan_chain_b.get(name).unwrap().clone(); + let block = orphan_chain_b.get(name).unwrap(); let result = test.handle_possible_reorg(block.to_arc_block()).unwrap(); assert!(result.is_orphaned()); } @@ -2780,7 +2754,7 @@ mod test { create_chained_blocks(block_specs!(["4c->GB"], ["5c->4c"], ["6c->5c"], ["7c->6c"]), fork_root).await; for name in ["7c", "5c", "6c", "4c"] { - let block = orphan_chain_c.get(name).unwrap().clone(); + let block = orphan_chain_c.get(name).unwrap(); let result = test.handle_possible_reorg(block.to_arc_block()).unwrap(); assert!(result.is_orphaned()); } @@ -2792,12 +2766,13 @@ mod test { ) .await; - let block = orphan_chain_d.get("7d").unwrap().clone(); + let block = orphan_chain_d.get("7d").unwrap(); let result = test.handle_possible_reorg(block.to_arc_block()).unwrap(); assert!(result.is_orphaned()); - // Now, connect the chain and check that the c branch is the tip - let block = orphan_chain_b.get("2b").unwrap().clone(); + // REORG + // Now, connect the chain and check that 7d branch is the tip + let block = orphan_chain_b.get("2b").unwrap(); let result = test.handle_possible_reorg(block.to_arc_block()).unwrap(); result.assert_reorg(6, 3); @@ -2836,6 +2811,98 @@ mod test { } } + #[tokio::test] + async fn it_links_many_orphan_branches_to_main_chain_with_greater_reorg_than_median_timestamp_window() { + let test = TestHarness::setup(); + // This test assumes a MTC of 11 + assert_eq!(test.consensus.consensus_constants(0).median_timestamp_count(), 11); + + let (_, main_chain) = create_main_chain( + &test.db, + block_specs!( + ["1a->GB"], + ["2a->1a"], + ["3a->2a"], + ["4a->3a"], + ["5a->4a"], + ["6a->5a"], + ["7a->6a"], + ["8a->7a"], + ["9a->8a"], + ["10a->9a"], + ["11a->10a"], + ["12a->11a"], + ["13a->12a"], + ), + ) + .await; + let genesis = main_chain.get("GB").unwrap().clone(); + + let fork_root = main_chain.get("1a").unwrap().clone(); + let (_, orphan_chain_b) = create_chained_blocks( + block_specs!( + ["2b->GB"], + ["3b->2b"], + ["4b->3b"], + ["5b->4b"], + ["6b->5b"], + ["7b->6b"], + ["8b->7b"], + ["9b->8b"], + ["10b->9b"], + ["11b->10b"], + ["12b->11b", difficulty: Difficulty::from_u64(5).unwrap()] + ), + fork_root, + ) + .await; + + // Add orphans out of height order + let mut unordered = vec!["3b", "4b", "5b", "6b", "7b", "8b", "9b", "10b", "11b", "12b"]; + unordered.shuffle(&mut rand::thread_rng()); + for name in unordered { + let block = orphan_chain_b.get(name).unwrap().clone(); + let result = test.handle_possible_reorg(block.to_arc_block()).unwrap(); + assert!(result.is_orphaned()); + } + + // Now, connect the chain and check that 12b branch is the tip + let block = orphan_chain_b.get("2b").unwrap().clone(); + let result = test.handle_possible_reorg(block.to_arc_block()).unwrap(); + result.assert_reorg(11, 12); + + { + // Check 2b was added + let access = test.db_write_access(); + let block = orphan_chain_b.get("2b").unwrap().clone(); + assert!(access.contains(&DbKey::BlockHash(*block.hash())).unwrap()); + + // Check 12b is the tip + let block = orphan_chain_b.get("12b").unwrap().clone(); + let tip = access.fetch_tip_header().unwrap(); + assert_eq!(tip.hash(), block.hash()); + let metadata = access.fetch_chain_metadata().unwrap(); + assert_eq!(metadata.best_block(), block.hash()); + assert_eq!(metadata.height_of_longest_chain(), block.height()); + assert!(access.contains(&DbKey::BlockHash(*block.hash())).unwrap()); + + let mut all_blocks = main_chain.into_iter().chain(orphan_chain_b).collect::>(); + all_blocks.insert("GB".to_string(), 
genesis); + // Check the chain heights + let expected_chain = [ + "GB", "1a", "2b", "3b", "4b", "5b", "6b", "7b", "8b", "9b", "10b", "11b", "12b", + ]; + for (height, name) in expected_chain.iter().enumerate() { + let expected_block = all_blocks.get(*name).unwrap(); + unpack_enum!( + DbValue::BlockHeader(found_block) = + access.fetch(&DbKey::BlockHeader(height as u64)).unwrap().unwrap() + ); + assert_eq!(*found_block, *expected_block.header()); + } + } + } + #[tokio::test] async fn it_errors_if_reorging_to_an_invalid_height() { let test = TestHarness::setup(); @@ -2996,7 +3063,6 @@ mod test { &db.consensus_manager, &mock_validator, &mock_validator, - &db.difficulty_calculator, &*chain_strength_comparer, reorg_chain.get("E2").unwrap().to_arc_block(), ) @@ -3010,7 +3076,6 @@ mod test { &db.consensus_manager, &mock_validator, &mock_validator, - &db.difficulty_calculator, &*chain_strength_comparer, reorg_chain.get("E2").unwrap().to_arc_block(), ) @@ -3023,7 +3088,6 @@ mod test { &db.consensus_manager, &mock_validator, &mock_validator, - &db.difficulty_calculator, &*chain_strength_comparer, reorg_chain.get("D2").unwrap().to_arc_block(), ) @@ -3039,7 +3103,6 @@ mod test { &db.consensus_manager, &mock_validator, &mock_validator, - &db.difficulty_calculator, &*chain_strength_comparer, reorg_chain.get("C2").unwrap().to_arc_block(), ) @@ -3076,7 +3139,6 @@ mod test { &db.consensus_manager, &mock_validator, &mock_validator, - &db.difficulty_calculator, &*chain_strength_comparer, reorg_chain.get("E2").unwrap().to_arc_block(), ) @@ -3089,7 +3151,6 @@ mod test { &db.consensus_manager, &MockValidator::new(false), &mock_validator, - &db.difficulty_calculator, &*chain_strength_comparer, reorg_chain.get("D2").unwrap().to_arc_block(), ) @@ -3295,7 +3356,6 @@ mod test { config: BlockchainDatabaseConfig, consensus: ConsensusManager, chain_strength_comparer: Box, - difficulty_calculator: DifficultyCalculator, post_orphan_body_validator: Box>, header_validator: Box>, } @@ -3305,10 +3365,7 @@ mod test { let consensus = create_consensus_rules(); let db = create_new_blockchain(); let difficulty_calculator = DifficultyCalculator::new(consensus.clone(), Default::default()); - let header_validator = Box::new(HeaderFullValidator::new( - consensus.clone(), - difficulty_calculator.clone(), - )); + let header_validator = Box::new(HeaderFullValidator::new(consensus.clone(), difficulty_calculator)); let post_orphan_body_validator = Box::new(MockValidator::new(true)); let chain_strength_comparer = strongest_chain().by_sha3x_difficulty().build(); Self { @@ -3316,7 +3373,6 @@ mod test { config: Default::default(), consensus, chain_strength_comparer, - difficulty_calculator, header_validator, post_orphan_body_validator, } @@ -3334,7 +3390,6 @@ mod test { &self.consensus, &*self.post_orphan_body_validator, &*self.header_validator, - &self.difficulty_calculator, &*self.chain_strength_comparer, block, ) @@ -3362,8 +3417,8 @@ mod test { debug!( "Testing handle_possible_reorg for block {} ({}, parent = {})", block.height(), - block.hash().to_hex(), - block.header().prev_hash.to_hex() + block.hash(), + block.header().prev_hash, ); results.push(test.handle_possible_reorg(block.to_arc_block()).unwrap()); } diff --git a/base_layer/core/src/chain_storage/tests/blockchain_database.rs b/base_layer/core/src/chain_storage/tests/blockchain_database.rs index d07034d161..d0357fcacf 100644 --- a/base_layer/core/src/chain_storage/tests/blockchain_database.rs +++ b/base_layer/core/src/chain_storage/tests/blockchain_database.rs @@ -572,7 +572,7 @@ 
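// The reorg above added the 11 fork blocks (2b..12b) and removed 12 main-chain
// blocks (2a..13a); that is deeper than the 11-block median timestamp window,
// which is exactly the case the pre-fix code mishandled by reusing the tip's
// timestamp window for the orphan chain.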
mod clear_all_pending_headers { let key_manager = create_test_core_key_manager_with_memory_db(); let _blocks_and_outputs = add_many_chained_blocks(2, &db, &key_manager).await; let prev_block = db.fetch_block(2, true).unwrap(); - let mut prev_accum = prev_block.accumulated_data.clone(); + let mut prev_accum = prev_block.accumulated_data().clone(); let mut prev_header = prev_block.try_into_chain_block().unwrap().to_chain_header(); let headers = (0..5) .map(|_| { diff --git a/base_layer/core/src/common/rolling_vec.rs b/base_layer/core/src/common/rolling_vec.rs index ef5b84d396..428b557b3d 100644 --- a/base_layer/core/src/common/rolling_vec.rs +++ b/base_layer/core/src/common/rolling_vec.rs @@ -33,17 +33,20 @@ impl RollingVec { } /// Adds a new element to the RollingVec. - /// If adding an element will cause the length to exceed the capacity, the first element is removed. - pub fn push(&mut self, item: T) { + /// If adding an element will cause the length to exceed the capacity, the first element is removed and the removed + /// value is returned. + pub fn push(&mut self, item: T) -> Option { if self.capacity() == 0 { - return; + return None; } + let mut removed = None; if self.is_full() { - self.inner_mut().remove(0); + removed = Some(self.inner_mut().remove(0)); } self.inner_mut().push(item); + removed } pub fn insert(&mut self, index: usize, item: T) { @@ -57,6 +60,10 @@ impl RollingVec { self.inner_mut().insert(index, item); } + pub fn pop(&mut self) -> Option { + self.inner_mut().pop() + } + #[inline] pub fn is_full(&self) -> bool { // len never exceeds capacity @@ -69,6 +76,13 @@ impl RollingVec { self.inner().capacity() } + /// Sorts the slice, but might not preserve the order of equal elements. + /// This sort is unstable (i.e., may reorder equal elements), in-place (i.e., does not allocate), and O(n * log(n)) + pub fn sort_unstable(&mut self) + where T: Ord { + self.inner_mut().sort_unstable(); + } + #[inline] fn inner(&self) -> &Vec { &self.0 diff --git a/base_layer/core/src/validation/error.rs b/base_layer/core/src/validation/error.rs index 688739faab..97ef108057 100644 --- a/base_layer/core/src/validation/error.rs +++ b/base_layer/core/src/validation/error.rs @@ -147,8 +147,11 @@ pub enum ValidationError { ValidatorNodeRegistrationMinLockHeight { min: u64, actual: u64 }, #[error("Validator node registration signature failed verification")] InvalidValidatorNodeSignature, - #[error("Not enough timestamps provided. Expected {expected}, got {actual}")] - NotEnoughTimestamps { expected: usize, actual: usize }, + #[error( + "An unexpected number of timestamps were provided to the header validator. THIS IS A BUG. Expected \ + {expected}, got {actual}" + )] + IncorrectNumberOfTimestampsProvided { expected: u64, actual: u64 }, #[error("Invalid difficulty: {0}")] DifficultyError(#[from] DifficultyError), #[error("Covenant too large. Max size: {max_size}, Actual size: {actual_size}")] diff --git a/base_layer/core/src/validation/header/header_full_validator.rs b/base_layer/core/src/validation/header/header_full_validator.rs index f700fb26a3..8b2fcf7085 100644 --- a/base_layer/core/src/validation/header/header_full_validator.rs +++ b/base_layer/core/src/validation/header/header_full_validator.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
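// Illustrative sketch (not part of this patch): the revised RollingVec::push
// contract from the rolling_vec.rs hunk above. Pushing into a full window now
// returns the evicted oldest element, so capacity is never exceeded.
#[test]
fn rolling_window_push_evicts_oldest() {
    let mut window = RollingVec::new(2);
    assert_eq!(window.push(10u64), None);
    assert_eq!(window.push(20), None);
    assert_eq!(window.push(30), Some(10)); // full: oldest evicted and returned
    // sort_unstable() can then order the window for a correct median.
}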
-use std::{cmp, convert::TryFrom}; +use std::cmp; use log::warn; use tari_common_types::types::FixedHash; @@ -69,11 +69,11 @@ impl HeaderChainLinkedValidator for HeaderFullValidator check_not_bad_block(db, header.hash())?; check_blockchain_version(constants, header.version)?; + check_height(header, prev_header)?; - check_timestamp_count(header, prev_timestamps, constants)?; + sanity_check_timestamp_count(header, prev_timestamps, constants)?; check_header_timestamp_greater_than_median(header, prev_timestamps)?; - check_height(header, prev_header)?; check_prev_hash(header, prev_header)?; check_timestamp_ftl(header, &self.rules)?; check_pow_data(header, &self.rules, db)?; @@ -89,20 +89,24 @@ impl HeaderChainLinkedValidator for HeaderFullValidator } } -fn check_timestamp_count( +/// This is a sanity check for the information provided by the caller, rather than a validation for the header itself. +fn sanity_check_timestamp_count( header: &BlockHeader, - prev_timestamps: &[EpochTime], + timestamps: &[EpochTime], consensus_constants: &ConsensusConstants, ) -> Result<(), ValidationError> { - let expected_timestamp_count = cmp::min( - consensus_constants.median_timestamp_count(), - usize::try_from(header.height - 1) - .map_err(|_| ValidationError::CustomError("Invalid conversion u64 to uszie".to_string()))?, - ); - let timestamps: Vec = prev_timestamps.iter().take(expected_timestamp_count).copied().collect(); - if timestamps.len() < expected_timestamp_count { - return Err(ValidationError::NotEnoughTimestamps { - actual: timestamps.len(), + let expected_timestamp_count = cmp::min(consensus_constants.median_timestamp_count() as u64, header.height); + // Empty `timestamps` is never valid + if timestamps.is_empty() { + return Err(ValidationError::IncorrectNumberOfTimestampsProvided { + expected: expected_timestamp_count, + actual: 0, + }); + } + + if timestamps.len() as u64 != expected_timestamp_count { + return Err(ValidationError::IncorrectNumberOfTimestampsProvided { + actual: timestamps.len() as u64, expected: expected_timestamp_count, }); } diff --git a/base_layer/core/src/validation/helpers.rs b/base_layer/core/src/validation/helpers.rs index 2bf64e1934..9f23368262 100644 --- a/base_layer/core/src/validation/helpers.rs +++ b/base_layer/core/src/validation/helpers.rs @@ -81,6 +81,7 @@ pub fn check_header_timestamp_greater_than_median( timestamps: &[EpochTime], ) -> Result<(), ValidationError> { if timestamps.is_empty() { + // unreachable due to sanity_check_timestamp_count return Err(ValidationError::BlockHeaderError( BlockHeaderValidationError::InvalidTimestamp("The timestamp is empty".to_string()), )); diff --git a/base_layer/core/src/validation/mocks.rs b/base_layer/core/src/validation/mocks.rs index f8eb13b43c..41ae56ba0d 100644 --- a/base_layer/core/src/validation/mocks.rs +++ b/base_layer/core/src/validation/mocks.rs @@ -38,9 +38,10 @@ use super::{ use crate::{ blocks::{Block, BlockHeader, ChainBlock}, chain_storage::BlockchainBackend, - proof_of_work::{difficulty::CheckedAdd, sha3x_difficulty, AchievedTargetDifficulty, Difficulty, PowAlgorithm}, + proof_of_work::{randomx_factory::RandomXFactory, AchievedTargetDifficulty, Difficulty}, + test_helpers::create_consensus_rules, transactions::transaction_components::Transaction, - validation::{error::ValidationError, FinalHorizonStateValidation}, + validation::{error::ValidationError, DifficultyCalculator, FinalHorizonStateValidation}, }; #[derive(Clone)] @@ -108,22 +109,17 @@ impl InternalConsistencyValidator for MockValidator { impl 
HeaderChainLinkedValidator for MockValidator { fn validate( &self, - _: &B, + db: &B, header: &BlockHeader, _: &BlockHeader, _: &[EpochTime], _: Option, ) -> Result { if self.is_valid.load(Ordering::SeqCst) { - let achieved = sha3x_difficulty(header)?; - - let achieved_target = AchievedTargetDifficulty::try_construct( - PowAlgorithm::Sha3x, - achieved, - achieved.checked_add(1).unwrap(), - ) - .unwrap(); - Ok(achieved_target) + // this assumes consensus rules are the same as the test rules which is a little brittle + let difficulty_calculator = DifficultyCalculator::new(create_consensus_rules(), RandomXFactory::default()); + let achieved_target_diff = difficulty_calculator.check_achieved_and_target_difficulty(db, header)?; + Ok(achieved_target_diff) } else { Err(ValidationError::custom_error( "This mock validator always returns an error", diff --git a/base_layer/core/src/validation/test.rs b/base_layer/core/src/validation/test.rs index da88bd1cc6..ffb7c37c8f 100644 --- a/base_layer/core/src/validation/test.rs +++ b/base_layer/core/src/validation/test.rs @@ -51,8 +51,14 @@ use crate::{ }; mod header_validators { + use tari_utilities::epoch_time::EpochTime; + use super::*; - use crate::validation::{header::HeaderFullValidator, HeaderChainLinkedValidator}; + use crate::{ + block_specs, + test_helpers::blockchain::{create_main_chain, create_new_blockchain}, + validation::{header::HeaderFullValidator, HeaderChainLinkedValidator}, + }; #[test] fn header_iter_empty_and_invalid_height() { @@ -121,6 +127,47 @@ mod header_validators { version: u16::MAX })); } + + #[tokio::test] + async fn it_does_a_sanity_check_on_the_number_of_timestamps_provided() { + let consensus_manager = ConsensusManagerBuilder::new(Network::LocalNet).build().unwrap(); + let db = create_new_blockchain(); + + let (_, blocks) = create_main_chain(&db, block_specs!(["1->GB"], ["2->1"], ["3->2"])).await; + let last_block = blocks.get("3").unwrap(); + + let candidate_header = BlockHeader::from_previous(last_block.header()); + let difficulty_calculator = DifficultyCalculator::new(consensus_manager.clone(), Default::default()); + let validator = HeaderFullValidator::new(consensus_manager, difficulty_calculator); + let mut timestamps = db.fetch_block_timestamps(*blocks.get("3").unwrap().hash()).unwrap(); + + // First, lets check that everything else is valid + validator + .validate( + &*db.db_read_access().unwrap(), + &candidate_header, + last_block.header(), + ×tamps, + None, + ) + .unwrap(); + + // Add an extra timestamp + timestamps.push(EpochTime::now()); + let err = validator + .validate( + &*db.db_read_access().unwrap(), + &candidate_header, + last_block.header(), + ×tamps, + None, + ) + .unwrap_err(); + assert!(matches!(err, ValidationError::IncorrectNumberOfTimestampsProvided { + actual: 5, + expected: 4 + })); + } } #[tokio::test]