From 542e61264c3e5fa3ad97afd090dc4cea573d8259 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Wed, 28 Aug 2024 18:28:24 +0800 Subject: [PATCH 01/61] add parallel code --- sync/src/lib.rs | 1 + sync/src/store/sync_dag_store.rs | 4 +- sync/src/store/tests.rs | 71 +++++++++++++++++++++++++++++++- 3 files changed, 73 insertions(+), 3 deletions(-) diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 0d5ae0a43c..8c5b6ae197 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -12,3 +12,4 @@ pub mod txn_sync; pub mod parallel; pub mod verified_rpc_client; +pub mod parallel; \ No newline at end of file diff --git a/sync/src/store/sync_dag_store.rs b/sync/src/store/sync_dag_store.rs index 8d3dca1492..45af933521 100644 --- a/sync/src/store/sync_dag_store.rs +++ b/sync/src/store/sync_dag_store.rs @@ -1,6 +1,7 @@ -use std::{path::Path, sync::Arc}; +use std::{ops::DerefMut, path::Path, sync::Arc}; use anyhow::format_err; +use parking_lot::RwLock; use starcoin_config::{temp_dir, RocksdbConfig, StorageConfig}; use starcoin_crypto::HashValue; use starcoin_dag::consensusdb::{prelude::StoreError, schemadb::REACHABILITY_DATA_CF}; @@ -111,7 +112,6 @@ impl SyncDagStore { block: Some(block.clone()), }]) .map_err(|e| format_err!("Failed to save absent block: {:?}", e))?; - Ok(()) } _ => Err(format_err!( diff --git a/sync/src/store/tests.rs b/sync/src/store/tests.rs index 881b1badc0..9a58bad96a 100644 --- a/sync/src/store/tests.rs +++ b/sync/src/store/tests.rs @@ -1,6 +1,20 @@ +use std::{ + ops::{Deref, DerefMut}, + sync::Arc, + u64, +}; + use anyhow::Ok; +use parking_lot::RwLock; use starcoin_crypto::HashValue; -use starcoin_dag::consensusdb::schema::{KeyCodec, ValueCodec}; +use starcoin_dag::{ + consensusdb::{ + schema::{KeyCodec, ValueCodec}, + schemadb::MemoryReachabilityStore, + }, + reachability::inquirer, + types::interval::Interval, +}; use starcoin_types::{ account_address::AccountAddress, block::{Block, BlockBody, BlockHeader, BlockHeaderBuilder, BlockHeaderExtra, BlockNumber}, @@ -67,6 +81,61 @@ fn build_version_0_block(number: BlockNumber) -> Block { Block::new(header, body) } +#[test] +fn test_add_reachability_data() -> anyhow::Result<()> { + let mut sync_dag_store = SyncDagStore::create_for_testing()?; + let reachability_store = sync_dag_store.reachability_store.clone(); + + let mut writer = reachability_store.write(); + + let x = HashValue::random(); + let a = HashValue::random(); + let b = HashValue::random(); + let c = HashValue::random(); + let d = HashValue::random(); + let e = HashValue::random(); + + inquirer::init_with_params(writer.deref_mut(), x, Interval::maximal())?; + + inquirer::add_block(writer.deref_mut(), a, x, &mut [x].into_iter())?; + inquirer::add_block(writer.deref_mut(), b, a, &mut [a].into_iter())?; + inquirer::add_block(writer.deref_mut(), c, a, &mut [a].into_iter())?; + inquirer::add_block(writer.deref_mut(), d, a, &mut [a].into_iter())?; + inquirer::add_block(writer.deref_mut(), e, b, &mut [c, d].into_iter())?; + + drop(writer); + + let reader = reachability_store.read(); + + assert!(inquirer::is_dag_ancestor_of(reader.deref(), a, b)?); + assert!(inquirer::is_dag_ancestor_of(reader.deref(), a, c)?); + assert!(inquirer::is_dag_ancestor_of(reader.deref(), a, d)?); + assert!(inquirer::is_dag_ancestor_of(reader.deref(), a, e)?); + assert!(inquirer::is_dag_ancestor_of(reader.deref(), b, e)?); + assert!(inquirer::is_dag_ancestor_of(reader.deref(), c, e)?); + assert!(inquirer::is_dag_ancestor_of(reader.deref(), d, e)?); + + drop(reader); + + sync_dag_store.reachability_store = 
Arc::new(RwLock::new(MemoryReachabilityStore::new())); + let mut writer = sync_dag_store.reachability_store.write(); + + inquirer::init_with_params(writer.deref_mut(), 1.into(), Interval::maximal())?; + inquirer::add_block(writer.deref_mut(), e, 1.into(), &mut [1.into()].into_iter())?; + inquirer::add_block(writer.deref_mut(), a, e, &mut [e].into_iter())?; + + drop(writer); + + let reader = sync_dag_store.reachability_store.read(); + + assert!(inquirer::is_dag_ancestor_of(reader.deref(), e, a)?); + assert!(inquirer::is_dag_ancestor_of(reader.deref(), b, e).is_err()); + + drop(reader); + + anyhow::Ok(()) +} + #[test] fn test_sync_dag_absent_store() -> anyhow::Result<()> { let sync_dag_store = SyncDagStore::create_for_testing()?; From a4f0fe395d3e7834d34c798785eba0d1c7d3df33 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Thu, 29 Aug 2024 00:33:27 +0800 Subject: [PATCH 02/61] parallel execution --- sync/src/parallel/sender.rs | 1 - sync/src/store/tests.rs | 24 ++++-------------------- sync/src/tasks/mock.rs | 3 +-- 3 files changed, 5 insertions(+), 23 deletions(-) diff --git a/sync/src/parallel/sender.rs b/sync/src/parallel/sender.rs index f4a5f71cbf..031c7c9678 100644 --- a/sync/src/parallel/sender.rs +++ b/sync/src/parallel/sender.rs @@ -140,7 +140,6 @@ impl<'a> DagBlockSender<'a> { self.wait_for_finish().await?; sync_dag_store.delete_all_dag_sync_block()?; - Ok(()) } diff --git a/sync/src/store/tests.rs b/sync/src/store/tests.rs index 9a58bad96a..0049e6dc28 100644 --- a/sync/src/store/tests.rs +++ b/sync/src/store/tests.rs @@ -8,12 +8,10 @@ use anyhow::Ok; use parking_lot::RwLock; use starcoin_crypto::HashValue; use starcoin_dag::{ - consensusdb::{ + blockdag::BlockDAG, consensusdb::{ schema::{KeyCodec, ValueCodec}, schemadb::MemoryReachabilityStore, - }, - reachability::inquirer, - types::interval::Interval, + }, reachability::inquirer, types::interval::Interval }; use starcoin_types::{ account_address::AccountAddress, @@ -117,27 +115,12 @@ fn test_add_reachability_data() -> anyhow::Result<()> { drop(reader); - sync_dag_store.reachability_store = Arc::new(RwLock::new(MemoryReachabilityStore::new())); - let mut writer = sync_dag_store.reachability_store.write(); - - inquirer::init_with_params(writer.deref_mut(), 1.into(), Interval::maximal())?; - inquirer::add_block(writer.deref_mut(), e, 1.into(), &mut [1.into()].into_iter())?; - inquirer::add_block(writer.deref_mut(), a, e, &mut [e].into_iter())?; - - drop(writer); - - let reader = sync_dag_store.reachability_store.read(); - - assert!(inquirer::is_dag_ancestor_of(reader.deref(), e, a)?); - assert!(inquirer::is_dag_ancestor_of(reader.deref(), b, e).is_err()); - - drop(reader); - anyhow::Ok(()) } #[test] fn test_sync_dag_absent_store() -> anyhow::Result<()> { + let dag = BlockDAG::create_for_testing()?; let sync_dag_store = SyncDagStore::create_for_testing()?; // write and read @@ -209,6 +192,7 @@ fn test_sync_dag_absent_store() -> anyhow::Result<()> { #[test] fn test_write_read_in_order() -> anyhow::Result<()> { + let dag = BlockDAG::create_for_testing()?; let sync_dag_store = SyncDagStore::create_for_testing()?; // write and read diff --git a/sync/src/tasks/mock.rs b/sync/src/tasks/mock.rs index 96b9822d1b..15a59bb79c 100644 --- a/sync/src/tasks/mock.rs +++ b/sync/src/tasks/mock.rs @@ -155,8 +155,7 @@ impl SyncNodeMocker { None, ); let peer_selector = PeerSelector::new(vec![peer_info], PeerStrategy::default(), None); - let sync_dag_store = SyncDagStore::create_for_testing() - .context("Failed to create SyncDagStore for testing")?; + 
let sync_dag_store = SyncDagStore::create_for_testing().context("Failed to create SyncDagStore for testing")?; Ok(Self::new_inner( peer_id, chain, From 0f0aa78f3e897643a6f29348526e72e61546e2ad Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Thu, 29 Aug 2024 02:36:20 +0800 Subject: [PATCH 03/61] create chain when the parents are ready in parallel execution --- sync/src/parallel/sender.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/sync/src/parallel/sender.rs b/sync/src/parallel/sender.rs index 031c7c9678..3b5b781641 100644 --- a/sync/src/parallel/sender.rs +++ b/sync/src/parallel/sender.rs @@ -134,7 +134,6 @@ impl<'a> DagBlockSender<'a> { }); sender_to_worker.send(Some(block)).await?; - self.flush_executor_state().await?; } @@ -211,7 +210,5 @@ impl<'a> DagBlockSender<'a> { for worker in self.executors { worker.handle.await?; } - - anyhow::Ok(()) } } From 11102f9d79efe5862b27b450a09aa1010ca323ce Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Thu, 29 Aug 2024 03:13:28 +0800 Subject: [PATCH 04/61] set executing state in sender --- sync/src/parallel/sender.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/sync/src/parallel/sender.rs b/sync/src/parallel/sender.rs index 3b5b781641..0389bbe0e4 100644 --- a/sync/src/parallel/sender.rs +++ b/sync/src/parallel/sender.rs @@ -110,7 +110,6 @@ impl<'a> DagBlockSender<'a> { // Finding the executing state is the priority if self.dispatch_to_worker(&block).await? { - self.flush_executor_state().await?; continue; } From 22b78f9caf1a12fe1dc93f92f57eb7b5fdac7542 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Thu, 29 Aug 2024 13:56:02 +0800 Subject: [PATCH 05/61] add 10000 buffer for parallel execution --- sync/src/parallel/executor.rs | 1 + sync/src/parallel/sender.rs | 16 ++++++++++++++++ 2 files changed, 17 insertions(+) diff --git a/sync/src/parallel/executor.rs b/sync/src/parallel/executor.rs index 5279dec192..a10bbf4c61 100644 --- a/sync/src/parallel/executor.rs +++ b/sync/src/parallel/executor.rs @@ -80,6 +80,7 @@ impl DagBlockExecutor { }; let header = block.header().clone(); + info!("worker will process header {:?}", header); loop { match Self::waiting_for_parents( &self.dag, diff --git a/sync/src/parallel/sender.rs b/sync/src/parallel/sender.rs index 0389bbe0e4..ec3bd98ec8 100644 --- a/sync/src/parallel/sender.rs +++ b/sync/src/parallel/sender.rs @@ -96,6 +96,20 @@ impl<'a> DagBlockSender<'a> { } } + for executor in &mut self.executors { + match &executor.state { + ExecuteState::Executed(_) => { + executor.state = ExecuteState::Executing(block.id()); + executor.sender_to_executor.send(Some(block.clone())).await?; + return anyhow::Ok(true); + } + + ExecuteState::Executing(_) | ExecuteState::Error(_) | ExecuteState::Closed => { + continue; + } + } + } + anyhow::Ok(false) } @@ -209,5 +223,7 @@ impl<'a> DagBlockSender<'a> { for worker in self.executors { worker.handle.await?; } + + anyhow::Ok(()) } } From 2455ef6940ab0cb870cf071e494d6f7298cf299b Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Thu, 29 Aug 2024 15:32:06 +0800 Subject: [PATCH 06/61] add log for saving time --- sync/src/tasks/block_sync_task.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/sync/src/tasks/block_sync_task.rs b/sync/src/tasks/block_sync_task.rs index 4c27cecf9d..a208df4997 100644 --- a/sync/src/tasks/block_sync_task.rs +++ b/sync/src/tasks/block_sync_task.rs @@ -473,6 +473,7 @@ where children: vec![], })?; self.sync_dag_store.save_block(block)?; + info!("finish saving"); anyhow::Ok(ParallelSign::NeedMoreBlocks) } }; From 
ca45ef7fa624a92f69bdc216998179750c64fc54 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Mon, 2 Sep 2024 15:44:30 +0800 Subject: [PATCH 07/61] add some test --- flexidag/src/blockdag.rs | 1 + flexidag/tests/tests.rs | 14 ++++++++++++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index 054f01af07..e64facb317 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -12,6 +12,7 @@ use crate::consensusdb::{ }; use crate::ghostdag::protocol::GhostdagManager; use crate::prune::pruning_point_manager::PruningPointManagerT; +use crate::types::ghostdata::CompactGhostdagData; use crate::{process_key_already_error, reachability}; use anyhow::{bail, ensure, format_err, Ok}; use starcoin_accumulator::node::AccumulatorStoreType; diff --git a/flexidag/tests/tests.rs b/flexidag/tests/tests.rs index e3208e7bcc..926c097307 100644 --- a/flexidag/tests/tests.rs +++ b/flexidag/tests/tests.rs @@ -4,13 +4,13 @@ use anyhow::{bail, format_err, Ok, Result}; use starcoin_crypto::HashValue as Hash; use starcoin_dag::{ - blockdag::{BlockDAG, MineNewDagBlockInfo}, - consensusdb::{ + blockdag::{BlockDAG, MineNewDagBlockInfo}, consensusdb::{ consenses_state::{DagState, DagStateReader, DagStateStore}, schemadb::{ DbReachabilityStore, GhostdagStoreReader, ReachabilityStore, ReachabilityStoreReader, RelationsStore, RelationsStoreReader, }, +<<<<<<< HEAD }, reachability::{inquirer, ReachabilityError}, types::{ghostdata::GhostdagData, interval::Interval}, @@ -20,6 +20,12 @@ use starcoin_types::{ block::{BlockHeader, BlockHeaderBuilder, BlockNumber}, blockhash::{BlockHashMap, HashKTypeMap, KType}, }; +======= + }, ghostdag, reachability::{inquirer, ReachabilityError}, types::{ghostdata::GhostdagData, interval::Interval} +}; +use starcoin_logger::prelude::debug; +use starcoin_types::{block::{BlockHeader, BlockHeaderBuilder, BlockNumber}, blockhash::{BlockHashMap, HashKTypeMap, KType}}; +>>>>>>> cb4abfed3 (add some test) use std::{ ops::{Deref, DerefMut}, @@ -1061,6 +1067,10 @@ fn test_verification_blue_block() -> anyhow::Result<()> { genesis.parent_hash(), &mut dag, )?; +<<<<<<< HEAD +======= + +>>>>>>> cb4abfed3 (add some test) // let's obser the blue scores which show how blue the tips are let observer1 = dag.ghostdata(&[block_red_3.id()])?; From e11b1371cfdd5a2702446ddaf77e9a434b2b4701 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Mon, 2 Sep 2024 16:26:08 +0800 Subject: [PATCH 08/61] add false testing case --- flexidag/tests/tests.rs | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/flexidag/tests/tests.rs b/flexidag/tests/tests.rs index 926c097307..d2d22938fc 100644 --- a/flexidag/tests/tests.rs +++ b/flexidag/tests/tests.rs @@ -28,10 +28,7 @@ use starcoin_types::{block::{BlockHeader, BlockHeaderBuilder, BlockNumber}, bloc >>>>>>> cb4abfed3 (add some test) use std::{ - ops::{Deref, DerefMut}, - sync::Arc, - time::Instant, - vec, + io::Read, ops::{Deref, DerefMut}, sync::Arc, time::Instant, vec }; #[test] @@ -1127,6 +1124,21 @@ fn test_verification_blue_block() -> anyhow::Result<()> { ); assert!(check_error.is_err()); + let mut false_observer2 = observer2.clone(); + let red_block_id = false_observer2.mergeset_reds.first().expect("the k is wrong, modify it to create a red block!").clone(); + if red_block_id == block_red_2.id() { + false_observer2.mergeset_blues = Arc::new(vec![red_block_id].into_iter().chain(false_observer2.mergeset_blues.iter().cloned().filter(|id| { + *id != block_red_2_1.id() + 
})).collect()); + false_observer2.mergeset_reds = Arc::new(vec![block_red_2_1.id()]); + } else { + false_observer2.mergeset_blues = Arc::new(vec![red_block_id].into_iter().chain(false_observer2.mergeset_blues.iter().cloned().filter(|id| { + *id != block_red_2.id() + })).collect()); + false_observer2.mergeset_reds = Arc::new(vec![block_red_2.id()]); + } + assert!(dag.ghost_dag_manager().check_ghostdata_blue_block(&false_observer2).is_err()); + let observer3 = dag.ghostdata(&[block_main_5.id()])?; println!("observer 3 dag data: {:?}, ", observer3); From 6ca87e3d680d082d16ca74f0e1e4dde8cac549d3 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Mon, 2 Sep 2024 16:52:38 +0800 Subject: [PATCH 09/61] add more testing code --- flexidag/tests/tests.rs | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/flexidag/tests/tests.rs b/flexidag/tests/tests.rs index d2d22938fc..472d243d06 100644 --- a/flexidag/tests/tests.rs +++ b/flexidag/tests/tests.rs @@ -10,7 +10,6 @@ use starcoin_dag::{ DbReachabilityStore, GhostdagStoreReader, ReachabilityStore, ReachabilityStoreReader, RelationsStore, RelationsStoreReader, }, -<<<<<<< HEAD }, reachability::{inquirer, ReachabilityError}, types::{ghostdata::GhostdagData, interval::Interval}, @@ -20,12 +19,6 @@ use starcoin_types::{ block::{BlockHeader, BlockHeaderBuilder, BlockNumber}, blockhash::{BlockHashMap, HashKTypeMap, KType}, }; -======= - }, ghostdag, reachability::{inquirer, ReachabilityError}, types::{ghostdata::GhostdagData, interval::Interval} -}; -use starcoin_logger::prelude::debug; -use starcoin_types::{block::{BlockHeader, BlockHeaderBuilder, BlockNumber}, blockhash::{BlockHashMap, HashKTypeMap, KType}}; ->>>>>>> cb4abfed3 (add some test) use std::{ io::Read, ops::{Deref, DerefMut}, sync::Arc, time::Instant, vec @@ -1064,10 +1057,6 @@ fn test_verification_blue_block() -> anyhow::Result<()> { genesis.parent_hash(), &mut dag, )?; -<<<<<<< HEAD -======= - ->>>>>>> cb4abfed3 (add some test) // let's obser the blue scores which show how blue the tips are let observer1 = dag.ghostdata(&[block_red_3.id()])?; @@ -1137,7 +1126,10 @@ fn test_verification_blue_block() -> anyhow::Result<()> { })).collect()); false_observer2.mergeset_reds = Arc::new(vec![block_red_2.id()]); } - assert!(dag.ghost_dag_manager().check_ghostdata_blue_block(&false_observer2).is_err()); + + let check_error = dag.ghost_dag_manager().check_ghostdata_blue_block(&false_observer2); + println!("check error: {:?} after the blue block turns red and the red turns blue maliciously", check_error); + assert!(check_error.is_err()); let observer3 = dag.ghostdata(&[block_main_5.id()])?; println!("observer 3 dag data: {:?}, ", observer3); @@ -1232,5 +1224,11 @@ fn test_verification_blue_block() -> anyhow::Result<()> { dag.ghost_dag_manager() .check_ghostdata_blue_block(&together_ghost_data)?; + + let together_mine = dag.ghostdata(&[block_from_normal.id(), block_from_makeup.id()])?; + let mine_together = add_and_print(8, together_mine.selected_parent, vec![block_from_normal.id(), block_from_makeup.id()], genesis.parent_hash(), &mut dag)?; + let together_ghost_data = dag.storage.ghost_dag_store.get_data(mine_together.id())?; + dag.ghost_dag_manager().check_ghostdata_blue_block(&together_ghost_data)?; + anyhow::Result::Ok(()) } From 67741c34618b741455a4b50629de756924088e6f Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Tue, 3 Sep 2024 10:12:27 +0800 Subject: [PATCH 10/61] add check data --- flexidag/src/ghostdag/protocol.rs | 2 +- flexidag/tests/tests.rs | 1 - 
sync/src/store/tests.rs | 39 ------------------------------- 3 files changed, 1 insertion(+), 41 deletions(-) diff --git a/flexidag/src/ghostdag/protocol.rs b/flexidag/src/ghostdag/protocol.rs index c219be7af4..011eff23ae 100644 --- a/flexidag/src/ghostdag/protocol.rs +++ b/flexidag/src/ghostdag/protocol.rs @@ -206,7 +206,7 @@ impl< // No k-cluster violation found, we can now set the candidate block as blue new_block_data.add_blue(blue_candidate, blue_anticone_size, &blues_anticone_sizes); } else { - new_block_data.add_red(blue_candidate); + check_ghostdata.add_red(blue_candidate); } } diff --git a/flexidag/tests/tests.rs b/flexidag/tests/tests.rs index 472d243d06..5828565fbe 100644 --- a/flexidag/tests/tests.rs +++ b/flexidag/tests/tests.rs @@ -1224,7 +1224,6 @@ fn test_verification_blue_block() -> anyhow::Result<()> { dag.ghost_dag_manager() .check_ghostdata_blue_block(&together_ghost_data)?; - let together_mine = dag.ghostdata(&[block_from_normal.id(), block_from_makeup.id()])?; let mine_together = add_and_print(8, together_mine.selected_parent, vec![block_from_normal.id(), block_from_makeup.id()], genesis.parent_hash(), &mut dag)?; let together_ghost_data = dag.storage.ghost_dag_store.get_data(mine_together.id())?; diff --git a/sync/src/store/tests.rs b/sync/src/store/tests.rs index 0049e6dc28..c63e9c12cc 100644 --- a/sync/src/store/tests.rs +++ b/sync/src/store/tests.rs @@ -79,45 +79,6 @@ fn build_version_0_block(number: BlockNumber) -> Block { Block::new(header, body) } -#[test] -fn test_add_reachability_data() -> anyhow::Result<()> { - let mut sync_dag_store = SyncDagStore::create_for_testing()?; - let reachability_store = sync_dag_store.reachability_store.clone(); - - let mut writer = reachability_store.write(); - - let x = HashValue::random(); - let a = HashValue::random(); - let b = HashValue::random(); - let c = HashValue::random(); - let d = HashValue::random(); - let e = HashValue::random(); - - inquirer::init_with_params(writer.deref_mut(), x, Interval::maximal())?; - - inquirer::add_block(writer.deref_mut(), a, x, &mut [x].into_iter())?; - inquirer::add_block(writer.deref_mut(), b, a, &mut [a].into_iter())?; - inquirer::add_block(writer.deref_mut(), c, a, &mut [a].into_iter())?; - inquirer::add_block(writer.deref_mut(), d, a, &mut [a].into_iter())?; - inquirer::add_block(writer.deref_mut(), e, b, &mut [c, d].into_iter())?; - - drop(writer); - - let reader = reachability_store.read(); - - assert!(inquirer::is_dag_ancestor_of(reader.deref(), a, b)?); - assert!(inquirer::is_dag_ancestor_of(reader.deref(), a, c)?); - assert!(inquirer::is_dag_ancestor_of(reader.deref(), a, d)?); - assert!(inquirer::is_dag_ancestor_of(reader.deref(), a, e)?); - assert!(inquirer::is_dag_ancestor_of(reader.deref(), b, e)?); - assert!(inquirer::is_dag_ancestor_of(reader.deref(), c, e)?); - assert!(inquirer::is_dag_ancestor_of(reader.deref(), d, e)?); - - drop(reader); - - anyhow::Ok(()) -} - #[test] fn test_sync_dag_absent_store() -> anyhow::Result<()> { let dag = BlockDAG::create_for_testing()?; From 86f1817586a4f763a2b00228c220081f80beeeda Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Tue, 3 Sep 2024 15:39:00 +0800 Subject: [PATCH 11/61] add verify blue block in verifier --- chain/src/verifier/mod.rs | 17 +++++++++++++++++ flexidag/src/ghostdag/protocol.rs | 1 - 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/chain/src/verifier/mod.rs b/chain/src/verifier/mod.rs index bd8870685c..a1bbb18785 100644 --- a/chain/src/verifier/mod.rs +++ b/chain/src/verifier/mod.rs @@ -385,6 +385,23 @@ 
impl BasicDagVerifier { ConsensusVerifier::verify_header(current_chain, new_block_header) } + + fn verify_blue_blocks(current_chain: &R, uncles: &[BlockHeader], header: &BlockHeader) -> Result where R: ChainReader { + current_chain.verify_and_ghostdata(uncles, header) + } + + + +} +//TODO: Implement it. +pub struct DagVerifier; +impl BlockVerifier for DagVerifier { + fn verify_header(current_chain: &R, new_block_header: &BlockHeader) -> Result<()> + where + R: ChainReader, + { + BasicDagVerifier::verify_header(current_chain, new_block_header) + } fn verify_blue_blocks( current_chain: &R, diff --git a/flexidag/src/ghostdag/protocol.rs b/flexidag/src/ghostdag/protocol.rs index 011eff23ae..9d8d948e1b 100644 --- a/flexidag/src/ghostdag/protocol.rs +++ b/flexidag/src/ghostdag/protocol.rs @@ -261,7 +261,6 @@ impl< header.id(), new_block_data ); - Ok(new_block_data) } From c35bd07ad50a2594c8efc8af50cf09184c902e44 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Wed, 4 Sep 2024 09:50:18 +0800 Subject: [PATCH 12/61] add some code --- chain/src/chain.rs | 1 - flexidag/src/ghostdag/protocol.rs | 70 +++++++++++++++++++++++++++++++ sync/src/parallel/executor.rs | 1 - sync/src/parallel/sender.rs | 1 + sync/src/tasks/block_sync_task.rs | 1 - 5 files changed, 71 insertions(+), 3 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index d83f16caa9..bbad4b3f9b 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -1545,7 +1545,6 @@ impl ChainWriter for BlockChain { fn chain_state(&mut self) -> &ChainStateDB { &self.statedb } - fn apply_for_sync(&mut self, block: Block) -> Result { self.apply_with_verifier::(block) } diff --git a/flexidag/src/ghostdag/protocol.rs b/flexidag/src/ghostdag/protocol.rs index 9d8d948e1b..8be2d0ead9 100644 --- a/flexidag/src/ghostdag/protocol.rs +++ b/flexidag/src/ghostdag/protocol.rs @@ -262,6 +262,75 @@ impl< new_block_data ); Ok(new_block_data) + + // let mut new_block_data = GhostdagData::new_with_selected_parent(header.parent_hash(), self.k); + // let mut mergetset = header.parents_hash().into_iter().filter(|header_id| { + // *header_id != header.parent_hash() + // }).chain(blue_blocks.into_iter().filter(|header| { + // header.id() != new_block_data.selected_parent + // }).map(|header| header.id())).collect::>().into_iter().collect::>(); + // info!("jacktest: merget set = {:?}", mergetset); + // mergetset = self.sort_blocks(mergetset.into_iter())?; + // let ordered_mergeset = + // self.ordered_mergeset_without_selected_parent(new_block_data.selected_parent, &vec![new_block_data.selected_parent])?; + + + // for blue_candidate in ordered_mergeset { + // let coloring = self.check_blue_candidate(&new_block_data, blue_candidate)?; + // if let ColoringOutput::Blue(blue_anticone_size, blues_anticone_sizes) = coloring { + // new_block_data.add_blue(blue_candidate, blue_anticone_size, &blues_anticone_sizes); + // } else { + // new_block_data.add_red(blue_candidate); + // } + // } + // let mut valid_mergeset_blues = vec![new_block_data.selected_parent]; + // valid_mergeset_blues.append(&mut new_block_data.mergeset_blues.iter().cloned().collect::>().into_iter().filter(|header_id| { + // *header_id != new_block_data.selected_parent + // }).collect()); + // *BlockHashes::make_mut(&mut new_block_data.mergeset_blues) = valid_mergeset_blues; + // *BlockHashes::make_mut(&mut new_block_data.mergeset_reds) = new_block_data.mergeset_reds.iter().cloned().collect::>().into_iter().collect::>(); + // let selectd_ghostdata = self.ghostdag(&vec![header.parent_hash()])?; + // if 
blue_blocks.len() != new_block_data.mergeset_blues.len().saturating_sub(1) { + // return Err(anyhow::anyhow!("The len of blue set is not equal, for {:?}, checking data: {:?}", blue_blocks.iter().map(|header| header.id()).collect::>(), new_block_data.mergeset_blues)); + // } + + // let mut expected_blue_blocks = blue_blocks.iter().map(|header| header.id()).collect::>(); + // expected_blue_blocks.insert(new_block_data.selected_parent); + // if expected_blue_blocks != new_block_data.mergeset_blues.iter().cloned().collect::>() { + // // return Err(anyhow::anyhow!("The data of blue set is not equal, for {:?}, checking data: {:?}", blue_blocks.iter().map(|header| header.id()).collect::>(), new_block_data.mergeset_blues)); + // info!("The data of blue set is not equal, for {:?}, checking data: {:?}, run ghost protocol to get the valid data", blue_blocks, new_block_data.mergeset_blues); + // return Ok(self.ghostdag(&header.parents_hash())?); + // } + + // let blue_score = self + // .ghostdag_store + // .get_blue_score(header.parent_hash())? + // .checked_add(new_block_data.mergeset_blues.len() as u64) + // .expect("blue score size should less than u64"); + + // let added_blue_work: BlueWorkType = new_block_data + // .mergeset_blues + // .iter() + // .cloned() + // .map(|hash| { + // self.headers_store + // .get_difficulty(hash) + // .unwrap_or_else(|err| { + // error!("Failed to get difficulty of block: {}, {}", hash, err); + // 0.into() + // }) + // }) + // .sum(); + + // let blue_work = self + // .ghostdag_store + // .get_blue_work(new_block_data.selected_parent)? + // .checked_add(added_blue_work) + // .expect("blue work should less than u256"); + + // new_block_data.finalize_score_and_work(blue_score, blue_work); + + // Ok(new_block_data) } pub fn check_ghostdata_blue_block(&self, ghostdata: &GhostdagData) -> Result<()> { @@ -357,6 +426,7 @@ impl< .reachability_service .is_dag_ancestor_of(hash, blue_candidate) { + info!("jacktest: because {:?} is_dag_ancestor_of {:?}, return blue", hash, blue_candidate); return Ok(ColoringState::Blue); } } diff --git a/sync/src/parallel/executor.rs b/sync/src/parallel/executor.rs index a10bbf4c61..5279dec192 100644 --- a/sync/src/parallel/executor.rs +++ b/sync/src/parallel/executor.rs @@ -80,7 +80,6 @@ impl DagBlockExecutor { }; let header = block.header().clone(); - info!("worker will process header {:?}", header); loop { match Self::waiting_for_parents( &self.dag, diff --git a/sync/src/parallel/sender.rs b/sync/src/parallel/sender.rs index ec3bd98ec8..7db3f2c31c 100644 --- a/sync/src/parallel/sender.rs +++ b/sync/src/parallel/sender.rs @@ -124,6 +124,7 @@ impl<'a> DagBlockSender<'a> { // Finding the executing state is the priority if self.dispatch_to_worker(&block).await? 
{ + self.flush_executor_state().await?; continue; } diff --git a/sync/src/tasks/block_sync_task.rs b/sync/src/tasks/block_sync_task.rs index a208df4997..4c27cecf9d 100644 --- a/sync/src/tasks/block_sync_task.rs +++ b/sync/src/tasks/block_sync_task.rs @@ -473,7 +473,6 @@ where children: vec![], })?; self.sync_dag_store.save_block(block)?; - info!("finish saving"); anyhow::Ok(ParallelSign::NeedMoreBlocks) } }; From f8c3795eac75ca7d88aa495b3573dd1e1b8d2dfb Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Wed, 4 Sep 2024 12:34:01 +0800 Subject: [PATCH 13/61] add verification --- flexidag/src/ghostdag/protocol.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/flexidag/src/ghostdag/protocol.rs b/flexidag/src/ghostdag/protocol.rs index 8be2d0ead9..cd67edfc60 100644 --- a/flexidag/src/ghostdag/protocol.rs +++ b/flexidag/src/ghostdag/protocol.rs @@ -426,7 +426,6 @@ impl< .reachability_service .is_dag_ancestor_of(hash, blue_candidate) { - info!("jacktest: because {:?} is_dag_ancestor_of {:?}, return blue", hash, blue_candidate); return Ok(ColoringState::Blue); } } From 93e71f7f7fde76ff66f014e8634138bf61371dc5 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Wed, 4 Sep 2024 16:15:41 +0800 Subject: [PATCH 14/61] fix some bugs --- flexidag/src/ghostdag/protocol.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/flexidag/src/ghostdag/protocol.rs b/flexidag/src/ghostdag/protocol.rs index cd67edfc60..56887b32d8 100644 --- a/flexidag/src/ghostdag/protocol.rs +++ b/flexidag/src/ghostdag/protocol.rs @@ -210,12 +210,6 @@ impl< } } - if new_block_data - .mergeset_blues - .iter() - .skip(1) - .cloned() - .collect::>() != blue_blocks .iter() .map(|header| header.id()) @@ -569,7 +563,6 @@ impl< blocks: impl IntoIterator, ) -> Result> { let mut sorted_blocks: Vec = blocks.into_iter().collect(); - sorted_blocks.sort_by_cached_key(|block| { let blue_work = self .ghostdag_store From df381deb46743229a87a349cd81daa7b27bd52ed Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Wed, 4 Sep 2024 17:05:20 +0800 Subject: [PATCH 15/61] add yeilding after execution for processing the main chain in other service --- sync/src/parallel/executor.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sync/src/parallel/executor.rs b/sync/src/parallel/executor.rs index 5279dec192..bdd33b3870 100644 --- a/sync/src/parallel/executor.rs +++ b/sync/src/parallel/executor.rs @@ -14,6 +14,8 @@ use tokio::{ task::JoinHandle, }; +use crate::tasks::continue_execute_absent_block::ContinueChainOperator; + #[derive(Debug)] pub enum ExecuteState { Executing(HashValue), From d757bd64e32e9a6d9d59527b9bc1f25b0a6eca77 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Thu, 5 Sep 2024 11:27:31 +0800 Subject: [PATCH 16/61] fmt and clippy --- chain/src/verifier/mod.rs | 14 ++++--- flexidag/src/blockdag.rs | 1 - flexidag/src/ghostdag/protocol.rs | 11 ++++-- flexidag/tests/tests.rs | 63 ++++++++++++++++++++++++------- sync/src/lib.rs | 1 - sync/src/parallel/executor.rs | 2 - sync/src/store/sync_dag_store.rs | 3 +- sync/src/store/tests.rs | 16 +------- sync/src/tasks/mock.rs | 3 +- 9 files changed, 71 insertions(+), 43 deletions(-) diff --git a/chain/src/verifier/mod.rs b/chain/src/verifier/mod.rs index a1bbb18785..a4b89cb502 100644 --- a/chain/src/verifier/mod.rs +++ b/chain/src/verifier/mod.rs @@ -385,13 +385,17 @@ impl BasicDagVerifier { ConsensusVerifier::verify_header(current_chain, new_block_header) } - - fn verify_blue_blocks(current_chain: &R, uncles: &[BlockHeader], header: &BlockHeader) -> Result where R: ChainReader { + + fn 
verify_blue_blocks( + current_chain: &R, + uncles: &[BlockHeader], + header: &BlockHeader, + ) -> Result + where + R: ChainReader, + { current_chain.verify_and_ghostdata(uncles, header) } - - - } //TODO: Implement it. pub struct DagVerifier; diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index e64facb317..054f01af07 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -12,7 +12,6 @@ use crate::consensusdb::{ }; use crate::ghostdag::protocol::GhostdagManager; use crate::prune::pruning_point_manager::PruningPointManagerT; -use crate::types::ghostdata::CompactGhostdagData; use crate::{process_key_already_error, reachability}; use anyhow::{bail, ensure, format_err, Ok}; use starcoin_accumulator::node::AccumulatorStoreType; diff --git a/flexidag/src/ghostdag/protocol.rs b/flexidag/src/ghostdag/protocol.rs index 56887b32d8..f510155ea2 100644 --- a/flexidag/src/ghostdag/protocol.rs +++ b/flexidag/src/ghostdag/protocol.rs @@ -206,10 +206,16 @@ impl< // No k-cluster violation found, we can now set the candidate block as blue new_block_data.add_blue(blue_candidate, blue_anticone_size, &blues_anticone_sizes); } else { - check_ghostdata.add_red(blue_candidate); + new_block_data.add_red(blue_candidate); } } + if new_block_data + .mergeset_blues + .iter() + .skip(1) + .cloned() + .collect::>() != blue_blocks .iter() .map(|header| header.id()) @@ -268,7 +274,6 @@ impl< // let ordered_mergeset = // self.ordered_mergeset_without_selected_parent(new_block_data.selected_parent, &vec![new_block_data.selected_parent])?; - // for blue_candidate in ordered_mergeset { // let coloring = self.check_blue_candidate(&new_block_data, blue_candidate)?; // if let ColoringOutput::Blue(blue_anticone_size, blues_anticone_sizes) = coloring { @@ -302,7 +307,7 @@ impl< // .checked_add(new_block_data.mergeset_blues.len() as u64) // .expect("blue score size should less than u64"); - // let added_blue_work: BlueWorkType = new_block_data + // let added_blue_work: BlueWorkType = new_block_data // .mergeset_blues // .iter() // .cloned() diff --git a/flexidag/tests/tests.rs b/flexidag/tests/tests.rs index 5828565fbe..3e664bc266 100644 --- a/flexidag/tests/tests.rs +++ b/flexidag/tests/tests.rs @@ -4,7 +4,8 @@ use anyhow::{bail, format_err, Ok, Result}; use starcoin_crypto::HashValue as Hash; use starcoin_dag::{ - blockdag::{BlockDAG, MineNewDagBlockInfo}, consensusdb::{ + blockdag::{BlockDAG, MineNewDagBlockInfo}, + consensusdb::{ consenses_state::{DagState, DagStateReader, DagStateStore}, schemadb::{ DbReachabilityStore, GhostdagStoreReader, ReachabilityStore, ReachabilityStoreReader, @@ -21,7 +22,10 @@ use starcoin_types::{ }; use std::{ - io::Read, ops::{Deref, DerefMut}, sync::Arc, time::Instant, vec + ops::{Deref, DerefMut}, + sync::Arc, + time::Instant, + vec, }; #[test] @@ -1114,21 +1118,47 @@ fn test_verification_blue_block() -> anyhow::Result<()> { assert!(check_error.is_err()); let mut false_observer2 = observer2.clone(); - let red_block_id = false_observer2.mergeset_reds.first().expect("the k is wrong, modify it to create a red block!").clone(); + let red_block_id = *false_observer2 + .mergeset_reds + .first() + .expect("the k is wrong, modify it to create a red block!"); if red_block_id == block_red_2.id() { - false_observer2.mergeset_blues = Arc::new(vec![red_block_id].into_iter().chain(false_observer2.mergeset_blues.iter().cloned().filter(|id| { - *id != block_red_2_1.id() - })).collect()); + false_observer2.mergeset_blues = Arc::new( + vec![red_block_id] + .into_iter() + .chain( + 
false_observer2 + .mergeset_blues + .iter() + .cloned() + .filter(|id| *id != block_red_2_1.id()), + ) + .collect(), + ); false_observer2.mergeset_reds = Arc::new(vec![block_red_2_1.id()]); } else { - false_observer2.mergeset_blues = Arc::new(vec![red_block_id].into_iter().chain(false_observer2.mergeset_blues.iter().cloned().filter(|id| { - *id != block_red_2.id() - })).collect()); + false_observer2.mergeset_blues = Arc::new( + vec![red_block_id] + .into_iter() + .chain( + false_observer2 + .mergeset_blues + .iter() + .cloned() + .filter(|id| *id != block_red_2.id()), + ) + .collect(), + ); false_observer2.mergeset_reds = Arc::new(vec![block_red_2.id()]); } - let check_error = dag.ghost_dag_manager().check_ghostdata_blue_block(&false_observer2); - println!("check error: {:?} after the blue block turns red and the red turns blue maliciously", check_error); + let check_error = dag + .ghost_dag_manager() + .check_ghostdata_blue_block(&false_observer2); + println!( + "check error: {:?} after the blue block turns red and the red turns blue maliciously", + check_error + ); assert!(check_error.is_err()); let observer3 = dag.ghostdata(&[block_main_5.id()])?; @@ -1225,9 +1255,16 @@ fn test_verification_blue_block() -> anyhow::Result<()> { .check_ghostdata_blue_block(&together_ghost_data)?; let together_mine = dag.ghostdata(&[block_from_normal.id(), block_from_makeup.id()])?; - let mine_together = add_and_print(8, together_mine.selected_parent, vec![block_from_normal.id(), block_from_makeup.id()], genesis.parent_hash(), &mut dag)?; + let mine_together = add_and_print( + 8, + together_mine.selected_parent, + vec![block_from_normal.id(), block_from_makeup.id()], + genesis.parent_hash(), + &mut dag, + )?; let together_ghost_data = dag.storage.ghost_dag_store.get_data(mine_together.id())?; - dag.ghost_dag_manager().check_ghostdata_blue_block(&together_ghost_data)?; + dag.ghost_dag_manager() + .check_ghostdata_blue_block(&together_ghost_data)?; anyhow::Result::Ok(()) } diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 8c5b6ae197..0d5ae0a43c 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -12,4 +12,3 @@ pub mod txn_sync; pub mod parallel; pub mod verified_rpc_client; -pub mod parallel; \ No newline at end of file diff --git a/sync/src/parallel/executor.rs b/sync/src/parallel/executor.rs index bdd33b3870..5279dec192 100644 --- a/sync/src/parallel/executor.rs +++ b/sync/src/parallel/executor.rs @@ -14,8 +14,6 @@ use tokio::{ task::JoinHandle, }; -use crate::tasks::continue_execute_absent_block::ContinueChainOperator; - #[derive(Debug)] pub enum ExecuteState { Executing(HashValue), diff --git a/sync/src/store/sync_dag_store.rs b/sync/src/store/sync_dag_store.rs index 45af933521..4216240f34 100644 --- a/sync/src/store/sync_dag_store.rs +++ b/sync/src/store/sync_dag_store.rs @@ -1,7 +1,6 @@ -use std::{ops::DerefMut, path::Path, sync::Arc}; +use std::{path::Path, sync::Arc}; use anyhow::format_err; -use parking_lot::RwLock; use starcoin_config::{temp_dir, RocksdbConfig, StorageConfig}; use starcoin_crypto::HashValue; use starcoin_dag::consensusdb::{prelude::StoreError, schemadb::REACHABILITY_DATA_CF}; diff --git a/sync/src/store/tests.rs b/sync/src/store/tests.rs index c63e9c12cc..881b1badc0 100644 --- a/sync/src/store/tests.rs +++ b/sync/src/store/tests.rs @@ -1,18 +1,6 @@ -use std::{ - ops::{Deref, DerefMut}, - sync::Arc, - u64, -}; - use anyhow::Ok; -use parking_lot::RwLock; use starcoin_crypto::HashValue; -use starcoin_dag::{ - blockdag::BlockDAG, consensusdb::{ - schema::{KeyCodec, ValueCodec}, - 
schemadb::MemoryReachabilityStore, - }, reachability::inquirer, types::interval::Interval -}; +use starcoin_dag::consensusdb::schema::{KeyCodec, ValueCodec}; use starcoin_types::{ account_address::AccountAddress, block::{Block, BlockBody, BlockHeader, BlockHeaderBuilder, BlockHeaderExtra, BlockNumber}, @@ -81,7 +69,6 @@ fn build_version_0_block(number: BlockNumber) -> Block { #[test] fn test_sync_dag_absent_store() -> anyhow::Result<()> { - let dag = BlockDAG::create_for_testing()?; let sync_dag_store = SyncDagStore::create_for_testing()?; // write and read @@ -153,7 +140,6 @@ fn test_sync_dag_absent_store() -> anyhow::Result<()> { #[test] fn test_write_read_in_order() -> anyhow::Result<()> { - let dag = BlockDAG::create_for_testing()?; let sync_dag_store = SyncDagStore::create_for_testing()?; // write and read diff --git a/sync/src/tasks/mock.rs b/sync/src/tasks/mock.rs index 15a59bb79c..96b9822d1b 100644 --- a/sync/src/tasks/mock.rs +++ b/sync/src/tasks/mock.rs @@ -155,7 +155,8 @@ impl SyncNodeMocker { None, ); let peer_selector = PeerSelector::new(vec![peer_info], PeerStrategy::default(), None); - let sync_dag_store = SyncDagStore::create_for_testing().context("Failed to create SyncDagStore for testing")?; + let sync_dag_store = SyncDagStore::create_for_testing() + .context("Failed to create SyncDagStore for testing")?; Ok(Self::new_inner( peer_id, chain, From fd6d2bba4e69e5b622649bea0b166feaf734fd45 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Thu, 5 Sep 2024 11:49:21 +0800 Subject: [PATCH 17/61] rebase master set the number 4500000 for vega updating --- flexidag/src/ghostdag/protocol.rs | 68 ------------------------------- types/src/block/mod.rs | 2 +- 2 files changed, 1 insertion(+), 69 deletions(-) diff --git a/flexidag/src/ghostdag/protocol.rs b/flexidag/src/ghostdag/protocol.rs index f510155ea2..4e0be6e96e 100644 --- a/flexidag/src/ghostdag/protocol.rs +++ b/flexidag/src/ghostdag/protocol.rs @@ -262,74 +262,6 @@ impl< new_block_data ); Ok(new_block_data) - - // let mut new_block_data = GhostdagData::new_with_selected_parent(header.parent_hash(), self.k); - // let mut mergetset = header.parents_hash().into_iter().filter(|header_id| { - // *header_id != header.parent_hash() - // }).chain(blue_blocks.into_iter().filter(|header| { - // header.id() != new_block_data.selected_parent - // }).map(|header| header.id())).collect::>().into_iter().collect::>(); - // info!("jacktest: merget set = {:?}", mergetset); - // mergetset = self.sort_blocks(mergetset.into_iter())?; - // let ordered_mergeset = - // self.ordered_mergeset_without_selected_parent(new_block_data.selected_parent, &vec![new_block_data.selected_parent])?; - - // for blue_candidate in ordered_mergeset { - // let coloring = self.check_blue_candidate(&new_block_data, blue_candidate)?; - // if let ColoringOutput::Blue(blue_anticone_size, blues_anticone_sizes) = coloring { - // new_block_data.add_blue(blue_candidate, blue_anticone_size, &blues_anticone_sizes); - // } else { - // new_block_data.add_red(blue_candidate); - // } - // } - // let mut valid_mergeset_blues = vec![new_block_data.selected_parent]; - // valid_mergeset_blues.append(&mut new_block_data.mergeset_blues.iter().cloned().collect::>().into_iter().filter(|header_id| { - // *header_id != new_block_data.selected_parent - // }).collect()); - // *BlockHashes::make_mut(&mut new_block_data.mergeset_blues) = valid_mergeset_blues; - // *BlockHashes::make_mut(&mut new_block_data.mergeset_reds) = 
new_block_data.mergeset_reds.iter().cloned().collect::>().into_iter().collect::>(); - // let selectd_ghostdata = self.ghostdag(&vec![header.parent_hash()])?; - // if blue_blocks.len() != new_block_data.mergeset_blues.len().saturating_sub(1) { - // return Err(anyhow::anyhow!("The len of blue set is not equal, for {:?}, checking data: {:?}", blue_blocks.iter().map(|header| header.id()).collect::>(), new_block_data.mergeset_blues)); - // } - - // let mut expected_blue_blocks = blue_blocks.iter().map(|header| header.id()).collect::>(); - // expected_blue_blocks.insert(new_block_data.selected_parent); - // if expected_blue_blocks != new_block_data.mergeset_blues.iter().cloned().collect::>() { - // // return Err(anyhow::anyhow!("The data of blue set is not equal, for {:?}, checking data: {:?}", blue_blocks.iter().map(|header| header.id()).collect::>(), new_block_data.mergeset_blues)); - // info!("The data of blue set is not equal, for {:?}, checking data: {:?}, run ghost protocol to get the valid data", blue_blocks, new_block_data.mergeset_blues); - // return Ok(self.ghostdag(&header.parents_hash())?); - // } - - // let blue_score = self - // .ghostdag_store - // .get_blue_score(header.parent_hash())? - // .checked_add(new_block_data.mergeset_blues.len() as u64) - // .expect("blue score size should less than u64"); - - // let added_blue_work: BlueWorkType = new_block_data - // .mergeset_blues - // .iter() - // .cloned() - // .map(|hash| { - // self.headers_store - // .get_difficulty(hash) - // .unwrap_or_else(|err| { - // error!("Failed to get difficulty of block: {}, {}", hash, err); - // 0.into() - // }) - // }) - // .sum(); - - // let blue_work = self - // .ghostdag_store - // .get_blue_work(new_block_data.selected_parent)? - // .checked_add(added_blue_work) - // .expect("blue work should less than u256"); - - // new_block_data.finalize_score_and_work(blue_score, blue_work); - - // Ok(new_block_data) } pub fn check_ghostdata_blue_block(&self, ghostdata: &GhostdagData) -> Result<()> { diff --git a/types/src/block/mod.rs b/types/src/block/mod.rs index 68a6995052..056051e01e 100644 --- a/types/src/block/mod.rs +++ b/types/src/block/mod.rs @@ -510,7 +510,7 @@ impl BlockHeader { if number == 0 { false } else if chain_id.is_vega() { - number >= 3300000 + number >= 4500000 } else if chain_id.is_halley() { number >= 3100000 } else if chain_id.is_proxima() { From 0f70ba11632efb5b97af7999afbd44eea4dc317f Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Fri, 6 Sep 2024 15:50:51 +0800 Subject: [PATCH 18/61] 3300000 will be version 1 in vega --- types/src/block/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/types/src/block/mod.rs b/types/src/block/mod.rs index 056051e01e..68a6995052 100644 --- a/types/src/block/mod.rs +++ b/types/src/block/mod.rs @@ -510,7 +510,7 @@ impl BlockHeader { if number == 0 { false } else if chain_id.is_vega() { - number >= 4500000 + number >= 3300000 } else if chain_id.is_halley() { number >= 3100000 } else if chain_id.is_proxima() { From a75f2f23c67dff8da6037e5fc36dcb2120db5c09 Mon Sep 17 00:00:00 2001 From: Jack Huang Date: Sun, 21 Jul 2024 16:01:24 +0800 Subject: [PATCH 19/61] add pruning logic and compatible logic fix some test cases --- chain/src/chain.rs | 5 +- chain/src/verifier/mod.rs | 2 +- chain/tests/test_txn_info_and_proof.rs | 139 ++++++++++++++++++ config/src/genesis_config.rs | 3 - flexidag/src/blockdag.rs | 16 +- flexidag/src/consensusdb/consenses_state.rs | 6 +- flexidag/src/prune/pruning_point_manager.rs | 18 ++- 
flexidag/tests/tests.rs | 3 +- .../src/block_connector/test_illegal_block.rs | 4 +- .../block_connector/test_write_block_chain.rs | 2 + sync/src/tasks/tests_dag.rs | 1 - types/src/block/tests.rs | 47 +++++- vm/types/src/block_metadata/legacy.rs | 2 +- 13 files changed, 227 insertions(+), 21 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index bbad4b3f9b..1615db142b 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -1519,7 +1519,10 @@ impl BlockChain { if self.epoch.end_block_number() == block.header().number() { self.epoch = get_epoch_from_statedb(&self.statedb)?; } - self.dag.save_dag_state(DagState { tips })?; + self.dag.save_dag_state(DagState { + tips, + pruning_point: block.header().pruning_point(), + })?; Ok(executed_block) } } diff --git a/chain/src/verifier/mod.rs b/chain/src/verifier/mod.rs index a4b89cb502..76e9b38679 100644 --- a/chain/src/verifier/mod.rs +++ b/chain/src/verifier/mod.rs @@ -347,7 +347,6 @@ impl BasicDagVerifier { R: ChainReader, { let parents_hash = new_block_header.parents_hash(); - verify_block!( VerifyBlockField::Header, parents_hash.len() == parents_hash.iter().collect::>().len(), @@ -363,6 +362,7 @@ impl BasicDagVerifier { parents_hash, new_block_header.parent_hash() ); + parents_hash.iter().try_for_each(|parent_hash| { verify_block!( VerifyBlockField::Header, diff --git a/chain/tests/test_txn_info_and_proof.rs b/chain/tests/test_txn_info_and_proof.rs index d902703b93..92e3694f82 100644 --- a/chain/tests/test_txn_info_and_proof.rs +++ b/chain/tests/test_txn_info_and_proof.rs @@ -123,3 +123,142 @@ fn test_transaction_info_and_proof_1() -> Result<()> { ); Ok(()) } + +#[stest::test(timeout = 480)] +fn test_transaction_info_and_proof() -> Result<()> { + let config = Arc::new(NodeConfig::random_for_test()); + let mut block_chain = test_helper::gen_blockchain_for_test(config.net())?; + let mut current_header = block_chain.current_header(); + let miner_account = AccountInfo::random(); + + let mut rng = rand::thread_rng(); + + let block_count: u64 = rng.gen_range(2..10); + let mut seq_number = 0; + let mut all_txns = vec![]; + let mut all_address = HashMap::::new(); + + let genesis_block = block_chain.get_block_by_number(0).unwrap().unwrap(); + //put the genesis txn, the genesis block metadata txn do not generate txn info + + all_txns.push(Transaction::UserTransaction( + genesis_block.body.transactions.first().cloned().unwrap(), + )); + + (0..block_count).for_each(|_block_idx| { + let txn_count: u64 = rng.gen_range(1..10); + let txns: Vec = (0..txn_count) + .map(|_txn_idx| { + let account_address = AccountAddress::random(); + + let txn = peer_to_peer_txn_sent_as_association( + account_address, + seq_number, + 10000, + config.net().time_service().now_secs() + DEFAULT_EXPIRATION_TIME, + config.net(), + ); + all_address.insert(txn.id(), account_address); + seq_number += 1; + txn + }) + .collect(); + + let (template, _) = block_chain + .create_block_template( + *miner_account.address(), + Some(current_header.id()), + txns.clone(), + vec![], + None, + vec![], + HashValue::zero(), + ) + .unwrap(); + + let block = block_chain + .consensus() + .create_block(template, config.net().time_service().as_ref()) + .unwrap(); + debug!("apply block:{:?}", &block); + block_chain.apply(block.clone()).unwrap(); + all_txns.push(Transaction::BlockMetadata( + block.to_metadata(current_header.gas_used()), + )); + all_txns.extend(txns.into_iter().map(Transaction::UserTransaction)); + current_header = block.header().clone(); + }); + + let txn_index = 
rng.gen_range(0..all_txns.len()); + debug!("all txns len: {}, txn index:{}", all_txns.len(), txn_index); + + for txn_global_index in 0..all_txns.len() { + let txn = all_txns.get(txn_global_index).cloned().unwrap(); + let txn_hash = txn.id(); + let txn_info = block_chain.get_transaction_info(txn_hash)?.ok_or_else(|| { + format_err!( + "Can not get txn info by txn hash:{}, txn:{:?}", + txn_hash, + txn + ) + })?; + + let txn_info_leaf = block_chain + .get_txn_accumulator() + .get_leaf(txn_global_index as u64)? + .unwrap(); + assert_eq!( + txn_info.transaction_info.id(), + txn_info_leaf, + "txn_info hash do not match txn info leaf in accumulator, index: {}", + txn_global_index + ); + + assert_eq!( + txn_info.transaction_global_index, txn_global_index as u64, + "txn_global_index:{}", + txn_global_index + ); + + let account_address = match &txn { + Transaction::UserTransaction(user_txn) => user_txn.sender(), + Transaction::BlockMetadata(metadata_txn) => metadata_txn.author(), + }; + let access_path: Option = Some(AccessPath::resource_access_path( + account_address, + AccountResource::struct_tag(), + )); + + let events = block_chain + .get_events(txn_info.transaction_info.id())? + .unwrap(); + + for (event_index, event) in events.into_iter().enumerate() { + let txn_proof = block_chain + .get_transaction_proof( + current_header.id(), + txn_global_index as u64, + Some(event_index as u64), + access_path.clone(), + )? + .expect("get transaction proof return none"); + assert_eq!(&event, &txn_proof.event_proof.as_ref().unwrap().event); + + let result = txn_proof.verify( + current_header.txn_accumulator_root(), + txn_global_index as u64, + Some(event_index as u64), + access_path.clone(), + ); + + assert!( + result.is_ok(), + "txn index: {}, {:?} verify failed, reason: {:?}", + txn_global_index, + txn_proof, + result.err().unwrap() + ); + } + } + Ok(()) +} diff --git a/config/src/genesis_config.rs b/config/src/genesis_config.rs index 50a1136ffe..e479c4de8f 100644 --- a/config/src/genesis_config.rs +++ b/config/src/genesis_config.rs @@ -654,13 +654,10 @@ pub struct GenesisConfig { pub time_service_type: TimeServiceType, /// transaction timeout pub transaction_timeout: u64, - /// pruning depth pub pruning_depth: u64, - /// pruning finality pub pruning_finality: u64, - /// block header version pub block_header_version: starcoin_types::block::Version, } diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index 054f01af07..0194049775 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -93,6 +93,14 @@ impl BlockDAG { Ok(Self::new(k, dag_storage)) } + pub fn new_by_config(db_path: &Path) -> anyhow::Result { + let config = FlexiDagStorageConfig::create_with_params(1, RocksdbConfig::default()); + let db = FlexiDagStorage::create_from_path(db_path, config)?; + let dag = Self::new(DEFAULT_GHOSTDAG_K, db); + Ok(dag) +>>>>>>> 2ba8ddcfe (add pruning logic and compatible logic) + } + pub fn has_dag_block(&self, hash: Hash) -> anyhow::Result { Ok(self.storage.header_store.has(hash)?) 
} @@ -116,6 +124,7 @@ impl BlockDAG { self.commit(genesis, origin)?; self.save_dag_state(DagState { tips: vec![genesis_id], + pruning_point: genesis_id, })?; Ok(origin) } @@ -443,7 +452,6 @@ impl BlockDAG { ) -> anyhow::Result { let dag_state = self.get_dag_state()?; let ghostdata = self.ghost_dag_manager().ghostdag(&dag_state.tips)?; - anyhow::Ok(MineNewDagBlockInfo { tips: dag_state.tips, blue_blocks: (*ghostdata.mergeset_blues).clone(), @@ -486,9 +494,13 @@ impl BlockDAG { block_header: &BlockHeader, genesis_id: HashValue, ) -> anyhow::Result<()> { + let dag_state = DagState { + tips: block_header.parents(), + pruning_point: block_header.pruning_point(), + }; let ghostdata = self.ghost_dag_manager().ghostdag(&block_header.parents())?; let next_pruning_point = self.pruning_point_manager().next_pruning_point( - block_header.pruning_point(), + &dag_state, &ghostdata, pruning_depth, pruning_finality, diff --git a/flexidag/src/consensusdb/consenses_state.rs b/flexidag/src/consensusdb/consenses_state.rs index 481c415fdb..a6b0f3cf09 100644 --- a/flexidag/src/consensusdb/consenses_state.rs +++ b/flexidag/src/consensusdb/consenses_state.rs @@ -9,6 +9,7 @@ use std::sync::Arc; #[derive(Eq, PartialEq, Hash, Deserialize, Serialize, Clone, Debug, Default)] pub struct DagState { pub tips: Vec, + pub pruning_point: Hash, } pub(crate) const DAG_STATE_STORE_CF: &str = "dag-state-store"; @@ -87,6 +88,9 @@ pub struct DagStateView { impl DagStateView { pub fn into_state(self) -> DagState { - DagState { tips: self.tips } + DagState { + tips: self.tips, + pruning_point: self.pruning_point, + } } } diff --git a/flexidag/src/prune/pruning_point_manager.rs b/flexidag/src/prune/pruning_point_manager.rs index b7496e456a..b7b2656d09 100644 --- a/flexidag/src/prune/pruning_point_manager.rs +++ b/flexidag/src/prune/pruning_point_manager.rs @@ -40,10 +40,9 @@ impl PruningPointManagerT { pub fn prune( &self, dag_state: &DagState, - current_pruning_point: HashValue, next_pruning_point: HashValue, ) -> anyhow::Result> { - if current_pruning_point == HashValue::zero() { + if dag_state.pruning_point == HashValue::zero() { return Ok(dag_state.tips.clone()); } anyhow::Ok( @@ -58,15 +57,15 @@ impl PruningPointManagerT { .collect(), ) } - + pub(crate) fn next_pruning_point( &self, - pruning_point: HashValue, + dag_state: &DagState, ghostdata: &GhostdagData, pruning_depth: u64, pruning_finality: u64, ) -> anyhow::Result { - let pruning_ghostdata = self.ghost_dag_store.get_data(pruning_point)?; + let pruning_ghostdata = self.ghost_dag_store.get_data(dag_state.pruning_point)?; let min_required_blue_score_for_next_pruning_point = (self.finality_score(pruning_ghostdata.blue_score, pruning_finality) + 1) * pruning_finality; @@ -75,10 +74,13 @@ impl PruningPointManagerT { "min_required_blue_score_for_next_pruning_point: {:?}", min_required_blue_score_for_next_pruning_point ); - let mut latest_pruning_ghost_data = self.ghost_dag_store.get_compact_data(pruning_point)?; + + let mut latest_pruning_ghost_data = self + .ghost_dag_store + .get_compact_data(dag_state.pruning_point)?; if min_required_blue_score_for_next_pruning_point + pruning_depth <= ghostdata.blue_score { for child in self.reachability_service().forward_chain_iterator( - pruning_point, + dag_state.pruning_point, ghostdata.selected_parent, true, ) { @@ -105,7 +107,7 @@ impl PruningPointManagerT { } if latest_pruning_ghost_data.selected_parent == HashValue::new(ORIGIN) { - anyhow::Ok(pruning_point) // still genesis + anyhow::Ok(dag_state.pruning_point) // still genesis } 
else { anyhow::Ok(latest_pruning_ghost_data.selected_parent) } diff --git a/flexidag/tests/tests.rs b/flexidag/tests/tests.rs index 3e664bc266..8cb8f91452 100644 --- a/flexidag/tests/tests.rs +++ b/flexidag/tests/tests.rs @@ -319,6 +319,7 @@ fn test_dag_tips_store() { let state = DagState { tips: vec![Hash::random()], + pruning_point: Hash::random(), }; dag.storage .state_store @@ -869,7 +870,6 @@ fn test_big_data_commit() -> anyhow::Result<()> { anyhow::Result::Ok(()) } -#[ignore = "pruning will be tested in next release"] #[test] fn test_prune() -> anyhow::Result<()> { // initialzie the dag firstly @@ -969,6 +969,7 @@ fn test_prune() -> anyhow::Result<()> { // prunning process begins dag.save_dag_state(DagState { tips: vec![block_red_3.id(), block_main_5.id()], + pruning_point: genesis.id(), })?; let MineNewDagBlockInfo { diff --git a/sync/src/block_connector/test_illegal_block.rs b/sync/src/block_connector/test_illegal_block.rs index d9bb234e9d..f081964f54 100644 --- a/sync/src/block_connector/test_illegal_block.rs +++ b/sync/src/block_connector/test_illegal_block.rs @@ -1,7 +1,9 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 #![allow(clippy::arithmetic_side_effects)] -use crate::block_connector::{create_writeable_block_chain, WriteBlockChainService}; +use crate::block_connector::{ + create_writeable_block_chain, WriteBlockChainService, +}; use anyhow::Result; use starcoin_account_api::AccountInfo; use starcoin_chain::BlockChain; diff --git a/sync/src/block_connector/test_write_block_chain.rs b/sync/src/block_connector/test_write_block_chain.rs index ebdc928dff..ae8083687f 100644 --- a/sync/src/block_connector/test_write_block_chain.rs +++ b/sync/src/block_connector/test_write_block_chain.rs @@ -12,6 +12,8 @@ use starcoin_txpool_mock_service::MockTxPoolService; use starcoin_types::startup_info::StartupInfo; use std::sync::Arc; +use super::test_write_dag_block_chain::new_dag_block; + pub async fn create_writeable_dag_block_chain() -> ( WriteBlockChainService, Arc, diff --git a/sync/src/tasks/tests_dag.rs b/sync/src/tasks/tests_dag.rs index b2248cc137..f4fb074658 100644 --- a/sync/src/tasks/tests_dag.rs +++ b/sync/src/tasks/tests_dag.rs @@ -144,7 +144,6 @@ async fn test_continue_sync_dag_blocks() -> Result<()> { .unwrap() .produce_fork_chain(one_fork_count, two_fork_count)?; - ///// let target_dag_genesis_header_id = target_node .chain() .get_storage() diff --git a/types/src/block/tests.rs b/types/src/block/tests.rs index 33bf8b5450..bce998e9e1 100644 --- a/types/src/block/tests.rs +++ b/types/src/block/tests.rs @@ -258,7 +258,52 @@ fn test_header_with_dag_but_pruning_adaptable() -> anyhow::Result<()> { #[test] fn test_block_compatible_for_vega() -> anyhow::Result<()> { - let latest_block = crate::block::Block::rational_random(); + let uncle1 = crate::block::BlockHeaderBuilder::new() + .with_chain_id(ChainId::vega()) + .with_number(512) + .with_parent_hash(HashValue::random()) + .with_parents_hash(vec![ + HashValue::random(), + HashValue::random(), + HashValue::random(), + ]) + .build(); + + let uncle2 = crate::block::BlockHeaderBuilder::new() + .with_number(128) + .with_chain_id(ChainId::vega()) + .with_parent_hash(HashValue::random()) + .with_parents_hash(vec![ + HashValue::random(), + HashValue::random(), + HashValue::random(), + ]) + .build(); + let body = crate::block::BlockBody { + transactions: vec![ + SignedUserTransaction::sample(), + SignedUserTransaction::sample(), + SignedUserTransaction::sample(), + ], + uncles: Some(vec![uncle1, 
uncle2]), + }; + + let header = crate::block::BlockHeaderBuilder::new() + .with_number(1024) + .with_chain_id(ChainId::vega()) + .with_parent_hash(HashValue::random()) + .with_parents_hash(vec![ + HashValue::random(), + HashValue::random(), + HashValue::random(), + ]) + .with_body_hash(body.hash()) + .build(); + + let latest_block = crate::block::Block { + header: header.clone(), + body, + }; let deserilized_block = crate::block::Block::decode(&latest_block.encode()?)?; diff --git a/vm/types/src/block_metadata/legacy.rs b/vm/types/src/block_metadata/legacy.rs index 9b20891459..40a99b6217 100644 --- a/vm/types/src/block_metadata/legacy.rs +++ b/vm/types/src/block_metadata/legacy.rs @@ -72,7 +72,7 @@ impl From for super::BlockMetadata { number: value.number, chain_id: value.chain_id, parent_gas_used: value.parent_gas_used, - parents_hash: None, + parents_hash: Some(vec![]), } } } From d3f851435c639b7553ec3657de9cbc9289ca39da Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Tue, 6 Aug 2024 10:25:36 +0800 Subject: [PATCH 20/61] fix clippy --- flexidag/src/prune/pruning_point_manager.rs | 2 +- sync/src/block_connector/test_illegal_block.rs | 4 +--- sync/src/block_connector/test_write_block_chain.rs | 2 -- 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/flexidag/src/prune/pruning_point_manager.rs b/flexidag/src/prune/pruning_point_manager.rs index b7b2656d09..bd41eaa35a 100644 --- a/flexidag/src/prune/pruning_point_manager.rs +++ b/flexidag/src/prune/pruning_point_manager.rs @@ -57,7 +57,7 @@ impl PruningPointManagerT { .collect(), ) } - + pub(crate) fn next_pruning_point( &self, dag_state: &DagState, diff --git a/sync/src/block_connector/test_illegal_block.rs b/sync/src/block_connector/test_illegal_block.rs index f081964f54..d9bb234e9d 100644 --- a/sync/src/block_connector/test_illegal_block.rs +++ b/sync/src/block_connector/test_illegal_block.rs @@ -1,9 +1,7 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 #![allow(clippy::arithmetic_side_effects)] -use crate::block_connector::{ - create_writeable_block_chain, WriteBlockChainService, -}; +use crate::block_connector::{create_writeable_block_chain, WriteBlockChainService}; use anyhow::Result; use starcoin_account_api::AccountInfo; use starcoin_chain::BlockChain; diff --git a/sync/src/block_connector/test_write_block_chain.rs b/sync/src/block_connector/test_write_block_chain.rs index ae8083687f..ebdc928dff 100644 --- a/sync/src/block_connector/test_write_block_chain.rs +++ b/sync/src/block_connector/test_write_block_chain.rs @@ -12,8 +12,6 @@ use starcoin_txpool_mock_service::MockTxPoolService; use starcoin_types::startup_info::StartupInfo; use std::sync::Arc; -use super::test_write_dag_block_chain::new_dag_block; - pub async fn create_writeable_dag_block_chain() -> ( WriteBlockChainService, Arc, From 8f1e647354ea3367e8cfa6318647ec27e1a030c8 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Tue, 6 Aug 2024 16:14:27 +0800 Subject: [PATCH 21/61] fix test case --- chain/src/chain.rs | 2 ++ chain/src/verifier/mod.rs | 2 +- flexidag/src/blockdag.rs | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 1615db142b..02670d596a 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -275,6 +275,8 @@ impl BlockChain { None => self.current_header(), }; + println!("jacktest: current header: {:?}", previous_header); + self.create_block_template_by_header( author, previous_header, diff --git a/chain/src/verifier/mod.rs 
b/chain/src/verifier/mod.rs index 76e9b38679..d123f025f8 100644 --- a/chain/src/verifier/mod.rs +++ b/chain/src/verifier/mod.rs @@ -11,7 +11,7 @@ use starcoin_dag::types::ghostdata::GhostdagData; use starcoin_logger::prelude::debug; use starcoin_open_block::AddressFilter; use starcoin_types::block::{Block, BlockHeader, ALLOWED_FUTURE_BLOCKTIME}; -use std::{collections::HashSet, str::FromStr}; +use std::{collections::HashSet, hash::Hash, str::FromStr}; #[derive(Debug, Clone)] pub enum Verifier { diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index 0194049775..8312283b69 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -452,6 +452,7 @@ impl BlockDAG { ) -> anyhow::Result { let dag_state = self.get_dag_state()?; let ghostdata = self.ghost_dag_manager().ghostdag(&dag_state.tips)?; + println!("jacktest: dag state: {:?}, ghost data: {:?}", dag_state, ghostdata); anyhow::Ok(MineNewDagBlockInfo { tips: dag_state.tips, blue_blocks: (*ghostdata.mergeset_blues).clone(), From 296d04ac883f361904ebca1dd8cfef11fc374525 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Tue, 6 Aug 2024 16:54:40 +0800 Subject: [PATCH 22/61] remove some single chain test case --- chain/tests/test_txn_info_and_proof.rs | 139 ------------------------- 1 file changed, 139 deletions(-) diff --git a/chain/tests/test_txn_info_and_proof.rs b/chain/tests/test_txn_info_and_proof.rs index 92e3694f82..d902703b93 100644 --- a/chain/tests/test_txn_info_and_proof.rs +++ b/chain/tests/test_txn_info_and_proof.rs @@ -123,142 +123,3 @@ fn test_transaction_info_and_proof_1() -> Result<()> { ); Ok(()) } - -#[stest::test(timeout = 480)] -fn test_transaction_info_and_proof() -> Result<()> { - let config = Arc::new(NodeConfig::random_for_test()); - let mut block_chain = test_helper::gen_blockchain_for_test(config.net())?; - let mut current_header = block_chain.current_header(); - let miner_account = AccountInfo::random(); - - let mut rng = rand::thread_rng(); - - let block_count: u64 = rng.gen_range(2..10); - let mut seq_number = 0; - let mut all_txns = vec![]; - let mut all_address = HashMap::::new(); - - let genesis_block = block_chain.get_block_by_number(0).unwrap().unwrap(); - //put the genesis txn, the genesis block metadata txn do not generate txn info - - all_txns.push(Transaction::UserTransaction( - genesis_block.body.transactions.first().cloned().unwrap(), - )); - - (0..block_count).for_each(|_block_idx| { - let txn_count: u64 = rng.gen_range(1..10); - let txns: Vec = (0..txn_count) - .map(|_txn_idx| { - let account_address = AccountAddress::random(); - - let txn = peer_to_peer_txn_sent_as_association( - account_address, - seq_number, - 10000, - config.net().time_service().now_secs() + DEFAULT_EXPIRATION_TIME, - config.net(), - ); - all_address.insert(txn.id(), account_address); - seq_number += 1; - txn - }) - .collect(); - - let (template, _) = block_chain - .create_block_template( - *miner_account.address(), - Some(current_header.id()), - txns.clone(), - vec![], - None, - vec![], - HashValue::zero(), - ) - .unwrap(); - - let block = block_chain - .consensus() - .create_block(template, config.net().time_service().as_ref()) - .unwrap(); - debug!("apply block:{:?}", &block); - block_chain.apply(block.clone()).unwrap(); - all_txns.push(Transaction::BlockMetadata( - block.to_metadata(current_header.gas_used()), - )); - all_txns.extend(txns.into_iter().map(Transaction::UserTransaction)); - current_header = block.header().clone(); - }); - - let txn_index = rng.gen_range(0..all_txns.len()); - debug!("all 
txns len: {}, txn index:{}", all_txns.len(), txn_index); - - for txn_global_index in 0..all_txns.len() { - let txn = all_txns.get(txn_global_index).cloned().unwrap(); - let txn_hash = txn.id(); - let txn_info = block_chain.get_transaction_info(txn_hash)?.ok_or_else(|| { - format_err!( - "Can not get txn info by txn hash:{}, txn:{:?}", - txn_hash, - txn - ) - })?; - - let txn_info_leaf = block_chain - .get_txn_accumulator() - .get_leaf(txn_global_index as u64)? - .unwrap(); - assert_eq!( - txn_info.transaction_info.id(), - txn_info_leaf, - "txn_info hash do not match txn info leaf in accumulator, index: {}", - txn_global_index - ); - - assert_eq!( - txn_info.transaction_global_index, txn_global_index as u64, - "txn_global_index:{}", - txn_global_index - ); - - let account_address = match &txn { - Transaction::UserTransaction(user_txn) => user_txn.sender(), - Transaction::BlockMetadata(metadata_txn) => metadata_txn.author(), - }; - let access_path: Option = Some(AccessPath::resource_access_path( - account_address, - AccountResource::struct_tag(), - )); - - let events = block_chain - .get_events(txn_info.transaction_info.id())? - .unwrap(); - - for (event_index, event) in events.into_iter().enumerate() { - let txn_proof = block_chain - .get_transaction_proof( - current_header.id(), - txn_global_index as u64, - Some(event_index as u64), - access_path.clone(), - )? - .expect("get transaction proof return none"); - assert_eq!(&event, &txn_proof.event_proof.as_ref().unwrap().event); - - let result = txn_proof.verify( - current_header.txn_accumulator_root(), - txn_global_index as u64, - Some(event_index as u64), - access_path.clone(), - ); - - assert!( - result.is_ok(), - "txn index: {}, {:?} verify failed, reason: {:?}", - txn_global_index, - txn_proof, - result.err().unwrap() - ); - } - } - Ok(()) -} From 9a94908445334184262f406108b2e1c145091e15 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Tue, 6 Aug 2024 18:12:31 +0800 Subject: [PATCH 23/61] fix clippy --- chain/src/verifier/mod.rs | 2 +- flexidag/src/blockdag.rs | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/chain/src/verifier/mod.rs b/chain/src/verifier/mod.rs index d123f025f8..76e9b38679 100644 --- a/chain/src/verifier/mod.rs +++ b/chain/src/verifier/mod.rs @@ -11,7 +11,7 @@ use starcoin_dag::types::ghostdata::GhostdagData; use starcoin_logger::prelude::debug; use starcoin_open_block::AddressFilter; use starcoin_types::block::{Block, BlockHeader, ALLOWED_FUTURE_BLOCKTIME}; -use std::{collections::HashSet, hash::Hash, str::FromStr}; +use std::{collections::HashSet, str::FromStr}; #[derive(Debug, Clone)] pub enum Verifier { diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index 8312283b69..876482050d 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -452,7 +452,10 @@ impl BlockDAG { ) -> anyhow::Result { let dag_state = self.get_dag_state()?; let ghostdata = self.ghost_dag_manager().ghostdag(&dag_state.tips)?; - println!("jacktest: dag state: {:?}, ghost data: {:?}", dag_state, ghostdata); + println!( + "jacktest: dag state: {:?}, ghost data: {:?}", + dag_state, ghostdata + ); anyhow::Ok(MineNewDagBlockInfo { tips: dag_state.tips, blue_blocks: (*ghostdata.mergeset_blues).clone(), From 91a997b7264c32e7500ea359405e7cd6b39d2deb Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Tue, 10 Sep 2024 17:00:17 +0800 Subject: [PATCH 24/61] fix flexdag's test case --- chain/src/chain.rs | 2 -- flexidag/src/blockdag.rs | 4 ---- flexidag/tests/tests.rs | 1 + 3 files changed, 1 insertion(+), 6 
deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 02670d596a..1615db142b 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -275,8 +275,6 @@ impl BlockChain { None => self.current_header(), }; - println!("jacktest: current header: {:?}", previous_header); - self.create_block_template_by_header( author, previous_header, diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index 876482050d..0194049775 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -452,10 +452,6 @@ impl BlockDAG { ) -> anyhow::Result { let dag_state = self.get_dag_state()?; let ghostdata = self.ghost_dag_manager().ghostdag(&dag_state.tips)?; - println!( - "jacktest: dag state: {:?}, ghost data: {:?}", - dag_state, ghostdata - ); anyhow::Ok(MineNewDagBlockInfo { tips: dag_state.tips, blue_blocks: (*ghostdata.mergeset_blues).clone(), diff --git a/flexidag/tests/tests.rs b/flexidag/tests/tests.rs index 8cb8f91452..434c8c19d7 100644 --- a/flexidag/tests/tests.rs +++ b/flexidag/tests/tests.rs @@ -870,6 +870,7 @@ fn test_big_data_commit() -> anyhow::Result<()> { anyhow::Result::Ok(()) } +#[ignore = "pruning will be tested in next release"] #[test] fn test_prune() -> anyhow::Result<()> { // initialzie the dag firstly From 06fbd3e6582dcb15355cae3f3ab307025f26cefb Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Tue, 6 Aug 2024 22:44:13 +0800 Subject: [PATCH 25/61] fix clippy --- flexidag/src/blockdag.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index 0194049775..fb3412fcc9 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -93,7 +93,7 @@ impl BlockDAG { Ok(Self::new(k, dag_storage)) } - pub fn new_by_config(db_path: &Path) -> anyhow::Result { + pub fn new_by_config(db_path: &Path) -> anyhow::Result { let config = FlexiDagStorageConfig::create_with_params(1, RocksdbConfig::default()); let db = FlexiDagStorage::create_from_path(db_path, config)?; let dag = Self::new(DEFAULT_GHOSTDAG_K, db); From 8049c16b5ec32259f9f2a35bec890345c0d2e7b2 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Wed, 7 Aug 2024 20:15:37 +0800 Subject: [PATCH 26/61] add rational random to pass the deserialization verification --- types/src/block/tests.rs | 47 +--------------------------------------- 1 file changed, 1 insertion(+), 46 deletions(-) diff --git a/types/src/block/tests.rs b/types/src/block/tests.rs index bce998e9e1..33bf8b5450 100644 --- a/types/src/block/tests.rs +++ b/types/src/block/tests.rs @@ -258,52 +258,7 @@ fn test_header_with_dag_but_pruning_adaptable() -> anyhow::Result<()> { #[test] fn test_block_compatible_for_vega() -> anyhow::Result<()> { - let uncle1 = crate::block::BlockHeaderBuilder::new() - .with_chain_id(ChainId::vega()) - .with_number(512) - .with_parent_hash(HashValue::random()) - .with_parents_hash(vec![ - HashValue::random(), - HashValue::random(), - HashValue::random(), - ]) - .build(); - - let uncle2 = crate::block::BlockHeaderBuilder::new() - .with_number(128) - .with_chain_id(ChainId::vega()) - .with_parent_hash(HashValue::random()) - .with_parents_hash(vec![ - HashValue::random(), - HashValue::random(), - HashValue::random(), - ]) - .build(); - let body = crate::block::BlockBody { - transactions: vec![ - SignedUserTransaction::sample(), - SignedUserTransaction::sample(), - SignedUserTransaction::sample(), - ], - uncles: Some(vec![uncle1, uncle2]), - }; - - let header = crate::block::BlockHeaderBuilder::new() - .with_number(1024) - .with_chain_id(ChainId::vega()) 
- .with_parent_hash(HashValue::random()) - .with_parents_hash(vec![ - HashValue::random(), - HashValue::random(), - HashValue::random(), - ]) - .with_body_hash(body.hash()) - .build(); - - let latest_block = crate::block::Block { - header: header.clone(), - body, - }; + let latest_block = crate::block::Block::rational_random(); let deserilized_block = crate::block::Block::decode(&latest_block.encode()?)?; From d32d12fcef5f799303140c90fe2e2247406f66f7 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Wed, 7 Aug 2024 22:42:05 +0800 Subject: [PATCH 27/61] merge dag-master --- flexidag/src/blockdag.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index fb3412fcc9..fea571ba73 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -98,7 +98,6 @@ impl BlockDAG { let db = FlexiDagStorage::create_from_path(db_path, config)?; let dag = Self::new(DEFAULT_GHOSTDAG_K, db); Ok(dag) ->>>>>>> 2ba8ddcfe (add pruning logic and compatible logic) } pub fn has_dag_block(&self, hash: Hash) -> anyhow::Result { From 676e861ac1eee4214e0692c9caf5f73df16edf44 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Thu, 8 Aug 2024 12:30:31 +0800 Subject: [PATCH 28/61] fix bugs: blockmeta changes into latest version --- storage/src/lib.rs | 4 +++- storage/src/upgrade.rs | 7 +++++++ vm/types/src/block_metadata/legacy.rs | 2 +- 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/storage/src/lib.rs b/storage/src/lib.rs index 2f186aef4d..d6957ec086 100644 --- a/storage/src/lib.rs +++ b/storage/src/lib.rs @@ -212,11 +212,12 @@ pub enum StorageVersion { V3 = 3, V4 = 4, V5 = 5, + V6 = 6, } impl StorageVersion { pub fn current_version() -> Self { - Self::V5 + Self::V6 } pub fn get_column_family_names(&self) -> &'static [ColumnFamilyName] { @@ -226,6 +227,7 @@ impl StorageVersion { Self::V3 => &VEC_PREFIX_NAME_V3, Self::V4 => &VEC_PREFIX_NAME_V4, Self::V5 => &VEC_PREFIX_NAME_V5, + Self::V6 => &VEC_PREFIX_NAME_V5, } } } diff --git a/storage/src/upgrade.rs b/storage/src/upgrade.rs index a584a3b560..325248a1fd 100644 --- a/storage/src/upgrade.rs +++ b/storage/src/upgrade.rs @@ -239,6 +239,9 @@ impl DBUpgrade { (StorageVersion::V4, StorageVersion::V5) => { Self::db_upgrade_v4_v5(instance)?; } + (StorageVersion::V5, StorageVersion::V6) => { + Self::db_upgrade_v5_v6(instance)?; + } _ => bail!( "Cannot upgrade db from {:?} to {:?}", version_in_db, @@ -337,6 +340,10 @@ impl DBUpgrade { } Ok(()) } + + fn db_upgrade_v5_v6(_instance: &mut StorageInstance) -> Result<()> { + Ok(()) + } } fn upgrade_store(old_store: T1, store: T2, batch_size: usize) -> Result diff --git a/vm/types/src/block_metadata/legacy.rs b/vm/types/src/block_metadata/legacy.rs index 40a99b6217..9b20891459 100644 --- a/vm/types/src/block_metadata/legacy.rs +++ b/vm/types/src/block_metadata/legacy.rs @@ -72,7 +72,7 @@ impl From for super::BlockMetadata { number: value.number, chain_id: value.chain_id, parent_gas_used: value.parent_gas_used, - parents_hash: Some(vec![]), + parents_hash: None, } } } From 788cec33f19b0df772572f11c4d91585af65336e Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Thu, 8 Aug 2024 13:46:42 +0800 Subject: [PATCH 29/61] add update db code --- storage/src/upgrade.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/storage/src/upgrade.rs b/storage/src/upgrade.rs index 325248a1fd..08808226fa 100644 --- a/storage/src/upgrade.rs +++ b/storage/src/upgrade.rs @@ -229,6 +229,12 @@ impl DBUpgrade { Self::db_upgrade_v3_v4(instance)?; 
Self::db_upgrade_v4_v5(instance)?; } + (StorageVersion::V2, StorageVersion::V6) => { + Self::db_upgrade_v2_v3(instance)?; + Self::db_upgrade_v3_v4(instance)?; + Self::db_upgrade_v4_v5(instance)?; + Self::db_upgrade_v5_v6(instance)?; + } (StorageVersion::V3, StorageVersion::V4) => { Self::db_upgrade_v3_v4(instance)?; } @@ -236,9 +242,18 @@ impl DBUpgrade { Self::db_upgrade_v3_v4(instance)?; Self::db_upgrade_v4_v5(instance)?; } + (StorageVersion::V3, StorageVersion::V6) => { + Self::db_upgrade_v3_v4(instance)?; + Self::db_upgrade_v4_v5(instance)?; + Self::db_upgrade_v5_v6(instance)?; + } (StorageVersion::V4, StorageVersion::V5) => { Self::db_upgrade_v4_v5(instance)?; } + (StorageVersion::V4, StorageVersion::V6) => { + Self::db_upgrade_v4_v5(instance)?; + Self::db_upgrade_v5_v6(instance)?; + } (StorageVersion::V5, StorageVersion::V6) => { Self::db_upgrade_v5_v6(instance)?; } From 7689b483063cd8fd141563f9c4e4984b24c8e81e Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Thu, 8 Aug 2024 21:24:49 +0800 Subject: [PATCH 30/61] add dag db update --- flexidag/src/blockdag.rs | 4 ++++ storage/src/lib.rs | 4 +--- storage/src/upgrade.rs | 22 ---------------------- 3 files changed, 5 insertions(+), 25 deletions(-) diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index fea571ba73..71174171f3 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -19,8 +19,12 @@ use starcoin_accumulator::{Accumulator, MerkleAccumulator}; use starcoin_config::temp_dir; use starcoin_crypto::{HashValue as Hash, HashValue}; use starcoin_logger::prelude::{debug, info, warn}; +<<<<<<< HEAD use starcoin_storage::Store; use starcoin_types::block::{AccumulatorInfo, BlockHeader}; +======= +use starcoin_types::block::BlockHeader; +>>>>>>> 32eccfca1 (add dag db update) use starcoin_types::{ blockhash::{BlockHashes, KType}, consensus_header::ConsensusHeader, diff --git a/storage/src/lib.rs b/storage/src/lib.rs index d6957ec086..2f186aef4d 100644 --- a/storage/src/lib.rs +++ b/storage/src/lib.rs @@ -212,12 +212,11 @@ pub enum StorageVersion { V3 = 3, V4 = 4, V5 = 5, - V6 = 6, } impl StorageVersion { pub fn current_version() -> Self { - Self::V6 + Self::V5 } pub fn get_column_family_names(&self) -> &'static [ColumnFamilyName] { @@ -227,7 +226,6 @@ impl StorageVersion { Self::V3 => &VEC_PREFIX_NAME_V3, Self::V4 => &VEC_PREFIX_NAME_V4, Self::V5 => &VEC_PREFIX_NAME_V5, - Self::V6 => &VEC_PREFIX_NAME_V5, } } } diff --git a/storage/src/upgrade.rs b/storage/src/upgrade.rs index 08808226fa..a584a3b560 100644 --- a/storage/src/upgrade.rs +++ b/storage/src/upgrade.rs @@ -229,12 +229,6 @@ impl DBUpgrade { Self::db_upgrade_v3_v4(instance)?; Self::db_upgrade_v4_v5(instance)?; } - (StorageVersion::V2, StorageVersion::V6) => { - Self::db_upgrade_v2_v3(instance)?; - Self::db_upgrade_v3_v4(instance)?; - Self::db_upgrade_v4_v5(instance)?; - Self::db_upgrade_v5_v6(instance)?; - } (StorageVersion::V3, StorageVersion::V4) => { Self::db_upgrade_v3_v4(instance)?; } @@ -242,21 +236,9 @@ impl DBUpgrade { Self::db_upgrade_v3_v4(instance)?; Self::db_upgrade_v4_v5(instance)?; } - (StorageVersion::V3, StorageVersion::V6) => { - Self::db_upgrade_v3_v4(instance)?; - Self::db_upgrade_v4_v5(instance)?; - Self::db_upgrade_v5_v6(instance)?; - } (StorageVersion::V4, StorageVersion::V5) => { Self::db_upgrade_v4_v5(instance)?; } - (StorageVersion::V4, StorageVersion::V6) => { - Self::db_upgrade_v4_v5(instance)?; - Self::db_upgrade_v5_v6(instance)?; - } - (StorageVersion::V5, StorageVersion::V6) => { - Self::db_upgrade_v5_v6(instance)?; - } _ 
=> bail!( "Cannot upgrade db from {:?} to {:?}", version_in_db, @@ -355,10 +337,6 @@ impl DBUpgrade { } Ok(()) } - - fn db_upgrade_v5_v6(_instance: &mut StorageInstance) -> Result<()> { - Ok(()) - } } fn upgrade_store(old_store: T1, store: T2, batch_size: usize) -> Result From e0ec991f2c3693c45708b8881f5566fbe0fd389c Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Thu, 8 Aug 2024 22:21:47 +0800 Subject: [PATCH 31/61] update dag db --- flexidag/src/blockdag.rs | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index 71174171f3..0518617a8d 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -19,12 +19,8 @@ use starcoin_accumulator::{Accumulator, MerkleAccumulator}; use starcoin_config::temp_dir; use starcoin_crypto::{HashValue as Hash, HashValue}; use starcoin_logger::prelude::{debug, info, warn}; -<<<<<<< HEAD use starcoin_storage::Store; use starcoin_types::block::{AccumulatorInfo, BlockHeader}; -======= -use starcoin_types::block::BlockHeader; ->>>>>>> 32eccfca1 (add dag db update) use starcoin_types::{ blockhash::{BlockHashes, KType}, consensus_header::ConsensusHeader, @@ -566,6 +562,15 @@ impl BlockDAG { Err(_) => { warn!("Cannot get the dag state by genesis id. Might be it is a new node. The dag state will be: {:?}", read_guard.get_state()?); None +======= + Err(_) => { + info!("The dag state will be saved as {:?}", dag_state); + self.storage.state_store.write().insert(dag_state)?; + } + }, + Err(_) => { + warn!("Cannot get the dag state by genesis id. Might be it is a new node. The dag state will be: {:?}", self.storage.state_store.read().get_state()?); +>>>>>>> e00426dfc (update dag db) } }; @@ -587,12 +592,4 @@ impl BlockDAG { self.storage.reachability_store.clone() } - pub fn verify_and_ghostdata( - &self, - blue_blocks: &[BlockHeader], - header: &BlockHeader, - ) -> Result { - self.ghost_dag_manager() - .verify_and_ghostdata(blue_blocks, header) - } } From 75241a6ccd7dfff18ccf9df9098b453972be9934 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Thu, 8 Aug 2024 23:58:56 +0800 Subject: [PATCH 32/61] update dag db --- chain/src/chain.rs | 5 +---- flexidag/src/blockdag.rs | 16 +--------------- flexidag/src/consensusdb/consenses_state.rs | 6 +----- flexidag/src/prune/pruning_point_manager.rs | 11 ++++++----- flexidag/tests/tests.rs | 2 -- 5 files changed, 9 insertions(+), 31 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 1615db142b..bbad4b3f9b 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -1519,10 +1519,7 @@ impl BlockChain { if self.epoch.end_block_number() == block.header().number() { self.epoch = get_epoch_from_statedb(&self.statedb)?; } - self.dag.save_dag_state(DagState { - tips, - pruning_point: block.header().pruning_point(), - })?; + self.dag.save_dag_state(DagState { tips })?; Ok(executed_block) } } diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index 0518617a8d..5b14e86385 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -123,7 +123,6 @@ impl BlockDAG { self.commit(genesis, origin)?; self.save_dag_state(DagState { tips: vec![genesis_id], - pruning_point: genesis_id, })?; Ok(origin) } @@ -493,13 +492,9 @@ impl BlockDAG { block_header: &BlockHeader, genesis_id: HashValue, ) -> anyhow::Result<()> { - let dag_state = DagState { - tips: block_header.parents(), - pruning_point: block_header.pruning_point(), - }; let ghostdata = self.ghost_dag_manager().ghostdag(&block_header.parents())?; let 
next_pruning_point = self.pruning_point_manager().next_pruning_point( - &dag_state, + block_header.pruning_point(), &ghostdata, pruning_depth, pruning_finality, @@ -562,15 +557,6 @@ impl BlockDAG { Err(_) => { warn!("Cannot get the dag state by genesis id. Might be it is a new node. The dag state will be: {:?}", read_guard.get_state()?); None -======= - Err(_) => { - info!("The dag state will be saved as {:?}", dag_state); - self.storage.state_store.write().insert(dag_state)?; - } - }, - Err(_) => { - warn!("Cannot get the dag state by genesis id. Might be it is a new node. The dag state will be: {:?}", self.storage.state_store.read().get_state()?); ->>>>>>> e00426dfc (update dag db) } }; diff --git a/flexidag/src/consensusdb/consenses_state.rs b/flexidag/src/consensusdb/consenses_state.rs index a6b0f3cf09..481c415fdb 100644 --- a/flexidag/src/consensusdb/consenses_state.rs +++ b/flexidag/src/consensusdb/consenses_state.rs @@ -9,7 +9,6 @@ use std::sync::Arc; #[derive(Eq, PartialEq, Hash, Deserialize, Serialize, Clone, Debug, Default)] pub struct DagState { pub tips: Vec, - pub pruning_point: Hash, } pub(crate) const DAG_STATE_STORE_CF: &str = "dag-state-store"; @@ -88,9 +87,6 @@ pub struct DagStateView { impl DagStateView { pub fn into_state(self) -> DagState { - DagState { - tips: self.tips, - pruning_point: self.pruning_point, - } + DagState { tips: self.tips } } } diff --git a/flexidag/src/prune/pruning_point_manager.rs b/flexidag/src/prune/pruning_point_manager.rs index bd41eaa35a..f538a58f76 100644 --- a/flexidag/src/prune/pruning_point_manager.rs +++ b/flexidag/src/prune/pruning_point_manager.rs @@ -40,9 +40,10 @@ impl PruningPointManagerT { pub fn prune( &self, dag_state: &DagState, + current_pruning_point: HashValue, next_pruning_point: HashValue, ) -> anyhow::Result> { - if dag_state.pruning_point == HashValue::zero() { + if current_pruning_point == HashValue::zero() { return Ok(dag_state.tips.clone()); } anyhow::Ok( @@ -60,12 +61,12 @@ impl PruningPointManagerT { pub(crate) fn next_pruning_point( &self, - dag_state: &DagState, + pruning_point: HashValue, ghostdata: &GhostdagData, pruning_depth: u64, pruning_finality: u64, ) -> anyhow::Result { - let pruning_ghostdata = self.ghost_dag_store.get_data(dag_state.pruning_point)?; + let pruning_ghostdata = self.ghost_dag_store.get_data(pruning_point)?; let min_required_blue_score_for_next_pruning_point = (self.finality_score(pruning_ghostdata.blue_score, pruning_finality) + 1) * pruning_finality; @@ -80,7 +81,7 @@ impl PruningPointManagerT { .get_compact_data(dag_state.pruning_point)?; if min_required_blue_score_for_next_pruning_point + pruning_depth <= ghostdata.blue_score { for child in self.reachability_service().forward_chain_iterator( - dag_state.pruning_point, + pruning_point, ghostdata.selected_parent, true, ) { @@ -107,7 +108,7 @@ impl PruningPointManagerT { } if latest_pruning_ghost_data.selected_parent == HashValue::new(ORIGIN) { - anyhow::Ok(dag_state.pruning_point) // still genesis + anyhow::Ok(pruning_point) // still genesis } else { anyhow::Ok(latest_pruning_ghost_data.selected_parent) } diff --git a/flexidag/tests/tests.rs b/flexidag/tests/tests.rs index 434c8c19d7..3e664bc266 100644 --- a/flexidag/tests/tests.rs +++ b/flexidag/tests/tests.rs @@ -319,7 +319,6 @@ fn test_dag_tips_store() { let state = DagState { tips: vec![Hash::random()], - pruning_point: Hash::random(), }; dag.storage .state_store @@ -970,7 +969,6 @@ fn test_prune() -> anyhow::Result<()> { // prunning process begins dag.save_dag_state(DagState { tips: 
vec![block_red_3.id(), block_main_5.id()], - pruning_point: genesis.id(), })?; let MineNewDagBlockInfo { From 4a39acd219cbb3a289187f747f28880961a323c3 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Tue, 13 Aug 2024 19:47:13 +0800 Subject: [PATCH 33/61] uncomment the pruning code --- chain/src/chain.rs | 2 + flexidag/src/blockdag.rs | 47 ++++--------------- flexidag/tests/tests.rs | 2 +- miner/src/create_block_template/mod.rs | 2 +- .../block_connector_service.rs | 9 +++- sync/src/block_connector/mod.rs | 6 ++- 6 files changed, 24 insertions(+), 44 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index bbad4b3f9b..5691c947f2 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -320,8 +320,10 @@ impl BlockChain { } } else { self.dag().calc_mergeset_and_tips( + &previous_header, G_DAG_TEST_CONFIG.pruning_depth, G_DAG_TEST_CONFIG.pruning_finality, + 0, )? }; debug!( diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index 5b14e86385..c856b7f40f 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -123,6 +123,7 @@ impl BlockDAG { self.commit(genesis, origin)?; self.save_dag_state(DagState { tips: vec![genesis_id], + pruning_point: genesis_id, })?; Ok(origin) } @@ -518,48 +519,18 @@ impl BlockDAG { bail!("pruning point is not correct, the local next pruning point is {}, but the block header pruning point is {}", next_pruning_point, block_header.pruning_point()); } anyhow::Ok(()) - } - - pub fn verify( - &self, - pruning_depth: u64, - pruning_finality: u64, - block_header: &BlockHeader, - genesis_id: HashValue, - ) -> anyhow::Result<()> { - self.verify_pruning_point(pruning_depth, pruning_finality, block_header, genesis_id) - } - - pub fn check_upgrade( - &self, - info: AccumulatorInfo, - storage: Arc, - ) -> anyhow::Result<()> { - let accumulator = MerkleAccumulator::new_with_info( - info, - storage.get_accumulator_store(AccumulatorStoreType::Block), - ); - - let read_guard = self.storage.state_store.read(); - - let update_dag_state = match read_guard.get_state_by_hash( - accumulator - .get_leaf(0)? - .ok_or_else(|| format_err!("no leaf when upgrading dag db"))?, - ) { - anyhow::Result::Ok(dag_state) => match read_guard.get_state() { - anyhow::Result::Ok(saved_dag_state) => { - info!("The dag state is {:?}", saved_dag_state); - None +======= + Err(_) => { + info!("The dag state will be saved as {:?}", dag_state); + self.storage.state_store.write().insert(dag_state)?; } - Err(_) => Some(dag_state), }, Err(_) => { - warn!("Cannot get the dag state by genesis id. Might be it is a new node. The dag state will be: {:?}", read_guard.get_state()?); - None - } - }; + warn!("Cannot get the dag state by genesis id. Might be it is a new node. 
The dag state will be: {:?}", self.storage.state_store.read().get_state()?); +>>>>>>> e00426dfc (update dag db) + } + pub fn verify( drop(read_guard); if let Some(dag_state) = update_dag_state { diff --git a/flexidag/tests/tests.rs b/flexidag/tests/tests.rs index 3e664bc266..94cafdcc4e 100644 --- a/flexidag/tests/tests.rs +++ b/flexidag/tests/tests.rs @@ -975,7 +975,7 @@ fn test_prune() -> anyhow::Result<()> { tips, blue_blocks: _, pruning_point, - } = dag.calc_mergeset_and_tips(pruning_depth, pruning_finality)?; + } = dag.calc_mergeset_and_tips(&block_main_5, pruning_depth, pruning_finality, 2)?; assert_eq!(pruning_point, block_main_2.id()); assert_eq!(tips.len(), 1); diff --git a/miner/src/create_block_template/mod.rs b/miner/src/create_block_template/mod.rs index 7fd06ac88d..4b272acbac 100644 --- a/miner/src/create_block_template/mod.rs +++ b/miner/src/create_block_template/mod.rs @@ -188,7 +188,7 @@ where next_difficulty: difficulty, now_milliseconds: mut now_millis, pruning_point, - } = *block_on(self.block_connector_service.send(MinerRequest {}))??; + } = *block_on(self.block_connector_service.send(MinerRequest { version }))??; let block_gas_limit = self .local_block_gas_limit diff --git a/sync/src/block_connector/block_connector_service.rs b/sync/src/block_connector/block_connector_service.rs index 8617bdd39a..344c990dbf 100644 --- a/sync/src/block_connector/block_connector_service.rs +++ b/sync/src/block_connector/block_connector_service.rs @@ -375,7 +375,7 @@ where { fn handle( &mut self, - _msg: MinerRequest, + msg: MinerRequest, ctx: &mut ServiceContext, ) -> ::Response { let main = self.chain_service.get_main(); @@ -392,7 +392,12 @@ where tips, blue_blocks, pruning_point, - } = dag.calc_mergeset_and_tips(pruning_depth, pruning_finality)?; + } = dag.calc_mergeset_and_tips( + main.status().head(), + pruning_depth, + pruning_finality, + msg.version, + )?; if blue_blocks.is_empty() { bail!("failed to get the blue blocks from the DAG"); } diff --git a/sync/src/block_connector/mod.rs b/sync/src/block_connector/mod.rs index a1f97a65dc..32f45f26fc 100644 --- a/sync/src/block_connector/mod.rs +++ b/sync/src/block_connector/mod.rs @@ -3,7 +3,7 @@ use starcoin_crypto::HashValue; use starcoin_service_registry::ServiceRequest; -use starcoin_types::block::{Block, ExecutedBlock}; +use starcoin_types::block::{Block, ExecutedBlock, Version}; mod block_connector_service; mod metrics; @@ -48,7 +48,9 @@ impl ServiceRequest for ExecuteRequest { } #[derive(Clone, Debug)] -pub struct MinerRequest {} +pub struct MinerRequest { + pub version: Version, +} #[derive(Clone, Debug)] pub struct MinerResponse { From 43339b2a94df27a18bace9a3e148dbb4b8c07f3b Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Tue, 10 Sep 2024 17:16:40 +0800 Subject: [PATCH 34/61] rebase sync parallel3 --- flexidag/src/blockdag.rs | 9 --------- 1 file changed, 9 deletions(-) diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index c856b7f40f..6570739991 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -519,15 +519,6 @@ impl BlockDAG { bail!("pruning point is not correct, the local next pruning point is {}, but the block header pruning point is {}", next_pruning_point, block_header.pruning_point()); } anyhow::Ok(()) -======= - Err(_) => { - info!("The dag state will be saved as {:?}", dag_state); - self.storage.state_store.write().insert(dag_state)?; - } - }, - Err(_) => { - warn!("Cannot get the dag state by genesis id. Might be it is a new node. 
The dag state will be: {:?}", self.storage.state_store.read().get_state()?); ->>>>>>> e00426dfc (update dag db) } pub fn verify( From 07ad098a47d025a8b744f76f37f7bc9ced99416f Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Tue, 10 Sep 2024 20:07:13 +0800 Subject: [PATCH 35/61] fix compiling --- chain/src/chain.rs | 1 - flexidag/src/blockdag.rs | 130 +++++++++++------- flexidag/src/prune/pruning_point_manager.rs | 4 +- flexidag/tests/tests.rs | 2 +- .../block_connector_service.rs | 7 +- 5 files changed, 81 insertions(+), 63 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 5691c947f2..e7dc246dea 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -323,7 +323,6 @@ impl BlockChain { &previous_header, G_DAG_TEST_CONFIG.pruning_depth, G_DAG_TEST_CONFIG.pruning_finality, - 0, )? }; debug!( diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index 6570739991..53f7c02ad0 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -93,13 +93,6 @@ impl BlockDAG { Ok(Self::new(k, dag_storage)) } - pub fn new_by_config(db_path: &Path) -> anyhow::Result { - let config = FlexiDagStorageConfig::create_with_params(1, RocksdbConfig::default()); - let db = FlexiDagStorage::create_from_path(db_path, config)?; - let dag = Self::new(DEFAULT_GHOSTDAG_K, db); - Ok(dag) - } - pub fn has_dag_block(&self, hash: Hash) -> anyhow::Result { Ok(self.storage.header_store.has(hash)?) } @@ -123,7 +116,6 @@ impl BlockDAG { self.commit(genesis, origin)?; self.save_dag_state(DagState { tips: vec![genesis_id], - pruning_point: genesis_id, })?; Ok(origin) } @@ -446,44 +438,42 @@ impl BlockDAG { pub fn calc_mergeset_and_tips( &self, - _pruning_depth: u64, - _pruning_finality: u64, + block_header: &BlockHeader, + pruning_depth: u64, + pruning_finality: u64, ) -> anyhow::Result { let dag_state = self.get_dag_state()?; - let ghostdata = self.ghost_dag_manager().ghostdag(&dag_state.tips)?; - anyhow::Ok(MineNewDagBlockInfo { - tips: dag_state.tips, - blue_blocks: (*ghostdata.mergeset_blues).clone(), - pruning_point: HashValue::zero(), - }) - - // let next_pruning_point = self.pruning_point_manager().next_pruning_point( - // &dag_state, - // &ghostdata, - // pruning_depth, - // pruning_finality, - // )?; - // if next_pruning_point == dag_state.pruning_point { - // anyhow::Ok(MineNewDagBlockInfo { - // tips: dag_state.tips, - // blue_blocks: (*ghostdata.mergeset_blues).clone(), - // pruning_point: next_pruning_point, - // }) - // } else { - // let pruned_tips = self - // .pruning_point_manager() - // .prune(&dag_state, next_pruning_point)?; - // let mergeset_blues = (*self - // .ghost_dag_manager() - // .ghostdag(&pruned_tips)? 
- // .mergeset_blues) - // .clone(); - // anyhow::Ok(MineNewDagBlockInfo { - // tips: pruned_tips, - // blue_blocks: mergeset_blues, - // pruning_point: next_pruning_point, - // }) - // } + let ghostdata = self.storage.ghost_dag_store.get_data(block_header.id())?; + + let next_pruning_point = self.pruning_point_manager().next_pruning_point( + block_header.pruning_point(), + ghostdata.as_ref(), + pruning_depth, + pruning_finality, + )?; + if next_pruning_point == block_header.pruning_point() { + anyhow::Ok(MineNewDagBlockInfo { + tips: dag_state.tips, + blue_blocks: (*ghostdata.mergeset_blues).clone(), + pruning_point: next_pruning_point, + }) + } else { + let pruned_tips = self.pruning_point_manager().prune( + &dag_state, + block_header.pruning_point(), + next_pruning_point, + )?; + let mergeset_blues = (*self + .ghost_dag_manager() + .ghostdag(&pruned_tips)? + .mergeset_blues) + .clone(); + anyhow::Ok(MineNewDagBlockInfo { + tips: pruned_tips, + blue_blocks: mergeset_blues, + pruning_point: next_pruning_point, + }) + } } fn verify_pruning_point( @@ -521,7 +511,50 @@ impl BlockDAG { anyhow::Ok(()) } - pub fn verify( + pub fn reachability_store( + &self, + ) -> Arc> { + self.storage.reachability_store.clone() + } + + pub fn verify_and_ghostdata( + &self, + blue_blocks: &[BlockHeader], + header: &BlockHeader, + ) -> Result { + self.ghost_dag_manager() + .verify_and_ghostdata(blue_blocks, header) + } + pub fn check_upgrade( + &self, + info: AccumulatorInfo, + storage: Arc, + ) -> anyhow::Result<()> { + let accumulator = MerkleAccumulator::new_with_info( + info, + storage.get_accumulator_store(AccumulatorStoreType::Block), + ); + + let read_guard = self.storage.state_store.read(); + + let update_dag_state = match read_guard.get_state_by_hash( + accumulator + .get_leaf(0)? + .ok_or_else(|| format_err!("no leaf when upgrading dag db"))?, + ) { + anyhow::Result::Ok(dag_state) => match read_guard.get_state() { + anyhow::Result::Ok(saved_dag_state) => { + info!("The dag state is {:?}", saved_dag_state); + None + } + Err(_) => Some(dag_state), + }, + Err(_) => { + warn!("Cannot get the dag state by genesis id. Might be it is a new node. 
The dag state will be: {:?}", read_guard.get_state()?); + None + } + }; + drop(read_guard); if let Some(dag_state) = update_dag_state { @@ -533,11 +566,4 @@ impl BlockDAG { anyhow::Ok(()) } - - pub fn reachability_store( - &self, - ) -> Arc> { - self.storage.reachability_store.clone() - } - } diff --git a/flexidag/src/prune/pruning_point_manager.rs b/flexidag/src/prune/pruning_point_manager.rs index f538a58f76..c795ac6d16 100644 --- a/flexidag/src/prune/pruning_point_manager.rs +++ b/flexidag/src/prune/pruning_point_manager.rs @@ -76,9 +76,7 @@ impl PruningPointManagerT { min_required_blue_score_for_next_pruning_point ); - let mut latest_pruning_ghost_data = self - .ghost_dag_store - .get_compact_data(dag_state.pruning_point)?; + let mut latest_pruning_ghost_data = self.ghost_dag_store.get_compact_data(pruning_point)?; if min_required_blue_score_for_next_pruning_point + pruning_depth <= ghostdata.blue_score { for child in self.reachability_service().forward_chain_iterator( pruning_point, diff --git a/flexidag/tests/tests.rs b/flexidag/tests/tests.rs index 94cafdcc4e..00183ed654 100644 --- a/flexidag/tests/tests.rs +++ b/flexidag/tests/tests.rs @@ -975,7 +975,7 @@ fn test_prune() -> anyhow::Result<()> { tips, blue_blocks: _, pruning_point, - } = dag.calc_mergeset_and_tips(&block_main_5, pruning_depth, pruning_finality, 2)?; + } = dag.calc_mergeset_and_tips(&block_main_5, pruning_depth, pruning_finality)?; assert_eq!(pruning_point, block_main_2.id()); assert_eq!(tips.len(), 1); diff --git a/sync/src/block_connector/block_connector_service.rs b/sync/src/block_connector/block_connector_service.rs index 344c990dbf..49d9f9b622 100644 --- a/sync/src/block_connector/block_connector_service.rs +++ b/sync/src/block_connector/block_connector_service.rs @@ -392,12 +392,7 @@ where tips, blue_blocks, pruning_point, - } = dag.calc_mergeset_and_tips( - main.status().head(), - pruning_depth, - pruning_finality, - msg.version, - )?; + } = dag.calc_mergeset_and_tips(main.status().head(), pruning_depth, pruning_finality)?; if blue_blocks.is_empty() { bail!("failed to get the blue blocks from the DAG"); } From f08687acf8efc459f697a7b6a1104f657bc79880 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Wed, 11 Sep 2024 12:26:29 +0800 Subject: [PATCH 36/61] db update for dag state --- chain/src/chain.rs | 13 +- flexidag/src/blockdag.rs | 114 ++++++++++-------- flexidag/src/consensusdb/consenses_state.rs | 12 +- flexidag/tests/tests.rs | 13 +- node/src/node.rs | 5 +- .../block_connector_service.rs | 2 +- 6 files changed, 85 insertions(+), 74 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index e7dc246dea..b3f29b2066 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -178,7 +178,7 @@ impl BlockChain { } fn init_dag(mut dag: BlockDAG, genesis_header: BlockHeader) -> Result { - match dag.get_dag_state() { + match dag.get_dag_state(genesis_header.pruning_point()) { anyhow::Result::Ok(_dag_state) => (), Err(e) => match e.downcast::()? 
{ StoreError::KeyNotFound(_) => { @@ -984,7 +984,7 @@ impl BlockChain { } pub fn get_dag_state(&self) -> Result { - self.dag.get_dag_state() + self.dag.get_dag_state(self.status().head().pruning_point()) } } @@ -1336,7 +1336,9 @@ impl ChainReader for BlockChain { } fn current_tips_hash(&self) -> Result> { - self.dag.get_dag_state().map(|state| state.tips) + self.dag + .get_dag_state(self.status().head().id()) + .map(|state| state.tips) } fn has_dag_block(&self, header_id: HashValue) -> Result { @@ -1520,7 +1522,10 @@ impl BlockChain { if self.epoch.end_block_number() == block.header().number() { self.epoch = get_epoch_from_statedb(&self.statedb)?; } - self.dag.save_dag_state(DagState { tips })?; + self.dag.save_dag_state( + executed_block.block().header().pruning_point(), + DagState { tips }, + )?; Ok(executed_block) } } diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index 53f7c02ad0..7209a2c430 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -13,14 +13,11 @@ use crate::consensusdb::{ use crate::ghostdag::protocol::GhostdagManager; use crate::prune::pruning_point_manager::PruningPointManagerT; use crate::{process_key_already_error, reachability}; -use anyhow::{bail, ensure, format_err, Ok}; -use starcoin_accumulator::node::AccumulatorStoreType; -use starcoin_accumulator::{Accumulator, MerkleAccumulator}; +use anyhow::{bail, ensure, Ok}; use starcoin_config::temp_dir; use starcoin_crypto::{HashValue as Hash, HashValue}; -use starcoin_logger::prelude::{debug, info, warn}; -use starcoin_storage::Store; -use starcoin_types::block::{AccumulatorInfo, BlockHeader}; +use starcoin_logger::prelude::{debug, info}; +use starcoin_types::block::BlockHeader; use starcoin_types::{ blockhash::{BlockHashes, KType}, consensus_header::ConsensusHeader, @@ -113,10 +110,14 @@ impl BlockDAG { .write() .insert(origin, BlockHashes::new(vec![]))?; + let pruning_point = genesis.pruning_point(); self.commit(genesis, origin)?; - self.save_dag_state(DagState { - tips: vec![genesis_id], - })?; + self.save_dag_state( + pruning_point, + DagState { + tips: vec![genesis_id], + }, + )?; Ok(origin) } pub fn ghostdata(&self, parents: &[HashValue]) -> anyhow::Result { @@ -423,12 +424,12 @@ impl BlockDAG { } } - pub fn get_dag_state(&self) -> anyhow::Result { - Ok(self.storage.state_store.read().get_state()?) + pub fn get_dag_state(&self, hash: Hash) -> anyhow::Result { + Ok(self.storage.state_store.read().get_state_by_hash(hash)?) 
} - pub fn save_dag_state(&self, state: DagState) -> anyhow::Result<()> { - self.storage.state_store.write().insert(state)?; + pub fn save_dag_state(&self, hash: Hash, state: DagState) -> anyhow::Result<()> { + self.storage.state_store.write().insert(hash, state)?; Ok(()) } @@ -442,12 +443,12 @@ impl BlockDAG { pruning_depth: u64, pruning_finality: u64, ) -> anyhow::Result { - let dag_state = self.get_dag_state()?; - let ghostdata = self.storage.ghost_dag_store.get_data(block_header.id())?; + let dag_state = self.get_dag_state(block_header.id())?; + let ghostdata = self.ghostdata(&dag_state.tips)?; let next_pruning_point = self.pruning_point_manager().next_pruning_point( block_header.pruning_point(), - ghostdata.as_ref(), + &ghostdata, pruning_depth, pruning_finality, )?; @@ -525,43 +526,54 @@ impl BlockDAG { self.ghost_dag_manager() .verify_and_ghostdata(blue_blocks, header) } - pub fn check_upgrade( - &self, - info: AccumulatorInfo, - storage: Arc, - ) -> anyhow::Result<()> { - let accumulator = MerkleAccumulator::new_with_info( - info, - storage.get_accumulator_store(AccumulatorStoreType::Block), - ); - - let read_guard = self.storage.state_store.read(); - - let update_dag_state = match read_guard.get_state_by_hash( - accumulator - .get_leaf(0)? - .ok_or_else(|| format_err!("no leaf when upgrading dag db"))?, - ) { - anyhow::Result::Ok(dag_state) => match read_guard.get_state() { - anyhow::Result::Ok(saved_dag_state) => { - info!("The dag state is {:?}", saved_dag_state); - None + pub fn check_upgrade(&self, main: &BlockHeader) -> anyhow::Result<()> { + // set the state with key 0 + if main.version() == 0 { + let result_dag_state = self + .storage + .state_store + .read() + .get_state_by_hash(main.pruning_point()); + match result_dag_state { + anyhow::Result::Ok(_dag_state) => (), + Err(_) => { + let result_dag_state = + self.storage.state_store.read().get_state_by_hash(0.into()); + match result_dag_state { + anyhow::Result::Ok(dag_state) => self + .storage + .state_store + .write() + .insert(main.pruning_point(), dag_state)?, + Err(_) => { + let dag_state = self + .storage + .state_store + .read() + .get_state_by_hash(main.id())?; + self.storage + .state_store + .write() + .insert(0.into(), dag_state.clone())?; + self.storage + .state_store + .write() + .insert(HashValue::zero(), dag_state)?; + } + } } - Err(_) => Some(dag_state), - }, - Err(_) => { - warn!("Cannot get the dag state by genesis id. Might be it is a new node. 
The dag state will be: {:?}", read_guard.get_state()?); - None } - }; - - drop(read_guard); - - if let Some(dag_state) = update_dag_state { - let write_guard = self.storage.state_store.write(); - info!("The dag state will be saved as {:?}", dag_state); - write_guard.insert(dag_state)?; - drop(write_guard); + return Ok(()); + } else if main.version() == 1 { + let dag_state = self + .storage + .state_store + .read() + .get_state_by_hash(0.into())?; + self.storage + .state_store + .write() + .insert(HashValue::zero(), dag_state)?; } anyhow::Ok(()) diff --git a/flexidag/src/consensusdb/consenses_state.rs b/flexidag/src/consensusdb/consenses_state.rs index 481c415fdb..229e790db3 100644 --- a/flexidag/src/consensusdb/consenses_state.rs +++ b/flexidag/src/consensusdb/consenses_state.rs @@ -34,13 +34,12 @@ impl ValueCodec for DagState { } pub trait DagStateReader { - fn get_state(&self) -> Result; fn get_state_by_hash(&self, hash: Hash) -> Result; } pub trait DagStateStore: DagStateReader { // This is append only - fn insert(&self, state: DagState) -> Result<(), StoreError>; + fn insert(&self, hash: Hash, state: DagState) -> Result<(), StoreError>; } /// A DB + cache implementation of `HeaderStore` trait, with concurrency support. @@ -60,11 +59,6 @@ impl DbDagStateStore { } impl DagStateReader for DbDagStateStore { - fn get_state(&self) -> Result { - let result = self.dag_state_access.read(0.into())?; - Ok(result) - } - fn get_state_by_hash(&self, hash: Hash) -> Result { let result = self.dag_state_access.read(hash)?; Ok(result) @@ -72,9 +66,9 @@ impl DagStateReader for DbDagStateStore { } impl DagStateStore for DbDagStateStore { - fn insert(&self, state: DagState) -> Result<(), StoreError> { + fn insert(&self, hash: Hash, state: DagState) -> Result<(), StoreError> { self.dag_state_access - .write(DirectDbWriter::new(&self.db), 0.into(), state)?; + .write(DirectDbWriter::new(&self.db), hash, state)?; Ok(()) } } diff --git a/flexidag/tests/tests.rs b/flexidag/tests/tests.rs index 00183ed654..411063a29a 100644 --- a/flexidag/tests/tests.rs +++ b/flexidag/tests/tests.rs @@ -323,14 +323,14 @@ fn test_dag_tips_store() { dag.storage .state_store .write() - .insert(state.clone()) + .insert(Hash::zero(), state.clone()) .expect("failed to store the dag state"); assert_eq!( dag.storage .state_store .read() - .get_state() + .get_state_by_hash(Hash::zero()) .expect("failed to get the dag state"), state ); @@ -967,9 +967,12 @@ fn test_prune() -> anyhow::Result<()> { assert_eq!(observer3.selected_parent, observer2.selected_parent); // prunning process begins - dag.save_dag_state(DagState { - tips: vec![block_red_3.id(), block_main_5.id()], - })?; + dag.save_dag_state( + Hash::zero(), + DagState { + tips: vec![block_red_3.id(), block_main_5.id()], + }, + )?; let MineNewDagBlockInfo { tips, diff --git a/node/src/node.rs b/node/src/node.rs index e9c2891323..14391f7e59 100644 --- a/node/src/node.rs +++ b/node/src/node.rs @@ -336,10 +336,7 @@ impl NodeService { upgrade_time.as_secs() ); - dag.check_upgrade( - chain_info.status().info().block_accumulator_info.clone(), - storage.clone(), - )?; + dag.check_upgrade(chain_info.status().head())?; registry.put_shared(genesis).await?; diff --git a/sync/src/block_connector/block_connector_service.rs b/sync/src/block_connector/block_connector_service.rs index 49d9f9b622..3f2b5695fd 100644 --- a/sync/src/block_connector/block_connector_service.rs +++ b/sync/src/block_connector/block_connector_service.rs @@ -375,7 +375,7 @@ where { fn handle( &mut self, - msg: MinerRequest, + 
_msg: MinerRequest, ctx: &mut ServiceContext, ) -> ::Response { let main = self.chain_service.get_main(); From 4cfc2ccc6c6253c01de9af500eb63235d0e14a82 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Wed, 11 Sep 2024 21:01:40 +0800 Subject: [PATCH 37/61] fix connection --- chain/api/src/chain.rs | 2 +- chain/src/chain.rs | 48 +++++++++++++------ flexidag/src/blockdag.rs | 2 +- .../block_connector_service.rs | 28 +++++++---- .../test_write_dag_block_chain.rs | 4 +- sync/src/block_connector/write_block_chain.rs | 11 ++--- 6 files changed, 64 insertions(+), 31 deletions(-) diff --git a/chain/api/src/chain.rs b/chain/api/src/chain.rs index 2d801f1d32..92116f2370 100644 --- a/chain/api/src/chain.rs +++ b/chain/api/src/chain.rs @@ -106,7 +106,7 @@ pub trait ChainReader { access_path: Option, ) -> Result>; - fn current_tips_hash(&self) -> Result>; + fn current_tips_hash(&self, pruning_point: HashValue) -> Result>; fn has_dag_block(&self, header_id: HashValue) -> Result; fn check_chain_type(&self) -> Result; fn verify_and_ghostdata( diff --git a/chain/src/chain.rs b/chain/src/chain.rs index b3f29b2066..7d82228142 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -1335,9 +1335,9 @@ impl ChainReader for BlockChain { })) } - fn current_tips_hash(&self) -> Result> { + fn current_tips_hash(&self, pruning_point: HashValue) -> Result> { self.dag - .get_dag_state(self.status().head().id()) + .get_dag_state(pruning_point) .map(|state| state.tips) } @@ -1472,16 +1472,26 @@ impl BlockChain { fn connect_dag(&mut self, executed_block: ExecutedBlock) -> Result { let dag = self.dag.clone(); let (new_tip_block, _) = (executed_block.block(), executed_block.block_info()); - let mut tips = self.current_tips_hash()?; - let parents = executed_block.block.header.parents_hash(); - if !tips.contains(&new_tip_block.id()) { - for hash in parents { - tips.retain(|x| *x != hash); - } - if !dag.check_ancestor_of(new_tip_block.id(), tips.clone())? { - tips.push(new_tip_block.id()); + let parent_header = self + .storage + .get_block_header_by_hash(new_tip_block.header().parent_hash())? + .ok_or_else(|| { + format_err!( + "Dag block should exist, block id: {:?}", + new_tip_block.header().parent_hash() + ) + })?; + let mut tips = self.current_tips_hash(parent_header.pruning_point())?; + + let mut new_tips = vec![]; + for hash in tips { + if !dag.check_ancestor_of(hash, vec![new_tip_block.id()])? { + new_tips.push(hash); } } + tips = new_tips; + tips.push(new_tip_block.id()); + // Caculate the ghostdata of the virutal node created by all tips. // And the ghostdata.selected of the tips will be the latest head. 
let block_hash = dag @@ -1522,10 +1532,20 @@ impl BlockChain { if self.epoch.end_block_number() == block.header().number() { self.epoch = get_epoch_from_statedb(&self.statedb)?; } - self.dag.save_dag_state( - executed_block.block().header().pruning_point(), - DagState { tips }, - )?; + + if new_tip_block.header().pruning_point() == block.header().pruning_point() { + self.dag + .save_dag_state(block.header().pruning_point(), DagState { tips })?; + } else { + let new_tips = dag.pruning_point_manager().prune( + &DagState { tips }, + block.header().pruning_point(), + new_tip_block.header().pruning_point(), + )?; + self.dag + .save_dag_state(block.header().pruning_point(), DagState { tips: new_tips })?; + } + Ok(executed_block) } } diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index 7209a2c430..89a573aac3 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -443,7 +443,7 @@ impl BlockDAG { pruning_depth: u64, pruning_finality: u64, ) -> anyhow::Result { - let dag_state = self.get_dag_state(block_header.id())?; + let dag_state = self.get_dag_state(block_header.pruning_point())?; let ghostdata = self.ghostdata(&dag_state.tips)?; let next_pruning_point = self.pruning_point_manager().next_pruning_point( diff --git a/sync/src/block_connector/block_connector_service.rs b/sync/src/block_connector/block_connector_service.rs index 3f2b5695fd..06fe9f664f 100644 --- a/sync/src/block_connector/block_connector_service.rs +++ b/sync/src/block_connector/block_connector_service.rs @@ -14,6 +14,7 @@ use crate::sync::{CheckSyncEvent, SyncService}; use crate::tasks::{BlockConnectedEvent, BlockConnectedFinishEvent, BlockDiskCheckEvent}; use anyhow::{bail, format_err, Ok, Result}; use network_api::PeerProvider; +use starcoin_chain::BlockChain; use starcoin_chain_api::{ChainReader, ConnectBlockError, WriteableChainService}; use starcoin_config::{NodeConfig, G_CRATE_VERSION}; use starcoin_consensus::Consensus; @@ -378,30 +379,41 @@ where _msg: MinerRequest, ctx: &mut ServiceContext, ) -> ::Response { - let main = self.chain_service.get_main(); + let main_header = self.chain_service.get_main().status().head().clone(); let dag = self.chain_service.get_dag(); - let epoch = main.epoch().clone(); - let strategy = epoch.strategy(); - let on_chain_block_gas_limit = epoch.block_gas_limit(); + let (pruning_depth, pruning_finality) = ctx .get_shared::>()? .base() .net() .pruning_config(); + let MineNewDagBlockInfo { tips, blue_blocks, pruning_point, - } = dag.calc_mergeset_and_tips(main.status().head(), pruning_depth, pruning_finality)?; + } = dag.calc_mergeset_and_tips(&main_header, pruning_depth, pruning_finality)?; + if blue_blocks.is_empty() { bail!("failed to get the blue blocks from the DAG"); } - let selected_parent = blue_blocks.first().expect("the blue block must exist"); + let selected_parent = *blue_blocks + .first() + .ok_or_else(|| format_err!("the blue blocks must be not be 0!"))?; + + let time_service = self.config.net().time_service(); + let storage = ctx.get_shared::>()?; + let vm_metrics = ctx.get_shared_opt::()?; + let main = BlockChain::new(time_service, selected_parent, storage, vm_metrics, dag)?; + + let epoch = main.epoch().clone(); + let strategy = epoch.strategy(); + let on_chain_block_gas_limit = epoch.block_gas_limit(); let previous_header = main .get_storage() - .get_block_header_by_hash(*selected_parent)? + .get_block_header_by_hash(selected_parent)? 
.ok_or_else(|| format_err!("BlockHeader should exist by hash: {}", selected_parent))?; - let next_difficulty = epoch.strategy().calculate_next_difficulty(main)?; + let next_difficulty = epoch.strategy().calculate_next_difficulty(&main)?; let now_milliseconds = main.time_service().now_millis(); Ok(Box::new(MinerResponse { diff --git a/sync/src/block_connector/test_write_dag_block_chain.rs b/sync/src/block_connector/test_write_dag_block_chain.rs index bdc0992aa5..4cc259234f 100644 --- a/sync/src/block_connector/test_write_dag_block_chain.rs +++ b/sync/src/block_connector/test_write_dag_block_chain.rs @@ -50,7 +50,9 @@ pub fn new_dag_block( let miner_address = *miner.address(); let block_chain = writeable_block_chain_service.get_main(); - let tips = block_chain.current_tips_hash().expect("failed to get tips"); + let tips = block_chain + .current_tips_hash(block_chain.status().head().pruning_point()) + .expect("failed to get tips"); let (block_template, _) = block_chain .create_block_template( miner_address, diff --git a/sync/src/block_connector/write_block_chain.rs b/sync/src/block_connector/write_block_chain.rs index 36817a9eb3..a0f53d82f8 100644 --- a/sync/src/block_connector/write_block_chain.rs +++ b/sync/src/block_connector/write_block_chain.rs @@ -591,12 +591,11 @@ where return Ok(ConnectOk::Duplicate); } - if self.main.check_chain_type()? == ChainType::Dag - && !block - .header() - .parents_hash() - .iter() - .all(|parent_hash| self.main.dag().has_dag_block(*parent_hash).unwrap_or(false)) + if !block + .header() + .parents_hash() + .iter() + .all(|parent_hash| self.main.dag().has_dag_block(*parent_hash).unwrap_or(false)) { debug!( "block: {:?} is a future dag block, trigger sync to pull other dag blocks", From b987a310cc4066742282c5b5bee3a582cd446722 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Thu, 12 Sep 2024 21:22:22 +0800 Subject: [PATCH 38/61] add verify pruning --- chain/api/src/chain.rs | 2 + chain/src/chain.rs | 50 ++++++++++--- chain/src/verifier/mod.rs | 40 +++++++++- config/src/genesis_config.rs | 35 +++++---- flexidag/src/blockdag.rs | 73 ++++++++++--------- flexidag/src/prune/pruning_point_manager.rs | 2 +- miner/src/create_block_template/mod.rs | 1 + .../block_connector_service.rs | 13 +++- sync/src/block_connector/write_block_chain.rs | 11 +-- 9 files changed, 154 insertions(+), 73 deletions(-) diff --git a/chain/api/src/chain.rs b/chain/api/src/chain.rs index 92116f2370..53bcdd2fb8 100644 --- a/chain/api/src/chain.rs +++ b/chain/api/src/chain.rs @@ -114,6 +114,8 @@ pub trait ChainReader { uncles: &[BlockHeader], header: &BlockHeader, ) -> Result; + fn is_dag_ancestor_of(&self, ancestor: HashValue, descendants: Vec) -> Result; + fn get_pruning_height(&self) -> BlockNumber; } pub trait ChainWriter { diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 7d82228142..be736d17f6 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -13,7 +13,6 @@ use starcoin_chain_api::{ ExcludedTxns, ExecutedBlock, MintedUncleNumber, TransactionInfoWithProof, VerifiedBlock, VerifyBlockField, }; -use starcoin_config::genesis_config::G_DAG_TEST_CONFIG; use starcoin_consensus::Consensus; use starcoin_crypto::hash::PlainCryptoHash; use starcoin_crypto::HashValue; @@ -319,11 +318,14 @@ impl BlockChain { pruning_point, // TODO: new test cases will need pass this field if they have some special requirements. } } else { - self.dag().calc_mergeset_and_tips( - &previous_header, - G_DAG_TEST_CONFIG.pruning_depth, - G_DAG_TEST_CONFIG.pruning_finality, - )? 
+ let dag_state = self.get_dag_state()?; + let ghostdata = self.dag().ghost_dag_manager().ghostdag(&dag_state.tips)?; + + MineNewDagBlockInfo { + tips: dag_state.tips, + blue_blocks: (*ghostdata.mergeset_blues).clone(), + pruning_point: HashValue::zero(), + } }; debug!( "Blue blocks:{:?} in chain/create_block_template_by_header", @@ -1363,7 +1365,20 @@ impl ChainReader for BlockChain { uncles: &[BlockHeader], header: &BlockHeader, ) -> Result { - self.dag().verify_and_ghostdata(uncles, header) + let previous_header = self + .storage + .get_block_header_by_hash(header.parent_hash())? + .ok_or_else(|| format_err!("cannot find parent block header"))?; + self.dag() + .verify_and_ghostdata(uncles, header, previous_header.pruning_point()) + } + + fn is_dag_ancestor_of(&self, ancestor: HashValue, descendants: Vec) -> Result { + self.dag().check_ancestor_of(ancestor, descendants) + } + + fn get_pruning_height(&self) -> BlockNumber { + self.get_pruning_height() } } @@ -1533,14 +1548,14 @@ impl BlockChain { self.epoch = get_epoch_from_statedb(&self.statedb)?; } - if new_tip_block.header().pruning_point() == block.header().pruning_point() { + if parent_header.pruning_point() == block.header().pruning_point() { self.dag .save_dag_state(block.header().pruning_point(), DagState { tips })?; } else { let new_tips = dag.pruning_point_manager().prune( &DagState { tips }, + parent_header.pruning_point(), block.header().pruning_point(), - new_tip_block.header().pruning_point(), )?; self.dag .save_dag_state(block.header().pruning_point(), DagState { tips: new_tips })?; @@ -1548,6 +1563,23 @@ impl BlockChain { Ok(executed_block) } + + pub fn get_pruning_height(&self) -> BlockNumber { + let chain_id = self.status().head().chain_id(); + if chain_id.is_vega() { + 4000000 + } else if chain_id.is_proxima() { + 700000 + } else if chain_id.is_halley() { + 4200000 + } else if chain_id.is_main() { + 0 + } else if chain_id.is_dag_test() || chain_id.is_test() { + BlockNumber::MAX + } else { + 0 + } + } } impl ChainWriter for BlockChain { diff --git a/chain/src/verifier/mod.rs b/chain/src/verifier/mod.rs index 76e9b38679..a48a781581 100644 --- a/chain/src/verifier/mod.rs +++ b/chain/src/verifier/mod.rs @@ -7,6 +7,7 @@ use starcoin_chain_api::{ verify_block, ChainReader, ConnectBlockError, VerifiedBlock, VerifyBlockField, }; use starcoin_consensus::{Consensus, ConsensusVerifyError}; +use starcoin_crypto::HashValue; use starcoin_dag::types::ghostdata::GhostdagData; use starcoin_logger::prelude::debug; use starcoin_open_block::AddressFilter; @@ -383,6 +384,33 @@ impl BasicDagVerifier { Ok::<(), ConnectBlockError>(()) })?; + // verify the pruning point + let parent_header = current_chain.current_header(); + if parent_header.pruning_point() != HashValue::zero() { + // the chain had pruning point already checking the descendants of the pruning point is a must + // check the parents are the descendants of the pruning point + parents_hash.iter().try_for_each(|parent_hash| { + verify_block!( + VerifyBlockField::Header, + current_chain.is_dag_ancestor_of(new_block_header.pruning_point(), vec![*parent_hash]).map_err(|e| { + ConnectBlockError::VerifyBlockFailed( + VerifyBlockField::Header, + anyhow::anyhow!( + "the block {:?} 's parent: {:?} is not the descendant of pruning point {:?}, error: {:?}", + new_block_header.id(), + parent_hash, + new_block_header.pruning_point(), + e + ), + ) + })?, + "Invalid block: parent {} might not exist.", + parent_hash + ); + Ok::<(), ConnectBlockError>(()) + })?; + } + 
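(A compact restatement of the pruning-point check added above, with hypothetical names: reaches stands in for current_chain.is_dag_ancestor_of. The real code gates the check on the current head's pruning point being non-zero; this sketch folds that into a single zero test.)

    // Sketch only: once pruning is active, a block is acceptable only if its
    // declared pruning point is a DAG ancestor of every one of its parents.
    type BlockId = [u8; 32]; // stand-in for starcoin_crypto::HashValue
    const ZERO_ID: BlockId = [0u8; 32];

    fn parents_covered_by_pruning_point(
        pruning_point: BlockId,
        parents: &[BlockId],
        reaches: impl Fn(BlockId, BlockId) -> bool, // reaches(a, b): a is a DAG ancestor of b
    ) -> bool {
        if pruning_point == ZERO_ID {
            return true; // pruning not yet in effect, nothing to verify
        }
        parents.iter().all(|p| reaches(pruning_point, *p))
    }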
ConsensusVerifier::verify_header(current_chain, new_block_header) } @@ -429,14 +457,18 @@ impl BlockVerifier for DagVerifier { } fn verify_uncles( - _current_chain: &R, - _uncles: &[BlockHeader], - _header: &BlockHeader, + current_chain: &R, + uncles: &[BlockHeader], + header: &BlockHeader, ) -> Result> where R: ChainReader, { - Ok(None) + Ok(Some(BasicDagVerifier::verify_blue_blocks( + current_chain, + uncles, + header, + )?)) } } diff --git a/config/src/genesis_config.rs b/config/src/genesis_config.rs index e479c4de8f..69483e1f29 100644 --- a/config/src/genesis_config.rs +++ b/config/src/genesis_config.rs @@ -754,6 +754,9 @@ static G_DEFAULT_BASE_REWARD_PER_BLOCK: Lazy> = pub static G_BASE_BLOCK_GAS_LIMIT: u64 = 50_000_000; //must big than maximum_number_of_gas_units +pub static G_PRUNING_DEPTH: u64 = 17280; +pub static G_PRUNING_FINALITY: u64 = 8640; + static G_EMPTY_BOOT_NODES: Lazy> = Lazy::new(Vec::new); const ONE_DAY: u64 = 86400; @@ -804,8 +807,8 @@ pub static G_DAG_TEST_CONFIG: Lazy = Lazy::new(|| { min_action_delay: 60 * 60 * 1000, // 1h }, transaction_timeout: ONE_DAY, - pruning_depth: 17280, - pruning_finality: 8640, + pruning_depth: G_PRUNING_DEPTH, + pruning_finality: G_PRUNING_FINALITY, block_header_version: 1, } }); @@ -857,8 +860,8 @@ pub static G_TEST_CONFIG: Lazy = Lazy::new(|| { min_action_delay: 60 * 60 * 1000, // 1h }, transaction_timeout: ONE_DAY, - pruning_depth: 17280, - pruning_finality: 8640, + pruning_depth: G_PRUNING_DEPTH, + pruning_finality: G_PRUNING_FINALITY, block_header_version: 1, } }); @@ -913,8 +916,8 @@ pub static G_DEV_CONFIG: Lazy = Lazy::new(|| { min_action_delay: 60 * 60 * 1000, // 1h }, transaction_timeout: ONE_DAY, - pruning_depth: 17280, - pruning_finality: 8640, + pruning_depth: G_PRUNING_DEPTH, + pruning_finality: G_PRUNING_FINALITY, block_header_version: 1, } }); @@ -974,8 +977,8 @@ pub static G_HALLEY_CONFIG: Lazy = Lazy::new(|| { min_action_delay: 60 * 60 * 1000, // 1h }, transaction_timeout: ONE_DAY, - pruning_depth: 17280, - pruning_finality: 8640, + pruning_depth: G_PRUNING_DEPTH, + pruning_finality: G_PRUNING_FINALITY, block_header_version: 1, } }); @@ -1036,8 +1039,8 @@ pub static G_PROXIMA_CONFIG: Lazy = Lazy::new(|| { }, transaction_timeout: ONE_DAY, // todo: rollback it to zero and initialize BlockDag properly - pruning_depth: 17280, - pruning_finality: 8640, + pruning_depth: G_PRUNING_DEPTH, + pruning_finality: G_PRUNING_FINALITY, block_header_version: 1, } }); @@ -1096,8 +1099,8 @@ pub static G_BARNARD_CONFIG: Lazy = Lazy::new(|| { min_action_delay: 60 * 60 * 24 * 1000, // 1d }, transaction_timeout: ONE_DAY, - pruning_depth: 17280, - pruning_finality: 8640, + pruning_depth: G_PRUNING_DEPTH, + pruning_finality: G_PRUNING_FINALITY, block_header_version: 1, } }); @@ -1170,8 +1173,8 @@ pub static G_MAIN_CONFIG: Lazy = Lazy::new(|| { min_action_delay: 60 * 60 * 24 * 1000, // 1d }, transaction_timeout: ONE_DAY, - pruning_depth: 17280, - pruning_finality: 8640, + pruning_depth: G_PRUNING_DEPTH, + pruning_finality: G_PRUNING_FINALITY, block_header_version: 1, } }); @@ -1228,8 +1231,8 @@ pub static G_VEGA_CONFIG: Lazy = Lazy::new(|| { min_action_delay: 60 * 60 * 24 * 1000, // 1d }, transaction_timeout: ONE_DAY, - pruning_depth: 17280, - pruning_finality: 8640, + pruning_depth: G_PRUNING_DEPTH, + pruning_finality: G_PRUNING_FINALITY, block_header_version: 1, } }); diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index 89a573aac3..fff66b94b3 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -14,6 +14,7 
@@ use crate::ghostdag::protocol::GhostdagManager; use crate::prune::pruning_point_manager::PruningPointManagerT; use crate::{process_key_already_error, reachability}; use anyhow::{bail, ensure, Ok}; +use starcoin_config::genesis_config::{G_PRUNING_DEPTH, G_PRUNING_FINALITY}; use starcoin_config::temp_dir; use starcoin_crypto::{HashValue as Hash, HashValue}; use starcoin_logger::prelude::{debug, info}; @@ -204,6 +205,7 @@ impl BlockDAG { trusted_ghostdata } }; + // Store ghostdata process_key_already_error( self.storage @@ -318,6 +320,7 @@ impl BlockDAG { } Some(ghostdata) => ghostdata, }; + // Store ghostdata process_key_already_error( self.storage @@ -439,20 +442,20 @@ impl BlockDAG { pub fn calc_mergeset_and_tips( &self, - block_header: &BlockHeader, + previous_header: &BlockHeader, pruning_depth: u64, pruning_finality: u64, ) -> anyhow::Result { - let dag_state = self.get_dag_state(block_header.pruning_point())?; + let dag_state = self.get_dag_state(previous_header.pruning_point())?; let ghostdata = self.ghostdata(&dag_state.tips)?; let next_pruning_point = self.pruning_point_manager().next_pruning_point( - block_header.pruning_point(), + previous_header.pruning_point(), &ghostdata, pruning_depth, pruning_finality, )?; - if next_pruning_point == block_header.pruning_point() { + if next_pruning_point == previous_header.pruning_point() { anyhow::Ok(MineNewDagBlockInfo { tips: dag_state.tips, blue_blocks: (*ghostdata.mergeset_blues).clone(), @@ -461,7 +464,7 @@ impl BlockDAG { } else { let pruned_tips = self.pruning_point_manager().prune( &dag_state, - block_header.pruning_point(), + previous_header.pruning_point(), next_pruning_point, )?; let mergeset_blues = (*self @@ -477,37 +480,35 @@ impl BlockDAG { } } - fn verify_pruning_point( + pub fn verify_pruning_point( &self, - pruning_depth: u64, - pruning_finality: u64, - block_header: &BlockHeader, - genesis_id: HashValue, + previous_pruning_point: HashValue, + next_pruning_point: HashValue, + ghostdata: &GhostdagData, ) -> anyhow::Result<()> { - let ghostdata = self.ghost_dag_manager().ghostdag(&block_header.parents())?; - let next_pruning_point = self.pruning_point_manager().next_pruning_point( - block_header.pruning_point(), - &ghostdata, - pruning_depth, - pruning_finality, + let inside_next_pruning_point = self.pruning_point_manager().next_pruning_point( + previous_pruning_point, + ghostdata, + G_PRUNING_DEPTH, + G_PRUNING_FINALITY, )?; - if (block_header.chain_id().is_vega() - || block_header.chain_id().is_proxima() - || block_header.chain_id().is_halley()) - && block_header.pruning_point() == HashValue::zero() - { - if next_pruning_point == genesis_id { - return anyhow::Ok(()); - } else { - bail!( - "pruning point is not correct, it should update the next pruning point: {}", - next_pruning_point - ); - } - } - if next_pruning_point != block_header.pruning_point() { - bail!("pruning point is not correct, the local next pruning point is {}, but the block header pruning point is {}", next_pruning_point, block_header.pruning_point()); + // if (block_header.chain_id().is_vega() + // || block_header.chain_id().is_proxima() + // || block_header.chain_id().is_halley()) + // && block_header.pruning_point() == HashValue::zero() + // { + // if next_pruning_point == genesis_id { + // return anyhow::Ok(()); + // } else { + // bail!( + // "pruning point is not correct, it should update the next pruning point: {}", + // next_pruning_point + // ); + // } + // } + if next_pruning_point != inside_next_pruning_point { + bail!("pruning point is not 
correct, the local next pruning point is {}, but the block header pruning point is {}", next_pruning_point, inside_next_pruning_point); } anyhow::Ok(()) } @@ -522,9 +523,13 @@ impl BlockDAG { &self, blue_blocks: &[BlockHeader], header: &BlockHeader, + previous_pruning_point: HashValue, ) -> Result { - self.ghost_dag_manager() - .verify_and_ghostdata(blue_blocks, header) + let ghostdata = self + .ghost_dag_manager() + .verify_and_ghostdata(blue_blocks, header)?; + self.verify_pruning_point(previous_pruning_point, header.pruning_point(), &ghostdata)?; + Ok(ghostdata) } pub fn check_upgrade(&self, main: &BlockHeader) -> anyhow::Result<()> { // set the state with key 0 diff --git a/flexidag/src/prune/pruning_point_manager.rs b/flexidag/src/prune/pruning_point_manager.rs index c795ac6d16..bbad1e23ae 100644 --- a/flexidag/src/prune/pruning_point_manager.rs +++ b/flexidag/src/prune/pruning_point_manager.rs @@ -43,7 +43,7 @@ impl PruningPointManagerT { current_pruning_point: HashValue, next_pruning_point: HashValue, ) -> anyhow::Result> { - if current_pruning_point == HashValue::zero() { + if current_pruning_point == next_pruning_point { return Ok(dag_state.tips.clone()); } anyhow::Ok( diff --git a/miner/src/create_block_template/mod.rs b/miner/src/create_block_template/mod.rs index 4b272acbac..3bae182df7 100644 --- a/miner/src/create_block_template/mod.rs +++ b/miner/src/create_block_template/mod.rs @@ -113,6 +113,7 @@ impl ServiceHandler for BlockBuilderService { .net() .genesis_config() .block_header_version; + self.inner.create_block_template(header_version) } } diff --git a/sync/src/block_connector/block_connector_service.rs b/sync/src/block_connector/block_connector_service.rs index 06fe9f664f..758b912450 100644 --- a/sync/src/block_connector/block_connector_service.rs +++ b/sync/src/block_connector/block_connector_service.rs @@ -388,11 +388,22 @@ where .net() .pruning_config(); + // which height to prune the DAG + let MineNewDagBlockInfo { tips, blue_blocks, pruning_point, - } = dag.calc_mergeset_and_tips(&main_header, pruning_depth, pruning_finality)?; + } = if main_header.number() >= self.chain_service.get_main().get_pruning_height() { + dag.calc_mergeset_and_tips(&main_header, pruning_depth, pruning_finality)? + } else { + let tips = dag.get_dag_state(HashValue::zero())?.tips; + MineNewDagBlockInfo { + tips: tips.clone(), + blue_blocks: dag.ghostdata(&tips)?.mergeset_blues.as_ref().clone(), + pruning_point: HashValue::zero(), + } + }; if blue_blocks.is_empty() { bail!("failed to get the blue blocks from the DAG"); diff --git a/sync/src/block_connector/write_block_chain.rs b/sync/src/block_connector/write_block_chain.rs index a0f53d82f8..1802b713de 100644 --- a/sync/src/block_connector/write_block_chain.rs +++ b/sync/src/block_connector/write_block_chain.rs @@ -580,12 +580,6 @@ where fn connect_inner(&mut self, block: Block) -> Result { let block_id = block.id(); - if block_id == *starcoin_storage::BARNARD_HARD_FORK_HASH - && block.header().number() == starcoin_storage::BARNARD_HARD_FORK_HEIGHT - { - debug!("barnard hard fork {}", block_id); - return Err(ConnectBlockError::BarnardHardFork(Box::new(block)).into()); - } if self.main.current_header().id() == block_id { debug!("Repeat connect, current header is {} already.", block_id); return Ok(ConnectOk::Duplicate); @@ -614,7 +608,7 @@ where } let (block_info, fork) = self.find_or_fork(block.header())?; match (block_info, fork) { - //block has been processed in some branch, so just trigger a head selection. 
+ // block has been processed in some branch, so just trigger a head selection. (Some(_block_info), Some(branch)) => { debug!( "Block {} has been processed, trigger head selection, total_difficulty: {}", @@ -624,7 +618,7 @@ where self.select_head(branch)?; Ok(ConnectOk::Duplicate) } - //block has been processed, and its parent is main chain, so just connect it to main chain. + // block has been processed, and its parent is main chain, so just connect it to main chain. (Some(block_info), None) => { let executed_block = self.main.connect(ExecutedBlock { block: block.clone(), @@ -637,6 +631,7 @@ where self.do_new_head(executed_block, 1, vec![block], 0, vec![])?; Ok(ConnectOk::Connect) } + // the block is not processed but its parent branch exists (None, Some(mut branch)) => { let _executed_block = branch.apply(block)?; self.select_head(branch)?; From d55c6782a3ef16e72c10219562630ba7d9275891 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Fri, 13 Sep 2024 10:30:29 +0800 Subject: [PATCH 39/61] add pruning height --- chain/src/chain.rs | 13 +++++++++++-- flexidag/src/blockdag.rs | 8 ++------ 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index be736d17f6..d7b2165acf 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -1369,8 +1369,17 @@ impl ChainReader for BlockChain { .storage .get_block_header_by_hash(header.parent_hash())? .ok_or_else(|| format_err!("cannot find parent block header"))?; - self.dag() - .verify_and_ghostdata(uncles, header, previous_header.pruning_point()) + let ghostdata = self.dag().verify_and_ghostdata(uncles, header)?; + + if self.get_pruning_height() <= self.status().head().number() { + self.dag().verify_pruning_point( + previous_header.pruning_point(), + header.pruning_point(), + &ghostdata, + )?; + } + + Ok(ghostdata) } fn is_dag_ancestor_of(&self, ancestor: HashValue, descendants: Vec) -> Result { diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index fff66b94b3..e766c6a3b4 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -523,13 +523,9 @@ impl BlockDAG { &self, blue_blocks: &[BlockHeader], header: &BlockHeader, - previous_pruning_point: HashValue, ) -> Result { - let ghostdata = self - .ghost_dag_manager() - .verify_and_ghostdata(blue_blocks, header)?; - self.verify_pruning_point(previous_pruning_point, header.pruning_point(), &ghostdata)?; - Ok(ghostdata) + self.ghost_dag_manager() + .verify_and_ghostdata(blue_blocks, header) } pub fn check_upgrade(&self, main: &BlockHeader) -> anyhow::Result<()> { // set the state with key 0 From 954c243f93ea9a425ce4ff7a7245035aa2f88289 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Fri, 13 Sep 2024 11:45:22 +0800 Subject: [PATCH 40/61] no checking the pruning point if the main header still dose not have the pruning point --- chain/src/chain.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index d7b2165acf..b52084ca48 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -1371,7 +1371,7 @@ impl ChainReader for BlockChain { .ok_or_else(|| format_err!("cannot find parent block header"))?; let ghostdata = self.dag().verify_and_ghostdata(uncles, header)?; - if self.get_pruning_height() <= self.status().head().number() { + if self.status().head().pruning_point() != HashValue::zero() { self.dag().verify_pruning_point( previous_header.pruning_point(), header.pruning_point(), @@ -1583,7 +1583,7 @@ impl BlockChain { 4200000 } else if chain_id.is_main() { 0 - } else if 
chain_id.is_dag_test() || chain_id.is_test() { + } else if chain_id.is_dag_test() || chain_id.is_test() || chain_id.is_dev() { BlockNumber::MAX } else { 0 From e8f123fd6f92353f0edce26d8a14c2dc5fd9057b Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Fri, 13 Sep 2024 19:26:03 +0800 Subject: [PATCH 41/61] add is ancestor of command for reachability viewing --- chain/api/src/message.rs | 7 +++++- chain/api/src/service.rs | 24 ++++++++++++++++++- chain/service/src/chain_service.rs | 6 +++++ chain/src/chain.rs | 2 +- cmd/starcoin/src/chain/mod.rs | 2 ++ flexidag/src/blockdag.rs | 22 ++++++++++++++++- flexidag/src/consensusdb/consenses_state.rs | 6 +++++ rpc/api/src/chain/mod.rs | 8 +++++++ rpc/client/src/lib.rs | 11 ++++++++- rpc/server/src/module/chain_rpc.rs | 12 ++++++++++ .../src/fork_chain.rs | 8 +++++++ 11 files changed, 103 insertions(+), 5 deletions(-) diff --git a/chain/api/src/message.rs b/chain/api/src/message.rs index 97e5a8d60b..3e28820552 100644 --- a/chain/api/src/message.rs +++ b/chain/api/src/message.rs @@ -4,7 +4,7 @@ use crate::{ChainType, TransactionInfoWithProof}; use anyhow::Result; use starcoin_crypto::HashValue; -use starcoin_dag::consensusdb::consenses_state::DagStateView; +use starcoin_dag::consensusdb::consenses_state::{DagStateView, ReachabilityView}; use starcoin_dag::types::ghostdata::GhostdagData; use starcoin_service_registry::ServiceRequest; use starcoin_types::transaction::RichTransactionInfo; @@ -68,6 +68,10 @@ pub enum ChainRequest { GetDagStateView, CheckChainType, GetGhostdagData(HashValue), + IsAncestorOfCommand { + ancestor: HashValue, + descendants: Vec, + }, } impl ServiceRequest for ChainRequest { @@ -99,4 +103,5 @@ pub enum ChainResponse { DagStateView(Box), CheckChainType(ChainType), GhostdagDataOption(Box>), + IsAncestorOfCommand { reachability_view: ReachabilityView }, } diff --git a/chain/api/src/service.rs b/chain/api/src/service.rs index 4017174a14..4c78839d4a 100644 --- a/chain/api/src/service.rs +++ b/chain/api/src/service.rs @@ -5,7 +5,7 @@ use crate::message::{ChainRequest, ChainResponse}; use crate::{ChainType, TransactionInfoWithProof}; use anyhow::{bail, Result}; use starcoin_crypto::HashValue; -use starcoin_dag::consensusdb::consenses_state::DagStateView; +use starcoin_dag::consensusdb::consenses_state::{DagStateView, ReachabilityView}; use starcoin_dag::types::ghostdata::GhostdagData; use starcoin_service_registry::{ActorService, ServiceHandler, ServiceRef}; use starcoin_types::contract_event::{ContractEvent, ContractEventInfo}; @@ -149,6 +149,11 @@ pub trait ChainAsyncService: async fn get_dag_state(&self) -> Result; async fn check_chain_type(&self) -> Result; async fn get_ghostdagdata(&self, id: HashValue) -> Result>; + async fn is_ancestor_of( + &self, + ancestor: HashValue, + descendants: Vec, + ) -> Result; } #[async_trait::async_trait] @@ -486,4 +491,21 @@ where bail!("failed to get ghostdag data") } } + async fn is_ancestor_of( + &self, + ancestor: HashValue, + descendants: Vec, + ) -> Result { + let response = self + .send(ChainRequest::IsAncestorOfCommand { + ancestor, + descendants, + }) + .await??; + if let ChainResponse::IsAncestorOfCommand { reachability_view } = response { + Ok(reachability_view) + } else { + bail!("failed to get ghostdag data") + } + } } diff --git a/chain/service/src/chain_service.rs b/chain/service/src/chain_service.rs index 422a70130e..99537ffe5a 100644 --- a/chain/service/src/chain_service.rs +++ b/chain/service/src/chain_service.rs @@ -254,6 +254,12 @@ impl ServiceHandler for ChainReaderService { 
ChainRequest::GetGhostdagData(id) => Ok(ChainResponse::GhostdagDataOption(Box::new( self.inner.get_ghostdagdata(id)?, ))), + ChainRequest::IsAncestorOfCommand { + ancestor, + descendants, + } => Ok(ChainResponse::IsAncestorOfCommand { + reachability_view: self.inner.dag.is_ancestor_of(ancestor, descendants)?, + }), } } } diff --git a/chain/src/chain.rs b/chain/src/chain.rs index b52084ca48..82e75fa18d 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -1578,7 +1578,7 @@ impl BlockChain { if chain_id.is_vega() { 4000000 } else if chain_id.is_proxima() { - 700000 + 500 } else if chain_id.is_halley() { 4200000 } else if chain_id.is_main() { diff --git a/cmd/starcoin/src/chain/mod.rs b/cmd/starcoin/src/chain/mod.rs index c004e3c96f..7206ffbad9 100644 --- a/cmd/starcoin/src/chain/mod.rs +++ b/cmd/starcoin/src/chain/mod.rs @@ -12,6 +12,7 @@ mod get_txn_info_list_cmd; mod get_txn_infos_cmd; pub mod get_txn_proof_cmd; mod info_cmd; +mod is_ancestor_of_cmd; mod list_block_cmd; pub use epoch_info::*; @@ -24,4 +25,5 @@ pub use get_txn_info_cmd::*; pub use get_txn_info_list_cmd::*; pub use get_txn_infos_cmd::*; pub use info_cmd::*; +pub use is_ancestor_of_cmd::*; pub use list_block_cmd::*; diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index e766c6a3b4..6510baedc4 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -1,6 +1,8 @@ use super::reachability::{inquirer, reachability_service::MTReachabilityService}; use super::types::ghostdata::GhostdagData; -use crate::consensusdb::consenses_state::{DagState, DagStateReader, DagStateStore}; +use crate::consensusdb::consenses_state::{ + DagState, DagStateReader, DagStateStore, ReachabilityView, +}; use crate::consensusdb::prelude::{FlexiDagStorageConfig, StoreError}; use crate::consensusdb::schemadb::{GhostdagStoreReader, ReachabilityStore, REINDEX_ROOT_KEY}; use crate::consensusdb::{ @@ -579,4 +581,22 @@ impl BlockDAG { anyhow::Ok(()) } + + pub fn is_ancestor_of( + &self, + ancestor: Hash, + descendants: Vec, + ) -> anyhow::Result { + let de = descendants + .into_iter() + .filter(|descendant| { + self.check_ancestor_of(ancestor, vec![*descendant]) + .unwrap_or(false) + }) + .collect::>(); + anyhow::Ok(ReachabilityView { + ancestor, + descendants: de, + }) + } } diff --git a/flexidag/src/consensusdb/consenses_state.rs b/flexidag/src/consensusdb/consenses_state.rs index 229e790db3..8dcf852d3b 100644 --- a/flexidag/src/consensusdb/consenses_state.rs +++ b/flexidag/src/consensusdb/consenses_state.rs @@ -84,3 +84,9 @@ impl DagStateView { DagState { tips: self.tips } } } + +#[derive(Eq, PartialEq, Hash, Deserialize, Serialize, Clone, Debug, JsonSchema)] +pub struct ReachabilityView { + pub ancestor: Hash, + pub descendants: Vec, +} diff --git a/rpc/api/src/chain/mod.rs b/rpc/api/src/chain/mod.rs index ea4a80afd3..ae95e33f90 100644 --- a/rpc/api/src/chain/mod.rs +++ b/rpc/api/src/chain/mod.rs @@ -132,6 +132,14 @@ pub trait ChainApi { /// Get block ghostdag data #[rpc(name = "chain.get_ghostdagdata")] fn get_ghostdagdata(&self, block_hash: HashValue) -> FutureResult>; + + /// Check the ancestor and descendants' relationship + #[rpc(name = "chain.is_ancestor_of")] + fn is_ancestor_of( + &self, + ancestor: HashValue, + descendants: Vec, + ) -> FutureResult; } #[derive(Copy, Clone, Default, Serialize, Deserialize, JsonSchema)] diff --git a/rpc/client/src/lib.rs b/rpc/client/src/lib.rs index 569ff82333..d8fbc5ad47 100644 --- a/rpc/client/src/lib.rs +++ b/rpc/client/src/lib.rs @@ -21,7 +21,7 @@ use serde_json::Value; use 
starcoin_abi_types::{FunctionABI, ModuleABI, StructInstantiation}; use starcoin_account_api::AccountInfo; use starcoin_crypto::HashValue; -use starcoin_dag::consensusdb::consenses_state::DagStateView; +use starcoin_dag::consensusdb::consenses_state::{DagStateView, ReachabilityView}; use starcoin_logger::{prelude::*, LogPattern}; use starcoin_rpc_api::chain::{ GetBlockOption, GetBlocksOption, GetEventOption, GetTransactionOption, @@ -790,6 +790,15 @@ impl RpcClient { .map_err(map_err) } + pub fn is_ancestor_of( + &self, + ancestor: HashValue, + descendants: Vec, + ) -> anyhow::Result { + self.call_rpc_blocking(|inner| inner.chain_client.is_ancestor_of(ancestor, descendants)) + .map_err(map_err) + } + pub fn chain_get_blocks_by_number( &self, number: Option, diff --git a/rpc/server/src/module/chain_rpc.rs b/rpc/server/src/module/chain_rpc.rs index dedee3b0e6..ea78017804 100644 --- a/rpc/server/src/module/chain_rpc.rs +++ b/rpc/server/src/module/chain_rpc.rs @@ -485,6 +485,18 @@ where let fut = async move { service.get_ghostdagdata(block_hash).await }.map_err(map_err); Box::pin(fut.boxed()) } + + #[doc = " Check the ancestor and descendants\' relationship "] + fn is_ancestor_of( + &self, + ancestor: HashValue, + descendants: Vec, + ) -> FutureResult { + let service = self.service.clone(); + let fut = + async move { service.is_ancestor_of(ancestor, descendants).await }.map_err(map_err); + Box::pin(fut.boxed()) + } } fn try_decode_block_txns(state: &dyn StateView, block: &mut BlockView) -> anyhow::Result<()> { diff --git a/vm/starcoin-transactional-test-harness/src/fork_chain.rs b/vm/starcoin-transactional-test-harness/src/fork_chain.rs index bebaaa1d0d..bcb118ca1b 100644 --- a/vm/starcoin-transactional-test-harness/src/fork_chain.rs +++ b/vm/starcoin-transactional-test-harness/src/fork_chain.rs @@ -505,6 +505,14 @@ impl ChainApi for MockChainApi { fn get_ghostdagdata(&self, _block_hash: HashValue) -> FutureResult> { unimplemented!() } + + fn is_ancestor_of( + &self, + _ancestor: HashValue, + _descendants: Vec, + ) -> FutureResult { + unimplemented!() + } } fn try_decode_block_txns(state: &dyn StateView, block: &mut BlockView) -> anyhow::Result<()> { From dd1ee2adbab3e957bfaac6038b83187fed8a7bdc Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Fri, 13 Sep 2024 19:41:32 +0800 Subject: [PATCH 42/61] add command file --- cmd/starcoin/src/chain/is_ancestor_of_cmd.rs | 46 ++++++++++++++++++++ cmd/starcoin/src/lib.rs | 3 +- 2 files changed, 48 insertions(+), 1 deletion(-) create mode 100644 cmd/starcoin/src/chain/is_ancestor_of_cmd.rs diff --git a/cmd/starcoin/src/chain/is_ancestor_of_cmd.rs b/cmd/starcoin/src/chain/is_ancestor_of_cmd.rs new file mode 100644 index 0000000000..cfb9d74676 --- /dev/null +++ b/cmd/starcoin/src/chain/is_ancestor_of_cmd.rs @@ -0,0 +1,46 @@ +// Copyright (c) The Starcoin Core Contributors +// SPDX-License-Identifier: Apache-2.0 + +use std::str::FromStr; + +use crate::cli_state::CliState; +use crate::StarcoinOpt; +use anyhow::Result; +use clap::Parser; +use scmd::{CommandAction, ExecContext}; +use starcoin_crypto::HashValue; +use starcoin_dag::consensusdb::consenses_state::ReachabilityView; + +/// Get block info by number +#[derive(Debug, Parser, Clone)] +#[clap(name = "is-ancestor-of", alias = "is_ancestor_of")] +pub struct IsAncestorOfOpt { + #[clap(name = "ancestor", long, short = 'a')] + ancestor: String, + + #[clap(name = "descendants", long, short = 'd')] + descendants: Vec, +} + +pub struct IsAncestorOfCommand; + +impl CommandAction for IsAncestorOfCommand { + type 
State = CliState; + type GlobalOpt = StarcoinOpt; + type Opt = IsAncestorOfOpt; + type ReturnItem = ReachabilityView; + + fn run( + &self, + ctx: &ExecContext, + ) -> Result { + let opt = ctx.opt().clone(); + ctx.state().client().is_ancestor_of( + HashValue::from_str(&opt.ancestor)?, + opt.descendants + .into_iter() + .map(|id| HashValue::from_str(&id).map_err(|e| anyhow::anyhow!("{:?}", e))) + .collect::>>()?, + ) + } +} diff --git a/cmd/starcoin/src/lib.rs b/cmd/starcoin/src/lib.rs index bc2114cc75..4a5ef258fc 100644 --- a/cmd/starcoin/src/lib.rs +++ b/cmd/starcoin/src/lib.rs @@ -103,7 +103,8 @@ pub fn add_command( .subcommand(chain::GetTransactionInfoListCommand) .subcommand(chain::get_txn_proof_cmd::GetTransactionProofCommand) .subcommand(chain::GetBlockInfoCommand) - .subcommand(chain::GetDagStateCommand), + .subcommand(chain::GetDagStateCommand) + .subcommand(chain::IsAncestorOfCommand), ) .command( CustomCommand::with_name("txpool") From 109e0bd6ffa531ac6c25b65defb426fe6d7a01e6 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Fri, 13 Sep 2024 19:59:03 +0800 Subject: [PATCH 43/61] use 850000 --- chain/src/chain.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 82e75fa18d..0e9af4bd32 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -1578,7 +1578,7 @@ impl BlockChain { if chain_id.is_vega() { 4000000 } else if chain_id.is_proxima() { - 500 + 850000 } else if chain_id.is_halley() { 4200000 } else if chain_id.is_main() { From cbc7c551c220f6ca071ceee4f34105ba97851290 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Fri, 13 Sep 2024 20:54:19 +0800 Subject: [PATCH 44/61] add rpc json new command --- rpc/api/generated_rpc_schema/chain.json | 51 +++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/rpc/api/generated_rpc_schema/chain.json b/rpc/api/generated_rpc_schema/chain.json index 3f6243e7c9..e79576d8c2 100644 --- a/rpc/api/generated_rpc_schema/chain.json +++ b/rpc/api/generated_rpc_schema/chain.json @@ -4173,6 +4173,57 @@ } } } + }, + { + "name": "chain.is_ancestor_of", + "params": [ + { + "name": "ancestor", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "HashValue", + "type": "string", + "format": "HashValue" + } + }, + { + "name": "descendants", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Array_of_HashValue", + "type": "array", + "items": { + "type": "string", + "format": "HashValue" + } + } + } + ], + "result": { + "name": "starcoin_dag :: consensusdb :: consenses_state :: ReachabilityView", + "schema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "ReachabilityView", + "type": "object", + "required": [ + "ancestor", + "descendants" + ], + "properties": { + "ancestor": { + "type": "string", + "format": "HashValue" + }, + "descendants": { + "type": "array", + "items": { + "type": "string", + "format": "HashValue" + } + } + } + } + } } ] } \ No newline at end of file From 153c30df4459adba839e73c82da8f4ab2e6b7caa Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Sat, 14 Sep 2024 11:43:42 +0800 Subject: [PATCH 45/61] add some log for debug --- chain/src/chain.rs | 5 ++++- flexidag/src/blockdag.rs | 15 ++++++++++++++- kube/manifest/starcoin-proxima.yaml | 16 ++-------------- network/src/network_p2p_handle.rs | 2 +- .../block_connector/block_connector_service.rs | 1 + sync/src/parallel/executor.rs | 6 ++++++ sync/src/parallel/sender.rs | 2 ++ 7 files changed, 30 insertions(+), 17 deletions(-) diff --git 
a/chain/src/chain.rs b/chain/src/chain.rs index 0e9af4bd32..915917c858 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -1558,14 +1558,17 @@ impl BlockChain { } if parent_header.pruning_point() == block.header().pruning_point() { + info!("pruning point not changed, save dag state without prune. tips are {:?}, pruning point is {:?}", tips, block.header().pruning_point()); self.dag .save_dag_state(block.header().pruning_point(), DagState { tips })?; } else { let new_tips = dag.pruning_point_manager().prune( - &DagState { tips }, + &DagState { tips: tips.clone() }, parent_header.pruning_point(), block.header().pruning_point(), )?; + info!("pruning point changed, previous tips are: {:?}, save dag state with prune. tips are {:?}, previous pruning point is {:?}, current pruning point is {:?}", + tips, new_tips, parent_header.pruning_point(), block.header().pruning_point()); self.dag .save_dag_state(block.header().pruning_point(), DagState { tips: new_tips })?; } diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index 6510baedc4..dfb05ccd82 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -450,13 +450,21 @@ impl BlockDAG { ) -> anyhow::Result { let dag_state = self.get_dag_state(previous_header.pruning_point())?; let ghostdata = self.ghostdata(&dag_state.tips)?; - + info!( + "start to calculate the mergeset and tips for tips: {:?}, and last pruning point: {:?} and ghostdata: {:?}", + dag_state.tips, previous_header.pruning_point(), ghostdata, + ); let next_pruning_point = self.pruning_point_manager().next_pruning_point( previous_header.pruning_point(), &ghostdata, pruning_depth, pruning_finality, )?; + info!( + "the next pruning point is: {:?}, and the previous pruning point is: {:?}", + next_pruning_point, + previous_header.pruning_point() + ); if next_pruning_point == previous_header.pruning_point() { anyhow::Ok(MineNewDagBlockInfo { tips: dag_state.tips, @@ -474,6 +482,11 @@ impl BlockDAG { .ghostdag(&pruned_tips)? .mergeset_blues) .clone(); + info!( + "previous tips are: {:?}, the pruned tips are: {:?}, the mergeset blues are: {:?}, the next pruning point is: {:?}", + dag_state.tips, + pruned_tips, mergeset_blues, next_pruning_point + ); anyhow::Ok(MineNewDagBlockInfo { tips: pruned_tips, blue_blocks: mergeset_blues, diff --git a/kube/manifest/starcoin-proxima.yaml b/kube/manifest/starcoin-proxima.yaml index 71294b8665..dd8476ae67 100644 --- a/kube/manifest/starcoin-proxima.yaml +++ b/kube/manifest/starcoin-proxima.yaml @@ -23,13 +23,13 @@ spec: starcoin/node-pool: seed-pool containers: - name: starcoin - image: ghcr.io/starcoinorg/starcoin:dag-master + image: ghcr.io/starcoinorg/starcoin:pruning-point imagePullPolicy: Always command: - bash - -c args: - - rm -rf /sc-data/proxima/ /sc-data/proxima/starcoindb/db/starcoindb/LOCK; + - rm -rf /sc-data/proxima/starcoindb/db/starcoindb/LOCK /sc-data/proxima/genesis_config.json; id=$(echo -e $POD_NAME|awk -F'-' '{print $2}') && IFS='; ' read -r -a node_keys <<< $NODE_KEYS && node_key=${node_keys[$id]}; if [ ! 
-z $node_key ]; then @@ -70,18 +70,6 @@ spec: timeoutSeconds: 2 failureThreshold: 3 successThreshold: 1 - readinessProbe: - exec: - command: - - sh - - -c - - >- - /starcoin/starcoin -n proxima -d /sc-data node sync status|grep Synchronized - initialDelaySeconds: 10 - periodSeconds: 5 - timeoutSeconds: 2 - failureThreshold: 3 - successThreshold: 1 volumeClaimTemplates: - metadata: name: starcoin-volume diff --git a/network/src/network_p2p_handle.rs b/network/src/network_p2p_handle.rs index 8a61623fc0..95dc994e0d 100644 --- a/network/src/network_p2p_handle.rs +++ b/network/src/network_p2p_handle.rs @@ -97,7 +97,7 @@ impl BusinessLayerHandle for Networkp2pHandle { match Status::decode(&received_handshake[..]) { Result::Ok(status) => self.inner_handshake(peer_id, status), Err(err) => { - error!(target: "network-p2p", "Couldn't decode handshake packet sent by {}: {:?}: {}", peer_id, hex::encode(received_handshake), err); + error!(target: "network-p2p", "Couldn't decode handshake packet sent by {}, err: {}", peer_id, err); Err(rep::BAD_MESSAGE) } } diff --git a/sync/src/block_connector/block_connector_service.rs b/sync/src/block_connector/block_connector_service.rs index 758b912450..c31608bb21 100644 --- a/sync/src/block_connector/block_connector_service.rs +++ b/sync/src/block_connector/block_connector_service.rs @@ -395,6 +395,7 @@ where blue_blocks, pruning_point, } = if main_header.number() >= self.chain_service.get_main().get_pruning_height() { + info!("now calculate the next pruning point"); dag.calc_mergeset_and_tips(&main_header, pruning_depth, pruning_finality)? } else { let tips = dag.get_dag_state(HashValue::zero())?.tips; diff --git a/sync/src/parallel/executor.rs b/sync/src/parallel/executor.rs index 5279dec192..eb6014504a 100644 --- a/sync/src/parallel/executor.rs +++ b/sync/src/parallel/executor.rs @@ -80,6 +80,12 @@ impl DagBlockExecutor { }; let header = block.header().clone(); + info!( + "sync parallel worker {:p} received block: {:?}", + &self, + block.header().id() + ); + loop { match Self::waiting_for_parents( &self.dag, diff --git a/sync/src/parallel/sender.rs b/sync/src/parallel/sender.rs index 7db3f2c31c..0d0686d516 100644 --- a/sync/src/parallel/sender.rs +++ b/sync/src/parallel/sender.rs @@ -66,6 +66,7 @@ impl<'a> DagBlockSender<'a> { || block.header.parents_hash().contains(header_id) { executor.state = ExecuteState::Executing(block.id()); + info!("send block {:?} to executor {:p}", block.id(), &executor); executor .sender_to_executor .send(Some(block.clone())) @@ -83,6 +84,7 @@ impl<'a> DagBlockSender<'a> { match &executor.state { ExecuteState::Executed(_) => { executor.state = ExecuteState::Executing(block.id()); + info!("send block {:?} to executor {:p}", block.id(), &executor); executor .sender_to_executor .send(Some(block.clone())) From 165690dabf4cc6e0b5acef4ea1cd9848cf9afbab Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Wed, 18 Sep 2024 12:24:46 +0800 Subject: [PATCH 46/61] if this is the first pruning point, the previous one will be genesis --- chain/src/chain.rs | 20 ++++++++++-- flexidag/src/blockdag.rs | 31 ++++++++++--------- flexidag/src/prune/pruning_point_manager.rs | 30 +++++++++++------- flexidag/tests/tests.rs | 14 ++++++++- .../block_connector_service.rs | 20 +++++++++++- 5 files changed, 85 insertions(+), 30 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 915917c858..f51127c32d 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -19,6 +19,7 @@ use starcoin_crypto::HashValue; use starcoin_dag::blockdag::{BlockDAG, 
MineNewDagBlockInfo}; use starcoin_dag::consensusdb::consenses_state::DagState; use starcoin_dag::consensusdb::prelude::StoreError; +use starcoin_dag::consensusdb::schemadb::GhostdagStoreReader; use starcoin_executor::VMMetrics; use starcoin_logger::prelude::*; use starcoin_open_block::OpenedBlock; @@ -1369,17 +1370,30 @@ impl ChainReader for BlockChain { .storage .get_block_header_by_hash(header.parent_hash())? .ok_or_else(|| format_err!("cannot find parent block header"))?; - let ghostdata = self.dag().verify_and_ghostdata(uncles, header)?; + let next_ghostdata = self.dag().verify_and_ghostdata(uncles, header)?; if self.status().head().pruning_point() != HashValue::zero() { + let previous_ghostdata = if previous_header.pruning_point() == HashValue::zero() { + let genesis = self + .storage + .get_genesis()? + .ok_or_else(|| format_err!("the genesis id is none!"))?; + self.dag().storage.ghost_dag_store.get_data(genesis)? + } else { + self.dag() + .storage + .ghost_dag_store + .get_data(previous_header.pruning_point())? + }; self.dag().verify_pruning_point( previous_header.pruning_point(), + previous_ghostdata.as_ref(), header.pruning_point(), - &ghostdata, + &next_ghostdata, )?; } - Ok(ghostdata) + Ok(next_ghostdata) } fn is_dag_ancestor_of(&self, ancestor: HashValue, descendants: Vec) -> Result { diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index dfb05ccd82..352b4769e3 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -444,37 +444,38 @@ impl BlockDAG { pub fn calc_mergeset_and_tips( &self, - previous_header: &BlockHeader, + previous_pruning_point: HashValue, + previous_ghostdata: &GhostdagData, pruning_depth: u64, pruning_finality: u64, ) -> anyhow::Result { - let dag_state = self.get_dag_state(previous_header.pruning_point())?; - let ghostdata = self.ghostdata(&dag_state.tips)?; + let dag_state = self.get_dag_state(previous_pruning_point)?; + let next_ghostdata = self.ghostdata(&dag_state.tips)?; info!( - "start to calculate the mergeset and tips for tips: {:?}, and last pruning point: {:?} and ghostdata: {:?}", - dag_state.tips, previous_header.pruning_point(), ghostdata, + "start to calculate the mergeset and tips for tips: {:?}, and last pruning point: {:?} and next ghostdata: {:?}", + dag_state.tips, previous_pruning_point, next_ghostdata, ); let next_pruning_point = self.pruning_point_manager().next_pruning_point( - previous_header.pruning_point(), - &ghostdata, + previous_pruning_point, + previous_ghostdata, + &next_ghostdata, pruning_depth, pruning_finality, )?; info!( "the next pruning point is: {:?}, and the previous pruning point is: {:?}", - next_pruning_point, - previous_header.pruning_point() + next_pruning_point, previous_pruning_point ); - if next_pruning_point == previous_header.pruning_point() { + if next_pruning_point == previous_pruning_point { anyhow::Ok(MineNewDagBlockInfo { tips: dag_state.tips, - blue_blocks: (*ghostdata.mergeset_blues).clone(), + blue_blocks: (*next_ghostdata.mergeset_blues).clone(), pruning_point: next_pruning_point, }) } else { let pruned_tips = self.pruning_point_manager().prune( &dag_state, - previous_header.pruning_point(), + previous_pruning_point, next_pruning_point, )?; let mergeset_blues = (*self @@ -498,12 +499,14 @@ impl BlockDAG { pub fn verify_pruning_point( &self, previous_pruning_point: HashValue, + previous_ghostdata: &GhostdagData, next_pruning_point: HashValue, - ghostdata: &GhostdagData, + next_ghostdata: &GhostdagData, ) -> anyhow::Result<()> { let inside_next_pruning_point = 
self.pruning_point_manager().next_pruning_point( previous_pruning_point, - ghostdata, + previous_ghostdata, + next_ghostdata, G_PRUNING_DEPTH, G_PRUNING_FINALITY, )?; diff --git a/flexidag/src/prune/pruning_point_manager.rs b/flexidag/src/prune/pruning_point_manager.rs index bbad1e23ae..6e4a979a66 100644 --- a/flexidag/src/prune/pruning_point_manager.rs +++ b/flexidag/src/prune/pruning_point_manager.rs @@ -61,14 +61,14 @@ impl PruningPointManagerT { pub(crate) fn next_pruning_point( &self, - pruning_point: HashValue, - ghostdata: &GhostdagData, + previous_pruning_point: HashValue, + previous_ghostdata: &GhostdagData, + next_ghostdata: &GhostdagData, pruning_depth: u64, pruning_finality: u64, ) -> anyhow::Result { - let pruning_ghostdata = self.ghost_dag_store.get_data(pruning_point)?; let min_required_blue_score_for_next_pruning_point = - (self.finality_score(pruning_ghostdata.blue_score, pruning_finality) + 1) + (self.finality_score(previous_ghostdata.blue_score, pruning_finality) + 1) * pruning_finality; debug!( @@ -76,19 +76,27 @@ impl PruningPointManagerT { min_required_blue_score_for_next_pruning_point ); - let mut latest_pruning_ghost_data = self.ghost_dag_store.get_compact_data(pruning_point)?; - if min_required_blue_score_for_next_pruning_point + pruning_depth <= ghostdata.blue_score { + let mut latest_pruning_ghost_data = previous_ghostdata.to_compact(); + if min_required_blue_score_for_next_pruning_point + pruning_depth + <= next_ghostdata.blue_score + { + let ancestor = if previous_pruning_point == HashValue::zero() { + HashValue::new(ORIGIN) + } else { + previous_pruning_point + }; + for child in self.reachability_service().forward_chain_iterator( - pruning_point, - ghostdata.selected_parent, + ancestor, + next_ghostdata.selected_parent, true, ) { let next_pruning_ghostdata = self.ghost_dag_store.get_data(child)?; debug!( "child: {:?}, observer2.blue_score: {:?}, next_pruning_ghostdata.blue_score: {:?}", - child, ghostdata.blue_score, next_pruning_ghostdata.blue_score + child, next_ghostdata.blue_score, next_pruning_ghostdata.blue_score ); - if ghostdata.blue_score - next_pruning_ghostdata.blue_score < pruning_depth { + if next_ghostdata.blue_score - next_pruning_ghostdata.blue_score < pruning_depth { break; } if self.finality_score(next_pruning_ghostdata.blue_score, pruning_finality) @@ -106,7 +114,7 @@ impl PruningPointManagerT { } if latest_pruning_ghost_data.selected_parent == HashValue::new(ORIGIN) { - anyhow::Ok(pruning_point) // still genesis + anyhow::Ok(HashValue::zero()) // still genesis } else { anyhow::Ok(latest_pruning_ghost_data.selected_parent) } diff --git a/flexidag/tests/tests.rs b/flexidag/tests/tests.rs index 411063a29a..17ed8cb5f4 100644 --- a/flexidag/tests/tests.rs +++ b/flexidag/tests/tests.rs @@ -978,7 +978,19 @@ fn test_prune() -> anyhow::Result<()> { tips, blue_blocks: _, pruning_point, - } = dag.calc_mergeset_and_tips(&block_main_5, pruning_depth, pruning_finality)?; + } = dag.calc_mergeset_and_tips( + block_main_5.pruning_point(), + dag.ghostdata_by_hash(block_main_5.pruning_point())? + .ok_or_else(|| { + format_err!( + "failed to get the ghostdata by {:?}", + block_main_5.pruning_point() + ) + })? 
+ .as_ref(), + pruning_depth, + pruning_finality, + )?; assert_eq!(pruning_point, block_main_2.id()); assert_eq!(tips.len(), 1); diff --git a/sync/src/block_connector/block_connector_service.rs b/sync/src/block_connector/block_connector_service.rs index c31608bb21..ea95ca7e21 100644 --- a/sync/src/block_connector/block_connector_service.rs +++ b/sync/src/block_connector/block_connector_service.rs @@ -22,6 +22,7 @@ use starcoin_crypto::HashValue; use starcoin_dag::blockdag::BlockDAG; use starcoin_dag::blockdag::MineNewDagBlockInfo; use starcoin_executor::VMMetrics; +use starcoin_genesis::Genesis; use starcoin_logger::prelude::*; use starcoin_network::NetworkServiceRef; use starcoin_service_registry::{ @@ -396,7 +397,24 @@ where pruning_point, } = if main_header.number() >= self.chain_service.get_main().get_pruning_height() { info!("now calculate the next pruning point"); - dag.calc_mergeset_and_tips(&main_header, pruning_depth, pruning_finality)? + let previous_ghostdata = if main_header.pruning_point() == HashValue::zero() { + let genesis = ctx.get_shared::()?; + self.chain_service + .get_dag() + .ghostdata_by_hash(genesis.block().id())? + .ok_or_else(|| format_err!("Genesis block header should exist."))? + } else { + self.chain_service + .get_dag() + .ghostdata_by_hash(main_header.pruning_point())? + .ok_or_else(|| format_err!("Genesis block header should exist."))? + }; + dag.calc_mergeset_and_tips( + main_header.pruning_point(), + previous_ghostdata.as_ref(), + pruning_depth, + pruning_finality, + )? } else { let tips = dag.get_dag_state(HashValue::zero())?.tips; MineNewDagBlockInfo { From 61414cd9b3e72fe4687932d9fd0e0c18a00f3bb7 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Wed, 18 Sep 2024 15:41:28 +0800 Subject: [PATCH 47/61] save the dag state using the pruning point as the key and if it is 0, use genesis id --- chain/src/chain.rs | 18 +++++++-- flexidag/src/blockdag.rs | 43 +++++++++------------ flexidag/src/prune/pruning_point_manager.rs | 8 +--- flexidag/tests/tests.rs | 36 +++++++++++------ node/src/node.rs | 2 +- 5 files changed, 59 insertions(+), 48 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index f51127c32d..fb18651c6c 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -178,7 +178,7 @@ impl BlockChain { } fn init_dag(mut dag: BlockDAG, genesis_header: BlockHeader) -> Result { - match dag.get_dag_state(genesis_header.pruning_point()) { + match dag.get_dag_state(genesis_header.id()) { anyhow::Result::Ok(_dag_state) => (), Err(e) => match e.downcast::()? { StoreError::KeyNotFound(_) => { @@ -987,7 +987,12 @@ impl BlockChain { } pub fn get_dag_state(&self) -> Result { - self.dag.get_dag_state(self.status().head().pruning_point()) + let current_pruning_point = self.status().head().pruning_point(); + if current_pruning_point == HashValue::zero() { + self.dag.get_dag_state(self.genesis_hash) + } else { + self.dag.get_dag_state(current_pruning_point) + } } } @@ -1573,8 +1578,13 @@ impl BlockChain { if parent_header.pruning_point() == block.header().pruning_point() { info!("pruning point not changed, save dag state without prune. 
tips are {:?}, pruning point is {:?}", tips, block.header().pruning_point()); - self.dag - .save_dag_state(block.header().pruning_point(), DagState { tips })?; + if block.header().pruning_point() == HashValue::zero() { + self.dag + .save_dag_state(self.genesis_hash, DagState { tips })?; + } else { + self.dag + .save_dag_state(block.header().pruning_point(), DagState { tips })?; + } } else { let new_tips = dag.pruning_point_manager().prune( &DagState { tips: tips.clone() }, diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index 352b4769e3..8439e54665 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -113,10 +113,9 @@ impl BlockDAG { .write() .insert(origin, BlockHashes::new(vec![]))?; - let pruning_point = genesis.pruning_point(); self.commit(genesis, origin)?; self.save_dag_state( - pruning_point, + genesis_id, DagState { tips: vec![genesis_id], }, @@ -545,25 +544,30 @@ impl BlockDAG { self.ghost_dag_manager() .verify_and_ghostdata(blue_blocks, header) } - pub fn check_upgrade(&self, main: &BlockHeader) -> anyhow::Result<()> { + pub fn check_upgrade(&self, main: &BlockHeader, genesis_id: HashValue) -> anyhow::Result<()> { // set the state with key 0 - if main.version() == 0 { + if main.version() == 0 || main.version() == 1 { let result_dag_state = self .storage .state_store .read() - .get_state_by_hash(main.pruning_point()); + .get_state_by_hash(genesis_id); match result_dag_state { anyhow::Result::Ok(_dag_state) => (), Err(_) => { - let result_dag_state = - self.storage.state_store.read().get_state_by_hash(0.into()); + let result_dag_state = self + .storage + .state_store + .read() + .get_state_by_hash(HashValue::zero()); + match result_dag_state { - anyhow::Result::Ok(dag_state) => self - .storage - .state_store - .write() - .insert(main.pruning_point(), dag_state)?, + anyhow::Result::Ok(dag_state) => { + self.storage + .state_store + .write() + .insert(genesis_id, dag_state)?; + } Err(_) => { let dag_state = self .storage @@ -573,26 +577,15 @@ impl BlockDAG { self.storage .state_store .write() - .insert(0.into(), dag_state.clone())?; + .insert(HashValue::zero(), dag_state.clone())?; self.storage .state_store .write() - .insert(HashValue::zero(), dag_state)?; + .insert(genesis_id, dag_state)?; } } } } - return Ok(()); - } else if main.version() == 1 { - let dag_state = self - .storage - .state_store - .read() - .get_state_by_hash(0.into())?; - self.storage - .state_store - .write() - .insert(HashValue::zero(), dag_state)?; } anyhow::Ok(()) diff --git a/flexidag/src/prune/pruning_point_manager.rs b/flexidag/src/prune/pruning_point_manager.rs index 6e4a979a66..8f03f5cf57 100644 --- a/flexidag/src/prune/pruning_point_manager.rs +++ b/flexidag/src/prune/pruning_point_manager.rs @@ -80,14 +80,8 @@ impl PruningPointManagerT { if min_required_blue_score_for_next_pruning_point + pruning_depth <= next_ghostdata.blue_score { - let ancestor = if previous_pruning_point == HashValue::zero() { - HashValue::new(ORIGIN) - } else { - previous_pruning_point - }; - for child in self.reachability_service().forward_chain_iterator( - ancestor, + previous_pruning_point, next_ghostdata.selected_parent, true, ) { diff --git a/flexidag/tests/tests.rs b/flexidag/tests/tests.rs index 17ed8cb5f4..f7547b66de 100644 --- a/flexidag/tests/tests.rs +++ b/flexidag/tests/tests.rs @@ -748,6 +748,7 @@ fn add_and_print( .with_parent_hash(parent) .with_parents_hash(parents) .with_number(number) + .with_pruning_point(Hash::zero()) .build(); let start = Instant::now(); 
dag.commit(header.to_owned(), origin)?; @@ -869,7 +870,6 @@ fn test_big_data_commit() -> anyhow::Result<()> { anyhow::Result::Ok(()) } -#[ignore = "pruning will be tested in next release"] #[test] fn test_prune() -> anyhow::Result<()> { // initialzie the dag firstly @@ -968,26 +968,40 @@ fn test_prune() -> anyhow::Result<()> { // prunning process begins dag.save_dag_state( - Hash::zero(), + genesis.id(), DagState { tips: vec![block_red_3.id(), block_main_5.id()], }, )?; + let (previous_ghostdata, previous_pruning_point) = + if block_main_5.pruning_point() == Hash::zero() { + ( + dag.ghostdata_by_hash(genesis.id())?.ok_or_else(|| { + format_err!("failed to get the ghostdata by genesis: {:?}", genesis.id()) + })?, + genesis.id(), + ) + } else { + ( + dag.ghostdata_by_hash(block_main_5.pruning_point())? + .ok_or_else(|| { + format_err!( + "failed to get the ghostdata by pruning point: {:?}", + block_main_5.pruning_point() + ) + })?, + block_main_5.pruning_point(), + ) + }; + let MineNewDagBlockInfo { tips, blue_blocks: _, pruning_point, } = dag.calc_mergeset_and_tips( - block_main_5.pruning_point(), - dag.ghostdata_by_hash(block_main_5.pruning_point())? - .ok_or_else(|| { - format_err!( - "failed to get the ghostdata by {:?}", - block_main_5.pruning_point() - ) - })? - .as_ref(), + previous_pruning_point, + previous_ghostdata.as_ref(), pruning_depth, pruning_finality, )?; diff --git a/node/src/node.rs b/node/src/node.rs index 14391f7e59..6041b0ecf4 100644 --- a/node/src/node.rs +++ b/node/src/node.rs @@ -336,7 +336,7 @@ impl NodeService { upgrade_time.as_secs() ); - dag.check_upgrade(chain_info.status().head())?; + dag.check_upgrade(chain_info.status().head(), genesis.block().id())?; registry.put_shared(genesis).await?; From ec341bc8c8bcd5b32ec4f5a8590360f03f9f3b53 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Wed, 18 Sep 2024 18:12:50 +0800 Subject: [PATCH 48/61] merge dag master --- chain/src/verifier/mod.rs | 21 --------------------- sync/src/parallel/sender.rs | 5 ++++- 2 files changed, 4 insertions(+), 22 deletions(-) diff --git a/chain/src/verifier/mod.rs b/chain/src/verifier/mod.rs index a48a781581..5b4eba556e 100644 --- a/chain/src/verifier/mod.rs +++ b/chain/src/verifier/mod.rs @@ -425,28 +425,7 @@ impl BasicDagVerifier { current_chain.verify_and_ghostdata(uncles, header) } } -//TODO: Implement it. -pub struct DagVerifier; -impl BlockVerifier for DagVerifier { - fn verify_header(current_chain: &R, new_block_header: &BlockHeader) -> Result<()> - where - R: ChainReader, - { - BasicDagVerifier::verify_header(current_chain, new_block_header) - } - fn verify_blue_blocks( - current_chain: &R, - uncles: &[BlockHeader], - header: &BlockHeader, - ) -> Result - where - R: ChainReader, - { - current_chain.verify_and_ghostdata(uncles, header) - } -} -//TODO: Implement it. 
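(Side note on the check_upgrade() change a few hunks above: with block header versions 0 and 1 the DAG state used to be stored under a zero key, and it is now keyed by the genesis id, so node startup copies the legacy entry over when the genesis key is missing. The sketch below is simplified to a single legacy key and uses a plain HashMap in place of the real state store and its get_state_by_hash/insert API.)

    use std::collections::HashMap;

    // Sketch only: migrate the legacy DAG state (keyed by zero) to the genesis id
    // if nothing is stored under the genesis id yet.
    fn migrate_dag_state<V: Clone>(store: &mut HashMap<[u8; 32], V>, genesis_id: [u8; 32]) {
        let legacy_key = [0u8; 32];
        if !store.contains_key(&genesis_id) {
            if let Some(state) = store.get(&legacy_key).cloned() {
                store.insert(genesis_id, state);
            }
        }
    }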
pub struct DagVerifier; impl BlockVerifier for DagVerifier { fn verify_header(current_chain: &R, new_block_header: &BlockHeader) -> Result<()> diff --git a/sync/src/parallel/sender.rs b/sync/src/parallel/sender.rs index 0d0686d516..842e8830e0 100644 --- a/sync/src/parallel/sender.rs +++ b/sync/src/parallel/sender.rs @@ -102,7 +102,10 @@ impl<'a> DagBlockSender<'a> { match &executor.state { ExecuteState::Executed(_) => { executor.state = ExecuteState::Executing(block.id()); - executor.sender_to_executor.send(Some(block.clone())).await?; + executor + .sender_to_executor + .send(Some(block.clone())) + .await?; return anyhow::Ok(true); } From 7d24357587a95ef1c4d0a0da4dbd2d39bd888284 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Wed, 18 Sep 2024 18:53:46 +0800 Subject: [PATCH 49/61] get the tips by genesis id if the pruning point is 0 --- chain/src/chain.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index fb18651c6c..a728c048e0 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -1524,7 +1524,11 @@ impl BlockChain { new_tip_block.header().parent_hash() ) })?; - let mut tips = self.current_tips_hash(parent_header.pruning_point())?; + let mut tips = if parent_header.pruning_point() == HashValue::zero() { + self.current_tips_hash(self.genesis_hash)? + } else { + self.current_tips_hash(parent_header.pruning_point())? + }; let mut new_tips = vec![]; for hash in tips { From d5f27621813f3b8495705e0846b347bc4a6e8dd7 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Wed, 18 Sep 2024 20:43:45 +0800 Subject: [PATCH 50/61] fix test case as prievious version did --- .../test_write_dag_block_chain.rs | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/sync/src/block_connector/test_write_dag_block_chain.rs b/sync/src/block_connector/test_write_dag_block_chain.rs index 4cc259234f..2d93d21eed 100644 --- a/sync/src/block_connector/test_write_dag_block_chain.rs +++ b/sync/src/block_connector/test_write_dag_block_chain.rs @@ -3,7 +3,7 @@ #![allow(clippy::arithmetic_side_effects)] use crate::block_connector::test_write_block_chain::create_writeable_dag_block_chain; use crate::block_connector::WriteBlockChainService; -use anyhow::{bail, Ok}; +use anyhow::{bail, format_err, Ok}; use starcoin_account_api::AccountInfo; use starcoin_chain::{BlockChain, ChainReader}; use starcoin_chain_service::WriteableChainService; @@ -50,9 +50,19 @@ pub fn new_dag_block( let miner_address = *miner.address(); let block_chain = writeable_block_chain_service.get_main(); - let tips = block_chain - .current_tips_hash(block_chain.status().head().pruning_point()) - .expect("failed to get tips"); + let tips = if block_chain.status().head().pruning_point() == HashValue::zero() { + let genesis_id = block_chain + .get_storage() + .get_genesis()? 
+ .ok_or_else(|| format_err!("Genesis block is none"))?; + block_chain + .current_tips_hash(genesis_id) + .expect("failed to get tips") + } else { + block_chain + .current_tips_hash(block_chain.status().head().pruning_point()) + .expect("failed to get tips") + }; let (block_template, _) = block_chain .create_block_template( miner_address, From a70db1079e6209063bfdacfbf0b48b5a567260b8 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Wed, 18 Sep 2024 22:41:29 +0800 Subject: [PATCH 51/61] use genesis id to get the ghost data --- flexidag/src/blockdag.rs | 3 +- .../block_connector_service.rs | 36 +++++++++++-------- 2 files changed, 24 insertions(+), 15 deletions(-) diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index 8439e54665..57dda3bd9a 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -448,6 +448,7 @@ impl BlockDAG { pruning_depth: u64, pruning_finality: u64, ) -> anyhow::Result { + info!("start to calculate the mergeset and tips, previous pruning point: {:?}, previous ghostdata: {:?}", previous_pruning_point, previous_ghostdata); let dag_state = self.get_dag_state(previous_pruning_point)?; let next_ghostdata = self.ghostdata(&dag_state.tips)?; info!( @@ -465,7 +466,7 @@ impl BlockDAG { "the next pruning point is: {:?}, and the previous pruning point is: {:?}", next_pruning_point, previous_pruning_point ); - if next_pruning_point == previous_pruning_point { + if next_pruning_point == Hash::zero() { anyhow::Ok(MineNewDagBlockInfo { tips: dag_state.tips, blue_blocks: (*next_ghostdata.mergeset_blues).clone(), diff --git a/sync/src/block_connector/block_connector_service.rs b/sync/src/block_connector/block_connector_service.rs index ea95ca7e21..042741d29e 100644 --- a/sync/src/block_connector/block_connector_service.rs +++ b/sync/src/block_connector/block_connector_service.rs @@ -397,26 +397,34 @@ where pruning_point, } = if main_header.number() >= self.chain_service.get_main().get_pruning_height() { info!("now calculate the next pruning point"); - let previous_ghostdata = if main_header.pruning_point() == HashValue::zero() { - let genesis = ctx.get_shared::()?; - self.chain_service - .get_dag() - .ghostdata_by_hash(genesis.block().id())? - .ok_or_else(|| format_err!("Genesis block header should exist."))? - } else { - self.chain_service - .get_dag() - .ghostdata_by_hash(main_header.pruning_point())? - .ok_or_else(|| format_err!("Genesis block header should exist."))? - }; + let (previous_ghostdata, pruning_point) = + if main_header.pruning_point() == HashValue::zero() { + let genesis = ctx.get_shared::()?; + ( + self.chain_service + .get_dag() + .ghostdata_by_hash(genesis.block().id())? + .ok_or_else(|| format_err!("Genesis block header should exist."))?, + genesis.block().id(), + ) + } else { + ( + self.chain_service + .get_dag() + .ghostdata_by_hash(main_header.pruning_point())? + .ok_or_else(|| format_err!("Genesis block header should exist."))?, + main_header.pruning_point(), + ) + }; dag.calc_mergeset_and_tips( - main_header.pruning_point(), + pruning_point, previous_ghostdata.as_ref(), pruning_depth, pruning_finality, )? 
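// The branch above encodes the fallback applied throughout this series: a zero pruning
// point in a header is replaced by the genesis id before looking up ghostdata or DAG
// state. A minimal sketch of that rule, using a hypothetical helper name, would be:
//
// fn effective_pruning_point(pruning_point: HashValue, genesis_id: HashValue) -> HashValue {
//     if pruning_point == HashValue::zero() {
//         genesis_id
//     } else {
//         pruning_point
//     }
// }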
} else { - let tips = dag.get_dag_state(HashValue::zero())?.tips; + let genesis = ctx.get_shared::()?; + let tips = dag.get_dag_state(genesis.block().id())?.tips; MineNewDagBlockInfo { tips: tips.clone(), blue_blocks: dag.ghostdata(&tips)?.mergeset_blues.as_ref().clone(), From fd96b21316630339e1f286d361b7a93e97f85fc4 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Wed, 18 Sep 2024 23:49:07 +0800 Subject: [PATCH 52/61] add genesis in registry in test_miner_service --- miner/tests/miner_test.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/miner/tests/miner_test.rs b/miner/tests/miner_test.rs index f8996def5f..60042ef95b 100644 --- a/miner/tests/miner_test.rs +++ b/miner/tests/miner_test.rs @@ -30,6 +30,7 @@ async fn test_miner_service() { registry.put_shared(dag).await.unwrap(); let genesis_hash = genesis.block().id(); + registry.put_shared(genesis).await.unwrap(); let chain_header = storage .get_block_header_by_hash(genesis_hash) .unwrap() From 38232f7e198d4eced34f5edb635e72cf60dab638 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Thu, 19 Sep 2024 14:22:01 +0800 Subject: [PATCH 53/61] use 1000 as parallel buffer --- sync/src/tasks/block_sync_task.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sync/src/tasks/block_sync_task.rs b/sync/src/tasks/block_sync_task.rs index 4c27cecf9d..ef6fcb4273 100644 --- a/sync/src/tasks/block_sync_task.rs +++ b/sync/src/tasks/block_sync_task.rs @@ -452,7 +452,7 @@ where self.find_absent_ancestor(vec![block_header.clone()]) .await?; - if block_header.number() % 10000 == 0 + if block_header.number() % 1000 == 0 || block_header.number() >= self.target.target_id.number() { let parallel_execute = DagBlockSender::new( From f603fc33a785925b1afa60c9f4ac6b5ead38926c Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Thu, 19 Sep 2024 17:02:32 +0800 Subject: [PATCH 54/61] remove some logs add test case for pruning --- flexidag/src/blockdag.rs | 2 +- flexidag/tests/tests.rs | 34 ++++++++++++++++++++++++++++++++++ sync/src/parallel/sender.rs | 2 -- 3 files changed, 35 insertions(+), 3 deletions(-) diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index 57dda3bd9a..1af6edef27 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -466,7 +466,7 @@ impl BlockDAG { "the next pruning point is: {:?}, and the previous pruning point is: {:?}", next_pruning_point, previous_pruning_point ); - if next_pruning_point == Hash::zero() { + if next_pruning_point == Hash::zero() || next_pruning_point == previous_pruning_point { anyhow::Ok(MineNewDagBlockInfo { tips: dag_state.tips, blue_blocks: (*next_ghostdata.mergeset_blues).clone(), diff --git a/flexidag/tests/tests.rs b/flexidag/tests/tests.rs index f7547b66de..763843146a 100644 --- a/flexidag/tests/tests.rs +++ b/flexidag/tests/tests.rs @@ -1010,6 +1010,40 @@ fn test_prune() -> anyhow::Result<()> { assert_eq!(tips.len(), 1); assert_eq!(*tips.last().unwrap(), block_main_5.id()); + // prunning process begins + dag.save_dag_state( + pruning_point, + DagState { + tips: tips.clone(), + }, + )?; + + let block_main_6 = add_and_print( + 6, + block_main_5.id(), + tips, + genesis.parent_hash(), + &mut dag, + )?; + + let MineNewDagBlockInfo { + tips, + blue_blocks: _, + pruning_point, + } = dag.calc_mergeset_and_tips( + pruning_point, + dag.ghostdata_by_hash(pruning_point)?.ok_or_else(|| format_err!("failed to get the ghostdata for main 5 block"))?.as_ref(), + pruning_depth, + pruning_finality, + )?; + + let mut new_tips = vec![]; + tips.into_iter().filter(|id| 
dag.ghost_dag_manager().check_ancestor_of(*id, vec![block_main_6.id()]).unwrap()).for_each(|id| new_tips.push(id)); + + assert_eq!(pruning_point, block_main_2.id()); + assert_eq!(new_tips.len(), 1); + assert_eq!(*new_tips.last().unwrap(), block_main_6.id()); + anyhow::Result::Ok(()) } diff --git a/sync/src/parallel/sender.rs b/sync/src/parallel/sender.rs index 842e8830e0..4d30510b01 100644 --- a/sync/src/parallel/sender.rs +++ b/sync/src/parallel/sender.rs @@ -66,7 +66,6 @@ impl<'a> DagBlockSender<'a> { || block.header.parents_hash().contains(header_id) { executor.state = ExecuteState::Executing(block.id()); - info!("send block {:?} to executor {:p}", block.id(), &executor); executor .sender_to_executor .send(Some(block.clone())) @@ -84,7 +83,6 @@ impl<'a> DagBlockSender<'a> { match &executor.state { ExecuteState::Executed(_) => { executor.state = ExecuteState::Executing(block.id()); - info!("send block {:?} to executor {:p}", block.id(), &executor); executor .sender_to_executor .send(Some(block.clone())) From 6e6032db46db76d5429a9a32e427bdd956d03a69 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Fri, 20 Sep 2024 18:33:08 +0800 Subject: [PATCH 55/61] add pruning arguments in pruning methods to custom the network config --- chain/src/chain.rs | 5 +- cmd/db-exporter/src/force_deploy_output.rs | 3 + cmd/db-exporter/src/main.rs | 74 ++++++++++++++++--- cmd/generator/src/lib.rs | 8 +- flexidag/src/blockdag.rs | 44 +++++------ flexidag/src/prune/pruning_point_manager.rs | 34 +++++---- flexidag/tests/tests.rs | 46 +++++------- node/src/node.rs | 2 + .../block_connector_service.rs | 35 +++------ sync/src/tasks/test_tools.rs | 8 +- 10 files changed, 157 insertions(+), 102 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index a728c048e0..0a20d2c180 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -1390,6 +1390,7 @@ impl ChainReader for BlockChain { .ghost_dag_store .get_data(previous_header.pruning_point())? 
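// previous_ghostdata is the ghostdag data looked up from the parent header's pruning
// point; the verify_pruning_point call added just below recomputes the expected next
// pruning point from it and rejects the block when the header's pruning point disagrees.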
}; + self.dag().verify_pruning_point( previous_header.pruning_point(), previous_ghostdata.as_ref(), @@ -1613,11 +1614,11 @@ impl BlockChain { } else if chain_id.is_halley() { 4200000 } else if chain_id.is_main() { - 0 + 1 } else if chain_id.is_dag_test() || chain_id.is_test() || chain_id.is_dev() { BlockNumber::MAX } else { - 0 + 1 } } } diff --git a/cmd/db-exporter/src/force_deploy_output.rs b/cmd/db-exporter/src/force_deploy_output.rs index d7325bf7b7..03f614ea25 100644 --- a/cmd/db-exporter/src/force_deploy_output.rs +++ b/cmd/db-exporter/src/force_deploy_output.rs @@ -9,6 +9,7 @@ use anyhow::format_err; use clap::Parser; use starcoin_chain::{BlockChain, ChainReader, ChainWriter}; use starcoin_cmd::dev::dev_helper; +use starcoin_config::genesis_config::{G_PRUNING_DEPTH, G_PRUNING_FINALITY}; use starcoin_config::{BuiltinNetworkID, ChainNetwork}; use starcoin_dag::blockdag::{BlockDAG, DEFAULT_GHOSTDAG_K}; use starcoin_dag::consensusdb::prelude::FlexiDagStorageConfig; @@ -83,6 +84,8 @@ pub fn force_deploy_output( network_path.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), )?, + G_PRUNING_DEPTH, + G_PRUNING_FINALITY, ); let (chain_info, _) = Genesis::init_and_check_storage(&net, storage.clone(), dag.clone(), network_path.as_ref())?; diff --git a/cmd/db-exporter/src/main.rs b/cmd/db-exporter/src/main.rs index a7d0491b9a..301d928c65 100644 --- a/cmd/db-exporter/src/main.rs +++ b/cmd/db-exporter/src/main.rs @@ -18,6 +18,7 @@ use starcoin_chain::{ verifier::{BasicVerifier, ConsensusVerifier, FullVerifier, NoneVerifier, Verifier}, BlockChain, ChainReader, ChainWriter, }; +use starcoin_config::genesis_config::{G_PRUNING_DEPTH, G_PRUNING_FINALITY}; use starcoin_config::{BuiltinNetworkID, ChainNetwork, RocksdbConfig}; use starcoin_consensus::Consensus; use starcoin_crypto::HashValue; @@ -768,7 +769,12 @@ pub fn export_block_range( from_dir.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), )?; - let dag = starcoin_dag::blockdag::BlockDAG::new(DEFAULT_GHOSTDAG_K, dag_storage); + let dag = starcoin_dag::blockdag::BlockDAG::new( + DEFAULT_GHOSTDAG_K, + dag_storage, + G_PRUNING_DEPTH, + G_PRUNING_FINALITY, + ); let storage = Arc::new(Storage::new(StorageInstance::new_cache_and_db_instance( CacheStorage::new(None), @@ -895,7 +901,12 @@ pub fn apply_block( to_dir.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), )?; - let dag = starcoin_dag::blockdag::BlockDAG::new(DEFAULT_GHOSTDAG_K, dag_storage); + let dag = starcoin_dag::blockdag::BlockDAG::new( + DEFAULT_GHOSTDAG_K, + dag_storage, + G_PRUNING_DEPTH, + G_PRUNING_FINALITY, + ); // StarcoinVM::set_concurrency_level_once(num_cpus::get()); let (chain_info, _) = Genesis::init_and_check_storage(&net, storage.clone(), dag.clone(), to_dir.as_ref())?; @@ -986,7 +997,12 @@ pub fn startup_info_back( to_dir.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), )?; - let dag = starcoin_dag::blockdag::BlockDAG::new(DEFAULT_GHOSTDAG_K, dag_storage); + let dag = starcoin_dag::blockdag::BlockDAG::new( + DEFAULT_GHOSTDAG_K, + dag_storage, + G_PRUNING_DEPTH, + G_PRUNING_FINALITY, + ); let (chain_info, _) = Genesis::init_and_check_storage(&net, storage.clone(), dag.clone(), to_dir.as_ref())?; let chain = BlockChain::new( @@ -1038,7 +1054,12 @@ pub fn gen_block_transactions( to_dir.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), )?; - let dag = starcoin_dag::blockdag::BlockDAG::new(DEFAULT_GHOSTDAG_K, dag_storage); + let dag = starcoin_dag::blockdag::BlockDAG::new( + DEFAULT_GHOSTDAG_K, + dag_storage, + G_PRUNING_DEPTH, + 
G_PRUNING_FINALITY, + ); let (chain_info, _) = Genesis::init_and_check_storage(&net, storage.clone(), dag.clone(), to_dir.as_ref())?; let mut chain = BlockChain::new( @@ -1564,7 +1585,12 @@ pub fn export_snapshot( from_dir.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), )?; - let dag = starcoin_dag::blockdag::BlockDAG::new(DEFAULT_GHOSTDAG_K, dag_storage); + let dag = starcoin_dag::blockdag::BlockDAG::new( + DEFAULT_GHOSTDAG_K, + dag_storage, + G_PRUNING_DEPTH, + G_PRUNING_FINALITY, + ); let (chain_info, _) = Genesis::init_and_check_storage(&net, storage.clone(), dag.clone(), from_dir.as_ref())?; let chain = BlockChain::new( @@ -1916,7 +1942,12 @@ pub fn apply_snapshot( to_dir.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), )?; - let dag = starcoin_dag::blockdag::BlockDAG::new(DEFAULT_GHOSTDAG_K, dag_storage); + let dag = starcoin_dag::blockdag::BlockDAG::new( + DEFAULT_GHOSTDAG_K, + dag_storage, + G_PRUNING_DEPTH, + G_PRUNING_FINALITY, + ); let (chain_info, _) = Genesis::init_and_check_storage(&net, storage.clone(), dag.clone(), to_dir.as_ref())?; @@ -2258,7 +2289,12 @@ pub fn gen_turbo_stm_transactions(to_dir: PathBuf, block_num: Option) -> an to_dir.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), )?; - let dag = starcoin_dag::blockdag::BlockDAG::new(DEFAULT_GHOSTDAG_K, dag_storage); + let dag = starcoin_dag::blockdag::BlockDAG::new( + DEFAULT_GHOSTDAG_K, + dag_storage, + G_PRUNING_DEPTH, + G_PRUNING_FINALITY, + ); let (chain_info, _) = Genesis::init_and_check_storage(&net, storage.clone(), dag.clone(), to_dir.as_ref())?; let mut chain = BlockChain::new( @@ -2290,7 +2326,12 @@ pub fn apply_turbo_stm_block( to_dir.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), )?; - let dag = starcoin_dag::blockdag::BlockDAG::new(DEFAULT_GHOSTDAG_K, dag_storage); + let dag = starcoin_dag::blockdag::BlockDAG::new( + DEFAULT_GHOSTDAG_K, + dag_storage, + G_PRUNING_DEPTH, + G_PRUNING_FINALITY, + ); let (chain_info_seq, _) = Genesis::init_and_check_storage(&net, storage_seq.clone(), dag.clone(), to_dir.as_ref())?; let mut chain_seq = BlockChain::new( @@ -2354,7 +2395,12 @@ pub fn apply_turbo_stm_block( to_dir.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), )?; - let dag = starcoin_dag::blockdag::BlockDAG::new(DEFAULT_GHOSTDAG_K, dag_storage); + let dag = starcoin_dag::blockdag::BlockDAG::new( + DEFAULT_GHOSTDAG_K, + dag_storage, + G_PRUNING_DEPTH, + G_PRUNING_FINALITY, + ); let (chain_info_stm, _) = Genesis::init_and_check_storage( &net, storage_stm.clone(), @@ -2418,6 +2464,8 @@ pub fn verify_block( from_dir.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), )?, + G_PRUNING_DEPTH, + G_PRUNING_FINALITY, ); let (chain_info, _) = Genesis::init_and_check_storage(&net, storage.clone(), dag.clone(), from_dir.as_ref())?; @@ -2532,6 +2580,8 @@ pub fn block_output( from_dir.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), )?, + G_PRUNING_DEPTH, + G_PRUNING_FINALITY, ); let (chain_info, _) = Genesis::init_and_check_storage(&net, storage.clone(), dag.clone(), from_dir.as_ref())?; @@ -2577,6 +2627,8 @@ pub fn apply_block_output( to_dir.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), )?, + G_PRUNING_DEPTH, + G_PRUNING_FINALITY, ); let (_chain_info, _) = Genesis::init_and_check_storage(&net, storage.clone(), dag.clone(), to_dir.as_ref())?; @@ -2638,6 +2690,8 @@ fn save_startup_info( to_dir.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), )?, + G_PRUNING_DEPTH, + G_PRUNING_FINALITY, ); let (_chain_info, _) = Genesis::init_and_check_storage(&net, 
storage.clone(), dag, to_dir.as_ref())?; @@ -2674,6 +2728,8 @@ fn token_supply( from_dir.join("dag/db/starcoindb"), FlexiDagStorageConfig::new(), )?, + G_PRUNING_DEPTH, + G_PRUNING_FINALITY, ); let (chain_info, _) = Genesis::init_and_check_storage(&net, storage.clone(), dag.clone(), from_dir.as_ref())?; diff --git a/cmd/generator/src/lib.rs b/cmd/generator/src/lib.rs index 125a50a225..4ed71b1d8e 100644 --- a/cmd/generator/src/lib.rs +++ b/cmd/generator/src/lib.rs @@ -5,6 +5,7 @@ use anyhow::{bail, Result}; use starcoin_account::account_storage::AccountStorage; use starcoin_account::AccountManager; use starcoin_account_api::AccountInfo; +use starcoin_config::genesis_config::{G_PRUNING_DEPTH, G_PRUNING_FINALITY}; use starcoin_config::{NodeConfig, StarcoinOpt}; use starcoin_dag::blockdag::{BlockDAG, DEFAULT_GHOSTDAG_K}; use starcoin_genesis::Genesis; @@ -36,7 +37,12 @@ pub fn init_or_load_data_dir( config.storage.dag_dir(), config.storage.clone().into(), )?; - let dag = starcoin_dag::blockdag::BlockDAG::new(DEFAULT_GHOSTDAG_K, dag_storage); + let dag = starcoin_dag::blockdag::BlockDAG::new( + DEFAULT_GHOSTDAG_K, + dag_storage, + G_PRUNING_DEPTH, + G_PRUNING_FINALITY, + ); let (chain_info, _genesis) = Genesis::init_and_check_storage( config.net(), storage.clone(), diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index 1af6edef27..b97459a8d9 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -56,7 +56,7 @@ pub struct BlockDAG { } impl BlockDAG { - pub fn new(k: KType, db: FlexiDagStorage) -> Self { + pub fn new(k: KType, db: FlexiDagStorage, pruning_depth: u64, pruning_finality: u64) -> Self { let ghostdag_store = db.ghost_dag_store.clone(); let header_store = db.header_store.clone(); let relations_store = db.relations_store.clone(); @@ -69,7 +69,12 @@ impl BlockDAG { header_store, reachability_service.clone(), ); - let pruning_point_manager = PruningPointManager::new(reachability_service, ghostdag_store); + let pruning_point_manager = PruningPointManager::new( + reachability_service, + ghostdag_store, + pruning_depth, + pruning_finality, + ); Self { ghostdag_manager, @@ -84,13 +89,22 @@ impl BlockDAG { ..Default::default() }; let dag_storage = FlexiDagStorage::create_from_path(temp_dir(), config)?; - Ok(Self::new(DEFAULT_GHOSTDAG_K, dag_storage)) + Ok(Self::new( + DEFAULT_GHOSTDAG_K, + dag_storage, + G_PRUNING_DEPTH, + G_PRUNING_FINALITY, + )) } - pub fn create_for_testing_with_parameters(k: KType) -> anyhow::Result { + pub fn create_for_testing_with_parameters( + k: KType, + pruning_depth: u64, + pruning_finality: u64, + ) -> anyhow::Result { let dag_storage = FlexiDagStorage::create_from_path(temp_dir(), FlexiDagStorageConfig::default())?; - Ok(Self::new(k, dag_storage)) + Ok(Self::new(k, dag_storage, pruning_depth, pruning_finality)) } pub fn has_dag_block(&self, hash: Hash) -> anyhow::Result { @@ -445,8 +459,6 @@ impl BlockDAG { &self, previous_pruning_point: HashValue, previous_ghostdata: &GhostdagData, - pruning_depth: u64, - pruning_finality: u64, ) -> anyhow::Result { info!("start to calculate the mergeset and tips, previous pruning point: {:?}, previous ghostdata: {:?}", previous_pruning_point, previous_ghostdata); let dag_state = self.get_dag_state(previous_pruning_point)?; @@ -459,8 +471,6 @@ impl BlockDAG { previous_pruning_point, previous_ghostdata, &next_ghostdata, - pruning_depth, - pruning_finality, )?; info!( "the next pruning point is: {:?}, and the previous pruning point is: {:?}", @@ -507,24 +517,8 @@ impl BlockDAG { 
previous_pruning_point, previous_ghostdata, next_ghostdata, - G_PRUNING_DEPTH, - G_PRUNING_FINALITY, )?; - // if (block_header.chain_id().is_vega() - // || block_header.chain_id().is_proxima() - // || block_header.chain_id().is_halley()) - // && block_header.pruning_point() == HashValue::zero() - // { - // if next_pruning_point == genesis_id { - // return anyhow::Ok(()); - // } else { - // bail!( - // "pruning point is not correct, it should update the next pruning point: {}", - // next_pruning_point - // ); - // } - // } if next_pruning_point != inside_next_pruning_point { bail!("pruning point is not correct, the local next pruning point is {}, but the block header pruning point is {}", next_pruning_point, inside_next_pruning_point); } diff --git a/flexidag/src/prune/pruning_point_manager.rs b/flexidag/src/prune/pruning_point_manager.rs index 8f03f5cf57..0e1a2dd1b4 100644 --- a/flexidag/src/prune/pruning_point_manager.rs +++ b/flexidag/src/prune/pruning_point_manager.rs @@ -1,6 +1,5 @@ use starcoin_crypto::HashValue; -use starcoin_logger::prelude::debug; -use starcoin_types::blockhash::ORIGIN; +use starcoin_logger::prelude::{debug, info}; use crate::reachability::reachability_service::ReachabilityService; use crate::{ @@ -16,16 +15,22 @@ use crate::{ pub struct PruningPointManagerT { reachability_service: MTReachabilityService, ghost_dag_store: DbGhostdagStore, + pruning_depth: u64, + pruning_finality: u64, } impl PruningPointManagerT { pub fn new( reachability_service: MTReachabilityService, ghost_dag_store: DbGhostdagStore, + pruning_depth: u64, + pruning_finality: u64, ) -> Self { Self { reachability_service, ghost_dag_store, + pruning_depth, + pruning_finality, } } @@ -33,8 +38,8 @@ impl PruningPointManagerT { self.reachability_service.clone() } - pub fn finality_score(&self, blue_score: u64, pruning_finality: u64) -> u64 { - blue_score / pruning_finality + pub fn finality_score(&self, blue_score: u64) -> u64 { + blue_score / self.pruning_finality } pub fn prune( @@ -64,12 +69,9 @@ impl PruningPointManagerT { previous_pruning_point: HashValue, previous_ghostdata: &GhostdagData, next_ghostdata: &GhostdagData, - pruning_depth: u64, - pruning_finality: u64, ) -> anyhow::Result { let min_required_blue_score_for_next_pruning_point = - (self.finality_score(previous_ghostdata.blue_score, pruning_finality) + 1) - * pruning_finality; + (self.finality_score(previous_ghostdata.blue_score) + 1) * self.pruning_finality; debug!( "min_required_blue_score_for_next_pruning_point: {:?}", @@ -77,7 +79,7 @@ impl PruningPointManagerT { ); let mut latest_pruning_ghost_data = previous_ghostdata.to_compact(); - if min_required_blue_score_for_next_pruning_point + pruning_depth + if min_required_blue_score_for_next_pruning_point + self.pruning_depth <= next_ghostdata.blue_score { for child in self.reachability_service().forward_chain_iterator( @@ -90,11 +92,13 @@ impl PruningPointManagerT { "child: {:?}, observer2.blue_score: {:?}, next_pruning_ghostdata.blue_score: {:?}", child, next_ghostdata.blue_score, next_pruning_ghostdata.blue_score ); - if next_ghostdata.blue_score - next_pruning_ghostdata.blue_score < pruning_depth { + if next_ghostdata.blue_score - next_pruning_ghostdata.blue_score + < self.pruning_depth + { break; } - if self.finality_score(next_pruning_ghostdata.blue_score, pruning_finality) - > self.finality_score(latest_pruning_ghost_data.blue_score, pruning_finality) + if self.finality_score(next_pruning_ghostdata.blue_score) + > self.finality_score(latest_pruning_ghost_data.blue_score) { 
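// finality_score is integer division (blue_score / pruning_finality), so this condition
// only holds once the candidate selected-chain block has entered a later finality window
// than the best candidate recorded so far; with pruning_finality = 3, for instance, blue
// scores 0..=2, 3..=5 and 6..=8 fall into windows 0, 1 and 2 (illustrative values only).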
latest_pruning_ghost_data = CompactGhostdagData { blue_score: next_pruning_ghostdata.blue_score, @@ -104,10 +108,12 @@ impl PruningPointManagerT { } } - println!("prune point: {:?}", latest_pruning_ghost_data); + info!("prune point: {:?}", latest_pruning_ghost_data); } - if latest_pruning_ghost_data.selected_parent == HashValue::new(ORIGIN) { + if latest_pruning_ghost_data.selected_parent + == previous_ghostdata.to_compact().selected_parent + { anyhow::Ok(HashValue::zero()) // still genesis } else { anyhow::Ok(latest_pruning_ghost_data.selected_parent) diff --git a/flexidag/tests/tests.rs b/flexidag/tests/tests.rs index 763843146a..856f75f37a 100644 --- a/flexidag/tests/tests.rs +++ b/flexidag/tests/tests.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use anyhow::{bail, format_err, Ok, Result}; +use starcoin_config::genesis_config::{G_PRUNING_DEPTH, G_PRUNING_FINALITY}; use starcoin_crypto::HashValue as Hash; use starcoin_dag::{ blockdag::{BlockDAG, MineNewDagBlockInfo}, @@ -877,7 +878,8 @@ fn test_prune() -> anyhow::Result<()> { let pruning_depth = 4; let pruning_finality = 3; - let mut dag = BlockDAG::create_for_testing_with_parameters(k).unwrap(); + let mut dag = + BlockDAG::create_for_testing_with_parameters(k, pruning_depth, pruning_finality).unwrap(); let origin = BlockHeaderBuilder::random().with_number(0).build(); let genesis = BlockHeader::dag_genesis_random_with_parent(origin)?; @@ -999,32 +1001,16 @@ fn test_prune() -> anyhow::Result<()> { tips, blue_blocks: _, pruning_point, - } = dag.calc_mergeset_and_tips( - previous_pruning_point, - previous_ghostdata.as_ref(), - pruning_depth, - pruning_finality, - )?; + } = dag.calc_mergeset_and_tips(previous_pruning_point, previous_ghostdata.as_ref())?; assert_eq!(pruning_point, block_main_2.id()); assert_eq!(tips.len(), 1); assert_eq!(*tips.last().unwrap(), block_main_5.id()); - // prunning process begins - dag.save_dag_state( - pruning_point, - DagState { - tips: tips.clone(), - }, - )?; + // prunning process begins + dag.save_dag_state(pruning_point, DagState { tips: tips.clone() })?; - let block_main_6 = add_and_print( - 6, - block_main_5.id(), - tips, - genesis.parent_hash(), - &mut dag, - )?; + let block_main_6 = add_and_print(6, block_main_5.id(), tips, genesis.parent_hash(), &mut dag)?; let MineNewDagBlockInfo { tips, @@ -1032,13 +1018,19 @@ fn test_prune() -> anyhow::Result<()> { pruning_point, } = dag.calc_mergeset_and_tips( pruning_point, - dag.ghostdata_by_hash(pruning_point)?.ok_or_else(|| format_err!("failed to get the ghostdata for main 5 block"))?.as_ref(), - pruning_depth, - pruning_finality, + dag.ghostdata_by_hash(pruning_point)? + .ok_or_else(|| format_err!("failed to get the ghostdata for main 5 block"))? 
+ .as_ref(), )?; let mut new_tips = vec![]; - tips.into_iter().filter(|id| dag.ghost_dag_manager().check_ancestor_of(*id, vec![block_main_6.id()]).unwrap()).for_each(|id| new_tips.push(id)); + tips.into_iter() + .filter(|id| { + dag.ghost_dag_manager() + .check_ancestor_of(*id, vec![block_main_6.id()]) + .unwrap() + }) + .for_each(|id| new_tips.push(id)); assert_eq!(pruning_point, block_main_2.id()); assert_eq!(new_tips.len(), 1); @@ -1052,7 +1044,9 @@ fn test_verification_blue_block() -> anyhow::Result<()> { // initialzie the dag firstly let k = 5; - let mut dag = BlockDAG::create_for_testing_with_parameters(k).unwrap(); + let mut dag = + BlockDAG::create_for_testing_with_parameters(k, G_PRUNING_DEPTH, G_PRUNING_FINALITY) + .unwrap(); let origin = BlockHeaderBuilder::random().with_number(0).build(); let genesis = BlockHeader::dag_genesis_random_with_parent(origin)?; diff --git a/node/src/node.rs b/node/src/node.rs index 6041b0ecf4..a59468e1e5 100644 --- a/node/src/node.rs +++ b/node/src/node.rs @@ -321,6 +321,8 @@ impl NodeService { let dag = starcoin_dag::blockdag::BlockDAG::new( KType::try_from(G_BASE_MAX_UNCLES_PER_BLOCK)?, dag_storage.clone(), + config.base().net().genesis_config().pruning_depth, + config.base().net().genesis_config().pruning_finality, ); registry.put_shared(dag.clone()).await?; let (chain_info, genesis) = Genesis::init_and_check_storage( diff --git a/sync/src/block_connector/block_connector_service.rs b/sync/src/block_connector/block_connector_service.rs index 042741d29e..1e4d554ca6 100644 --- a/sync/src/block_connector/block_connector_service.rs +++ b/sync/src/block_connector/block_connector_service.rs @@ -383,45 +383,32 @@ where let main_header = self.chain_service.get_main().status().head().clone(); let dag = self.chain_service.get_dag(); - let (pruning_depth, pruning_finality) = ctx - .get_shared::>()? - .base() - .net() - .pruning_config(); - - // which height to prune the DAG - let MineNewDagBlockInfo { tips, blue_blocks, pruning_point, } = if main_header.number() >= self.chain_service.get_main().get_pruning_height() { - info!("now calculate the next pruning point"); - let (previous_ghostdata, pruning_point) = - if main_header.pruning_point() == HashValue::zero() { - let genesis = ctx.get_shared::()?; - ( + let (previous_ghostdata, pruning_point) = if main_header.pruning_point() + == HashValue::zero() + { + let genesis = ctx.get_shared::()?; + ( self.chain_service .get_dag() .ghostdata_by_hash(genesis.block().id())? - .ok_or_else(|| format_err!("Genesis block header should exist."))?, + .ok_or_else(|| format_err!("The ghostdata of Genesis block header dose not exist., genesis id: {:?}", genesis.block().id()))?, genesis.block().id(), ) - } else { - ( + } else { + ( self.chain_service .get_dag() .ghostdata_by_hash(main_header.pruning_point())? - .ok_or_else(|| format_err!("Genesis block header should exist."))?, + .ok_or_else(|| format_err!("The ghostdata of the pruning point does not exist. pruning point id: {:?}", main_header.pruning_point()))?, main_header.pruning_point(), ) - }; - dag.calc_mergeset_and_tips( - pruning_point, - previous_ghostdata.as_ref(), - pruning_depth, - pruning_finality, - )? + }; + dag.calc_mergeset_and_tips(pruning_point, previous_ghostdata.as_ref())? 
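// calc_mergeset_and_tips yields the MineNewDagBlockInfo destructured above: the tips the
// new block should reference, the mergeset blues, and the pruning point to stamp into the
// next template. Blocks below the pruning height take the else branch instead and simply
// reuse the tips stored under the genesis id.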
} else { let genesis = ctx.get_shared::()?; let tips = dag.get_dag_state(genesis.block().id())?.tips; diff --git a/sync/src/tasks/test_tools.rs b/sync/src/tasks/test_tools.rs index 6f015367f4..2124dda442 100644 --- a/sync/src/tasks/test_tools.rs +++ b/sync/src/tasks/test_tools.rs @@ -12,6 +12,7 @@ use pin_utils::core_reexport::time::Duration; use starcoin_account_api::AccountInfo; use starcoin_chain_api::ChainReader; use starcoin_chain_service::ChainReaderService; +use starcoin_config::genesis_config::{G_PRUNING_DEPTH, G_PRUNING_FINALITY}; use starcoin_config::{BuiltinNetworkID, ChainNetwork, NodeConfig, RocksdbConfig}; use starcoin_dag::blockdag::DEFAULT_GHOSTDAG_K; use starcoin_dag::consensusdb::prelude::FlexiDagStorageConfig; @@ -59,7 +60,12 @@ impl SyncTestSystem { FlexiDagStorageConfig::new(), ) .expect("init dag storage fail."); - let dag = starcoin_dag::blockdag::BlockDAG::new(DEFAULT_GHOSTDAG_K, dag_storage); // local dag + let dag = starcoin_dag::blockdag::BlockDAG::new( + DEFAULT_GHOSTDAG_K, + dag_storage, + G_PRUNING_DEPTH, + G_PRUNING_FINALITY, + ); // local dag let chain_info = genesis.execute_genesis_block(config.net(), storage.clone(), dag.clone())?; From 41d5def6103973c4b86d78ce611b09685ae35519 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Mon, 23 Sep 2024 16:49:52 +0800 Subject: [PATCH 56/61] add test case for pruning calculation and pruning --- flexidag/src/ghostdag/protocol.rs | 1 + flexidag/tests/tests.rs | 69 ++++++++++++++++++++----------- 2 files changed, 47 insertions(+), 23 deletions(-) diff --git a/flexidag/src/ghostdag/protocol.rs b/flexidag/src/ghostdag/protocol.rs index 4e0be6e96e..30567d473a 100644 --- a/flexidag/src/ghostdag/protocol.rs +++ b/flexidag/src/ghostdag/protocol.rs @@ -222,6 +222,7 @@ impl< .collect::>() { if header.number() < 10000000 { + // no bail before 10000000 warn!("The data of blue set is not equal when executing the block: {:?}, for {:?}, checking data: {:?}", header.id(), blue_blocks.iter().map(|header| header.id()).collect::>(), new_block_data.mergeset_blues); } else { bail!("The data of blue set is not equal when executing the block: {:?}, for {:?}, checking data: {:?}", header.id(), blue_blocks.iter().map(|header| header.id()).collect::>(), new_block_data.mergeset_blues); diff --git a/flexidag/tests/tests.rs b/flexidag/tests/tests.rs index 856f75f37a..e76d3b8cf7 100644 --- a/flexidag/tests/tests.rs +++ b/flexidag/tests/tests.rs @@ -23,6 +23,7 @@ use starcoin_types::{ }; use std::{ + collections::HashSet, ops::{Deref, DerefMut}, sync::Arc, time::Instant, @@ -737,11 +738,12 @@ fn add_and_print_with_ghostdata( Ok(header) } -fn add_and_print( +fn add_and_print_with_pruning_point( number: BlockNumber, parent: Hash, parents: Vec, origin: Hash, + pruning_point: Hash, dag: &mut BlockDAG, ) -> anyhow::Result { let header_builder = BlockHeaderBuilder::random(); @@ -749,7 +751,7 @@ fn add_and_print( .with_parent_hash(parent) .with_parents_hash(parents) .with_number(number) - .with_pruning_point(Hash::zero()) + .with_pruning_point(pruning_point) .build(); let start = Instant::now(); dag.commit(header.to_owned(), origin)?; @@ -768,6 +770,16 @@ fn add_and_print( Ok(header) } +fn add_and_print( + number: BlockNumber, + parent: Hash, + parents: Vec, + origin: Hash, + dag: &mut BlockDAG, +) -> anyhow::Result { + add_and_print_with_pruning_point(number, parent, parents, origin, Hash::zero(), dag) +} + #[test] fn test_dag_mergeset() -> anyhow::Result<()> { // initialzie the dag firstly @@ -968,7 +980,6 @@ fn test_prune() -> anyhow::Result<()> { 
assert_eq!(observer3.blue_score, observer2.blue_score); assert_eq!(observer3.selected_parent, observer2.selected_parent); - // prunning process begins dag.save_dag_state( genesis.id(), DagState { @@ -976,6 +987,7 @@ fn test_prune() -> anyhow::Result<()> { }, )?; + // prunning process begins let (previous_ghostdata, previous_pruning_point) = if block_main_5.pruning_point() == Hash::zero() { ( @@ -997,6 +1009,7 @@ fn test_prune() -> anyhow::Result<()> { ) }; + // test the pruning point calculation let MineNewDagBlockInfo { tips, blue_blocks: _, @@ -1007,34 +1020,44 @@ fn test_prune() -> anyhow::Result<()> { assert_eq!(tips.len(), 1); assert_eq!(*tips.last().unwrap(), block_main_5.id()); - // prunning process begins - dag.save_dag_state(pruning_point, DagState { tips: tips.clone() })?; + // test the pruning logic + + let block_main_6 = add_and_print( + 6, + block_main_5.id(), + tips.clone(), + genesis.parent_hash(), + &mut dag, + )?; + let block_main_6_1 = + add_and_print(6, block_main_5.id(), tips, genesis.parent_hash(), &mut dag)?; + let block_fork = add_and_print( + 4, + block_red_3.id(), + vec![block_red_3.id()], + genesis.parent_hash(), + &mut dag, + )?; - let block_main_6 = add_and_print(6, block_main_5.id(), tips, genesis.parent_hash(), &mut dag)?; + dag.save_dag_state( + genesis.id(), + DagState { + tips: vec![block_main_6.id(), block_main_6_1.id(), block_fork.id()], + }, + )?; let MineNewDagBlockInfo { tips, blue_blocks: _, pruning_point, - } = dag.calc_mergeset_and_tips( - pruning_point, - dag.ghostdata_by_hash(pruning_point)? - .ok_or_else(|| format_err!("failed to get the ghostdata for main 5 block"))? - .as_ref(), - )?; - - let mut new_tips = vec![]; - tips.into_iter() - .filter(|id| { - dag.ghost_dag_manager() - .check_ancestor_of(*id, vec![block_main_6.id()]) - .unwrap() - }) - .for_each(|id| new_tips.push(id)); + } = dag.calc_mergeset_and_tips(previous_pruning_point, previous_ghostdata.as_ref())?; assert_eq!(pruning_point, block_main_2.id()); - assert_eq!(new_tips.len(), 1); - assert_eq!(*new_tips.last().unwrap(), block_main_6.id()); + assert_eq!(tips.len(), 2); + assert_eq!( + tips.into_iter().collect::>(), + HashSet::from_iter(vec![block_main_6.id(), block_main_6_1.id()]) + ); anyhow::Result::Ok(()) } From f5807c804dd0bfbf948c47a553a00f95f508cff7 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Tue, 24 Sep 2024 11:02:54 +0800 Subject: [PATCH 57/61] add write lock when saving the tips --- flexidag/src/blockdag.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index b97459a8d9..908ce0e09b 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -447,7 +447,17 @@ impl BlockDAG { } pub fn save_dag_state(&self, hash: Hash, state: DagState) -> anyhow::Result<()> { - self.storage.state_store.write().insert(hash, state)?; + let writer = self.storage.state_store.write(); + let merged_tips = writer + .get_state_by_hash(hash)? 
+ .tips + .into_iter() + .chain(state.tips) + .collect::>() + .into_iter() + .collect::>(); + writer.insert(hash, DagState { tips: merged_tips })?; + drop(writer); Ok(()) } From 2be86560d01feaa8591b74eb5555028b74fd1d96 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Tue, 24 Sep 2024 17:56:57 +0800 Subject: [PATCH 58/61] add test pruning for chain --- chain/mock/src/mock_chain.rs | 19 +++++++++++++++ chain/src/chain.rs | 30 +++++++++++++---------- chain/tests/test_prune.rs | 46 ++++++++++++++++++++++++++++++++++++ flexidag/src/blockdag.rs | 35 ++++++++++++++++++--------- genesis/src/lib.rs | 15 ++++++++++++ 5 files changed, 122 insertions(+), 23 deletions(-) create mode 100644 chain/tests/test_prune.rs diff --git a/chain/mock/src/mock_chain.rs b/chain/mock/src/mock_chain.rs index 492f9b0389..a81ed28cc2 100644 --- a/chain/mock/src/mock_chain.rs +++ b/chain/mock/src/mock_chain.rs @@ -12,6 +12,7 @@ use starcoin_genesis::Genesis; use starcoin_logger::prelude::*; use starcoin_storage::Storage; use starcoin_types::block::{Block, BlockHeader}; +use starcoin_types::blockhash::KType; use starcoin_types::startup_info::ChainInfo; use std::sync::Arc; use std::vec; @@ -39,6 +40,24 @@ impl MockChain { Ok(Self::new_inner(net, chain, miner, storage)) } + pub fn new_with_params(net: ChainNetwork, + k: KType, + pruning_depth: u64, + pruning_finality: u64,) -> Result { + let (storage, chain_info, _, dag) = + Genesis::init_storage_for_test_with_param(&net, k, pruning_depth, pruning_finality).expect("init storage by genesis fail."); + + let chain = BlockChain::new( + net.time_service(), + chain_info.head().id(), + storage.clone(), + None, + dag, + )?; + let miner = AccountInfo::random(); + Ok(Self::new_inner(net, chain, miner, storage)) + } + pub fn new_with_storage( net: ChainNetwork, storage: Arc, diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 0a20d2c180..de4baeafdb 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -307,27 +307,27 @@ impl BlockChain { .unwrap_or(on_chain_block_gas_limit); let strategy = epoch.strategy(); let difficulty = strategy.calculate_next_difficulty(self)?; + + let (ghostdata, tips) = if tips.is_empty() { + let tips = self.get_dag_state()?.tips; + (self.dag().ghostdata(&tips)?, tips) + } else { + (self.dag().ghostdata(&tips)?, tips) + }; + let MineNewDagBlockInfo { tips, blue_blocks, pruning_point: _, - } = if !tips.is_empty() { - let blue_blocks = (*self.dag().ghostdata(&tips)?.mergeset_blues).clone()[1..].to_vec(); + } = { + let blue_blocks = ghostdata.mergeset_blues.clone()[1..].to_vec(); MineNewDagBlockInfo { tips, blue_blocks, pruning_point, // TODO: new test cases will need pass this field if they have some special requirements. } - } else { - let dag_state = self.get_dag_state()?; - let ghostdata = self.dag().ghost_dag_manager().ghostdag(&dag_state.tips)?; - - MineNewDagBlockInfo { - tips: dag_state.tips, - blue_blocks: (*ghostdata.mergeset_blues).clone(), - pruning_point: HashValue::zero(), - } }; + debug!( "Blue blocks:{:?} in chain/create_block_template_by_header", blue_blocks @@ -349,9 +349,15 @@ impl BlockChain { uncles }; + let parent_header = if ghostdata.selected_parent != previous_header.id() { + self.storage.get_block_header_by_hash(ghostdata.selected_parent)?.ok_or_else(|| format_err!("Cannot find block header by {:?}", ghostdata.selected_parent))? 
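// With this change the template's parent becomes the ghostdag selected parent computed
// from the current tips; the header passed in by the caller is only used verbatim when
// it already is that selected parent, and OpenedBlock::new below receives the result.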
+ } else { + previous_header + }; + let mut opened_block = OpenedBlock::new( self.storage.clone(), - previous_header, + parent_header, final_block_gas_limit, author, self.time_service.now_millis(), diff --git a/chain/tests/test_prune.rs b/chain/tests/test_prune.rs new file mode 100644 index 0000000000..358a226d3c --- /dev/null +++ b/chain/tests/test_prune.rs @@ -0,0 +1,46 @@ +use std::collections::HashSet; + +use rand::rngs::mock; +use starcoin_chain::ChainReader; +use starcoin_chain_mock::MockChain; +use starcoin_config::ChainNetwork; +use starcoin_crypto::HashValue; +use starcoin_logger::prelude::debug; + + + + +#[stest::test] +fn test_block_chain_prune() -> anyhow::Result<()> { + let mut mock_chain = MockChain::new_with_params(ChainNetwork::new_test(), 3, 4, 3)?; + let genesis = mock_chain.head().status().head.clone(); + + // blue blocks + let block_blue_1 = mock_chain.produce_block_by_tips(genesis.clone(), vec![genesis.id()])?; + mock_chain.apply(block_blue_1.clone())?; + let block_blue_2 = mock_chain.produce_block_by_tips(block_blue_1.header().clone(), vec![block_blue_1.id()])?; + mock_chain.apply(block_blue_2.clone())?; + let block_blue_3 = mock_chain.produce_block_by_tips(block_blue_2.header().clone(), vec![block_blue_2.id()])?; + mock_chain.apply(block_blue_3.clone())?; + let block_blue_3_1 = mock_chain.produce_block_by_tips(block_blue_2.header().clone(), vec![block_blue_2.id()])?; + mock_chain.apply(block_blue_3_1.clone())?; + let block_blue_4 = mock_chain.produce_block_by_tips(block_blue_3.header().clone(), vec![block_blue_3.id(), block_blue_3_1.id()])?; + mock_chain.apply(block_blue_4.clone())?; + let block_blue_5 = mock_chain.produce_block_by_tips(block_blue_4.header().clone(), vec![block_blue_4.id()])?; + mock_chain.apply(block_blue_5.clone())?; + + // red blocks + let block_red_2 = mock_chain.produce_block_by_tips(block_blue_1.header().clone(), vec![block_blue_1.id()])?; + mock_chain.apply(block_red_2.clone())?; + let block_red_2_1 = mock_chain.produce_block_by_tips(block_blue_1.header().clone(), vec![block_blue_1.id()])?; + mock_chain.apply(block_red_2_1.clone())?; + let block_red_3 = mock_chain.produce_block_by_tips(block_red_2.header().clone(), vec![block_red_2.id(), block_red_2_1.id()])?; + mock_chain.apply(block_red_3.clone())?; + + debug!("tips: {:?}, pruning point: {:?}", mock_chain.head().get_dag_state()?, mock_chain.head().status().head().pruning_point()); + assert_eq!(mock_chain.head().get_dag_state()?.tips.into_iter().collect::>(), HashSet::from_iter(vec![block_blue_5.id(), block_red_3.id()])); + + + + Ok(()) +} \ No newline at end of file diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index 908ce0e09b..f5c6df958b 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -19,13 +19,14 @@ use anyhow::{bail, ensure, Ok}; use starcoin_config::genesis_config::{G_PRUNING_DEPTH, G_PRUNING_FINALITY}; use starcoin_config::temp_dir; use starcoin_crypto::{HashValue as Hash, HashValue}; -use starcoin_logger::prelude::{debug, info}; +use starcoin_logger::prelude::{debug, info, warn}; use starcoin_types::block::BlockHeader; use starcoin_types::{ blockhash::{BlockHashes, KType}, consensus_header::ConsensusHeader, }; use std::collections::HashSet; +use std::fmt::write; use std::ops::DerefMut; use std::sync::Arc; @@ -293,7 +294,6 @@ impl BlockDAG { bail!("failed to add a block when committing, e: {:?}", e); } } - process_key_already_error( self.storage .relations_store @@ -448,16 +448,29 @@ impl BlockDAG { pub fn save_dag_state(&self, hash: Hash, 
state: DagState) -> anyhow::Result<()> { let writer = self.storage.state_store.write(); - let merged_tips = writer - .get_state_by_hash(hash)? - .tips - .into_iter() - .chain(state.tips) - .collect::>() - .into_iter() - .collect::>(); - writer.insert(hash, DagState { tips: merged_tips })?; + match writer.get_state_by_hash(hash) { + anyhow::Result::Ok(dag_state) => { + // remove the ancestor tips + let left_tips = dag_state.tips.into_iter().filter(|tip| { + !state.tips.iter().any(|new_tip| { + self.ghost_dag_manager().check_ancestor_of(*tip, vec![*new_tip]).unwrap_or_else(|e| { + warn!("failed to check ancestor of tip: {:?}, new_tip: {:?}, error: {:?}", tip, new_tip, e); + false + }) + }) + }); + let merged_tips = left_tips.chain(state.tips.clone()).collect::>().into_iter().collect::>(); + writer.insert(hash, DagState { + tips: merged_tips, + })?; + } + Err(_) => { + writer.insert(hash, state)?; + } + } + drop(writer); + Ok(()) } diff --git a/genesis/src/lib.rs b/genesis/src/lib.rs index dfae64eaa2..e9894b1ef3 100644 --- a/genesis/src/lib.rs +++ b/genesis/src/lib.rs @@ -24,6 +24,7 @@ use starcoin_storage::table_info::TableInfoStore; use starcoin_storage::{BlockStore, Storage, Store}; use starcoin_transaction_builder::build_stdlib_package_with_modules; use starcoin_transaction_builder::{build_stdlib_package, StdLibOptions}; +use starcoin_types::blockhash::KType; use starcoin_types::startup_info::{ChainInfo, StartupInfo}; use starcoin_types::transaction::Package; use starcoin_types::transaction::TransactionInfo; @@ -368,6 +369,20 @@ impl Genesis { let chain_info = genesis.execute_genesis_block(net, storage.clone(), dag.clone())?; Ok((storage, chain_info, genesis, dag)) } + + pub fn init_storage_for_test_with_param( + net: &ChainNetwork, + k: KType, + pruning_depth: u64, + pruning_finality: u64, + ) -> Result<(Arc, ChainInfo, Self, BlockDAG)> { + debug!("init storage by genesis for test. 
{net:?}"); + let storage = Arc::new(Storage::new(StorageInstance::new_cache_instance())?); + let genesis = Self::load_or_build(net)?; + let dag = BlockDAG::create_for_testing_with_parameters(k, pruning_depth, pruning_finality)?; + let chain_info = genesis.execute_genesis_block(net, storage.clone(), dag.clone())?; + Ok((storage, chain_info, genesis, dag)) + } } #[cfg(test)] From b7d157d630330cc87570eb8813cd34723426772c Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Tue, 24 Sep 2024 20:03:05 +0800 Subject: [PATCH 59/61] add test case --- chain/mock/src/mock_chain.rs | 76 ++++++++++++++++++++++++-- chain/src/chain.rs | 11 +++- chain/tests/test_prune.rs | 103 +++++++++++++++++++++++++++++------ flexidag/src/blockdag.rs | 11 ++-- 4 files changed, 171 insertions(+), 30 deletions(-) diff --git a/chain/mock/src/mock_chain.rs b/chain/mock/src/mock_chain.rs index a81ed28cc2..a03c58b107 100644 --- a/chain/mock/src/mock_chain.rs +++ b/chain/mock/src/mock_chain.rs @@ -1,13 +1,13 @@ // Copyright (c) The Starcoin Core Contributors // SPDX-License-Identifier: Apache-2.0 -use anyhow::Result; +use anyhow::{format_err, Result}; use starcoin_account_api::AccountInfo; use starcoin_chain::{BlockChain, ChainReader, ChainWriter}; use starcoin_config::ChainNetwork; use starcoin_consensus::Consensus; use starcoin_crypto::HashValue; -use starcoin_dag::blockdag::BlockDAG; +use starcoin_dag::blockdag::{BlockDAG, MineNewDagBlockInfo}; use starcoin_genesis::Genesis; use starcoin_logger::prelude::*; use starcoin_storage::Storage; @@ -40,12 +40,15 @@ impl MockChain { Ok(Self::new_inner(net, chain, miner, storage)) } - pub fn new_with_params(net: ChainNetwork, + pub fn new_with_params( + net: ChainNetwork, k: KType, pruning_depth: u64, - pruning_finality: u64,) -> Result { + pruning_finality: u64, + ) -> Result { let (storage, chain_info, _, dag) = - Genesis::init_storage_for_test_with_param(&net, k, pruning_depth, pruning_finality).expect("init storage by genesis fail."); + Genesis::init_storage_for_test_with_param(&net, k, pruning_depth, pruning_finality) + .expect("init storage by genesis fail."); let chain = BlockChain::new( net.time_service(), @@ -216,6 +219,69 @@ impl MockChain { .create_block(template, self.net.time_service().as_ref()) } + pub fn produce_block_for_pruning(&mut self) -> Result { + let tips = self.head.get_dag_state()?.tips; + let ghostdata = self.head.dag().ghost_dag_manager().ghostdag(&tips)?; + let selected_header = self + .head() + .get_storage() + .get_block_header_by_hash(ghostdata.selected_parent)? + .ok_or_else(|| { + format_err!( + "Cannot find block header by hash: {:?}", + ghostdata.selected_parent + ) + })?; + + let previous_pruning = if selected_header.pruning_point() == HashValue::zero() { + self.head().get_storage().get_genesis()?.unwrap() + } else { + selected_header.pruning_point() + }; + + let prevous_ghostdata = self + .head() + .dag() + .ghostdata_by_hash(previous_pruning)? + .ok_or_else(|| format_err!("Cannot find ghostdata by hash: {:?}", previous_pruning))?; + + let MineNewDagBlockInfo { + tips: pruned_tips, + blue_blocks, + pruning_point, + } = self + .head + .dag() + .calc_mergeset_and_tips(previous_pruning, prevous_ghostdata.as_ref())?; + + debug!( + "tips: {:?}, blue_blocks: {:?}, pruning_point: {:?}", + pruned_tips, blue_blocks, pruning_point + ); + + let (template, _) = self.head.create_block_template_by_header( + *self.miner.address(), + selected_header, + vec![], + blue_blocks[1..] 
+ .iter() + .map(|block_id| { + self.head() + .get_storage() + .get_block_header_by_hash(*block_id) + .unwrap() + .unwrap() + }) + .collect(), + None, + pruned_tips, + pruning_point, + )?; + self.head + .consensus() + .create_block(template, self.net.time_service().as_ref()) + } + pub fn apply(&mut self, block: Block) -> Result<()> { self.head.apply(block)?; Ok(()) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index de4baeafdb..1c2fa43d07 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -350,7 +350,14 @@ impl BlockChain { }; let parent_header = if ghostdata.selected_parent != previous_header.id() { - self.storage.get_block_header_by_hash(ghostdata.selected_parent)?.ok_or_else(|| format_err!("Cannot find block header by {:?}", ghostdata.selected_parent))? + self.storage + .get_block_header_by_hash(ghostdata.selected_parent)? + .ok_or_else(|| { + format_err!( + "Cannot find block header by {:?}", + ghostdata.selected_parent + ) + })? } else { previous_header }; @@ -368,7 +375,7 @@ impl BlockChain { tips, blue_blocks, 0, - HashValue::zero(), // TODO: this field must be returned by dag + pruning_point, )?; let excluded_txns = opened_block.push_txns(user_txns)?; let template = opened_block.finalize()?; diff --git a/chain/tests/test_prune.rs b/chain/tests/test_prune.rs index 358a226d3c..72354b7def 100644 --- a/chain/tests/test_prune.rs +++ b/chain/tests/test_prune.rs @@ -1,14 +1,8 @@ -use std::collections::HashSet; - -use rand::rngs::mock; use starcoin_chain::ChainReader; use starcoin_chain_mock::MockChain; use starcoin_config::ChainNetwork; -use starcoin_crypto::HashValue; use starcoin_logger::prelude::debug; - - - +use std::collections::HashSet; #[stest::test] fn test_block_chain_prune() -> anyhow::Result<()> { @@ -18,29 +12,102 @@ fn test_block_chain_prune() -> anyhow::Result<()> { // blue blocks let block_blue_1 = mock_chain.produce_block_by_tips(genesis.clone(), vec![genesis.id()])?; mock_chain.apply(block_blue_1.clone())?; - let block_blue_2 = mock_chain.produce_block_by_tips(block_blue_1.header().clone(), vec![block_blue_1.id()])?; + let block_blue_2 = + mock_chain.produce_block_by_tips(block_blue_1.header().clone(), vec![block_blue_1.id()])?; mock_chain.apply(block_blue_2.clone())?; - let block_blue_3 = mock_chain.produce_block_by_tips(block_blue_2.header().clone(), vec![block_blue_2.id()])?; + let block_blue_3 = + mock_chain.produce_block_by_tips(block_blue_2.header().clone(), vec![block_blue_2.id()])?; mock_chain.apply(block_blue_3.clone())?; - let block_blue_3_1 = mock_chain.produce_block_by_tips(block_blue_2.header().clone(), vec![block_blue_2.id()])?; + let block_blue_3_1 = + mock_chain.produce_block_by_tips(block_blue_2.header().clone(), vec![block_blue_2.id()])?; mock_chain.apply(block_blue_3_1.clone())?; - let block_blue_4 = mock_chain.produce_block_by_tips(block_blue_3.header().clone(), vec![block_blue_3.id(), block_blue_3_1.id()])?; + let block_blue_4 = mock_chain.produce_block_by_tips( + block_blue_3.header().clone(), + vec![block_blue_3.id(), block_blue_3_1.id()], + )?; mock_chain.apply(block_blue_4.clone())?; - let block_blue_5 = mock_chain.produce_block_by_tips(block_blue_4.header().clone(), vec![block_blue_4.id()])?; + let block_blue_5 = + mock_chain.produce_block_by_tips(block_blue_4.header().clone(), vec![block_blue_4.id()])?; mock_chain.apply(block_blue_5.clone())?; // red blocks - let block_red_2 = mock_chain.produce_block_by_tips(block_blue_1.header().clone(), vec![block_blue_1.id()])?; + let block_red_2 = + 
mock_chain.produce_block_by_tips(block_blue_1.header().clone(), vec![block_blue_1.id()])?; mock_chain.apply(block_red_2.clone())?; - let block_red_2_1 = mock_chain.produce_block_by_tips(block_blue_1.header().clone(), vec![block_blue_1.id()])?; + let block_red_2_1 = + mock_chain.produce_block_by_tips(block_blue_1.header().clone(), vec![block_blue_1.id()])?; mock_chain.apply(block_red_2_1.clone())?; - let block_red_3 = mock_chain.produce_block_by_tips(block_red_2.header().clone(), vec![block_red_2.id(), block_red_2_1.id()])?; + let block_red_3 = mock_chain.produce_block_by_tips( + block_red_2.header().clone(), + vec![block_red_2.id(), block_red_2_1.id()], + )?; mock_chain.apply(block_red_3.clone())?; - debug!("tips: {:?}, pruning point: {:?}", mock_chain.head().get_dag_state()?, mock_chain.head().status().head().pruning_point()); - assert_eq!(mock_chain.head().get_dag_state()?.tips.into_iter().collect::>(), HashSet::from_iter(vec![block_blue_5.id(), block_red_3.id()])); + debug!( + "tips: {:?}, pruning point: {:?}", + mock_chain.head().get_dag_state()?, + mock_chain.head().status().head().pruning_point() + ); + assert_eq!( + mock_chain + .head() + .get_dag_state()? + .tips + .into_iter() + .collect::>(), + HashSet::from_iter(vec![block_blue_5.id(), block_red_3.id()]) + ); + + let block_blue_6 = + mock_chain.produce_block_by_tips(block_blue_5.header().clone(), vec![block_blue_5.id()])?; + mock_chain.apply(block_blue_6.clone())?; + let block_blue_6_1 = + mock_chain.produce_block_by_tips(block_blue_5.header().clone(), vec![block_blue_5.id()])?; + mock_chain.apply(block_blue_6_1.clone())?; + let block_red_4 = + mock_chain.produce_block_by_tips(block_red_3.header().clone(), vec![block_red_3.id()])?; + mock_chain.apply(block_red_4.clone())?; + + debug!( + "tips: {:?}, pruning point: {:?}", + mock_chain.head().get_dag_state()?, + mock_chain.head().status().head().pruning_point() + ); + assert_eq!( + mock_chain + .head() + .get_dag_state()? + .tips + .into_iter() + .collect::>(), + HashSet::from_iter(vec![ + block_blue_6.id(), + block_blue_6_1.id(), + block_red_4.id() + ]) + ); + let block_blue_7 = mock_chain.produce_block_for_pruning()?; + mock_chain.apply(block_blue_7.clone())?; + assert_eq!(block_blue_7.header().pruning_point(), block_blue_2.id()); + assert_eq!( + block_blue_7 + .header() + .parents_hash() + .into_iter() + .collect::>(), + HashSet::from_iter(vec![block_blue_6.id(), block_blue_6_1.id()]) + ); + assert_eq!( + mock_chain + .head() + .get_dag_state()? 
+ .tips + .into_iter() + .collect::>(), + HashSet::from_iter(vec![block_blue_7.id()]) + ); Ok(()) -} \ No newline at end of file +} diff --git a/flexidag/src/blockdag.rs b/flexidag/src/blockdag.rs index f5c6df958b..4f561630df 100644 --- a/flexidag/src/blockdag.rs +++ b/flexidag/src/blockdag.rs @@ -26,7 +26,6 @@ use starcoin_types::{ consensus_header::ConsensusHeader, }; use std::collections::HashSet; -use std::fmt::write; use std::ops::DerefMut; use std::sync::Arc; @@ -459,10 +458,12 @@ impl BlockDAG { }) }) }); - let merged_tips = left_tips.chain(state.tips.clone()).collect::>().into_iter().collect::>(); - writer.insert(hash, DagState { - tips: merged_tips, - })?; + let merged_tips = left_tips + .chain(state.tips.clone()) + .collect::>() + .into_iter() + .collect::>(); + writer.insert(hash, DagState { tips: merged_tips })?; } Err(_) => { writer.insert(hash, state)?; From 2d3745edb42cec461fdfa9a7d0af17115793b492 Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Wed, 25 Sep 2024 15:06:54 +0800 Subject: [PATCH 60/61] if no fork, execute at once --- sync/src/tasks/block_sync_task.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/sync/src/tasks/block_sync_task.rs b/sync/src/tasks/block_sync_task.rs index ef6fcb4273..918c3649ae 100644 --- a/sync/src/tasks/block_sync_task.rs +++ b/sync/src/tasks/block_sync_task.rs @@ -421,13 +421,15 @@ where Ok(()) } - async fn find_absent_ancestor(&self, mut block_headers: Vec) -> Result<()> { + async fn find_absent_ancestor(&self, mut block_headers: Vec) -> Result { + let mut count: u64 = 0; loop { let mut absent_blocks = vec![]; self.find_absent_parent_dag_blocks_for_blocks(block_headers, &mut absent_blocks)?; if absent_blocks.is_empty() { - return Ok(()); + return Ok(count); } + count = count.saturating_add(absent_blocks.len() as u64); block_headers = self.fetch_blocks(absent_blocks).await?; } } @@ -449,10 +451,12 @@ where block_header.parents_hash() ); let fut = async { - self.find_absent_ancestor(vec![block_header.clone()]) + let count = self + .find_absent_ancestor(vec![block_header.clone()]) .await?; - if block_header.number() % 1000 == 0 + if count == 0 + || block_header.number() % 1000 == 0 || block_header.number() >= self.target.target_id.number() { let parallel_execute = DagBlockSender::new( From 07de3ce251ff48a71d7cc871c6b19d852232d03e Mon Sep 17 00:00:00 2001 From: jackzhhuang Date: Fri, 27 Sep 2024 11:39:10 +0800 Subject: [PATCH 61/61] add test code --- chain/mock/src/mock_chain.rs | 20 ++++++++++++++++++++ chain/src/verifier/mod.rs | 4 ++-- chain/tests/test_prune.rs | 24 +++++++++++++++++++----- kube/manifest/starcoin-halley.yaml | 14 +------------- sync/src/tasks/block_sync_task.rs | 7 +++++-- 5 files changed, 47 insertions(+), 22 deletions(-) diff --git a/chain/mock/src/mock_chain.rs b/chain/mock/src/mock_chain.rs index a03c58b107..79996cd4f8 100644 --- a/chain/mock/src/mock_chain.rs +++ b/chain/mock/src/mock_chain.rs @@ -200,6 +200,26 @@ impl MockChain { .create_block(template, self.net.time_service().as_ref()) } + pub fn produce_block_by_params( + &mut self, + parent_header: BlockHeader, + tips: Vec, + pruning_point: HashValue, + ) -> Result { + let (template, _) = self.head.create_block_template_by_header( + *self.miner.address(), + parent_header, + vec![], + vec![], + None, + tips, + pruning_point, + )?; + self.head + .consensus() + .create_block(template, self.net.time_service().as_ref()) + } + pub fn produce_block_by_tips( &mut self, parent_header: BlockHeader, diff --git a/chain/src/verifier/mod.rs 
b/chain/src/verifier/mod.rs index 5b4eba556e..a4381b2522 100644 --- a/chain/src/verifier/mod.rs +++ b/chain/src/verifier/mod.rs @@ -404,8 +404,8 @@ impl BasicDagVerifier { ), ) })?, - "Invalid block: parent {} might not exist.", - parent_hash + "Invalid block: parent {:?} is not the descendant of pruning point: {:?}", + parent_hash, new_block_header.pruning_point() ); Ok::<(), ConnectBlockError>(()) })?; diff --git a/chain/tests/test_prune.rs b/chain/tests/test_prune.rs index 72354b7def..8e808125e5 100644 --- a/chain/tests/test_prune.rs +++ b/chain/tests/test_prune.rs @@ -99,15 +99,29 @@ fn test_block_chain_prune() -> anyhow::Result<()> { .collect::>(), HashSet::from_iter(vec![block_blue_6.id(), block_blue_6_1.id()]) ); + + let tips = mock_chain.head().get_dag_state()?.tips; assert_eq!( - mock_chain - .head() - .get_dag_state()? - .tips + tips.iter().cloned().collect::>(), + HashSet::from_iter(vec![block_blue_7.id()]) + ); + + let failure_block = mock_chain.produce_block_by_params( + block_blue_7.header().clone(), + vec![block_red_4.id(), block_blue_7.id()], + block_blue_7.header().pruning_point(), + )?; + assert_eq!( + failure_block + .header() + .parents_hash() .into_iter() .collect::>(), - HashSet::from_iter(vec![block_blue_7.id()]) + HashSet::from_iter(vec![block_red_4.id(), block_blue_7.id()]) ); + let result = mock_chain.apply(failure_block); + debug!("apply failure block result: {:?}", result); + assert!(result.is_err()); Ok(()) } diff --git a/kube/manifest/starcoin-halley.yaml b/kube/manifest/starcoin-halley.yaml index 12c9aca410..dce70e7307 100644 --- a/kube/manifest/starcoin-halley.yaml +++ b/kube/manifest/starcoin-halley.yaml @@ -23,7 +23,7 @@ spec: starcoin/node-pool: seed-pool containers: - name: starcoin - image: starcoin/starcoin:dag-master + image: starcoin/starcoin:pruning-point imagePullPolicy: Always command: - bash @@ -68,18 +68,6 @@ spec: timeoutSeconds: 2 failureThreshold: 3 successThreshold: 1 - readinessProbe: - exec: - command: - - sh - - -c - - >- - /starcoin/starcoin -n halley -d /sc-data node sync status|grep Synchronized - initialDelaySeconds: 10 - periodSeconds: 5 - timeoutSeconds: 2 - failureThreshold: 3 - successThreshold: 1 volumeClaimTemplates: - metadata: name: starcoin-volume diff --git a/sync/src/tasks/block_sync_task.rs b/sync/src/tasks/block_sync_task.rs index 918c3649ae..7f88b94007 100644 --- a/sync/src/tasks/block_sync_task.rs +++ b/sync/src/tasks/block_sync_task.rs @@ -455,8 +455,11 @@ where .find_absent_ancestor(vec![block_header.clone()]) .await?; - if count == 0 - || block_header.number() % 1000 == 0 + if count == 0 { + return anyhow::Ok(ParallelSign::Continue); + } + + if block_header.number() % 1000 == 0 || block_header.number() >= self.target.target_id.number() { let parallel_execute = DagBlockSender::new(