diff --git a/src/burnchains/bitcoin/blocks.rs b/src/burnchains/bitcoin/blocks.rs
index 0c585e320b..61a1820847 100644
--- a/src/burnchains/bitcoin/blocks.rs
+++ b/src/burnchains/bitcoin/blocks.rs
@@ -72,6 +72,10 @@ impl BurnHeaderIPC for BitcoinHeaderIPC {
fn height(&self) -> u64 {
self.block_height
}
+
+ fn header_hash(&self) -> [u8; 32] {
+ self.block_header.header.bitcoin_hash().0
+ }
}
#[derive(Debug, Clone, PartialEq)]
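The new accessor surfaces the header's double-SHA256 digest as raw bytes. A minimal sketch of the round trip, with a stand-in for the `Sha256dHash` newtype from the vendored `deps::bitcoin` crate (the newtype wrapping is why `.0` above yields a `[u8; 32]`):

```rust
// Sketch only: `Sha256dHash` stands in for deps::bitcoin::util::hash::Sha256dHash,
// a newtype over [u8; 32].
#[derive(Debug, Clone, Copy, PartialEq)]
struct Sha256dHash([u8; 32]);

struct HeaderIpc {
    hash: Sha256dHash,
}

impl HeaderIpc {
    // Mirrors the new accessor: hand out the digest as plain bytes.
    fn header_hash(&self) -> [u8; 32] {
        self.hash.0
    }
}

fn main() {
    let hdr = HeaderIpc { hash: Sha256dHash([0x11; 32]) };
    let raw = hdr.header_hash();
    // Callers can rebuild the typed hash, as sync_with_indexer does below.
    assert_eq!(Sha256dHash(raw), hdr.hash);
}
```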
diff --git a/src/burnchains/burnchain.rs b/src/burnchains/burnchain.rs
index eaf2c31f4f..a0ecf45cf6 100644
--- a/src/burnchains/burnchain.rs
+++ b/src/burnchains/burnchain.rs
@@ -17,6 +17,9 @@
along with Blockstack. If not, see <http://www.gnu.org/licenses/>.
*/
+use deps;
+use deps::bitcoin::util::hash::Sha256dHash as BitcoinSha256dHash;
+
use std::fs;
use std::path::PathBuf;
use std::sync::mpsc::sync_channel;
@@ -43,7 +46,7 @@ use burnchains::{
use burnchains::db::BurnchainDB;
use burnchains::indexer::{
- BurnBlockIPC, BurnchainBlockDownloader, BurnchainBlockParser, BurnchainIndexer,
+ BurnBlockIPC, BurnHeaderIPC, BurnchainBlockDownloader, BurnchainBlockParser, BurnchainIndexer,
};
use burnchains::bitcoin::address::address_type_to_version_byte;
@@ -791,9 +794,16 @@ impl Burnchain {
pub fn sync<I: BurnchainIndexer + 'static>(
&mut self,
comms: &CoordinatorChannels,
+ target_block_height_opt: Option<u64>,
+ max_blocks_opt: Option<u64>,
) -> Result<u64, burnchain_error> {
let mut indexer: I = self.make_indexer()?;
- let chain_tip = self.sync_with_indexer(&mut indexer, comms.clone())?;
+ let chain_tip = self.sync_with_indexer(
+ &mut indexer,
+ comms.clone(),
+ target_block_height_opt,
+ max_blocks_opt,
+ )?;
Ok(chain_tip.block_height)
}
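Callers can now bound a sync by an absolute target height, a per-call block budget, or both. A self-contained sketch of the batching pattern this enables; `MockChain` is a stand-in for `Burnchain`, and the heights are invented:

```rust
// `MockChain` stands in for Burnchain so the pattern runs on its own.
struct MockChain {
    height: u64,
}

impl MockChain {
    // Stand-in for Burnchain::sync with the new knobs: advance by at most
    // `max_blocks_opt`, never past `target_block_height_opt`, and return
    // the new tip height.
    fn sync(&mut self, target_block_height_opt: Option<u64>, max_blocks_opt: Option<u64>) -> u64 {
        let target = target_block_height_opt.unwrap_or(u64::MAX);
        let step = max_blocks_opt.unwrap_or(u64::MAX);
        self.height = self.height.saturating_add(step).min(target);
        self.height
    }
}

fn main() {
    let mut chain = MockChain { height: 0 };
    // Pull at most 128 blocks per call until the target height is reached,
    // keeping each call short and the node responsive in between.
    while chain.sync(Some(1_000), Some(128)) < 1_000 {}
    assert_eq!(chain.height, 1_000);
}
```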
@@ -1006,12 +1016,16 @@ impl Burnchain {
}
/// Top-level burnchain sync.
- /// Returns the burnchain block header for the new burnchain tip
+ /// Returns the burnchain block header for the new burnchain tip, which will be _at least_ as
+ /// high as target_block_height_opt (if given), or whatever is currently at the tip of the
+ /// burnchain DB.
/// If this method returns Err(burnchain_error::TrySyncAgain), then call this method again.
pub fn sync_with_indexer<I>(
&mut self,
indexer: &mut I,
coord_comm: CoordinatorChannels,
+ target_block_height_opt: Option<u64>,
+ max_blocks_opt: Option<u64>,
) -> Result<BurnchainBlockHeader, burnchain_error>
where
I: BurnchainIndexer + 'static,
@@ -1040,11 +1054,9 @@ impl Burnchain {
// get latest headers.
debug!("Sync headers from {}", sync_height);
- let end_block = indexer.sync_headers(sync_height, None)?;
- let mut start_block = match sync_height {
- 0 => 0,
- _ => sync_height,
- };
+ // fetch all headers, no matter what
+ let mut end_block = indexer.sync_headers(sync_height, None)?;
+ let mut start_block = sync_height;
if db_height < start_block {
start_block = db_height;
}
@@ -1053,6 +1065,41 @@ impl Burnchain {
"Sync'ed headers from {} to {}. DB at {}",
start_block, end_block, db_height
);
+
+ if let Some(target_block_height) = target_block_height_opt {
+ if target_block_height < end_block {
+ debug!(
+ "Will download up to max burn block height {}",
+ target_block_height
+ );
+ end_block = target_block_height;
+ }
+ }
+
+ if let Some(max_blocks) = max_blocks_opt {
+ if start_block + max_blocks < end_block {
+ debug!(
+ "Will download only {} blocks (up to block height {})",
+ max_blocks,
+ start_block + max_blocks
+ );
+ end_block = start_block + max_blocks;
+ }
+ }
+
+ if end_block < start_block {
+ // nothing to do -- go get the burnchain block data at that height
+ let mut hdrs = indexer.read_headers(end_block, end_block + 1)?;
+ if let Some(hdr) = hdrs.pop() {
+ debug!("Nothing to do; already have blocks up to {}", end_block);
+ let bhh =
+ BurnchainHeaderHash::from_bitcoin_hash(&BitcoinSha256dHash(hdr.header_hash()));
+ return burnchain_db
+ .get_burnchain_block(&bhh)
+ .map(|block_data| block_data.header);
+ }
+ }
+
if start_block == db_height && db_height == end_block {
// all caught up
return Ok(burn_chain_tip);
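The caps compose in a fixed order: the target height lowers `end_block` first, then `max_blocks` bounds the batch relative to `start_block`; if `end_block` falls below `start_block`, the requested block is already stored and is simply re-read. The clamping arithmetic, extracted into a standalone sketch:

```rust
/// Sketch of the end-block clamping above (pure arithmetic, no I/O).
fn clamp_end_block(
    start_block: u64,
    mut end_block: u64,
    target_block_height_opt: Option<u64>,
    max_blocks_opt: Option<u64>,
) -> u64 {
    if let Some(target) = target_block_height_opt {
        if target < end_block {
            end_block = target; // don't download past the requested height
        }
    }
    if let Some(max_blocks) = max_blocks_opt {
        if start_block + max_blocks < end_block {
            end_block = start_block + max_blocks; // bound the batch size
        }
    }
    end_block
}

fn main() {
    assert_eq!(clamp_end_block(100, 500, Some(300), None), 300);
    assert_eq!(clamp_end_block(100, 500, None, Some(50)), 150);
    // Both caps: the tighter one wins (max_blocks is applied after the target).
    assert_eq!(clamp_end_block(100, 500, Some(300), Some(50)), 150);
    // A target below start_block leaves end_block < start_block: nothing to fetch.
    assert!(clamp_end_block(100, 500, Some(80), None) < 100);
}
```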
diff --git a/src/burnchains/db.rs b/src/burnchains/db.rs
index d835775c9e..116c5d03c7 100644
--- a/src/burnchains/db.rs
+++ b/src/burnchains/db.rs
@@ -12,7 +12,10 @@ use chainstate::burn::operations::BlockstackOperationType;
use chainstate::stacks::index::MarfTrieId;
-use util::db::{query_row, query_rows, u64_to_sql, Error as DBError, FromColumn, FromRow};
+use util::db::{
+ query_row, query_rows, tx_begin_immediate, tx_busy_handler, u64_to_sql, Error as DBError,
+ FromColumn, FromRow,
+};
pub struct BurnchainDB {
conn: Connection,
@@ -185,10 +188,12 @@ impl BurnchainDB {
}
};
- let mut db = BurnchainDB {
- conn: Connection::open_with_flags(path, open_flags)
- .expect(&format!("FAILED to open: {}", path)),
- };
+ let conn = Connection::open_with_flags(path, open_flags)
+ .expect(&format!("FAILED to open: {}", path));
+
+ conn.busy_handler(Some(tx_busy_handler))?;
+
+ let mut db = BurnchainDB { conn };
if create_flag {
let db_tx = db.tx_begin()?;
@@ -216,14 +221,14 @@ impl BurnchainDB {
OpenFlags::SQLITE_OPEN_READ_ONLY
};
let conn = Connection::open_with_flags(path, open_flags)?;
+ conn.busy_handler(Some(tx_busy_handler))?;
Ok(BurnchainDB { conn })
}
fn tx_begin<'a>(&'a mut self) -> Result<BurnchainDBTransaction<'a>, BurnchainError> {
- Ok(BurnchainDBTransaction {
- sql_tx: self.conn.transaction()?,
- })
+ let sql_tx = tx_begin_immediate(&mut self.conn)?;
+ Ok(BurnchainDBTransaction { sql_tx: sql_tx })
}
pub fn get_canonical_chain_tip(&self) -> Result<BurnchainBlockHeader, BurnchainError> {
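Both open paths now install a retrying busy handler, and write transactions begin with `BEGIN IMMEDIATE`, so contending connections back off instead of failing fast with `SQLITE_BUSY`. A sketch of what such helpers can look like on top of rusqlite; the real `tx_busy_handler` and `tx_begin_immediate` live in `util::db` and may differ in detail:

```rust
use std::{thread, time::Duration};

use rusqlite::{Connection, Transaction, TransactionBehavior};

/// Sketch of a busy handler: back off briefly and ask SQLite to retry.
/// rusqlite calls this with the number of times the lock has been hit so far.
fn tx_busy_handler(run_count: i32) -> bool {
    let wait_ms = std::cmp::min(100, run_count.max(1) as u64 * 10);
    thread::sleep(Duration::from_millis(wait_ms));
    true // keep retrying; returning false would surface SQLITE_BUSY
}

/// Sketch of tx_begin_immediate: take the write lock up front so the
/// transaction can't hit SQLITE_BUSY at its first write.
fn tx_begin_immediate(conn: &mut Connection) -> rusqlite::Result<Transaction<'_>> {
    conn.transaction_with_behavior(TransactionBehavior::Immediate)
}

fn main() -> rusqlite::Result<()> {
    let mut conn = Connection::open_in_memory()?;
    conn.busy_handler(Some(tx_busy_handler))?;
    let tx = tx_begin_immediate(&mut conn)?;
    tx.execute_batch("CREATE TABLE t(x INTEGER)")?;
    tx.commit()
}
```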
diff --git a/src/burnchains/indexer.rs b/src/burnchains/indexer.rs
index c17cf759d1..7b11e9dcf4 100644
--- a/src/burnchains/indexer.rs
+++ b/src/burnchains/indexer.rs
@@ -28,6 +28,7 @@ pub trait BurnHeaderIPC {
fn height(&self) -> u64;
fn header(&self) -> Self::H;
+ fn header_hash(&self) -> [u8; 32];
}
pub trait BurnBlockIPC {
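Adding `header_hash` to the trait lets generic sync code recover a raw header hash without naming the concrete header type. A minimal implementor, with the trait reproduced in cut-down form so the sketch stands alone (the real trait may put extra bounds on `H`):

```rust
use std::collections::HashMap;

pub trait BurnHeaderIPC {
    type H;
    fn height(&self) -> u64;
    fn header(&self) -> Self::H;
    fn header_hash(&self) -> [u8; 32];
}

#[derive(Clone)]
struct TestHeader {
    height: u64,
    hash: [u8; 32],
}

impl BurnHeaderIPC for TestHeader {
    type H = TestHeader;
    fn height(&self) -> u64 {
        self.height
    }
    fn header(&self) -> TestHeader {
        self.clone()
    }
    fn header_hash(&self) -> [u8; 32] {
        self.hash
    }
}

// Generic code can now index headers by hash without the concrete type.
fn index_by_hash<T: BurnHeaderIPC>(hdrs: &[T]) -> HashMap<[u8; 32], u64> {
    hdrs.iter().map(|h| (h.header_hash(), h.height())).collect()
}

fn main() {
    let hdrs = vec![TestHeader { height: 1, hash: [0xaa; 32] }];
    assert_eq!(index_by_hash(&hdrs)[&[0xaa; 32]], 1);
}
```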
diff --git a/src/chainstate/burn/operations/mod.rs b/src/chainstate/burn/operations/mod.rs
index 8a25504a93..27552e9078 100644
--- a/src/chainstate/burn/operations/mod.rs
+++ b/src/chainstate/burn/operations/mod.rs
@@ -141,7 +141,7 @@ impl From<db_error> for Error {
#[derive(Debug, PartialEq, Clone, Eq, Serialize, Deserialize)]
pub struct LeaderBlockCommitOp {
- pub block_header_hash: BlockHeaderHash, // hash of Stacks block header (double-sha256)
+ pub block_header_hash: BlockHeaderHash, // hash of Stacks block header (sha512/256)
pub new_seed: VRFSeed, // new seed for this block
pub parent_block_ptr: u32, // block height of the block that contains the parent block hash
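The corrected comment matters to anyone recomputing `block_header_hash`: Stacks block headers are hashed with SHA-512/256, not Bitcoin-style double SHA-256. A sketch of the difference using the `sha2` crate's 0.10 API (the input bytes are placeholders):

```rust
use sha2::{Digest, Sha256, Sha512_256};

// Tiny helper so the sketch doesn't need the `hex` crate.
fn to_hex(bytes: &[u8]) -> String {
    bytes.iter().map(|b| format!("{:02x}", b)).collect()
}

fn main() {
    let header_bytes = b"example serialized block header"; // placeholder input

    // SHA-512/256: what the corrected comment says BlockHeaderHash is.
    let stacks_style = Sha512_256::digest(header_bytes);

    // Double SHA-256: Bitcoin's scheme, which the old comment wrongly named.
    let once = Sha256::digest(header_bytes);
    let bitcoin_style = Sha256::digest(once.as_slice());

    println!("sha512/256:    {}", to_hex(stacks_style.as_slice()));
    println!("double sha256: {}", to_hex(bitcoin_style.as_slice()));
}
```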
diff --git a/src/chainstate/coordinator/mod.rs b/src/chainstate/coordinator/mod.rs
index df5dd88166..ca9e6ba68d 100644
--- a/src/chainstate/coordinator/mod.rs
+++ b/src/chainstate/coordinator/mod.rs
@@ -432,8 +432,8 @@ impl<'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider>
self.notifier.notify_sortition_processed();
debug!(
- "Sortition processed: {} (tip {})",
- &sortition_id, &next_snapshot.burn_header_hash
+ "Sortition processed: {} (tip {} height {})",
+ &sortition_id, &next_snapshot.burn_header_hash, next_snapshot.block_height
);
if sortition_tip_snapshot.block_height < header.block_height {
diff --git a/src/chainstate/coordinator/tests.rs b/src/chainstate/coordinator/tests.rs
index 3d428495d7..d6347a1b45 100644
--- a/src/chainstate/coordinator/tests.rs
+++ b/src/chainstate/coordinator/tests.rs
@@ -1782,6 +1782,7 @@ fn preprocess_block(
&my_sortition.consensus_hash,
&block,
&parent_consensus_hash,
+ 5,
)
.unwrap();
}
diff --git a/src/chainstate/stacks/boot/mod.rs b/src/chainstate/stacks/boot/mod.rs
index 89bf410f48..e18a296b8f 100644
--- a/src/chainstate/stacks/boot/mod.rs
+++ b/src/chainstate/stacks/boot/mod.rs
@@ -3202,7 +3202,7 @@ pub mod test {
})
.unwrap();
- eprintln!("\ntenure: {}\nreward cycle: {}\nmin-uSTX: {}\naddrs: {:?}\ntotal_liquid_ustx: {}\ntotal-stacked: {}\ntotal-stacked next: {}\n",
+ eprintln!("\ntenure: {}\nreward cycle: {}\nmin-uSTX: {}\naddrs: {:?}\ntotal_liquid_ustx: {}\ntotal-stacked: {}\ntotal-stacked next: {}\n",
tenure_id, cur_reward_cycle, min_ustx, &reward_addrs, total_liquid_ustx, total_stacked, total_stacked_next);
if tenure_id <= 1 {
diff --git a/src/chainstate/stacks/db/blocks.rs b/src/chainstate/stacks/db/blocks.rs
index 0c7453c37c..b78d53e0aa 100644
--- a/src/chainstate/stacks/db/blocks.rs
+++ b/src/chainstate/stacks/db/blocks.rs
@@ -47,8 +47,8 @@ use std::path::{Path, PathBuf};
use util::db::Error as db_error;
use util::db::{
- query_count, query_int, query_row, query_row_columns, query_rows, tx_busy_handler, DBConn,
- FromColumn, FromRow,
+ query_count, query_int, query_row, query_row_columns, query_row_panic, query_rows,
+ tx_busy_handler, DBConn, FromColumn, FromRow,
};
use util::db::u64_to_sql;
@@ -431,7 +431,7 @@ const STACKS_BLOCK_INDEX_SQL: &'static [&'static str] = &[
CREATE TABLE staging_blocks(anchored_block_hash TEXT NOT NULL,
parent_anchored_block_hash TEXT NOT NULL,
consensus_hash TEXT NOT NULL,
- -- parent_consensus_hash is the consensus hash of the parent sortition of the sortition that chose this block
+ -- parent_consensus_hash is the consensus hash of the parent sortition of the sortition that chose this block
parent_consensus_hash TEXT NOT NULL,
parent_microblock_hash TEXT NOT NULL,
parent_microblock_seq INT NOT NULL,
@@ -443,6 +443,9 @@ const STACKS_BLOCK_INDEX_SQL: &'static [&'static str] = &[
commit_burn INT NOT NULL,
sortition_burn INT NOT NULL,
index_block_hash TEXT NOT NULL, -- used internally; hash of burn header and block header
+ download_time INT NOT NULL, -- how long the block was in-flight
+ arrival_time INT NOT NULL, -- when this block was stored
+ processed_time INT NOT NULL, -- when this block was processed
PRIMARY KEY(anchored_block_hash,consensus_hash)
);
CREATE INDEX processed_stacks_blocks ON staging_blocks(processed,anchored_block_hash,consensus_hash);
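The three new columns timestamp a staging block's lifecycle: `download_time` is supplied by the caller at insert, `arrival_time` is the wall clock when the block is stored, and `processed_time` is filled in by `set_block_processed` (below). A hedged sketch of an ad-hoc latency query over those columns, in the same rusqlite style as the surrounding code:

```rust
use rusqlite::{Connection, NO_PARAMS};

/// Sketch: average seconds a processed block sat in staging, computed from
/// the new columns. Assumes an open connection to a DB with this schema.
fn avg_queue_seconds(conn: &Connection) -> rusqlite::Result<Option<f64>> {
    conn.query_row(
        "SELECT AVG(processed_time - arrival_time) FROM staging_blocks \
         WHERE processed = 1 AND processed_time > 0",
        NO_PARAMS,
        |row| row.get(0), // AVG(...) is NULL when nothing matches, hence Option
    )
}
```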
@@ -1373,6 +1376,7 @@ impl StacksChainState {
parent_consensus_hash: &ConsensusHash,
commit_burn: u64,
sortition_burn: u64,
+ download_time: u64,
) -> Result<(), Error> {
debug!(
"Store anchored block {}/{}, parent in {}",
@@ -1428,8 +1432,11 @@ impl StacksChainState {
orphaned, \
commit_burn, \
sortition_burn, \
- index_block_hash) \
- VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14)";
+ index_block_hash, \
+ arrival_time, \
+ processed_time, \
+ download_time) \
+ VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17)";
let args: &[&dyn ToSql] = &[
&block_hash,
&block.header.parent_block,
@@ -1445,6 +1452,9 @@ impl StacksChainState {
&u64_to_sql(commit_burn)?,
&u64_to_sql(sortition_burn)?,
&index_block_hash,
+ &u64_to_sql(get_epoch_time_secs())?,
+ &0,
+ &u64_to_sql(download_time)?,
];
tx.execute(&sql, args)
@@ -1870,40 +1880,38 @@ impl StacksChainState {
anchored_block_hash: &BlockHeaderHash,
) -> Result<(), Error> {
// This block is orphaned
- let update_block_sql = "UPDATE staging_blocks SET orphaned = 1, processed = 1, attachable = 0 WHERE anchored_block_hash = ?1".to_string();
- let update_block_args = [&anchored_block_hash];
+ let update_block_sql = "UPDATE staging_blocks SET orphaned = 1, processed = 1, attachable = 0 WHERE consensus_hash = ?1 AND anchored_block_hash = ?2".to_string();
+ let update_block_args: &[&dyn ToSql] = &[consensus_hash, anchored_block_hash];
// All descendants of this processed block are never attachable.
// Indicate this by marking all children as orphaned (but not processed), across all burnchain forks.
- let update_children_sql = "UPDATE staging_blocks SET orphaned = 1, processed = 0, attachable = 0 WHERE parent_anchored_block_hash = ?1".to_string();
- let update_children_args = [&anchored_block_hash];
+ let update_children_sql = "UPDATE staging_blocks SET orphaned = 1, processed = 0, attachable = 0 WHERE parent_consensus_hash = ?1 AND parent_anchored_block_hash = ?2".to_string();
+ let update_children_args: &[&dyn ToSql] = &[consensus_hash, anchored_block_hash];
// find all orphaned microblocks, and delete the block data
- let find_orphaned_microblocks_sql =
- "SELECT microblock_hash FROM staging_microblocks WHERE anchored_block_hash = ?1"
- .to_string();
- let find_orphaned_microblocks_args = [&anchored_block_hash];
+ let find_orphaned_microblocks_sql = "SELECT microblock_hash FROM staging_microblocks WHERE consensus_hash = ?1 AND anchored_block_hash = ?2".to_string();
+ let find_orphaned_microblocks_args: &[&dyn ToSql] = &[consensus_hash, anchored_block_hash];
let orphaned_microblock_hashes = query_row_columns::<BlockHeaderHash, _>(
tx,
&find_orphaned_microblocks_sql,
- &find_orphaned_microblocks_args,
+ find_orphaned_microblocks_args,
"microblock_hash",
)
.map_err(Error::DBError)?;
// drop microblocks (this processes them)
- let update_microblock_children_sql = "UPDATE staging_microblocks SET orphaned = 1, processed = 1 WHERE anchored_block_hash = ?1".to_string();
- let update_microblock_children_args = [&anchored_block_hash];
+ let update_microblock_children_sql = "UPDATE staging_microblocks SET orphaned = 1, processed = 1 WHERE consensus_hash = ?1 AND anchored_block_hash = ?2".to_string();
+ let update_microblock_children_args: &[&dyn ToSql] = &[consensus_hash, anchored_block_hash];
- tx.execute(&update_block_sql, &update_block_args)
+ tx.execute(&update_block_sql, update_block_args)
.map_err(|e| Error::DBError(db_error::SqliteError(e)))?;
- tx.execute(&update_children_sql, &update_children_args)
+ tx.execute(&update_children_sql, update_children_args)
.map_err(|e| Error::DBError(db_error::SqliteError(e)))?;
tx.execute(
&update_microblock_children_sql,
- &update_microblock_children_args,
+ update_microblock_children_args,
)
.map_err(|e| Error::DBError(db_error::SqliteError(e)))?;
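The orphaning statements now key on `(consensus_hash, anchored_block_hash)` instead of the block hash alone, because the same Stacks block hash can be staged once per burnchain fork; keying on the hash alone would also orphan the copy on the surviving fork. A runnable sketch of that distinction against a cut-down stand-in schema:

```rust
use rusqlite::{Connection, ToSql, NO_PARAMS};

// Sketch of why the composite key matters; the table is a stand-in for
// staging_blocks with only the relevant columns.
fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute_batch(
        "CREATE TABLE staging_blocks(
             consensus_hash TEXT NOT NULL,
             anchored_block_hash TEXT NOT NULL,
             orphaned INT NOT NULL DEFAULT 0,
             PRIMARY KEY(anchored_block_hash, consensus_hash));
         INSERT INTO staging_blocks VALUES ('ch-fork-a', 'blk-1', 0);
         INSERT INTO staging_blocks VALUES ('ch-fork-b', 'blk-1', 0);",
    )?;

    // Composite key: only fork A's copy of blk-1 gets orphaned.
    let args: &[&dyn ToSql] = &[&"ch-fork-a", &"blk-1"];
    conn.execute(
        "UPDATE staging_blocks SET orphaned = 1 \
         WHERE consensus_hash = ?1 AND anchored_block_hash = ?2",
        args,
    )?;

    let live: i64 = conn.query_row(
        "SELECT COUNT(*) FROM staging_blocks WHERE orphaned = 0",
        NO_PARAMS,
        |row| row.get(0),
    )?;
    assert_eq!(live, 1); // fork B's copy is untouched
    Ok(())
}
```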
@@ -1936,6 +1944,7 @@ impl StacksChainState {
/// Clear out a staging block -- mark it as processed.
/// Mark its children as attachable.
/// Idempotent.
+ /// sort_tx_opt is required if accept is true
fn set_block_processed<'a, 'b>(
tx: &mut BlocksDBTx<'a>,
mut sort_tx_opt: Option<&mut SortitionHandleTx<'b>>,
@@ -2016,8 +2025,12 @@ impl StacksChainState {
);
}
- let update_sql = "UPDATE staging_blocks SET processed = 1 WHERE consensus_hash = ?1 AND anchored_block_hash = ?2".to_string();
- let update_args: &[&dyn ToSql] = &[&consensus_hash, &anchored_block_hash];
+ let update_sql = "UPDATE staging_blocks SET processed = 1, processed_time = ?1 WHERE consensus_hash = ?2 AND anchored_block_hash = ?3".to_string();
+ let update_args: &[&dyn ToSql] = &[
+ &u64_to_sql(get_epoch_time_secs())?,
+ &consensus_hash,
+ &anchored_block_hash,
+ ];
tx.execute(&update_sql, update_args)
.map_err(|e| Error::DBError(db_error::SqliteError(e)))?;
@@ -2972,6 +2985,7 @@ impl StacksChainState {
consensus_hash: &ConsensusHash,
block: &StacksBlock,
parent_consensus_hash: &ConsensusHash,
+ download_time: u64,
) -> Result<bool, Error> {
debug!(
"preprocess anchored block {}/{}",
@@ -3067,6 +3081,7 @@ impl StacksChainState {
parent_consensus_hash,
commit_burn,
sortition_burn,
+ download_time,
)?;
// store users who burned for this block so they'll get rewarded if we process it
@@ -3222,6 +3237,7 @@ impl StacksChainState {
&snapshot.consensus_hash,
block,
&parent_sn.consensus_hash,
+ 5,
)?;
let block_hash = block.block_hash();
for mblock in microblocks.iter() {
@@ -3380,152 +3396,243 @@ impl StacksChainState {
Ok(true)
}
- /// Is there at least one staging block that can be attached?
- pub fn has_attachable_staging_blocks(blocks_conn: &DBConn) -> Result<bool, Error> {
- // go through staging blocks and see if any of them match headers and are attachable.
- // pick randomly -- don't allow the network sender to choose the processing order!
- let sql = "SELECT 1 FROM staging_blocks WHERE processed = 0 AND attachable = 1 AND orphaned = 0 LIMIT 1".to_string();
- let available = blocks_conn
- .query_row(&sql, NO_PARAMS, |_row| ())
- .optional()
- .map_err(|e| Error::DBError(db_error::SqliteError(e)))?
- .is_some();
- Ok(available)
+ /// How many attachable staging blocks do we have, up to a limit, at or after the given
+ /// timestamp?
+ pub fn count_attachable_staging_blocks(
+ blocks_conn: &DBConn,
+ limit: u64,
+ min_arrival_time: u64,
+ ) -> Result<u64, Error> {
+ let sql = "SELECT COUNT(*) FROM staging_blocks WHERE processed = 0 AND attachable = 1 AND orphaned = 0 AND arrival_time >= ?1 LIMIT ?2".to_string();
+ let cnt = query_count(
+ blocks_conn,
+ &sql,
+ &[&u64_to_sql(min_arrival_time)?, &u64_to_sql(limit)?],
+ )
+ .map_err(Error::DBError)?;
+ Ok(cnt as u64)
+ }
+
+ /// How many processed staging blocks do we have, up to a limit, at or after the given
+ /// timestamp?
+ pub fn count_processed_staging_blocks(
+ blocks_conn: &DBConn,
+ limit: u64,
+ min_arrival_time: u64,
+ ) -> Result<u64, Error> {
+ let sql = "SELECT COUNT(*) FROM staging_blocks WHERE processed = 1 AND orphaned = 0 AND processed_time > 0 AND processed_time >= ?1 LIMIT ?2".to_string();
+ let cnt = query_count(
+ blocks_conn,
+ &sql,
+ &[&u64_to_sql(min_arrival_time)?, &u64_to_sql(limit)?],
+ )
+ .map_err(Error::DBError)?;
+ Ok(cnt as u64)
+ }
+
+ /// Measure how long a block waited between when it arrived and when it got processed.
+ /// Includes both orphaned and accepted blocks.
+ pub fn measure_block_wait_time(
+ blocks_conn: &DBConn,
+ start_height: u64,
+ end_height: u64,
+ ) -> Result<Vec<i64>, Error> {
+ let sql = "SELECT processed_time - arrival_time FROM staging_blocks WHERE processed = 1 AND height >= ?1 AND height < ?2";
+ let args: &[&dyn ToSql] = &[&u64_to_sql(start_height)?, &u64_to_sql(end_height)?];
+ let list = query_rows::<i64, _>(blocks_conn, &sql, args)?;
+ Ok(list)
+ }
+
+ /// Measure how long a block took to be downloaded (for blocks that we downloaded).
+ /// Includes _all_ blocks.
+ pub fn measure_block_download_time(
+ blocks_conn: &DBConn,
+ start_height: u64,
+ end_height: u64,
+ ) -> Result<Vec<i64>, Error> {
+ let sql = "SELECT download_time FROM staging_blocks WHERE height >= ?1 AND height < ?2";
+ let args: &[&dyn ToSql] = &[&u64_to_sql(start_height)?, &u64_to_sql(end_height)?];
+ let list = query_rows::<i64, _>(blocks_conn, &sql, args)?;
+ Ok(list)
}
/// Given access to the chain state (headers) and the staging blocks, find a staging block we
/// can process, as well as its parent microblocks that it confirms
/// Returns Some(microblocks, staging block) if we found a sequence of blocks to process.
/// Returns None if not.
- fn find_next_staging_block(
- blocks_conn: &DBConn,
+ fn find_next_staging_block<'a>(
+ blocks_tx: &mut BlocksDBTx<'a>,
blocks_path: &String,
headers_conn: &DBConn,
+ sort_conn: &DBConn,
) -> Result<Option<(Vec<StacksMicroblock>, StagingBlock)>, Error> {
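Downstream, the `Vec<i64>` samples returned by `measure_block_wait_time` and `measure_block_download_time` fold directly into summary statistics. A self-contained sketch (the sample values are invented):

```rust
/// Sketch: summarizing the Vec<i64> samples that measure_block_wait_time and
/// measure_block_download_time return.
fn mean_seconds(samples: &[i64]) -> Option<f64> {
    if samples.is_empty() {
        return None; // no matching blocks in the height range
    }
    Some(samples.iter().sum::<i64>() as f64 / samples.len() as f64)
}

fn main() {
    let wait_times = vec![2, 5, 3, 8]; // processed_time - arrival_time, per block
    let download_times = vec![1, 1, 2, 4]; // download_time, per block
    println!("avg wait: {:?} s", mean_seconds(&wait_times));
    println!("avg download: {:?} s", mean_seconds(&download_times));
}
```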