Skip to content

Commit

Permalink
chore: removed unused requests (#4465)
Browse files Browse the repository at this point in the history
Description
---
This removes some requests that were used at some point in time but, due to changed logic, are no longer used.

Fixes: #3333
  • Loading branch information
SWvheerden authored Aug 15, 2022
1 parent 2c76031 commit 725accc
Show file tree
Hide file tree
Showing 5 changed files with 1 addition and 168 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -50,9 +50,7 @@ pub enum NodeCommsRequest {
GetChainMetadata,
FetchHeaders(RangeInclusive<u64>),
FetchHeadersByHashes(Vec<HashOutput>),
FetchHeadersAfter(Vec<HashOutput>, HashOutput),
FetchMatchingUtxos(Vec<HashOutput>),
FetchMatchingTxos(Vec<HashOutput>),
FetchMatchingBlocks(RangeInclusive<u64>),
FetchBlocksByHash(Vec<HashOutput>),
FetchBlocksByKernelExcessSigs(Vec<Signature>),
Expand Down Expand Up @@ -101,9 +99,7 @@ impl Display for NodeCommsRequest {
write!(f, "FetchHeaders ({:?})", range)
},
FetchHeadersByHashes(v) => write!(f, "FetchHeadersByHashes (n={})", v.len()),
FetchHeadersAfter(v, _hash) => write!(f, "FetchHeadersAfter (n={})", v.len()),
FetchMatchingUtxos(v) => write!(f, "FetchMatchingUtxos (n={})", v.len()),
FetchMatchingTxos(v) => write!(f, "FetchMatchingTxos (n={})", v.len()),
FetchMatchingBlocks(range) => {
write!(f, "FetchMatchingBlocks ({:?})", range)
},
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ use tari_common_types::{
};

use crate::{
blocks::{Block, BlockHeader, ChainHeader, HistoricalBlock, NewBlockTemplate},
blocks::{Block, ChainHeader, HistoricalBlock, NewBlockTemplate},
chain_storage::UtxoMinedInfo,
proof_of_work::Difficulty,
transactions::transaction_components::{Transaction, TransactionKernel, TransactionOutput},
Expand All @@ -54,7 +54,6 @@ pub enum NodeCommsResponse {
block: Option<Block>,
},
TargetDifficulty(Difficulty),
FetchHeadersAfterResponse(Vec<BlockHeader>),
MmrNodes(Vec<HashOutput>, Vec<u8>),
FetchTokensResponse {
outputs: Vec<(TransactionOutput, u64)>,
Expand Down Expand Up @@ -98,7 +97,6 @@ impl Display for NodeCommsResponse {
error.as_ref().unwrap_or(&"Unspecified".to_string())
),
TargetDifficulty(_) => write!(f, "TargetDifficulty"),
FetchHeadersAfterResponse(_) => write!(f, "FetchHeadersAfterResponse"),
MmrNodes(_, _) => write!(f, "MmrNodes"),
FetchTokensResponse { .. } => write!(f, "FetchTokensResponse"),
FetchAssetRegistrationsResponse { .. } => write!(f, "FetchAssetRegistrationsResponse"),
Expand Down
76 changes: 0 additions & 76 deletions base_layer/core/src/base_node/comms_interface/inbound_handlers.rs
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,6 @@ use crate::{
};

const LOG_TARGET: &str = "c::bn::comms_interface::inbound_handler";
const MAX_HEADERS_PER_RESPONSE: u32 = 100;
const MAX_REQUEST_BY_BLOCK_HASHES: usize = 100;
const MAX_REQUEST_BY_KERNEL_EXCESS_SIGS: usize = 100;
const MAX_REQUEST_BY_UTXO_HASHES: usize = 100;
Expand Down Expand Up @@ -151,68 +150,6 @@ where B: BlockchainBackend + 'static
}
Ok(NodeCommsResponse::BlockHeaders(block_headers))
},
NodeCommsRequest::FetchHeadersAfter(header_hashes, stopping_hash) => {
let mut starting_block = None;
// Find first header that matches
for header_hash in header_hashes {
match self
.blockchain_db
.fetch_header_by_block_hash(header_hash.clone())
.await?
{
Some(from_block) => {
starting_block = Some(from_block);
break;
},
None => {
// Not an error. The header requested is simply not in our chain.
// Logging it as debug because it may not just be not found.
debug!(
target: LOG_TARGET,
"Skipping header {} when searching for matching headers in our chain.",
header_hash.to_hex(),
);
},
}
}
let starting_block = match starting_block {
Some(b) => b,
// Send from genesis block if no hashes match
None => self
.blockchain_db
.fetch_header(0)
.await?
.ok_or(CommsInterfaceError::BlockHeaderNotFound(0))?,
};
let mut headers = Vec::with_capacity(MAX_HEADERS_PER_RESPONSE as usize);
for i in 1..MAX_HEADERS_PER_RESPONSE {
match self
.blockchain_db
.fetch_header(starting_block.height + u64::from(i))
.await
{
Ok(Some(header)) => {
let hash = header.hash();
headers.push(header);
if hash == stopping_hash {
break;
}
},
Err(err) => {
error!(
target: LOG_TARGET,
"Could not fetch header at {}:{}",
starting_block.height + u64::from(i),
err.to_string()
);
return Err(err.into());
},
_ => error!(target: LOG_TARGET, "Could not fetch header: None"),
}
}

Ok(NodeCommsResponse::FetchHeadersAfterResponse(headers))
},
NodeCommsRequest::FetchMatchingUtxos(utxo_hashes) => {
let mut res = Vec::with_capacity(utxo_hashes.len());
for (pruned_output, spent) in (self.blockchain_db.fetch_utxos(utxo_hashes).await?)
Expand All @@ -227,19 +164,6 @@ where B: BlockchainBackend + 'static
}
Ok(NodeCommsResponse::TransactionOutputs(res))
},
NodeCommsRequest::FetchMatchingTxos(hashes) => {
let res = self
.blockchain_db
.fetch_utxos(hashes)
.await?
.into_iter()
.filter_map(|opt| match opt {
Some((PrunedOutput::NotPruned { output }, _)) => Some(output),
_ => None,
})
.collect();
Ok(NodeCommsResponse::TransactionOutputs(res))
},
NodeCommsRequest::FetchMatchingBlocks(range) => {
let blocks = self.blockchain_db.fetch_blocks(range).await?;
Ok(NodeCommsResponse::HistoricalBlocks(blocks))
Expand Down
6 changes: 0 additions & 6 deletions base_layer/core/src/base_node/proto/request.proto
Original file line number Diff line number Diff line change
Expand Up @@ -39,12 +39,6 @@ message Commitments{
repeated tari.types.Commitment commitments = 1;
}

// Request for block headers that follow any of the given known header hashes,
// walking forward until `stopping_hash` is reached (or a server-side limit).
// NOTE(review): this message is removed by this commit as unused — kept here
// only as documented context for the deletion.
message FetchHeadersAfter {
// Hashes of headers the requester already has; the responder starts after the
// first one found in its chain.
repeated bytes hashes = 1;
// Hash at which the responder stops collecting headers.
bytes stopping_hash = 2;
}


message NewBlockTemplateRequest{
uint64 algo = 1;
uint64 max_weight = 2;
Expand Down
79 changes: 0 additions & 79 deletions base_layer/core/tests/node_comms_interface.rs
Original file line number Diff line number Diff line change
Expand Up @@ -208,85 +208,6 @@ async fn inbound_fetch_utxos() {
}
}

/// Check that a `FetchMatchingTxos` request returns every requested output that
/// is stored in full (whether spent or not) and silently omits outputs that
/// exist only as pruned records.
#[tokio::test]
async fn inbound_fetch_txos() {
    let crypto = CryptoFactories::default();
    let db = create_test_blockchain_db();

    // Wire up an inbound handler over the test chain db. The receiving halves
    // of all channels are dropped — no outbound traffic is exercised here.
    let (block_event_tx, _) = broadcast::channel(50);
    let rules = ConsensusManager::builder(Network::LocalNet).build();
    let (request_tx, _) = reply_channel::unbounded();
    let (block_tx, _) = mpsc::unbounded_channel();
    let outbound = OutboundNodeCommsInterface::new(request_tx, block_tx);
    let (connectivity, _) = create_connectivity_mock();
    let handlers = InboundNodeCommsHandlers::new(
        block_event_tx,
        db.clone().into(),
        new_mempool(),
        rules,
        outbound,
        connectivity,
    );

    // Three identically-shaped outputs: two stored in full, one stored pruned.
    let new_output = || {
        create_utxo(
            MicroTari(10_000),
            &crypto,
            &Default::default(),
            &TariScript::default(),
            &Covenant::default(),
            MicroTari::zero(),
        )
    };
    let (output_a, _, _) = new_output();
    let (output_pruned, _, _) = new_output();
    let (output_b, _, _) = new_output();
    let hash_a = output_a.hash();
    let hash_b = output_b.hash();
    let hash_pruned = output_pruned.hash();

    // Anchor all three against the genesis header; the pruned one is inserted
    // hash-only, so the handler has no output body to return for it.
    let genesis = db.fetch_block(0).unwrap().block().clone();
    let header_hash = genesis.header.hash();
    let mut txn = DbTransaction::new();
    txn.insert_utxo(output_a.clone(), header_hash.clone(), genesis.header.height, 6000, 0);
    txn.insert_utxo(output_b.clone(), header_hash.clone(), genesis.header.height, 6001, 0);
    txn.insert_pruned_utxo(
        hash_pruned.clone(),
        output_pruned.witness_hash(),
        header_hash.clone(),
        5,
        6002,
        0,
    );
    assert!(db.commit(txn).is_ok());

    let response = handlers
        .handle_request(NodeCommsRequest::FetchMatchingTxos(vec![
            hash_a,
            hash_b,
            hash_pruned,
        ]))
        .await;
    match response {
        Ok(NodeCommsResponse::TransactionOutputs(txos)) => {
            // Only the two un-pruned outputs come back, in request order.
            assert_eq!(txos.len(), 2);
            assert_eq!(txos[0], output_a);
            assert_eq!(txos[1], output_b);
        },
        _ => panic!(),
    }
}

#[tokio::test]
async fn inbound_fetch_blocks() {
let store = create_test_blockchain_db();
Expand Down

0 comments on commit 725accc

Please sign in to comment.