Always allocate slots for reserved nodes #11909

Merged
client/network/src/protocol.rs (30 changes: 29 additions & 1 deletion)
@@ -179,6 +179,10 @@ pub struct Protocol<B: BlockT, Client> {
     /// List of nodes for which we perform additional logging because they are important for the
     /// user.
     important_peers: HashSet<PeerId>,
+    /// List of nodes that should never occupy peer slots.
+    default_peers_set_no_slot_peers: HashSet<PeerId>,
+    /// Actual list of connected no-slot nodes.
+    default_peers_set_no_slot_connected_peers: HashSet<PeerId>,
     /// Value that was passed as part of the configuration. Used to cap the number of full nodes.
     default_peers_set_num_full: usize,
     /// Number of slots to allocate to light nodes.
@@ -304,6 +308,17 @@ where
             imp_p
         };
 
+        let default_peers_set_no_slot_peers = {
+            let mut no_slot_p: HashSet<PeerId> = network_config
+                .default_peers_set
+                .reserved_nodes
+                .iter()
+                .map(|reserved| reserved.peer_id)
+                .collect();
+            no_slot_p.shrink_to_fit();
+            no_slot_p
+        };
+
         let mut known_addresses = Vec::new();
 
         let (peerset, peerset_handle) = {
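
The construction above mirrors the `important_peers` block just before it: the `PeerId` of every reserved node in the default peer set is collected into a `HashSet`, so membership can be checked in O(1) on every incoming connection, and `shrink_to_fit` trims the allocation because the set never changes after startup. Below is a minimal standalone sketch of the same pattern; `ReservedNode` and `no_slot_set` are hypothetical stand-ins for the real configuration types, not part of this PR.

```rust
use std::collections::HashSet;

// Hypothetical stand-ins for the real network-config types.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct PeerId(u64);

struct ReservedNode {
    peer_id: PeerId,
}

fn no_slot_set(reserved_nodes: &[ReservedNode]) -> HashSet<PeerId> {
    // Collect once at startup; the set is immutable afterwards, so
    // shrink_to_fit trades one reallocation for a tighter footprint.
    let mut no_slot_p: HashSet<PeerId> =
        reserved_nodes.iter().map(|r| r.peer_id).collect();
    no_slot_p.shrink_to_fit();
    no_slot_p
}

fn main() {
    let set = no_slot_set(&[ReservedNode { peer_id: PeerId(1) }]);
    assert!(set.contains(&PeerId(1)));
    assert!(!set.contains(&PeerId(2)));
}
```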
@@ -404,6 +419,8 @@ where
             genesis_hash: info.genesis_hash,
             chain_sync,
             important_peers,
+            default_peers_set_no_slot_peers,
+            default_peers_set_no_slot_connected_peers: HashSet::new(),
             default_peers_set_num_full: network_config.default_peers_set_num_full as usize,
             default_peers_set_num_light: {
                 let total = network_config.default_peers_set.out_peers +
@@ -542,6 +559,7 @@ where
                 self.pending_messages
                     .push_back(CustomMessageOutcome::BlockImport(origin, blocks));
             }
+            self.default_peers_set_no_slot_connected_peers.remove(&peer);
             Ok(())
         } else {
             Err(())
@@ -723,7 +741,14 @@ where
             }
         }
 
-        if status.roles.is_full() && self.chain_sync.num_peers() >= self.default_peers_set_num_full
+        let no_slot_peer = self.default_peers_set_no_slot_peers.contains(&who);
+        let this_peer_reserved_slot: usize = if no_slot_peer { 1 } else { 0 };
+
+        if status.roles.is_full() &&
+            self.chain_sync.num_peers() >=
+                self.default_peers_set_num_full +
+                    self.default_peers_set_no_slot_connected_peers.len() +
+                    this_peer_reserved_slot
         {
             debug!(target: "sync", "Too many full nodes, rejecting {}", who);
             self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_SYNC);
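
This hunk is the heart of the change. The old condition compared `num_peers()` against `default_peers_set_num_full` alone, so connected reserved peers silently consumed regular full slots. The new condition raises the cap by the number of no-slot peers currently connected, plus one more slot when the incoming peer is itself a no-slot peer, so a reserved node is admitted even when every regular slot is occupied. Here is a sketch of the predicate extracted as a pure function, with worked numbers; the function name and the values in `main` are illustrative, not taken from the PR.

```rust
/// Admission check for full peers, modelled as a pure function.
/// `num_peers` counts all connected full peers, including no-slot ones.
fn reject_full_peer(
    num_peers: usize,
    num_full_slots: usize,
    no_slot_connected: usize,
    incoming_is_no_slot: bool,
) -> bool {
    let this_peer_reserved_slot = if incoming_is_no_slot { 1 } else { 0 };
    num_peers >= num_full_slots + no_slot_connected + this_peer_reserved_slot
}

fn main() {
    // 8 regular full slots, 2 reserved (no-slot) peers already connected:
    // a regular peer still fits while fewer than 8 + 2 = 10 peers are up...
    assert!(!reject_full_peer(9, 8, 2, false));
    assert!(reject_full_peer(10, 8, 2, false));
    // ...and a reserved peer gets one slot on top of that, so it is
    // never squeezed out by regular peers filling the default set.
    assert!(!reject_full_peer(10, 8, 2, true));
    assert!(reject_full_peer(11, 8, 2, true));
}
```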
@@ -767,6 +792,9 @@ where
debug!(target: "sync", "Connected {}", who);

self.peers.insert(who, peer);
if no_slot_peer {
self.default_peers_set_no_slot_connected_peers.insert(who);
}
self.pending_messages
.push_back(CustomMessageOutcome::PeerNewBest(who, status.best_number));

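
Taken together, the bookkeeping is symmetric: a peer found in `default_peers_set_no_slot_peers` is added to `default_peers_set_no_slot_connected_peers` once its handshake is accepted, and removed again in the disconnect path shown earlier, so the extra capacity granted by the admission check always equals the number of reserved peers actually connected. Below is a small model of that lifecycle, reusing the simplified `PeerId` stand-in from the sketch above; `SlotTracker` and its methods are hypothetical, not the real `Protocol` API.

```rust
use std::collections::HashSet;

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct PeerId(u64);

/// Minimal model of the two sets this PR adds to `Protocol`.
struct SlotTracker {
    no_slot_peers: HashSet<PeerId>,
    no_slot_connected: HashSet<PeerId>,
}

impl SlotTracker {
    fn on_connect(&mut self, who: PeerId) {
        // Mirrors the insert performed after a successful handshake.
        if self.no_slot_peers.contains(&who) {
            self.no_slot_connected.insert(who);
        }
    }

    fn on_disconnect(&mut self, who: PeerId) {
        // Mirrors the removal in the disconnect path; removing a peer
        // that was never inserted is a harmless no-op.
        self.no_slot_connected.remove(&who);
    }
}

fn main() {
    let mut tracker = SlotTracker {
        no_slot_peers: [PeerId(1)].into_iter().collect(),
        no_slot_connected: HashSet::new(),
    };
    tracker.on_connect(PeerId(1));
    assert_eq!(tracker.no_slot_connected.len(), 1);
    tracker.on_disconnect(PeerId(1));
    assert!(tracker.no_slot_connected.is_empty());
}
```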