From 64023d981d07d730eaad207b6b83021e354f1397 Mon Sep 17 00:00:00 2001 From: Diva M Date: Tue, 23 Aug 2022 13:10:53 -0500 Subject: [PATCH 01/31] version upgrades --- Cargo.lock | 158 ++++++++++------------ beacon_node/lighthouse_network/Cargo.toml | 4 +- 2 files changed, 73 insertions(+), 89 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d742276687b..bcb3e758478 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3223,9 +3223,9 @@ dependencies = [ [[package]] name = "libp2p" -version = "0.45.1" +version = "0.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41726ee8f662563fafba2d2d484b14037cc8ecb8c953fbfc8439d4ce3a0a9029" +checksum = "2e7cc4d88e132823122905158c8e019173da72117825ad82154890beff02967e" dependencies = [ "bytes", "futures", @@ -3233,7 +3233,7 @@ dependencies = [ "getrandom 0.2.7", "instant", "lazy_static", - "libp2p-core 0.33.0", + "libp2p-core 0.35.1", "libp2p-dns", "libp2p-gossipsub", "libp2p-identify", @@ -3290,9 +3290,9 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.33.0" +version = "0.35.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42d46fca305dee6757022e2f5a4f6c023315084d0ed7441c3ab244e76666d979" +checksum = "583862167683fea9e4712f2802910df067ca5f83e6b88979be16364904c2bdf1" dependencies = [ "asn1_der", "bs58", @@ -3310,8 +3310,8 @@ dependencies = [ "multistream-select 0.11.0", "parking_lot 0.12.1", "pin-project 1.0.11", - "prost 0.10.4", - "prost-build 0.10.4", + "prost 0.11.0", + "prost-build 0.11.1", "rand 0.8.5", "ring", "rw-stream-sink 0.3.0", @@ -3325,12 +3325,12 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.33.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbb462ec3a51fab457b4b44ac295e8b0a4b04dc175127e615cf996b1f0f1a268" +checksum = "d8a09386cb4891343703614454a4e5f1084a22589f655d48a78c8dcad6b09d0a" dependencies = [ "futures", - "libp2p-core 0.33.0", + "libp2p-core 0.35.1", "log", "parking_lot 0.12.1", "smallvec", @@ -3339,9 +3339,9 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.38.1" +version = "0.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43e064ba4d7832e01c738626c6b274ae100baba05f5ffcc7b265c2a3ed398108" +checksum = "108af01af425f6184911301ff8d33e78cf3de88d8b24aee83ca466acba500dc9" dependencies = [ "asynchronous-codec", "base64", @@ -3351,12 +3351,12 @@ dependencies = [ "futures", "hex_fmt", "instant", - "libp2p-core 0.33.0", + "libp2p-core 0.35.1", "libp2p-swarm", "log", "prometheus-client", - "prost 0.10.4", - "prost-build 0.10.4", + "prost 0.11.0", + "prost-build 0.11.1", "rand 0.7.3", "regex", "sha2 0.10.2", @@ -3367,19 +3367,19 @@ dependencies = [ [[package]] name = "libp2p-identify" -version = "0.36.1" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b84b53490442d086db1fa5375670c9666e79143dccadef3f7c74a4346899a984" +checksum = "edad807953d75e3c7f015a118c6a6bb85349eb134a2646a0a5a4c05db0f86737" dependencies = [ "asynchronous-codec", "futures", "futures-timer", - "libp2p-core 0.33.0", + "libp2p-core 0.35.1", "libp2p-swarm", "log", "lru", - "prost 0.10.4", - "prost-build 0.10.4", + "prost 0.11.0", + "prost-build 0.11.1", "prost-codec", "smallvec", "thiserror", @@ -3388,11 +3388,11 @@ dependencies = [ [[package]] name = "libp2p-metrics" -version = "0.6.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "564a7e5284d7d9b3140fdfc3cb6567bc32555e86a21de5604c2ec85da05cf384" +checksum = "1d1364ebcfe0146428fceee2d12e57ba79f94f001945bddecdb70d97406b91c2" dependencies = [ - "libp2p-core 0.33.0", + "libp2p-core 0.35.1", "libp2p-gossipsub", "libp2p-identify", "libp2p-swarm", @@ -3401,14 +3401,14 @@ dependencies = [ [[package]] name = "libp2p-mplex" -version = "0.33.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ff9c893f2367631a711301d703c47432af898c9bb8253bea0e2c051a13f7640" +checksum = "17eb2b734c3c5dc49408e257a70872b42eb21cd3ca9dc34e3c20a2a131120088" dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.33.0", + "libp2p-core 0.35.1", "log", "nohash-hasher", "parking_lot 0.12.1", @@ -3419,18 +3419,18 @@ dependencies = [ [[package]] name = "libp2p-noise" -version = "0.36.0" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf2cee1dad1c83325bbd182a8e94555778699cec8a9da00086efb7522c4c15ad" +checksum = "9a550a023fe31aafb3a5e9831ac124d43060807d7c201a035922ec75857c7e3e" dependencies = [ "bytes", "curve25519-dalek 3.2.0", "futures", "lazy_static", - "libp2p-core 0.33.0", + "libp2p-core 0.35.1", "log", - "prost 0.10.4", - "prost-build 0.10.4", + "prost 0.11.0", + "prost-build 0.11.1", "rand 0.8.5", "sha2 0.10.2", "snow", @@ -3441,33 +3441,33 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.33.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db007e737adc5d28b2e03223b0210164928ad742591127130796a72aa8eaf54f" +checksum = "5a7aac8b2b2aa25615c27d3d424038b855499c16acee8dbada08d8f39a28363f" dependencies = [ "asynchronous-codec", "bytes", "futures", - "libp2p-core 0.33.0", + "libp2p-core 0.35.1", "log", - "prost 0.10.4", - "prost-build 0.10.4", + "prost 0.11.0", + "prost-build 0.11.1", "unsigned-varint 0.7.1", "void", ] [[package]] name = "libp2p-swarm" -version = "0.36.1" +version = "0.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f4bb21c5abadbf00360c734f16bf87f1712ed4f23cd46148f625d2ddb867346" +checksum = "d91932a67f579ac6d66b51603a75b04c2737af7f84c9e44743f81978be951933" dependencies = [ "either", "fnv", "futures", "futures-timer", "instant", - "libp2p-core 0.33.0", + "libp2p-core 0.35.1", "log", "pin-project 1.0.11", "rand 0.7.3", @@ -3478,26 +3478,27 @@ dependencies = [ [[package]] name = "libp2p-swarm-derive" -version = "0.27.2" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f693c8c68213034d472cbb93a379c63f4f307d97c06f1c41e4985de481687a5" +checksum = "27b2aac6a51e400168345fd89aaa5ce55395f49297e1d4fbf4acf0c6104ad096" dependencies = [ + "heck 0.4.0", "quote", "syn", ] [[package]] name = "libp2p-tcp" -version = "0.33.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f4933e38ef21b50698aefc87799c24f2a365c9d3f6cf50471f3f6a0bc410892" +checksum = "ebf0bba86870fd7b2d74f9b939066be5b984b46989b02d13d292fcf3caab3fdb" dependencies = [ "futures", "futures-timer", "if-addrs 0.7.0", "ipnet", "libc", - "libp2p-core 0.33.0", + "libp2p-core 0.35.1", "log", "socket2", "tokio", @@ -3505,14 +3506,14 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.35.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"39d398fbb29f432c4128fabdaac2ed155c3bcaf1b9bd40eeeb10a471eefacbf5" +checksum = "d32644385b7787ada559b49957efd939d857ad1357d12f4cc4af7938635b6f20" dependencies = [ "either", "futures", "futures-rustls", - "libp2p-core 0.33.0", + "libp2p-core 0.35.1", "log", "parking_lot 0.12.1", "quicksink", @@ -3524,12 +3525,12 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.37.0" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fe653639ad74877c759720febb0cbcbf4caa221adde4eed2d3126ce5c6f381f" +checksum = "1d1dc106f1f0c67aa89d59184dd8ae1db0bc683337dcdc36b6dc6e31dafc35ea" dependencies = [ "futures", - "libp2p-core 0.33.0", + "libp2p-core 0.35.1", "parking_lot 0.12.1", "thiserror", "yamux", @@ -4430,15 +4431,6 @@ dependencies = [ "types", ] -[[package]] -name = "owning_ref" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff55baddef9e4ad00f88b6c743a2a8062d4c6ade126c2a528644b8e444d52ce" -dependencies = [ - "stable_deref_trait", -] - [[package]] name = "parity-scale-codec" version = "2.3.1" @@ -4866,21 +4858,21 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.16.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac1abe0255c04d15f571427a2d1e00099016506cf3297b53853acd2b7eb87825" +checksum = "3c473049631c233933d6286c88bbb7be30e62ec534cf99a9ae0079211f7fa603" dependencies = [ "dtoa", "itoa 1.0.2", - "owning_ref", + "parking_lot 0.12.1", "prometheus-client-derive-text-encode", ] [[package]] name = "prometheus-client-derive-text-encode" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8e12d01b9d66ad9eb4529c57666b6263fc1993cb30261d83ead658fdd932652" +checksum = "66a455fbcb954c1a7decf3c586e860fd7889cddf4b8e164be736dbac95a953cd" dependencies = [ "proc-macro2", "quote", @@ -4899,12 +4891,12 @@ dependencies = [ [[package]] name = "prost" -version = "0.10.4" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71adf41db68aa0daaefc69bb30bcd68ded9b9abaad5d1fbb6304c4fb390e083e" +checksum = "399c3c31cdec40583bb68f0b18403400d01ec4289c383aa047560439952c4dd7" dependencies = [ "bytes", - "prost-derive 0.10.1", + "prost-derive 0.11.0", ] [[package]] @@ -4929,21 +4921,19 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.10.4" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae5a4388762d5815a9fc0dea33c56b021cdc8dde0c55e0c9ca57197254b0cab" +checksum = "7f835c582e6bd972ba8347313300219fed5bfa52caf175298d860b61ff6069bb" dependencies = [ "bytes", - "cfg-if", - "cmake", "heck 0.4.0", "itertools", "lazy_static", "log", "multimap", "petgraph", - "prost 0.10.4", - "prost-types 0.10.1", + "prost 0.11.0", + "prost-types 0.11.1", "regex", "tempfile", "which", @@ -4951,13 +4941,13 @@ dependencies = [ [[package]] name = "prost-codec" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00af1e92c33b4813cc79fda3f2dbf56af5169709be0202df730e9ebc3e4cd007" +checksum = "011ae9ff8359df7915f97302d591cdd9e0e27fbd5a4ddc5bd13b71079bb20987" dependencies = [ "asynchronous-codec", "bytes", - "prost 0.10.4", + "prost 0.11.0", "thiserror", "unsigned-varint 0.7.1", ] @@ -4977,9 +4967,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.10.1" +version = "0.11.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b670f45da57fb8542ebdbb6105a925fe571b67f9e7ed9f47a06a84e72b4e7cc" +checksum = "7345d5f0e08c0536d7ac7229952590239e77abf0a0100a1b1d890add6ea96364" dependencies = [ "anyhow", "itertools", @@ -5000,12 +4990,12 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.10.1" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d0a014229361011dc8e69c8a1ec6c2e8d0f2af7c91e3ea3f5b2170298461e68" +checksum = "4dfaa718ad76a44b3415e6c4d53b17c8f99160dcb3a99b10470fce8ad43f6e3e" dependencies = [ "bytes", - "prost 0.10.4", + "prost 0.11.0", ] [[package]] @@ -6262,12 +6252,6 @@ dependencies = [ "syn", ] -[[package]] -name = "stable_deref_trait" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" - [[package]] name = "state_processing" version = "0.2.0" diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index c6ba5305088..e23a53b8ce6 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -37,12 +37,12 @@ directory = { path = "../../common/directory" } regex = "1.5.5" strum = { version = "0.24.0", features = ["derive"] } superstruct = "0.5.0" -prometheus-client = "0.16.0" +prometheus-client = "0.18.0" unused_port = { path = "../../common/unused_port" } delay_map = "0.1.1" [dependencies.libp2p] -version = "0.45.1" +version = "0.47.0" default-features = false features = ["websocket", "identify", "mplex", "yamux", "noise", "gossipsub", "dns-tokio", "tcp-tokio", "plaintext", "secp256k1"] From 01b6b629abe16bdac0b002a5679298e20d42ad27 Mon Sep 17 00:00:00 2001 From: Diva M Date: Tue, 23 Aug 2022 13:11:11 -0500 Subject: [PATCH 02/31] type renames to aid in refactor --- .../lighthouse_network/src/behaviour/mod.rs | 48 +++++++++--------- beacon_node/lighthouse_network/src/lib.rs | 2 +- beacon_node/lighthouse_network/src/service.rs | 12 ++--- .../lighthouse_network/tests/rpc_tests.rs | 50 +++++++++---------- beacon_node/network/src/service.rs | 22 ++++---- 5 files changed, 67 insertions(+), 67 deletions(-) diff --git a/beacon_node/lighthouse_network/src/behaviour/mod.rs b/beacon_node/lighthouse_network/src/behaviour/mod.rs index 931ced3bf94..e41e5c726ed 100644 --- a/beacon_node/lighthouse_network/src/behaviour/mod.rs +++ b/beacon_node/lighthouse_network/src/behaviour/mod.rs @@ -76,7 +76,7 @@ pub enum RequestId { /// The types of events than can be obtained from polling the behaviour. #[derive(Debug)] -pub enum BehaviourEvent { +pub enum OldBehaviourEvent { /// We have successfully dialed and connected to a peer. PeerConnectedOutgoing(PeerId), /// A peer has successfully dialed and connected to us. @@ -133,7 +133,7 @@ pub enum BehaviourEvent { poll_method = "poll", event_process = true )] -pub struct Behaviour { +pub struct Network { /* Sub-Behaviours */ /// The routing pub-sub mechanism for eth2. gossipsub: Gossipsub, @@ -151,7 +151,7 @@ pub struct Behaviour { /* Auxiliary Fields */ /// The output events generated by this behaviour to be consumed in the swarm poll. #[behaviour(ignore)] - events: VecDeque>, + events: VecDeque>, /// A collections of variables accessible outside the network service. #[behaviour(ignore)] network_globals: Arc>, @@ -183,7 +183,7 @@ pub struct Behaviour { } /// Implements the combined behaviour for the libp2p service. 
-impl Behaviour { +impl Network { pub async fn new( local_key: &Keypair, ctx: ServiceContext<'_>, @@ -295,7 +295,7 @@ impl Behaviour { // .sync_committee_message_timeout(timeout) // Do not retry .build(); - Ok(Behaviour { + Ok(Network { // Sub-behaviours gossipsub, eth2_rpc: RPC::new(ctx.fork_context.clone(), log.clone()), @@ -758,7 +758,7 @@ impl Behaviour { response: Response, ) { match id { - RequestId::Application(id) => self.add_event(BehaviourEvent::ResponseReceived { + RequestId::Application(id) => self.add_event(OldBehaviourEvent::ResponseReceived { peer_id, id, response, @@ -781,7 +781,7 @@ impl Behaviour { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_root"]) } } - self.add_event(BehaviourEvent::RequestReceived { + self.add_event(OldBehaviourEvent::RequestReceived { peer_id, id, request, @@ -789,7 +789,7 @@ impl Behaviour { } /// Adds an event to the queue waking the current task to process it. - fn add_event(&mut self, event: BehaviourEvent) { + fn add_event(&mut self, event: OldBehaviourEvent) { self.events.push_back(event); if let Some(waker) = &self.waker { waker.wake_by_ref(); @@ -863,7 +863,7 @@ impl Behaviour { */ // Gossipsub -impl NetworkBehaviourEventProcess for Behaviour +impl NetworkBehaviourEventProcess for Network where AppReqId: ReqId, TSpec: EthSpec, @@ -891,7 +891,7 @@ where } Ok(msg) => { // Notify the network - self.add_event(BehaviourEvent::PubsubMessage { + self.add_event(OldBehaviourEvent::PubsubMessage { id, source: propagation_source, topic: gs_msg.topic, @@ -960,7 +960,7 @@ where // RPC impl NetworkBehaviourEventProcess, TSpec>> - for Behaviour + for Network where AppReqId: ReqId, TSpec: EthSpec, @@ -1007,7 +1007,7 @@ where ); // inform failures of requests comming outside the behaviour if let RequestId::Application(id) = id { - self.add_event(BehaviourEvent::RPCFailed { peer_id, id }); + self.add_event(OldBehaviourEvent::RPCFailed { peer_id, id }); } } } @@ -1112,7 +1112,7 @@ where } // Discovery -impl NetworkBehaviourEventProcess for Behaviour +impl NetworkBehaviourEventProcess for Network where AppReqId: ReqId, TSpec: EthSpec, @@ -1130,7 +1130,7 @@ where } // Identify -impl NetworkBehaviourEventProcess for Behaviour +impl NetworkBehaviourEventProcess for Network where TSpec: EthSpec, AppReqId: ReqId, @@ -1156,9 +1156,9 @@ where } type BehaviourHandler = - as NetworkBehaviour>::ConnectionHandler; + as NetworkBehaviour>::ConnectionHandler; -impl Behaviour +impl Network where TSpec: EthSpec, AppReqId: ReqId, @@ -1168,7 +1168,7 @@ where &mut self, cx: &mut Context, _: &mut impl PollParameters, - ) -> Poll, BehaviourHandler>> { + ) -> Poll, BehaviourHandler>> { if let Some(waker) = &self.waker { if waker.will_wake(cx.waker()) { self.waker = Some(cx.waker().clone()); @@ -1206,31 +1206,31 @@ where } impl NetworkBehaviourEventProcess - for Behaviour + for Network { fn inject_event(&mut self, event: PeerManagerEvent) { match event { PeerManagerEvent::PeerConnectedIncoming(peer_id) => { - self.add_event(BehaviourEvent::PeerConnectedIncoming(peer_id)); + self.add_event(OldBehaviourEvent::PeerConnectedIncoming(peer_id)); } PeerManagerEvent::PeerConnectedOutgoing(peer_id) => { - self.add_event(BehaviourEvent::PeerConnectedOutgoing(peer_id)); + self.add_event(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)); } PeerManagerEvent::PeerDisconnected(peer_id) => { - self.add_event(BehaviourEvent::PeerDisconnected(peer_id)); + self.add_event(OldBehaviourEvent::PeerDisconnected(peer_id)); } PeerManagerEvent::Banned(peer_id, associated_ips) => { 
self.discovery.ban_peer(&peer_id, associated_ips); - self.add_event(BehaviourEvent::PeerBanned(peer_id)); + self.add_event(OldBehaviourEvent::PeerBanned(peer_id)); } PeerManagerEvent::UnBanned(peer_id, associated_ips) => { self.discovery.unban_peer(&peer_id, associated_ips); - self.add_event(BehaviourEvent::PeerUnbanned(peer_id)); + self.add_event(OldBehaviourEvent::PeerUnbanned(peer_id)); } PeerManagerEvent::Status(peer_id) => { // it's time to status. We don't keep a beacon chain reference here, so we inform // the network to send a status to this peer - self.add_event(BehaviourEvent::StatusPeer(peer_id)); + self.add_event(OldBehaviourEvent::StatusPeer(peer_id)); } PeerManagerEvent::DiscoverPeers(peers_to_find) => { // Peer manager has requested a discovery query for more peers. diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index f679b7e6572..0be51f10565 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -69,7 +69,7 @@ pub use crate::types::{ pub use prometheus_client; -pub use behaviour::{BehaviourEvent, Gossipsub, PeerRequestId, Request, Response}; +pub use behaviour::{OldBehaviourEvent, Gossipsub, PeerRequestId, Request, Response}; pub use config::Config as NetworkConfig; pub use discovery::{CombinedKeyExt, EnrExt, Eth2Enr}; pub use discv5; diff --git a/beacon_node/lighthouse_network/src/service.rs b/beacon_node/lighthouse_network/src/service.rs index bcd546fb00b..0be2954ea09 100644 --- a/beacon_node/lighthouse_network/src/service.rs +++ b/beacon_node/lighthouse_network/src/service.rs @@ -1,5 +1,5 @@ use crate::behaviour::{ - save_metadata_to_disk, Behaviour, BehaviourEvent, PeerRequestId, Request, Response, + save_metadata_to_disk, Network, OldBehaviourEvent, PeerRequestId, Request, Response, }; use crate::config::NetworkLoad; use crate::discovery::enr; @@ -42,7 +42,7 @@ pub const METADATA_FILENAME: &str = "metadata"; #[derive(Debug)] pub enum Libp2pEvent { /// A behaviour event - Behaviour(BehaviourEvent), + Behaviour(OldBehaviourEvent), /// A new listening address has been established. NewListenAddr(Multiaddr), /// We reached zero listening addresses. @@ -52,7 +52,7 @@ pub enum Libp2pEvent { /// The configuration and state of the libp2p components for the beacon node. pub struct Service { /// The libp2p Swarm handler. - pub swarm: Swarm>, + pub swarm: Swarm>, /// The bandwidth logger for the underlying libp2p transport. pub bandwidth: Arc, /// This node's PeerId. 
@@ -120,7 +120,7 @@ impl Service { // Lighthouse network behaviour let behaviour = - Behaviour::new(&local_keypair, ctx, network_globals.clone(), &log).await?; + Network::new(&local_keypair, ctx, network_globals.clone(), &log).await?; // use the executor for libp2p struct Executor(task_executor::TaskExecutor); @@ -311,10 +311,10 @@ impl Service { SwarmEvent::Behaviour(behaviour) => { // Handle banning here match &behaviour { - BehaviourEvent::PeerBanned(peer_id) => { + OldBehaviourEvent::PeerBanned(peer_id) => { self.swarm.ban_peer_id(*peer_id); } - BehaviourEvent::PeerUnbanned(peer_id) => { + OldBehaviourEvent::PeerUnbanned(peer_id) => { self.swarm.unban_peer_id(*peer_id); } _ => {} diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 90052859bc9..7f593bf04cd 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -1,7 +1,7 @@ #![cfg(test)] use lighthouse_network::rpc::methods::*; use lighthouse_network::{ - rpc::max_rpc_size, BehaviourEvent, Libp2pEvent, ReportSource, Request, Response, + rpc::max_rpc_size, OldBehaviourEvent, Libp2pEvent, ReportSource, Request, Response, }; use slog::{debug, warn, Level}; use ssz::Encode; @@ -86,7 +86,7 @@ fn test_status_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + Libp2pEvent::Behaviour(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a STATUS message debug!(log, "Sending RPC"); sender @@ -94,7 +94,7 @@ fn test_status_rpc() { .behaviour_mut() .send_request(peer_id, 10, rpc_request.clone()); } - Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { + Libp2pEvent::Behaviour(OldBehaviourEvent::ResponseReceived { peer_id: _, id: 10, response, @@ -114,7 +114,7 @@ fn test_status_rpc() { let receiver_future = async { loop { match receiver.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + Libp2pEvent::Behaviour(OldBehaviourEvent::RequestReceived { peer_id, id, request, @@ -191,7 +191,7 @@ fn test_blocks_by_range_chunked_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + Libp2pEvent::Behaviour(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a STATUS message debug!(log, "Sending RPC"); sender.swarm.behaviour_mut().send_request( @@ -200,7 +200,7 @@ fn test_blocks_by_range_chunked_rpc() { rpc_request.clone(), ); } - Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { + Libp2pEvent::Behaviour(OldBehaviourEvent::ResponseReceived { peer_id: _, id: _, response, @@ -236,7 +236,7 @@ fn test_blocks_by_range_chunked_rpc() { let receiver_future = async { loop { match receiver.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + Libp2pEvent::Behaviour(OldBehaviourEvent::RequestReceived { peer_id, id, request, @@ -318,7 +318,7 @@ fn test_blocks_by_range_over_limit() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + Libp2pEvent::Behaviour(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a STATUS message debug!(log, "Sending RPC"); sender.swarm.behaviour_mut().send_request( @@ -328,7 +328,7 @@ fn test_blocks_by_range_over_limit() { ); } // The request will fail because the sender will refuse to send anything > 
MAX_RPC_SIZE - Libp2pEvent::Behaviour(BehaviourEvent::RPCFailed { id, .. }) => { + Libp2pEvent::Behaviour(OldBehaviourEvent::RPCFailed { id, .. }) => { assert_eq!(id, request_id); return; } @@ -341,7 +341,7 @@ fn test_blocks_by_range_over_limit() { let receiver_future = async { loop { match receiver.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + Libp2pEvent::Behaviour(OldBehaviourEvent::RequestReceived { peer_id, id, request, @@ -418,7 +418,7 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + Libp2pEvent::Behaviour(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a STATUS message debug!(log, "Sending RPC"); sender.swarm.behaviour_mut().send_request( @@ -427,7 +427,7 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { rpc_request.clone(), ); } - Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { + Libp2pEvent::Behaviour(OldBehaviourEvent::ResponseReceived { peer_id: _, id: _, response, @@ -469,7 +469,7 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { .await { futures::future::Either::Left(( - Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + Libp2pEvent::Behaviour(OldBehaviourEvent::RequestReceived { peer_id, id, request, @@ -550,7 +550,7 @@ fn test_blocks_by_range_single_empty_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + Libp2pEvent::Behaviour(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a STATUS message debug!(log, "Sending RPC"); sender @@ -558,7 +558,7 @@ fn test_blocks_by_range_single_empty_rpc() { .behaviour_mut() .send_request(peer_id, 10, rpc_request.clone()); } - Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { + Libp2pEvent::Behaviour(OldBehaviourEvent::ResponseReceived { peer_id: _, id: 10, response, @@ -585,7 +585,7 @@ fn test_blocks_by_range_single_empty_rpc() { let receiver_future = async { loop { match receiver.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + Libp2pEvent::Behaviour(OldBehaviourEvent::RequestReceived { peer_id, id, request, @@ -676,7 +676,7 @@ fn test_blocks_by_root_chunked_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + Libp2pEvent::Behaviour(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a STATUS message debug!(log, "Sending RPC"); sender @@ -684,7 +684,7 @@ fn test_blocks_by_root_chunked_rpc() { .behaviour_mut() .send_request(peer_id, 6, rpc_request.clone()); } - Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { + Libp2pEvent::Behaviour(OldBehaviourEvent::ResponseReceived { peer_id: _, id: 6, response, @@ -717,7 +717,7 @@ fn test_blocks_by_root_chunked_rpc() { let receiver_future = async { loop { match receiver.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + Libp2pEvent::Behaviour(OldBehaviourEvent::RequestReceived { peer_id, id, request, @@ -811,7 +811,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + Libp2pEvent::Behaviour(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a STATUS message 
debug!(log, "Sending RPC"); sender @@ -819,7 +819,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { .behaviour_mut() .send_request(peer_id, 10, rpc_request.clone()); } - Libp2pEvent::Behaviour(BehaviourEvent::ResponseReceived { + Libp2pEvent::Behaviour(OldBehaviourEvent::ResponseReceived { peer_id: _, id: 10, response, @@ -861,7 +861,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { .await { futures::future::Either::Left(( - Libp2pEvent::Behaviour(BehaviourEvent::RequestReceived { + Libp2pEvent::Behaviour(OldBehaviourEvent::RequestReceived { peer_id, id, request, @@ -926,7 +926,7 @@ fn test_goodbye_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + Libp2pEvent::Behaviour(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a goodbye and disconnect debug!(log, "Sending RPC"); sender.swarm.behaviour_mut().goodbye_peer( @@ -935,7 +935,7 @@ fn test_goodbye_rpc() { ReportSource::SyncService, ); } - Libp2pEvent::Behaviour(BehaviourEvent::PeerDisconnected(_)) => { + Libp2pEvent::Behaviour(OldBehaviourEvent::PeerDisconnected(_)) => { return; } _ => {} // Ignore other RPC messages @@ -947,7 +947,7 @@ fn test_goodbye_rpc() { let receiver_future = async { loop { match receiver.next_event().await { - Libp2pEvent::Behaviour(BehaviourEvent::PeerDisconnected(_)) => { + Libp2pEvent::Behaviour(OldBehaviourEvent::PeerDisconnected(_)) => { // Should receive sent RPC request return; } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index f5e32dcff0a..37ff6e4d434 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -21,7 +21,7 @@ use lighthouse_network::{ }; use lighthouse_network::{ types::{GossipEncoding, GossipTopic}, - BehaviourEvent, MessageId, NetworkGlobals, PeerId, + OldBehaviourEvent, MessageId, NetworkGlobals, PeerId, }; use slog::{crit, debug, error, info, o, trace, warn}; use std::{net::SocketAddr, pin::Pin, sync::Arc, time::Duration}; @@ -461,18 +461,18 @@ impl NetworkService { ) { match ev { Libp2pEvent::Behaviour(event) => match event { - BehaviourEvent::PeerConnectedOutgoing(peer_id) => { + OldBehaviourEvent::PeerConnectedOutgoing(peer_id) => { self.send_to_router(RouterMessage::PeerDialed(peer_id)); } - BehaviourEvent::PeerConnectedIncoming(_) - | BehaviourEvent::PeerBanned(_) - | BehaviourEvent::PeerUnbanned(_) => { + OldBehaviourEvent::PeerConnectedIncoming(_) + | OldBehaviourEvent::PeerBanned(_) + | OldBehaviourEvent::PeerUnbanned(_) => { // No action required for these events. 
} - BehaviourEvent::PeerDisconnected(peer_id) => { + OldBehaviourEvent::PeerDisconnected(peer_id) => { self.send_to_router(RouterMessage::PeerDisconnected(peer_id)); } - BehaviourEvent::RequestReceived { + OldBehaviourEvent::RequestReceived { peer_id, id, request, @@ -483,7 +483,7 @@ impl NetworkService { request, }); } - BehaviourEvent::ResponseReceived { + OldBehaviourEvent::ResponseReceived { peer_id, id, response, @@ -494,16 +494,16 @@ impl NetworkService { response, }); } - BehaviourEvent::RPCFailed { id, peer_id } => { + OldBehaviourEvent::RPCFailed { id, peer_id } => { self.send_to_router(RouterMessage::RPCFailed { peer_id, request_id: id, }); } - BehaviourEvent::StatusPeer(peer_id) => { + OldBehaviourEvent::StatusPeer(peer_id) => { self.send_to_router(RouterMessage::StatusPeer(peer_id)); } - BehaviourEvent::PubsubMessage { + OldBehaviourEvent::PubsubMessage { id, source, message, From 289a9d0fc02fe385c3236310599eb5fa3deb70e9 Mon Sep 17 00:00:00 2001 From: Diva M Date: Tue, 23 Aug 2022 14:26:15 -0500 Subject: [PATCH 03/31] remove network behaviour implementation of old behaviour, move swarm inside --- .../lighthouse_network/src/behaviour/mod.rs | 192 ++++++------------ 1 file changed, 62 insertions(+), 130 deletions(-) diff --git a/beacon_node/lighthouse_network/src/behaviour/mod.rs b/beacon_node/lighthouse_network/src/behaviour/mod.rs index e41e5c726ed..e8c291381e7 100644 --- a/beacon_node/lighthouse_network/src/behaviour/mod.rs +++ b/beacon_node/lighthouse_network/src/behaviour/mod.rs @@ -19,6 +19,7 @@ use crate::Eth2Enr; use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; use futures::stream::StreamExt; use libp2p::gossipsub::error::PublishError; +use libp2p::Swarm; use libp2p::{ core::{connection::ConnectionId, identity::Keypair}, gossipsub::{ @@ -28,10 +29,7 @@ use libp2p::{ MessageAuthenticity, MessageId, }, identify::{Identify, IdentifyConfig, IdentifyEvent}, - swarm::{ - NetworkBehaviour, NetworkBehaviourAction as NBAction, NetworkBehaviourEventProcess, - PollParameters, - }, + swarm::{NetworkBehaviour, NetworkBehaviourAction as NBAction, PollParameters}, NetworkBehaviour, PeerId, }; use slog::{crit, debug, o, trace, warn}; @@ -124,17 +122,8 @@ pub enum OldBehaviourEvent { StatusPeer(PeerId), } -/// Builds the network behaviour that manages the core protocols of eth2. -/// This core behaviour is managed by `Behaviour` which adds peer management to all core -/// behaviours. #[derive(NetworkBehaviour)] -#[behaviour( - out_event = "BehaviourEvent", - poll_method = "poll", - event_process = true -)] -pub struct Network { - /* Sub-Behaviours */ +struct Behaviour { /// The routing pub-sub mechanism for eth2. gossipsub: Gossipsub, /// The Eth2 RPC specified in the wire-0 protocol. @@ -147,38 +136,34 @@ pub struct Network { identify: Identify, /// The peer manager that keeps track of peer's reputation and status. peer_manager: PeerManager, +} +/// Builds the network behaviour that manages the core protocols of eth2. +/// This core behaviour is managed by `Behaviour` which adds peer management to all core +/// behaviours. +pub struct Network { + swarm: libp2p::swarm::Swarm>, /* Auxiliary Fields */ /// The output events generated by this behaviour to be consumed in the swarm poll. - #[behaviour(ignore)] events: VecDeque>, /// A collections of variables accessible outside the network service. - #[behaviour(ignore)] network_globals: Arc>, /// Keeps track of the current EnrForkId for upgrading gossipsub topics. 
// NOTE: This can be accessed via the network_globals ENR. However we keep it here for quick // lookups for every gossipsub message send. - #[behaviour(ignore)] enr_fork_id: EnrForkId, /// The waker for the current task. This is used to wake the task when events are added to the /// queue. - #[behaviour(ignore)] waker: Option, /// Directory where metadata is stored. - #[behaviour(ignore)] network_dir: PathBuf, - #[behaviour(ignore)] fork_context: Arc, /// Gossipsub score parameters. - #[behaviour(ignore)] score_settings: PeerScoreSettings, /// The interval for updating gossipsub scores - #[behaviour(ignore)] update_gossipsub_scores: tokio::time::Interval, - #[behaviour(ignore)] gossip_cache: GossipCache, /// Logger for behaviour actions. - #[behaviour(ignore)] log: slog::Logger, } @@ -295,14 +280,17 @@ impl Network { // .sync_committee_message_timeout(timeout) // Do not retry .build(); + /* + * gossipsub, + eth2_rpc: RPC::new(ctx.fork_context.clone(), log.clone()), + discovery, + identify: Identify::new(identify_config), + // Auxiliary fields + peer_manager: PeerManager::new(peer_manager_cfg, network_globals.clone(), log).await?, + + */ Ok(Network { - // Sub-behaviours - gossipsub, - eth2_rpc: RPC::new(ctx.fork_context.clone(), log.clone()), - discovery, - identify: Identify::new(identify_config), - // Auxiliary fields - peer_manager: PeerManager::new(peer_manager_cfg, network_globals.clone(), log).await?, + swarm: make_swarm(), events: VecDeque::new(), network_globals, enr_fork_id, @@ -854,21 +842,7 @@ impl Network { } WhitelistSubscriptionFilter(possible_hashes) } -} - -/* Behaviour Event Process Implementations - * - * These implementations dictate how to process each event that is emitted from each - * sub-behaviour. - */ - -// Gossipsub -impl NetworkBehaviourEventProcess for Network -where - AppReqId: ReqId, - TSpec: EthSpec, -{ - fn inject_event(&mut self, event: GossipsubEvent) { + fn inject_gs_event(&mut self, event: GossipsubEvent) { match event { GossipsubEvent::Message { propagation_source, @@ -956,16 +930,7 @@ where } } } -} - -// RPC -impl NetworkBehaviourEventProcess, TSpec>> - for Network -where - AppReqId: ReqId, - TSpec: EthSpec, -{ - fn inject_event(&mut self, event: RPCMessage, TSpec>) { + fn inject_rpc_event(&mut self, event: RPCMessage, TSpec>) { let peer_id = event.peer_id; if !self.peer_manager.is_connected(&peer_id) { @@ -1109,15 +1074,7 @@ where } } } -} - -// Discovery -impl NetworkBehaviourEventProcess for Network -where - AppReqId: ReqId, - TSpec: EthSpec, -{ - fn inject_event(&mut self, event: DiscoveredPeers) { + fn inject_discovery_event(&mut self, event: DiscoveredPeers) { let DiscoveredPeers { peers } = event; let to_dial_peers = self.peer_manager.peers_discovered(peers); for peer_id in to_dial_peers { @@ -1127,15 +1084,7 @@ where self.peer_manager.dial_peer(&peer_id, enr); } } -} - -// Identify -impl NetworkBehaviourEventProcess for Network -where - TSpec: EthSpec, - AppReqId: ReqId, -{ - fn inject_event(&mut self, event: IdentifyEvent) { + fn inject_identify_event(&mut self, event: IdentifyEvent) { match event { IdentifyEvent::Received { peer_id, mut info } => { if info.listen_addrs.len() > MAX_IDENTIFY_ADDRESSES { @@ -1153,62 +1102,7 @@ where IdentifyEvent::Pushed { .. } => {} } } -} - -type BehaviourHandler = - as NetworkBehaviour>::ConnectionHandler; - -impl Network -where - TSpec: EthSpec, - AppReqId: ReqId, -{ - /// Consumes the events list and drives the Lighthouse global NetworkBehaviour. 
- fn poll( - &mut self, - cx: &mut Context, - _: &mut impl PollParameters, - ) -> Poll, BehaviourHandler>> { - if let Some(waker) = &self.waker { - if waker.will_wake(cx.waker()) { - self.waker = Some(cx.waker().clone()); - } - } else { - self.waker = Some(cx.waker().clone()); - } - - if let Some(event) = self.events.pop_front() { - return Poll::Ready(NBAction::GenerateEvent(event)); - } - - // perform gossipsub score updates when necessary - while self.update_gossipsub_scores.poll_tick(cx).is_ready() { - self.peer_manager.update_gossipsub_scores(&self.gossipsub); - } - - // poll the gossipsub cache to clear expired messages - while let Poll::Ready(Some(result)) = self.gossip_cache.poll_next_unpin(cx) { - match result { - Err(e) => warn!(self.log, "Gossip cache error"; "error" => e), - Ok(expired_topic) => { - if let Some(v) = metrics::get_int_counter( - &metrics::GOSSIP_EXPIRED_LATE_PUBLISH_PER_TOPIC_KIND, - &[expired_topic.kind().as_ref()], - ) { - v.inc() - }; - } - } - } - - Poll::Pending - } -} - -impl NetworkBehaviourEventProcess - for Network -{ - fn inject_event(&mut self, event: PeerManagerEvent) { + fn inject_pm_event(&mut self, event: PeerManagerEvent) { match event { PeerManagerEvent::PeerConnectedIncoming(peer_id) => { self.add_event(OldBehaviourEvent::PeerConnectedIncoming(peer_id)); @@ -1256,6 +1150,40 @@ impl NetworkBehaviourEventProcess Poll> { + if let Some(waker) = &self.waker { + if waker.will_wake(cx.waker()) { + self.waker = Some(cx.waker().clone()); + } + } else { + self.waker = Some(cx.waker().clone()); + } + + if let Some(event) = self.events.pop_front() { + return Poll::Ready(event); + } + + // perform gossipsub score updates when necessary + while self.update_gossipsub_scores.poll_tick(cx).is_ready() { + self.peer_manager.update_gossipsub_scores(&self.gossipsub); + } + + // poll the gossipsub cache to clear expired messages + while let Poll::Ready(Some(result)) = self.gossip_cache.poll_next_unpin(cx) { + match result { + Err(e) => warn!(self.log, "Gossip cache error"; "error" => e), + Ok(expired_topic) => { + if let Some(v) = metrics::get_int_counter( + &metrics::GOSSIP_EXPIRED_LATE_PUBLISH_PER_TOPIC_KIND, + &[expired_topic.kind().as_ref()], + ) { + v.inc() + }; + } + } + } + } } /* Public API types */ @@ -1358,3 +1286,7 @@ impl slog::Value for RequestId { } } } + +fn make_swarm() -> Swarm> { + todo!() +} From 7de09e3e17872239027b34164597576794db68a3 Mon Sep 17 00:00:00 2001 From: Diva M Date: Tue, 23 Aug 2022 15:00:13 -0500 Subject: [PATCH 04/31] fix most compilation issues --- .../lighthouse_network/src/behaviour/mod.rs | 172 +++++++++++------- .../lighthouse_network/src/discovery/mod.rs | 3 +- beacon_node/lighthouse_network/src/rpc/mod.rs | 1 + beacon_node/lighthouse_network/src/service.rs | 47 +++-- 4 files changed, 130 insertions(+), 93 deletions(-) diff --git a/beacon_node/lighthouse_network/src/behaviour/mod.rs b/beacon_node/lighthouse_network/src/behaviour/mod.rs index e8c291381e7..874b0b0dec5 100644 --- a/beacon_node/lighthouse_network/src/behaviour/mod.rs +++ b/beacon_node/lighthouse_network/src/behaviour/mod.rs @@ -29,7 +29,7 @@ use libp2p::{ MessageAuthenticity, MessageId, }, identify::{Identify, IdentifyConfig, IdentifyEvent}, - swarm::{NetworkBehaviour, NetworkBehaviourAction as NBAction, PollParameters}, + swarm::NetworkBehaviour, NetworkBehaviour, PeerId, }; use slog::{crit, debug, o, trace, warn}; @@ -306,14 +306,46 @@ impl Network { /* Public Accessible Functions to interact with the behaviour */ - /// Get a mutable reference to the underlying 
discovery sub-behaviour. + /// The routing pub-sub mechanism for eth2. + pub fn gossipsub_mut(&mut self) -> &mut Gossipsub { + &mut self.swarm.behaviour_mut().gossipsub + } + /// The Eth2 RPC specified in the wire-0 protocol. + pub fn eth2_rpc_mut(&mut self) -> &mut RPC, TSpec> { + &mut self.swarm.behaviour_mut().eth2_rpc + } + /// Discv5 Discovery protocol. pub fn discovery_mut(&mut self) -> &mut Discovery { - &mut self.discovery + &mut self.swarm.behaviour_mut().discovery } - - /// Get a mutable reference to the peer manager. + /// Provides IP addresses and peer information. + pub fn identify_mut(&mut self) -> &mut Identify { + &mut self.swarm.behaviour_mut().identify + } + /// The peer manager that keeps track of peer's reputation and status. pub fn peer_manager_mut(&mut self) -> &mut PeerManager { - &mut self.peer_manager + &mut self.swarm.behaviour_mut().peer_manager + } + + /// The routing pub-sub mechanism for eth2. + pub fn gossipsub(&self) -> &Gossipsub { + &self.swarm.behaviour().gossipsub + } + /// The Eth2 RPC specified in the wire-0 protocol. + pub fn eth2_rpc(&self) -> &RPC, TSpec> { + &self.swarm.behaviour().eth2_rpc + } + /// Discv5 Discovery protocol. + pub fn discovery(&self) -> &Discovery { + &self.swarm.behaviour().discovery + } + /// Provides IP addresses and peer information. + pub fn identify(&self) -> &Identify { + &self.swarm.behaviour().identify + } + /// The peer manager that keeps track of peer's reputation and status. + pub fn peer_manager(&self) -> &PeerManager { + &self.swarm.behaviour().peer_manager } /// Returns the local ENR of the node. @@ -321,11 +353,6 @@ impl Network { self.network_globals.local_enr() } - /// Obtain a reference to the gossipsub protocol. - pub fn gs(&self) -> &Gossipsub { - &self.gossipsub - } - /* Pubsub behaviour functions */ /// Subscribes to a gossipsub topic kind, letting the network service determine the @@ -384,7 +411,7 @@ impl Network { let topic: Topic = topic.into(); - match self.gossipsub.subscribe(&topic) { + match self.gossipsub_mut().subscribe(&topic) { Err(e) => { warn!(self.log, "Failed to subscribe to topic"; "topic" => %topic, "error" => ?e); false @@ -407,7 +434,7 @@ impl Network { // unsubscribe from the topic let libp2p_topic: Topic = topic.clone().into(); - match self.gossipsub.unsubscribe(&libp2p_topic) { + match self.gossipsub_mut().unsubscribe(&libp2p_topic) { Err(_) => { warn!(self.log, "Failed to unsubscribe from topic"; "topic" => %libp2p_topic); false @@ -426,7 +453,7 @@ impl Network { for topic in message.topics(GossipEncoding::default(), self.enr_fork_id.fork_digest) { let message_data = message.encode(GossipEncoding::default()); if let Err(e) = self - .gossipsub + .gossipsub_mut() .publish(topic.clone().into(), message_data.clone()) { slog::warn!(self.log, "Could not publish message"; "error" => ?e); @@ -486,7 +513,7 @@ impl Network { } } - if let Err(e) = self.gossipsub.report_message_validation_result( + if let Err(e) = self.gossipsub_mut().report_message_validation_result( &message_id, propagation_source, validation_result, @@ -519,16 +546,16 @@ impl Network { "beacon_attestation_subnet_params" => ?beacon_attestation_subnet_params, ); - self.gossipsub + self.gossipsub_mut() .set_topic_params(get_topic(GossipKind::BeaconBlock), beacon_block_params)?; - self.gossipsub.set_topic_params( + self.gossipsub_mut().set_topic_params( get_topic(GossipKind::BeaconAggregateAndProof), beacon_aggregate_proof_params, )?; for i in 0..self.score_settings.attestation_subnet_count() { - self.gossipsub.set_topic_params( + 
self.gossipsub_mut().set_topic_params( get_topic(GossipKind::Attestation(SubnetId::new(i))), beacon_attestation_subnet_params.clone(), )?; @@ -541,8 +568,11 @@ impl Network { /// Send a request to a peer over RPC. pub fn send_request(&mut self, peer_id: PeerId, request_id: AppReqId, request: Request) { - self.eth2_rpc - .send_request(peer_id, RequestId::Application(request_id), request.into()) + self.eth2_rpc_mut().send_request( + peer_id, + RequestId::Application(request_id), + request.into(), + ) } /// Send a successful response to a peer over RPC. @@ -552,7 +582,8 @@ impl Network { id: PeerRequestId, response: Response, ) { - self.eth2_rpc.send_response(peer_id, id, response.into()) + self.eth2_rpc_mut() + .send_response(peer_id, id, response.into()) } /// Inform the peer that their request produced an error. @@ -563,8 +594,11 @@ impl Network { error: RPCResponseErrorCode, reason: String, ) { - self.eth2_rpc - .send_response(peer_id, id, RPCCodedResponse::Error(error, reason.into())) + self.eth2_rpc_mut().send_response( + peer_id, + id, + RPCCodedResponse::Error(error, reason.into()), + ) } /* Peer management functions */ @@ -574,24 +608,25 @@ impl Network { /// This will send a goodbye, disconnect and then ban the peer. /// This is fatal for a peer, and should be used in unrecoverable circumstances. pub fn goodbye_peer(&mut self, peer_id: &PeerId, reason: GoodbyeReason, source: ReportSource) { - self.peer_manager.goodbye_peer(peer_id, reason, source); + self.peer_manager_mut() + .goodbye_peer(peer_id, reason, source); } /// Returns an iterator over all enr entries in the DHT. - pub fn enr_entries(&mut self) -> Vec { - self.discovery.table_entries_enr() + pub fn enr_entries(&self) -> Vec { + self.discovery().table_entries_enr() } /// Add an ENR to the routing table of the discovery mechanism. pub fn add_enr(&mut self, enr: Enr) { - self.discovery.add_enr(enr); + self.discovery_mut().add_enr(enr); } /// Updates a subnet value to the ENR attnets/syncnets bitfield. /// /// The `value` is `true` if a subnet is being added and false otherwise. pub fn update_enr_subnet(&mut self, subnet_id: Subnet, value: bool) { - if let Err(e) = self.discovery.update_enr_bitfield(subnet_id, value) { + if let Err(e) = self.discovery_mut().update_enr_bitfield(subnet_id, value) { crit!(self.log, "Could not update ENR bitfield"; "error" => e); } // update the local meta data which informs our peers of the update during PINGS @@ -602,7 +637,7 @@ impl Network { /// would like to retain the peers for. pub fn discover_subnet_peers(&mut self, subnets_to_discover: Vec) { // If discovery is not started or disabled, ignore the request - if !self.discovery.started { + if !self.discovery().started { return; } @@ -649,13 +684,13 @@ impl Network { // request the subnet query from discovery if !filtered.is_empty() { - self.discovery.discover_subnet_peers(filtered); + self.discovery_mut().discover_subnet_peers(filtered); } } /// Updates the local ENR's "eth2" field with the latest EnrForkId. pub fn update_fork_version(&mut self, enr_fork_id: EnrForkId) { - self.discovery.update_eth2_enr(enr_fork_id.clone()); + self.discovery_mut().update_eth2_enr(enr_fork_id.clone()); // update the local reference self.enr_fork_id = enr_fork_id; @@ -666,13 +701,13 @@ impl Network { /// Updates the current meta data of the node to match the local ENR. 
fn update_metadata_bitfields(&mut self) { let local_attnets = self - .discovery + .discovery_mut() .local_enr() .attestation_bitfield::() .expect("Local discovery must have attestation bitfield"); let local_syncnets = self - .discovery + .discovery_mut() .local_enr() .sync_committee_bitfield::() .expect("Local discovery must have sync committee bitfield"); @@ -702,7 +737,7 @@ impl Network { }; trace!(self.log, "Sending Ping"; "peer_id" => %peer_id); let id = RequestId::Behaviour; - self.eth2_rpc + self.eth2_rpc_mut() .send_request(peer_id, id, OutboundRequest::Ping(ping)); } @@ -713,13 +748,13 @@ impl Network { }; trace!(self.log, "Sending Pong"; "request_id" => id.1, "peer_id" => %peer_id); let event = RPCCodedResponse::Success(RPCResponse::Pong(ping)); - self.eth2_rpc.send_response(peer_id, id, event); + self.eth2_rpc_mut().send_response(peer_id, id, event); } /// Sends a METADATA request to a peer. fn send_meta_data_request(&mut self, peer_id: PeerId) { let event = OutboundRequest::MetaData(PhantomData); - self.eth2_rpc + self.eth2_rpc_mut() .send_request(peer_id, RequestId::Behaviour, event); } @@ -728,13 +763,7 @@ impl Network { let event = RPCCodedResponse::Success(RPCResponse::MetaData( self.network_globals.local_metadata.read().clone(), )); - self.eth2_rpc.send_response(peer_id, id, event); - } - - /// Returns a reference to the peer manager to allow the swarm to notify the manager of peer - /// status - pub fn peer_manager(&mut self) -> &mut PeerManager { - &mut self.peer_manager + self.eth2_rpc_mut().send_response(peer_id, id, event); } // RPC Propagation methods @@ -789,7 +818,7 @@ impl Network { fn dial_cached_enrs_in_subnet(&mut self, subnet: Subnet) { let predicate = subnet_predicate::(vec![subnet], &self.log); let peers_to_dial: Vec = self - .discovery + .discovery() .cached_enrs() .filter_map(|(peer_id, enr)| { let peers = self.network_globals.peers.read(); @@ -804,10 +833,10 @@ impl Network { debug!(self.log, "Dialing cached ENR peer"; "peer_id" => %peer_id); // Remove the ENR from the cache to prevent continual re-dialing on disconnects - self.discovery.remove_cached_enr(&peer_id); + self.discovery_mut().remove_cached_enr(&peer_id); // For any dial event, inform the peer manager let enr = self.discovery_mut().enr_of_peer(&peer_id); - self.peer_manager.dial_peer(&peer_id, enr); + self.peer_manager_mut().dial_peer(&peer_id, enr); } } @@ -855,7 +884,7 @@ impl Network { Err(e) => { debug!(self.log, "Could not decode gossipsub message"; "topic" => ?gs_msg.topic,"error" => e); //reject the message - if let Err(e) = self.gossipsub.report_message_validation_result( + if let Err(e) = self.gossipsub_mut().report_message_validation_result( &id, &propagation_source, MessageAcceptance::Reject, @@ -886,7 +915,12 @@ impl Network { if let Some(msgs) = self.gossip_cache.retrieve(&topic) { for data in msgs { let topic_str: &str = topic.kind().as_ref(); - match self.gossipsub.publish(topic.clone().into(), data) { + match self + .swarm + .behaviour_mut() + .gossipsub + .publish(topic.clone().into(), data) + { Ok(_) => { warn!(self.log, "Gossip message published on retry"; "topic" => topic_str); if let Some(v) = metrics::get_int_counter( @@ -920,7 +954,7 @@ impl Network { } GossipsubEvent::GossipsubNotSupported { peer_id } => { debug!(self.log, "Peer does not support gossipsub"; "peer_id" => %peer_id); - self.peer_manager.report_peer( + self.peer_manager_mut().report_peer( &peer_id, PeerAction::LowToleranceError, ReportSource::Gossipsub, @@ -933,7 +967,7 @@ impl Network { fn 
inject_rpc_event(&mut self, event: RPCMessage, TSpec>) { let peer_id = event.peer_id; - if !self.peer_manager.is_connected(&peer_id) { + if !self.peer_manager().is_connected(&peer_id) { debug!( self.log, "Ignoring rpc message of disconnecting peer"; @@ -955,7 +989,7 @@ impl Network { // Inform the peer manager of the error. // An inbound error here means we sent an error to the peer, or the stream // timed out. - self.peer_manager.handle_rpc_error( + self.peer_manager_mut().handle_rpc_error( &peer_id, proto, &error, @@ -964,7 +998,7 @@ impl Network { } HandlerErr::Outbound { id, proto, error } => { // Inform the peer manager that a request we sent to the peer failed - self.peer_manager.handle_rpc_error( + self.peer_manager_mut().handle_rpc_error( &peer_id, proto, &error, @@ -983,7 +1017,7 @@ impl Network { /* Behaviour managed protocols: Ping and Metadata */ InboundRequest::Ping(ping) => { // inform the peer manager and send the response - self.peer_manager.ping_request(&peer_id, ping.data); + self.peer_manager_mut().ping_request(&peer_id, ping.data); // send a ping response self.pong(peer_request_id, peer_id); } @@ -1007,7 +1041,7 @@ impl Network { /* Protocols propagated to the Network */ InboundRequest::Status(msg) => { // inform the peer manager that we have received a status from a peer - self.peer_manager.peer_statusd(&peer_id); + self.peer_manager_mut().peer_statusd(&peer_id); // propagate the STATUS message upwards self.propagate_request(peer_request_id, peer_id, Request::Status(msg)) } @@ -1019,7 +1053,7 @@ impl Network { } = req; // Still disconnect the peer if the request is naughty. if step == 0 { - self.peer_manager.handle_rpc_error( + self.peer_manager_mut().handle_rpc_error( &peer_id, Protocol::BlocksByRange, &RPCError::InvalidData( @@ -1046,14 +1080,16 @@ impl Network { Ok(RPCReceived::Response(id, resp)) => { match resp { /* Behaviour managed protocols */ - RPCResponse::Pong(ping) => self.peer_manager.pong_response(&peer_id, ping.data), - RPCResponse::MetaData(meta_data) => { - self.peer_manager.meta_data_response(&peer_id, meta_data) + RPCResponse::Pong(ping) => { + self.peer_manager_mut().pong_response(&peer_id, ping.data) } + RPCResponse::MetaData(meta_data) => self + .peer_manager_mut() + .meta_data_response(&peer_id, meta_data), /* Network propagated protocols */ RPCResponse::Status(msg) => { // inform the peer manager that we have received a status from a peer - self.peer_manager.peer_statusd(&peer_id); + self.peer_manager_mut().peer_statusd(&peer_id); // propagate the STATUS message upwards self.propagate_response(id, peer_id, Response::Status(msg)); } @@ -1076,12 +1112,12 @@ impl Network { } fn inject_discovery_event(&mut self, event: DiscoveredPeers) { let DiscoveredPeers { peers } = event; - let to_dial_peers = self.peer_manager.peers_discovered(peers); + let to_dial_peers = self.peer_manager_mut().peers_discovered(peers); for peer_id in to_dial_peers { debug!(self.log, "Dialing discovered peer"; "peer_id" => %peer_id); // For any dial event, inform the peer manager let enr = self.discovery_mut().enr_of_peer(&peer_id); - self.peer_manager.dial_peer(&peer_id, enr); + self.peer_manager_mut().dial_peer(&peer_id, enr); } } fn inject_identify_event(&mut self, event: IdentifyEvent) { @@ -1095,7 +1131,7 @@ impl Network { info.listen_addrs.truncate(MAX_IDENTIFY_ADDRESSES); } // send peer info to the peer manager. - self.peer_manager.identify(&peer_id, &info); + self.peer_manager_mut().identify(&peer_id, &info); } IdentifyEvent::Sent { .. } => {} IdentifyEvent::Error { .. 
} => {} @@ -1114,11 +1150,11 @@ impl Network { self.add_event(OldBehaviourEvent::PeerDisconnected(peer_id)); } PeerManagerEvent::Banned(peer_id, associated_ips) => { - self.discovery.ban_peer(&peer_id, associated_ips); + self.discovery_mut().ban_peer(&peer_id, associated_ips); self.add_event(OldBehaviourEvent::PeerBanned(peer_id)); } PeerManagerEvent::UnBanned(peer_id, associated_ips) => { - self.discovery.unban_peer(&peer_id, associated_ips); + self.discovery_mut().unban_peer(&peer_id, associated_ips); self.add_event(OldBehaviourEvent::PeerUnbanned(peer_id)); } PeerManagerEvent::Status(peer_id) => { @@ -1128,7 +1164,7 @@ impl Network { } PeerManagerEvent::DiscoverPeers(peers_to_find) => { // Peer manager has requested a discovery query for more peers. - self.discovery.discover_peers(peers_to_find); + self.discovery_mut().discover_peers(peers_to_find); } PeerManagerEvent::DiscoverSubnetPeers(subnets_to_discover) => { // Peer manager has requested a subnet discovery query for more peers. @@ -1145,7 +1181,7 @@ impl Network { debug!(self.log, "Peer Manager disconnecting peer"; "peer_id" => %peer_id, "reason" => %reason); // send one goodbye - self.eth2_rpc + self.eth2_rpc_mut() .shutdown(peer_id, RequestId::Behaviour, reason); } } @@ -1166,7 +1202,8 @@ impl Network { // perform gossipsub score updates when necessary while self.update_gossipsub_scores.poll_tick(cx).is_ready() { - self.peer_manager.update_gossipsub_scores(&self.gossipsub); + let ref mut this = self.swarm.behaviour_mut(); + this.peer_manager.update_gossipsub_scores(&this.gossipsub); } // poll the gossipsub cache to clear expired messages @@ -1183,6 +1220,7 @@ impl Network { } } } + Poll::Pending } } diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 7a6f3fc5eb5..b68112ab2c9 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -71,6 +71,7 @@ const DURATION_DIFFERENCE: Duration = Duration::from_millis(1); /// A query has completed. This result contains a mapping of discovered peer IDs to the `min_ttl` /// of the peer if it is specified. +#[derive(Debug)] pub struct DiscoveredPeers { pub peers: HashMap>, } @@ -361,7 +362,7 @@ impl Discovery { } /// Returns an iterator over all enr entries in the DHT. - pub fn table_entries_enr(&mut self) -> Vec { + pub fn table_entries_enr(&self) -> Vec { self.discv5.table_entries_enr() } diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 0bedd423b20..7b0092ef713 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -90,6 +90,7 @@ impl std::fmt::Display for RPCSend { } /// Messages sent to the user from the RPC protocol. +#[derive(Debug)] pub struct RPCMessage { /// The peer that sent the message. pub peer_id: PeerId, diff --git a/beacon_node/lighthouse_network/src/service.rs b/beacon_node/lighthouse_network/src/service.rs index 0be2954ea09..9cd3c95fda5 100644 --- a/beacon_node/lighthouse_network/src/service.rs +++ b/beacon_node/lighthouse_network/src/service.rs @@ -51,8 +51,10 @@ pub enum Libp2pEvent { /// The configuration and state of the libp2p components for the beacon node. pub struct Service { + _p_a: std::marker::PhantomData, + _p_b: std::marker::PhantomData, /// The libp2p Swarm handler. - pub swarm: Swarm>, + pub swarm: Swarm, /// The bandwidth logger for the underlying libp2p transport. 
pub bandwidth: Arc, /// This node's PeerId. @@ -119,8 +121,7 @@ impl Service { .map_err(|e| format!("Failed to build transport: {:?}", e))?; // Lighthouse network behaviour - let behaviour = - Network::new(&local_keypair, ctx, network_globals.clone(), &log).await?; + let behaviour = libp2p::swarm::DummyBehaviour::default(); // use the executor for libp2p struct Executor(task_executor::TaskExecutor); @@ -236,11 +237,12 @@ impl Service { let mut subscribed_topics: Vec = vec![]; for topic_kind in &config.topics { - if swarm.behaviour_mut().subscribe_kind(topic_kind.clone()) { - subscribed_topics.push(topic_kind.clone()); - } else { - warn!(log, "Could not subscribe to topic"; "topic" => %topic_kind); - } + // TODO + // if swarm.behaviour_mut().subscribe_kind(topic_kind.clone()) { + // subscribed_topics.push(topic_kind.clone()); + // } else { + // warn!(log, "Could not subscribe to topic"; "topic" => %topic_kind); + // } } if !subscribed_topics.is_empty() { @@ -248,6 +250,8 @@ impl Service { } let service = Service { + _p_a: Default::default(), + _p_b: Default::default(), swarm, bandwidth, local_peer_id, @@ -259,9 +263,7 @@ impl Service { /// Sends a request to a peer, with a given Id. pub fn send_request(&mut self, peer_id: PeerId, request_id: AppReqId, request: Request) { - self.swarm - .behaviour_mut() - .send_request(peer_id, request_id, request); + todo!() } /// Informs the peer that their request failed. @@ -272,9 +274,7 @@ impl Service { error: RPCResponseErrorCode, reason: String, ) { - self.swarm - .behaviour_mut() - .send_error_reponse(peer_id, id, error, reason); + todo!() } /// Report a peer's action. @@ -285,27 +285,22 @@ impl Service { source: ReportSource, msg: &'static str, ) { - self.swarm - .behaviour_mut() - .peer_manager_mut() - .report_peer(peer_id, action, source, None, msg); + todo!() } /// Disconnect and ban a peer, providing a reason. pub fn goodbye_peer(&mut self, peer_id: &PeerId, reason: GoodbyeReason, source: ReportSource) { - self.swarm - .behaviour_mut() - .goodbye_peer(peer_id, reason, source); + todo!() } /// Sends a response to a peer's request. 
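A small illustration of what the `_p_a` / `_p_b` markers buy while the real behaviour is stubbed out with `DummyBehaviour` and `todo!()`: the type parameters no longer appear in any real field, and `PhantomData` keeps the struct generic without storing values of those types. Struct and field contents here are assumptions for the example only.

use std::marker::PhantomData;

// Illustrative stand-in, not the real Service definition.
struct Service<AppReqId, TSpec> {
    _p_a: PhantomData<AppReqId>,
    _p_b: PhantomData<TSpec>,
    local_peer_id: String,
}

impl<AppReqId, TSpec> Service<AppReqId, TSpec> {
    fn new(local_peer_id: &str) -> Self {
        Service {
            _p_a: PhantomData,
            _p_b: PhantomData,
            local_peer_id: local_peer_id.to_string(),
        }
    }
}

fn main() {
    // Without the phantom fields this would fail with "type parameter is never used".
    let service: Service<u64, ()> = Service::new("16Uiu2HAm...");
    println!("{}", service.local_peer_id);
}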
pub fn send_response(&mut self, peer_id: PeerId, id: PeerRequestId, response: Response) { - self.swarm - .behaviour_mut() - .send_successful_response(peer_id, id, response); + todo!() } pub async fn next_event(&mut self) -> Libp2pEvent { + todo!() + /* loop { match self.swarm.select_next_some().await { SwarmEvent::Behaviour(behaviour) => { @@ -376,6 +371,7 @@ impl Service { SwarmEvent::Dialing(_peer_id) => {} } } + */ } } @@ -386,7 +382,8 @@ type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>; fn build_transport( local_private_key: Keypair, ) -> std::io::Result<(BoxedTransport, Arc)> { - let tcp = libp2p::tcp::TokioTcpConfig::new().nodelay(true); + let tcp = + libp2p::tcp::TokioTcpTransport::new(libp2p::tcp::GenTcpConfig::default().nodelay(true)); let transport = libp2p::dns::TokioDnsConfig::system(tcp)?; #[cfg(feature = "libp2p-websocket")] let transport = { From 54525418dcf3bddb7afd4f5dfdc879b6e9d0261e Mon Sep 17 00:00:00 2001 From: Diva M Date: Tue, 23 Aug 2022 16:53:02 -0500 Subject: [PATCH 05/31] create network --- .../lighthouse_network/src/behaviour/mod.rs | 423 ++++++++++++++---- .../src/peer_manager/mod.rs | 6 +- beacon_node/lighthouse_network/src/service.rs | 178 +------- 3 files changed, 331 insertions(+), 276 deletions(-) diff --git a/beacon_node/lighthouse_network/src/behaviour/mod.rs b/beacon_node/lighthouse_network/src/behaviour/mod.rs index 874b0b0dec5..170139239f6 100644 --- a/beacon_node/lighthouse_network/src/behaviour/mod.rs +++ b/beacon_node/lighthouse_network/src/behaviour/mod.rs @@ -1,7 +1,7 @@ use crate::behaviour::gossipsub_scoring_parameters::{ lighthouse_gossip_thresholds, PeerScoreSettings, }; -use crate::config::gossipsub_config; +use crate::config::{gossipsub_config, NetworkLoad}; use crate::discovery::{ subnet_predicate, DiscoveredPeers, Discovery, FIND_NODE_QUERY_CLOSEST_PEERS, }; @@ -9,16 +9,22 @@ use crate::peer_manager::{ config::Config as PeerManagerCfg, peerdb::score::PeerAction, peerdb::score::ReportSource, ConnectionDirection, PeerManager, PeerManagerEvent, }; -use crate::rpc::*; -use crate::service::{Context as ServiceContext, METADATA_FILENAME}; +use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS}; +use crate::service::{ + build_transport, strip_peer_id, Context as ServiceContext, MAX_CONNECTIONS_PER_PEER, + METADATA_FILENAME, +}; use crate::types::{ subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, SnappyTransform, Subnet, SubnetDiscovery, }; use crate::Eth2Enr; use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; +use crate::{rpc::*, EnrExt}; use futures::stream::StreamExt; +use libp2p::bandwidth::BandwidthSinks; use libp2p::gossipsub::error::PublishError; +use libp2p::swarm::{ConnectionLimits, SwarmBuilder}; use libp2p::Swarm; use libp2p::{ core::{connection::ConnectionId, identity::Keypair}, @@ -29,21 +35,24 @@ use libp2p::{ MessageAuthenticity, MessageId, }, identify::{Identify, IdentifyConfig, IdentifyEvent}, + multiaddr::{Multiaddr, Protocol as MProtocol}, swarm::NetworkBehaviour, NetworkBehaviour, PeerId, }; -use slog::{crit, debug, o, trace, warn}; +use slog::{crit, debug, info, o, trace, warn}; use ssz::Encode; use std::collections::HashSet; use std::fs::File; use std::io::Write; use std::path::{Path, PathBuf}; +use std::pin::Pin; use std::{ collections::VecDeque, marker::PhantomData, sync::Arc, task::{Context, Poll}, }; +use types::eth_spec; use types::{ consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, SignedBeaconBlock, Slot, 
SubnetId, SyncSubnetId, @@ -163,6 +172,10 @@ pub struct Network { /// The interval for updating gossipsub scores update_gossipsub_scores: tokio::time::Interval, gossip_cache: GossipCache, + /// The bandwidth logger for the underlying libp2p transport. + pub bandwidth: Arc, + /// This node's PeerId. + pub local_peer_id: PeerId, /// Logger for behaviour actions. log: slog::Logger, } @@ -170,138 +183,349 @@ pub struct Network { /// Implements the combined behaviour for the libp2p service. impl Network { pub async fn new( - local_key: &Keypair, + executor: task_executor::TaskExecutor, ctx: ServiceContext<'_>, - network_globals: Arc>, log: &slog::Logger, - ) -> error::Result { - let behaviour_log = log.new(o!()); - + ) -> error::Result<(Self, Arc>)> { + let log = log.new(o!("service"=> "libp2p")); let mut config = ctx.config.clone(); - - // Set up the Identify Behaviour - let identify_config = if config.private { - IdentifyConfig::new( - "".into(), - local_key.public(), // Still send legitimate public key - ) - .with_cache_size(0) - } else { - IdentifyConfig::new("eth2/1.0.0".into(), local_key.public()) - .with_agent_version(lighthouse_version::version_with_platform()) - .with_cache_size(0) + trace!(log, "Libp2p Service starting"); + + // initialise the node's ID + let local_keypair = crate::load_private_key(&config, &log); + + // set up a collection of variables accessible outside of the network crate + let network_globals = { + // Create an ENR or load from disk if appropriate + let enr = crate::discovery::enr::build_or_load_enr::( + local_keypair.clone(), + &config, + &ctx.enr_fork_id, + &log, + )?; + // Construct the metadata + let meta_data = crate::service::load_or_build_metadata(&config.network_dir, &log); + let globals = NetworkGlobals::new( + enr.clone(), + config.libp2p_port, + config.discovery_port, + meta_data, + config + .trusted_peers + .iter() + .map(|x| PeerId::from(x.clone())) + .collect(), + &log, + ); + Arc::new(globals) }; - // Build and start the discovery sub-behaviour - let mut discovery = - Discovery::new(local_key, &config, network_globals.clone(), log).await?; - // start searching for peers - discovery.discover_peers(FIND_NODE_QUERY_CLOSEST_PEERS); - // Grab our local ENR FORK ID let enr_fork_id = network_globals .local_enr() .eth2() .expect("Local ENR must have a fork id"); - let possible_fork_digests = ctx.fork_context.all_fork_digests(); - let filter = MaxCountSubscriptionFilter { - filter: Self::create_whitelist_filter( - possible_fork_digests, - ctx.chain_spec.attestation_subnet_count, - SYNC_COMMITTEE_SUBNET_COUNT, - ), - max_subscribed_topics: 200, - max_subscriptions_per_request: 150, // 148 in theory = (64 attestation + 4 sync committee + 6 core topics) * 2 + let score_settings = PeerScoreSettings::new(ctx.chain_spec, &config.gs_config); + + let gossip_cache = { + let slot_duration = std::time::Duration::from_secs(ctx.chain_spec.seconds_per_slot); + let half_epoch = std::time::Duration::from_secs( + ctx.chain_spec.seconds_per_slot * TSpec::slots_per_epoch() / 2, + ); + + GossipCache::builder() + .beacon_block_timeout(slot_duration) + .aggregates_timeout(half_epoch) + .attestation_timeout(half_epoch) + .voluntary_exit_timeout(half_epoch * 2) + .proposer_slashing_timeout(half_epoch * 2) + .attester_slashing_timeout(half_epoch * 2) + // .signed_contribution_and_proof_timeout(timeout) // Do not retry + // .sync_committee_message_timeout(timeout) // Do not retry + .build() }; - config.gs_config = gossipsub_config(config.network_load, ctx.fork_context.clone()); + 
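As a worked example of the gossip-cache timeouts built above, under assumed mainnet-style timing (12-second slots, 32 slots per epoch); the real values are read from the chain spec at runtime.

use std::time::Duration;

fn main() {
    // Assumed timing parameters for illustration only.
    let seconds_per_slot: u64 = 12;
    let slots_per_epoch: u64 = 32;

    let slot_duration = Duration::from_secs(seconds_per_slot);
    let half_epoch = Duration::from_secs(seconds_per_slot * slots_per_epoch / 2);

    println!("beacon_block_timeout           = {:?}", slot_duration); // 12s
    println!("aggregates/attestation_timeout = {:?}", half_epoch); // 192s
    println!("exit/slashing_timeout          = {:?}", half_epoch * 2); // 384s
}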
let local_peer_id = network_globals.local_peer_id(); + + let (gossipsub, update_gossipsub_scores) = { + let thresholds = lighthouse_gossip_thresholds(); + + // Prepare scoring parameters + let params = { + // Construct a set of gossipsub peer scoring parameters + // We don't know the number of active validators and the current slot yet + let active_validators = TSpec::minimum_validator_count(); + let current_slot = Slot::new(0); + score_settings.get_peer_score_params( + active_validators, + &thresholds, + &enr_fork_id, + current_slot, + )? + }; - // If metrics are enabled for gossipsub build the configuration - let gossipsub_metrics = ctx - .gossipsub_registry - .map(|registry| (registry, GossipsubMetricsConfig::default())); + trace!(log, "Using peer score params"; "params" => ?params); - let snappy_transform = SnappyTransform::new(config.gs_config.max_transmit_size()); - let mut gossipsub = Gossipsub::new_with_subscription_filter_and_transform( - MessageAuthenticity::Anonymous, - config.gs_config.clone(), - gossipsub_metrics, - filter, - snappy_transform, - ) - .map_err(|e| format!("Could not construct gossipsub: {:?}", e))?; + // Set up a scoring update interval + let update_gossipsub_scores = tokio::time::interval(params.decay_interval); - // Construct a set of gossipsub peer scoring parameters - // We don't know the number of active validators and the current slot yet - let active_validators = TSpec::minimum_validator_count(); - let current_slot = Slot::new(0); + let possible_fork_digests = ctx.fork_context.all_fork_digests(); + let filter = MaxCountSubscriptionFilter { + filter: Self::create_whitelist_filter( + possible_fork_digests, + ctx.chain_spec.attestation_subnet_count, + SYNC_COMMITTEE_SUBNET_COUNT, + ), + max_subscribed_topics: 200, + max_subscriptions_per_request: 150, // 148 in theory = (64 attestation + 4 sync committee + 6 core topics) * 2 + }; - let thresholds = lighthouse_gossip_thresholds(); + config.gs_config = gossipsub_config(config.network_load, ctx.fork_context.clone()); - let score_settings = PeerScoreSettings::new(ctx.chain_spec, &config.gs_config); + // If metrics are enabled for gossipsub build the configuration + let gossipsub_metrics = ctx + .gossipsub_registry + .map(|registry| (registry, GossipsubMetricsConfig::default())); - // Prepare scoring parameters - let params = score_settings.get_peer_score_params( - active_validators, - &thresholds, - &enr_fork_id, - current_slot, - )?; + let snappy_transform = SnappyTransform::new(config.gs_config.max_transmit_size()); + let mut gossipsub = Gossipsub::new_with_subscription_filter_and_transform( + MessageAuthenticity::Anonymous, + config.gs_config.clone(), + gossipsub_metrics, + filter, + snappy_transform, + ) + .map_err(|e| format!("Could not construct gossipsub: {:?}", e))?; - trace!(behaviour_log, "Using peer score params"; "params" => ?params); + gossipsub + .with_peer_score(params, thresholds) + .expect("Valid score params and thresholds"); - // Set up a scoring update interval - let update_gossipsub_scores = tokio::time::interval(params.decay_interval); + (gossipsub, update_gossipsub_scores) + }; - gossipsub - .with_peer_score(params, thresholds) - .expect("Valid score params and thresholds"); + let eth2_rpc = RPC::new(ctx.fork_context.clone(), log.clone()); - let peer_manager_cfg = PeerManagerCfg { - discovery_enabled: !config.disable_discovery, - metrics_enabled: config.metrics_enabled, - target_peer_count: config.target_peers, - ..Default::default() + let discovery = { + // Build and start the discovery 
sub-behaviour + let mut discovery = + Discovery::new(&local_keypair, &config, network_globals.clone(), &log).await?; + // start searching for peers + discovery.discover_peers(FIND_NODE_QUERY_CLOSEST_PEERS); + discovery }; - let slot_duration = std::time::Duration::from_secs(ctx.chain_spec.seconds_per_slot); - let half_epoch = std::time::Duration::from_secs( - ctx.chain_spec.seconds_per_slot * TSpec::slots_per_epoch() / 2, - ); - let gossip_cache = GossipCache::builder() - .beacon_block_timeout(slot_duration) - .aggregates_timeout(half_epoch) - .attestation_timeout(half_epoch) - .voluntary_exit_timeout(half_epoch * 2) - .proposer_slashing_timeout(half_epoch * 2) - .attester_slashing_timeout(half_epoch * 2) - // .signed_contribution_and_proof_timeout(timeout) // Do not retry - // .sync_committee_message_timeout(timeout) // Do not retry - .build(); + let identify = { + let identify_config = if config.private { + IdentifyConfig::new( + "".into(), + local_keypair.public(), // Still send legitimate public key + ) + .with_cache_size(0) + } else { + IdentifyConfig::new("eth2/1.0.0".into(), local_keypair.public()) + .with_agent_version(lighthouse_version::version_with_platform()) + .with_cache_size(0) + }; + Identify::new(identify_config) + }; + + let peer_manager = { + let peer_manager_cfg = PeerManagerCfg { + discovery_enabled: !config.disable_discovery, + metrics_enabled: config.metrics_enabled, + target_peer_count: config.target_peers, + ..Default::default() + }; + PeerManager::new(peer_manager_cfg, network_globals.clone(), &log)? + }; + + let behaviour = { + Behaviour { + gossipsub, + eth2_rpc, + discovery, + identify, + peer_manager, + } + }; + + let (swarm, bandwidth) = { + // Set up the transport - tcp/ws with noise and mplex + let (transport, bandwidth) = build_transport(local_keypair.clone()) + .map_err(|e| format!("Failed to build transport: {:?}", e))?; + + // use the executor for libp2p + struct Executor(task_executor::TaskExecutor); + impl libp2p::core::Executor for Executor { + fn exec(&self, f: Pin + Send>>) { + self.0.spawn(f, "libp2p"); + } + } + + // sets up the libp2p connection limits + let limits = ConnectionLimits::default() + .with_max_pending_incoming(Some(5)) + .with_max_pending_outgoing(Some(16)) + .with_max_established_incoming(Some( + (config.target_peers as f32 + * (1.0 + PEER_EXCESS_FACTOR - MIN_OUTBOUND_ONLY_FACTOR)) + .ceil() as u32, + )) + .with_max_established_outgoing(Some( + (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as u32, + )) + .with_max_established(Some( + (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR + PRIORITY_PEER_EXCESS)) + .ceil() as u32, + )) + .with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER)); + + ( + SwarmBuilder::new(transport, behaviour, local_peer_id) + .notify_handler_buffer_size(std::num::NonZeroUsize::new(7).expect("Not zero")) + .connection_event_buffer_size(64) + .connection_limits(limits) + .executor(Box::new(Executor(executor))) + .build(), + bandwidth, + ) + }; + + /* + info!(log, "Libp2p Starting"; "peer_id" => %enr.peer_id(), "bandwidth_config" => format!("{}-{}", config.network_load, NetworkLoad::from(config.network_load).name)); + let discovery_string = if config.disable_discovery { + "None".into() + } else { + config.discovery_port.to_string() + }; + debug!(log, "Attempting to open listening ports"; "address" => ?config.listen_address, "tcp_port" => config.libp2p_port, "udp_port" => discovery_string); + + + let listen_multiaddr = { + let mut m = Multiaddr::from(config.listen_address); + 
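A worked example of the connection-limit arithmetic used when the swarm is built above. The factor values below are made up for illustration; the real constants live in the peer_manager module.

fn main() {
    // Assumed values, chosen only so the arithmetic is easy to follow.
    let target_peers: f32 = 50.0;
    let peer_excess_factor: f32 = 0.25; // assumed
    let min_outbound_only_factor: f32 = 0.125; // assumed
    let priority_peer_excess: f32 = 0.25; // assumed

    let max_established_incoming =
        (target_peers * (1.0 + peer_excess_factor - min_outbound_only_factor)).ceil() as u32;
    let max_established_outgoing =
        (target_peers * (1.0 + peer_excess_factor)).ceil() as u32;
    let max_established_total =
        (target_peers * (1.0 + peer_excess_factor + priority_peer_excess)).ceil() as u32;

    // With these assumed factors: incoming = 57, outgoing = 63, total = 75.
    println!(
        "incoming={} outgoing={} total={}",
        max_established_incoming, max_established_outgoing, max_established_total
    );
}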
m.push(MProtocol::Tcp(config.libp2p_port)); + m + }; + + match Swarm::listen_on(&mut swarm, listen_multiaddr.clone()) { + Ok(_) => { + let mut log_address = listen_multiaddr; + log_address.push(MProtocol::P2p(local_peer_id.into())); + info!(log, "Listening established"; "address" => %log_address); + } + Err(err) => { + crit!( + log, + "Unable to listen on libp2p address"; + "error" => ?err, + "listen_multiaddr" => %listen_multiaddr, + ); + return Err("Libp2p was unable to listen on the given listen address.".into()); + } + }; + + // helper closure for dialing peers + let mut dial = |mut multiaddr: Multiaddr| { + // strip the p2p protocol if it exists + strip_peer_id(&mut multiaddr); + match Swarm::dial(&mut swarm, multiaddr.clone()) { + Ok(()) => debug!(log, "Dialing libp2p peer"; "address" => %multiaddr), + Err(err) => debug!( + log, + "Could not connect to peer"; "address" => %multiaddr, "error" => ?err + ), + }; + }; + + // attempt to connect to user-input libp2p nodes + for multiaddr in &config.libp2p_nodes { + dial(multiaddr.clone()); + } + + // attempt to connect to any specified boot-nodes + let mut boot_nodes = config.boot_nodes_enr.clone(); + boot_nodes.dedup(); + + for bootnode_enr in boot_nodes { + for multiaddr in &bootnode_enr.multiaddr() { + // ignore udp multiaddr if it exists + let components = multiaddr.iter().collect::>(); + if let MProtocol::Udp(_) = components[1] { + continue; + } + + if !network_globals + .peers + .read() + .is_connected_or_dialing(&bootnode_enr.peer_id()) + { + dial(multiaddr.clone()); + } + } + } + + for multiaddr in &config.boot_nodes_multiaddr { + // check TCP support for dialing + if multiaddr + .iter() + .any(|proto| matches!(proto, MProtocol::Tcp(_))) + { + dial(multiaddr.clone()); + } + } + + let mut subscribed_topics: Vec = vec![]; + + for topic_kind in &config.topics { + // TODO + // if swarm.behaviour_mut().subscribe_kind(topic_kind.clone()) { + // subscribed_topics.push(topic_kind.clone()); + // } else { + // warn!(log, "Could not subscribe to topic"; "topic" => %topic_kind); + // } + } + + if !subscribed_topics.is_empty() { + info!(log, "Subscribed to topics"; "topics" => ?subscribed_topics); + } + + let mut config = ctx.config.clone(); + + // Set up the Identify Behaviour + + + + + /* * gossipsub, - eth2_rpc: RPC::new(ctx.fork_context.clone(), log.clone()), + eth2_rpc: , discovery, - identify: Identify::new(identify_config), + identify: , // Auxiliary fields - peer_manager: PeerManager::new(peer_manager_cfg, network_globals.clone(), log).await?, + peer_manager: */ - Ok(Network { - swarm: make_swarm(), + */ + let network = Network { + swarm, events: VecDeque::new(), - network_globals, + network_globals: network_globals.clone(), enr_fork_id, waker: None, network_dir: config.network_dir.clone(), - log: behaviour_log, - score_settings, fork_context: ctx.fork_context, - gossip_cache, + score_settings, update_gossipsub_scores, - }) + gossip_cache, + bandwidth, + local_peer_id, + log, + }; + Ok((network, network_globals)) } /* Public Accessible Functions to interact with the behaviour */ @@ -1110,6 +1334,7 @@ impl Network { } } } + fn inject_discovery_event(&mut self, event: DiscoveredPeers) { let DiscoveredPeers { peers } = event; let to_dial_peers = self.peer_manager_mut().peers_discovered(peers); @@ -1120,6 +1345,7 @@ impl Network { self.peer_manager_mut().dial_peer(&peer_id, enr); } } + fn inject_identify_event(&mut self, event: IdentifyEvent) { match event { IdentifyEvent::Received { peer_id, mut info } => { @@ -1138,6 +1364,7 @@ impl 
Network { IdentifyEvent::Pushed { .. } => {} } } + fn inject_pm_event(&mut self, event: PeerManagerEvent) { match event { PeerManagerEvent::PeerConnectedIncoming(peer_id) => { diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index b9396698274..48c02b72c3e 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -118,7 +118,7 @@ pub enum PeerManagerEvent { impl PeerManager { // NOTE: Must be run inside a tokio executor. - pub async fn new( + pub fn new( cfg: config::Config, network_globals: Arc>, log: &slog::Logger, @@ -1251,9 +1251,7 @@ mod tests { }; let log = build_log(slog::Level::Debug, false); let globals = NetworkGlobals::new_test_globals(&log); - PeerManager::new(config, Arc::new(globals), &log) - .await - .unwrap() + PeerManager::new(config, Arc::new(globals), &log).unwrap() } #[tokio::test] diff --git a/beacon_node/lighthouse_network/src/service.rs b/beacon_node/lighthouse_network/src/service.rs index 9cd3c95fda5..b1ba5d47ad2 100644 --- a/beacon_node/lighthouse_network/src/service.rs +++ b/beacon_node/lighthouse_network/src/service.rs @@ -32,7 +32,7 @@ use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY pub const NETWORK_KEY_FILENAME: &str = "key"; /// The maximum simultaneous libp2p connections per peer. -const MAX_CONNECTIONS_PER_PEER: u32 = 1; +pub const MAX_CONNECTIONS_PER_PEER: u32 = 1; /// The filename to store our local metadata. pub const METADATA_FILENAME: &str = "metadata"; @@ -77,177 +77,7 @@ impl Service { ctx: Context<'_>, log: &Logger, ) -> error::Result<(Arc>, Self)> { - let log = log.new(o!("service"=> "libp2p")); - trace!(log, "Libp2p Service starting"); - - let config = ctx.config; - // initialise the node's ID - let local_keypair = load_private_key(config, &log); - - // Create an ENR or load from disk if appropriate - let enr = - enr::build_or_load_enr::(local_keypair.clone(), config, &ctx.enr_fork_id, &log)?; - - let local_peer_id = enr.peer_id(); - - // Construct the metadata - let meta_data = load_or_build_metadata(&config.network_dir, &log); - - // set up a collection of variables accessible outside of the network crate - let network_globals = Arc::new(NetworkGlobals::new( - enr.clone(), - config.libp2p_port, - config.discovery_port, - meta_data, - config - .trusted_peers - .iter() - .map(|x| PeerId::from(x.clone())) - .collect(), - &log, - )); - - info!(log, "Libp2p Starting"; "peer_id" => %enr.peer_id(), "bandwidth_config" => format!("{}-{}", config.network_load, NetworkLoad::from(config.network_load).name)); - let discovery_string = if config.disable_discovery { - "None".into() - } else { - config.discovery_port.to_string() - }; - debug!(log, "Attempting to open listening ports"; "address" => ?config.listen_address, "tcp_port" => config.libp2p_port, "udp_port" => discovery_string); - - let (mut swarm, bandwidth) = { - // Set up the transport - tcp/ws with noise and mplex - let (transport, bandwidth) = build_transport(local_keypair.clone()) - .map_err(|e| format!("Failed to build transport: {:?}", e))?; - - // Lighthouse network behaviour - let behaviour = libp2p::swarm::DummyBehaviour::default(); - - // use the executor for libp2p - struct Executor(task_executor::TaskExecutor); - impl libp2p::core::Executor for Executor { - fn exec(&self, f: Pin + Send>>) { - self.0.spawn(f, "libp2p"); - } - } - - // sets up the libp2p connection limits - let limits = ConnectionLimits::default() 
- .with_max_pending_incoming(Some(5)) - .with_max_pending_outgoing(Some(16)) - .with_max_established_incoming(Some( - (config.target_peers as f32 - * (1.0 + PEER_EXCESS_FACTOR - MIN_OUTBOUND_ONLY_FACTOR)) - .ceil() as u32, - )) - .with_max_established_outgoing(Some( - (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR)).ceil() as u32, - )) - .with_max_established(Some( - (config.target_peers as f32 * (1.0 + PEER_EXCESS_FACTOR + PRIORITY_PEER_EXCESS)) - .ceil() as u32, - )) - .with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER)); - - ( - SwarmBuilder::new(transport, behaviour, local_peer_id) - .notify_handler_buffer_size(std::num::NonZeroUsize::new(7).expect("Not zero")) - .connection_event_buffer_size(64) - .connection_limits(limits) - .executor(Box::new(Executor(executor))) - .build(), - bandwidth, - ) - }; - // listen on the specified address - let listen_multiaddr = { - let mut m = Multiaddr::from(config.listen_address); - m.push(Protocol::Tcp(config.libp2p_port)); - m - }; - - match Swarm::listen_on(&mut swarm, listen_multiaddr.clone()) { - Ok(_) => { - let mut log_address = listen_multiaddr; - log_address.push(Protocol::P2p(local_peer_id.into())); - info!(log, "Listening established"; "address" => %log_address); - } - Err(err) => { - crit!( - log, - "Unable to listen on libp2p address"; - "error" => ?err, - "listen_multiaddr" => %listen_multiaddr, - ); - return Err("Libp2p was unable to listen on the given listen address.".into()); - } - }; - - // helper closure for dialing peers - let mut dial = |mut multiaddr: Multiaddr| { - // strip the p2p protocol if it exists - strip_peer_id(&mut multiaddr); - match Swarm::dial(&mut swarm, multiaddr.clone()) { - Ok(()) => debug!(log, "Dialing libp2p peer"; "address" => %multiaddr), - Err(err) => debug!( - log, - "Could not connect to peer"; "address" => %multiaddr, "error" => ?err - ), - }; - }; - - // attempt to connect to user-input libp2p nodes - for multiaddr in &config.libp2p_nodes { - dial(multiaddr.clone()); - } - - // attempt to connect to any specified boot-nodes - let mut boot_nodes = config.boot_nodes_enr.clone(); - boot_nodes.dedup(); - - for bootnode_enr in boot_nodes { - for multiaddr in &bootnode_enr.multiaddr() { - // ignore udp multiaddr if it exists - let components = multiaddr.iter().collect::>(); - if let Protocol::Udp(_) = components[1] { - continue; - } - - if !network_globals - .peers - .read() - .is_connected_or_dialing(&bootnode_enr.peer_id()) - { - dial(multiaddr.clone()); - } - } - } - - for multiaddr in &config.boot_nodes_multiaddr { - // check TCP support for dialing - if multiaddr - .iter() - .any(|proto| matches!(proto, Protocol::Tcp(_))) - { - dial(multiaddr.clone()); - } - } - - let mut subscribed_topics: Vec = vec![]; - - for topic_kind in &config.topics { - // TODO - // if swarm.behaviour_mut().subscribe_kind(topic_kind.clone()) { - // subscribed_topics.push(topic_kind.clone()); - // } else { - // warn!(log, "Could not subscribe to topic"; "topic" => %topic_kind); - // } - } - - if !subscribed_topics.is_empty() { - info!(log, "Subscribed to topics"; "topics" => ?subscribed_topics); - } let service = Service { _p_a: Default::default(), @@ -379,7 +209,7 @@ type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>; /// The implementation supports TCP/IP, WebSockets over TCP/IP, noise as the encryption layer, and /// mplex as the multiplexing layer. 
-fn build_transport( +pub fn build_transport( local_private_key: Keypair, ) -> std::io::Result<(BoxedTransport, Arc)> { let tcp = @@ -500,7 +330,7 @@ fn generate_noise_config( /// For a multiaddr that ends with a peer id, this strips this suffix. Rust-libp2p /// only supports dialing to an address without providing the peer id. -fn strip_peer_id(addr: &mut Multiaddr) { +pub fn strip_peer_id(addr: &mut Multiaddr) { let last = addr.pop(); match last { Some(Protocol::P2p(_)) => {} @@ -510,7 +340,7 @@ fn strip_peer_id(addr: &mut Multiaddr) { } /// Load metadata from persisted file. Return default metadata if loading fails. -fn load_or_build_metadata( +pub fn load_or_build_metadata( network_dir: &std::path::Path, log: &slog::Logger, ) -> MetaData { From 68132a015cfb3cb0258f33f3fd8f642a5a89089a Mon Sep 17 00:00:00 2001 From: Diva M Date: Wed, 24 Aug 2022 10:32:14 -0500 Subject: [PATCH 06/31] add start function --- .../lighthouse_network/src/behaviour/mod.rs | 100 ++++++++---------- 1 file changed, 44 insertions(+), 56 deletions(-) diff --git a/beacon_node/lighthouse_network/src/behaviour/mod.rs b/beacon_node/lighthouse_network/src/behaviour/mod.rs index 170139239f6..339251e461b 100644 --- a/beacon_node/lighthouse_network/src/behaviour/mod.rs +++ b/beacon_node/lighthouse_network/src/behaviour/mod.rs @@ -393,15 +393,38 @@ impl Network { ) }; - /* - info!(log, "Libp2p Starting"; "peer_id" => %enr.peer_id(), "bandwidth_config" => format!("{}-{}", config.network_load, NetworkLoad::from(config.network_load).name)); + let network = Network { + swarm, + events: VecDeque::new(), + network_globals: network_globals.clone(), + enr_fork_id, + waker: None, + network_dir: config.network_dir.clone(), + fork_context: ctx.fork_context, + score_settings, + update_gossipsub_scores, + gossip_cache, + bandwidth, + local_peer_id, + log, + }; + + // Start initialization routine + // TODO: split new and start functions? 
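A short usage sketch of the multiaddr helpers made public above and reused by the new `start` routine: the listen address is built by pushing a TCP component, and a trailing `/p2p/<peer-id>` is stripped before dialing because rust-libp2p dials plain addresses. Addresses and port are illustrative, and exact libp2p API details vary slightly between versions.

use libp2p::multiaddr::{Multiaddr, Protocol};
use libp2p::PeerId;

// Same shape as the `strip_peer_id` helper in this patch.
fn strip_peer_id(addr: &mut Multiaddr) {
    match addr.pop() {
        Some(Protocol::P2p(_)) | None => {}
        Some(other) => addr.push(other),
    }
}

fn main() {
    // Build a listen address the way the service does.
    let mut listen: Multiaddr = "/ip4/0.0.0.0".parse().expect("valid multiaddr");
    listen.push(Protocol::Tcp(9000));
    assert_eq!(listen.to_string(), "/ip4/0.0.0.0/tcp/9000");

    // Strip the peer-id suffix from a dial address.
    let mut dial: Multiaddr = "/ip4/192.0.2.1/tcp/9000".parse().expect("valid multiaddr");
    dial.push(Protocol::P2p(PeerId::random().into()));
    strip_peer_id(&mut dial);
    assert_eq!(dial.to_string(), "/ip4/192.0.2.1/tcp/9000");
    println!("dialing {dial}");
}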
+ + Ok((network, network_globals)) + } + + async fn start(&mut self, config: &crate::NetworkConfig) -> error::Result<()> { + let enr = self.network_globals.local_enr(); + info!(self.log, "Libp2p Starting"; "peer_id" => %enr.peer_id(), "bandwidth_config" => format!("{}-{}", config.network_load, NetworkLoad::from(config.network_load).name)); let discovery_string = if config.disable_discovery { "None".into() } else { config.discovery_port.to_string() }; - debug!(log, "Attempting to open listening ports"; "address" => ?config.listen_address, "tcp_port" => config.libp2p_port, "udp_port" => discovery_string); + debug!(self.log, "Attempting to open listening ports"; "address" => ?config.listen_address, "tcp_port" => config.libp2p_port, "udp_port" => discovery_string); let listen_multiaddr = { let mut m = Multiaddr::from(config.listen_address); @@ -409,15 +432,15 @@ impl Network { m }; - match Swarm::listen_on(&mut swarm, listen_multiaddr.clone()) { + match self.swarm.listen_on(listen_multiaddr.clone()) { Ok(_) => { let mut log_address = listen_multiaddr; - log_address.push(MProtocol::P2p(local_peer_id.into())); - info!(log, "Listening established"; "address" => %log_address); + log_address.push(MProtocol::P2p(enr.peer_id().into())); + info!(self.log, "Listening established"; "address" => %log_address); } Err(err) => { crit!( - log, + self.log, "Unable to listen on libp2p address"; "error" => ?err, "listen_multiaddr" => %listen_multiaddr, @@ -430,12 +453,11 @@ impl Network { let mut dial = |mut multiaddr: Multiaddr| { // strip the p2p protocol if it exists strip_peer_id(&mut multiaddr); - match Swarm::dial(&mut swarm, multiaddr.clone()) { - Ok(()) => debug!(log, "Dialing libp2p peer"; "address" => %multiaddr), - Err(err) => debug!( - log, - "Could not connect to peer"; "address" => %multiaddr, "error" => ?err - ), + match self.swarm.dial(multiaddr.clone()) { + Ok(()) => debug!(self.log, "Dialing libp2p peer"; "address" => %multiaddr), + Err(err) => { + debug!(self.log, "Could not connect to peer"; "address" => %multiaddr, "error" => ?err) + } }; }; @@ -456,7 +478,8 @@ impl Network { continue; } - if !network_globals + if !self + .network_globals .peers .read() .is_connected_or_dialing(&bootnode_enr.peer_id()) @@ -479,53 +502,18 @@ impl Network { let mut subscribed_topics: Vec = vec![]; for topic_kind in &config.topics { - // TODO - // if swarm.behaviour_mut().subscribe_kind(topic_kind.clone()) { - // subscribed_topics.push(topic_kind.clone()); - // } else { - // warn!(log, "Could not subscribe to topic"; "topic" => %topic_kind); - // } + if self.subscribe_kind(topic_kind.clone()) { + subscribed_topics.push(topic_kind.clone()); + } else { + warn!(self.log, "Could not subscribe to topic"; "topic" => %topic_kind); + } } if !subscribed_topics.is_empty() { - info!(log, "Subscribed to topics"; "topics" => ?subscribed_topics); + info!(self.log, "Subscribed to topics"; "topics" => ?subscribed_topics); } - let mut config = ctx.config.clone(); - - // Set up the Identify Behaviour - - - - - - - /* - * gossipsub, - eth2_rpc: , - discovery, - identify: , - // Auxiliary fields - peer_manager: - - */ - */ - let network = Network { - swarm, - events: VecDeque::new(), - network_globals: network_globals.clone(), - enr_fork_id, - waker: None, - network_dir: config.network_dir.clone(), - fork_context: ctx.fork_context, - score_settings, - update_gossipsub_scores, - gossip_cache, - bandwidth, - local_peer_id, - log, - }; - Ok((network, network_globals)) + Ok(()) } /* Public Accessible Functions to interact with the 
behaviour */ From ed4d82036900a4754b77e17d0971179a5ad00442 Mon Sep 17 00:00:00 2001 From: Diva M Date: Wed, 24 Aug 2022 12:03:23 -0500 Subject: [PATCH 07/31] poll without event queueing --- .../lighthouse_network/src/behaviour/mod.rs | 220 +++++++++++++----- beacon_node/lighthouse_network/src/service.rs | 8 +- 2 files changed, 163 insertions(+), 65 deletions(-) diff --git a/beacon_node/lighthouse_network/src/behaviour/mod.rs b/beacon_node/lighthouse_network/src/behaviour/mod.rs index 339251e461b..38951e4d242 100644 --- a/beacon_node/lighthouse_network/src/behaviour/mod.rs +++ b/beacon_node/lighthouse_network/src/behaviour/mod.rs @@ -24,7 +24,7 @@ use crate::{rpc::*, EnrExt}; use futures::stream::StreamExt; use libp2p::bandwidth::BandwidthSinks; use libp2p::gossipsub::error::PublishError; -use libp2p::swarm::{ConnectionLimits, SwarmBuilder}; +use libp2p::swarm::{ConnectionLimits, SwarmBuilder, SwarmEvent}; use libp2p::Swarm; use libp2p::{ core::{connection::ConnectionId, identity::Keypair}, @@ -129,6 +129,8 @@ pub enum OldBehaviourEvent { }, /// Inform the network to send a Status to this peer. StatusPeer(PeerId), + NewListenAddr(Multiaddr), + ZeroListeners, } #[derive(NetworkBehaviour)] @@ -153,17 +155,12 @@ struct Behaviour { pub struct Network { swarm: libp2p::swarm::Swarm>, /* Auxiliary Fields */ - /// The output events generated by this behaviour to be consumed in the swarm poll. - events: VecDeque>, /// A collections of variables accessible outside the network service. network_globals: Arc>, /// Keeps track of the current EnrForkId for upgrading gossipsub topics. // NOTE: This can be accessed via the network_globals ENR. However we keep it here for quick // lookups for every gossipsub message send. enr_fork_id: EnrForkId, - /// The waker for the current task. This is used to wake the task when events are added to the - /// queue. - waker: Option, /// Directory where metadata is stored. network_dir: PathBuf, fork_context: Arc, @@ -190,7 +187,6 @@ impl Network { let log = log.new(o!("service"=> "libp2p")); let mut config = ctx.config.clone(); trace!(log, "Libp2p Service starting"); - // initialise the node's ID let local_keypair = crate::load_private_key(&config, &log); @@ -393,12 +389,10 @@ impl Network { ) }; - let network = Network { + let mut network = Network { swarm, - events: VecDeque::new(), network_globals: network_globals.clone(), enr_fork_id, - waker: None, network_dir: config.network_dir.clone(), fork_context: ctx.fork_context, score_settings, @@ -409,12 +403,12 @@ impl Network { log, }; - // Start initialization routine - // TODO: split new and start functions? + network.start(&config).await?; Ok((network, network_globals)) } + // TODO: docs async fn start(&mut self, config: &crate::NetworkConfig) -> error::Result<()> { let enr = self.network_globals.local_enr(); info!(self.log, "Libp2p Starting"; "peer_id" => %enr.peer_id(), "bandwidth_config" => format!("{}-{}", config.network_load, NetworkLoad::from(config.network_load).name)); @@ -980,24 +974,31 @@ impl Network { // RPC Propagation methods /// Queues the response to be sent upwards as long at it was requested outside the Behaviour. 
- fn propagate_response( + #[must_use = "return the response"] + fn build_response( &mut self, id: RequestId, peer_id: PeerId, response: Response, - ) { + ) -> Option> { match id { - RequestId::Application(id) => self.add_event(OldBehaviourEvent::ResponseReceived { + RequestId::Application(id) => Some(OldBehaviourEvent::ResponseReceived { peer_id, id, response, }), - RequestId::Behaviour => {} + RequestId::Behaviour => None, } } /// Convenience function to propagate a request. - fn propagate_request(&mut self, id: PeerRequestId, peer_id: PeerId, request: Request) { + #[must_use = "actually return the event"] + fn build_request( + &mut self, + id: PeerRequestId, + peer_id: PeerId, + request: Request, + ) -> OldBehaviourEvent { // Increment metrics match &request { Request::Status(_) => { @@ -1010,19 +1011,11 @@ impl Network { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_root"]) } } - self.add_event(OldBehaviourEvent::RequestReceived { + return OldBehaviourEvent::RequestReceived { peer_id, id, request, - }); - } - - /// Adds an event to the queue waking the current task to process it. - fn add_event(&mut self, event: OldBehaviourEvent) { - self.events.push_back(event); - if let Some(waker) = &self.waker { - waker.wake_by_ref(); - } + }; } /// Dial cached enrs in discovery service that are in the given `subnet_id` and aren't @@ -1083,7 +1076,10 @@ impl Network { } WhitelistSubscriptionFilter(possible_hashes) } - fn inject_gs_event(&mut self, event: GossipsubEvent) { + fn inject_gs_event( + &mut self, + event: GossipsubEvent, + ) -> Option> { match event { GossipsubEvent::Message { propagation_source, @@ -1106,7 +1102,7 @@ impl Network { } Ok(msg) => { // Notify the network - self.add_event(OldBehaviourEvent::PubsubMessage { + return Some(OldBehaviourEvent::PubsubMessage { id, source: propagation_source, topic: gs_msg.topic, @@ -1175,8 +1171,13 @@ impl Network { ); } } + None } - fn inject_rpc_event(&mut self, event: RPCMessage, TSpec>) { + + fn inject_rpc_event( + &mut self, + event: RPCMessage, TSpec>, + ) -> Option> { let peer_id = event.peer_id; if !self.peer_manager().is_connected(&peer_id) { @@ -1185,7 +1186,7 @@ impl Network { "Ignoring rpc message of disconnecting peer"; event ); - return; + return None; } let handler_id = event.conn_id; @@ -1207,6 +1208,7 @@ impl Network { &error, ConnectionDirection::Incoming, ); + None } HandlerErr::Outbound { id, proto, error } => { // Inform the peer manager that a request we sent to the peer failed @@ -1218,7 +1220,9 @@ impl Network { ); // inform failures of requests comming outside the behaviour if let RequestId::Application(id) = id { - self.add_event(OldBehaviourEvent::RPCFailed { peer_id, id }); + Some(OldBehaviourEvent::RPCFailed { peer_id, id }) + } else { + None } } } @@ -1232,10 +1236,12 @@ impl Network { self.peer_manager_mut().ping_request(&peer_id, ping.data); // send a ping response self.pong(peer_request_id, peer_id); + None } InboundRequest::MetaData(_) => { // send the requested meta-data self.send_meta_data_response((handler_id, id), peer_id); + None } InboundRequest::Goodbye(reason) => { // queue for disconnection without a goodbye message @@ -1249,13 +1255,16 @@ impl Network { // disconnecting here. The RPC handler will automatically // disconnect for us. // The actual disconnection event will be relayed to the application. 
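A simplified sketch, with assumed types, of the shape this patch moves towards: the `inject_*` handlers return an optional event instead of pushing onto an internal queue and waking the task, and the poll loop forwards whatever they return.

#[derive(Debug)]
enum Event {
    StatusPeer(u32),
}

#[derive(Debug)]
enum PeerManagerEvent {
    Status(u32),
    Ping(u32),
}

struct Network {
    pings_sent: u32,
}

impl Network {
    // Before: `self.add_event(Event::StatusPeer(id))` with no return value.
    // After: the event, if any, is handed straight back to the caller.
    fn inject_pm_event(&mut self, event: PeerManagerEvent) -> Option<Event> {
        match event {
            PeerManagerEvent::Status(id) => Some(Event::StatusPeer(id)),
            PeerManagerEvent::Ping(_id) => {
                // Handled internally; nothing to surface to the application.
                self.pings_sent += 1;
                None
            }
        }
    }
}

fn main() {
    let mut net = Network { pings_sent: 0 };
    assert!(net.inject_pm_event(PeerManagerEvent::Ping(7)).is_none());
    if let Some(ev) = net.inject_pm_event(PeerManagerEvent::Status(7)) {
        println!("surface to the application: {:?}", ev);
    }
    println!("pings handled internally: {}", net.pings_sent);
}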
+ None } /* Protocols propagated to the Network */ InboundRequest::Status(msg) => { // inform the peer manager that we have received a status from a peer self.peer_manager_mut().peer_statusd(&peer_id); // propagate the STATUS message upwards - self.propagate_request(peer_request_id, peer_id, Request::Status(msg)) + let event = + self.build_request(peer_request_id, peer_id, Request::Status(msg)); + Some(event) } InboundRequest::BlocksByRange(req) => { let methods::OldBlocksByRangeRequest { @@ -1273,19 +1282,26 @@ impl Network { ), ConnectionDirection::Incoming, ); + return None; } // return just one block in case the step parameter is used. https://github.com/ethereum/consensus-specs/pull/2856 if step > 1 { count = 1; } - self.propagate_request( + let event = self.build_request( peer_request_id, peer_id, Request::BlocksByRange(BlocksByRangeRequest { start_slot, count }), ); + Some(event) } InboundRequest::BlocksByRoot(req) => { - self.propagate_request(peer_request_id, peer_id, Request::BlocksByRoot(req)) + let event = self.build_request( + peer_request_id, + peer_id, + Request::BlocksByRoot(req), + ); + Some(event) } } } @@ -1293,23 +1309,26 @@ impl Network { match resp { /* Behaviour managed protocols */ RPCResponse::Pong(ping) => { - self.peer_manager_mut().pong_response(&peer_id, ping.data) + self.peer_manager_mut().pong_response(&peer_id, ping.data); + None + } + RPCResponse::MetaData(meta_data) => { + self.peer_manager_mut() + .meta_data_response(&peer_id, meta_data); + None } - RPCResponse::MetaData(meta_data) => self - .peer_manager_mut() - .meta_data_response(&peer_id, meta_data), /* Network propagated protocols */ RPCResponse::Status(msg) => { // inform the peer manager that we have received a status from a peer self.peer_manager_mut().peer_statusd(&peer_id); // propagate the STATUS message upwards - self.propagate_response(id, peer_id, Response::Status(msg)); + self.build_response(id, peer_id, Response::Status(msg)) } RPCResponse::BlocksByRange(resp) => { - self.propagate_response(id, peer_id, Response::BlocksByRange(Some(resp))) + self.build_response(id, peer_id, Response::BlocksByRange(Some(resp))) } RPCResponse::BlocksByRoot(resp) => { - self.propagate_response(id, peer_id, Response::BlocksByRoot(Some(resp))) + self.build_response(id, peer_id, Response::BlocksByRoot(Some(resp))) } } } @@ -1318,12 +1337,15 @@ impl Network { ResponseTermination::BlocksByRange => Response::BlocksByRange(None), ResponseTermination::BlocksByRoot => Response::BlocksByRoot(None), }; - self.propagate_response(id, peer_id, response); + self.build_response(id, peer_id, response) } } } - fn inject_discovery_event(&mut self, event: DiscoveredPeers) { + fn inject_discovery_event( + &mut self, + event: DiscoveredPeers, + ) -> Option> { let DiscoveredPeers { peers } = event; let to_dial_peers = self.peer_manager_mut().peers_discovered(peers); for peer_id in to_dial_peers { @@ -1332,9 +1354,13 @@ impl Network { let enr = self.discovery_mut().enr_of_peer(&peer_id); self.peer_manager_mut().dial_peer(&peer_id, enr); } + None } - fn inject_identify_event(&mut self, event: IdentifyEvent) { + fn inject_identify_event( + &mut self, + event: IdentifyEvent, + ) -> Option> { match event { IdentifyEvent::Received { peer_id, mut info } => { if info.listen_addrs.len() > MAX_IDENTIFY_ADDRESSES { @@ -1351,46 +1377,54 @@ impl Network { IdentifyEvent::Error { .. } => {} IdentifyEvent::Pushed { .. 
} => {} } + None } - fn inject_pm_event(&mut self, event: PeerManagerEvent) { + fn inject_pm_event( + &mut self, + event: PeerManagerEvent, + ) -> Option> { match event { PeerManagerEvent::PeerConnectedIncoming(peer_id) => { - self.add_event(OldBehaviourEvent::PeerConnectedIncoming(peer_id)); + Some(OldBehaviourEvent::PeerConnectedIncoming(peer_id)) } PeerManagerEvent::PeerConnectedOutgoing(peer_id) => { - self.add_event(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)); + Some(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)) } PeerManagerEvent::PeerDisconnected(peer_id) => { - self.add_event(OldBehaviourEvent::PeerDisconnected(peer_id)); + Some(OldBehaviourEvent::PeerDisconnected(peer_id)) } PeerManagerEvent::Banned(peer_id, associated_ips) => { self.discovery_mut().ban_peer(&peer_id, associated_ips); - self.add_event(OldBehaviourEvent::PeerBanned(peer_id)); + Some(OldBehaviourEvent::PeerBanned(peer_id)) } PeerManagerEvent::UnBanned(peer_id, associated_ips) => { self.discovery_mut().unban_peer(&peer_id, associated_ips); - self.add_event(OldBehaviourEvent::PeerUnbanned(peer_id)); + Some(OldBehaviourEvent::PeerUnbanned(peer_id)) } PeerManagerEvent::Status(peer_id) => { // it's time to status. We don't keep a beacon chain reference here, so we inform // the network to send a status to this peer - self.add_event(OldBehaviourEvent::StatusPeer(peer_id)); + Some(OldBehaviourEvent::StatusPeer(peer_id)) } PeerManagerEvent::DiscoverPeers(peers_to_find) => { // Peer manager has requested a discovery query for more peers. self.discovery_mut().discover_peers(peers_to_find); + None } PeerManagerEvent::DiscoverSubnetPeers(subnets_to_discover) => { // Peer manager has requested a subnet discovery query for more peers. self.discover_subnet_peers(subnets_to_discover); + None } PeerManagerEvent::Ping(peer_id) => { // send a ping request to this peer self.ping(peer_id); + None } PeerManagerEvent::MetaData(peer_id) => { self.send_meta_data_request(peer_id); + None } PeerManagerEvent::DisconnectPeer(peer_id, reason) => { debug!(self.log, "Peer Manager disconnecting peer"; @@ -1398,21 +1432,85 @@ impl Network { // send one goodbye self.eth2_rpc_mut() .shutdown(peer_id, RequestId::Behaviour, reason); + None } } } pub fn poll(&mut self, cx: &mut Context) -> Poll> { - if let Some(waker) = &self.waker { - if waker.will_wake(cx.waker()) { - self.waker = Some(cx.waker().clone()); - } - } else { - self.waker = Some(cx.waker().clone()); - } + let maybe_event = match self.swarm.poll_next_unpin(cx) { + Poll::Ready(Some(swarm_event)) => match swarm_event { + SwarmEvent::Behaviour(behaviour_event) => match behaviour_event { + BehaviourEvent::Gossipsub(ge) => self.inject_gs_event(ge), + BehaviourEvent::Eth2Rpc(re) => self.inject_rpc_event(re), + BehaviourEvent::Discovery(de) => self.inject_discovery_event(de), + BehaviourEvent::Identify(ie) => self.inject_identify_event(ie), + BehaviourEvent::PeerManager(pe) => self.inject_pm_event(pe), + }, + SwarmEvent::ConnectionEstablished { .. } => None, + SwarmEvent::ConnectionClosed { .. 
} => None, + SwarmEvent::IncomingConnection { + local_addr, + send_back_addr, + } => { + trace!(self.log, "Incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr); + None + } + SwarmEvent::IncomingConnectionError { + local_addr, + send_back_addr, + error, + } => { + debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => %error); + None + } + SwarmEvent::OutgoingConnectionError { peer_id, error } => { + debug!(self.log, "Failed to dial address"; "peer_id" => ?peer_id, "error" => %error); + None + } + SwarmEvent::BannedPeer { peer_id, endpoint } => { + debug!(self.log, "Banned peer connection rejected"; "peer_id" => %peer_id); + None + } + SwarmEvent::NewListenAddr { + listener_id, + address, + } => todo!(), + SwarmEvent::ExpiredListenAddr { + listener_id, + address, + } => { + debug!(self.log, "Listen address expired"; "address" => %address); + None + } + SwarmEvent::ListenerClosed { + listener_id, + addresses, + reason, + } => { + crit!(self.log, "Listener closed"; "addresses" => ?addresses, "reason" => ?reason); + if Swarm::listeners(&self.swarm).count() == 0 { + Some(OldBehaviourEvent::ZeroListeners) + } else { + None + } + } + SwarmEvent::ListenerError { listener_id, error } => { + // this is non fatal, but we still check + warn!(self.log, "Listener error"; "error" => ?error); + if Swarm::listeners(&self.swarm).count() == 0 { + Some(OldBehaviourEvent::ZeroListeners) + } else { + None + } + } + SwarmEvent::Dialing(_) => None, + }, + Poll::Pending | Poll::Ready(None) => None, + }; - if let Some(event) = self.events.pop_front() { - return Poll::Ready(event); + if let Some(ev) = maybe_event { + return Poll::Ready(ev); } // perform gossipsub score updates when necessary diff --git a/beacon_node/lighthouse_network/src/service.rs b/beacon_node/lighthouse_network/src/service.rs index b1ba5d47ad2..f717ea8e8b8 100644 --- a/beacon_node/lighthouse_network/src/service.rs +++ b/beacon_node/lighthouse_network/src/service.rs @@ -82,10 +82,10 @@ impl Service { let service = Service { _p_a: Default::default(), _p_b: Default::default(), - swarm, - bandwidth, - local_peer_id, - log, + swarm: todo!(), + bandwidth: todo!(), + local_peer_id: todo!(), + log: log.clone(), }; Ok((network_globals, service)) From 37e6f5e6455f4a1abe69eb09f497deedf8e269eb Mon Sep 17 00:00:00 2001 From: Diva M Date: Wed, 24 Aug 2022 12:07:02 -0500 Subject: [PATCH 08/31] file renaming --- beacon_node/lighthouse_network/src/service.rs | 400 ------------------ .../{behaviour => service}/gossip_cache.rs | 0 .../gossipsub_scoring_parameters.rs | 0 .../src/{behaviour => service}/mod.rs | 0 4 files changed, 400 deletions(-) delete mode 100644 beacon_node/lighthouse_network/src/service.rs rename beacon_node/lighthouse_network/src/{behaviour => service}/gossip_cache.rs (100%) rename beacon_node/lighthouse_network/src/{behaviour => service}/gossipsub_scoring_parameters.rs (100%) rename beacon_node/lighthouse_network/src/{behaviour => service}/mod.rs (100%) diff --git a/beacon_node/lighthouse_network/src/service.rs b/beacon_node/lighthouse_network/src/service.rs deleted file mode 100644 index f717ea8e8b8..00000000000 --- a/beacon_node/lighthouse_network/src/service.rs +++ /dev/null @@ -1,400 +0,0 @@ -use crate::behaviour::{ - save_metadata_to_disk, Network, OldBehaviourEvent, PeerRequestId, Request, Response, -}; -use crate::config::NetworkLoad; -use crate::discovery::enr; -use crate::multiaddr::Protocol; -use crate::rpc::{GoodbyeReason, MetaData, MetaDataV1, 
MetaDataV2, RPCResponseErrorCode, ReqId}; -use crate::types::{error, EnrAttestationBitfield, EnrSyncCommitteeBitfield, GossipKind}; -use crate::EnrExt; -use crate::{NetworkConfig, NetworkGlobals, PeerAction, ReportSource}; -use futures::prelude::*; -use libp2p::core::{ - identity::Keypair, multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed, -}; -use libp2p::{ - bandwidth::{BandwidthLogging, BandwidthSinks}, - core, noise, - swarm::{ConnectionLimits, SwarmBuilder, SwarmEvent}, - PeerId, Swarm, Transport, -}; -use prometheus_client::registry::Registry; -use slog::{crit, debug, info, o, trace, warn, Logger}; -use ssz::Decode; -use std::fs::File; -use std::io::prelude::*; -use std::pin::Pin; -use std::sync::Arc; -use std::time::Duration; -use types::{ChainSpec, EnrForkId, EthSpec, ForkContext}; - -use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS}; - -pub const NETWORK_KEY_FILENAME: &str = "key"; -/// The maximum simultaneous libp2p connections per peer. -pub const MAX_CONNECTIONS_PER_PEER: u32 = 1; -/// The filename to store our local metadata. -pub const METADATA_FILENAME: &str = "metadata"; - -/// The types of events than can be obtained from polling the libp2p service. -/// -/// This is a subset of the events that a libp2p swarm emits. -#[derive(Debug)] -pub enum Libp2pEvent { - /// A behaviour event - Behaviour(OldBehaviourEvent), - /// A new listening address has been established. - NewListenAddr(Multiaddr), - /// We reached zero listening addresses. - ZeroListeners, -} - -/// The configuration and state of the libp2p components for the beacon node. -pub struct Service { - _p_a: std::marker::PhantomData, - _p_b: std::marker::PhantomData, - /// The libp2p Swarm handler. - pub swarm: Swarm, - /// The bandwidth logger for the underlying libp2p transport. - pub bandwidth: Arc, - /// This node's PeerId. - pub local_peer_id: PeerId, - /// The libp2p logger handle. - pub log: Logger, -} - -pub struct Context<'a> { - pub config: &'a NetworkConfig, - pub enr_fork_id: EnrForkId, - pub fork_context: Arc, - pub chain_spec: &'a ChainSpec, - pub gossipsub_registry: Option<&'a mut Registry>, -} - -impl Service { - pub async fn new( - executor: task_executor::TaskExecutor, - ctx: Context<'_>, - log: &Logger, - ) -> error::Result<(Arc>, Self)> { - // listen on the specified address - - let service = Service { - _p_a: Default::default(), - _p_b: Default::default(), - swarm: todo!(), - bandwidth: todo!(), - local_peer_id: todo!(), - log: log.clone(), - }; - - Ok((network_globals, service)) - } - - /// Sends a request to a peer, with a given Id. - pub fn send_request(&mut self, peer_id: PeerId, request_id: AppReqId, request: Request) { - todo!() - } - - /// Informs the peer that their request failed. - pub fn respond_with_error( - &mut self, - peer_id: PeerId, - id: PeerRequestId, - error: RPCResponseErrorCode, - reason: String, - ) { - todo!() - } - - /// Report a peer's action. - pub fn report_peer( - &mut self, - peer_id: &PeerId, - action: PeerAction, - source: ReportSource, - msg: &'static str, - ) { - todo!() - } - - /// Disconnect and ban a peer, providing a reason. - pub fn goodbye_peer(&mut self, peer_id: &PeerId, reason: GoodbyeReason, source: ReportSource) { - todo!() - } - - /// Sends a response to a peer's request. 
- pub fn send_response(&mut self, peer_id: PeerId, id: PeerRequestId, response: Response) { - todo!() - } - - pub async fn next_event(&mut self) -> Libp2pEvent { - todo!() - /* - loop { - match self.swarm.select_next_some().await { - SwarmEvent::Behaviour(behaviour) => { - // Handle banning here - match &behaviour { - OldBehaviourEvent::PeerBanned(peer_id) => { - self.swarm.ban_peer_id(*peer_id); - } - OldBehaviourEvent::PeerUnbanned(peer_id) => { - self.swarm.unban_peer_id(*peer_id); - } - _ => {} - } - return Libp2pEvent::Behaviour(behaviour); - } - SwarmEvent::ConnectionEstablished { - peer_id: _, - endpoint: _, - num_established: _, - concurrent_dial_errors: _, - } => {} - SwarmEvent::ConnectionClosed { - peer_id: _, - cause: _, - endpoint: _, - num_established: _, - } => {} - SwarmEvent::NewListenAddr { address, .. } => { - return Libp2pEvent::NewListenAddr(address) - } - SwarmEvent::IncomingConnection { - local_addr, - send_back_addr, - } => { - trace!(self.log, "Incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr) - } - SwarmEvent::IncomingConnectionError { - local_addr, - send_back_addr, - error, - } => { - debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => %error); - } - SwarmEvent::BannedPeer { peer_id, .. } => { - debug!(self.log, "Banned peer connection rejected"; "peer_id" => %peer_id); - } - SwarmEvent::OutgoingConnectionError { peer_id, error } => { - debug!(self.log, "Failed to dial address"; "peer_id" => ?peer_id, "error" => %error); - } - SwarmEvent::ExpiredListenAddr { address, .. } => { - debug!(self.log, "Listen address expired"; "address" => %address) - } - SwarmEvent::ListenerClosed { - addresses, reason, .. - } => { - crit!(self.log, "Listener closed"; "addresses" => ?addresses, "reason" => ?reason); - if Swarm::listeners(&self.swarm).count() == 0 { - return Libp2pEvent::ZeroListeners; - } - } - SwarmEvent::ListenerError { error, .. } => { - // this is non fatal, but we still check - warn!(self.log, "Listener error"; "error" => ?error); - if Swarm::listeners(&self.swarm).count() == 0 { - return Libp2pEvent::ZeroListeners; - } - } - SwarmEvent::Dialing(_peer_id) => {} - } - } - */ - } -} - -type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>; - -/// The implementation supports TCP/IP, WebSockets over TCP/IP, noise as the encryption layer, and -/// mplex as the multiplexing layer. 
-pub fn build_transport( - local_private_key: Keypair, -) -> std::io::Result<(BoxedTransport, Arc)> { - let tcp = - libp2p::tcp::TokioTcpTransport::new(libp2p::tcp::GenTcpConfig::default().nodelay(true)); - let transport = libp2p::dns::TokioDnsConfig::system(tcp)?; - #[cfg(feature = "libp2p-websocket")] - let transport = { - let trans_clone = transport.clone(); - transport.or_transport(libp2p::websocket::WsConfig::new(trans_clone)) - }; - - let (transport, bandwidth) = BandwidthLogging::new(transport); - - // mplex config - let mut mplex_config = libp2p::mplex::MplexConfig::new(); - mplex_config.set_max_buffer_size(256); - mplex_config.set_max_buffer_behaviour(libp2p::mplex::MaxBufferBehaviour::Block); - - // yamux config - let mut yamux_config = libp2p::yamux::YamuxConfig::default(); - yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::on_read()); - - // Authentication - Ok(( - transport - .upgrade(core::upgrade::Version::V1) - .authenticate(generate_noise_config(&local_private_key)) - .multiplex(core::upgrade::SelectUpgrade::new( - yamux_config, - mplex_config, - )) - .timeout(Duration::from_secs(10)) - .boxed(), - bandwidth, - )) -} - -// Useful helper functions for debugging. Currently not used in the client. -#[allow(dead_code)] -fn keypair_from_hex(hex_bytes: &str) -> error::Result { - let hex_bytes = if let Some(stripped) = hex_bytes.strip_prefix("0x") { - stripped.to_string() - } else { - hex_bytes.to_string() - }; - - hex::decode(&hex_bytes) - .map_err(|e| format!("Failed to parse p2p secret key bytes: {:?}", e).into()) - .and_then(keypair_from_bytes) -} - -#[allow(dead_code)] -fn keypair_from_bytes(mut bytes: Vec) -> error::Result { - libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut bytes) - .map(|secret| { - let keypair: libp2p::core::identity::secp256k1::Keypair = secret.into(); - Keypair::Secp256k1(keypair) - }) - .map_err(|e| format!("Unable to parse p2p secret key: {:?}", e).into()) -} - -/// Loads a private key from disk. If this fails, a new key is -/// generated and is then saved to disk. -/// -/// Currently only secp256k1 keys are allowed, as these are the only keys supported by discv5. -pub fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair { - // check for key from disk - let network_key_f = config.network_dir.join(NETWORK_KEY_FILENAME); - if let Ok(mut network_key_file) = File::open(network_key_f.clone()) { - let mut key_bytes: Vec = Vec::with_capacity(36); - match network_key_file.read_to_end(&mut key_bytes) { - Err(_) => debug!(log, "Could not read network key file"), - Ok(_) => { - // only accept secp256k1 keys for now - if let Ok(secret_key) = - libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut key_bytes) - { - let kp: libp2p::core::identity::secp256k1::Keypair = secret_key.into(); - debug!(log, "Loaded network key from disk."); - return Keypair::Secp256k1(kp); - } else { - debug!(log, "Network key file is not a valid secp256k1 key"); - } - } - } - } - - // if a key could not be loaded from disk, generate a new one and save it - let local_private_key = Keypair::generate_secp256k1(); - if let Keypair::Secp256k1(key) = local_private_key.clone() { - let _ = std::fs::create_dir_all(&config.network_dir); - match File::create(network_key_f.clone()) - .and_then(|mut f| f.write_all(&key.secret().to_bytes())) - { - Ok(_) => { - debug!(log, "New network key generated and written to disk"); - } - Err(e) => { - warn!( - log, - "Could not write node key to file: {:?}. 
error: {}", network_key_f, e - ); - } - } - } - local_private_key -} - -/// Generate authenticated XX Noise config from identity keys -fn generate_noise_config( - identity_keypair: &Keypair, -) -> noise::NoiseAuthenticated { - let static_dh_keys = noise::Keypair::::new() - .into_authentic(identity_keypair) - .expect("signing can fail only once during starting a node"); - noise::NoiseConfig::xx(static_dh_keys).into_authenticated() -} - -/// For a multiaddr that ends with a peer id, this strips this suffix. Rust-libp2p -/// only supports dialing to an address without providing the peer id. -pub fn strip_peer_id(addr: &mut Multiaddr) { - let last = addr.pop(); - match last { - Some(Protocol::P2p(_)) => {} - Some(other) => addr.push(other), - _ => {} - } -} - -/// Load metadata from persisted file. Return default metadata if loading fails. -pub fn load_or_build_metadata( - network_dir: &std::path::Path, - log: &slog::Logger, -) -> MetaData { - // We load a V2 metadata version by default (regardless of current fork) - // since a V2 metadata can be converted to V1. The RPC encoder is responsible - // for sending the correct metadata version based on the negotiated protocol version. - let mut meta_data = MetaDataV2 { - seq_number: 0, - attnets: EnrAttestationBitfield::::default(), - syncnets: EnrSyncCommitteeBitfield::::default(), - }; - // Read metadata from persisted file if available - let metadata_path = network_dir.join(METADATA_FILENAME); - if let Ok(mut metadata_file) = File::open(metadata_path) { - let mut metadata_ssz = Vec::new(); - if metadata_file.read_to_end(&mut metadata_ssz).is_ok() { - // Attempt to read a MetaDataV2 version from the persisted file, - // if that fails, read MetaDataV1 - match MetaDataV2::::from_ssz_bytes(&metadata_ssz) { - Ok(persisted_metadata) => { - meta_data.seq_number = persisted_metadata.seq_number; - // Increment seq number if persisted attnet is not default - if persisted_metadata.attnets != meta_data.attnets - || persisted_metadata.syncnets != meta_data.syncnets - { - meta_data.seq_number += 1; - } - debug!(log, "Loaded metadata from disk"); - } - Err(_) => { - match MetaDataV1::::from_ssz_bytes(&metadata_ssz) { - Ok(persisted_metadata) => { - let persisted_metadata = MetaData::V1(persisted_metadata); - // Increment seq number as the persisted metadata version is updated - meta_data.seq_number = *persisted_metadata.seq_number() + 1; - debug!(log, "Loaded metadata from disk"); - } - Err(e) => { - debug!( - log, - "Metadata from file could not be decoded"; - "error" => ?e, - ); - } - } - } - } - } - }; - - // Wrap the MetaData - let meta_data = MetaData::V2(meta_data); - - debug!(log, "Metadata sequence number"; "seq_num" => meta_data.seq_number()); - save_metadata_to_disk(network_dir, meta_data.clone(), log); - meta_data -} diff --git a/beacon_node/lighthouse_network/src/behaviour/gossip_cache.rs b/beacon_node/lighthouse_network/src/service/gossip_cache.rs similarity index 100% rename from beacon_node/lighthouse_network/src/behaviour/gossip_cache.rs rename to beacon_node/lighthouse_network/src/service/gossip_cache.rs diff --git a/beacon_node/lighthouse_network/src/behaviour/gossipsub_scoring_parameters.rs b/beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs similarity index 100% rename from beacon_node/lighthouse_network/src/behaviour/gossipsub_scoring_parameters.rs rename to beacon_node/lighthouse_network/src/service/gossipsub_scoring_parameters.rs diff --git a/beacon_node/lighthouse_network/src/behaviour/mod.rs 
b/beacon_node/lighthouse_network/src/service/mod.rs similarity index 100% rename from beacon_node/lighthouse_network/src/behaviour/mod.rs rename to beacon_node/lighthouse_network/src/service/mod.rs From 8e7fcd301058219632556f947efe86350f213a92 Mon Sep 17 00:00:00 2001 From: Diva M Date: Wed, 24 Aug 2022 14:44:00 -0500 Subject: [PATCH 09/31] fix most compilation issues in the network crate --- .../lighthouse_network/src/discovery/mod.rs | 2 +- beacon_node/lighthouse_network/src/lib.rs | 11 +- .../lighthouse_network/src/old_service.rs | 390 ++++++++++++++++++ .../src/peer_manager/mod.rs | 2 +- .../src/peer_manager/peerdb/score.rs | 2 +- .../src/service/behaviour.rs | 39 ++ .../lighthouse_network/src/service/mod.rs | 58 +-- .../tests/common/behaviour.rs | 3 +- .../lighthouse_network/tests/rpc_tests.rs | 108 +---- beacon_node/network/src/service.rs | 100 ++--- 10 files changed, 510 insertions(+), 205 deletions(-) create mode 100644 beacon_node/lighthouse_network/src/old_service.rs create mode 100644 beacon_node/lighthouse_network/src/service/behaviour.rs diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index b68112ab2c9..93adfbe2169 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -7,7 +7,7 @@ pub(crate) mod enr; pub mod enr_ext; // Allow external use of the lighthouse ENR builder -use crate::behaviour::TARGET_SUBNET_PEERS; +use crate::service::TARGET_SUBNET_PEERS; use crate::metrics; use crate::{error, Enr, NetworkConfig, NetworkGlobals, Subnet, SubnetDiscovery}; use discv5::{enr::NodeId, Discv5, Discv5Event}; diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index 0be51f10565..05c2755f62d 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -5,15 +5,14 @@ #[macro_use] extern crate lazy_static; -pub mod behaviour; mod config; +pub mod service; #[allow(clippy::mutable_key_type)] // PeerId in hashmaps are no longer permitted by clippy pub mod discovery; pub mod metrics; pub mod peer_manager; pub mod rpc; -mod service; pub mod types; pub use config::gossip_max_size; @@ -69,7 +68,6 @@ pub use crate::types::{ pub use prometheus_client; -pub use behaviour::{OldBehaviourEvent, Gossipsub, PeerRequestId, Request, Response}; pub use config::Config as NetworkConfig; pub use discovery::{CombinedKeyExt, EnrExt, Eth2Enr}; pub use discv5; @@ -85,4 +83,9 @@ pub use peer_manager::{ peerdb::PeerDB, ConnectionDirection, PeerConnectionStatus, PeerInfo, PeerManager, SyncInfo, SyncStatus, }; -pub use service::{load_private_key, Context, Libp2pEvent, Service, NETWORK_KEY_FILENAME}; +// pub use service::{load_private_key, Context, Libp2pEvent, Service, NETWORK_KEY_FILENAME}; +pub use service::{Gossipsub, OldBehaviourEvent, PeerRequestId, Request, Response}; + +mod old_service; + +pub use old_service::*; diff --git a/beacon_node/lighthouse_network/src/old_service.rs b/beacon_node/lighthouse_network/src/old_service.rs new file mode 100644 index 00000000000..262ef8b4c78 --- /dev/null +++ b/beacon_node/lighthouse_network/src/old_service.rs @@ -0,0 +1,390 @@ +use crate::multiaddr::Protocol; +use crate::rpc::{GoodbyeReason, MetaData, MetaDataV1, MetaDataV2, RPCResponseErrorCode, ReqId}; +use crate::service::{save_metadata_to_disk, OldBehaviourEvent, PeerRequestId, Request, Response}; +use crate::types::{error, EnrAttestationBitfield, EnrSyncCommitteeBitfield}; +use 
crate::{NetworkConfig, NetworkGlobals, PeerAction, ReportSource}; +use libp2p::core::{ + identity::Keypair, multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed, +}; +use libp2p::{ + bandwidth::{BandwidthLogging, BandwidthSinks}, + core, noise, PeerId, Swarm, Transport, +}; +use prometheus_client::registry::Registry; +use slog::{debug, warn, Logger}; +use ssz::Decode; +use std::fs::File; +use std::io::prelude::*; +use std::sync::Arc; +use std::time::Duration; +use types::{ChainSpec, EnrForkId, EthSpec, ForkContext}; + +pub const NETWORK_KEY_FILENAME: &str = "key"; +/// The maximum simultaneous libp2p connections per peer. +pub const MAX_CONNECTIONS_PER_PEER: u32 = 1; +/// The filename to store our local metadata. +pub const METADATA_FILENAME: &str = "metadata"; + +/// The types of events than can be obtained from polling the libp2p service. +/// +/// This is a subset of the events that a libp2p swarm emits. +#[derive(Debug)] +pub enum Libp2pEvent { + /// A behaviour event + Behaviour(OldBehaviourEvent), + /// A new listening address has been established. + NewListenAddr(Multiaddr), + /// We reached zero listening addresses. + ZeroListeners, +} + +/// The configuration and state of the libp2p components for the beacon node. +pub struct Service { + _p_a: std::marker::PhantomData, + _p_b: std::marker::PhantomData, + /// The libp2p Swarm handler. + pub swarm: Swarm, + /// The bandwidth logger for the underlying libp2p transport. + pub bandwidth: Arc, + /// This node's PeerId. + pub local_peer_id: PeerId, + /// The libp2p logger handle. + pub log: Logger, +} + +pub struct Context<'a> { + pub config: &'a NetworkConfig, + pub enr_fork_id: EnrForkId, + pub fork_context: Arc, + pub chain_spec: &'a ChainSpec, + pub gossipsub_registry: Option<&'a mut Registry>, +} + +impl Service { + pub async fn new( + executor: task_executor::TaskExecutor, + ctx: Context<'_>, + log: &Logger, + ) -> error::Result<(Arc>, Self)> { + // listen on the specified address + + let service = Service { + _p_a: Default::default(), + _p_b: Default::default(), + swarm: todo!(), + bandwidth: todo!(), + local_peer_id: todo!(), + log: log.clone(), + }; + let network_globals = todo!(); + + Ok((network_globals, service)) + } + + /// Sends a request to a peer, with a given Id. + pub fn send_request(&mut self, peer_id: PeerId, request_id: AppReqId, request: Request) { + todo!() + } + + /// Informs the peer that their request failed. + pub fn respond_with_error( + &mut self, + peer_id: PeerId, + id: PeerRequestId, + error: RPCResponseErrorCode, + reason: String, + ) { + todo!() + } + + /// Report a peer's action. + pub fn report_peer( + &mut self, + peer_id: &PeerId, + action: PeerAction, + source: ReportSource, + msg: &'static str, + ) { + todo!() + } + + /// Disconnect and ban a peer, providing a reason. + pub fn goodbye_peer(&mut self, peer_id: &PeerId, reason: GoodbyeReason, source: ReportSource) { + todo!() + } + + /// Sends a response to a peer's request. 
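All of these methods are still `todo!()` placeholders while the crate is restructured; what matters at this point is the shape of the API. A hedged sketch of how the surrounding network service is expected to drive this facade once the stubs are filled in (the function name, trait bounds, and every binding here are assumptions for illustration, not taken from the patch):

    async fn run_network<AppReqId: ReqId, E: EthSpec>(
        executor: task_executor::TaskExecutor,
        ctx: Context<'_>,
        log: slog::Logger,
    ) -> error::Result<()> {
        // `new` hands back the shared network globals and the service itself.
        let (_globals, mut service) = Service::<AppReqId, E>::new(executor, ctx, &log).await?;
        loop {
            match service.next_event().await {
                // Behaviour events are forwarded to the router / sync layers.
                Libp2pEvent::Behaviour(_ev) => { /* hand off to the router */ }
                Libp2pEvent::NewListenAddr(addr) => debug!(log, "Listening"; "addr" => %addr),
                // No listeners left means the node cannot accept connections.
                Libp2pEvent::ZeroListeners => return Ok(()),
            }
        }
    }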
+ pub fn send_response(&mut self, peer_id: PeerId, id: PeerRequestId, response: Response) { + todo!() + } + + pub async fn next_event(&mut self) -> Libp2pEvent { + todo!() + /* + loop { + match self.swarm.select_next_some().await { + SwarmEvent::Behaviour(behaviour) => { + // Handle banning here + match &behaviour { + OldBehaviourEvent::PeerBanned(peer_id) => { + self.swarm.ban_peer_id(*peer_id); + } + OldBehaviourEvent::PeerUnbanned(peer_id) => { + self.swarm.unban_peer_id(*peer_id); + } + _ => {} + } + return Libp2pEvent::Behaviour(behaviour); + } + SwarmEvent::ConnectionEstablished { + peer_id: _, + endpoint: _, + num_established: _, + concurrent_dial_errors: _, + } => {} + SwarmEvent::ConnectionClosed { + peer_id: _, + cause: _, + endpoint: _, + num_established: _, + } => {} + SwarmEvent::NewListenAddr { address, .. } => { + return Libp2pEvent::NewListenAddr(address) + } + SwarmEvent::IncomingConnection { + local_addr, + send_back_addr, + } => { + trace!(self.log, "Incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr) + } + SwarmEvent::IncomingConnectionError { + local_addr, + send_back_addr, + error, + } => { + debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => %error); + } + SwarmEvent::BannedPeer { peer_id, .. } => { + debug!(self.log, "Banned peer connection rejected"; "peer_id" => %peer_id); + } + SwarmEvent::OutgoingConnectionError { peer_id, error } => { + debug!(self.log, "Failed to dial address"; "peer_id" => ?peer_id, "error" => %error); + } + SwarmEvent::ExpiredListenAddr { address, .. } => { + debug!(self.log, "Listen address expired"; "address" => %address) + } + SwarmEvent::ListenerClosed { + addresses, reason, .. + } => { + crit!(self.log, "Listener closed"; "addresses" => ?addresses, "reason" => ?reason); + if Swarm::listeners(&self.swarm).count() == 0 { + return Libp2pEvent::ZeroListeners; + } + } + SwarmEvent::ListenerError { error, .. } => { + // this is non fatal, but we still check + warn!(self.log, "Listener error"; "error" => ?error); + if Swarm::listeners(&self.swarm).count() == 0 { + return Libp2pEvent::ZeroListeners; + } + } + SwarmEvent::Dialing(_peer_id) => {} + } + } + */ + } +} + +type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>; + +/// The implementation supports TCP/IP, WebSockets over TCP/IP, noise as the encryption layer, and +/// mplex as the multiplexing layer. 
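The function below assembles exactly the stack described in the comment above (TCP, optional WebSockets, noise for encryption, mplex/yamux for multiplexing). For orientation, a hedged sketch of how its output is typically consumed when the swarm is built; the `behaviour` value is assumed to come from elsewhere in the crate, and this only mirrors the `SwarmBuilder`/`ConnectionLimits` imports used later in this series rather than prescribing the final wiring:

    // Sketch only: `behaviour` is the composed NetworkBehaviour constructed elsewhere.
    let local_peer_id = PeerId::from(local_keypair.public());
    let (transport, _bandwidth) = build_transport(local_keypair.clone())?;

    // Cap simultaneous connections per peer, as the constant above suggests.
    let limits = ConnectionLimits::default()
        .with_max_established_per_peer(Some(MAX_CONNECTIONS_PER_PEER));

    let swarm = SwarmBuilder::new(transport, behaviour, local_peer_id)
        .connection_limits(limits)
        .build();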
+pub fn build_transport( + local_private_key: Keypair, +) -> std::io::Result<(BoxedTransport, Arc)> { + let tcp = + libp2p::tcp::TokioTcpTransport::new(libp2p::tcp::GenTcpConfig::default().nodelay(true)); + let transport = libp2p::dns::TokioDnsConfig::system(tcp)?; + #[cfg(feature = "libp2p-websocket")] + let transport = { + let trans_clone = transport.clone(); + transport.or_transport(libp2p::websocket::WsConfig::new(trans_clone)) + }; + + let (transport, bandwidth) = BandwidthLogging::new(transport); + + // mplex config + let mut mplex_config = libp2p::mplex::MplexConfig::new(); + mplex_config.set_max_buffer_size(256); + mplex_config.set_max_buffer_behaviour(libp2p::mplex::MaxBufferBehaviour::Block); + + // yamux config + let mut yamux_config = libp2p::yamux::YamuxConfig::default(); + yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::on_read()); + + // Authentication + Ok(( + transport + .upgrade(core::upgrade::Version::V1) + .authenticate(generate_noise_config(&local_private_key)) + .multiplex(core::upgrade::SelectUpgrade::new( + yamux_config, + mplex_config, + )) + .timeout(Duration::from_secs(10)) + .boxed(), + bandwidth, + )) +} + +// Useful helper functions for debugging. Currently not used in the client. +#[allow(dead_code)] +fn keypair_from_hex(hex_bytes: &str) -> error::Result { + let hex_bytes = if let Some(stripped) = hex_bytes.strip_prefix("0x") { + stripped.to_string() + } else { + hex_bytes.to_string() + }; + + hex::decode(&hex_bytes) + .map_err(|e| format!("Failed to parse p2p secret key bytes: {:?}", e).into()) + .and_then(keypair_from_bytes) +} + +#[allow(dead_code)] +fn keypair_from_bytes(mut bytes: Vec) -> error::Result { + libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut bytes) + .map(|secret| { + let keypair: libp2p::core::identity::secp256k1::Keypair = secret.into(); + Keypair::Secp256k1(keypair) + }) + .map_err(|e| format!("Unable to parse p2p secret key: {:?}", e).into()) +} + +/// Loads a private key from disk. If this fails, a new key is +/// generated and is then saved to disk. +/// +/// Currently only secp256k1 keys are allowed, as these are the only keys supported by discv5. +pub fn load_private_key(config: &NetworkConfig, log: &slog::Logger) -> Keypair { + // check for key from disk + let network_key_f = config.network_dir.join(NETWORK_KEY_FILENAME); + if let Ok(mut network_key_file) = File::open(network_key_f.clone()) { + let mut key_bytes: Vec = Vec::with_capacity(36); + match network_key_file.read_to_end(&mut key_bytes) { + Err(_) => debug!(log, "Could not read network key file"), + Ok(_) => { + // only accept secp256k1 keys for now + if let Ok(secret_key) = + libp2p::core::identity::secp256k1::SecretKey::from_bytes(&mut key_bytes) + { + let kp: libp2p::core::identity::secp256k1::Keypair = secret_key.into(); + debug!(log, "Loaded network key from disk."); + return Keypair::Secp256k1(kp); + } else { + debug!(log, "Network key file is not a valid secp256k1 key"); + } + } + } + } + + // if a key could not be loaded from disk, generate a new one and save it + let local_private_key = Keypair::generate_secp256k1(); + if let Keypair::Secp256k1(key) = local_private_key.clone() { + let _ = std::fs::create_dir_all(&config.network_dir); + match File::create(network_key_f.clone()) + .and_then(|mut f| f.write_all(&key.secret().to_bytes())) + { + Ok(_) => { + debug!(log, "New network key generated and written to disk"); + } + Err(e) => { + warn!( + log, + "Could not write node key to file: {:?}. 
error: {}", network_key_f, e + ); + } + } + } + local_private_key +} + +/// Generate authenticated XX Noise config from identity keys +fn generate_noise_config( + identity_keypair: &Keypair, +) -> noise::NoiseAuthenticated { + let static_dh_keys = noise::Keypair::::new() + .into_authentic(identity_keypair) + .expect("signing can fail only once during starting a node"); + noise::NoiseConfig::xx(static_dh_keys).into_authenticated() +} + +/// For a multiaddr that ends with a peer id, this strips this suffix. Rust-libp2p +/// only supports dialing to an address without providing the peer id. +pub fn strip_peer_id(addr: &mut Multiaddr) { + let last = addr.pop(); + match last { + Some(Protocol::P2p(_)) => {} + Some(other) => addr.push(other), + _ => {} + } +} + +/// Load metadata from persisted file. Return default metadata if loading fails. +pub fn load_or_build_metadata( + network_dir: &std::path::Path, + log: &slog::Logger, +) -> MetaData { + // We load a V2 metadata version by default (regardless of current fork) + // since a V2 metadata can be converted to V1. The RPC encoder is responsible + // for sending the correct metadata version based on the negotiated protocol version. + let mut meta_data = MetaDataV2 { + seq_number: 0, + attnets: EnrAttestationBitfield::::default(), + syncnets: EnrSyncCommitteeBitfield::::default(), + }; + // Read metadata from persisted file if available + let metadata_path = network_dir.join(METADATA_FILENAME); + if let Ok(mut metadata_file) = File::open(metadata_path) { + let mut metadata_ssz = Vec::new(); + if metadata_file.read_to_end(&mut metadata_ssz).is_ok() { + // Attempt to read a MetaDataV2 version from the persisted file, + // if that fails, read MetaDataV1 + match MetaDataV2::::from_ssz_bytes(&metadata_ssz) { + Ok(persisted_metadata) => { + meta_data.seq_number = persisted_metadata.seq_number; + // Increment seq number if persisted attnet is not default + if persisted_metadata.attnets != meta_data.attnets + || persisted_metadata.syncnets != meta_data.syncnets + { + meta_data.seq_number += 1; + } + debug!(log, "Loaded metadata from disk"); + } + Err(_) => { + match MetaDataV1::::from_ssz_bytes(&metadata_ssz) { + Ok(persisted_metadata) => { + let persisted_metadata = MetaData::V1(persisted_metadata); + // Increment seq number as the persisted metadata version is updated + meta_data.seq_number = *persisted_metadata.seq_number() + 1; + debug!(log, "Loaded metadata from disk"); + } + Err(e) => { + debug!( + log, + "Metadata from file could not be decoded"; + "error" => ?e, + ); + } + } + } + } + } + }; + + // Wrap the MetaData + let meta_data = MetaData::V2(meta_data); + + debug!(log, "Metadata sequence number"; "seq_num" => meta_data.seq_number()); + save_metadata_to_disk(network_dir, meta_data.clone(), log); + meta_data +} diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 48c02b72c3e..a2a7eb42939 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -1,6 +1,6 @@ //! Implementation of Lighthouse's peer management system. 
-use crate::behaviour::TARGET_SUBNET_PEERS; +use crate::service::TARGET_SUBNET_PEERS; use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RPCResponseErrorCode}; use crate::{error, metrics, Gossipsub}; use crate::{NetworkGlobals, PeerId}; diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs index accc0b60c59..fca665db981 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/score.rs @@ -5,7 +5,7 @@ //! As the logic develops this documentation will advance. //! //! The scoring algorithms are currently experimental. -use crate::behaviour::gossipsub_scoring_parameters::GREYLIST_THRESHOLD as GOSSIPSUB_GREYLIST_THRESHOLD; +use crate::service::gossipsub_scoring_parameters::GREYLIST_THRESHOLD as GOSSIPSUB_GREYLIST_THRESHOLD; use serde::Serialize; use std::time::Instant; use strum::AsRefStr; diff --git a/beacon_node/lighthouse_network/src/service/behaviour.rs b/beacon_node/lighthouse_network/src/service/behaviour.rs new file mode 100644 index 00000000000..bd5109546cd --- /dev/null +++ b/beacon_node/lighthouse_network/src/service/behaviour.rs @@ -0,0 +1,39 @@ +use crate::discovery::Discovery; +use crate::peer_manager::PeerManager; +use crate::rpc::{ReqId, RPC}; +use crate::types::SnappyTransform; + +use libp2p::gossipsub::subscription_filter::{ + MaxCountSubscriptionFilter, WhitelistSubscriptionFilter, +}; +use libp2p::gossipsub::Gossipsub as BaseGossipsub; +use libp2p::identify::Identify; +use libp2p::swarm::NetworkBehaviour; +use libp2p::NetworkBehaviour; +use types::EthSpec; + +pub type SubscriptionFilter = MaxCountSubscriptionFilter; +pub type Gossipsub = BaseGossipsub; + +/// Identifier of a request. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum RequestId { + Application(AppReqId), + Internal, +} + +#[derive(NetworkBehaviour)] +pub(crate) struct Behaviour { + /// The routing pub-sub mechanism for eth2. + pub gossipsub: Gossipsub, + /// The Eth2 RPC specified in the wire-0 protocol. + pub eth2_rpc: RPC, TSpec>, + /// Discv5 Discovery protocol. + pub discovery: Discovery, + /// Keep regular connection to peers and disconnect if absent. + // NOTE: The id protocol is used for initial interop. This will be removed by mainnet. + /// Provides IP addresses and peer information. + pub identify: Identify, + /// The peer manager that keeps track of peer's reputation and status. 
+ pub peer_manager: PeerManager, +} diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 38951e4d242..150e10a6e0c 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1,6 +1,3 @@ -use crate::behaviour::gossipsub_scoring_parameters::{ - lighthouse_gossip_thresholds, PeerScoreSettings, -}; use crate::config::{gossipsub_config, NetworkLoad}; use crate::discovery::{ subnet_predicate, DiscoveredPeers, Discovery, FIND_NODE_QUERY_CLOSEST_PEERS, @@ -10,18 +7,21 @@ use crate::peer_manager::{ ConnectionDirection, PeerManager, PeerManagerEvent, }; use crate::peer_manager::{MIN_OUTBOUND_ONLY_FACTOR, PEER_EXCESS_FACTOR, PRIORITY_PEER_EXCESS}; -use crate::service::{ - build_transport, strip_peer_id, Context as ServiceContext, MAX_CONNECTIONS_PER_PEER, - METADATA_FILENAME, -}; +use crate::service::behaviour::BehaviourEvent; +pub use crate::service::behaviour::Gossipsub; use crate::types::{ subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, SnappyTransform, Subnet, SubnetDiscovery, }; use crate::Eth2Enr; +use crate::{ + build_transport, strip_peer_id, Context as ServiceContext, MAX_CONNECTIONS_PER_PEER, + METADATA_FILENAME, +}; use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; use crate::{rpc::*, EnrExt}; use futures::stream::StreamExt; +use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettings}; use libp2p::bandwidth::BandwidthSinks; use libp2p::gossipsub::error::PublishError; use libp2p::swarm::{ConnectionLimits, SwarmBuilder, SwarmEvent}; @@ -58,8 +58,10 @@ use types::{ SignedBeaconBlock, Slot, SubnetId, SyncSubnetId, }; +use self::behaviour::{Behaviour, RequestId}; use self::gossip_cache::GossipCache; +mod behaviour; mod gossip_cache; pub mod gossipsub_scoring_parameters; @@ -71,16 +73,6 @@ const MAX_IDENTIFY_ADDRESSES: usize = 10; /// Identifier of requests sent by a peer. pub type PeerRequestId = (ConnectionId, SubstreamId); -pub type SubscriptionFilter = MaxCountSubscriptionFilter; -pub type Gossipsub = BaseGossipsub; - -/// Identifier of a request. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum RequestId { - Application(AppReqId), - Behaviour, -} - /// The types of events than can be obtained from polling the behaviour. #[derive(Debug)] pub enum OldBehaviourEvent { @@ -133,22 +125,6 @@ pub enum OldBehaviourEvent { ZeroListeners, } -#[derive(NetworkBehaviour)] -struct Behaviour { - /// The routing pub-sub mechanism for eth2. - gossipsub: Gossipsub, - /// The Eth2 RPC specified in the wire-0 protocol. - eth2_rpc: RPC, TSpec>, - /// Discv5 Discovery protocol. - discovery: Discovery, - /// Keep regular connection to peers and disconnect if absent. - // NOTE: The id protocol is used for initial interop. This will be removed by mainnet. - /// Provides IP addresses and peer information. - identify: Identify, - /// The peer manager that keeps track of peer's reputation and status. - peer_manager: PeerManager, -} - /// Builds the network behaviour that manages the core protocols of eth2. /// This core behaviour is managed by `Behaviour` which adds peer management to all core /// behaviours. 
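The struct above is the composed behaviour, and the `RequestId` enum next to it is what lets the service tell application-driven RPCs apart from its own housekeeping requests (ping, metadata). Only responses tagged with an application id are surfaced to the caller; internal ones are consumed by the service itself, as the `RequestId::Internal => None` arm later in this patch shows. A minimal, self-contained sketch of that routing rule (the `AppEvent` type is an illustrative stand-in):

    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    enum RequestId<AppReqId> {
        Application(AppReqId),
        Internal,
    }

    enum AppEvent<AppReqId, R> {
        ResponseReceived { id: AppReqId, response: R },
    }

    // Responses to internal requests never leave the service; only responses to
    // application-initiated requests become events for the caller.
    fn route_response<AppReqId, R>(
        id: RequestId<AppReqId>,
        response: R,
    ) -> Option<AppEvent<AppReqId, R>> {
        match id {
            RequestId::Application(id) => Some(AppEvent::ResponseReceived { id, response }),
            RequestId::Internal => None,
        }
    }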
@@ -200,7 +176,7 @@ impl Network { &log, )?; // Construct the metadata - let meta_data = crate::service::load_or_build_metadata(&config.network_dir, &log); + let meta_data = crate::load_or_build_metadata(&config.network_dir, &log); let globals = NetworkGlobals::new( enr.clone(), config.libp2p_port, @@ -942,7 +918,7 @@ impl Network { data: *self.network_globals.local_metadata.read().seq_number(), }; trace!(self.log, "Sending Ping"; "peer_id" => %peer_id); - let id = RequestId::Behaviour; + let id = RequestId::Internal; self.eth2_rpc_mut() .send_request(peer_id, id, OutboundRequest::Ping(ping)); } @@ -961,7 +937,7 @@ impl Network { fn send_meta_data_request(&mut self, peer_id: PeerId) { let event = OutboundRequest::MetaData(PhantomData); self.eth2_rpc_mut() - .send_request(peer_id, RequestId::Behaviour, event); + .send_request(peer_id, RequestId::Internal, event); } /// Sends a METADATA response to a peer. @@ -987,7 +963,7 @@ impl Network { id, response, }), - RequestId::Behaviour => None, + RequestId::Internal => None, } } @@ -1431,7 +1407,7 @@ impl Network { "peer_id" => %peer_id, "reason" => %reason); // send one goodbye self.eth2_rpc_mut() - .shutdown(peer_id, RequestId::Behaviour, reason); + .shutdown(peer_id, RequestId::Internal, reason); None } } @@ -1630,14 +1606,10 @@ impl slog::Value for RequestId { serializer: &mut dyn slog::Serializer, ) -> slog::Result { match self { - RequestId::Behaviour => slog::Value::serialize("Behaviour", record, key, serializer), + RequestId::Internal => slog::Value::serialize("Behaviour", record, key, serializer), RequestId::Application(ref id) => { slog::Value::serialize(&format_args!("{:?}", id), record, key, serializer) } } } } - -fn make_swarm() -> Swarm> { - todo!() -} diff --git a/beacon_node/lighthouse_network/tests/common/behaviour.rs b/beacon_node/lighthouse_network/tests/common/behaviour.rs index 76eecfcbc57..50fe6941db2 100644 --- a/beacon_node/lighthouse_network/tests/common/behaviour.rs +++ b/beacon_node/lighthouse_network/tests/common/behaviour.rs @@ -23,7 +23,8 @@ use std::collections::HashMap; use std::task::{Context, Poll}; -use libp2p::core::connection::{ConnectedPoint, ConnectionId, ListenerId}; +use libp2p::core::connection::{ConnectedPoint, ConnectionId}; +use libp2p::core::transport::ListenerId; use libp2p::swarm::handler::{ConnectionHandler, DummyConnectionHandler, IntoConnectionHandler}; use libp2p::swarm::{DialError, NetworkBehaviour, NetworkBehaviourAction, PollParameters}; use libp2p::{Multiaddr, PeerId}; diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 7f593bf04cd..3f2cb7c247e 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -1,7 +1,7 @@ #![cfg(test)] use lighthouse_network::rpc::methods::*; use lighthouse_network::{ - rpc::max_rpc_size, OldBehaviourEvent, Libp2pEvent, ReportSource, Request, Response, + rpc::max_rpc_size, Libp2pEvent, OldBehaviourEvent, ReportSource, Request, Response, }; use slog::{debug, warn, Level}; use ssz::Encode; @@ -89,10 +89,7 @@ fn test_status_rpc() { Libp2pEvent::Behaviour(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender - .swarm - .behaviour_mut() - .send_request(peer_id, 10, rpc_request.clone()); + sender.send_request(peer_id, 10, rpc_request.clone()); } Libp2pEvent::Behaviour(OldBehaviourEvent::ResponseReceived { peer_id: _, @@ -122,11 +119,7 @@ fn test_status_rpc() { if request == 
rpc_request { // send the response debug!(log, "Receiver Received"); - receiver.swarm.behaviour_mut().send_successful_response( - peer_id, - id, - rpc_response.clone(), - ); + receiver.send_response(peer_id, id, rpc_response.clone()); } } _ => {} // Ignore other events @@ -194,11 +187,7 @@ fn test_blocks_by_range_chunked_rpc() { Libp2pEvent::Behaviour(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender.swarm.behaviour_mut().send_request( - peer_id, - request_id, - rpc_request.clone(), - ); + sender.send_request(peer_id, request_id, rpc_request.clone()); } Libp2pEvent::Behaviour(OldBehaviourEvent::ResponseReceived { peer_id: _, @@ -254,18 +243,10 @@ fn test_blocks_by_range_chunked_rpc() { } else { rpc_response_merge_small.clone() }; - receiver.swarm.behaviour_mut().send_successful_response( - peer_id, - id, - rpc_response.clone(), - ); + receiver.send_response(peer_id, id, rpc_response.clone()); } // send the stream termination - receiver.swarm.behaviour_mut().send_successful_response( - peer_id, - id, - Response::BlocksByRange(None), - ); + receiver.send_response(peer_id, id, Response::BlocksByRange(None)); } } _ => {} // Ignore other events @@ -321,11 +302,7 @@ fn test_blocks_by_range_over_limit() { Libp2pEvent::Behaviour(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender.swarm.behaviour_mut().send_request( - peer_id, - request_id, - rpc_request.clone(), - ); + sender.send_request(peer_id, request_id, rpc_request.clone()); } // The request will fail because the sender will refuse to send anything > MAX_RPC_SIZE Libp2pEvent::Behaviour(OldBehaviourEvent::RPCFailed { id, .. }) => { @@ -351,18 +328,10 @@ fn test_blocks_by_range_over_limit() { warn!(log, "Receiver got request"); for _ in 0..messages_to_send { let rpc_response = rpc_response_merge_large.clone(); - receiver.swarm.behaviour_mut().send_successful_response( - peer_id, - id, - rpc_response.clone(), - ); + receiver.send_response(peer_id, id, rpc_response.clone()); } // send the stream termination - receiver.swarm.behaviour_mut().send_successful_response( - peer_id, - id, - Response::BlocksByRange(None), - ); + receiver.send_response(peer_id, id, Response::BlocksByRange(None)); } } _ => {} // Ignore other events @@ -421,11 +390,7 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { Libp2pEvent::Behaviour(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender.swarm.behaviour_mut().send_request( - peer_id, - request_id, - rpc_request.clone(), - ); + sender.send_request(peer_id, request_id, rpc_request.clone()); } Libp2pEvent::Behaviour(OldBehaviourEvent::ResponseReceived { peer_id: _, @@ -490,11 +455,7 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { if message_info.is_some() { messages_sent += 1; let (peer_id, stream_id) = message_info.as_ref().unwrap(); - receiver.swarm.behaviour_mut().send_successful_response( - *peer_id, - *stream_id, - rpc_response.clone(), - ); + receiver.send_response(*peer_id, *stream_id, rpc_response.clone()); debug!(log, "Sending message {}", messages_sent); if messages_sent == messages_to_send + extra_messages_to_send { // stop sending messages @@ -553,10 +514,7 @@ fn test_blocks_by_range_single_empty_rpc() { Libp2pEvent::Behaviour(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender - .swarm - .behaviour_mut() - 
.send_request(peer_id, 10, rpc_request.clone()); + sender.send_request(peer_id, 10, rpc_request.clone()); } Libp2pEvent::Behaviour(OldBehaviourEvent::ResponseReceived { peer_id: _, @@ -595,18 +553,10 @@ fn test_blocks_by_range_single_empty_rpc() { warn!(log, "Receiver got request"); for _ in 1..=messages_to_send { - receiver.swarm.behaviour_mut().send_successful_response( - peer_id, - id, - rpc_response.clone(), - ); + receiver.send_response(peer_id, id, rpc_response.clone()); } // send the stream termination - receiver.swarm.behaviour_mut().send_successful_response( - peer_id, - id, - Response::BlocksByRange(None), - ); + receiver.send_response(peer_id, id, Response::BlocksByRange(None)); } } _ => {} // Ignore other events @@ -679,10 +629,7 @@ fn test_blocks_by_root_chunked_rpc() { Libp2pEvent::Behaviour(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender - .swarm - .behaviour_mut() - .send_request(peer_id, 6, rpc_request.clone()); + sender.send_request(peer_id, 6, rpc_request.clone()); } Libp2pEvent::Behaviour(OldBehaviourEvent::ResponseReceived { peer_id: _, @@ -735,19 +682,11 @@ fn test_blocks_by_root_chunked_rpc() { } else { rpc_response_merge_small.clone() }; - receiver.swarm.behaviour_mut().send_successful_response( - peer_id, - id, - rpc_response, - ); + receiver.send_response(peer_id, id, rpc_response); debug!(log, "Sending message"); } // send the stream termination - receiver.swarm.behaviour_mut().send_successful_response( - peer_id, - id, - Response::BlocksByRange(None), - ); + receiver.send_response(peer_id, id, Response::BlocksByRange(None)); debug!(log, "Send stream term"); } } @@ -814,10 +753,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { Libp2pEvent::Behaviour(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a STATUS message debug!(log, "Sending RPC"); - sender - .swarm - .behaviour_mut() - .send_request(peer_id, 10, rpc_request.clone()); + sender.send_request(peer_id, 10, rpc_request.clone()); } Libp2pEvent::Behaviour(OldBehaviourEvent::ResponseReceived { peer_id: _, @@ -882,11 +818,7 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { if message_info.is_some() { messages_sent += 1; let (peer_id, stream_id) = message_info.as_ref().unwrap(); - receiver.swarm.behaviour_mut().send_successful_response( - *peer_id, - *stream_id, - rpc_response.clone(), - ); + receiver.send_response(*peer_id, *stream_id, rpc_response.clone()); debug!(log, "Sending message {}", messages_sent); if messages_sent == messages_to_send + extra_messages_to_send { // stop sending messages @@ -929,7 +861,7 @@ fn test_goodbye_rpc() { Libp2pEvent::Behaviour(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)) => { // Send a goodbye and disconnect debug!(log, "Sending RPC"); - sender.swarm.behaviour_mut().goodbye_peer( + sender.goodbye_peer( &peer_id, GoodbyeReason::IrrelevantNetwork, ReportSource::SyncService, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 37ff6e4d434..523e8d82fd4 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -11,6 +11,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use futures::channel::mpsc::Sender; use futures::future::OptionFuture; use futures::prelude::*; +use lighthouse_network::service::Network; use lighthouse_network::{ prometheus_client::registry::Registry, MessageAcceptance, Service as LibP2PService, }; @@ -21,7 +22,7 @@ use lighthouse_network::{ }; use lighthouse_network::{ 
types::{GossipEncoding, GossipTopic}, - OldBehaviourEvent, MessageId, NetworkGlobals, PeerId, + MessageId, NetworkGlobals, OldBehaviourEvent, PeerId, }; use slog::{crit, debug, error, info, o, trace, warn}; use std::{net::SocketAddr, pin::Pin, sync::Arc, time::Duration}; @@ -171,7 +172,7 @@ pub struct NetworkService { /// A reference to the underlying beacon chain. beacon_chain: Arc>, /// The underlying libp2p service that drives all the network interactions. - libp2p: LibP2PService, + libp2p: Network, /// An attestation and subnet manager service. attestation_service: AttestationService, /// A sync committeee subnet manager service. @@ -273,8 +274,8 @@ impl NetworkService { }; // launch libp2p service - let (network_globals, mut libp2p) = - LibP2PService::new(executor.clone(), service_context, &network_log).await?; + let (mut libp2p, network_globals) = + Network::new(executor.clone(), service_context, &network_log).await?; // Repopulate the DHT with stored ENR's if discovery is not disabled. if !config.disable_discovery { @@ -284,7 +285,7 @@ impl NetworkService { "Loading peers into the routing table"; "peers" => enrs_to_load.len() ); for enr in enrs_to_load { - libp2p.swarm.behaviour_mut().add_enr(enr.clone()); + libp2p.add_enr(enr.clone()); } } @@ -402,7 +403,7 @@ impl NetworkService { _ = self.metrics_update.tick(), if self.metrics_enabled => { // update various network metrics metrics::update_gossip_metrics::( - self.libp2p.swarm.behaviour().gs(), + self.libp2p.gossipsub(), &self.network_globals, ); // update sync metrics @@ -429,7 +430,7 @@ impl NetworkService { Some(_) = &mut self.next_unsubscribe => { let new_enr_fork_id = self.beacon_chain.enr_fork_id(); - self.libp2p.swarm.behaviour_mut().unsubscribe_from_fork_topics_except(new_enr_fork_id.fork_digest); + self.libp2p.unsubscribe_from_fork_topics_except(new_enr_fork_id.fork_digest); info!(self.log, "Unsubscribed from old fork topics"); self.next_unsubscribe = Box::pin(None.into()); } @@ -439,7 +440,7 @@ impl NetworkService { let fork_version = self.beacon_chain.spec.fork_version_for_name(fork_name); let fork_digest = ChainSpec::compute_fork_digest(fork_version, self.beacon_chain.genesis_validators_root); info!(self.log, "Subscribing to new fork topics"); - self.libp2p.swarm.behaviour_mut().subscribe_new_fork_topics(fork_digest); + self.libp2p.subscribe_new_fork_topics(fork_digest); self.next_fork_subscriptions = Box::pin(None.into()); } else { @@ -580,7 +581,7 @@ impl NetworkService { response, id, } => { - self.libp2p.send_response(peer_id, id, response); + self.libp2p.send_successful_response(peer_id, id, response); } NetworkMessage::SendErrorResponse { peer_id, @@ -588,7 +589,7 @@ impl NetworkService { id, reason, } => { - self.libp2p.respond_with_error(peer_id, id, error, reason); + self.libp2p.send_error_reponse(peer_id, id, error, reason); } NetworkMessage::UPnPMappingEstablished { tcp_socket, @@ -599,8 +600,6 @@ impl NetworkService { if let Some(tcp_socket) = tcp_socket { if let Err(e) = self .libp2p - .swarm - .behaviour_mut() .discovery_mut() .update_enr_tcp_port(tcp_socket.port()) { @@ -613,8 +612,6 @@ impl NetworkService { if let Some(udp_socket) = udp_socket { if let Err(e) = self .libp2p - .swarm - .behaviour_mut() .discovery_mut() .update_enr_udp_socket(udp_socket) { @@ -633,14 +630,11 @@ impl NetworkService { "message_id" => %message_id, "validation_result" => ?validation_result ); - self.libp2p - .swarm - .behaviour_mut() - .report_message_validation_result( - &propagation_source, - message_id, - validation_result, 
- ); + self.libp2p.report_message_validation_result( + &propagation_source, + message_id, + validation_result, + ); } NetworkMessage::Publish { messages } => { let mut topic_kinds = Vec::new(); @@ -655,7 +649,7 @@ impl NetworkService { "count" => messages.len(), "topics" => ?topic_kinds ); - self.libp2p.swarm.behaviour_mut().publish(messages); + self.libp2p.publish(messages); } NetworkMessage::ReportPeer { peer_id, @@ -693,7 +687,7 @@ impl NetworkService { GossipEncoding::default(), fork_digest, ); - if self.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { + if self.libp2p.subscribe(topic.clone()) { subscribed_topics.push(topic); } else { warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); @@ -706,10 +700,10 @@ impl NetworkService { for subnet_id in 0..<::EthSpec as EthSpec>::SubnetBitfieldLength::to_u64() { let subnet = Subnet::Attestation(SubnetId::new(subnet_id)); // Update the ENR bitfield - self.libp2p.swarm.behaviour_mut().update_enr_subnet(subnet, true); + self.libp2p.update_enr_subnet(subnet, true); for fork_digest in self.required_gossip_fork_digests() { let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - if self.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { + if self.libp2p.subscribe(topic.clone()) { subscribed_topics.push(topic); } else { warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); @@ -720,17 +714,14 @@ impl NetworkService { for subnet_id in 0..subnet_max { let subnet = Subnet::SyncCommittee(SyncSubnetId::new(subnet_id)); // Update the ENR bitfield - self.libp2p - .swarm - .behaviour_mut() - .update_enr_subnet(subnet, true); + self.libp2p.update_enr_subnet(subnet, true); for fork_digest in self.required_gossip_fork_digests() { let topic = GossipTopic::new( subnet.into(), GossipEncoding::default(), fork_digest, ); - if self.libp2p.swarm.behaviour_mut().subscribe(topic.clone()) { + if self.libp2p.subscribe(topic.clone()) { subscribed_topics.push(topic); } else { warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); @@ -782,8 +773,6 @@ impl NetworkService { if let Some(active_validators) = active_validators_opt { if self .libp2p - .swarm - .behaviour_mut() .update_gossipsub_parameters(active_validators, slot) .is_err() { @@ -811,33 +800,24 @@ impl NetworkService { for fork_digest in self.required_gossip_fork_digests() { let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - self.libp2p.swarm.behaviour_mut().subscribe(topic); + self.libp2p.subscribe(topic); } } SubnetServiceMessage::Unsubscribe(subnet) => { for fork_digest in self.required_gossip_fork_digests() { let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - self.libp2p.swarm.behaviour_mut().unsubscribe(topic); + self.libp2p.unsubscribe(topic); } } SubnetServiceMessage::EnrAdd(subnet) => { - self.libp2p - .swarm - .behaviour_mut() - .update_enr_subnet(subnet, true); + self.libp2p.update_enr_subnet(subnet, true); } SubnetServiceMessage::EnrRemove(subnet) => { - self.libp2p - .swarm - .behaviour_mut() - .update_enr_subnet(subnet, false); + self.libp2p.update_enr_subnet(subnet, false); } SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { - self.libp2p - .swarm - .behaviour_mut() - .discover_subnet_peers(subnets_to_discover); + self.libp2p.discover_subnet_peers(subnets_to_discover); } } } @@ -848,33 +828,24 @@ impl NetworkService { for fork_digest in self.required_gossip_fork_digests() { let topic = GossipTopic::new(subnet.into(), 
GossipEncoding::default(), fork_digest); - self.libp2p.swarm.behaviour_mut().subscribe(topic); + self.libp2p.subscribe(topic); } } SubnetServiceMessage::Unsubscribe(subnet) => { for fork_digest in self.required_gossip_fork_digests() { let topic = GossipTopic::new(subnet.into(), GossipEncoding::default(), fork_digest); - self.libp2p.swarm.behaviour_mut().unsubscribe(topic); + self.libp2p.unsubscribe(topic); } } SubnetServiceMessage::EnrAdd(subnet) => { - self.libp2p - .swarm - .behaviour_mut() - .update_enr_subnet(subnet, true); + self.libp2p.update_enr_subnet(subnet, true); } SubnetServiceMessage::EnrRemove(subnet) => { - self.libp2p - .swarm - .behaviour_mut() - .update_enr_subnet(subnet, false); + self.libp2p.update_enr_subnet(subnet, false); } SubnetServiceMessage::DiscoverPeers(subnets_to_discover) => { - self.libp2p - .swarm - .behaviour_mut() - .discover_subnet_peers(subnets_to_discover); + self.libp2p.discover_subnet_peers(subnets_to_discover); } } } @@ -892,10 +863,7 @@ impl NetworkService { ); fork_context.update_current_fork(*new_fork_name); - self.libp2p - .swarm - .behaviour_mut() - .update_fork_version(new_enr_fork_id); + self.libp2p.update_fork_version(new_enr_fork_id); // Reinitialize the next_fork_update self.next_fork_update = Box::pin(next_fork_delay(&self.beacon_chain).into()); @@ -944,7 +912,7 @@ fn next_fork_subscriptions_delay( impl Drop for NetworkService { fn drop(&mut self) { // network thread is terminating - let enrs = self.libp2p.swarm.behaviour_mut().enr_entries(); + let enrs = self.libp2p.enr_entries(); debug!( self.log, "Persisting DHT to store"; From 53d9159a0dc22062db18bd2dc82bea5e9e71b5f9 Mon Sep 17 00:00:00 2001 From: Diva M Date: Wed, 24 Aug 2022 14:46:47 -0500 Subject: [PATCH 10/31] use better name for Old BehaviourEvent --- .../lighthouse_network/src/discovery/mod.rs | 2 +- beacon_node/lighthouse_network/src/lib.rs | 2 +- .../lighthouse_network/src/old_service.rs | 4 +- .../lighthouse_network/src/service/mod.rs | 42 +++++++++---------- beacon_node/network/src/service.rs | 22 +++++----- 5 files changed, 36 insertions(+), 36 deletions(-) diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 93adfbe2169..d766fd23a3e 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -7,8 +7,8 @@ pub(crate) mod enr; pub mod enr_ext; // Allow external use of the lighthouse ENR builder -use crate::service::TARGET_SUBNET_PEERS; use crate::metrics; +use crate::service::TARGET_SUBNET_PEERS; use crate::{error, Enr, NetworkConfig, NetworkGlobals, Subnet, SubnetDiscovery}; use discv5::{enr::NodeId, Discv5, Discv5Event}; pub use enr::{ diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index 05c2755f62d..3bfdd36ab39 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -84,7 +84,7 @@ pub use peer_manager::{ ConnectionDirection, PeerConnectionStatus, PeerInfo, PeerManager, SyncInfo, SyncStatus, }; // pub use service::{load_private_key, Context, Libp2pEvent, Service, NETWORK_KEY_FILENAME}; -pub use service::{Gossipsub, OldBehaviourEvent, PeerRequestId, Request, Response}; +pub use service::{Gossipsub, NetworkEvent, PeerRequestId, Request, Response}; mod old_service; diff --git a/beacon_node/lighthouse_network/src/old_service.rs b/beacon_node/lighthouse_network/src/old_service.rs index 262ef8b4c78..9b43ec16a4b 100644 --- 
a/beacon_node/lighthouse_network/src/old_service.rs +++ b/beacon_node/lighthouse_network/src/old_service.rs @@ -1,6 +1,6 @@ use crate::multiaddr::Protocol; use crate::rpc::{GoodbyeReason, MetaData, MetaDataV1, MetaDataV2, RPCResponseErrorCode, ReqId}; -use crate::service::{save_metadata_to_disk, OldBehaviourEvent, PeerRequestId, Request, Response}; +use crate::service::{save_metadata_to_disk, NetworkEvent, PeerRequestId, Request, Response}; use crate::types::{error, EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use crate::{NetworkConfig, NetworkGlobals, PeerAction, ReportSource}; use libp2p::core::{ @@ -31,7 +31,7 @@ pub const METADATA_FILENAME: &str = "metadata"; #[derive(Debug)] pub enum Libp2pEvent { /// A behaviour event - Behaviour(OldBehaviourEvent), + Behaviour(NetworkEvent), /// A new listening address has been established. NewListenAddr(Multiaddr), /// We reached zero listening addresses. diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 150e10a6e0c..f2eef6fefd9 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -75,7 +75,7 @@ pub type PeerRequestId = (ConnectionId, SubstreamId); /// The types of events than can be obtained from polling the behaviour. #[derive(Debug)] -pub enum OldBehaviourEvent { +pub enum NetworkEvent { /// We have successfully dialed and connected to a peer. PeerConnectedOutgoing(PeerId), /// A peer has successfully dialed and connected to us. @@ -956,9 +956,9 @@ impl Network { id: RequestId, peer_id: PeerId, response: Response, - ) -> Option> { + ) -> Option> { match id { - RequestId::Application(id) => Some(OldBehaviourEvent::ResponseReceived { + RequestId::Application(id) => Some(NetworkEvent::ResponseReceived { peer_id, id, response, @@ -974,7 +974,7 @@ impl Network { id: PeerRequestId, peer_id: PeerId, request: Request, - ) -> OldBehaviourEvent { + ) -> NetworkEvent { // Increment metrics match &request { Request::Status(_) => { @@ -987,7 +987,7 @@ impl Network { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_root"]) } } - return OldBehaviourEvent::RequestReceived { + return NetworkEvent::RequestReceived { peer_id, id, request, @@ -1055,7 +1055,7 @@ impl Network { fn inject_gs_event( &mut self, event: GossipsubEvent, - ) -> Option> { + ) -> Option> { match event { GossipsubEvent::Message { propagation_source, @@ -1078,7 +1078,7 @@ impl Network { } Ok(msg) => { // Notify the network - return Some(OldBehaviourEvent::PubsubMessage { + return Some(NetworkEvent::PubsubMessage { id, source: propagation_source, topic: gs_msg.topic, @@ -1153,7 +1153,7 @@ impl Network { fn inject_rpc_event( &mut self, event: RPCMessage, TSpec>, - ) -> Option> { + ) -> Option> { let peer_id = event.peer_id; if !self.peer_manager().is_connected(&peer_id) { @@ -1196,7 +1196,7 @@ impl Network { ); // inform failures of requests comming outside the behaviour if let RequestId::Application(id) = id { - Some(OldBehaviourEvent::RPCFailed { peer_id, id }) + Some(NetworkEvent::RPCFailed { peer_id, id }) } else { None } @@ -1321,7 +1321,7 @@ impl Network { fn inject_discovery_event( &mut self, event: DiscoveredPeers, - ) -> Option> { + ) -> Option> { let DiscoveredPeers { peers } = event; let to_dial_peers = self.peer_manager_mut().peers_discovered(peers); for peer_id in to_dial_peers { @@ -1336,7 +1336,7 @@ impl Network { fn inject_identify_event( &mut self, event: IdentifyEvent, - ) -> Option> { + ) -> Option> { match event { 
IdentifyEvent::Received { peer_id, mut info } => { if info.listen_addrs.len() > MAX_IDENTIFY_ADDRESSES { @@ -1359,29 +1359,29 @@ impl Network { fn inject_pm_event( &mut self, event: PeerManagerEvent, - ) -> Option> { + ) -> Option> { match event { PeerManagerEvent::PeerConnectedIncoming(peer_id) => { - Some(OldBehaviourEvent::PeerConnectedIncoming(peer_id)) + Some(NetworkEvent::PeerConnectedIncoming(peer_id)) } PeerManagerEvent::PeerConnectedOutgoing(peer_id) => { - Some(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)) + Some(NetworkEvent::PeerConnectedOutgoing(peer_id)) } PeerManagerEvent::PeerDisconnected(peer_id) => { - Some(OldBehaviourEvent::PeerDisconnected(peer_id)) + Some(NetworkEvent::PeerDisconnected(peer_id)) } PeerManagerEvent::Banned(peer_id, associated_ips) => { self.discovery_mut().ban_peer(&peer_id, associated_ips); - Some(OldBehaviourEvent::PeerBanned(peer_id)) + Some(NetworkEvent::PeerBanned(peer_id)) } PeerManagerEvent::UnBanned(peer_id, associated_ips) => { self.discovery_mut().unban_peer(&peer_id, associated_ips); - Some(OldBehaviourEvent::PeerUnbanned(peer_id)) + Some(NetworkEvent::PeerUnbanned(peer_id)) } PeerManagerEvent::Status(peer_id) => { // it's time to status. We don't keep a beacon chain reference here, so we inform // the network to send a status to this peer - Some(OldBehaviourEvent::StatusPeer(peer_id)) + Some(NetworkEvent::StatusPeer(peer_id)) } PeerManagerEvent::DiscoverPeers(peers_to_find) => { // Peer manager has requested a discovery query for more peers. @@ -1413,7 +1413,7 @@ impl Network { } } - pub fn poll(&mut self, cx: &mut Context) -> Poll> { + pub fn poll(&mut self, cx: &mut Context) -> Poll> { let maybe_event = match self.swarm.poll_next_unpin(cx) { Poll::Ready(Some(swarm_event)) => match swarm_event { SwarmEvent::Behaviour(behaviour_event) => match behaviour_event { @@ -1466,7 +1466,7 @@ impl Network { } => { crit!(self.log, "Listener closed"; "addresses" => ?addresses, "reason" => ?reason); if Swarm::listeners(&self.swarm).count() == 0 { - Some(OldBehaviourEvent::ZeroListeners) + Some(NetworkEvent::ZeroListeners) } else { None } @@ -1475,7 +1475,7 @@ impl Network { // this is non fatal, but we still check warn!(self.log, "Listener error"; "error" => ?error); if Swarm::listeners(&self.swarm).count() == 0 { - Some(OldBehaviourEvent::ZeroListeners) + Some(NetworkEvent::ZeroListeners) } else { None } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 523e8d82fd4..586cf0ae608 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -22,7 +22,7 @@ use lighthouse_network::{ }; use lighthouse_network::{ types::{GossipEncoding, GossipTopic}, - MessageId, NetworkGlobals, OldBehaviourEvent, PeerId, + MessageId, NetworkGlobals, NetworkEvent, PeerId, }; use slog::{crit, debug, error, info, o, trace, warn}; use std::{net::SocketAddr, pin::Pin, sync::Arc, time::Duration}; @@ -462,18 +462,18 @@ impl NetworkService { ) { match ev { Libp2pEvent::Behaviour(event) => match event { - OldBehaviourEvent::PeerConnectedOutgoing(peer_id) => { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { self.send_to_router(RouterMessage::PeerDialed(peer_id)); } - OldBehaviourEvent::PeerConnectedIncoming(_) - | OldBehaviourEvent::PeerBanned(_) - | OldBehaviourEvent::PeerUnbanned(_) => { + NetworkEvent::PeerConnectedIncoming(_) + | NetworkEvent::PeerBanned(_) + | NetworkEvent::PeerUnbanned(_) => { // No action required for these events. 
} - OldBehaviourEvent::PeerDisconnected(peer_id) => { + NetworkEvent::PeerDisconnected(peer_id) => { self.send_to_router(RouterMessage::PeerDisconnected(peer_id)); } - OldBehaviourEvent::RequestReceived { + NetworkEvent::RequestReceived { peer_id, id, request, @@ -484,7 +484,7 @@ impl NetworkService { request, }); } - OldBehaviourEvent::ResponseReceived { + NetworkEvent::ResponseReceived { peer_id, id, response, @@ -495,16 +495,16 @@ impl NetworkService { response, }); } - OldBehaviourEvent::RPCFailed { id, peer_id } => { + NetworkEvent::RPCFailed { id, peer_id } => { self.send_to_router(RouterMessage::RPCFailed { peer_id, request_id: id, }); } - OldBehaviourEvent::StatusPeer(peer_id) => { + NetworkEvent::StatusPeer(peer_id) => { self.send_to_router(RouterMessage::StatusPeer(peer_id)); } - OldBehaviourEvent::PubsubMessage { + NetworkEvent::PubsubMessage { id, source, message, From bb16b073ce5fc1856d78fb46c0608fbd3a1908ac Mon Sep 17 00:00:00 2001 From: Diva M Date: Wed, 24 Aug 2022 16:39:37 -0500 Subject: [PATCH 11/31] get right pollling the network --- .../lighthouse_network/src/service/mod.rs | 34 ++-- .../lighthouse_network/tests/common/mod.rs | 24 +-- .../lighthouse_network/tests/rpc_tests.rs | 76 ++++----- beacon_node/network/src/service.rs | 145 +++++++++--------- 4 files changed, 146 insertions(+), 133 deletions(-) diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index f2eef6fefd9..ca1d5019f47 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -21,6 +21,7 @@ use crate::{ use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; use crate::{rpc::*, EnrExt}; use futures::stream::StreamExt; +use futures::FutureExt; use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettings}; use libp2p::bandwidth::BandwidthSinks; use libp2p::gossipsub::error::PublishError; @@ -758,12 +759,7 @@ impl Network { } /// Send a successful response to a peer over RPC. - pub fn send_successful_response( - &mut self, - peer_id: PeerId, - id: PeerRequestId, - response: Response, - ) { + pub fn send_response(&mut self, peer_id: PeerId, id: PeerRequestId, response: Response) { self.eth2_rpc_mut() .send_response(peer_id, id, response.into()) } @@ -785,6 +781,21 @@ impl Network { /* Peer management functions */ + pub fn testing_dial(&mut self, addr: Multiaddr) -> Result<(), libp2p::swarm::DialError> { + self.swarm.dial(addr) + } + + pub fn report_peer( + &mut self, + peer_id: &PeerId, + action: PeerAction, + source: ReportSource, + msg: &'static str, + ) { + self.peer_manager_mut() + .report_peer(peer_id, action, source, None, msg); + } + /// Disconnects from a peer providing a reason. /// /// This will send a goodbye, disconnect and then ban the peer. 
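The wrappers added above (`send_response`, `testing_dial`, `report_peer`) are what let the rest of the node, and the tests later in this series, stop reaching through the swarm into the behaviour. A hedged before/after sketch of a typical call site in `beacon_node/network` (the action, source, and message values are purely illustrative):

    // Before this series: every operation went through the swarm and behaviour.
    // self.libp2p.swarm.behaviour_mut().report_peer(...);

    // After: the `Network` facade exposes the operation directly and forwards
    // to the peer manager internally.
    self.libp2p.report_peer(
        &peer_id,
        PeerAction::Fatal,
        ReportSource::SyncService,
        "example report",
    );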
@@ -1052,10 +1063,7 @@ impl Network { } WhitelistSubscriptionFilter(possible_hashes) } - fn inject_gs_event( - &mut self, - event: GossipsubEvent, - ) -> Option> { + fn inject_gs_event(&mut self, event: GossipsubEvent) -> Option> { match event { GossipsubEvent::Message { propagation_source, @@ -1413,7 +1421,7 @@ impl Network { } } - pub fn poll(&mut self, cx: &mut Context) -> Poll> { + pub fn poll_network(&mut self, cx: &mut Context) -> Poll> { let maybe_event = match self.swarm.poll_next_unpin(cx) { Poll::Ready(Some(swarm_event)) => match swarm_event { SwarmEvent::Behaviour(behaviour_event) => match behaviour_event { @@ -1511,6 +1519,10 @@ impl Network { } Poll::Pending } + + pub async fn next_event(&mut self) -> NetworkEvent { + futures::future::poll_fn(|cx| self.poll_network(cx)).await + } } /* Public API types */ diff --git a/beacon_node/lighthouse_network/tests/common/mod.rs b/beacon_node/lighthouse_network/tests/common/mod.rs index ea770de6c23..135851d6bdf 100644 --- a/beacon_node/lighthouse_network/tests/common/mod.rs +++ b/beacon_node/lighthouse_network/tests/common/mod.rs @@ -1,10 +1,11 @@ #![cfg(test)] +use futures::StreamExt; use libp2p::gossipsub::GossipsubConfigBuilder; +use lighthouse_network::service::Network as LibP2PService; use lighthouse_network::Enr; use lighthouse_network::EnrExt; use lighthouse_network::Multiaddr; -use lighthouse_network::Service as LibP2PService; -use lighthouse_network::{Libp2pEvent, NetworkConfig}; +use lighthouse_network::{NetworkConfig, NetworkEvent}; use slog::{debug, error, o, Drain}; use std::sync::Arc; use std::sync::Weak; @@ -119,18 +120,19 @@ pub async fn build_libp2p_instance( LibP2PService::new(executor, libp2p_context, &log) .await .expect("should build libp2p instance") - .1, + .0, signal, ) } #[allow(dead_code)] pub fn get_enr(node: &LibP2PService) -> Enr { - node.swarm.behaviour().local_enr() + node.local_enr() } // Returns `n` libp2p peers in fully connected topology. #[allow(dead_code)] +/* pub async fn build_full_mesh( rt: Weak, log: slog::Logger, @@ -157,8 +159,7 @@ pub async fn build_full_mesh( } } nodes -} - +}*/ // Constructs a pair of nodes with separate loggers. The sender dials the receiver. // This returns a (sender, receiver) pair. 
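One note on the polling change above before the node-pair helper that follows: `next_event` is now just `futures::future::poll_fn` wrapped around the hand-written `poll_network`, which is the standard way to expose a manual `poll_*` method as an `async fn`. A self-contained illustration of the same adapter with a toy event source:

    use std::task::{Context, Poll};

    struct Source {
        queued: Vec<u32>,
    }

    impl Source {
        // A hand-rolled poll method, analogous to `Network::poll_network`.
        fn poll_next_value(&mut self, _cx: &mut Context<'_>) -> Poll<u32> {
            match self.queued.pop() {
                Some(v) => Poll::Ready(v),
                // A real implementation must register the waker before returning Pending.
                None => Poll::Pending,
            }
        }

        // Wrapping it in `poll_fn` lets callers simply `.await` the next value.
        async fn next_value(&mut self) -> u32 {
            futures::future::poll_fn(|cx| self.poll_next_value(cx)).await
        }
    }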
#[allow(dead_code)] @@ -173,19 +174,19 @@ pub async fn build_node_pair( let mut sender = build_libp2p_instance(rt.clone(), vec![], sender_log, fork_name).await; let mut receiver = build_libp2p_instance(rt, vec![], receiver_log, fork_name).await; - let receiver_multiaddr = receiver.swarm.behaviour_mut().local_enr().multiaddr()[1].clone(); + let receiver_multiaddr = receiver.local_enr().multiaddr()[1].clone(); // let the two nodes set up listeners let sender_fut = async { loop { - if let Libp2pEvent::NewListenAddr(_) = sender.next_event().await { + if let NetworkEvent::NewListenAddr(_) = sender.next_event().await { return; } } }; let receiver_fut = async { loop { - if let Libp2pEvent::NewListenAddr(_) = receiver.next_event().await { + if let NetworkEvent::NewListenAddr(_) = receiver.next_event().await { return; } } @@ -199,7 +200,8 @@ pub async fn build_node_pair( _ = joined => {} } - match libp2p::Swarm::dial(&mut sender.swarm, receiver_multiaddr.clone()) { + // sender.dial_peer(peer_id); + match sender.testing_dial(receiver_multiaddr.clone()) { Ok(()) => { debug!(log, "Sender dialed receiver"; "address" => format!("{:?}", receiver_multiaddr)) } @@ -226,7 +228,7 @@ pub async fn build_linear( .map(|x| get_enr(x).multiaddr()[1].clone()) .collect(); for i in 0..n - 1 { - match libp2p::Swarm::dial(&mut nodes[i].swarm, multiaddrs[i + 1].clone()) { + match nodes[i].testing_dial(multiaddrs[i + 1].clone()) { Ok(()) => debug!(log, "Connected"), Err(_) => error!(log, "Failed to connect"), }; diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index 3f2cb7c247e..b275cba863a 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -1,7 +1,7 @@ #![cfg(test)] use lighthouse_network::rpc::methods::*; use lighthouse_network::{ - rpc::max_rpc_size, Libp2pEvent, OldBehaviourEvent, ReportSource, Request, Response, + rpc::max_rpc_size, Libp2pEvent, NetworkEvent, ReportSource, Request, Response, }; use slog::{debug, warn, Level}; use ssz::Encode; @@ -86,16 +86,16 @@ fn test_status_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); sender.send_request(peer_id, 10, rpc_request.clone()); } - Libp2pEvent::Behaviour(OldBehaviourEvent::ResponseReceived { + NetworkEvent::ResponseReceived { peer_id: _, id: 10, response, - }) => { + } => { // Should receive the RPC response debug!(log, "Sender Received"); assert_eq!(response, rpc_response.clone()); @@ -111,11 +111,11 @@ fn test_status_rpc() { let receiver_future = async { loop { match receiver.next_event().await { - Libp2pEvent::Behaviour(OldBehaviourEvent::RequestReceived { + NetworkEvent::RequestReceived { peer_id, id, request, - }) => { + } => { if request == rpc_request { // send the response debug!(log, "Receiver Received"); @@ -184,16 +184,16 @@ fn test_blocks_by_range_chunked_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); sender.send_request(peer_id, request_id, rpc_request.clone()); } - Libp2pEvent::Behaviour(OldBehaviourEvent::ResponseReceived { + NetworkEvent::ResponseReceived { peer_id: _, id: _, response, - }) => 
{ + } => { warn!(log, "Sender received a response"); match response { Response::BlocksByRange(Some(_)) => { @@ -225,11 +225,11 @@ fn test_blocks_by_range_chunked_rpc() { let receiver_future = async { loop { match receiver.next_event().await { - Libp2pEvent::Behaviour(OldBehaviourEvent::RequestReceived { + NetworkEvent::RequestReceived { peer_id, id, request, - }) => { + } => { if request == rpc_request { // send the response warn!(log, "Receiver got request"); @@ -299,13 +299,13 @@ fn test_blocks_by_range_over_limit() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); sender.send_request(peer_id, request_id, rpc_request.clone()); } // The request will fail because the sender will refuse to send anything > MAX_RPC_SIZE - Libp2pEvent::Behaviour(OldBehaviourEvent::RPCFailed { id, .. }) => { + NetworkEvent::RPCFailed { id, .. } => { assert_eq!(id, request_id); return; } @@ -318,11 +318,11 @@ fn test_blocks_by_range_over_limit() { let receiver_future = async { loop { match receiver.next_event().await { - Libp2pEvent::Behaviour(OldBehaviourEvent::RequestReceived { + NetworkEvent::RequestReceived { peer_id, id, request, - }) => { + } => { if request == rpc_request { // send the response warn!(log, "Receiver got request"); @@ -387,16 +387,16 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); sender.send_request(peer_id, request_id, rpc_request.clone()); } - Libp2pEvent::Behaviour(OldBehaviourEvent::ResponseReceived { + NetworkEvent::ResponseReceived { peer_id: _, id: _, response, - }) => + } => // Should receive the RPC response { debug!(log, "Sender received a response"); @@ -434,11 +434,11 @@ fn test_blocks_by_range_chunked_rpc_terminates_correctly() { .await { futures::future::Either::Left(( - Libp2pEvent::Behaviour(OldBehaviourEvent::RequestReceived { + NetworkEvent::RequestReceived { peer_id, id, request, - }), + }, _, )) => { if request == rpc_request { @@ -511,16 +511,16 @@ fn test_blocks_by_range_single_empty_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); sender.send_request(peer_id, 10, rpc_request.clone()); } - Libp2pEvent::Behaviour(OldBehaviourEvent::ResponseReceived { + NetworkEvent::ResponseReceived { peer_id: _, id: 10, response, - }) => match response { + } => match response { Response::BlocksByRange(Some(_)) => { assert_eq!(response, rpc_response.clone()); messages_received += 1; @@ -543,11 +543,11 @@ fn test_blocks_by_range_single_empty_rpc() { let receiver_future = async { loop { match receiver.next_event().await { - Libp2pEvent::Behaviour(OldBehaviourEvent::RequestReceived { + NetworkEvent::RequestReceived { peer_id, id, request, - }) => { + } => { if request == rpc_request { // send the response warn!(log, "Receiver got request"); @@ -626,16 +626,16 @@ fn test_blocks_by_root_chunked_rpc() { let sender_future = async { loop { match sender.next_event().await { - 
Libp2pEvent::Behaviour(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); sender.send_request(peer_id, 6, rpc_request.clone()); } - Libp2pEvent::Behaviour(OldBehaviourEvent::ResponseReceived { + NetworkEvent::ResponseReceived { peer_id: _, id: 6, response, - }) => match response { + } => match response { Response::BlocksByRoot(Some(_)) => { if messages_received < 2 { assert_eq!(response, rpc_response_base.clone()); @@ -664,11 +664,11 @@ fn test_blocks_by_root_chunked_rpc() { let receiver_future = async { loop { match receiver.next_event().await { - Libp2pEvent::Behaviour(OldBehaviourEvent::RequestReceived { + NetworkEvent::RequestReceived { peer_id, id, request, - }) => { + } => { if request == rpc_request { // send the response debug!(log, "Receiver got request"); @@ -750,16 +750,16 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a STATUS message debug!(log, "Sending RPC"); sender.send_request(peer_id, 10, rpc_request.clone()); } - Libp2pEvent::Behaviour(OldBehaviourEvent::ResponseReceived { + NetworkEvent::ResponseReceived { peer_id: _, id: 10, response, - }) => { + } => { debug!(log, "Sender received a response"); match response { Response::BlocksByRoot(Some(_)) => { @@ -797,11 +797,11 @@ fn test_blocks_by_root_chunked_rpc_terminates_correctly() { .await { futures::future::Either::Left(( - Libp2pEvent::Behaviour(OldBehaviourEvent::RequestReceived { + NetworkEvent::RequestReceived { peer_id, id, request, - }), + }, _, )) => { if request == rpc_request { @@ -858,7 +858,7 @@ fn test_goodbye_rpc() { let sender_future = async { loop { match sender.next_event().await { - Libp2pEvent::Behaviour(OldBehaviourEvent::PeerConnectedOutgoing(peer_id)) => { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { // Send a goodbye and disconnect debug!(log, "Sending RPC"); sender.goodbye_peer( @@ -867,7 +867,7 @@ fn test_goodbye_rpc() { ReportSource::SyncService, ); } - Libp2pEvent::Behaviour(OldBehaviourEvent::PeerDisconnected(_)) => { + NetworkEvent::PeerDisconnected(_) => { return; } _ => {} // Ignore other RPC messages @@ -879,7 +879,7 @@ fn test_goodbye_rpc() { let receiver_future = async { loop { match receiver.next_event().await { - Libp2pEvent::Behaviour(OldBehaviourEvent::PeerDisconnected(_)) => { + NetworkEvent::PeerDisconnected(_) => { // Should receive sent RPC request return; } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 586cf0ae608..0dc49d1458e 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -11,6 +11,7 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; use futures::channel::mpsc::Sender; use futures::future::OptionFuture; use futures::prelude::*; +use futures::StreamExt; use lighthouse_network::service::Network; use lighthouse_network::{ prometheus_client::registry::Registry, MessageAcceptance, Service as LibP2PService, @@ -22,7 +23,7 @@ use lighthouse_network::{ }; use lighthouse_network::{ types::{GossipEncoding, GossipTopic}, - MessageId, NetworkGlobals, NetworkEvent, PeerId, + MessageId, NetworkEvent, NetworkGlobals, PeerId, }; use slog::{crit, debug, error, info, o, trace, warn}; use std::{net::SocketAddr, pin::Pin, sync::Arc, time::Duration}; @@ -457,92 +458,90 @@ 
impl NetworkService { /// Handle an event received from the network. async fn on_libp2p_event( &mut self, - ev: Libp2pEvent, + ev: NetworkEvent, shutdown_sender: &mut Sender, ) { match ev { - Libp2pEvent::Behaviour(event) => match event { - NetworkEvent::PeerConnectedOutgoing(peer_id) => { - self.send_to_router(RouterMessage::PeerDialed(peer_id)); - } - NetworkEvent::PeerConnectedIncoming(_) - | NetworkEvent::PeerBanned(_) - | NetworkEvent::PeerUnbanned(_) => { - // No action required for these events. - } - NetworkEvent::PeerDisconnected(peer_id) => { - self.send_to_router(RouterMessage::PeerDisconnected(peer_id)); - } - NetworkEvent::RequestReceived { + NetworkEvent::PeerConnectedOutgoing(peer_id) => { + self.send_to_router(RouterMessage::PeerDialed(peer_id)); + } + NetworkEvent::PeerConnectedIncoming(_) + | NetworkEvent::PeerBanned(_) + | NetworkEvent::PeerUnbanned(_) => { + // No action required for these events. + } + NetworkEvent::PeerDisconnected(peer_id) => { + self.send_to_router(RouterMessage::PeerDisconnected(peer_id)); + } + NetworkEvent::RequestReceived { + peer_id, + id, + request, + } => { + self.send_to_router(RouterMessage::RPCRequestReceived { peer_id, id, request, - } => { - self.send_to_router(RouterMessage::RPCRequestReceived { - peer_id, - id, - request, - }); - } - NetworkEvent::ResponseReceived { + }); + } + NetworkEvent::ResponseReceived { + peer_id, + id, + response, + } => { + self.send_to_router(RouterMessage::RPCResponseReceived { peer_id, - id, + request_id: id, response, - } => { - self.send_to_router(RouterMessage::RPCResponseReceived { - peer_id, - request_id: id, - response, - }); - } - NetworkEvent::RPCFailed { id, peer_id } => { - self.send_to_router(RouterMessage::RPCFailed { - peer_id, - request_id: id, - }); - } - NetworkEvent::StatusPeer(peer_id) => { - self.send_to_router(RouterMessage::StatusPeer(peer_id)); - } - NetworkEvent::PubsubMessage { - id, - source, - message, - .. - } => { - match message { - // attestation information gets processed in the attestation service - PubsubMessage::Attestation(ref subnet_and_attestation) => { - let subnet = subnet_and_attestation.0; - let attestation = &subnet_and_attestation.1; - // checks if we have an aggregator for the slot. If so, we should process - // the attestation, else we just just propagate the Attestation. - let should_process = self - .attestation_service - .should_process_attestation(subnet, attestation); - self.send_to_router(RouterMessage::PubsubMessage( - id, - source, - message, - should_process, - )); - } - _ => { - // all else is sent to the router - self.send_to_router(RouterMessage::PubsubMessage( - id, source, message, true, - )); - } + }); + } + NetworkEvent::RPCFailed { id, peer_id } => { + self.send_to_router(RouterMessage::RPCFailed { + peer_id, + request_id: id, + }); + } + NetworkEvent::StatusPeer(peer_id) => { + self.send_to_router(RouterMessage::StatusPeer(peer_id)); + } + NetworkEvent::PubsubMessage { + id, + source, + message, + .. + } => { + match message { + // attestation information gets processed in the attestation service + PubsubMessage::Attestation(ref subnet_and_attestation) => { + let subnet = subnet_and_attestation.0; + let attestation = &subnet_and_attestation.1; + // checks if we have an aggregator for the slot. If so, we should process + // the attestation, else we just just propagate the Attestation. 
+ let should_process = self + .attestation_service + .should_process_attestation(subnet, attestation); + self.send_to_router(RouterMessage::PubsubMessage( + id, + source, + message, + should_process, + )); + } + _ => { + // all else is sent to the router + self.send_to_router(RouterMessage::PubsubMessage( + id, source, message, true, + )); } } - }, - Libp2pEvent::NewListenAddr(multiaddr) => { + } + NetworkEvent::NewListenAddr(multiaddr) => { self.network_globals .listen_multiaddrs .write() .push(multiaddr); } - Libp2pEvent::ZeroListeners => { + NetworkEvent::ZeroListeners => { let _ = shutdown_sender .send(ShutdownReason::Failure( "All listeners are closed. Unable to listen", @@ -581,7 +580,7 @@ impl NetworkService { response, id, } => { - self.libp2p.send_successful_response(peer_id, id, response); + self.libp2p.send_response(peer_id, id, response); } NetworkMessage::SendErrorResponse { peer_id, From 4ad7131e6e202f877bbb880621390f41582c6842 Mon Sep 17 00:00:00 2001 From: Diva M Date: Wed, 24 Aug 2022 16:42:17 -0500 Subject: [PATCH 12/31] fix pm tests --- beacon_node/lighthouse_network/tests/pm_tests.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/beacon_node/lighthouse_network/tests/pm_tests.rs b/beacon_node/lighthouse_network/tests/pm_tests.rs index 96767204db9..f7110844a2c 100644 --- a/beacon_node/lighthouse_network/tests/pm_tests.rs +++ b/beacon_node/lighthouse_network/tests/pm_tests.rs @@ -1,4 +1,4 @@ -#![cfg(not(debug_assertions))] +// #![cfg(not(debug_assertions))] mod common; use std::{ @@ -98,9 +98,7 @@ async fn banned_peers_consistency() { discovery_enabled: false, ..Default::default() }; - let pm = PeerManager::new(pm_config, globals.clone(), &pm_log) - .await - .unwrap(); + let pm = PeerManager::new(pm_config, globals.clone(), &pm_log).unwrap(); let mut pm_swarm = swarm::new_test_swarm(Behaviour::new(pm)); let pm_addr = swarm::bind_listener(&mut pm_swarm).await; let service = Service { swarm: pm_swarm }; From 1a765148c76319823a597ae3b0fdde618808778b Mon Sep 17 00:00:00 2001 From: Diva M Date: Wed, 24 Aug 2022 16:42:40 -0500 Subject: [PATCH 13/31] revert allowing pm tests in debug --- beacon_node/lighthouse_network/tests/pm_tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/lighthouse_network/tests/pm_tests.rs b/beacon_node/lighthouse_network/tests/pm_tests.rs index f7110844a2c..17a044ced02 100644 --- a/beacon_node/lighthouse_network/tests/pm_tests.rs +++ b/beacon_node/lighthouse_network/tests/pm_tests.rs @@ -1,4 +1,4 @@ -// #![cfg(not(debug_assertions))] +#![cfg(not(debug_assertions))] mod common; use std::{ From 43b2edadc6f897f6f1cf190479329d04dc91513b Mon Sep 17 00:00:00 2001 From: Diva M Date: Wed, 24 Aug 2022 16:52:24 -0500 Subject: [PATCH 14/31] further cleanup --- beacon_node/http_api/tests/common.rs | 1 - .../lighthouse_network/src/old_service.rs | 176 +----------------- .../lighthouse_network/src/service/mod.rs | 34 ++-- .../lighthouse_network/tests/common/mod.rs | 1 - .../lighthouse_network/tests/rpc_tests.rs | 4 +- beacon_node/network/src/service.rs | 7 +- 6 files changed, 22 insertions(+), 201 deletions(-) diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/tests/common.rs index 032e1346fbb..8f396fde895 100644 --- a/beacon_node/http_api/tests/common.rs +++ b/beacon_node/http_api/tests/common.rs @@ -119,7 +119,6 @@ pub async fn create_api_server_on_port( // Only a peer manager can add peers, so we create a dummy manager. 
let config = lighthouse_network::peer_manager::config::Config::default(); let mut pm = PeerManager::new(config, network_globals.clone(), &log) - .await .unwrap(); // add a peer diff --git a/beacon_node/lighthouse_network/src/old_service.rs b/beacon_node/lighthouse_network/src/old_service.rs index 9b43ec16a4b..6047d4cd722 100644 --- a/beacon_node/lighthouse_network/src/old_service.rs +++ b/beacon_node/lighthouse_network/src/old_service.rs @@ -1,17 +1,15 @@ use crate::multiaddr::Protocol; -use crate::rpc::{GoodbyeReason, MetaData, MetaDataV1, MetaDataV2, RPCResponseErrorCode, ReqId}; -use crate::service::{save_metadata_to_disk, NetworkEvent, PeerRequestId, Request, Response}; +use crate::rpc::{MetaData, MetaDataV1, MetaDataV2}; +use crate::service::save_metadata_to_disk; use crate::types::{error, EnrAttestationBitfield, EnrSyncCommitteeBitfield}; -use crate::{NetworkConfig, NetworkGlobals, PeerAction, ReportSource}; +use crate::NetworkConfig; +use libp2p::bandwidth::{BandwidthLogging, BandwidthSinks}; use libp2p::core::{ identity::Keypair, multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed, }; -use libp2p::{ - bandwidth::{BandwidthLogging, BandwidthSinks}, - core, noise, PeerId, Swarm, Transport, -}; +use libp2p::{core, noise, PeerId, Transport}; use prometheus_client::registry::Registry; -use slog::{debug, warn, Logger}; +use slog::{debug, warn}; use ssz::Decode; use std::fs::File; use std::io::prelude::*; @@ -25,33 +23,6 @@ pub const MAX_CONNECTIONS_PER_PEER: u32 = 1; /// The filename to store our local metadata. pub const METADATA_FILENAME: &str = "metadata"; -/// The types of events than can be obtained from polling the libp2p service. -/// -/// This is a subset of the events that a libp2p swarm emits. -#[derive(Debug)] -pub enum Libp2pEvent { - /// A behaviour event - Behaviour(NetworkEvent), - /// A new listening address has been established. - NewListenAddr(Multiaddr), - /// We reached zero listening addresses. - ZeroListeners, -} - -/// The configuration and state of the libp2p components for the beacon node. -pub struct Service { - _p_a: std::marker::PhantomData, - _p_b: std::marker::PhantomData, - /// The libp2p Swarm handler. - pub swarm: Swarm, - /// The bandwidth logger for the underlying libp2p transport. - pub bandwidth: Arc, - /// This node's PeerId. - pub local_peer_id: PeerId, - /// The libp2p logger handle. - pub log: Logger, -} - pub struct Context<'a> { pub config: &'a NetworkConfig, pub enr_fork_id: EnrForkId, @@ -60,141 +31,6 @@ pub struct Context<'a> { pub gossipsub_registry: Option<&'a mut Registry>, } -impl Service { - pub async fn new( - executor: task_executor::TaskExecutor, - ctx: Context<'_>, - log: &Logger, - ) -> error::Result<(Arc>, Self)> { - // listen on the specified address - - let service = Service { - _p_a: Default::default(), - _p_b: Default::default(), - swarm: todo!(), - bandwidth: todo!(), - local_peer_id: todo!(), - log: log.clone(), - }; - let network_globals = todo!(); - - Ok((network_globals, service)) - } - - /// Sends a request to a peer, with a given Id. - pub fn send_request(&mut self, peer_id: PeerId, request_id: AppReqId, request: Request) { - todo!() - } - - /// Informs the peer that their request failed. - pub fn respond_with_error( - &mut self, - peer_id: PeerId, - id: PeerRequestId, - error: RPCResponseErrorCode, - reason: String, - ) { - todo!() - } - - /// Report a peer's action. 
- pub fn report_peer( - &mut self, - peer_id: &PeerId, - action: PeerAction, - source: ReportSource, - msg: &'static str, - ) { - todo!() - } - - /// Disconnect and ban a peer, providing a reason. - pub fn goodbye_peer(&mut self, peer_id: &PeerId, reason: GoodbyeReason, source: ReportSource) { - todo!() - } - - /// Sends a response to a peer's request. - pub fn send_response(&mut self, peer_id: PeerId, id: PeerRequestId, response: Response) { - todo!() - } - - pub async fn next_event(&mut self) -> Libp2pEvent { - todo!() - /* - loop { - match self.swarm.select_next_some().await { - SwarmEvent::Behaviour(behaviour) => { - // Handle banning here - match &behaviour { - OldBehaviourEvent::PeerBanned(peer_id) => { - self.swarm.ban_peer_id(*peer_id); - } - OldBehaviourEvent::PeerUnbanned(peer_id) => { - self.swarm.unban_peer_id(*peer_id); - } - _ => {} - } - return Libp2pEvent::Behaviour(behaviour); - } - SwarmEvent::ConnectionEstablished { - peer_id: _, - endpoint: _, - num_established: _, - concurrent_dial_errors: _, - } => {} - SwarmEvent::ConnectionClosed { - peer_id: _, - cause: _, - endpoint: _, - num_established: _, - } => {} - SwarmEvent::NewListenAddr { address, .. } => { - return Libp2pEvent::NewListenAddr(address) - } - SwarmEvent::IncomingConnection { - local_addr, - send_back_addr, - } => { - trace!(self.log, "Incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr) - } - SwarmEvent::IncomingConnectionError { - local_addr, - send_back_addr, - error, - } => { - debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => %error); - } - SwarmEvent::BannedPeer { peer_id, .. } => { - debug!(self.log, "Banned peer connection rejected"; "peer_id" => %peer_id); - } - SwarmEvent::OutgoingConnectionError { peer_id, error } => { - debug!(self.log, "Failed to dial address"; "peer_id" => ?peer_id, "error" => %error); - } - SwarmEvent::ExpiredListenAddr { address, .. } => { - debug!(self.log, "Listen address expired"; "address" => %address) - } - SwarmEvent::ListenerClosed { - addresses, reason, .. - } => { - crit!(self.log, "Listener closed"; "addresses" => ?addresses, "reason" => ?reason); - if Swarm::listeners(&self.swarm).count() == 0 { - return Libp2pEvent::ZeroListeners; - } - } - SwarmEvent::ListenerError { error, .. 
} => { - // this is non fatal, but we still check - warn!(self.log, "Listener error"; "error" => ?error); - if Swarm::listeners(&self.swarm).count() == 0 { - return Libp2pEvent::ZeroListeners; - } - } - SwarmEvent::Dialing(_peer_id) => {} - } - } - */ - } -} - type BoxedTransport = Boxed<(PeerId, StreamMuxerBox)>; /// The implementation supports TCP/IP, WebSockets over TCP/IP, noise as the encryption layer, and diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index ca1d5019f47..71257e37916 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -21,24 +21,21 @@ use crate::{ use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; use crate::{rpc::*, EnrExt}; use futures::stream::StreamExt; -use futures::FutureExt; use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettings}; use libp2p::bandwidth::BandwidthSinks; use libp2p::gossipsub::error::PublishError; use libp2p::swarm::{ConnectionLimits, SwarmBuilder, SwarmEvent}; use libp2p::Swarm; use libp2p::{ - core::{connection::ConnectionId, identity::Keypair}, + core::connection::ConnectionId, gossipsub::{ metrics::Config as GossipsubMetricsConfig, subscription_filter::{MaxCountSubscriptionFilter, WhitelistSubscriptionFilter}, - Gossipsub as BaseGossipsub, GossipsubEvent, IdentTopic as Topic, MessageAcceptance, - MessageAuthenticity, MessageId, + GossipsubEvent, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, }, identify::{Identify, IdentifyConfig, IdentifyEvent}, multiaddr::{Multiaddr, Protocol as MProtocol}, - swarm::NetworkBehaviour, - NetworkBehaviour, PeerId, + PeerId, }; use slog::{crit, debug, info, o, trace, warn}; use ssz::Encode; @@ -48,12 +45,10 @@ use std::io::Write; use std::path::{Path, PathBuf}; use std::pin::Pin; use std::{ - collections::VecDeque, marker::PhantomData, sync::Arc, task::{Context, Poll}, }; -use types::eth_spec; use types::{ consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, SignedBeaconBlock, Slot, SubnetId, SyncSubnetId, @@ -1452,25 +1447,22 @@ impl Network { debug!(self.log, "Failed to dial address"; "peer_id" => ?peer_id, "error" => %error); None } - SwarmEvent::BannedPeer { peer_id, endpoint } => { + SwarmEvent::BannedPeer { + peer_id, + endpoint: _, + } => { debug!(self.log, "Banned peer connection rejected"; "peer_id" => %peer_id); None } - SwarmEvent::NewListenAddr { - listener_id, - address, - } => todo!(), - SwarmEvent::ExpiredListenAddr { - listener_id, - address, - } => { + SwarmEvent::NewListenAddr { address, .. } => { + Some(NetworkEvent::NewListenAddr(address)) + } + SwarmEvent::ExpiredListenAddr { address, .. } => { debug!(self.log, "Listen address expired"; "address" => %address); None } SwarmEvent::ListenerClosed { - listener_id, - addresses, - reason, + addresses, reason, .. } => { crit!(self.log, "Listener closed"; "addresses" => ?addresses, "reason" => ?reason); if Swarm::listeners(&self.swarm).count() == 0 { @@ -1479,7 +1471,7 @@ impl Network { None } } - SwarmEvent::ListenerError { listener_id, error } => { + SwarmEvent::ListenerError { error, .. 
} => { // this is non fatal, but we still check warn!(self.log, "Listener error"; "error" => ?error); if Swarm::listeners(&self.swarm).count() == 0 { diff --git a/beacon_node/lighthouse_network/tests/common/mod.rs b/beacon_node/lighthouse_network/tests/common/mod.rs index 135851d6bdf..a3c32d0fb1b 100644 --- a/beacon_node/lighthouse_network/tests/common/mod.rs +++ b/beacon_node/lighthouse_network/tests/common/mod.rs @@ -1,5 +1,4 @@ #![cfg(test)] -use futures::StreamExt; use libp2p::gossipsub::GossipsubConfigBuilder; use lighthouse_network::service::Network as LibP2PService; use lighthouse_network::Enr; diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs index b275cba863a..9183453492c 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -1,8 +1,6 @@ #![cfg(test)] use lighthouse_network::rpc::methods::*; -use lighthouse_network::{ - rpc::max_rpc_size, Libp2pEvent, NetworkEvent, ReportSource, Request, Response, -}; +use lighthouse_network::{rpc::max_rpc_size, NetworkEvent, ReportSource, Request, Response}; use slog::{debug, warn, Level}; use ssz::Encode; use ssz_types::VariableList; diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 0dc49d1458e..ec8573ea1f5 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -13,13 +13,10 @@ use futures::future::OptionFuture; use futures::prelude::*; use futures::StreamExt; use lighthouse_network::service::Network; -use lighthouse_network::{ - prometheus_client::registry::Registry, MessageAcceptance, Service as LibP2PService, -}; +use lighthouse_network::{prometheus_client::registry::Registry, MessageAcceptance}; use lighthouse_network::{ rpc::{GoodbyeReason, RPCResponseErrorCode}, - Context, Libp2pEvent, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Request, - Response, Subnet, + Context, PeerAction, PeerRequestId, PubsubMessage, ReportSource, Request, Response, Subnet, }; use lighthouse_network::{ types::{GossipEncoding, GossipTopic}, From d42734820fef3dc1a27fd1eed78d9eed67a7a3d1 Mon Sep 17 00:00:00 2001 From: Diva M Date: Wed, 24 Aug 2022 17:04:48 -0500 Subject: [PATCH 15/31] fmt and clippy for the day --- beacon_node/lighthouse_network/src/peer_manager/mod.rs | 2 +- beacon_node/lighthouse_network/src/service/mod.rs | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index a2a7eb42939..0f291359565 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -1,7 +1,7 @@ //! Implementation of Lighthouse's peer management system. 
-use crate::service::TARGET_SUBNET_PEERS; use crate::rpc::{GoodbyeReason, MetaData, Protocol, RPCError, RPCResponseErrorCode}; +use crate::service::TARGET_SUBNET_PEERS; use crate::{error, metrics, Gossipsub}; use crate::{NetworkGlobals, PeerId}; use crate::{Subnet, SubnetDiscovery}; diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 71257e37916..04a3a2803af 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -174,7 +174,7 @@ impl Network { // Construct the metadata let meta_data = crate::load_or_build_metadata(&config.network_dir, &log); let globals = NetworkGlobals::new( - enr.clone(), + enr, config.libp2p_port, config.discovery_port, meta_data, @@ -993,11 +993,11 @@ impl Network { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blocks_by_root"]) } } - return NetworkEvent::RequestReceived { + NetworkEvent::RequestReceived { peer_id, id, request, - }; + } } /// Dial cached enrs in discovery service that are in the given `subnet_id` and aren't @@ -1491,7 +1491,7 @@ impl Network { // perform gossipsub score updates when necessary while self.update_gossipsub_scores.poll_tick(cx).is_ready() { - let ref mut this = self.swarm.behaviour_mut(); + let this = self.swarm.behaviour_mut(); this.peer_manager.update_gossipsub_scores(&this.gossipsub); } From 0f972aacfdd4bc50f6e885133cfa262706c3241c Mon Sep 17 00:00:00 2001 From: Diva M Date: Wed, 24 Aug 2022 17:16:15 -0500 Subject: [PATCH 16/31] more fmt --- beacon_node/http_api/tests/common.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/beacon_node/http_api/tests/common.rs b/beacon_node/http_api/tests/common.rs index 8f396fde895..a0dbf40b290 100644 --- a/beacon_node/http_api/tests/common.rs +++ b/beacon_node/http_api/tests/common.rs @@ -118,8 +118,7 @@ pub async fn create_api_server_on_port( // Only a peer manager can add peers, so we create a dummy manager. let config = lighthouse_network::peer_manager::config::Config::default(); - let mut pm = PeerManager::new(config, network_globals.clone(), &log) - .unwrap(); + let mut pm = PeerManager::new(config, network_globals.clone(), &log).unwrap(); // add a peer let peer_id = PeerId::random(); From bcbd9d19e0678546c7678ed9e1237b5f8be43568 Mon Sep 17 00:00:00 2001 From: Diva M Date: Thu, 25 Aug 2022 12:50:35 -0500 Subject: [PATCH 17/31] update installation guide --- book/src/installation-source.md | 14 +++++++++++--- book/src/setup.md | 2 ++ 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/book/src/installation-source.md b/book/src/installation-source.md index 661035ca510..81a826d64ff 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -16,7 +16,7 @@ operating system. Install the following packages: ```bash -sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang +sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf3-compiler ``` > Note: Lighthouse requires CMake v3.12 or newer, which isn't available in the package repositories @@ -32,13 +32,18 @@ sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clan brew install cmake ``` +1. Install protoc using Homebrew: +``` +brew install protobuf +``` + [Homebrew]: https://brew.sh/ #### Windows 1. Install [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git). 1. Install the [Chocolatey](https://chocolatey.org/install) package manager for Windows. -1. 
Install Make, CMake and LLVM using Chocolatey: +1. Install Make, CMake, LLVM and protoc using Chocolatey: ``` choco install make @@ -52,10 +57,13 @@ choco install cmake --installargs 'ADD_CMAKE_TO_PATH=System' choco install llvm ``` +``` +choco install protoc +``` + These dependencies are for compiling Lighthouse natively on Windows. Lighthouse can also run successfully under the [Windows Subsystem for Linux (WSL)][WSL]. If using Ubuntu under WSL, you should follow the instructions for Ubuntu listed in the [Dependencies (Ubuntu)](#ubuntu) section. - [WSL]: https://docs.microsoft.com/en-us/windows/wsl/about ## Build Lighthouse diff --git a/book/src/setup.md b/book/src/setup.md index e8c56623bec..a1febe4a026 100644 --- a/book/src/setup.md +++ b/book/src/setup.md @@ -14,6 +14,8 @@ The additional requirements for developers are: don't have `ganache` available on your `PATH` or if ganache is older than v7. - [`cmake`](https://cmake.org/cmake/help/latest/command/install.html). Used by some dependencies. See [`Installation Guide`](./installation.md) for more info. +- [`protoc`](https://github.com/protocolbuffers/protobuf/releases) required for + the networking stack. - [`java 11 runtime`](https://openjdk.java.net/projects/jdk/). 11 is the minimum, used by web3signer_tests. From 812f34ec28480ab3d1685ba06760f57c732cf95b Mon Sep 17 00:00:00 2001 From: Diva M Date: Thu, 25 Aug 2022 12:54:02 -0500 Subject: [PATCH 18/31] install protoc for windows in CI using installation guide commands --- .github/workflows/test-suite.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 1a7d78f61f6..357e59de08e 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -68,7 +68,7 @@ jobs: node-version: '14' - name: Install windows build tools run: | - choco install python visualstudio2019-workload-vctools -y + choco install python protoc visualstudio2019-workload-vctools -y npm config set msvs_version 2019 - name: Install ganache run: npm install -g ganache --loglevel verbose From 421fe097c1e64032cdc7d11e461c95adab87095c Mon Sep 17 00:00:00 2001 From: Diva M Date: Thu, 25 Aug 2022 12:55:25 -0500 Subject: [PATCH 19/31] only windows tests for quick CI feedback --- .github/workflows/test-suite.yml | 602 +++++++++++++++---------------- 1 file changed, 301 insertions(+), 301 deletions(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 357e59de08e..526a6cb9c17 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -21,39 +21,39 @@ jobs: steps: - name: Check that the pull request is not targeting the stable branch run: test ${{ github.base_ref }} != "stable" - extract-msrv: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v1 - - name: Extract Minimum Supported Rust Version (MSRV) - run: | - metadata=$(cargo metadata --no-deps --format-version 1) - msrv=$(echo $metadata | jq -r '.packages | map(select(.name == "lighthouse")) | .[0].rust_version') - echo "::set-output name=MSRV::$msrv" - id: extract_msrv - outputs: - MSRV: ${{ steps.extract_msrv.outputs.MSRV }} - cargo-fmt: - name: cargo-fmt - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v1 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Check formatting with cargo fmt - run: make cargo-fmt - release-tests-ubuntu: - name: release-tests-ubuntu - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: 
actions/checkout@v1 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Install ganache - run: sudo npm install -g ganache - - name: Run tests in release - run: make test-release + # extract-msrv: + # runs-on: ubuntu-latest + # steps: + # - uses: actions/checkout@v1 + # - name: Extract Minimum Supported Rust Version (MSRV) + # run: | + # metadata=$(cargo metadata --no-deps --format-version 1) + # msrv=$(echo $metadata | jq -r '.packages | map(select(.name == "lighthouse")) | .[0].rust_version') + # echo "::set-output name=MSRV::$msrv" + # id: extract_msrv + # outputs: + # MSRV: ${{ steps.extract_msrv.outputs.MSRV }} + # cargo-fmt: + # name: cargo-fmt + # runs-on: ubuntu-latest + # steps: + # - uses: actions/checkout@v1 + # - name: Get latest version of stable Rust + # run: rustup update stable + # - name: Check formatting with cargo fmt + # run: make cargo-fmt + # release-tests-ubuntu: + # name: release-tests-ubuntu + # runs-on: ubuntu-latest + # needs: cargo-fmt + # steps: + # - uses: actions/checkout@v1 + # - name: Get latest version of stable Rust + # run: rustup update stable + # - name: Install ganache + # run: sudo npm install -g ganache + # - name: Run tests in release + # run: make test-release release-tests-windows: name: release-tests-windows runs-on: windows-2019 @@ -82,271 +82,271 @@ jobs: run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV - name: Run tests in release run: make test-release - beacon-chain-tests: - name: beacon-chain-tests - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v1 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Run beacon_chain tests for all known forks - run: make test-beacon-chain - op-pool-tests: - name: op-pool-tests - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v1 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Run operation_pool tests for all known forks - run: make test-op-pool - slasher-tests: - name: slasher-tests - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v1 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Run slasher tests for all supported backends - run: make test-slasher - debug-tests-ubuntu: - name: debug-tests-ubuntu - runs-on: ubuntu-22.04 - needs: cargo-fmt - steps: - - uses: actions/checkout@v1 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Install ganache - run: sudo npm install -g ganache - - name: Run tests in debug - run: make test-debug - state-transition-vectors-ubuntu: - name: state-transition-vectors-ubuntu - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v1 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Run state_transition_vectors in release. 
- run: make run-state-transition-tests - ef-tests-ubuntu: - name: ef-tests-ubuntu - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v1 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Run consensus-spec-tests with blst, milagro and fake_crypto - run: make test-ef - dockerfile-ubuntu: - name: dockerfile-ubuntu - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v1 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Build the root Dockerfile - run: docker build --build-arg FEATURES=portable -t lighthouse:local . - - name: Test the built image - run: docker run -t lighthouse:local lighthouse --version - eth1-simulator-ubuntu: - name: eth1-simulator-ubuntu - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v1 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Install ganache - run: sudo npm install -g ganache - - name: Run the beacon chain sim that starts from an eth1 contract - run: cargo run --release --bin simulator eth1-sim - merge-transition-ubuntu: - name: merge-transition-ubuntu - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v1 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Install ganache - run: sudo npm install -g ganache - - name: Run the beacon chain sim and go through the merge transition - run: cargo run --release --bin simulator eth1-sim --post-merge - no-eth1-simulator-ubuntu: - name: no-eth1-simulator-ubuntu - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v1 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Install ganache - run: sudo npm install -g ganache - - name: Run the beacon chain sim without an eth1 connection - run: cargo run --release --bin simulator no-eth1-sim - syncing-simulator-ubuntu: - name: syncing-simulator-ubuntu - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v1 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Install ganache - run: sudo npm install -g ganache - - name: Run the syncing simulator - run: cargo run --release --bin simulator syncing-sim - doppelganger-protection-test: - name: doppelganger-protection-test - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v1 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Install ganache - run: sudo npm install -g ganache - - name: Install lighthouse and lcli - run: | - make - make install-lcli - - name: Run the doppelganger protection success test script - run: | - cd scripts/tests - ./doppelganger_protection.sh success - - name: Run the doppelganger protection failure test script - run: | - cd scripts/tests - ./doppelganger_protection.sh failure - execution-engine-integration-ubuntu: - name: execution-engine-integration-ubuntu - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v1 - - uses: actions/setup-go@v2 - with: - go-version: '1.17' - - uses: actions/setup-dotnet@v1 - with: - dotnet-version: '6.0.201' - - name: Get latest version of stable Rust - run: rustup update stable - - name: Run exec engine integration tests in release - run: make test-exec-engine - check-benchmarks: - name: check-benchmarks - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v1 - - name: Get latest version of stable Rust - run: rustup update 
stable - - name: Typecheck benchmark code without running it - run: make check-benches - check-consensus: - name: check-consensus - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v1 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Typecheck consensus code in strict mode - run: make check-consensus - clippy: - name: clippy - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v1 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Lint code for quality and style with Clippy - run: make lint - - name: Certify Cargo.lock freshness - run: git diff --exit-code Cargo.lock - disallowed-from-async-lint: - name: disallowed-from-async-lint - runs-on: ubuntu-latest - needs: cargo-fmt - continue-on-error: true - steps: - - uses: actions/checkout@v1 - - name: Install SigP Clippy fork - run: | - cd .. - git clone https://github.com/michaelsproul/rust-clippy.git - cd rust-clippy - git checkout 31a49666ccfcd7963b63345d6ce757c373f22c2a - cargo build --release --bin cargo-clippy --bin clippy-driver - cargo build --release --bin cargo-clippy --bin clippy-driver -Zunstable-options --out-dir $(rustc --print=sysroot)/bin - - name: Run Clippy with the disallowed-from-async lint - run: make nightly-lint - check-msrv: - name: check-msrv - runs-on: ubuntu-latest - needs: [cargo-fmt, extract-msrv] - steps: - - uses: actions/checkout@v1 - - name: Install Rust @ MSRV (${{ needs.extract-msrv.outputs.MSRV }}) - run: rustup override set ${{ needs.extract-msrv.outputs.MSRV }} - - name: Run cargo check - run: cargo check --workspace - arbitrary-check: - name: arbitrary-check - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v1 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Validate state_processing feature arbitrary-fuzz - run: make arbitrary-fuzz - cargo-audit: - name: cargo-audit - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v1 - - name: Get latest version of stable Rust - run: rustup update stable - - name: Run cargo audit to identify known security vulnerabilities reported to the RustSec Advisory Database - run: make audit - cargo-vendor: - name: cargo-vendor - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v1 - - name: Run cargo vendor to make sure dependencies can be vendored for packaging, reproducibility and archival purpose - run: CARGO_HOME=$(readlink -f $HOME) make vendor - cargo-udeps: - name: cargo-udeps - runs-on: ubuntu-latest - needs: cargo-fmt - steps: - - uses: actions/checkout@v1 - - name: Install Rust (${{ env.PINNED_NIGHTLY }}) - run: rustup toolchain install $PINNED_NIGHTLY - # NOTE: cargo-udeps version is pinned until this issue is resolved: - # https://github.com/est31/cargo-udeps/issues/135 - - name: Install cargo-udeps - run: cargo install cargo-udeps --locked --force --version 0.1.30 - - name: Create Cargo config dir - run: mkdir -p .cargo - - name: Install custom Cargo config - run: cp -f .github/custom/config.toml .cargo/config.toml - - name: Run cargo udeps to identify unused crates in the dependency graph - run: make udeps - env: - # Allow warnings on Nightly - RUSTFLAGS: "" + # beacon-chain-tests: + # name: beacon-chain-tests + # runs-on: ubuntu-latest + # needs: cargo-fmt + # steps: + # - uses: actions/checkout@v1 + # - name: Get latest version of stable Rust + # run: rustup update stable + # - name: Run beacon_chain tests for all 
known forks + # run: make test-beacon-chain + # op-pool-tests: + # name: op-pool-tests + # runs-on: ubuntu-latest + # needs: cargo-fmt + # steps: + # - uses: actions/checkout@v1 + # - name: Get latest version of stable Rust + # run: rustup update stable + # - name: Run operation_pool tests for all known forks + # run: make test-op-pool + # slasher-tests: + # name: slasher-tests + # runs-on: ubuntu-latest + # needs: cargo-fmt + # steps: + # - uses: actions/checkout@v1 + # - name: Get latest version of stable Rust + # run: rustup update stable + # - name: Run slasher tests for all supported backends + # run: make test-slasher + # debug-tests-ubuntu: + # name: debug-tests-ubuntu + # runs-on: ubuntu-22.04 + # needs: cargo-fmt + # steps: + # - uses: actions/checkout@v1 + # - name: Get latest version of stable Rust + # run: rustup update stable + # - name: Install ganache + # run: sudo npm install -g ganache + # - name: Run tests in debug + # run: make test-debug + # state-transition-vectors-ubuntu: + # name: state-transition-vectors-ubuntu + # runs-on: ubuntu-latest + # needs: cargo-fmt + # steps: + # - uses: actions/checkout@v1 + # - name: Get latest version of stable Rust + # run: rustup update stable + # - name: Run state_transition_vectors in release. + # run: make run-state-transition-tests + # ef-tests-ubuntu: + # name: ef-tests-ubuntu + # runs-on: ubuntu-latest + # needs: cargo-fmt + # steps: + # - uses: actions/checkout@v1 + # - name: Get latest version of stable Rust + # run: rustup update stable + # - name: Run consensus-spec-tests with blst, milagro and fake_crypto + # run: make test-ef + # dockerfile-ubuntu: + # name: dockerfile-ubuntu + # runs-on: ubuntu-latest + # needs: cargo-fmt + # steps: + # - uses: actions/checkout@v1 + # - name: Get latest version of stable Rust + # run: rustup update stable + # - name: Build the root Dockerfile + # run: docker build --build-arg FEATURES=portable -t lighthouse:local . 
+ # - name: Test the built image + # run: docker run -t lighthouse:local lighthouse --version + # eth1-simulator-ubuntu: + # name: eth1-simulator-ubuntu + # runs-on: ubuntu-latest + # needs: cargo-fmt + # steps: + # - uses: actions/checkout@v1 + # - name: Get latest version of stable Rust + # run: rustup update stable + # - name: Install ganache + # run: sudo npm install -g ganache + # - name: Run the beacon chain sim that starts from an eth1 contract + # run: cargo run --release --bin simulator eth1-sim + # merge-transition-ubuntu: + # name: merge-transition-ubuntu + # runs-on: ubuntu-latest + # needs: cargo-fmt + # steps: + # - uses: actions/checkout@v1 + # - name: Get latest version of stable Rust + # run: rustup update stable + # - name: Install ganache + # run: sudo npm install -g ganache + # - name: Run the beacon chain sim and go through the merge transition + # run: cargo run --release --bin simulator eth1-sim --post-merge + # no-eth1-simulator-ubuntu: + # name: no-eth1-simulator-ubuntu + # runs-on: ubuntu-latest + # needs: cargo-fmt + # steps: + # - uses: actions/checkout@v1 + # - name: Get latest version of stable Rust + # run: rustup update stable + # - name: Install ganache + # run: sudo npm install -g ganache + # - name: Run the beacon chain sim without an eth1 connection + # run: cargo run --release --bin simulator no-eth1-sim + # syncing-simulator-ubuntu: + # name: syncing-simulator-ubuntu + # runs-on: ubuntu-latest + # needs: cargo-fmt + # steps: + # - uses: actions/checkout@v1 + # - name: Get latest version of stable Rust + # run: rustup update stable + # - name: Install ganache + # run: sudo npm install -g ganache + # - name: Run the syncing simulator + # run: cargo run --release --bin simulator syncing-sim + # doppelganger-protection-test: + # name: doppelganger-protection-test + # runs-on: ubuntu-latest + # needs: cargo-fmt + # steps: + # - uses: actions/checkout@v1 + # - name: Get latest version of stable Rust + # run: rustup update stable + # - name: Install ganache + # run: sudo npm install -g ganache + # - name: Install lighthouse and lcli + # run: | + # make + # make install-lcli + # - name: Run the doppelganger protection success test script + # run: | + # cd scripts/tests + # ./doppelganger_protection.sh success + # - name: Run the doppelganger protection failure test script + # run: | + # cd scripts/tests + # ./doppelganger_protection.sh failure + # execution-engine-integration-ubuntu: + # name: execution-engine-integration-ubuntu + # runs-on: ubuntu-latest + # needs: cargo-fmt + # steps: + # - uses: actions/checkout@v1 + # - uses: actions/setup-go@v2 + # with: + # go-version: '1.17' + # - uses: actions/setup-dotnet@v1 + # with: + # dotnet-version: '6.0.201' + # - name: Get latest version of stable Rust + # run: rustup update stable + # - name: Run exec engine integration tests in release + # run: make test-exec-engine + # check-benchmarks: + # name: check-benchmarks + # runs-on: ubuntu-latest + # needs: cargo-fmt + # steps: + # - uses: actions/checkout@v1 + # - name: Get latest version of stable Rust + # run: rustup update stable + # - name: Typecheck benchmark code without running it + # run: make check-benches + # check-consensus: + # name: check-consensus + # runs-on: ubuntu-latest + # needs: cargo-fmt + # steps: + # - uses: actions/checkout@v1 + # - name: Get latest version of stable Rust + # run: rustup update stable + # - name: Typecheck consensus code in strict mode + # run: make check-consensus + # clippy: + # name: clippy + # runs-on: ubuntu-latest + # 
needs: cargo-fmt + # steps: + # - uses: actions/checkout@v1 + # - name: Get latest version of stable Rust + # run: rustup update stable + # - name: Lint code for quality and style with Clippy + # run: make lint + # - name: Certify Cargo.lock freshness + # run: git diff --exit-code Cargo.lock + # disallowed-from-async-lint: + # name: disallowed-from-async-lint + # runs-on: ubuntu-latest + # needs: cargo-fmt + # continue-on-error: true + # steps: + # - uses: actions/checkout@v1 + # - name: Install SigP Clippy fork + # run: | + # cd .. + # git clone https://github.com/michaelsproul/rust-clippy.git + # cd rust-clippy + # git checkout 31a49666ccfcd7963b63345d6ce757c373f22c2a + # cargo build --release --bin cargo-clippy --bin clippy-driver + # cargo build --release --bin cargo-clippy --bin clippy-driver -Zunstable-options --out-dir $(rustc --print=sysroot)/bin + # - name: Run Clippy with the disallowed-from-async lint + # run: make nightly-lint + # check-msrv: + # name: check-msrv + # runs-on: ubuntu-latest + # needs: [cargo-fmt, extract-msrv] + # steps: + # - uses: actions/checkout@v1 + # - name: Install Rust @ MSRV (${{ needs.extract-msrv.outputs.MSRV }}) + # run: rustup override set ${{ needs.extract-msrv.outputs.MSRV }} + # - name: Run cargo check + # run: cargo check --workspace + # arbitrary-check: + # name: arbitrary-check + # runs-on: ubuntu-latest + # needs: cargo-fmt + # steps: + # - uses: actions/checkout@v1 + # - name: Get latest version of stable Rust + # run: rustup update stable + # - name: Validate state_processing feature arbitrary-fuzz + # run: make arbitrary-fuzz + # cargo-audit: + # name: cargo-audit + # runs-on: ubuntu-latest + # needs: cargo-fmt + # steps: + # - uses: actions/checkout@v1 + # - name: Get latest version of stable Rust + # run: rustup update stable + # - name: Run cargo audit to identify known security vulnerabilities reported to the RustSec Advisory Database + # run: make audit + # cargo-vendor: + # name: cargo-vendor + # runs-on: ubuntu-latest + # needs: cargo-fmt + # steps: + # - uses: actions/checkout@v1 + # - name: Run cargo vendor to make sure dependencies can be vendored for packaging, reproducibility and archival purpose + # run: CARGO_HOME=$(readlink -f $HOME) make vendor + # cargo-udeps: + # name: cargo-udeps + # runs-on: ubuntu-latest + # needs: cargo-fmt + # steps: + # - uses: actions/checkout@v1 + # - name: Install Rust (${{ env.PINNED_NIGHTLY }}) + # run: rustup toolchain install $PINNED_NIGHTLY + # # NOTE: cargo-udeps version is pinned until this issue is resolved: + # # https://github.com/est31/cargo-udeps/issues/135 + # - name: Install cargo-udeps + # run: cargo install cargo-udeps --locked --force --version 0.1.30 + # - name: Create Cargo config dir + # run: mkdir -p .cargo + # - name: Install custom Cargo config + # run: cp -f .github/custom/config.toml .cargo/config.toml + # - name: Run cargo udeps to identify unused crates in the dependency graph + # run: make udeps + # env: + # # Allow warnings on Nightly + # RUSTFLAGS: "" From ca5bf5594d7722dd68355664df3bc34bdf6372db Mon Sep 17 00:00:00 2001 From: Diva M Date: Thu, 25 Aug 2022 13:20:16 -0500 Subject: [PATCH 20/31] remove more workflows --- .github/workflows/linkcheck.yml | 30 ------------- .github/workflows/local-testnet.yml | 68 ----------------------------- 2 files changed, 98 deletions(-) delete mode 100644 .github/workflows/linkcheck.yml delete mode 100644 .github/workflows/local-testnet.yml diff --git a/.github/workflows/linkcheck.yml 
b/.github/workflows/linkcheck.yml deleted file mode 100644 index 30a891febf3..00000000000 --- a/.github/workflows/linkcheck.yml +++ /dev/null @@ -1,30 +0,0 @@ -name: linkcheck - -on: - push: - branches: - - unstable - pull_request: - paths: - - 'book/**' - -jobs: - linkcheck: - name: Check broken links - runs-on: ubuntu-latest - - steps: - - name: Checkout code - uses: actions/checkout@v2 - - - name: Create docker network - run: docker network create book - - - name: Run mdbook server - run: docker run -v ${{ github.workspace }}/book:/book --network book --name book -p 3000:3000 -d peaceiris/mdbook:v0.4.20-rust serve --hostname 0.0.0.0 - - - name: Print logs - run: docker logs book - - - name: Run linkcheck - run: docker run --network book tennox/linkcheck:latest book:3000 diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml deleted file mode 100644 index 35032a09326..00000000000 --- a/.github/workflows/local-testnet.yml +++ /dev/null @@ -1,68 +0,0 @@ -# Test that local testnet starts successfully. -name: local testnet - -on: - push: - branches: - - unstable - pull_request: - -jobs: - run-local-testnet: - strategy: - matrix: - os: - - ubuntu-18.04 - - macos-latest - runs-on: ${{ matrix.os }} - steps: - - uses: actions/checkout@v1 - - - name: Get latest version of stable Rust - run: rustup update stable - - - name: Install ganache - run: npm install ganache@latest --global - - # https://github.com/actions/cache/blob/main/examples.md#rust---cargo - - uses: actions/cache@v2 - id: cache-cargo - with: - path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ - target/ - key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} - - - name: Install lighthouse - run: make && make install-lcli - - - name: Start local testnet - run: ./start_local_testnet.sh && sleep 60 - working-directory: scripts/local_testnet - - - name: Print logs - run: ./dump_logs.sh - working-directory: scripts/local_testnet - - - name: Stop local testnet - run: ./stop_local_testnet.sh - working-directory: scripts/local_testnet - - - name: Clean-up testnet - run: ./clean.sh - working-directory: scripts/local_testnet - - - name: Start local testnet with blinded block production - run: ./start_local_testnet.sh -p && sleep 60 - working-directory: scripts/local_testnet - - - name: Print logs for blinded block testnet - run: ./dump_logs.sh - working-directory: scripts/local_testnet - - - name: Stop local testnet with blinded block production - run: ./stop_local_testnet.sh - working-directory: scripts/local_testnet From 3843eaf118ddb8fb800d73da3374fb24befa3dce Mon Sep 17 00:00:00 2001 From: Diva M Date: Thu, 25 Aug 2022 13:22:43 -0500 Subject: [PATCH 21/31] enable fmt --- .github/workflows/test-suite.yml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 526a6cb9c17..90e2a4453f1 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -33,15 +33,15 @@ jobs: # id: extract_msrv # outputs: # MSRV: ${{ steps.extract_msrv.outputs.MSRV }} - # cargo-fmt: - # name: cargo-fmt - # runs-on: ubuntu-latest - # steps: - # - uses: actions/checkout@v1 - # - name: Get latest version of stable Rust - # run: rustup update stable - # - name: Check formatting with cargo fmt - # run: make cargo-fmt + cargo-fmt: + name: cargo-fmt + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable 
Rust + run: rustup update stable + - name: Check formatting with cargo fmt + run: make cargo-fmt # release-tests-ubuntu: # name: release-tests-ubuntu # runs-on: ubuntu-latest From df17e618ebdff0076a07b15ffb9a45a1aee226de Mon Sep 17 00:00:00 2001 From: Diva M Date: Thu, 25 Aug 2022 13:37:39 -0500 Subject: [PATCH 22/31] enable debug tests with protoc --- .github/workflows/test-suite.yml | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 90e2a4453f1..99e94059ab7 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -112,18 +112,20 @@ jobs: # run: rustup update stable # - name: Run slasher tests for all supported backends # run: make test-slasher - # debug-tests-ubuntu: - # name: debug-tests-ubuntu - # runs-on: ubuntu-22.04 - # needs: cargo-fmt - # steps: - # - uses: actions/checkout@v1 - # - name: Get latest version of stable Rust - # run: rustup update stable - # - name: Install ganache - # run: sudo npm install -g ganache - # - name: Run tests in debug - # run: make test-debug + debug-tests-ubuntu: + name: debug-tests-ubuntu + runs-on: ubuntu-22.04 + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 + - name: Install ganache + run: sudo npm install -g ganache + - name: Run tests in debug + run: make test-debug # state-transition-vectors-ubuntu: # name: state-transition-vectors-ubuntu # runs-on: ubuntu-latest From 9b999a58fb58f61c5632dddb88c201c256aa59cb Mon Sep 17 00:00:00 2001 From: Diva M Date: Thu, 25 Aug 2022 14:31:58 -0500 Subject: [PATCH 23/31] Revert "remove more workflows" This reverts commit c1d4221ad9d88d1702e8ad6ab5ac797af0730531. --- .github/workflows/linkcheck.yml | 30 +++++++++++++ .github/workflows/local-testnet.yml | 68 +++++++++++++++++++++++++++++ 2 files changed, 98 insertions(+) create mode 100644 .github/workflows/linkcheck.yml create mode 100644 .github/workflows/local-testnet.yml diff --git a/.github/workflows/linkcheck.yml b/.github/workflows/linkcheck.yml new file mode 100644 index 00000000000..30a891febf3 --- /dev/null +++ b/.github/workflows/linkcheck.yml @@ -0,0 +1,30 @@ +name: linkcheck + +on: + push: + branches: + - unstable + pull_request: + paths: + - 'book/**' + +jobs: + linkcheck: + name: Check broken links + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v2 + + - name: Create docker network + run: docker network create book + + - name: Run mdbook server + run: docker run -v ${{ github.workspace }}/book:/book --network book --name book -p 3000:3000 -d peaceiris/mdbook:v0.4.20-rust serve --hostname 0.0.0.0 + + - name: Print logs + run: docker logs book + + - name: Run linkcheck + run: docker run --network book tennox/linkcheck:latest book:3000 diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml new file mode 100644 index 00000000000..35032a09326 --- /dev/null +++ b/.github/workflows/local-testnet.yml @@ -0,0 +1,68 @@ +# Test that local testnet starts successfully. 
+name: local testnet + +on: + push: + branches: + - unstable + pull_request: + +jobs: + run-local-testnet: + strategy: + matrix: + os: + - ubuntu-18.04 + - macos-latest + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v1 + + - name: Get latest version of stable Rust + run: rustup update stable + + - name: Install ganache + run: npm install ganache@latest --global + + # https://github.com/actions/cache/blob/main/examples.md#rust---cargo + - uses: actions/cache@v2 + id: cache-cargo + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + target/ + key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} + + - name: Install lighthouse + run: make && make install-lcli + + - name: Start local testnet + run: ./start_local_testnet.sh && sleep 60 + working-directory: scripts/local_testnet + + - name: Print logs + run: ./dump_logs.sh + working-directory: scripts/local_testnet + + - name: Stop local testnet + run: ./stop_local_testnet.sh + working-directory: scripts/local_testnet + + - name: Clean-up testnet + run: ./clean.sh + working-directory: scripts/local_testnet + + - name: Start local testnet with blinded block production + run: ./start_local_testnet.sh -p && sleep 60 + working-directory: scripts/local_testnet + + - name: Print logs for blinded block testnet + run: ./dump_logs.sh + working-directory: scripts/local_testnet + + - name: Stop local testnet with blinded block production + run: ./stop_local_testnet.sh + working-directory: scripts/local_testnet From fd3b43555594f156346bb800d430fadf0c198494 Mon Sep 17 00:00:00 2001 From: Diva M Date: Thu, 25 Aug 2022 14:49:26 -0500 Subject: [PATCH 24/31] add protoc step to Dockerfile and CI --- .github/workflows/test-suite.yml | 592 ++++++++++++++++--------------- Dockerfile | 2 +- 2 files changed, 313 insertions(+), 281 deletions(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 99e94059ab7..a3e9625b50a 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -21,18 +21,18 @@ jobs: steps: - name: Check that the pull request is not targeting the stable branch run: test ${{ github.base_ref }} != "stable" - # extract-msrv: - # runs-on: ubuntu-latest - # steps: - # - uses: actions/checkout@v1 - # - name: Extract Minimum Supported Rust Version (MSRV) - # run: | - # metadata=$(cargo metadata --no-deps --format-version 1) - # msrv=$(echo $metadata | jq -r '.packages | map(select(.name == "lighthouse")) | .[0].rust_version') - # echo "::set-output name=MSRV::$msrv" - # id: extract_msrv - # outputs: - # MSRV: ${{ steps.extract_msrv.outputs.MSRV }} + extract-msrv: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v1 + - name: Extract Minimum Supported Rust Version (MSRV) + run: | + metadata=$(cargo metadata --no-deps --format-version 1) + msrv=$(echo $metadata | jq -r '.packages | map(select(.name == "lighthouse")) | .[0].rust_version') + echo "::set-output name=MSRV::$msrv" + id: extract_msrv + outputs: + MSRV: ${{ steps.extract_msrv.outputs.MSRV }} cargo-fmt: name: cargo-fmt runs-on: ubuntu-latest @@ -42,18 +42,20 @@ jobs: run: rustup update stable - name: Check formatting with cargo fmt run: make cargo-fmt - # release-tests-ubuntu: - # name: release-tests-ubuntu - # runs-on: ubuntu-latest - # needs: cargo-fmt - # steps: - # - uses: actions/checkout@v1 - # - name: Get latest version of stable Rust - # run: rustup update stable - # - name: Install ganache - # run: sudo npm install -g ganache - # - 
name: Run tests in release - # run: make test-release + release-tests-ubuntu: + name: release-tests-ubuntu + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 + - name: Install ganache + run: sudo npm install -g ganache + - name: Run tests in release + run: make test-release release-tests-windows: name: release-tests-windows runs-on: windows-2019 @@ -82,36 +84,40 @@ jobs: run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV - name: Run tests in release run: make test-release - # beacon-chain-tests: - # name: beacon-chain-tests - # runs-on: ubuntu-latest - # needs: cargo-fmt - # steps: - # - uses: actions/checkout@v1 - # - name: Get latest version of stable Rust - # run: rustup update stable - # - name: Run beacon_chain tests for all known forks - # run: make test-beacon-chain - # op-pool-tests: - # name: op-pool-tests - # runs-on: ubuntu-latest - # needs: cargo-fmt - # steps: - # - uses: actions/checkout@v1 - # - name: Get latest version of stable Rust - # run: rustup update stable - # - name: Run operation_pool tests for all known forks - # run: make test-op-pool - # slasher-tests: - # name: slasher-tests - # runs-on: ubuntu-latest - # needs: cargo-fmt - # steps: - # - uses: actions/checkout@v1 - # - name: Get latest version of stable Rust - # run: rustup update stable - # - name: Run slasher tests for all supported backends - # run: make test-slasher + beacon-chain-tests: + name: beacon-chain-tests + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 + - name: Run beacon_chain tests for all known forks + run: make test-beacon-chain + op-pool-tests: + name: op-pool-tests + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 + - name: Run operation_pool tests for all known forks + run: make test-op-pool + slasher-tests: + name: slasher-tests + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Run slasher tests for all supported backends + run: make test-slasher debug-tests-ubuntu: name: debug-tests-ubuntu runs-on: ubuntu-22.04 @@ -126,229 +132,255 @@ jobs: run: sudo npm install -g ganache - name: Run tests in debug run: make test-debug - # state-transition-vectors-ubuntu: - # name: state-transition-vectors-ubuntu - # runs-on: ubuntu-latest - # needs: cargo-fmt - # steps: - # - uses: actions/checkout@v1 - # - name: Get latest version of stable Rust - # run: rustup update stable - # - name: Run state_transition_vectors in release. 
- # run: make run-state-transition-tests - # ef-tests-ubuntu: - # name: ef-tests-ubuntu - # runs-on: ubuntu-latest - # needs: cargo-fmt - # steps: - # - uses: actions/checkout@v1 - # - name: Get latest version of stable Rust - # run: rustup update stable - # - name: Run consensus-spec-tests with blst, milagro and fake_crypto - # run: make test-ef - # dockerfile-ubuntu: - # name: dockerfile-ubuntu - # runs-on: ubuntu-latest - # needs: cargo-fmt - # steps: - # - uses: actions/checkout@v1 - # - name: Get latest version of stable Rust - # run: rustup update stable - # - name: Build the root Dockerfile - # run: docker build --build-arg FEATURES=portable -t lighthouse:local . - # - name: Test the built image - # run: docker run -t lighthouse:local lighthouse --version - # eth1-simulator-ubuntu: - # name: eth1-simulator-ubuntu - # runs-on: ubuntu-latest - # needs: cargo-fmt - # steps: - # - uses: actions/checkout@v1 - # - name: Get latest version of stable Rust - # run: rustup update stable - # - name: Install ganache - # run: sudo npm install -g ganache - # - name: Run the beacon chain sim that starts from an eth1 contract - # run: cargo run --release --bin simulator eth1-sim - # merge-transition-ubuntu: - # name: merge-transition-ubuntu - # runs-on: ubuntu-latest - # needs: cargo-fmt - # steps: - # - uses: actions/checkout@v1 - # - name: Get latest version of stable Rust - # run: rustup update stable - # - name: Install ganache - # run: sudo npm install -g ganache - # - name: Run the beacon chain sim and go through the merge transition - # run: cargo run --release --bin simulator eth1-sim --post-merge - # no-eth1-simulator-ubuntu: - # name: no-eth1-simulator-ubuntu - # runs-on: ubuntu-latest - # needs: cargo-fmt - # steps: - # - uses: actions/checkout@v1 - # - name: Get latest version of stable Rust - # run: rustup update stable - # - name: Install ganache - # run: sudo npm install -g ganache - # - name: Run the beacon chain sim without an eth1 connection - # run: cargo run --release --bin simulator no-eth1-sim - # syncing-simulator-ubuntu: - # name: syncing-simulator-ubuntu - # runs-on: ubuntu-latest - # needs: cargo-fmt - # steps: - # - uses: actions/checkout@v1 - # - name: Get latest version of stable Rust - # run: rustup update stable - # - name: Install ganache - # run: sudo npm install -g ganache - # - name: Run the syncing simulator - # run: cargo run --release --bin simulator syncing-sim - # doppelganger-protection-test: - # name: doppelganger-protection-test - # runs-on: ubuntu-latest - # needs: cargo-fmt - # steps: - # - uses: actions/checkout@v1 - # - name: Get latest version of stable Rust - # run: rustup update stable - # - name: Install ganache - # run: sudo npm install -g ganache - # - name: Install lighthouse and lcli - # run: | - # make - # make install-lcli - # - name: Run the doppelganger protection success test script - # run: | - # cd scripts/tests - # ./doppelganger_protection.sh success - # - name: Run the doppelganger protection failure test script - # run: | - # cd scripts/tests - # ./doppelganger_protection.sh failure - # execution-engine-integration-ubuntu: - # name: execution-engine-integration-ubuntu - # runs-on: ubuntu-latest - # needs: cargo-fmt - # steps: - # - uses: actions/checkout@v1 - # - uses: actions/setup-go@v2 - # with: - # go-version: '1.17' - # - uses: actions/setup-dotnet@v1 - # with: - # dotnet-version: '6.0.201' - # - name: Get latest version of stable Rust - # run: rustup update stable - # - name: Run exec engine integration tests in release - # 
run: make test-exec-engine - # check-benchmarks: - # name: check-benchmarks - # runs-on: ubuntu-latest - # needs: cargo-fmt - # steps: - # - uses: actions/checkout@v1 - # - name: Get latest version of stable Rust - # run: rustup update stable - # - name: Typecheck benchmark code without running it - # run: make check-benches - # check-consensus: - # name: check-consensus - # runs-on: ubuntu-latest - # needs: cargo-fmt - # steps: - # - uses: actions/checkout@v1 - # - name: Get latest version of stable Rust - # run: rustup update stable - # - name: Typecheck consensus code in strict mode - # run: make check-consensus - # clippy: - # name: clippy - # runs-on: ubuntu-latest - # needs: cargo-fmt - # steps: - # - uses: actions/checkout@v1 - # - name: Get latest version of stable Rust - # run: rustup update stable - # - name: Lint code for quality and style with Clippy - # run: make lint - # - name: Certify Cargo.lock freshness - # run: git diff --exit-code Cargo.lock - # disallowed-from-async-lint: - # name: disallowed-from-async-lint - # runs-on: ubuntu-latest - # needs: cargo-fmt - # continue-on-error: true - # steps: - # - uses: actions/checkout@v1 - # - name: Install SigP Clippy fork - # run: | - # cd .. - # git clone https://github.com/michaelsproul/rust-clippy.git - # cd rust-clippy - # git checkout 31a49666ccfcd7963b63345d6ce757c373f22c2a - # cargo build --release --bin cargo-clippy --bin clippy-driver - # cargo build --release --bin cargo-clippy --bin clippy-driver -Zunstable-options --out-dir $(rustc --print=sysroot)/bin - # - name: Run Clippy with the disallowed-from-async lint - # run: make nightly-lint - # check-msrv: - # name: check-msrv - # runs-on: ubuntu-latest - # needs: [cargo-fmt, extract-msrv] - # steps: - # - uses: actions/checkout@v1 - # - name: Install Rust @ MSRV (${{ needs.extract-msrv.outputs.MSRV }}) - # run: rustup override set ${{ needs.extract-msrv.outputs.MSRV }} - # - name: Run cargo check - # run: cargo check --workspace - # arbitrary-check: - # name: arbitrary-check - # runs-on: ubuntu-latest - # needs: cargo-fmt - # steps: - # - uses: actions/checkout@v1 - # - name: Get latest version of stable Rust - # run: rustup update stable - # - name: Validate state_processing feature arbitrary-fuzz - # run: make arbitrary-fuzz - # cargo-audit: - # name: cargo-audit - # runs-on: ubuntu-latest - # needs: cargo-fmt - # steps: - # - uses: actions/checkout@v1 - # - name: Get latest version of stable Rust - # run: rustup update stable - # - name: Run cargo audit to identify known security vulnerabilities reported to the RustSec Advisory Database - # run: make audit - # cargo-vendor: - # name: cargo-vendor - # runs-on: ubuntu-latest - # needs: cargo-fmt - # steps: - # - uses: actions/checkout@v1 - # - name: Run cargo vendor to make sure dependencies can be vendored for packaging, reproducibility and archival purpose - # run: CARGO_HOME=$(readlink -f $HOME) make vendor - # cargo-udeps: - # name: cargo-udeps - # runs-on: ubuntu-latest - # needs: cargo-fmt - # steps: - # - uses: actions/checkout@v1 - # - name: Install Rust (${{ env.PINNED_NIGHTLY }}) - # run: rustup toolchain install $PINNED_NIGHTLY - # # NOTE: cargo-udeps version is pinned until this issue is resolved: - # # https://github.com/est31/cargo-udeps/issues/135 - # - name: Install cargo-udeps - # run: cargo install cargo-udeps --locked --force --version 0.1.30 - # - name: Create Cargo config dir - # run: mkdir -p .cargo - # - name: Install custom Cargo config - # run: cp -f .github/custom/config.toml 
.cargo/config.toml - # - name: Run cargo udeps to identify unused crates in the dependency graph - # run: make udeps - # env: - # # Allow warnings on Nightly - # RUSTFLAGS: "" + state-transition-vectors-ubuntu: + name: state-transition-vectors-ubuntu + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 + - name: Run state_transition_vectors in release. + run: make run-state-transition-tests + ef-tests-ubuntu: + name: ef-tests-ubuntu + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 + - name: Run consensus-spec-tests with blst, milagro and fake_crypto + run: make test-ef + dockerfile-ubuntu: + name: dockerfile-ubuntu + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Build the root Dockerfile + run: docker build --build-arg FEATURES=portable -t lighthouse:local . + - name: Test the built image + run: docker run -t lighthouse:local lighthouse --version + eth1-simulator-ubuntu: + name: eth1-simulator-ubuntu + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 + - name: Install ganache + run: sudo npm install -g ganache + - name: Run the beacon chain sim that starts from an eth1 contract + run: cargo run --release --bin simulator eth1-sim + merge-transition-ubuntu: + name: merge-transition-ubuntu + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 + - name: Install ganache + run: sudo npm install -g ganache + - name: Run the beacon chain sim and go through the merge transition + run: cargo run --release --bin simulator eth1-sim --post-merge + no-eth1-simulator-ubuntu: + name: no-eth1-simulator-ubuntu + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 + - name: Install ganache + run: sudo npm install -g ganache + - name: Run the beacon chain sim without an eth1 connection + run: cargo run --release --bin simulator no-eth1-sim + syncing-simulator-ubuntu: + name: syncing-simulator-ubuntu + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 + - name: Install ganache + run: sudo npm install -g ganache + - name: Run the syncing simulator + run: cargo run --release --bin simulator syncing-sim + doppelganger-protection-test: + name: doppelganger-protection-test + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 + - name: Install ganache + run: sudo npm install -g ganache + - name: Install lighthouse and lcli + run: | + make + make install-lcli + - name: Run the doppelganger protection 
success test script + run: | + cd scripts/tests + ./doppelganger_protection.sh success + - name: Run the doppelganger protection failure test script + run: | + cd scripts/tests + ./doppelganger_protection.sh failure + execution-engine-integration-ubuntu: + name: execution-engine-integration-ubuntu + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - uses: actions/setup-go@v2 + with: + go-version: '1.17' + - uses: actions/setup-dotnet@v1 + with: + dotnet-version: '6.0.201' + - name: Get latest version of stable Rust + run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 + - name: Run exec engine integration tests in release + run: make test-exec-engine + check-benchmarks: + name: check-benchmarks + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 + - name: Typecheck benchmark code without running it + run: make check-benches + check-consensus: + name: check-consensus + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Typecheck consensus code in strict mode + run: make check-consensus + clippy: + name: clippy + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Install Protoc + uses: arduino/setup-protoc@v1 + - name: Lint code for quality and style with Clippy + run: make lint + - name: Certify Cargo.lock freshness + run: git diff --exit-code Cargo.lock + disallowed-from-async-lint: + name: disallowed-from-async-lint + runs-on: ubuntu-latest + needs: cargo-fmt + continue-on-error: true + steps: + - uses: actions/checkout@v1 + - name: Install SigP Clippy fork + run: | + cd .. 
+ git clone https://github.com/michaelsproul/rust-clippy.git + cd rust-clippy + git checkout 31a49666ccfcd7963b63345d6ce757c373f22c2a + cargo build --release --bin cargo-clippy --bin clippy-driver + cargo build --release --bin cargo-clippy --bin clippy-driver -Zunstable-options --out-dir $(rustc --print=sysroot)/bin + - name: Install Protoc + uses: arduino/setup-protoc@v1 + - name: Run Clippy with the disallowed-from-async lint + run: make nightly-lint + check-msrv: + name: check-msrv + runs-on: ubuntu-latest + needs: [cargo-fmt, extract-msrv] + steps: + - uses: actions/checkout@v1 + - name: Install Rust @ MSRV (${{ needs.extract-msrv.outputs.MSRV }}) + run: rustup override set ${{ needs.extract-msrv.outputs.MSRV }} + - name: Install Protoc + uses: arduino/setup-protoc@v1 + - name: Run cargo check + run: cargo check --workspace + arbitrary-check: + name: arbitrary-check + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Validate state_processing feature arbitrary-fuzz + run: make arbitrary-fuzz + cargo-audit: + name: cargo-audit + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Get latest version of stable Rust + run: rustup update stable + - name: Run cargo audit to identify known security vulnerabilities reported to the RustSec Advisory Database + run: make audit + cargo-vendor: + name: cargo-vendor + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Run cargo vendor to make sure dependencies can be vendored for packaging, reproducibility and archival purpose + run: CARGO_HOME=$(readlink -f $HOME) make vendor + cargo-udeps: + name: cargo-udeps + runs-on: ubuntu-latest + needs: cargo-fmt + steps: + - uses: actions/checkout@v1 + - name: Install Rust (${{ env.PINNED_NIGHTLY }}) + run: rustup toolchain install $PINNED_NIGHTLY + # NOTE: cargo-udeps version is pinned until this issue is resolved: + # https://github.com/est31/cargo-udeps/issues/135 + - name: Install Protoc + uses: arduino/setup-protoc@v1 + - name: Install cargo-udeps + run: cargo install cargo-udeps --locked --force --version 0.1.30 + - name: Create Cargo config dir + run: mkdir -p .cargo + - name: Install custom Cargo config + run: cp -f .github/custom/config.toml .cargo/config.toml + - name: Run cargo udeps to identify unused crates in the dependency graph + run: make udeps + env: + # Allow warnings on Nightly + RUSTFLAGS: "" diff --git a/Dockerfile b/Dockerfile index 86a69c6539d..c6de6f9befa 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ FROM rust:1.62.1-bullseye AS builder -RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev +RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf3-compiler COPY . 
lighthouse ARG FEATURES ENV FEATURES $FEATURES From 199326359454428f743d06809e6f2e35762915d3 Mon Sep 17 00:00:00 2001 From: Diva M Date: Thu, 25 Aug 2022 15:01:33 -0500 Subject: [PATCH 25/31] update instalation steps in ubuntu --- Dockerfile | 2 +- book/src/installation-source.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index c6de6f9befa..72423b17c68 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ FROM rust:1.62.1-bullseye AS builder -RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf3-compiler +RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev protobuf-compiler COPY . lighthouse ARG FEATURES ENV FEATURES $FEATURES diff --git a/book/src/installation-source.md b/book/src/installation-source.md index 81a826d64ff..61eb206421b 100644 --- a/book/src/installation-source.md +++ b/book/src/installation-source.md @@ -16,7 +16,7 @@ operating system. Install the following packages: ```bash -sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf3-compiler +sudo apt install -y git gcc g++ make cmake pkg-config llvm-dev libclang-dev clang protobuf-compiler ``` > Note: Lighthouse requires CMake v3.12 or newer, which isn't available in the package repositories From 9538870a0234336721565e4c1de9200e0d115b0d Mon Sep 17 00:00:00 2001 From: Diva M Date: Fri, 26 Aug 2022 09:32:08 -0500 Subject: [PATCH 26/31] poll swarm in loop --- .../lighthouse_network/src/service/mod.rs | 132 +++++++++--------- 1 file changed, 68 insertions(+), 64 deletions(-) diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 04a3a2803af..980225063cb 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1417,76 +1417,80 @@ impl Network { } pub fn poll_network(&mut self, cx: &mut Context) -> Poll> { - let maybe_event = match self.swarm.poll_next_unpin(cx) { - Poll::Ready(Some(swarm_event)) => match swarm_event { - SwarmEvent::Behaviour(behaviour_event) => match behaviour_event { - BehaviourEvent::Gossipsub(ge) => self.inject_gs_event(ge), - BehaviourEvent::Eth2Rpc(re) => self.inject_rpc_event(re), - BehaviourEvent::Discovery(de) => self.inject_discovery_event(de), - BehaviourEvent::Identify(ie) => self.inject_identify_event(ie), - BehaviourEvent::PeerManager(pe) => self.inject_pm_event(pe), - }, - SwarmEvent::ConnectionEstablished { .. } => None, - SwarmEvent::ConnectionClosed { .. } => None, - SwarmEvent::IncomingConnection { - local_addr, - send_back_addr, - } => { - trace!(self.log, "Incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr); - None - } - SwarmEvent::IncomingConnectionError { - local_addr, - send_back_addr, - error, - } => { - debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => %error); - None - } - SwarmEvent::OutgoingConnectionError { peer_id, error } => { - debug!(self.log, "Failed to dial address"; "peer_id" => ?peer_id, "error" => %error); - None - } - SwarmEvent::BannedPeer { - peer_id, - endpoint: _, - } => { - debug!(self.log, "Banned peer connection rejected"; "peer_id" => %peer_id); - None - } - SwarmEvent::NewListenAddr { address, .. } => { - Some(NetworkEvent::NewListenAddr(address)) - } - SwarmEvent::ExpiredListenAddr { address, .. 
} => { - debug!(self.log, "Listen address expired"; "address" => %address); - None - } - SwarmEvent::ListenerClosed { - addresses, reason, .. - } => { - crit!(self.log, "Listener closed"; "addresses" => ?addresses, "reason" => ?reason); - if Swarm::listeners(&self.swarm).count() == 0 { - Some(NetworkEvent::ZeroListeners) - } else { + loop { + let maybe_event = match self.swarm.poll_next_unpin(cx) { + Poll::Ready(Some(swarm_event)) => match swarm_event { + SwarmEvent::Behaviour(behaviour_event) => match behaviour_event { + BehaviourEvent::Gossipsub(ge) => self.inject_gs_event(ge), + BehaviourEvent::Eth2Rpc(re) => self.inject_rpc_event(re), + BehaviourEvent::Discovery(de) => self.inject_discovery_event(de), + BehaviourEvent::Identify(ie) => self.inject_identify_event(ie), + BehaviourEvent::PeerManager(pe) => self.inject_pm_event(pe), + }, + SwarmEvent::ConnectionEstablished { .. } => None, + SwarmEvent::ConnectionClosed { .. } => None, + SwarmEvent::IncomingConnection { + local_addr, + send_back_addr, + } => { + trace!(self.log, "Incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr); None } - } - SwarmEvent::ListenerError { error, .. } => { - // this is non fatal, but we still check - warn!(self.log, "Listener error"; "error" => ?error); - if Swarm::listeners(&self.swarm).count() == 0 { - Some(NetworkEvent::ZeroListeners) - } else { + SwarmEvent::IncomingConnectionError { + local_addr, + send_back_addr, + error, + } => { + debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => %error); + None + } + SwarmEvent::OutgoingConnectionError { peer_id, error } => { + debug!(self.log, "Failed to dial address"; "peer_id" => ?peer_id, "error" => %error); + None + } + SwarmEvent::BannedPeer { + peer_id, + endpoint: _, + } => { + debug!(self.log, "Banned peer connection rejected"; "peer_id" => %peer_id); + None + } + SwarmEvent::NewListenAddr { address, .. } => { + Some(NetworkEvent::NewListenAddr(address)) + } + SwarmEvent::ExpiredListenAddr { address, .. } => { + debug!(self.log, "Listen address expired"; "address" => %address); None } + SwarmEvent::ListenerClosed { + addresses, reason, .. + } => { + crit!(self.log, "Listener closed"; "addresses" => ?addresses, "reason" => ?reason); + if Swarm::listeners(&self.swarm).count() == 0 { + Some(NetworkEvent::ZeroListeners) + } else { + None + } + } + SwarmEvent::ListenerError { error, .. 
} => { + // this is non fatal, but we still check + warn!(self.log, "Listener error"; "error" => ?error); + if Swarm::listeners(&self.swarm).count() == 0 { + Some(NetworkEvent::ZeroListeners) + } else { + None + } + } + SwarmEvent::Dialing(_) => None, + }, + Poll::Pending | Poll::Ready(None) => { + break; } - SwarmEvent::Dialing(_) => None, - }, - Poll::Pending | Poll::Ready(None) => None, - }; + }; - if let Some(ev) = maybe_event { - return Poll::Ready(ev); + if let Some(ev) = maybe_event { + return Poll::Ready(ev); + } } // perform gossipsub score updates when necessary From 825ff6fc76bfdffcfb9cd7764198ed594a5d1cff Mon Sep 17 00:00:00 2001 From: Diva M Date: Fri, 26 Aug 2022 09:51:35 -0500 Subject: [PATCH 27/31] fixes --- .github/workflows/local-testnet.yml | 3 +- .../lighthouse_network/src/service/mod.rs | 123 +++++++++--------- 2 files changed, 61 insertions(+), 65 deletions(-) diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index 35032a09326..c688c0df330 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -20,7 +20,8 @@ jobs: - name: Get latest version of stable Rust run: rustup update stable - + - name: Install Protoc + uses: arduino/setup-protoc@v1 - name: Install ganache run: npm install ganache@latest --global diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 980225063cb..772976d3162 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1417,75 +1417,70 @@ impl Network { } pub fn poll_network(&mut self, cx: &mut Context) -> Poll> { - loop { - let maybe_event = match self.swarm.poll_next_unpin(cx) { - Poll::Ready(Some(swarm_event)) => match swarm_event { - SwarmEvent::Behaviour(behaviour_event) => match behaviour_event { - BehaviourEvent::Gossipsub(ge) => self.inject_gs_event(ge), - BehaviourEvent::Eth2Rpc(re) => self.inject_rpc_event(re), - BehaviourEvent::Discovery(de) => self.inject_discovery_event(de), - BehaviourEvent::Identify(ie) => self.inject_identify_event(ie), - BehaviourEvent::PeerManager(pe) => self.inject_pm_event(pe), - }, - SwarmEvent::ConnectionEstablished { .. } => None, - SwarmEvent::ConnectionClosed { .. 
} => None, - SwarmEvent::IncomingConnection { - local_addr, - send_back_addr, - } => { - trace!(self.log, "Incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr); - None - } - SwarmEvent::IncomingConnectionError { - local_addr, - send_back_addr, - error, - } => { - debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => %error); - None - } - SwarmEvent::OutgoingConnectionError { peer_id, error } => { - debug!(self.log, "Failed to dial address"; "peer_id" => ?peer_id, "error" => %error); - None - } - SwarmEvent::BannedPeer { - peer_id, - endpoint: _, - } => { - debug!(self.log, "Banned peer connection rejected"; "peer_id" => %peer_id); + while let Poll::Ready(Some(swarm_event)) = self.swarm.poll_next_unpin(cx) { + let maybe_event = match swarm_event { + SwarmEvent::Behaviour(behaviour_event) => match behaviour_event { + BehaviourEvent::Gossipsub(ge) => self.inject_gs_event(ge), + BehaviourEvent::Eth2Rpc(re) => self.inject_rpc_event(re), + BehaviourEvent::Discovery(de) => self.inject_discovery_event(de), + BehaviourEvent::Identify(ie) => self.inject_identify_event(ie), + BehaviourEvent::PeerManager(pe) => self.inject_pm_event(pe), + }, + SwarmEvent::ConnectionEstablished { .. } => None, + SwarmEvent::ConnectionClosed { .. } => None, + SwarmEvent::IncomingConnection { + local_addr, + send_back_addr, + } => { + trace!(self.log, "Incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr); + None + } + SwarmEvent::IncomingConnectionError { + local_addr, + send_back_addr, + error, + } => { + debug!(self.log, "Failed incoming connection"; "our_addr" => %local_addr, "from" => %send_back_addr, "error" => %error); + None + } + SwarmEvent::OutgoingConnectionError { peer_id, error } => { + debug!(self.log, "Failed to dial address"; "peer_id" => ?peer_id, "error" => %error); + None + } + SwarmEvent::BannedPeer { + peer_id, + endpoint: _, + } => { + debug!(self.log, "Banned peer connection rejected"; "peer_id" => %peer_id); + None + } + SwarmEvent::NewListenAddr { address, .. } => { + Some(NetworkEvent::NewListenAddr(address)) + } + SwarmEvent::ExpiredListenAddr { address, .. } => { + debug!(self.log, "Listen address expired"; "address" => %address); + None + } + SwarmEvent::ListenerClosed { + addresses, reason, .. + } => { + crit!(self.log, "Listener closed"; "addresses" => ?addresses, "reason" => ?reason); + if Swarm::listeners(&self.swarm).count() == 0 { + Some(NetworkEvent::ZeroListeners) + } else { None } - SwarmEvent::NewListenAddr { address, .. } => { - Some(NetworkEvent::NewListenAddr(address)) - } - SwarmEvent::ExpiredListenAddr { address, .. } => { - debug!(self.log, "Listen address expired"; "address" => %address); + } + SwarmEvent::ListenerError { error, .. } => { + // this is non fatal, but we still check + warn!(self.log, "Listener error"; "error" => ?error); + if Swarm::listeners(&self.swarm).count() == 0 { + Some(NetworkEvent::ZeroListeners) + } else { None } - SwarmEvent::ListenerClosed { - addresses, reason, .. - } => { - crit!(self.log, "Listener closed"; "addresses" => ?addresses, "reason" => ?reason); - if Swarm::listeners(&self.swarm).count() == 0 { - Some(NetworkEvent::ZeroListeners) - } else { - None - } - } - SwarmEvent::ListenerError { error, .. 
} => { - // this is non fatal, but we still check - warn!(self.log, "Listener error"; "error" => ?error); - if Swarm::listeners(&self.swarm).count() == 0 { - Some(NetworkEvent::ZeroListeners) - } else { - None - } - } - SwarmEvent::Dialing(_) => None, - }, - Poll::Pending | Poll::Ready(None) => { - break; } + SwarmEvent::Dialing(_) => None, }; if let Some(ev) = maybe_event { From 1f6db5d85b30ecb61ef31b75cf16adf7498d2574 Mon Sep 17 00:00:00 2001 From: Diva M Date: Fri, 26 Aug 2022 14:12:10 -0500 Subject: [PATCH 28/31] code reorg --- beacon_node/lighthouse_network/src/lib.rs | 8 +- .../src/service/api_types.rs | 101 +++++++++ .../src/service/behaviour.rs | 9 +- .../lighthouse_network/src/service/mod.rs | 207 ++++-------------- .../src/{old_service.rs => service/utils.rs} | 70 +++++- 5 files changed, 210 insertions(+), 185 deletions(-) create mode 100644 beacon_node/lighthouse_network/src/service/api_types.rs rename beacon_node/lighthouse_network/src/{old_service.rs => service/utils.rs} (80%) diff --git a/beacon_node/lighthouse_network/src/lib.rs b/beacon_node/lighthouse_network/src/lib.rs index 3bfdd36ab39..be4da809cb2 100644 --- a/beacon_node/lighthouse_network/src/lib.rs +++ b/beacon_node/lighthouse_network/src/lib.rs @@ -84,8 +84,6 @@ pub use peer_manager::{ ConnectionDirection, PeerConnectionStatus, PeerInfo, PeerManager, SyncInfo, SyncStatus, }; // pub use service::{load_private_key, Context, Libp2pEvent, Service, NETWORK_KEY_FILENAME}; -pub use service::{Gossipsub, NetworkEvent, PeerRequestId, Request, Response}; - -mod old_service; - -pub use old_service::*; +pub use service::api_types::{PeerRequestId, Request, Response}; +pub use service::utils::*; +pub use service::{Gossipsub, NetworkEvent}; diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs new file mode 100644 index 00000000000..e5d81737cfb --- /dev/null +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -0,0 +1,101 @@ +use std::sync::Arc; + +use libp2p::core::connection::ConnectionId; +use types::{EthSpec, SignedBeaconBlock}; + +use crate::rpc::{ + methods::{ + BlocksByRangeRequest, BlocksByRootRequest, OldBlocksByRangeRequest, RPCCodedResponse, + RPCResponse, ResponseTermination, StatusMessage, + }, + OutboundRequest, SubstreamId, +}; + +/// Identifier of requests sent by a peer. +pub type PeerRequestId = (ConnectionId, SubstreamId); + +/// Identifier of a request. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum RequestId { + Application(AppReqId), + Internal, +} + +/// The type of RPC requests the Behaviour informs it has received and allows for sending. +/// +// NOTE: This is an application-level wrapper over the lower network level requests that can be +// sent. The main difference is the absence of the Ping, Metadata and Goodbye protocols, which don't +// leave the Behaviour. For all protocols managed by RPC see `RPCRequest`. +#[derive(Debug, Clone, PartialEq)] +pub enum Request { + /// A Status message. + Status(StatusMessage), + /// A blocks by range request. + BlocksByRange(BlocksByRangeRequest), + /// A request blocks root request. 
+ BlocksByRoot(BlocksByRootRequest), +} + +impl std::convert::From for OutboundRequest { + fn from(req: Request) -> OutboundRequest { + match req { + Request::BlocksByRoot(r) => OutboundRequest::BlocksByRoot(r), + Request::BlocksByRange(BlocksByRangeRequest { start_slot, count }) => { + OutboundRequest::BlocksByRange(OldBlocksByRangeRequest { + start_slot, + count, + step: 1, + }) + } + Request::Status(s) => OutboundRequest::Status(s), + } + } +} + +/// The type of RPC responses the Behaviour informs it has received, and allows for sending. +/// +// NOTE: This is an application-level wrapper over the lower network level responses that can be +// sent. The main difference is the absense of Pong and Metadata, which don't leave the +// Behaviour. For all protocol reponses managed by RPC see `RPCResponse` and +// `RPCCodedResponse`. +#[derive(Debug, Clone, PartialEq)] +pub enum Response { + /// A Status message. + Status(StatusMessage), + /// A response to a get BLOCKS_BY_RANGE request. A None response signals the end of the batch. + BlocksByRange(Option>>), + /// A response to a get BLOCKS_BY_ROOT request. + BlocksByRoot(Option>>), +} + +impl std::convert::From> for RPCCodedResponse { + fn from(resp: Response) -> RPCCodedResponse { + match resp { + Response::BlocksByRoot(r) => match r { + Some(b) => RPCCodedResponse::Success(RPCResponse::BlocksByRoot(b)), + None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRoot), + }, + Response::BlocksByRange(r) => match r { + Some(b) => RPCCodedResponse::Success(RPCResponse::BlocksByRange(b)), + None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange), + }, + Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)), + } + } +} + +impl slog::Value for RequestId { + fn serialize( + &self, + record: &slog::Record, + key: slog::Key, + serializer: &mut dyn slog::Serializer, + ) -> slog::Result { + match self { + RequestId::Internal => slog::Value::serialize("Behaviour", record, key, serializer), + RequestId::Application(ref id) => { + slog::Value::serialize(&format_args!("{:?}", id), record, key, serializer) + } + } + } +} diff --git a/beacon_node/lighthouse_network/src/service/behaviour.rs b/beacon_node/lighthouse_network/src/service/behaviour.rs index bd5109546cd..8327293a745 100644 --- a/beacon_node/lighthouse_network/src/service/behaviour.rs +++ b/beacon_node/lighthouse_network/src/service/behaviour.rs @@ -12,16 +12,11 @@ use libp2p::swarm::NetworkBehaviour; use libp2p::NetworkBehaviour; use types::EthSpec; +use super::api_types::RequestId; + pub type SubscriptionFilter = MaxCountSubscriptionFilter; pub type Gossipsub = BaseGossipsub; -/// Identifier of a request. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum RequestId { - Application(AppReqId), - Internal, -} - #[derive(NetworkBehaviour)] pub(crate) struct Behaviour { /// The routing pub-sub mechanism for eth2. 
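
The new service/api_types.rs above keeps one enum for both kinds of request id: requests issued by the application (RequestId::Application) and requests issued by the behaviour for its own purposes (RequestId::Internal, logged as "Behaviour" by the slog::Value impl). A minimal, self-contained sketch of that wrapper pattern follows; the u32 id and the route_response helper are illustrative stand-ins, not code from this repository:

    // Stand-in for the generic application request id (`AppReqId` in the patch).
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    enum RequestId<AppReqId> {
        Application(AppReqId),
        Internal,
    }

    // Illustrative router: responses to application requests go up the stack,
    // responses to behaviour-internal requests stay inside the network layer.
    fn route_response<AppReqId: std::fmt::Debug>(id: RequestId<AppReqId>) {
        match id {
            RequestId::Application(app_id) => println!("deliver to application: {app_id:?}"),
            RequestId::Internal => println!("consumed by the behaviour"),
        }
    }

    fn main() {
        route_response(RequestId::Application(7_u32));
        route_response::<u32>(RequestId::Internal);
    }

Keeping both variants in a single enum lets one RPC response path decide whether a result is surfaced to the caller or handled internally, which is the split the moved type preserves.
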
diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 772976d3162..a06e09ea483 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -14,61 +14,47 @@ use crate::types::{ SubnetDiscovery, }; use crate::Eth2Enr; -use crate::{ - build_transport, strip_peer_id, Context as ServiceContext, MAX_CONNECTIONS_PER_PEER, - METADATA_FILENAME, -}; use crate::{error, metrics, Enr, NetworkGlobals, PubsubMessage, TopicHash}; use crate::{rpc::*, EnrExt}; +use api_types::{PeerRequestId, Request, RequestId, Response}; use futures::stream::StreamExt; use gossipsub_scoring_parameters::{lighthouse_gossip_thresholds, PeerScoreSettings}; use libp2p::bandwidth::BandwidthSinks; use libp2p::gossipsub::error::PublishError; -use libp2p::swarm::{ConnectionLimits, SwarmBuilder, SwarmEvent}; -use libp2p::Swarm; -use libp2p::{ - core::connection::ConnectionId, - gossipsub::{ - metrics::Config as GossipsubMetricsConfig, - subscription_filter::{MaxCountSubscriptionFilter, WhitelistSubscriptionFilter}, - GossipsubEvent, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, - }, - identify::{Identify, IdentifyConfig, IdentifyEvent}, - multiaddr::{Multiaddr, Protocol as MProtocol}, - PeerId, +use libp2p::gossipsub::metrics::Config as GossipsubMetricsConfig; +use libp2p::gossipsub::subscription_filter::MaxCountSubscriptionFilter; +use libp2p::gossipsub::{ + GossipsubEvent, IdentTopic as Topic, MessageAcceptance, MessageAuthenticity, MessageId, }; +use libp2p::identify::{Identify, IdentifyConfig, IdentifyEvent}; +use libp2p::multiaddr::{Multiaddr, Protocol as MProtocol}; +use libp2p::swarm::{ConnectionLimits, Swarm, SwarmBuilder, SwarmEvent}; +use libp2p::PeerId; use slog::{crit, debug, info, o, trace, warn}; -use ssz::Encode; -use std::collections::HashSet; -use std::fs::File; -use std::io::Write; -use std::path::{Path, PathBuf}; + +use std::marker::PhantomData; +use std::path::PathBuf; use std::pin::Pin; -use std::{ - marker::PhantomData, - sync::Arc, - task::{Context, Poll}, -}; +use std::sync::Arc; +use std::task::{Context, Poll}; use types::{ - consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, - SignedBeaconBlock, Slot, SubnetId, SyncSubnetId, + consts::altair::SYNC_COMMITTEE_SUBNET_COUNT, EnrForkId, EthSpec, ForkContext, Slot, SubnetId, }; +use utils::{build_transport, strip_peer_id, Context as ServiceContext, MAX_CONNECTIONS_PER_PEER}; -use self::behaviour::{Behaviour, RequestId}; +use self::behaviour::Behaviour; use self::gossip_cache::GossipCache; +pub mod api_types; mod behaviour; mod gossip_cache; pub mod gossipsub_scoring_parameters; - +pub mod utils; /// The number of peers we target per subnet for discovery queries. pub const TARGET_SUBNET_PEERS: usize = 6; const MAX_IDENTIFY_ADDRESSES: usize = 10; -/// Identifier of requests sent by a peer. -pub type PeerRequestId = (ConnectionId, SubstreamId); - /// The types of events than can be obtained from polling the behaviour. 
#[derive(Debug)] pub enum NetworkEvent { @@ -160,7 +146,7 @@ impl Network { let mut config = ctx.config.clone(); trace!(log, "Libp2p Service starting"); // initialise the node's ID - let local_keypair = crate::load_private_key(&config, &log); + let local_keypair = utils::load_private_key(&config, &log); // set up a collection of variables accessible outside of the network crate let network_globals = { @@ -172,7 +158,7 @@ impl Network { &log, )?; // Construct the metadata - let meta_data = crate::load_or_build_metadata(&config.network_dir, &log); + let meta_data = utils::load_or_build_metadata(&config.network_dir, &log); let globals = NetworkGlobals::new( enr, config.libp2p_port, @@ -240,7 +226,7 @@ impl Network { let possible_fork_digests = ctx.fork_context.all_fork_digests(); let filter = MaxCountSubscriptionFilter { - filter: Self::create_whitelist_filter( + filter: utils::create_whitelist_filter( possible_fork_digests, ctx.chain_spec.attestation_subnet_count, SYNC_COMMITTEE_SUBNET_COUNT, @@ -363,7 +349,7 @@ impl Network { let mut network = Network { swarm, - network_globals: network_globals.clone(), + network_globals, enr_fork_id, network_dir: config.network_dir.clone(), fork_context: ctx.fork_context, @@ -377,6 +363,8 @@ impl Network { network.start(&config).await?; + let network_globals = network.network_globals.clone(); + Ok((network, network_globals)) } @@ -911,7 +899,7 @@ impl Network { } } // Save the updated metadata to disk - save_metadata_to_disk( + utils::save_metadata_to_disk( &self.network_dir, self.network_globals.local_metadata.read().clone(), &self.log, @@ -1027,37 +1015,9 @@ impl Network { } } - /// Creates a whitelist topic filter that covers all possible topics using the given set of - /// possible fork digests. - fn create_whitelist_filter( - possible_fork_digests: Vec<[u8; 4]>, - attestation_subnet_count: u64, - sync_committee_subnet_count: u64, - ) -> WhitelistSubscriptionFilter { - let mut possible_hashes = HashSet::new(); - for fork_digest in possible_fork_digests { - let mut add = |kind| { - let topic: Topic = - GossipTopic::new(kind, GossipEncoding::SSZSnappy, fork_digest).into(); - possible_hashes.insert(topic.hash()); - }; + /* Sub-behaviour event handling functions */ - use GossipKind::*; - add(BeaconBlock); - add(BeaconAggregateAndProof); - add(VoluntaryExit); - add(ProposerSlashing); - add(AttesterSlashing); - add(SignedContributionAndProof); - for id in 0..attestation_subnet_count { - add(Attestation(SubnetId::new(id))); - } - for id in 0..sync_committee_subnet_count { - add(SyncCommitteeMessage(SyncSubnetId::new(id))); - } - } - WhitelistSubscriptionFilter(possible_hashes) - } + /// Handle a gossipsub event. fn inject_gs_event(&mut self, event: GossipsubEvent) -> Option> { match event { GossipsubEvent::Message { @@ -1153,6 +1113,7 @@ impl Network { None } + /// Handle an RPC event. fn inject_rpc_event( &mut self, event: RPCMessage, TSpec>, @@ -1321,6 +1282,7 @@ impl Network { } } + /// Handle a discovery event. fn inject_discovery_event( &mut self, event: DiscoveredPeers, @@ -1336,6 +1298,7 @@ impl Network { None } + /// Handle an identify event. fn inject_identify_event( &mut self, event: IdentifyEvent, @@ -1359,6 +1322,7 @@ impl Network { None } + /// Handle a peer manager event. fn inject_pm_event( &mut self, event: PeerManagerEvent, @@ -1416,10 +1380,16 @@ impl Network { } } + /* Networking polling */ + + /// Poll the p2p networking stack. + /// + /// This will poll the swarm and do maintenance routines. 
pub fn poll_network(&mut self, cx: &mut Context) -> Poll> { while let Poll::Ready(Some(swarm_event)) = self.swarm.poll_next_unpin(cx) { let maybe_event = match swarm_event { SwarmEvent::Behaviour(behaviour_event) => match behaviour_event { + // Handle sub-behaviour events. BehaviourEvent::Gossipsub(ge) => self.inject_gs_event(ge), BehaviourEvent::Eth2Rpc(re) => self.inject_rpc_event(re), BehaviourEvent::Discovery(de) => self.inject_discovery_event(de), @@ -1515,104 +1485,3 @@ impl Network { futures::future::poll_fn(|cx| self.poll_network(cx)).await } } - -/* Public API types */ - -/// The type of RPC requests the Behaviour informs it has received and allows for sending. -/// -// NOTE: This is an application-level wrapper over the lower network level requests that can be -// sent. The main difference is the absence of the Ping, Metadata and Goodbye protocols, which don't -// leave the Behaviour. For all protocols managed by RPC see `RPCRequest`. -#[derive(Debug, Clone, PartialEq)] -pub enum Request { - /// A Status message. - Status(StatusMessage), - /// A blocks by range request. - BlocksByRange(BlocksByRangeRequest), - /// A request blocks root request. - BlocksByRoot(BlocksByRootRequest), -} - -impl std::convert::From for OutboundRequest { - fn from(req: Request) -> OutboundRequest { - match req { - Request::BlocksByRoot(r) => OutboundRequest::BlocksByRoot(r), - Request::BlocksByRange(BlocksByRangeRequest { start_slot, count }) => { - OutboundRequest::BlocksByRange(methods::OldBlocksByRangeRequest { - start_slot, - count, - step: 1, - }) - } - Request::Status(s) => OutboundRequest::Status(s), - } - } -} - -/// The type of RPC responses the Behaviour informs it has received, and allows for sending. -/// -// NOTE: This is an application-level wrapper over the lower network level responses that can be -// sent. The main difference is the absense of Pong and Metadata, which don't leave the -// Behaviour. For all protocol reponses managed by RPC see `RPCResponse` and -// `RPCCodedResponse`. -#[derive(Debug, Clone, PartialEq)] -pub enum Response { - /// A Status message. - Status(StatusMessage), - /// A response to a get BLOCKS_BY_RANGE request. A None response signals the end of the batch. - BlocksByRange(Option>>), - /// A response to a get BLOCKS_BY_ROOT request. 
- BlocksByRoot(Option>>), -} - -impl std::convert::From> for RPCCodedResponse { - fn from(resp: Response) -> RPCCodedResponse { - match resp { - Response::BlocksByRoot(r) => match r { - Some(b) => RPCCodedResponse::Success(RPCResponse::BlocksByRoot(b)), - None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRoot), - }, - Response::BlocksByRange(r) => match r { - Some(b) => RPCCodedResponse::Success(RPCResponse::BlocksByRange(b)), - None => RPCCodedResponse::StreamTermination(ResponseTermination::BlocksByRange), - }, - Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)), - } - } -} - -/// Persist metadata to disk -pub fn save_metadata_to_disk(dir: &Path, metadata: MetaData, log: &slog::Logger) { - let _ = std::fs::create_dir_all(&dir); - match File::create(dir.join(METADATA_FILENAME)) - .and_then(|mut f| f.write_all(&metadata.as_ssz_bytes())) - { - Ok(_) => { - debug!(log, "Metadata written to disk"); - } - Err(e) => { - warn!( - log, - "Could not write metadata to disk"; - "file" => format!("{:?}{:?}", dir, METADATA_FILENAME), - "error" => %e - ); - } - } -} - -impl slog::Value for RequestId { - fn serialize( - &self, - record: &slog::Record, - key: slog::Key, - serializer: &mut dyn slog::Serializer, - ) -> slog::Result { - match self { - RequestId::Internal => slog::Value::serialize("Behaviour", record, key, serializer), - RequestId::Application(ref id) => { - slog::Value::serialize(&format_args!("{:?}", id), record, key, serializer) - } - } - } -} diff --git a/beacon_node/lighthouse_network/src/old_service.rs b/beacon_node/lighthouse_network/src/service/utils.rs similarity index 80% rename from beacon_node/lighthouse_network/src/old_service.rs rename to beacon_node/lighthouse_network/src/service/utils.rs index 6047d4cd722..2aaa46fe8b6 100644 --- a/beacon_node/lighthouse_network/src/old_service.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -1,21 +1,27 @@ use crate::multiaddr::Protocol; use crate::rpc::{MetaData, MetaDataV1, MetaDataV2}; -use crate::service::save_metadata_to_disk; -use crate::types::{error, EnrAttestationBitfield, EnrSyncCommitteeBitfield}; -use crate::NetworkConfig; +use crate::types::{ + error, EnrAttestationBitfield, EnrSyncCommitteeBitfield, GossipEncoding, GossipKind, +}; +use crate::{GossipTopic, NetworkConfig}; use libp2p::bandwidth::{BandwidthLogging, BandwidthSinks}; use libp2p::core::{ identity::Keypair, multiaddr::Multiaddr, muxing::StreamMuxerBox, transport::Boxed, }; +use libp2p::gossipsub::subscription_filter::WhitelistSubscriptionFilter; +use libp2p::gossipsub::IdentTopic as Topic; use libp2p::{core, noise, PeerId, Transport}; use prometheus_client::registry::Registry; use slog::{debug, warn}; use ssz::Decode; +use ssz::Encode; +use std::collections::HashSet; use std::fs::File; use std::io::prelude::*; +use std::path::Path; use std::sync::Arc; use std::time::Duration; -use types::{ChainSpec, EnrForkId, EthSpec, ForkContext}; +use types::{ChainSpec, EnrForkId, EthSpec, ForkContext, SubnetId, SyncSubnetId}; pub const NETWORK_KEY_FILENAME: &str = "key"; /// The maximum simultaneous libp2p connections per peer. @@ -224,3 +230,59 @@ pub fn load_or_build_metadata( save_metadata_to_disk(network_dir, meta_data.clone(), log); meta_data } + +/// Creates a whitelist topic filter that covers all possible topics using the given set of +/// possible fork digests. 
+pub(crate) fn create_whitelist_filter( + possible_fork_digests: Vec<[u8; 4]>, + attestation_subnet_count: u64, + sync_committee_subnet_count: u64, +) -> WhitelistSubscriptionFilter { + let mut possible_hashes = HashSet::new(); + for fork_digest in possible_fork_digests { + let mut add = |kind| { + let topic: Topic = + GossipTopic::new(kind, GossipEncoding::SSZSnappy, fork_digest).into(); + possible_hashes.insert(topic.hash()); + }; + + use GossipKind::*; + add(BeaconBlock); + add(BeaconAggregateAndProof); + add(VoluntaryExit); + add(ProposerSlashing); + add(AttesterSlashing); + add(SignedContributionAndProof); + for id in 0..attestation_subnet_count { + add(Attestation(SubnetId::new(id))); + } + for id in 0..sync_committee_subnet_count { + add(SyncCommitteeMessage(SyncSubnetId::new(id))); + } + } + WhitelistSubscriptionFilter(possible_hashes) +} + +/// Persist metadata to disk +pub(crate) fn save_metadata_to_disk( + dir: &Path, + metadata: MetaData, + log: &slog::Logger, +) { + let _ = std::fs::create_dir_all(&dir); + match File::create(dir.join(METADATA_FILENAME)) + .and_then(|mut f| f.write_all(&metadata.as_ssz_bytes())) + { + Ok(_) => { + debug!(log, "Metadata written to disk"); + } + Err(e) => { + warn!( + log, + "Could not write metadata to disk"; + "file" => format!("{:?}{:?}", dir, METADATA_FILENAME), + "error" => %e + ); + } + } +} From 2d04c8790aaeb7c738dd2c7de94acfaf22afa761 Mon Sep 17 00:00:00 2001 From: Diva M Date: Tue, 30 Aug 2022 15:39:28 -0500 Subject: [PATCH 29/31] remove advisory ignore --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 6b5c6b3e5d5..0e7af8573f7 100644 --- a/Makefile +++ b/Makefile @@ -178,7 +178,7 @@ arbitrary-fuzz: # Runs cargo audit (Audit Cargo.lock files for crates with security vulnerabilities reported to the RustSec Advisory Database) audit: cargo install --force cargo-audit - cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2020-0159 --ignore RUSTSEC-2022-0040 + cargo audit --ignore RUSTSEC-2020-0071 --ignore RUSTSEC-2020-0159 # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose. vendor: From 01cf4806d9ebc0109098af9f76c5b5553d3fb66f Mon Sep 17 00:00:00 2001 From: Diva M Date: Mon, 5 Sep 2022 12:10:02 -0500 Subject: [PATCH 30/31] update diff From 1e5694fee8b14c908c3c7a99c7bce2073cf110a8 Mon Sep 17 00:00:00 2001 From: Diva M Date: Tue, 6 Sep 2022 16:10:39 -0500 Subject: [PATCH 31/31] add docs to the Network's start function --- beacon_node/lighthouse_network/src/service/mod.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index a06e09ea483..59512d29553 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -368,7 +368,11 @@ impl Network { Ok((network, network_globals)) } - // TODO: docs + /// Starts the network: + /// + /// - Starts listening in the given ports. + /// - Dials boot-nodes and libp2p peers. + /// - Subscribes to starting gossipsub topics. async fn start(&mut self, config: &crate::NetworkConfig) -> error::Result<()> { let enr = self.network_globals.local_enr(); info!(self.log, "Libp2p Starting"; "peer_id" => %enr.peer_id(), "bandwidth_config" => format!("{}-{}", config.network_load, NetworkLoad::from(config.network_load).name));
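
Patches 26 and 27 replace the single match on self.swarm.poll_next_unpin(cx) in poll_network with a while let Poll::Ready(Some(..)) loop that drains every ready swarm event before falling through to the maintenance work. The usual motivation for this shape is that an event translated to None (one that is only logged) would otherwise leave later, already-ready events sitting in the swarm until the task happens to be woken again. Below is a minimal, self-contained sketch of that polling shape, assuming only the futures crate; the poll_next_event helper and the demo in main are illustrative, not repository code:

    use futures::stream::{Stream, StreamExt};
    use std::task::{Context, Poll};

    /// Drain all ready items, returning the first one that maps to an externally
    /// visible event; items handled internally yield `None` and the loop continues.
    /// This mirrors the `while let Poll::Ready(Some(..))` shape of `poll_network`.
    fn poll_next_event<S, I, E>(
        stream: &mut S,
        cx: &mut Context<'_>,
        mut translate: impl FnMut(I) -> Option<E>,
    ) -> Poll<E>
    where
        S: Stream<Item = I> + Unpin,
    {
        while let Poll::Ready(Some(item)) = stream.poll_next_unpin(cx) {
            if let Some(event) = translate(item) {
                return Poll::Ready(event);
            }
            // `None`: the item was consumed internally (e.g. only logged), so keep
            // polling instead of leaving ready items queued until the next wake-up.
        }
        // Nothing ready (or the stream ended): fall through to maintenance, then Pending.
        Poll::Pending
    }

    fn main() {
        let waker = futures::task::noop_waker();
        let mut cx = Context::from_waker(&waker);
        let mut events = futures::stream::iter(vec![1, 2, 3, 4]);
        // Odd numbers stand in for internally handled events; the first even one is surfaced.
        let ev = poll_next_event(&mut events, &mut cx, |n| (n % 2 == 0).then_some(n));
        assert_eq!(ev, Poll::Ready(2));
    }

The patch keeps exposing this loop to async callers through futures::future::poll_fn(|cx| self.poll_network(cx)) in next_event, so consumers simply await the next NetworkEvent while gossip-cache and score maintenance runs whenever the swarm has nothing ready.
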