From fdad1c6bf7914bbdc0ffc25ef729506196881c35 Mon Sep 17 00:00:00 2001
From: stringhandler
Date: Fri, 3 Mar 2023 15:59:52 +0200
Subject: [PATCH] feat(peer_db)!: more accurate peer stats per address (#5142)

Description
---
Removes a lot of duplicate stats and online/offline fields on peers.

Motivation
---
Due to a recent bug, wallets would get a new address every time they restarted. There was some handling of peers with multiple addresses, but in general only the latest address signed by the peer was used. This prevented a node from acting as a bridge between Tor and IP, or even IP and DNS. This PR (controversially, I'm sure) removes the signed address and instead allows a node to try an address and check whether the correct public key is on the other end. If so, the node can then choose the address with the best latency to communicate with that peer. IMO this is no less secure, since the worst case is that a node can force another node to contact an incorrect address, only to find that the node at the other end has a different public key. This can already be done by generating a new peer id, taking the address and signing it with the new peer id (i.e. a Sybil address). Previously a peer may have been banned for answering at an address with the wrong public key, but this can also be manipulated. Now, only the address is marked as bad.

Also reworked the general sorting of peer addresses, although in most previous cases there would only be one address.

Also reworked the order in which a node tries its neighbours. To make it more predictable and to prevent looping, the connectivity service will try to find its neighbours within a strict distance of itself. Previously it took into account `last_attempted_at` or `last_seen_at`, which I am not convinced was working.

You'll note that there are some tokio improvements as well. During testing, I noticed that the peer query was, and still is, extremely heavy. In tokio console, queries took on the order of 10s to 100s. Moving this to a `block_in_place` frees up the tokio worker threads. In a future PR I will improve this even further by removing unnecessary calls.

Rough sketches of the address check, the neighbour ordering and the `block_in_place` pattern are included at the end of this description.

Also:
- Removed some dead code (`OptionalBoundedExecutor`) and replaced the re-export of tokio's `runtime` with direct use of tokio
- Used standard tokio spawns for `BoundedExecutor` instead of holding a runtime reference.

BREAKING CHANGE: Note that because the Peer struct has changed so dramatically, a migration is not really pragmatic.
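
For illustration, a minimal sketch of the "check the key, mark only the address as bad" behaviour described above. The types and function names here are hypothetical stand-ins, not the actual `tari_comms` API; the real patch tracks this on `MultiaddrWithStats` via fields such as `last_failed_reason`, `avg_latency` and `quality_score`:

```rust
use std::time::Duration;

/// Hypothetical per-address bookkeeping, loosely modelled on MultiaddrWithStats.
struct AddressStats {
    avg_latency: Option<Duration>,
    last_failed_reason: Option<String>,
}

/// Hypothetical outcome of dialling one address and running the identity handshake.
enum DialOutcome {
    /// The remote side proved ownership of this public key.
    Verified { public_key: [u8; 32], latency: Duration },
    /// The connection or handshake failed.
    Failed(String),
}

fn record_dial_outcome(expected_key: &[u8; 32], stats: &mut AddressStats, outcome: DialOutcome) {
    match outcome {
        DialOutcome::Verified { public_key, latency } if &public_key == expected_key => {
            // The correct peer answered: keep the address and track latency so the
            // lowest-latency address can be preferred for future dials.
            stats.avg_latency = Some(latency);
            stats.last_failed_reason = None;
        },
        DialOutcome::Verified { .. } => {
            // The wrong public key answered. That signal can be forged (a Sybil
            // address), so only this address is marked bad; the peer is not banned.
            stats.last_failed_reason = Some("public key mismatch".to_string());
        },
        DialOutcome::Failed(reason) => stats.last_failed_reason = Some(reason),
    }
}
```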
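
A hedged sketch of the reworked neighbour ordering, assuming "strict distance" amounts to ranking candidates purely by XOR distance from our own node id rather than by `last_attempted_at`/`last_seen_at`; the id length and helper names are illustrative only:

```rust
/// XOR the two ids byte-for-byte; smaller results mean "closer" in the keyspace.
fn xor_distance<const N: usize>(a: &[u8; N], b: &[u8; N]) -> [u8; N] {
    let mut out = [0u8; N];
    for i in 0..N {
        out[i] = a[i] ^ b[i];
    }
    out
}

/// Pick the k candidates closest to our id, ignoring when they were last attempted
/// or last seen, so the selection is deterministic and cannot loop.
fn closest_neighbours<const N: usize>(our_id: &[u8; N], mut candidates: Vec<[u8; N]>, k: usize) -> Vec<[u8; N]> {
    candidates.sort_by_key(|id| xor_distance(our_id, id));
    candidates.truncate(k);
    candidates
}

fn main() {
    let ours = [0u8; 4];
    let picked = closest_neighbours(&ours, vec![[9, 0, 0, 0], [1, 0, 0, 0], [3, 0, 0, 0]], 2);
    assert_eq!(picked, vec![[1, 0, 0, 0], [3, 0, 0, 0]]);
}
```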
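
Finally, the general shape of the tokio change referred to above (not the code from the patch): a blocking peer-database query wrapped in `tokio::task::block_in_place`, which requires the multi-threaded runtime flavour and panics on `current_thread`, hence the affected tests switching to `#[tokio::test(flavor = "multi_thread", worker_threads = 1)]`. `query_peers_sync` is a placeholder for the real blocking query:

```rust
use tokio::task;

/// Stand-in for the real (synchronous, expensive) peer-db query.
fn query_peers_sync() -> Vec<String> {
    vec!["peer_a".to_string(), "peer_b".to_string()]
}

async fn perform_query() -> Vec<String> {
    // block_in_place lets this worker thread block without starving the runtime,
    // instead of stalling the async executor for the duration of the query.
    task::block_in_place(query_peers_sync)
}

#[tokio::main(flavor = "multi_thread")]
async fn main() {
    let peers = perform_query().await;
    println!("found {} peers", peers.len());
}
```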
The peer_db for wallets and base nodes **must** be deleted on before starting up --- .gitignore | 2 + Cargo.lock | 286 ++++++++++++-- .../tari_app_grpc/proto/network.proto | 5 +- .../tari_app_grpc/src/conversions/peer.rs | 23 +- .../src/identity_management.rs | 21 +- applications/tari_base_node/Cargo.toml | 1 + applications/tari_base_node/src/builder.rs | 4 +- applications/tari_base_node/src/cli.rs | 2 + .../src/commands/command/add_peer.rs | 3 +- .../src/commands/command/get_peer.rs | 66 ++-- .../src/commands/command/list_peers.rs | 30 +- .../commands/command/reset_offline_peers.rs | 8 +- .../src/commands/command/whoami.rs | 4 +- .../src/grpc/base_node_grpc_server.rs | 2 +- applications/tari_base_node/src/lib.rs | 4 + applications/tari_base_node/src/main.rs | 16 +- .../src/grpc/wallet_grpc_server.rs | 2 +- .../tari_console_wallet/src/init/mod.rs | 30 +- .../src/ui/components/menu.rs | 2 +- .../src/ui/components/network_tab.rs | 2 +- .../src/ui/components/notification_tab.rs | 2 +- .../src/ui/state/app_state.rs | 23 +- .../tari_console_wallet/src/utils/db.rs | 3 +- .../src/utils/formatting.rs | 2 +- .../chain_metadata_service/service.rs | 5 +- .../comms_interface/inbound_handlers.rs | 4 +- .../core/src/mempool/sync_protocol/mod.rs | 2 +- .../core/src/mempool/sync_protocol/test.rs | 8 +- base_layer/core/tests/mempool.rs | 2 +- base_layer/core/tests/node_service.rs | 6 +- base_layer/core/tests/node_state_machine.rs | 2 +- base_layer/p2p/src/config.rs | 4 +- base_layer/p2p/src/initialization.rs | 4 +- base_layer/p2p/src/peer_seeds.rs | 3 +- .../p2p/src/services/liveness/service.rs | 5 +- base_layer/p2p/src/test_utils.rs | 4 +- .../wallet/src/transaction_service/handle.rs | 6 +- .../wallet/src/transaction_service/service.rs | 4 +- base_layer/wallet/src/wallet.rs | 13 +- base_layer/wallet/tests/contacts_service.rs | 4 +- .../tests/support/comms_and_services.rs | 3 +- .../transaction_service_tests/service.rs | 6 +- base_layer/wallet/tests/wallet.rs | 28 +- base_layer/wallet_ffi/src/lib.rs | 23 +- comms/core/examples/stress/node.rs | 2 +- comms/core/examples/stress/prompt.rs | 5 +- comms/core/examples/stress/service.rs | 4 +- comms/core/examples/tor.rs | 18 +- comms/core/examples/vanity_id.rs | 2 +- comms/core/src/bounded_executor.rs | 128 +------ comms/core/src/builder/comms_node.rs | 15 +- comms/core/src/builder/tests.rs | 30 +- comms/core/src/connection_manager/common.rs | 195 +++++----- .../core/src/connection_manager/dial_state.rs | 4 + comms/core/src/connection_manager/dialer.rs | 287 ++++++++------ comms/core/src/connection_manager/error.rs | 2 - comms/core/src/connection_manager/listener.rs | 39 +- comms/core/src/connection_manager/liveness.rs | 10 +- comms/core/src/connection_manager/manager.rs | 2 +- comms/core/src/connection_manager/mod.rs | 2 +- .../src/connection_manager/peer_connection.rs | 40 +- .../tests/listener_dialer.rs | 18 +- .../src/connection_manager/tests/manager.rs | 23 +- comms/core/src/connectivity/manager.rs | 58 +-- comms/core/src/connectivity/requester.rs | 5 +- comms/core/src/connectivity/test.rs | 28 +- comms/core/src/lib.rs | 3 +- comms/core/src/memsocket/mod.rs | 24 +- comms/core/src/multiplexing/yamux.rs | 31 +- comms/core/src/net_address/mod.rs | 2 +- .../src/net_address/multiaddr_with_stats.rs | 352 +++++++++++------- .../net_address/mutliaddresses_with_stats.rs | 322 ++++++++-------- comms/core/src/noise/config.rs | 9 +- comms/core/src/noise/socket.rs | 14 +- .../core/src/peer_manager/connection_stats.rs | 191 ---------- comms/core/src/peer_manager/error.rs | 
6 + .../src/peer_manager/identity_signature.rs | 52 +-- comms/core/src/peer_manager/manager.rs | 89 +++-- comms/core/src/peer_manager/migrations.rs | 4 +- comms/core/src/peer_manager/migrations/v5.rs | 144 ------- comms/core/src/peer_manager/migrations/v6.rs | 124 ------ comms/core/src/peer_manager/migrations/v7.rs | 74 ++-- comms/core/src/peer_manager/mod.rs | 6 +- comms/core/src/peer_manager/node_identity.rs | 94 ++++- comms/core/src/peer_manager/peer.rs | 231 +++--------- .../src/peer_manager/peer_identity_claim.rs | 114 ++++++ comms/core/src/peer_manager/peer_query.rs | 7 +- comms/core/src/peer_manager/peer_storage.rs | 228 +++--------- comms/core/src/pipeline/inbound.rs | 8 +- comms/core/src/pipeline/outbound.rs | 53 +-- comms/core/src/pipeline/translate_sink.rs | 10 +- comms/core/src/proto/identity.proto | 2 + comms/core/src/protocol/identity.rs | 25 +- .../core/src/protocol/messaging/extension.rs | 17 +- comms/core/src/protocol/messaging/protocol.rs | 5 +- comms/core/src/protocol/messaging/test.rs | 16 +- comms/core/src/protocol/negotiation.rs | 12 +- comms/core/src/protocol/protocols.rs | 5 +- comms/core/src/protocol/rpc/body.rs | 6 +- comms/core/src/protocol/rpc/client/mod.rs | 3 +- comms/core/src/protocol/rpc/client/tests.rs | 22 +- comms/core/src/protocol/rpc/server/mod.rs | 2 +- comms/core/src/protocol/rpc/server/router.rs | 6 +- .../core/src/protocol/rpc/test/client_pool.rs | 14 +- .../protocol/rpc/test/comms_integration.rs | 7 +- comms/core/src/protocol/rpc/test/handshake.rs | 7 +- comms/core/src/protocol/rpc/test/smoke.rs | 25 +- comms/core/src/rate_limit.rs | 5 +- comms/core/src/runtime.rs | 33 -- .../src/test_utils/factories/node_identity.rs | 2 +- comms/core/src/test_utils/factories/peer.rs | 4 +- .../test_utils/mocks/connection_manager.rs | 3 +- .../test_utils/mocks/connectivity_manager.rs | 3 +- .../src/test_utils/mocks/peer_connection.rs | 18 +- comms/core/src/test_utils/mod.rs | 2 +- comms/core/src/tor/control_client/client.rs | 42 +-- comms/core/src/tor/control_client/monitor.rs | 3 +- .../src/tor/control_client/test_server.rs | 4 +- .../core/src/tor/hidden_service/controller.rs | 3 +- comms/core/src/transports/dns/system.rs | 3 +- comms/core/src/transports/dns/tor.rs | 2 +- comms/core/src/transports/memory.rs | 5 +- comms/core/src/transports/socks.rs | 3 +- comms/core/tests/helpers.rs | 1 + comms/core/tests/rpc.rs | 3 +- comms/core/tests/rpc_stress.rs | 2 +- comms/core/tests/substream_stress.rs | 2 +- comms/dht/examples/memory_net/drain_burst.rs | 4 +- comms/dht/examples/memory_net/utilities.rs | 6 +- comms/dht/examples/propagation/node.rs | 10 +- comms/dht/examples/propagation/prompt.rs | 5 +- comms/dht/src/actor.rs | 26 +- comms/dht/src/config.rs | 8 +- comms/dht/src/connectivity/mod.rs | 95 ++--- comms/dht/src/connectivity/test.rs | 17 +- comms/dht/src/dht.rs | 17 +- comms/dht/src/discovery/error.rs | 4 + comms/dht/src/discovery/service.rs | 70 ++-- comms/dht/src/inbound/decryption.rs | 23 +- comms/dht/src/inbound/deserialize.rs | 7 +- comms/dht/src/inbound/dht_handler/task.rs | 88 ++--- comms/dht/src/inbound/error.rs | 11 +- comms/dht/src/inbound/forward.rs | 8 +- .../dht/src/network_discovery/discovering.rs | 30 +- comms/dht/src/network_discovery/on_connect.rs | 16 +- comms/dht/src/network_discovery/ready.rs | 39 +- .../src/network_discovery/state_machine.rs | 8 +- comms/dht/src/network_discovery/test.rs | 79 +--- comms/dht/src/outbound/broadcast.rs | 13 +- comms/dht/src/outbound/message_send_state.rs | 10 +- comms/dht/src/outbound/serialize.rs | 4 +- 
comms/dht/src/peer_validator.rs | 224 ++++------- comms/dht/src/proto/dht.proto | 12 +- comms/dht/src/proto/mod.rs | 167 +++++++-- comms/dht/src/proto/rpc.proto | 22 +- comms/dht/src/rpc/mod.rs | 3 + comms/dht/src/rpc/peer_info.rs | 67 ++++ comms/dht/src/rpc/service.rs | 18 +- comms/dht/src/rpc/test.rs | 40 +- comms/dht/src/storage/connection.rs | 5 +- comms/dht/src/store_forward/database/mod.rs | 7 +- .../dht/src/store_forward/saf_handler/task.rs | 8 +- comms/dht/src/store_forward/store.rs | 10 +- comms/dht/src/test_utils/makers.rs | 6 +- comms/dht/tests/dht.rs | 28 +- .../storage/src/key_val_store/cached_store.rs | 125 +++++++ .../storage/src/key_val_store/mod.rs | 1 + infrastructure/storage/src/lib.rs | 1 + integration_tests/log4rs/cucumber.yml | 28 +- integration_tests/tests/cucumber.rs | 42 ++- integration_tests/tests/features/Sync.feature | 4 +- .../tests/features/WalletFFI.feature | 7 +- .../tests/utils/base_node_process.rs | 29 +- integration_tests/tests/utils/mod.rs | 2 +- .../tests/utils/wallet_process.rs | 3 +- 175 files changed, 2761 insertions(+), 2801 deletions(-) delete mode 100644 comms/core/src/peer_manager/connection_stats.rs delete mode 100644 comms/core/src/peer_manager/migrations/v5.rs delete mode 100644 comms/core/src/peer_manager/migrations/v6.rs create mode 100644 comms/core/src/peer_manager/peer_identity_claim.rs delete mode 100644 comms/core/src/runtime.rs create mode 100644 comms/dht/src/rpc/peer_info.rs create mode 100644 infrastructure/storage/src/key_val_store/cached_store.rs diff --git a/.gitignore b/.gitignore index eba14fa03a..dda85c290e 100644 --- a/.gitignore +++ b/.gitignore @@ -62,3 +62,5 @@ clients/base_node_grpc_client/package-lock.json clients/validator_node_grpc_client/package-lock.json clients/wallet_grpc_client/package-lock.json pie/ +/integration_tests/.husky/_/husky.sh +/integration_tests/tests/temp diff --git a/Cargo.lock b/Cargo.lock index 5b07608c85..3f6a04f079 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -190,6 +190,52 @@ dependencies = [ "cc", ] +[[package]] +name = "axum" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e246206a63c9830e118d12c894f56a82033da1a2361f5544deeee3df85c99d9" +dependencies = [ + "async-trait", + "axum-core", + "bitflags 1.3.2", + "bytes 1.4.0", + "futures-util", + "http", + "http-body", + "hyper", + "itoa 1.0.5", + "matchit", + "memchr", + "mime", + "percent-encoding 2.2.0", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper", + "tower", + "tower-http", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cae3e661676ffbacb30f1a824089a8c9150e71017f7e1e38f2aa32009188d34" +dependencies = [ + "async-trait", + "bytes 1.4.0", + "futures-util", + "http", + "http-body", + "mime", + "rustversion", + "tower-layer", + "tower-service", +] + [[package]] name = "base58-monero" version = "0.3.2" @@ -941,6 +987,42 @@ dependencies = [ "windows-sys 0.42.0", ] +[[package]] +name = "console-api" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e57ff02e8ad8e06ab9731d5dc72dc23bef9200778eae1a89d555d8c42e5d4a86" +dependencies = [ + "prost 0.11.6", + "prost-types 0.11.6", + "tonic 0.8.3", + "tracing-core", +] + +[[package]] +name = "console-subscriber" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"22a3a81dfaf6b66bce5d159eddae701e3a002f194d378cbf7be5f053c281d9be" +dependencies = [ + "console-api", + "crossbeam-channel", + "crossbeam-utils", + "futures 0.3.26", + "hdrhistogram", + "humantime 2.1.0", + "prost-types 0.11.6", + "serde", + "serde_json", + "thread_local", + "tokio", + "tokio-stream", + "tonic 0.8.3", + "tracing", + "tracing-core", + "tracing-subscriber", +] + [[package]] name = "console_error_panic_hook" version = "0.1.7" @@ -2291,7 +2373,10 @@ version = "7.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f19b9f54f7c7f55e31401bb647626ce0cf0f67b0004982ce815b3ee72a02aa8" dependencies = [ + "base64 0.13.1", "byteorder", + "flate2", + "nom 7.1.3", "num-traits", ] @@ -2387,6 +2472,12 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "http-range-header" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" + [[package]] name = "httparse" version = "1.8.0" @@ -2925,12 +3016,27 @@ dependencies = [ "winapi", ] +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata", +] + [[package]] name = "matches" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" +[[package]] +name = "matchit" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b87248edafb776e59e6ee64a79086f65890d3510f2c656c000bf2a7e8a0aea40" + [[package]] name = "md-5" version = "0.9.1" @@ -3920,7 +4026,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001" dependencies = [ "bytes 1.4.0", - "prost-derive", + "prost-derive 0.9.0", +] + +[[package]] +name = "prost" +version = "0.11.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21dc42e00223fc37204bd4aa177e69420c604ca4a183209a8f9de30c6d934698" +dependencies = [ + "bytes 1.4.0", + "prost-derive 0.11.6", ] [[package]] @@ -3936,8 +4052,8 @@ dependencies = [ "log", "multimap", "petgraph 0.6.2", - "prost", - "prost-types", + "prost 0.9.0", + "prost-types 0.9.0", "regex", "tempfile", "which", @@ -3956,6 +4072,19 @@ dependencies = [ "syn", ] +[[package]] +name = "prost-derive" +version = "0.11.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bda8c0881ea9f722eb9629376db3d0b903b462477c1aafcb0566610ac28ac5d" +dependencies = [ + "anyhow", + "itertools 0.10.5", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "prost-types" version = "0.9.0" @@ -3963,7 +4092,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a" dependencies = [ "bytes 1.4.0", - "prost", + "prost 0.9.0", +] + +[[package]] +name = "prost-types" +version = "0.11.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5e0526209433e96d83d750dd81a99118edbc55739e7e61a46764fd2ad537788" +dependencies = [ + "bytes 1.4.0", + "prost 0.11.6", ] [[package]] @@ -4211,6 +4350,9 @@ name = "regex-automata" version = "0.1.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax", +] [[package]] name = "regex-syntax" @@ -4406,6 +4548,12 @@ dependencies = [ "base64 0.13.1", ] +[[package]] +name = "rustversion" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" + [[package]] name = "rustyline" version = "9.1.2" @@ -4735,6 +4883,15 @@ dependencies = [ "opaque-debug", ] +[[package]] +name = "sharded-slab" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" +dependencies = [ + "lazy_static", +] + [[package]] name = "shlex" version = "1.1.0" @@ -4945,6 +5102,12 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + [[package]] name = "synstructure" version = "0.12.6" @@ -4999,8 +5162,8 @@ dependencies = [ "borsh", "chrono", "log", - "prost", - "prost-types", + "prost 0.9.0", + "prost-types 0.9.0", "rand 0.8.5", "tari_common_types", "tari_comms", @@ -5009,7 +5172,7 @@ dependencies = [ "tari_script", "tari_utilities", "thiserror", - "tonic", + "tonic 0.6.2", "tonic-build", "zeroize", ] @@ -5043,6 +5206,7 @@ dependencies = [ "chrono", "clap 3.2.23", "config", + "console-subscriber", "crossterm 0.23.2", "derive_more", "either", @@ -5072,7 +5236,7 @@ dependencies = [ "tari_utilities", "thiserror", "tokio", - "tonic", + "tonic 0.6.2", ] [[package]] @@ -5200,7 +5364,7 @@ dependencies = [ "nom 5.1.2", "once_cell", "pin-project 1.0.12", - "prost", + "prost 0.9.0", "rand 0.7.3", "serde", "serde_derive", @@ -5248,8 +5412,8 @@ dependencies = [ "log-mdc", "petgraph 0.5.1", "pin-project 0.4.30", - "prost", - "prost-types", + "prost 0.9.0", + "prost-types 0.9.0", "rand 0.7.3", "serde", "tari_common", @@ -5275,7 +5439,7 @@ version = "0.47.0-pre.0" dependencies = [ "futures 0.3.26", "proc-macro2", - "prost", + "prost 0.9.0", "quote", "syn", "tari_comms", @@ -5323,7 +5487,7 @@ dependencies = [ "tari_wallet", "thiserror", "tokio", - "tonic", + "tonic 0.6.2", "tui", "unicode-segmentation", "unicode-width", @@ -5362,8 +5526,8 @@ dependencies = [ "num-format", "num-traits", "once_cell", - "prost", - "prost-types", + "prost 0.9.0", + "prost-types 0.9.0", "rand 0.7.3", "randomx-rs", "serde", @@ -5466,7 +5630,7 @@ dependencies = [ "thiserror", "time 0.3.17", "tokio", - "tonic", + "tonic 0.6.2", ] [[package]] @@ -5541,7 +5705,7 @@ dependencies = [ "tari_wallet_grpc_client", "thiserror", "tokio", - "tonic", + "tonic 0.6.2", "tracing", "url 2.3.1", ] @@ -5579,7 +5743,7 @@ dependencies = [ "log", "native-tls", "num_cpus", - "prost-types", + "prost-types 0.9.0", "rand 0.7.3", "serde", "serde_json", @@ -5593,7 +5757,7 @@ dependencies = [ "tari_utilities", "thiserror", "tokio", - "tonic", + "tonic 0.6.2", ] [[package]] @@ -5644,7 +5808,7 @@ dependencies = [ "lmdb-zero", "log", "pgp", - "prost", + "prost 0.9.0", "rand 0.7.3", "reqwest", "rustls", @@ -5774,7 +5938,7 @@ dependencies = [ "itertools 0.10.5", "libsqlite3-sys", "log", - "prost", + "prost 0.9.0", "rand 0.7.3", "serde", "serde_json", @@ -5848,7 +6012,7 @@ dependencies = [ "tari_common_types", "thiserror", "tokio", - "tonic", + "tonic 0.6.2", ] 
[[package]] @@ -6021,6 +6185,7 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", + "tracing", "windows-sys 0.42.0", ] @@ -6135,8 +6300,8 @@ dependencies = [ "hyper-timeout", "percent-encoding 2.2.0", "pin-project 1.0.12", - "prost", - "prost-derive", + "prost 0.9.0", + "prost-derive 0.9.0", "tokio", "tokio-stream", "tokio-util 0.6.10", @@ -6147,6 +6312,38 @@ dependencies = [ "tracing-futures", ] +[[package]] +name = "tonic" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f219fad3b929bef19b1f86fbc0358d35daed8f2cac972037ac0dc10bbb8d5fb" +dependencies = [ + "async-stream", + "async-trait", + "axum", + "base64 0.13.1", + "bytes 1.4.0", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-timeout", + "percent-encoding 2.2.0", + "pin-project 1.0.12", + "prost 0.11.6", + "prost-derive 0.11.6", + "tokio", + "tokio-stream", + "tokio-util 0.7.4", + "tower", + "tower-layer", + "tower-service", + "tracing", + "tracing-futures", +] + [[package]] name = "tonic-build" version = "0.6.2" @@ -6193,6 +6390,25 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower-http" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" +dependencies = [ + "bitflags 1.3.2", + "bytes 1.4.0", + "futures-core", + "futures-util", + "http", + "http-body", + "http-range-header", + "pin-project-lite", + "tower", + "tower-layer", + "tower-service", +] + [[package]] name = "tower-layer" version = "0.3.2" @@ -6236,6 +6452,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" dependencies = [ "once_cell", + "valuable", ] [[package]] @@ -6248,6 +6465,21 @@ dependencies = [ "tracing", ] +[[package]] +name = "tracing-subscriber" +version = "0.3.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" +dependencies = [ + "matchers", + "once_cell", + "regex", + "sharded-slab", + "thread_local", + "tracing", + "tracing-core", +] + [[package]] name = "trust-dns-client" version = "0.21.0-alpha.5" @@ -6508,6 +6740,12 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "936e4b492acfd135421d8dca4b1aa80a7bfc26e702ef3af710e0752684df5372" +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + [[package]] name = "vcpkg" version = "0.2.15" diff --git a/applications/tari_app_grpc/proto/network.proto b/applications/tari_app_grpc/proto/network.proto index 63dcdb463e..cc3f9cc1bf 100644 --- a/applications/tari_app_grpc/proto/network.proto +++ b/applications/tari_app_grpc/proto/network.proto @@ -27,7 +27,7 @@ import "google/protobuf/timestamp.proto"; message NodeIdentity { bytes public_key = 1; - string public_address = 2; + repeated string public_addresses = 2; bytes node_id = 3; } @@ -47,8 +47,6 @@ message Peer{ google.protobuf.Timestamp offline_at = 8; /// Features supported by the peer uint64 features = 9; - /// Connection statics for the peer - google.protobuf.Timestamp last_connected_at = 10; /// Protocols supported by the peer. 
This should not be considered a definitive list of supported protocols and is /// used as information for more efficient protocol negotiation. repeated bytes supported_protocols = 11; /// User agent advertised by the peer @@ -77,7 +75,6 @@ message Address{ bytes address =1; string last_seen = 2; uint32 connection_attempts = 3; - uint32 rejected_message_count = 4; uint64 avg_latency = 5; } diff --git a/applications/tari_app_grpc/src/conversions/peer.rs b/applications/tari_app_grpc/src/conversions/peer.rs index 00dd104333..25e9309de1 100644 --- a/applications/tari_app_grpc/src/conversions/peer.rs +++ b/applications/tari_app_grpc/src/conversions/peer.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use tari_comms::{connectivity::ConnectivityStatus, net_address::MutliaddrWithStats, peer_manager::Peer}; +use tari_comms::{connectivity::ConnectivityStatus, net_address::MultiaddrWithStats, peer_manager::Peer}; use tari_utilities::ByteArray; use crate::{conversions::naive_datetime_to_timestamp, tari_rpc as grpc}; @@ -31,21 +31,17 @@ impl From for grpc::Peer { fn from(peer: Peer) -> Self { let public_key = peer.public_key.to_vec(); let node_id = peer.node_id.to_vec(); - let mut addresses = Vec::with_capacity(peer.addresses.addresses.len()); - let last_connection = peer - .addresses - .last_seen() - .map(|v| naive_datetime_to_timestamp(v.naive_utc())); - for address in peer.addresses.addresses { + let mut addresses = Vec::with_capacity(peer.addresses.len()); + let last_connection = peer.addresses.last_seen().map(naive_datetime_to_timestamp); + for address in peer.addresses.addresses() { addresses.push(address.clone().into()) } let flags = u32::from(peer.flags.bits()); let banned_until = peer.banned_until.map(naive_datetime_to_timestamp); let banned_reason = peer.banned_reason.to_string(); - let offline_at = peer.offline_at.map(naive_datetime_to_timestamp); + let offline_at = peer.offline_at().map(naive_datetime_to_timestamp); let features = peer.features.bits(); - let last_connected_at = peer.connection_stats.last_connected_at.map(naive_datetime_to_timestamp); let supported_protocols = peer.supported_protocols.into_iter().map(|p| p.to_vec()).collect(); let user_agent = peer.user_agent; Self { @@ -58,28 +54,25 @@ impl From for grpc::Peer { banned_reason, offline_at, features, - last_connected_at, supported_protocols, user_agent, } } } -impl From for grpc::Address { - fn from(address_with_stats: MutliaddrWithStats) -> Self { - let address = address_with_stats.address.to_vec(); +impl From for grpc::Address { + fn from(address_with_stats: MultiaddrWithStats) -> Self { + let address = address_with_stats.address().to_vec(); let last_seen = match address_with_stats.last_seen { Some(v) => v.to_string(), None => String::new(), }; let connection_attempts = address_with_stats.connection_attempts; - let rejected_message_count = address_with_stats.rejected_message_count; let avg_latency = address_with_stats.avg_latency.as_secs(); Self { address, last_seen, connection_attempts, - rejected_message_count, avg_latency, } } diff --git a/applications/tari_app_utilities/src/identity_management.rs b/applications/tari_app_utilities/src/identity_management.rs index dd5d67f4f6..e2b71837f2 100644 --- a/applications/tari_app_utilities/src/identity_management.rs +++ b/applications/tari_app_utilities/src/identity_management.rs @@ -49,20 +49,17 @@ const 
REQUIRED_IDENTITY_PERMS: u32 = 0o100600; /// A NodeIdentity wrapped in an atomic reference counter on success, the exit code indicating the reason on failure pub fn setup_node_identity>( identity_file: P, - public_address: Option<&Multiaddr>, + public_addresses: Vec, create_id: bool, peer_features: PeerFeatures, ) -> Result, ExitError> { match load_node_identity(&identity_file) { Ok(mut id) => { id.set_peer_features(peer_features); - match public_address { - Some(public_address) => { - id.set_public_address(public_address.clone()); - Ok(Arc::new(id)) - }, - None => Ok(Arc::new(id)), + for public_address in public_addresses { + id.add_public_address(public_address.clone()); } + Ok(Arc::new(id)) }, Err(IdentityError::InvalidPermissions) => Err(ExitError::new( ExitCode::ConfigError, @@ -96,11 +93,7 @@ pub fn setup_node_identity>( debug!(target: LOG_TARGET, "Existing node id not found. {}. Creating new ID", e); - match create_new_node_identity( - &identity_file, - public_address.cloned().unwrap_or_else(Multiaddr::empty), - peer_features, - ) { + match create_new_node_identity(&identity_file, public_addresses, peer_features) { Ok(id) => { info!( target: LOG_TARGET, @@ -159,10 +152,10 @@ fn load_node_identity>(path: P) -> Result>( path: P, - public_addr: Multiaddr, + public_addresses: Vec, features: PeerFeatures, ) -> Result { - let node_identity = NodeIdentity::random(&mut OsRng, public_addr, features); + let node_identity = NodeIdentity::random_multiple_addresses(&mut OsRng, public_addresses, features); save_as_json(&path, &node_identity)?; Ok(node_identity) } diff --git a/applications/tari_base_node/Cargo.toml b/applications/tari_base_node/Cargo.toml index 00a337a20d..b9c84973e4 100644 --- a/applications/tari_base_node/Cargo.toml +++ b/applications/tari_base_node/Cargo.toml @@ -29,6 +29,7 @@ bincode = "1.3.1" borsh = "0.9.3" chrono = { version = "0.4.19", default-features = false } clap = { version = "3.1.1", features = ["derive", "env"] } +console-subscriber = "0.1.8" config = { version = "0.13.0" } crossterm = { version = "0.23.1", features = ["event-stream"] } derive_more = "0.99.17" diff --git a/applications/tari_base_node/src/builder.rs b/applications/tari_base_node/src/builder.rs index 56bbe1682c..955eb1df80 100644 --- a/applications/tari_base_node/src/builder.rs +++ b/applications/tari_base_node/src/builder.rs @@ -225,10 +225,10 @@ async fn build_node_context( ) .map_err(|err| { if let ChainStorageError::DatabaseResyncRequired(reason) = err { - return ExitError::new( + ExitError::new( ExitCode::DbInconsistentState, format!("You may need to re-sync your database because {}", reason), - ); + ) } else { ExitError::new(ExitCode::DatabaseError, err) } diff --git a/applications/tari_base_node/src/cli.rs b/applications/tari_base_node/src/cli.rs index f5e7097851..11b4c28db3 100644 --- a/applications/tari_base_node/src/cli.rs +++ b/applications/tari_base_node/src/cli.rs @@ -47,6 +47,8 @@ pub struct Cli { /// Supply a network (overrides existing configuration) #[clap(long, env = "TARI_NETWORK")] pub network: Option, + #[clap(long, alias = "profile")] + pub profile_with_tokio_console: bool, } impl ConfigOverrideProvider for Cli { diff --git a/applications/tari_base_node/src/commands/command/add_peer.rs b/applications/tari_base_node/src/commands/command/add_peer.rs index f5c2a74bd7..92d52577d8 100644 --- a/applications/tari_base_node/src/commands/command/add_peer.rs +++ b/applications/tari_base_node/src/commands/command/add_peer.rs @@ -26,6 +26,7 @@ use clap::Parser; use 
tari_app_utilities::utilities::UniPublicKey; use tari_comms::{ multiaddr::Multiaddr, + net_address::{MultiaddressesWithStats, PeerAddressSource}, peer_manager::{NodeId, Peer, PeerFeatures, PeerFlags}, }; @@ -52,7 +53,7 @@ impl HandleCommand for CommandContext { let peer = Peer::new( public_key, node_id.clone(), - vec![args.address].into(), + MultiaddressesWithStats::from_addresses_with_source(vec![args.address], &PeerAddressSource::Config), PeerFlags::empty(), PeerFeatures::empty(), vec![], diff --git a/applications/tari_base_node/src/commands/command/get_peer.rs b/applications/tari_base_node/src/commands/command/get_peer.rs index 545bfc2748..def3c21ba3 100644 --- a/applications/tari_base_node/src/commands/command/get_peer.rs +++ b/applications/tari_base_node/src/commands/command/get_peer.rs @@ -65,10 +65,8 @@ impl CommandContext { pub async fn get_peer(&self, partial: Vec, original_str: String) -> Result<(), Error> { let peer_manager = self.comms.peer_manager(); let peers = peer_manager.find_all_starts_with(&partial).await?; - let peer = { - if let Some(peer) = peers.into_iter().next() { - peer - } else { + let peers = { + if peers.is_empty() { let pk = parse_emoji_id_or_public_key(&original_str).ok_or_else(|| ArgsError::NoPeerMatching { original_str: original_str.clone(), })?; @@ -76,33 +74,45 @@ impl CommandContext { .find_by_public_key(&pk) .await? .ok_or(ArgsError::NoPeerMatching { original_str })?; - peer + vec![peer] + } else { + peers } }; - let eid = EmojiId::from_public_key(&peer.public_key).to_emoji_string(); - println!("Emoji ID: {}", eid); - println!("Public Key: {}", peer.public_key); - println!("NodeId: {}", peer.node_id); - println!("Addresses:"); - peer.addresses.iter().for_each(|a| { - println!("- {}", a); - }); - println!("User agent: {}", peer.user_agent); - println!("Features: {:?}", peer.features); - println!("Flags: {:?}", peer.flags); - println!("Supported protocols:"); - peer.supported_protocols.iter().for_each(|p| { - println!("- {}", String::from_utf8_lossy(p)); - }); - if let Some(dt) = peer.banned_until() { - println!("Banned until {}, reason: {}", dt, peer.banned_reason); - } - if let Some(dt) = peer.last_seen() { - println!("Last seen: {}", dt); - } - if let Some(updated_at) = peer.identity_signature.map(|i| i.updated_at()) { - println!("Last updated: {} (UTC)", updated_at); + for peer in peers { + let eid = EmojiId::from_public_key(&peer.public_key).to_emoji_string(); + println!("Emoji ID: {}", eid); + println!("Public Key: {}", peer.public_key); + println!("NodeId: {}", peer.node_id); + println!("Addresses:"); + peer.addresses.addresses().iter().for_each(|a| { + println!( + "- {} Score: {} - Source: {} Latency: {:?} - Last Seen: {} - Last Failure:{}", + a.address(), + a.quality_score, + a.source, + a.avg_latency, + a.last_seen + .as_ref() + .map(|t| t.to_string()) + .unwrap_or_else(|| "Never".to_string()), + a.last_failed_reason.as_ref().unwrap_or(&"None".to_string()) + ); + }); + println!("User agent: {}", peer.user_agent); + println!("Features: {:?}", peer.features); + println!("Flags: {:?}", peer.flags); + println!("Supported protocols:"); + peer.supported_protocols.iter().for_each(|p| { + println!("- {}", String::from_utf8_lossy(p)); + }); + if let Some(dt) = peer.banned_until() { + println!("Banned until {}, reason: {}", dt, peer.banned_reason); + } + if let Some(dt) = peer.last_seen() { + println!("Last seen: {}", dt); + } } Ok(()) } diff --git a/applications/tari_base_node/src/commands/command/list_peers.rs 
b/applications/tari_base_node/src/commands/command/list_peers.rs index bb7ea82cf3..9199e32fb1 100644 --- a/applications/tari_base_node/src/commands/command/list_peers.rs +++ b/applications/tari_base_node/src/commands/command/list_peers.rs @@ -54,33 +54,21 @@ impl CommandContext { _ => false, }) } - let peers = self.comms.peer_manager().perform_query(query).await?; + let mut peers = self.comms.peer_manager().perform_query(query).await?; let num_peers = peers.len(); println!(); let mut table = Table::new(); table.set_titles(vec!["NodeId", "Public Key", "Role", "User Agent", "Info"]); + peers.sort_by(|a, b| a.node_id.cmp(&b.node_id)); for peer in peers { let info_str = { let mut s = vec![]; if peer.is_seed() { s.push("SEED".to_string()); } - if peer.is_offline() { - if !peer.is_banned() { - s.push("OFFLINE".to_string()); - } - } else if let Some(dt) = peer.last_seen() { - s.push(format!( - "LAST_SEEN: {}", - Utc::now() - .naive_utc() - .signed_duration_since(dt) - .to_std() - .map(format_duration_basic) - .unwrap_or_else(|_| "?".into()) - )); - } else { + if peer.is_offline() && !peer.is_banned() { + s.push("OFFLINE".to_string()); } if let Some(dt) = peer.banned_until() { @@ -101,8 +89,14 @@ impl CommandContext { s.push(format!("chain height: {}", metadata.metadata.height_of_longest_chain())); } - if let Some(updated_at) = peer.identity_signature.map(|i| i.updated_at()) { - s.push(format!("updated_at: {} (UTC)", updated_at)); + if let Some(last_seen) = peer.addresses.last_seen() { + let duration = Utc::now() + .naive_utc() + .signed_duration_since(last_seen) + .to_std() + .map(format_duration_basic) + .unwrap_or_else(|_| "?".into()); + s.push(format!("last seen: {}", duration)); } if s.is_empty() { diff --git a/applications/tari_base_node/src/commands/command/reset_offline_peers.rs b/applications/tari_base_node/src/commands/command/reset_offline_peers.rs index b949aba1a4..71c3e9da8a 100644 --- a/applications/tari_base_node/src/commands/command/reset_offline_peers.rs +++ b/applications/tari_base_node/src/commands/command/reset_offline_peers.rs @@ -43,12 +43,8 @@ impl CommandContext { .comms .peer_manager() .update_each(|mut peer| { - if peer.is_offline() { - peer.set_offline(false); - Some(peer) - } else { - None - } + peer.addresses.reset_connection_attempts(); + Some(peer) }) .await?; diff --git a/applications/tari_base_node/src/commands/command/whoami.rs b/applications/tari_base_node/src/commands/command/whoami.rs index c729a5a29a..f287cf16ab 100644 --- a/applications/tari_base_node/src/commands/command/whoami.rs +++ b/applications/tari_base_node/src/commands/command/whoami.rs @@ -45,9 +45,9 @@ impl CommandContext { pub fn whoami(&self) -> Result<(), Error> { println!("{}", self.base_node_identity); let peer = format!( - "{}::{}", + "{}::{:?}", self.base_node_identity.public_key().to_hex(), - self.base_node_identity.public_address() + self.base_node_identity.public_addresses() ); let network = self.config.network(); let qr_link = format!( diff --git a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs index 504631c75c..1b6b5ccdf0 100644 --- a/applications/tari_base_node/src/grpc/base_node_grpc_server.rs +++ b/applications/tari_base_node/src/grpc/base_node_grpc_server.rs @@ -1357,7 +1357,7 @@ impl tari_rpc::base_node_server::BaseNode for BaseNodeGrpcServer { let identity = self.comms.node_identity_ref(); Ok(Response::new(tari_rpc::NodeIdentity { public_key: identity.public_key().to_vec(), - public_address: 
identity.public_address().to_string(), + public_addresses: identity.public_addresses().iter().map(|a| a.to_string()).collect(), node_id: identity.node_id().to_vec(), })) } diff --git a/applications/tari_base_node/src/lib.rs b/applications/tari_base_node/src/lib.rs index 6ad1d8bf15..a7ce66c080 100644 --- a/applications/tari_base_node/src/lib.rs +++ b/applications/tari_base_node/src/lib.rs @@ -81,6 +81,7 @@ pub async fn run_base_node( non_interactive_mode: true, watch: None, network: None, + profile_with_tokio_console: false, }; run_base_node_with_cli(node_identity, config, cli, shutdown).await @@ -105,6 +106,9 @@ pub async fn run_base_node_with_cli( log_mdc::insert("node-public-key", node_identity.public_key().to_string()); log_mdc::insert("node-id", node_identity.node_id().to_string()); + if let Some(grpc) = config.base_node.grpc_address.as_ref() { + log_mdc::insert("grpc", grpc.to_string()); + } if cli.rebuild_db { info!(target: LOG_TARGET, "Node is in recovery mode, entering recovery"); diff --git a/applications/tari_base_node/src/main.rs b/applications/tari_base_node/src/main.rs index d562a3eb95..bc1b419e8b 100644 --- a/applications/tari_base_node/src/main.rs +++ b/applications/tari_base_node/src/main.rs @@ -69,7 +69,7 @@ /// `whoami` - Displays identity information about this Base Node and it's wallet /// `quit` - Exits the Base Node /// `exit` - Same as quit -use std::{process, sync::Arc}; +use std::{panic, process, sync::Arc}; use clap::Parser; use log::*; @@ -85,6 +85,14 @@ const LOG_TARGET: &str = "tari::base_node::app"; /// Application entry point fn main() { + // Setup a panic hook which prints the default rust panic message but also exits the process. This makes a panic in + // any thread "crash" the system instead of silently continuing. 
+ let default_hook = panic::take_hook(); + panic::set_hook(Box::new(move |info| { + default_hook(info); + process::exit(1); + })); + if let Err(err) = main_inner() { eprintln!("{:?}", err); let exit_code = err.exit_code; @@ -107,6 +115,10 @@ fn main_inner() -> Result<(), ExitError> { let config_path = cli.common.config_path(); let cfg = load_configuration(config_path, true, &cli)?; + if cli.profile_with_tokio_console { + console_subscriber::init(); + } + initialize_logging( &cli.common.log_config_path("base_node"), &cli.common.get_base_path(), @@ -127,7 +139,7 @@ fn main_inner() -> Result<(), ExitError> { // Load or create the Node identity let node_identity = setup_node_identity( &config.base_node.identity_file, - config.base_node.p2p.public_address.as_ref(), + config.base_node.p2p.public_addresses.clone(), cli.non_interactive_mode || cli.init, PeerFeatures::COMMUNICATION_NODE, )?; diff --git a/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs b/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs index af47fd068d..7c7fa5c339 100644 --- a/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs +++ b/applications/tari_console_wallet/src/grpc/wallet_grpc_server.rs @@ -223,7 +223,7 @@ impl wallet_server::Wallet for WalletGrpcServer { let identity = self.wallet.comms.node_identity(); Ok(Response::new(GetIdentityResponse { public_key: identity.public_key().to_vec(), - public_address: identity.public_address().to_string(), + public_address: identity.public_addresses().iter().map(|a| a.to_string()).collect(), node_id: identity.node_id().to_vec(), })) } diff --git a/applications/tari_console_wallet/src/init/mod.rs b/applications/tari_console_wallet/src/init/mod.rs index 4bd1d7ecdc..83f6d889e1 100644 --- a/applications/tari_console_wallet/src/init/mod.rs +++ b/applications/tari_console_wallet/src/init/mod.rs @@ -314,12 +314,13 @@ pub async fn init_wallet( debug!(target: LOG_TARGET, "Databases Initialized. Wallet is encrypted.",); - let node_address = match config.wallet.p2p.public_address.clone() { - Some(addr) => addr, - None => match wallet_db.get_node_address()? { + let node_addresses = if config.wallet.p2p.public_addresses.is_empty() { + vec![match wallet_db.get_node_address()? { Some(addr) => addr, None => Multiaddr::empty(), - }, + }] + } else { + config.wallet.p2p.public_addresses.clone() }; let master_seed = read_or_create_master_seed(recovery_seed.clone(), &wallet_db)?; @@ -331,14 +332,9 @@ pub async fn init_wallet( "Node identity overridden by file {}", identity_file.to_string_lossy() ); - setup_node_identity( - identity_file, - Some(&node_address), - true, - PeerFeatures::COMMUNICATION_CLIENT, - )? + setup_node_identity(identity_file, node_addresses, true, PeerFeatures::COMMUNICATION_CLIENT)? 
}, - None => setup_identity_from_db(&wallet_db, &master_seed, node_address.clone())?, + None => setup_identity_from_db(&wallet_db, &master_seed, node_addresses)?, }; let mut wallet_config = config.wallet.clone(); @@ -408,7 +404,7 @@ async fn detect_local_base_node(network: Network) -> Option { let resp = node_conn.identify(Empty {}).await.ok()?; let identity = resp.get_ref(); let public_key = CommsPublicKey::from_bytes(&identity.public_key).ok()?; - let address = Multiaddr::from_str(&identity.public_address).ok()?; + let address = Multiaddr::from_str(identity.public_addresses.first()?).ok()?; debug!( target: LOG_TARGET, "Local base node found with pk={} and addr={}", @@ -421,7 +417,7 @@ async fn detect_local_base_node(network: Network) -> Option { fn setup_identity_from_db( wallet_db: &WalletDatabase, master_seed: &CipherSeed, - node_address: Multiaddr, + node_addresses: Vec, ) -> Result, ExitError> { let node_features = wallet_db .get_node_features()? @@ -435,13 +431,13 @@ fn setup_identity_from_db( // to None let identity_sig = identity_sig.filter(|sig| { let comms_public_key = CommsPublicKey::from_secret_key(&comms_secret_key); - sig.is_valid(&comms_public_key, node_features, [&node_address]) + sig.is_valid(&comms_public_key, node_features, &node_addresses) }); // SAFETY: we are manually checking the validity of this signature before adding Some(..) let node_identity = Arc::new(NodeIdentity::with_signature_unchecked( comms_secret_key, - node_address, + node_addresses, node_features, identity_sig, )); @@ -470,11 +466,11 @@ pub async fn start_wallet( let net_address = base_node .addresses - .first() + .best() .ok_or_else(|| ExitError::new(ExitCode::ConfigError, "Configured base node has no address!"))?; wallet - .set_base_node_peer(base_node.public_key.clone(), net_address.address.clone()) + .set_base_node_peer(base_node.public_key.clone(), net_address.address().clone()) .await .map_err(|e| { ExitError::new( diff --git a/applications/tari_console_wallet/src/ui/components/menu.rs b/applications/tari_console_wallet/src/ui/components/menu.rs index 1a244455f5..d526b18250 100644 --- a/applications/tari_console_wallet/src/ui/components/menu.rs +++ b/applications/tari_console_wallet/src/ui/components/menu.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: BSD-3-Clause use tari_app_utilities::consts; -use tari_comms::runtime::Handle; +use tokio::runtime::Handle; use tui::{ backend::Backend, layout::{Constraint, Direction, Layout, Rect}, diff --git a/applications/tari_console_wallet/src/ui/components/network_tab.rs b/applications/tari_console_wallet/src/ui/components/network_tab.rs index 62b8200da1..96cd93ea87 100644 --- a/applications/tari_console_wallet/src/ui/components/network_tab.rs +++ b/applications/tari_console_wallet/src/ui/components/network_tab.rs @@ -433,7 +433,7 @@ impl Component for NetworkTab { let public_key = base_node.public_key.to_hex(); let address = base_node .addresses - .first() + .best() .map(|a| a.to_string()) .unwrap_or_else(|| "".to_string()); diff --git a/applications/tari_console_wallet/src/ui/components/notification_tab.rs b/applications/tari_console_wallet/src/ui/components/notification_tab.rs index 3d78a3d26f..a8c8536fc4 100644 --- a/applications/tari_console_wallet/src/ui/components/notification_tab.rs +++ b/applications/tari_console_wallet/src/ui/components/notification_tab.rs @@ -8,7 +8,7 @@ // Currently notifications are only added from the wallet_event_monitor which has // add_notification method. 
-use tari_comms::runtime::Handle; +use tokio::runtime::Handle; use tui::{ backend::Backend, layout::{Constraint, Layout, Rect}, diff --git a/applications/tari_console_wallet/src/ui/state/app_state.rs b/applications/tari_console_wallet/src/ui/state/app_state.rs index dbb7bb271d..05ecec2faf 100644 --- a/applications/tari_console_wallet/src/ui/state/app_state.rs +++ b/applications/tari_console_wallet/src/ui/state/app_state.rs @@ -39,6 +39,7 @@ use tari_common_types::{ use tari_comms::{ connectivity::ConnectivityEventRx, multiaddr::Multiaddr, + net_address::{MultiaddressesWithStats, PeerAddressSource}, peer_manager::{NodeId, Peer, PeerFeatures, PeerFlags}, }; use tari_core::transactions::{ @@ -498,7 +499,7 @@ impl AppState { let peer = Peer::new( pub_key, node_id, - addr.into(), + MultiaddressesWithStats::from_addresses_with_source(vec![addr], &PeerAddressSource::Config), PeerFlags::default(), PeerFeatures::COMMUNICATION_NODE, Default::default(), @@ -854,7 +855,7 @@ impl AppStateInner { self.wallet .set_base_node_peer( peer.public_key.clone(), - peer.addresses.first().ok_or(UiError::NoAddress)?.address.clone(), + peer.addresses.best().ok_or(UiError::NoAddress)?.address().clone(), ) .await?; @@ -869,7 +870,7 @@ impl AppStateInner { target: LOG_TARGET, "Setting new base node peer for wallet: {}::{}", peer.public_key, - peer.addresses.first().ok_or(UiError::NoAddress)?.to_string(), + peer.addresses.best().ok_or(UiError::NoAddress)?.to_string(), ); Ok(()) @@ -879,7 +880,7 @@ impl AppStateInner { self.wallet .set_base_node_peer( peer.public_key.clone(), - peer.addresses.first().ok_or(UiError::NoAddress)?.address.clone(), + peer.addresses.best().ok_or(UiError::NoAddress)?.address().clone(), ) .await?; @@ -900,13 +901,13 @@ impl AppStateInner { .set_client_key_value(CUSTOM_BASE_NODE_PUBLIC_KEY_KEY.to_string(), peer.public_key.to_string())?; self.wallet.db.set_client_key_value( CUSTOM_BASE_NODE_ADDRESS_KEY.to_string(), - peer.addresses.first().ok_or(UiError::NoAddress)?.to_string(), + peer.addresses.best().ok_or(UiError::NoAddress)?.to_string(), )?; info!( target: LOG_TARGET, "Setting custom base node peer for wallet: {}::{}", peer.public_key, - peer.addresses.first().ok_or(UiError::NoAddress)?.to_string(), + peer.addresses.best().ok_or(UiError::NoAddress)?.to_string(), ); Ok(()) @@ -917,7 +918,7 @@ impl AppStateInner { self.wallet .set_base_node_peer( previous.public_key.clone(), - previous.addresses.first().ok_or(UiError::NoAddress)?.address.clone(), + previous.addresses.best().ok_or(UiError::NoAddress)?.address().clone(), ) .await?; @@ -1096,7 +1097,13 @@ impl AppStateData { let identity = MyIdentity { tari_address: wallet_identity.address.to_hex(), - network_address: wallet_identity.node_identity.public_address().to_string(), + network_address: wallet_identity + .node_identity + .public_addresses() + .iter() + .map(|a| a.to_string()) + .collect::>() + .join(", "), emoji_id: eid, qr_code: image, node_id: wallet_identity.node_identity.node_id().to_string(), diff --git a/applications/tari_console_wallet/src/utils/db.rs b/applications/tari_console_wallet/src/utils/db.rs index e06bd39d11..eea2ce3d30 100644 --- a/applications/tari_console_wallet/src/utils/db.rs +++ b/applications/tari_console_wallet/src/utils/db.rs @@ -24,6 +24,7 @@ use log::*; use tari_common_types::types::PublicKey; use tari_comms::{ multiaddr::Multiaddr, + net_address::{MultiaddressesWithStats, PeerAddressSource}, peer_manager::{NodeId, Peer, PeerFeatures, PeerFlags}, types::CommsPublicKey, }; @@ -74,7 +75,7 @@ pub fn 
get_custom_base_node_peer_from_db(wallet: &mut WalletSqlite) -> Option String { - match peer.addresses.first() { + match peer.addresses.best() { Some(address) => address.to_string(), None => "".to_string(), } diff --git a/base_layer/core/src/base_node/chain_metadata_service/service.rs b/base_layer/core/src/base_node/chain_metadata_service/service.rs index 2645c0e59f..e7450aa8ff 100644 --- a/base_layer/core/src/base_node/chain_metadata_service/service.rs +++ b/base_layer/core/src/base_node/chain_metadata_service/service.rs @@ -131,10 +131,9 @@ impl ChainMetadataService { match event { // Received a ping, check if it contains ChainMetadata LivenessEvent::ReceivedPing(event) => { - trace!( + debug!( target: LOG_TARGET, - "Received ping from neighbouring node '{}'.", - event.node_id + "Received ping from neighbouring node '{}'.", event.node_id ); self.number_of_rounds_no_pings = 0; if event.metadata.has(MetadataKey::ChainMetadata) { diff --git a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs index 54a1d83c52..e9656a634e 100644 --- a/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs +++ b/base_layer/core/src/base_node/comms_interface/inbound_handlers.rs @@ -608,7 +608,7 @@ where B: BlockchainBackend + 'static source_peer: NodeId, block_hash: BlockHash, ) -> Result { - return match self + match self .outbound_nci .request_blocks_by_hashes_from_peer(block_hash, Some(source_peer.clone())) .await? @@ -636,7 +636,7 @@ where B: BlockchainBackend + 'static source_peer ))) }, - }; + } } /// Handle inbound blocks from remote nodes and local services. diff --git a/base_layer/core/src/mempool/sync_protocol/mod.rs b/base_layer/core/src/mempool/sync_protocol/mod.rs index 08ad3bfe36..ed4bbad4a1 100644 --- a/base_layer/core/src/mempool/sync_protocol/mod.rs +++ b/base_layer/core/src/mempool/sync_protocol/mod.rs @@ -176,7 +176,7 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + Sync + 'static } if !self.is_synched() { - self.spawn_initiator_protocol(conn.clone()).await; + self.spawn_initiator_protocol(*conn.clone()).await; } }, _ => {}, diff --git a/base_layer/core/src/mempool/sync_protocol/test.rs b/base_layer/core/src/mempool/sync_protocol/test.rs index 281228587a..cff2ff4aa4 100644 --- a/base_layer/core/src/mempool/sync_protocol/test.rs +++ b/base_layer/core/src/mempool/sync_protocol/test.rs @@ -121,7 +121,7 @@ async fn empty_set() { create_peer_connection_mock_pair(node1.to_peer(), node2.to_peer()).await; // This node connected to a peer, so it should open the substream - connectivity_manager_state.publish_event(ConnectivityEvent::PeerConnected(node2_conn)); + connectivity_manager_state.publish_event(ConnectivityEvent::PeerConnected(node2_conn.into())); let substream = node1_mock.next_incoming_substream().await.unwrap(); let framed = framing::canonical(substream, MAX_FRAME_SIZE); @@ -149,7 +149,7 @@ async fn synchronise() { create_peer_connection_mock_pair(node1.to_peer(), node2.to_peer()).await; // This node connected to a peer, so it should open the substream - connectivity_manager_state.publish_event(ConnectivityEvent::PeerConnected(node2_conn)); + connectivity_manager_state.publish_event(ConnectivityEvent::PeerConnected(node2_conn.into())); let substream = node1_mock.next_incoming_substream().await.unwrap(); let framed = framing::canonical(substream, MAX_FRAME_SIZE); @@ -180,7 +180,7 @@ async fn duplicate_set() { create_peer_connection_mock_pair(node1.to_peer(), node2.to_peer()).await; // 
This node connected to a peer, so it should open the substream - connectivity_manager_state.publish_event(ConnectivityEvent::PeerConnected(node2_conn)); + connectivity_manager_state.publish_event(ConnectivityEvent::PeerConnected(node2_conn.into())); let substream = node1_mock.next_incoming_substream().await.unwrap(); let framed = framing::canonical(substream, MAX_FRAME_SIZE); @@ -282,7 +282,7 @@ async fn responder_messages() { create_peer_connection_mock_pair(node1.to_peer(), node2.to_peer()).await; // This node connected to a peer, so it should open the substream - connectivity_manager_state.publish_event(ConnectivityEvent::PeerConnected(node2_conn)); + connectivity_manager_state.publish_event(ConnectivityEvent::PeerConnected(node2_conn.into())); let substream = node1_mock.next_incoming_substream().await.unwrap(); let mut framed = framing::canonical(substream, MAX_FRAME_SIZE); diff --git a/base_layer/core/tests/mempool.rs b/base_layer/core/tests/mempool.rs index e052362548..c399d30bc5 100644 --- a/base_layer/core/tests/mempool.rs +++ b/base_layer/core/tests/mempool.rs @@ -801,7 +801,7 @@ async fn test_reorg() { } static EMISSION: [u64; 2] = [10, 10]; -#[tokio::test] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] #[allow(clippy::too_many_lines)] #[allow(clippy::identity_op)] async fn receive_and_propagate_transaction() { diff --git a/base_layer/core/tests/node_service.rs b/base_layer/core/tests/node_service.rs index 2c155a8f29..912915c335 100644 --- a/base_layer/core/tests/node_service.rs +++ b/base_layer/core/tests/node_service.rs @@ -61,7 +61,7 @@ use crate::helpers::block_builders::{construct_chained_blocks, create_coinbase}; mod helpers; #[allow(clippy::too_many_lines)] -#[tokio::test] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn propagate_and_forward_many_valid_blocks() { let temp_dir = tempdir().unwrap(); let factories = CryptoFactories::default(); @@ -193,7 +193,7 @@ async fn propagate_and_forward_many_valid_blocks() { } static EMISSION: [u64; 2] = [10, 10]; -#[tokio::test] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn propagate_and_forward_invalid_block_hash() { // Alice will propagate a "made up" block hash to Bob, Bob will request the block from Alice. Alice will not be able // to provide the block and so Bob will not propagate the hash further to Carol. @@ -298,7 +298,7 @@ async fn propagate_and_forward_invalid_block_hash() { carol_node.shutdown().await; } -#[tokio::test] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] #[allow(clippy::too_many_lines)] async fn propagate_and_forward_invalid_block() { let temp_dir = tempdir().unwrap(); diff --git a/base_layer/core/tests/node_state_machine.rs b/base_layer/core/tests/node_state_machine.rs index 6b8221e51b..58b35af5de 100644 --- a/base_layer/core/tests/node_state_machine.rs +++ b/base_layer/core/tests/node_state_machine.rs @@ -64,7 +64,7 @@ use crate::helpers::{chain_metadata::MockChainMetadata, nodes::random_node_ident mod helpers; static EMISSION: [u64; 2] = [10, 10]; -#[tokio::test] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn test_listening_lagging() { let factories = CryptoFactories::default(); let network = Network::LocalNet; diff --git a/base_layer/p2p/src/config.rs b/base_layer/p2p/src/config.rs index 5fb4030411..5fa125388b 100644 --- a/base_layer/p2p/src/config.rs +++ b/base_layer/p2p/src/config.rs @@ -87,7 +87,7 @@ pub struct P2pConfig { /// The public address advertised to other peers by this node. 
If not set it will be set automatically depending on /// the transport type. The TCP transport is not able to determine the users public IP, so this will need to be /// manually set. - pub public_address: Option, + pub public_addresses: Vec, /// Transport configuration pub transport: TransportConfig, /// Path to the LMDB data files. @@ -132,7 +132,7 @@ impl Default for P2pConfig { fn default() -> Self { Self { override_from: None, - public_address: None, + public_addresses: vec![], transport: Default::default(), datastore_path: PathBuf::from("peer_db"), peer_database_name: "peers".to_string(), diff --git a/base_layer/p2p/src/initialization.rs b/base_layer/p2p/src/initialization.rs index 64f190b9d9..616a3e96d4 100644 --- a/base_layer/p2p/src/initialization.rs +++ b/base_layer/p2p/src/initialization.rs @@ -155,7 +155,7 @@ pub async fn initialize_local_test_comms>( let comms = CommsBuilder::new() .allow_test_addresses() - .with_listener_address(node_identity.public_address()) + .with_listener_address(node_identity.first_public_address()) .with_listener_liveness_max_sessions(1) .with_node_identity(node_identity) .with_user_agent(&"/test/1.0") @@ -204,7 +204,7 @@ pub async fn initialize_local_test_comms>( comms .node_identity() - .set_public_address(comms.listening_address().clone()); + .add_public_address(comms.listening_address().clone()); Ok((comms, dht, event_sender)) } diff --git a/base_layer/p2p/src/peer_seeds.rs b/base_layer/p2p/src/peer_seeds.rs index 7fdc8af8ab..b55feeac2f 100644 --- a/base_layer/p2p/src/peer_seeds.rs +++ b/base_layer/p2p/src/peer_seeds.rs @@ -31,6 +31,7 @@ use serde::{Deserialize, Serialize}; use tari_common::DnsNameServer; use tari_comms::{ multiaddr::Multiaddr, + net_address::{MultiaddressesWithStats, PeerAddressSource}, peer_manager::{NodeId, Peer, PeerFeatures}, types::CommsPublicKey, }; @@ -148,7 +149,7 @@ impl From for Peer { Peer::new( seed.public_key, node_id, - seed.addresses.into(), + MultiaddressesWithStats::from_addresses_with_source(seed.addresses, &PeerAddressSource::Config), Default::default(), PeerFeatures::COMMUNICATION_NODE, Default::default(), diff --git a/base_layer/p2p/src/services/liveness/service.rs b/base_layer/p2p/src/services/liveness/service.rs index 5da92ad100..0844872272 100644 --- a/base_layer/p2p/src/services/liveness/service.rs +++ b/base_layer/p2p/src/services/liveness/service.rs @@ -304,7 +304,6 @@ where } let len_peers = selected_peers.len(); - debug!(target: LOG_TARGET, "Sending liveness ping to {} peer(s)", len_peers); for peer in selected_peers { let msg = PingPongMessage::ping_with_metadata(self.state.metadata().clone()); @@ -369,7 +368,7 @@ mod test { use rand::rngs::OsRng; use tari_comms::{ message::MessageTag, - multiaddr::Multiaddr, + net_address::MultiaddressesWithStats, peer_manager::{NodeId, Peer, PeerFeatures, PeerFlags}, test_utils::mocks::create_connectivity_mock, }; @@ -488,7 +487,7 @@ mod test { let source_peer = Peer::new( pk.clone(), NodeId::from_key(&pk), - Vec::::new().into(), + MultiaddressesWithStats::empty(), PeerFlags::empty(), PeerFeatures::COMMUNICATION_NODE, Default::default(), diff --git a/base_layer/p2p/src/test_utils.rs b/base_layer/p2p/src/test_utils.rs index c4210eddaa..e724067d35 100644 --- a/base_layer/p2p/src/test_utils.rs +++ b/base_layer/p2p/src/test_utils.rs @@ -25,7 +25,7 @@ use std::sync::Arc; use rand::rngs::OsRng; use tari_comms::{ message::MessageTag, - multiaddr::Multiaddr, + net_address::MultiaddressesWithStats, peer_manager::{NodeIdentity, Peer, PeerFeatures, PeerFlags}, }; use 
tari_comms_dht::{ @@ -80,7 +80,7 @@ pub fn make_dht_inbound_message(node_identity: &NodeIdentity, message: Vec) Arc::new(Peer::new( node_identity.public_key().clone(), node_identity.node_id().clone(), - Vec::::new().into(), + MultiaddressesWithStats::empty(), PeerFlags::empty(), PeerFeatures::COMMUNICATION_NODE, Default::default(), diff --git a/base_layer/wallet/src/transaction_service/handle.rs b/base_layer/wallet/src/transaction_service/handle.rs index ed087fa167..96adc655f6 100644 --- a/base_layer/wallet/src/transaction_service/handle.rs +++ b/base_layer/wallet/src/transaction_service/handle.rs @@ -239,9 +239,9 @@ pub enum TransactionServiceResponse { TransactionSent(TxId), BurntTransactionSent { tx_id: TxId, - commitment: Commitment, + commitment: Box, ownership_proof: Option, - rangeproof: BulletRangeProof, + rangeproof: Box, }, TransactionCancelled, PendingInboundTransactions(HashMap), @@ -545,7 +545,7 @@ impl TransactionServiceHandle { commitment, ownership_proof, rangeproof, - } => Ok((tx_id, commitment, ownership_proof, rangeproof)), + } => Ok((tx_id, *commitment, ownership_proof, *rangeproof)), _ => Err(TransactionServiceError::UnexpectedApiResponse), } } diff --git a/base_layer/wallet/src/transaction_service/service.rs b/base_layer/wallet/src/transaction_service/service.rs index bd928b2ea2..87a328880d 100644 --- a/base_layer/wallet/src/transaction_service/service.rs +++ b/base_layer/wallet/src/transaction_service/service.rs @@ -665,9 +665,9 @@ where .map(|(tx_id, commitment, ownership_proof, rangeproof)| { TransactionServiceResponse::BurntTransactionSent { tx_id, - commitment, + commitment: commitment.into(), ownership_proof, - rangeproof, + rangeproof: rangeproof.into(), } }), TransactionServiceRequest::RegisterValidatorNode { diff --git a/base_layer/wallet/src/wallet.rs b/base_layer/wallet/src/wallet.rs index ddfc66ee33..1bfca79d30 100644 --- a/base_layer/wallet/src/wallet.rs +++ b/base_layer/wallet/src/wallet.rs @@ -31,6 +31,7 @@ use tari_common_types::{ }; use tari_comms::{ multiaddr::Multiaddr, + net_address::{MultiaddressesWithStats, PeerAddressSource}, peer_manager::{NodeId, Peer, PeerFeatures, PeerFlags}, types::{CommsPublicKey, CommsSecretKey}, CommsNode, @@ -267,7 +268,7 @@ where // Persist the comms node address and features after it has been spawned to capture any modifications made // during comms startup. In the case of a Tor Transport the public address could have been generated - wallet_database.set_node_address(comms.node_identity().public_address())?; + wallet_database.set_node_address(comms.node_identity().first_public_address())?; wallet_database.set_node_features(comms.node_identity().features())?; let identity_sig = comms.node_identity().identity_signature_read().as_ref().cloned(); if let Some(identity_sig) = identity_sig { @@ -323,19 +324,19 @@ where .await?; } - let addresses = vec![address].into(); let peer_manager = self.comms.peer_manager(); let mut connectivity = self.comms.connectivity(); if let Some(mut current_peer) = peer_manager.find_by_public_key(&public_key).await? { // Only invalidate the identity signature if addresses are different - if current_peer.addresses != addresses { + if current_peer.addresses.contains(&address) { info!( target: LOG_TARGET, "Address for base node differs from storage. 
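The response enum above boxes its two larger fields before they cross the service channel; the usual reason is that a Rust enum is as large as its biggest variant, so boxing keeps every message small. A minimal sketch with stand-in types (none of these are the real wallet types):

    // Stand-in types only; the real enum carries commitments and range proofs.
    struct BigProof([u8; 1024]);

    #[allow(dead_code)]
    enum Response {
        Sent(u64),
        // The boxed variant stores a pointer instead of the full 1 KiB payload.
        BurntSent { tx_id: u64, proof: Box<BigProof> },
    }

    fn main() {
        // Without the Box, every Response would occupy over a kilobyte.
        assert!(std::mem::size_of::<Response>() < std::mem::size_of::<BigProof>());

        let resp = Response::BurntSent {
            tx_id: 1,
            proof: Box::new(BigProof([0u8; 1024])),
        };
        // Callers unbox with `*` when handing the value on, mirroring the
        // handle change above.
        if let Response::BurntSent { tx_id, proof } = resp {
            let unboxed: BigProof = *proof;
            assert_eq!(tx_id, 1);
            assert_eq!(unboxed.0.len(), 1024);
        }
    }
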
Was {}, setting to {}", current_peer.addresses, - addresses + address ); - current_peer.update(Some(addresses.into_vec()), None, None, None, None, None, None); + + current_peer.addresses.add_address(&address, &PeerAddressSource::Config); peer_manager.add_peer(current_peer.clone()).await?; } connectivity @@ -347,7 +348,7 @@ where let peer = Peer::new( public_key, node_id, - addresses, + MultiaddressesWithStats::from_addresses_with_source(vec![address], &PeerAddressSource::Config), PeerFlags::empty(), PeerFeatures::COMMUNICATION_NODE, Default::default(), diff --git a/base_layer/wallet/tests/contacts_service.rs b/base_layer/wallet/tests/contacts_service.rs index 8c2916ebb0..62a734fe8f 100644 --- a/base_layer/wallet/tests/contacts_service.rs +++ b/base_layer/wallet/tests/contacts_service.rs @@ -71,11 +71,11 @@ pub fn setup_contacts_service( )); let comms_config = P2pConfig { override_from: None, - public_address: None, + public_addresses: vec![], transport: TransportConfig { transport_type: TransportType::Memory, memory: MemoryTransportConfig { - listener_address: node_identity.public_address(), + listener_address: node_identity.first_public_address(), }, ..Default::default() }, diff --git a/base_layer/wallet/tests/support/comms_and_services.rs b/base_layer/wallet/tests/support/comms_and_services.rs index 749a6a3231..e0b2d82dab 100644 --- a/base_layer/wallet/tests/support/comms_and_services.rs +++ b/base_layer/wallet/tests/support/comms_and_services.rs @@ -25,6 +25,7 @@ use std::{sync::Arc, time::Duration}; use tari_comms::{ message::MessageTag, multiaddr::Multiaddr, + net_address::MultiaddressesWithStats, peer_manager::{NodeId, NodeIdentity, Peer, PeerFeatures, PeerFlags}, transports::MemoryTransport, types::CommsPublicKey, @@ -70,7 +71,7 @@ pub fn create_dummy_message(inner: T, public_key: &CommsPublicKey) -> DomainM let peer_source = Peer::new( public_key.clone(), NodeId::from_key(public_key), - Vec::::new().into(), + MultiaddressesWithStats::empty(), PeerFlags::empty(), PeerFeatures::COMMUNICATION_NODE, Default::default(), diff --git a/base_layer/wallet/tests/transaction_service_tests/service.rs b/base_layer/wallet/tests/transaction_service_tests/service.rs index 5586fcdf5c..96cbe75824 100644 --- a/base_layer/wallet/tests/transaction_service_tests/service.rs +++ b/base_layer/wallet/tests/transaction_service_tests/service.rs @@ -467,7 +467,7 @@ fn try_decode_transaction_cancelled_message(bytes: Vec) -> Option Peer { Peer::new( public_key.clone(), NodeId::from_key(&public_key), - net_address.into(), + MultiaddressesWithStats::from_addresses_with_source(vec![net_address], &PeerAddressSource::Config), PeerFlags::empty(), PeerFeatures::COMMUNICATION_NODE, Default::default(), @@ -121,9 +123,9 @@ async fn create_wallet( let node_identity = NodeIdentity::random(&mut OsRng, get_next_memory_address(), PeerFeatures::COMMUNICATION_NODE); let comms_config = P2pConfig { override_from: None, - public_address: None, + public_addresses: vec![], transport: TransportConfig::new_memory(MemoryTransportConfig { - listener_address: node_identity.public_address(), + listener_address: node_identity.first_public_address(), }), datastore_path: data_path.to_path_buf(), peer_database_name: random::string(8), @@ -198,7 +200,7 @@ async fn create_wallet( .await } -#[tokio::test] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] #[allow(clippy::too_many_lines)] async fn test_wallet() { let mut shutdown_a = Shutdown::new(); @@ -248,7 +250,7 @@ async fn test_wallet() { .peer_manager() .add_peer(create_peer( 
bob_identity.public_key().clone(), - bob_identity.public_address(), + bob_identity.first_public_address(), )) .await .unwrap(); @@ -258,7 +260,7 @@ async fn test_wallet() { .peer_manager() .add_peer(create_peer( alice_identity.public_key().clone(), - alice_identity.public_address(), + alice_identity.first_public_address(), )) .await .unwrap(); @@ -266,7 +268,7 @@ async fn test_wallet() { alice_wallet .set_base_node_peer( (*base_node_identity.public_key()).clone(), - base_node_identity.public_address().clone(), + base_node_identity.first_public_address().clone(), ) .await .unwrap(); @@ -510,7 +512,7 @@ fn test_many_iterations_store_and_forward_send_tx() { } } -#[tokio::test] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] #[allow(clippy::too_many_lines)] async fn test_store_and_forward_send_tx() { let shutdown_a = Shutdown::new(); @@ -535,11 +537,7 @@ async fn test_store_and_forward_send_tx() { .await .unwrap(); - let base_node_identity = Arc::new(NodeIdentity::random( - &mut OsRng, - "/memory/0".parse().unwrap(), - PeerFeatures::COMMUNICATION_NODE, - )); + let base_node_identity = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let (tx, _rx) = mpsc::channel(100); let (base_node, _dht, _msg_sender) = initialize_local_test_comms( base_node_identity, @@ -667,7 +665,7 @@ async fn test_import_utxo() { let (connection, _temp_dir) = make_wallet_database_connection(None); let comms_config = P2pConfig { override_from: None, - public_address: None, + public_addresses: vec![], transport: TransportConfig::new_tcp(TcpTransportConfig { listener_address: "/ip4/127.0.0.1/tcp/0".parse().unwrap(), tor_socks_address: None, @@ -732,7 +730,7 @@ async fn test_import_utxo() { let expected_output_hash = output.hash(); let node_address = TariAddress::new(node_identity.public_key().clone(), network); alice_wallet - .set_base_node_peer(node_identity.public_key().clone(), node_identity.public_address()) + .set_base_node_peer(node_identity.public_key().clone(), node_identity.first_public_address()) .await .unwrap(); let tx_id = alice_wallet diff --git a/base_layer/wallet_ffi/src/lib.rs b/base_layer/wallet_ffi/src/lib.rs index 53b7dc8f36..42405ce8d2 100644 --- a/base_layer/wallet_ffi/src/lib.rs +++ b/base_layer/wallet_ffi/src/lib.rs @@ -4742,13 +4742,13 @@ pub unsafe extern "C" fn comms_config_create( Ok(public_address) => { let node_identity = NodeIdentity::new( CommsSecretKey::default(), - public_address, + vec![public_address], PeerFeatures::COMMUNICATION_CLIENT, ); let config = TariCommsConfig { override_from: None, - public_address: Some(node_identity.public_address()), + public_addresses: vec![node_identity.first_public_address()], transport: (*transport).clone(), auxiliary_tcp_listener_address: None, datastore_path, @@ -5260,23 +5260,27 @@ pub unsafe extern "C" fn wallet_create( .map_err(|err| WalletStorageError::RecoverySeedError(err.to_string()))?; let node_features = wallet_database.get_node_features()?.unwrap_or_default(); - let node_address = wallet_database - .get_node_address()? - .or_else(|| comms_config.public_address.clone()) - .unwrap_or_else(Multiaddr::empty); + let node_addresses = if comms_config.public_addresses.is_empty() { + vec![match wallet_database.get_node_address()? 
{ + Some(addr) => addr, + None => Multiaddr::empty(), + }] + } else { + comms_config.public_addresses.clone() + }; let identity_sig = wallet_database.get_comms_identity_signature()?; // This checks if anything has changed by validating the previous signature and if invalid, setting identity_sig // to None let identity_sig = identity_sig.filter(|sig| { let comms_public_key = CommsPublicKey::from_secret_key(&comms_secret_key); - sig.is_valid(&comms_public_key, node_features, [&node_address]) + sig.is_valid(&comms_public_key, node_features, &node_addresses) }); // SAFETY: we are manually checking the validity of this signature before adding Some(..) let node_identity = Arc::new(NodeIdentity::with_signature_unchecked( comms_secret_key, - node_address, + node_addresses, node_features, identity_sig, )); @@ -10522,7 +10526,8 @@ mod test { NodeIdentity::random(&mut OsRng, get_next_memory_address(), PeerFeatures::COMMUNICATION_NODE); let base_node_peer_public_key_ptr = Box::into_raw(Box::new(node_identity.public_key().clone())); let base_node_peer_address_ptr = - CString::into_raw(CString::new(node_identity.public_address().to_string()).unwrap()) as *const c_char; + CString::into_raw(CString::new(node_identity.first_public_address().to_string()).unwrap()) + as *const c_char; wallet_add_base_node_peer( wallet_ptr, base_node_peer_public_key_ptr, diff --git a/comms/core/examples/stress/node.rs b/comms/core/examples/stress/node.rs index 37c403c107..a7c6631a43 100644 --- a/comms/core/examples/stress/node.rs +++ b/comms/core/examples/stress/node.rs @@ -89,7 +89,7 @@ pub async fn create( .unwrap(); let node_identity = node_identity .map(|ni| { - ni.set_public_address(public_addr.clone()); + ni.add_public_address(public_addr.clone()); ni }) .unwrap_or_else(|| Arc::new(NodeIdentity::random(&mut OsRng, public_addr, Default::default()))); diff --git a/comms/core/examples/stress/prompt.rs b/comms/core/examples/stress/prompt.rs index 3ad3e3f239..4ad06183e0 100644 --- a/comms/core/examples/stress/prompt.rs +++ b/comms/core/examples/stress/prompt.rs @@ -24,6 +24,7 @@ use std::{io::stdin, str::FromStr}; use tari_comms::{ multiaddr::Multiaddr, + net_address::{MultiaddressesWithStats, PeerAddressSource}, peer_manager::{NodeId, Peer}, types::CommsPublicKey, }; @@ -135,7 +136,7 @@ pub fn user_prompt(default_peer: Option) -> Result<(Peer, StressProtocol), } pub fn to_short_str(peer: &Peer) -> String { - format!("{}::{}", peer.public_key, peer.addresses.first().unwrap()) + format!("{}::{}", peer.public_key, peer.addresses.best().unwrap()) } #[allow(clippy::ptr_arg)] @@ -147,7 +148,7 @@ pub fn parse_from_short_str(s: &String) -> Option { Some(Peer::new( pk, node_id, - vec![address].into(), + MultiaddressesWithStats::from_addresses_with_source(vec![address], &PeerAddressSource::Config), Default::default(), Default::default(), Default::default(), diff --git a/comms/core/examples/stress/service.rs b/comms/core/examples/stress/service.rs index 4c2af5dbb6..d10249065e 100644 --- a/comms/core/examples/stress/service.rs +++ b/comms/core/examples/stress/service.rs @@ -63,9 +63,9 @@ pub fn start_service( let (request_tx, request_rx) = mpsc::channel(1); println!( - "Node credentials are {}::{} (local_listening_addr='{}')", + "Node credentials are {}::{:?} (local_listening_addr='{}')", node_identity.public_key().to_hex(), - node_identity.public_address(), + node_identity.public_addresses(), comms_node.listening_address(), ); diff --git a/comms/core/examples/tor.rs b/comms/core/examples/tor.rs index babf99acc2..21e126e68a 100644 --- 
a/comms/core/examples/tor.rs +++ b/comms/core/examples/tor.rs @@ -10,6 +10,7 @@ use rand::{rngs::OsRng, thread_rng, RngCore}; use tari_comms::{ message::{InboundMessage, OutboundMessage}, multiaddr::Multiaddr, + net_address::{MultiaddressesWithStats, PeerAddressSource}, peer_manager::{NodeId, NodeIdentity, Peer, PeerFeatures}, pipeline, pipeline::SinkService, @@ -84,15 +85,15 @@ async fn run() -> Result<(), Error> { println!("Comms nodes started!"); println!( - "Node 1 is '{}' with address '{}' (local_listening_addr='{}')", + "Node 1 is '{}' with address '{:?}' (local_listening_addr='{}')", node_identity1.node_id().short_str(), - node_identity1.public_address(), + node_identity1.public_addresses(), comms_node1.listening_address(), ); println!( - "Node 2 is '{}' with address '{}' (local_listening_addr='{}')", + "Node 2 is '{}' with address '{:?}' (local_listening_addr='{}')", node_identity2.node_id().short_str(), - node_identity2.public_address(), + node_identity2.public_addresses(), comms_node2.listening_address(), ); @@ -102,7 +103,10 @@ async fn run() -> Result<(), Error> { .add_peer(Peer::new( node_identity2.public_key().clone(), node_identity2.node_id().clone(), - vec![node_identity2.public_address()].into(), + MultiaddressesWithStats::from_addresses_with_source( + node_identity2.public_addresses(), + &PeerAddressSource::Config, + ), Default::default(), PeerFeatures::COMMUNICATION_CLIENT, Default::default(), @@ -204,8 +208,8 @@ async fn setup_node_with_tor>( .await?; println!( - "Tor hidden service created with address '{}'", - comms_node.node_identity().public_address() + "Tor hidden service created with address '{:?}'", + comms_node.node_identity().public_addresses() ); Ok((comms_node, inbound_rx, outbound_tx)) diff --git a/comms/core/examples/vanity_id.rs b/comms/core/examples/vanity_id.rs index f5bfd8cda1..059895188d 100644 --- a/comms/core/examples/vanity_id.rs +++ b/comms/core/examples/vanity_id.rs @@ -114,7 +114,7 @@ fn start_miner(id: usize, prefix: String, tx: mpsc::Sender) -> Res if tx .try_send(NodeIdentity::new( k, - Multiaddr::empty(), + vec![Multiaddr::empty()], PeerFeatures::COMMUNICATION_NODE, )) .is_err() diff --git a/comms/core/src/bounded_executor.rs b/comms/core/src/bounded_executor.rs index 2754e51e06..2ce811083d 100644 --- a/comms/core/src/bounded_executor.rs +++ b/comms/core/src/bounded_executor.rs @@ -22,15 +22,10 @@ use std::{future::Future, sync::Arc}; -use futures::future::Either; use tokio::{ - runtime, sync::{OwnedSemaphorePermit, Semaphore}, task::JoinHandle, }; -use tracing::{span, Instrument, Level}; - -use crate::runtime::current; /// Error emitted from [`try_spawn`](self::BoundedExecutor::try_spawn) when there are no tasks available #[derive(Debug)] @@ -41,26 +36,21 @@ pub struct TrySpawnError; /// Use the asynchronous spawn method to spawn a task. If a given number of tasks are already spawned and have not /// completed, the spawn function will block (asynchronously) until a previously spawned task completes. 
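Several of the example and test changes above replace the single `public_address()` accessor with an address list. A minimal sketch of the accessors as they appear in this diff, with the secret key and addresses as placeholder values:

    use tari_comms::{
        multiaddr::Multiaddr,
        peer_manager::{NodeIdentity, PeerFeatures},
        types::CommsSecretKey,
    };

    fn main() {
        // A fresh identity now starts from a list of public addresses.
        let first: Multiaddr = "/ip4/203.0.113.7/tcp/18189".parse().unwrap();
        let node_identity = NodeIdentity::new(
            CommsSecretKey::default(),
            vec![first],
            PeerFeatures::COMMUNICATION_NODE,
        );

        // Further addresses (for example one discovered at startup) are added
        // rather than overwriting the existing one.
        let second: Multiaddr = "/ip4/198.51.100.1/tcp/18189".parse().unwrap();
        if !node_identity.public_addresses().contains(&second) {
            node_identity.add_public_address(second);
        }

        println!("first: {}", node_identity.first_public_address());
        println!("all:   {:?}", node_identity.public_addresses());
    }
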
pub struct BoundedExecutor { - inner: runtime::Handle, + // inner: runtime::Handle, semaphore: Arc, max_available: usize, } impl BoundedExecutor { - pub fn new(executor: runtime::Handle, num_permits: usize) -> Self { + pub fn new(num_permits: usize) -> Self { Self { - inner: executor, semaphore: Arc::new(Semaphore::new(num_permits)), max_available: num_permits, } } - pub fn from_current(num_permits: usize) -> Self { - Self::new(current(), num_permits) - } - pub fn allow_maximum() -> Self { - Self::new(current(), Self::max_theoretical_tasks()) + Self::new(Self::max_theoretical_tasks()) } pub const fn max_theoretical_tasks() -> usize { @@ -70,19 +60,16 @@ impl BoundedExecutor { usize::MAX >> 4 } - #[inline] pub fn can_spawn(&self) -> bool { self.num_available() > 0 } /// Returns the remaining number of tasks that can be spawned on this executor without waiting. - #[inline] pub fn num_available(&self) -> usize { self.semaphore.available_permits() } /// Returns the maximum number of concurrent tasks that can be spawned on this executor without waiting. - #[inline] pub fn max_available(&self) -> usize { self.max_available } @@ -114,13 +101,12 @@ impl BoundedExecutor { /// # Examples /// /// ``` - /// use tokio::runtime::Runtime; + /// use tokio::runtime::Handle; /// use tari_comms::bounded_executor::BoundedExecutor; /// /// # fn dox() { /// // Create the runtime - /// let mut rt = Runtime::new().unwrap(); - /// let executor = BoundedExecutor::new(rt.handle().clone(), 1); + /// let executor = BoundedExecutor::new(1); /// /// // Spawn a future onto the runtime /// // NOTE: BoundedExecutor::spawn is an async function and therefore, must be polled/awaited for the task to be spawned @@ -132,8 +118,8 @@ impl BoundedExecutor { /// println!("will always run after the first task"); /// }); /// - /// rt.block_on(task1); - /// rt.block_on(task2); + /// Handle::current().block_on(task1); + /// Handle::current().block_on(task2); /// # } /// ``` /// @@ -146,16 +132,9 @@ impl BoundedExecutor { F: Future + Send + 'static, F::Output: Send + 'static, { - let span = span!(Level::TRACE, "bounded_executor::waiting_time"); // SAFETY: acquire_owned only fails if the semaphore is closed (i.e self.semaphore.close() is called) - this // never happens in this implementation - let permit = self - .semaphore - .clone() - .acquire_owned() - .instrument(span) - .await - .expect("semaphore closed"); + let permit = self.semaphore.clone().acquire_owned().await.expect("semaphore closed"); self.do_spawn(permit, future) } @@ -164,96 +143,16 @@ impl BoundedExecutor { F: Future + Send + 'static, F::Output: Send + 'static, { - self.inner.spawn(async move { - let span = span!(Level::TRACE, "bounded_executor::do_work"); - let ret = future.instrument(span).await; + // let task = task::Builder::new().inner + tokio::spawn(async move { // Task is finished, release the permit + let ret = future.await; drop(permit); ret }) } } -/// A task executor that can be configured to be bounded or unbounded. -pub struct OptionallyBoundedExecutor { - inner: Either, -} - -impl OptionallyBoundedExecutor { - /// Create a new OptionallyBoundedExecutor. If `num_permits` is `None` the executor will be unbounded. - pub fn new(executor: runtime::Handle, num_permits: Option) -> Self { - Self { - inner: num_permits - .map(|n| Either::Right(BoundedExecutor::new(executor.clone(), n))) - .unwrap_or_else(|| Either::Left(executor)), - } - } - - /// Create a new OptionallyBoundedExecutor from the current tokio context. 
If `num_permits` is `None` the executor - /// will be unbounded. - pub fn from_current(num_permits: Option) -> Self { - Self::new(current(), num_permits) - } - - /// Returns true if this executor can spawn, otherwise false. - pub fn can_spawn(&self) -> bool { - match &self.inner { - Either::Left(_) => true, - Either::Right(exec) => exec.can_spawn(), - } - } - - /// Try spawn a new task returning its `JoinHandle`. An error is returned if the executor is bounded and currently - /// full. - pub fn try_spawn(&self, future: F) -> Result, TrySpawnError> - where - F: Future + Send + 'static, - F::Output: Send + 'static, - { - match &self.inner { - Either::Left(exec) => Ok(exec.spawn(future)), - Either::Right(exec) => exec.try_spawn(future), - } - } - - /// Spawns a new task returning its `JoinHandle`. If the executor is running `num_permits` tasks, this waits until a - /// task is available. - pub async fn spawn(&self, future: F) -> JoinHandle - where - F: Future + Send + 'static, - F::Output: Send + 'static, - { - match &self.inner { - Either::Left(exec) => exec.spawn(future), - Either::Right(exec) => exec.spawn(future).await, - } - } - - /// Returns the number tasks that can be spawned on this executor without blocking. - pub fn num_available(&self) -> usize { - match &self.inner { - Either::Left(_) => usize::MAX, - Either::Right(exec) => exec.num_available(), - } - } - - /// Returns the max number tasks that can be performed concurrenly - pub fn max_available(&self) -> Option { - match &self.inner { - Either::Left(_) => None, - Either::Right(exec) => Some(exec.max_available()), - } - } -} - -impl From for OptionallyBoundedExecutor { - fn from(handle: runtime::Handle) -> Self { - Self { - inner: Either::Left(handle), - } - } -} - #[cfg(test)] mod test { use std::{ @@ -267,13 +166,12 @@ mod test { use tokio::time::sleep; use super::*; - use crate::runtime; - #[runtime::test] + #[tokio::test] async fn spawn() { let flag = Arc::new(AtomicBool::new(false)); let flag_cloned = flag.clone(); - let executor = BoundedExecutor::new(runtime::current(), 1); + let executor = BoundedExecutor::new(1); // Spawn 1 let task1_fut = executor diff --git a/comms/core/src/builder/comms_node.rs b/comms/core/src/builder/comms_node.rs index c7d21db50e..a770af2202 100644 --- a/comms/core/src/builder/comms_node.rs +++ b/comms/core/src/builder/comms_node.rs @@ -227,15 +227,20 @@ impl UnspawnedCommsNode { ctl.set_proxied_addr(listening_info.bind_address()); let hs = ctl.create_hidden_service().await?; let onion_addr = hs.get_onion_address(); - if node_identity.public_address() != onion_addr { - node_identity.set_public_address(onion_addr); + if !node_identity.public_addresses().contains(&onion_addr) { + node_identity.add_public_address(onion_addr); } hidden_service = Some(hs); } info!( target: LOG_TARGET, - "Your node's public address is '{}'", - node_identity.public_address() + "Your node's public addresses are '{}'", + node_identity + .public_addresses() + .iter() + .map(|a| a.to_string()) + .collect::>() + .join(", ") ); // Spawn liveness check now that we have the final address @@ -244,7 +249,7 @@ impl UnspawnedCommsNode { .map(|interval| { LivenessCheck::spawn( transport, - node_identity.public_address(), + node_identity.first_public_address(), interval, shutdown_signal.clone(), ) diff --git a/comms/core/src/builder/tests.rs b/comms/core/src/builder/tests.rs index cc052b8d92..a2f3eb657f 100644 --- a/comms/core/src/builder/tests.rs +++ b/comms/core/src/builder/tests.rs @@ -41,6 +41,7 @@ use crate::{ 
message::{InboundMessage, OutboundMessage}, multiaddr::{Multiaddr, Protocol}, multiplexing::Substream, + net_address::{MultiaddressesWithStats, PeerAddressSource}, peer_manager::{Peer, PeerFeatures}, pipeline, pipeline::SinkService, @@ -49,7 +50,6 @@ use crate::{ ProtocolEvent, Protocols, }, - runtime, test_utils::node_identity::build_node_identity, transports::MemoryTransport, CommsNode, @@ -68,7 +68,7 @@ async fn spawn_node( .parse::() .unwrap(); let node_identity = build_node_identity(PeerFeatures::COMMUNICATION_NODE); - node_identity.set_public_address(addr.clone()); + node_identity.add_public_address(addr.clone()); let (inbound_tx, inbound_rx) = mpsc::channel(10); let (outbound_tx, outbound_rx) = mpsc::channel(10); @@ -108,7 +108,7 @@ async fn spawn_node( (comms_node, inbound_rx, outbound_tx, messaging_events_sender) } -#[runtime::test] +#[tokio::test] async fn peer_to_peer_custom_protocols() { static TEST_PROTOCOL: Bytes = Bytes::from_static(b"/tari/test"); static ANOTHER_TEST_PROTOCOL: Bytes = Bytes::from_static(b"/tari/test-again"); @@ -140,7 +140,10 @@ async fn peer_to_peer_custom_protocols() { .add_peer(Peer::new( node_identity2.public_key().clone(), node_identity2.node_id().clone(), - node_identity2.public_address().clone().into(), + MultiaddressesWithStats::from_addresses_with_source( + node_identity2.public_addresses().clone(), + &PeerAddressSource::Config, + ), Default::default(), Default::default(), vec![TEST_PROTOCOL.clone(), ANOTHER_TEST_PROTOCOL.clone()], @@ -197,7 +200,7 @@ async fn peer_to_peer_custom_protocols() { comms_node2.wait_until_shutdown().await; } -#[runtime::test] +#[tokio::test] async fn peer_to_peer_messaging() { const NUM_MSGS: usize = 100; let shutdown = Shutdown::new(); @@ -215,7 +218,10 @@ async fn peer_to_peer_messaging() { .add_peer(Peer::new( node_identity2.public_key().clone(), node_identity2.node_id().clone(), - node_identity2.public_address().clone().into(), + MultiaddressesWithStats::from_addresses_with_source( + node_identity2.public_addresses(), + &PeerAddressSource::Config, + ), Default::default(), Default::default(), Default::default(), @@ -278,7 +284,7 @@ async fn peer_to_peer_messaging() { comms_node2.wait_until_shutdown().await; } -#[runtime::test] +#[tokio::test] async fn peer_to_peer_messaging_simultaneous() { const NUM_MSGS: usize = 100; let shutdown = Shutdown::new(); @@ -302,7 +308,10 @@ async fn peer_to_peer_messaging_simultaneous() { .add_peer(Peer::new( node_identity2.public_key().clone(), node_identity2.node_id().clone(), - node_identity2.public_address().clone().into(), + MultiaddressesWithStats::from_addresses_with_source( + node_identity2.public_addresses(), + &PeerAddressSource::Config, + ), Default::default(), Default::default(), Default::default(), @@ -315,7 +324,10 @@ async fn peer_to_peer_messaging_simultaneous() { .add_peer(Peer::new( node_identity1.public_key().clone(), node_identity1.node_id().clone(), - node_identity1.public_address().clone().into(), + MultiaddressesWithStats::from_addresses_with_source( + node_identity1.public_addresses(), + &PeerAddressSource::Config, + ), Default::default(), Default::default(), Default::default(), diff --git a/comms/core/src/connection_manager/common.rs b/comms/core/src/connection_manager/common.rs index 60645bb0e3..9389047fb8 100644 --- a/comms/core/src/connection_manager/common.rs +++ b/comms/core/src/connection_manager/common.rs @@ -20,7 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS 
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{convert::TryFrom, net::Ipv6Addr}; +use std::{convert::TryInto, net::Ipv6Addr}; use log::*; use tokio::io::{AsyncRead, AsyncWrite}; @@ -28,9 +28,8 @@ use tokio::io::{AsyncRead, AsyncWrite}; use crate::{ connection_manager::error::ConnectionManagerError, multiaddr::{Multiaddr, Protocol}, - peer_manager::{IdentitySignature, NodeId, NodeIdentity, Peer, PeerFeatures, PeerFlags}, - proto, - proto::identity::PeerIdentityMsg, + net_address::{MultiaddrWithStats, MultiaddressesWithStats, PeerAddressSource}, + peer_manager::{NodeId, NodeIdentity, Peer, PeerFlags, PeerIdentityClaim}, protocol, protocol::{NodeNetworkInfo, ProtocolId}, types::CommsPublicKey, @@ -39,9 +38,6 @@ use crate::{ const LOG_TARGET: &str = "comms::connection_manager::common"; -/// The maximum size of the peer's user agent string. If the peer sends a longer string it is truncated. -const MAX_USER_AGENT_LEN: usize = 100; - /// Performs the identity exchange protocol on the given socket. pub(super) async fn perform_identity_exchange< 'p, @@ -52,11 +48,41 @@ pub(super) async fn perform_identity_exchange< node_identity: &NodeIdentity, our_supported_protocols: P, network_info: NodeNetworkInfo, -) -> Result { +) -> Result { let peer_identity = protocol::identity_exchange(node_identity, our_supported_protocols, network_info, socket).await?; - Ok(peer_identity) + Ok(peer_identity.try_into()?) +} + +/// Validate the peer identity info. +/// +/// The following process is used to validate the peer: +/// 1. Check the offered node identity is a valid base node identity (TODO: This won't work for DAN nodes) +/// 1. Check if we know the peer, if so, is the peer banned, if so, return an error +/// 1. Check that the offered addresses are valid +/// +/// If the `allow_test_addrs` parameter is true, loopback, local link and other addresses normally not considered valid +/// for p2p comms will be accepted. +pub(super) async fn validate_peer_identity( + authenticated_public_key: &CommsPublicKey, + peer_identity: &PeerIdentityClaim, + allow_test_addrs: bool, +) -> Result<(), ConnectionManagerError> { + validate_addresses(&peer_identity.addresses, allow_test_addrs)?; + if peer_identity.addresses.is_empty() { + return Err(ConnectionManagerError::PeerIdentityNoAddresses); + } + + if !peer_identity.signature.is_valid( + authenticated_public_key, + peer_identity.features, + &peer_identity.addresses, + ) { + return Err(ConnectionManagerError::PeerIdentityInvalidSignature); + } + + Ok(()) } /// Validate the peer identity info. 
@@ -73,33 +99,20 @@ pub(super) async fn validate_and_add_peer_from_peer_identity( peer_manager: &PeerManager, known_peer: Option, authenticated_public_key: CommsPublicKey, - mut peer_identity: PeerIdentityMsg, - dialed_addr: Option<&Multiaddr>, + peer_identity: &PeerIdentityClaim, allow_test_addrs: bool, -) -> Result<(NodeId, Vec), ConnectionManagerError> { +) -> Result { let peer_node_id = NodeId::from_public_key(&authenticated_public_key); - let addresses = peer_identity - .addresses - .into_iter() - .filter_map(|addr_bytes| Multiaddr::try_from(addr_bytes).ok()) - .collect::>(); - // TODO: #banheuristic - validate_peer_addresses(&addresses, allow_test_addrs)?; - - if addresses.is_empty() { - return Err(ConnectionManagerError::PeerIdentityNoValidAddresses); - } - - let supported_protocols = peer_identity - .supported_protocols - .into_iter() - .map(bytes::Bytes::from) - .collect::>(); - - peer_identity.user_agent.truncate(MAX_USER_AGENT_LEN); + let addresses = MultiaddressesWithStats::from_addresses_with_source( + peer_identity.addresses.clone(), + &PeerAddressSource::FromPeerConnection { + peer_identity_claim: peer_identity.clone(), + }, + ); + validate_addresses_and_source(&addresses, &authenticated_public_key, allow_test_addrs)?; - // Add or update the peer + // Note: the peer will be merged in the db if it already exists let peer = match known_peer { Some(mut peer) => { debug!( @@ -107,19 +120,15 @@ pub(super) async fn validate_and_add_peer_from_peer_identity( "Peer '{}' already exists in peer list. Updating.", peer.node_id.short_str() ); - peer.connection_stats.set_connection_success(); - peer.addresses = addresses.into(); - peer.set_offline(false); - if let Some(addr) = dialed_addr { - peer.addresses.mark_last_seen_now(addr); - } - peer.features = PeerFeatures::from_bits_truncate(peer_identity.features); - peer.supported_protocols = supported_protocols.clone(); - peer.user_agent = peer_identity.user_agent; - let identity_sig = peer_identity - .identity_signature - .ok_or(ConnectionManagerError::PeerIdentityNoSignature)?; - add_valid_identity_signature_to_peer(&mut peer, identity_sig)?; + peer.addresses + .update_addresses(&peer_identity.addresses, &PeerAddressSource::FromPeerConnection { + peer_identity_claim: peer_identity.clone(), + }); + + peer.features = peer_identity.features; + peer.supported_protocols = peer_identity.supported_protocols(); + peer.user_agent = peer_identity.user_agent().unwrap_or_default(); + peer }, None => { @@ -128,49 +137,26 @@ pub(super) async fn validate_and_add_peer_from_peer_identity( "Peer '{}' does not exist in peer list. 
Adding.", peer_node_id.short_str() ); - let mut new_peer = Peer::new( + Peer::new( authenticated_public_key.clone(), peer_node_id.clone(), - addresses.into(), + MultiaddressesWithStats::from_addresses_with_source( + peer_identity.addresses.clone(), + &PeerAddressSource::FromPeerConnection { + peer_identity_claim: peer_identity.clone(), + }, + ), PeerFlags::empty(), - PeerFeatures::from_bits_truncate(peer_identity.features), - supported_protocols.clone(), - peer_identity.user_agent, - ); - new_peer.connection_stats.set_connection_success(); - let identity_sig = peer_identity - .identity_signature - .ok_or(ConnectionManagerError::PeerIdentityNoSignature)?; - add_valid_identity_signature_to_peer(&mut new_peer, identity_sig)?; - if let Some(addr) = dialed_addr { - new_peer.addresses.mark_last_seen_now(addr); - } - new_peer + peer_identity.features, + peer_identity.supported_protocols(), + peer_identity.user_agent().unwrap_or_default(), + ) }, }; peer_manager.add_peer(peer).await?; - Ok((peer_node_id, supported_protocols)) -} - -fn add_valid_identity_signature_to_peer( - peer: &mut Peer, - identity_sig: proto::identity::IdentitySignature, -) -> Result<(), ConnectionManagerError> { - let identity_sig = - IdentitySignature::try_from(identity_sig).map_err(|_| ConnectionManagerError::PeerIdentityInvalidSignature)?; - - if !identity_sig.is_valid_for_peer(peer) { - warn!( - target: LOG_TARGET, - "Peer {} sent invalid identity signature", peer.node_id - ); - return Err(ConnectionManagerError::PeerIdentityInvalidSignature); - } - - peer.identity_signature = Some(identity_sig); - Ok(()) + Ok(peer_node_id) } pub(super) async fn find_unbanned_peer( @@ -185,19 +171,50 @@ pub(super) async fn find_unbanned_peer( } /// Checks that the given peer addresses are well-formed and valid. If allow_test_addrs is false, all localhost and -/// memory addresses will be rejected. -pub fn validate_peer_addresses<'a, A: IntoIterator>( - addresses: A, +/// memory addresses will be rejected. Also checks that the source (signature of the address) is correct +pub fn validate_addresses_and_source( + addresses: &MultiaddressesWithStats, + public_key: &CommsPublicKey, allow_test_addrs: bool, ) -> Result<(), ConnectionManagerError> { - let mut has_address = false; + for addr in addresses.addresses() { + validate_address_and_source(public_key, addr, allow_test_addrs)?; + } + + Ok(()) +} + +/// Checks that the given peer addresses are well-formed and valid. If allow_test_addrs is false, all localhost and +/// memory addresses will be rejected. 
+pub fn validate_addresses(addresses: &[Multiaddr], allow_test_addrs: bool) -> Result<(), ConnectionManagerError> { for addr in addresses { - has_address = true; validate_address(addr, allow_test_addrs)?; } - if !has_address { - return Err(ConnectionManagerError::PeerIdentityNoAddresses); + + Ok(()) +} + +pub fn validate_address_and_source( + public_key: &CommsPublicKey, + addr: &MultiaddrWithStats, + allow_test_addrs: bool, +) -> Result<(), ConnectionManagerError> { + match addr.source { + PeerAddressSource::Config => (), + _ => { + let claim = addr + .source + .peer_identity_claim() + .ok_or(ConnectionManagerError::PeerIdentityInvalidSignature)?; + if !claim.signature.is_valid(public_key, claim.features, &claim.addresses) { + return Err(ConnectionManagerError::PeerIdentityInvalidSignature); + } + if !claim.addresses.contains(addr.address()) { + return Err(ConnectionManagerError::PeerIdentityInvalidSignature); + } + }, } + validate_address(addr.address(), allow_test_addrs)?; Ok(()) } @@ -318,7 +335,7 @@ mod test { multiaddr!(Memory(0u64)), ]; - validate_peer_addresses(&valid, false).unwrap(); + validate_addresses(&valid, false).unwrap(); for addr in invalid { validate_address(addr, false).unwrap_err(); } @@ -346,7 +363,7 @@ mod test { multiaddr!(Memory(0u64)), ]; - validate_peer_addresses(&valid, true).unwrap(); + validate_addresses(&valid, true).unwrap(); for addr in invalid { validate_address(addr, true).unwrap_err(); } diff --git a/comms/core/src/connection_manager/dial_state.rs b/comms/core/src/connection_manager/dial_state.rs index c7ab4e339d..3651b22449 100644 --- a/comms/core/src/connection_manager/dial_state.rs +++ b/comms/core/src/connection_manager/dial_state.rs @@ -87,4 +87,8 @@ impl DialState { pub fn peer(&self) -> &Peer { &self.peer } + + pub fn peer_mut(&mut self) -> &mut Peer { + &mut self.peer + } } diff --git a/comms/core/src/connection_manager/dialer.rs b/comms/core/src/connection_manager/dialer.rs index c3154816b8..cf8077d63c 100644 --- a/comms/core/src/connection_manager/dialer.rs +++ b/comms/core/src/connection_manager/dialer.rs @@ -20,7 +20,11 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{collections::HashMap, sync::Arc, time::Duration}; +use std::{ + collections::HashMap, + sync::Arc, + time::{Duration, Instant}, +}; use futures::{ future, @@ -53,10 +57,10 @@ use crate::{ }, multiaddr::Multiaddr, multiplexing::Yamux, + net_address::PeerAddressSource, noise::{NoiseConfig, NoiseSocket}, - peer_manager::{NodeId, NodeIdentity, Peer, PeerFeatures, PeerManager}, + peer_manager::{NodeId, NodeIdentity, Peer, PeerManager}, protocol::ProtocolId, - runtime, transports::Transport, types::CommsPublicKey, }; @@ -74,7 +78,7 @@ pub(crate) enum DialerRequest { Option>>, ), CancelPendingDial(NodeId), - NotifyNewInboundConnection(PeerConnection), + NotifyNewInboundConnection(Box), } /// Responsible for dialing peers on the given transport. 
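The reworked dialer below records the outcome of every address it tries. A minimal sketch of the per-address bookkeeping calls it relies on; the address, durations, and error text here are placeholders:

    use std::time::Duration;
    use tari_comms::{
        multiaddr::Multiaddr,
        net_address::{MultiaddressesWithStats, PeerAddressSource},
    };

    fn main() {
        let addr: Multiaddr = "/ip4/203.0.113.7/tcp/18189".parse().unwrap();
        let mut addresses = MultiaddressesWithStats::from_addresses_with_source(
            vec![addr.clone()],
            &PeerAddressSource::Config,
        );

        // A successful dial records liveness plus timing stats for that address.
        addresses.mark_last_seen_now(&addr);
        addresses.update_address_stats(&addr, |a| {
            a.update_initial_dial_time(Duration::from_millis(250));
            a.update_latency(Duration::from_millis(40));
        });

        // A failed attempt marks that single address as bad.
        addresses.mark_failed_connection_attempt(&addr, "connection refused".to_string());

        // `best()` returns the currently preferred address.
        if let Some(best) = addresses.best() {
            println!("preferred address: {}", best);
        }
    }
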
@@ -133,7 +137,7 @@ where } pub fn spawn(self) -> JoinHandle<()> { - runtime::current().spawn(self.run()) + tokio::spawn(self.run()) } pub async fn run(mut self) { @@ -175,7 +179,7 @@ where NotifyNewInboundConnection(conn) => { if conn.is_connected() { - self.resolve_pending_dials(conn); + self.resolve_pending_dials(*conn); } }, } @@ -216,10 +220,47 @@ where let node_id = dial_state.peer().node_id.clone(); metrics::pending_connections(Some(&node_id), ConnectionDirection::Outbound).inc(); + // try save the peer back to the peer manager + let peer = dial_state.peer_mut(); + if let Ok(peer_connection) = &dial_result { + if let Some(peer_identity) = peer_connection.peer_identity_claim() { + peer.update_addresses(&peer_identity.addresses, &PeerAddressSource::FromPeerConnection { + peer_identity_claim: peer_identity.clone(), + }); + if let Some(unverified_data) = &peer_identity.unverified_data { + for protocol in &unverified_data.supported_protocols { + if !peer.supported_protocols.contains(protocol) { + peer.supported_protocols.push(protocol.clone()); + } + } + if peer.user_agent != unverified_data.user_agent && !unverified_data.user_agent.is_empty() { + peer.user_agent = unverified_data.user_agent.clone(); + } + } + } else { + error!(target: LOG_TARGET, "No identity claim provided"); + let _ = dial_state + .send_reply(Err(ConnectionManagerError::PeerConnectionError( + "No identity claim provided".to_string(), + ))) + .map_err(|e| error!(target: LOG_TARGET, "Could not send reply to dial request: {:?}", e)); + } + } + + let _ = self + .peer_manager + .add_peer(dial_state.peer().clone()) + .await + .map_err(|e| { + error!("Could not update peer data:{}", e); + let _ = dial_state + .send_reply(Err(ConnectionManagerError::PeerManagerError(e))) + .map_err(|e| error!(target: LOG_TARGET, "Could not send reply to dial request: {:?}", e)); + }); match &dial_result { Ok(conn) => { debug!(target: LOG_TARGET, "Successfully dialed peer '{}'", node_id); - self.notify_connection_manager(ConnectionManagerEvent::PeerConnected(conn.clone())) + self.notify_connection_manager(ConnectionManagerEvent::PeerConnected(conn.clone().into())) .await }, Err(err) => { @@ -273,7 +314,6 @@ where }); } - #[tracing::instrument(level = "trace", skip(self, pending_dials, reply_tx))] fn handle_dial_peer_request( &mut self, pending_dials: &mut DialFuturesUnordered, @@ -301,7 +341,6 @@ where let dial_state = DialState::new(peer, reply_tx, cancel_signal); let node_identity = Arc::clone(&self.node_identity); - let peer_manager = self.peer_manager.clone(); let conn_man_notifier = self.conn_man_notifier.clone(); let supported_protocols = self.our_supported_protocols.clone(); let noise_config = self.noise_config.clone(); @@ -320,15 +359,19 @@ where match Self::check_authenticated_public_key(&socket, &dial_state.peer().public_key) { Ok(pk) => pk, Err(err) => { + let mut dial_state = dial_state; + dial_state + .peer_mut() + .addresses + .mark_failed_connection_attempt(&addr, err.to_string()); return (dial_state, Err(err)); }, }; let result = Self::perform_socket_upgrade_procedure( - peer_manager, node_identity, socket, - addr, + addr.clone(), authenticated_public_key, conn_man_notifier, supported_protocols, @@ -337,7 +380,16 @@ where ) .await; - (dial_state, result) + if let Err(err) = &result { + let mut dial_state = dial_state; + dial_state + .peer_mut() + .addresses + .mark_failed_connection_attempt(&addr, err.to_string()); + (dial_state, result) + } else { + (dial_state, result) + } }, Err(err) => (dial_state, Err(err)), } @@ -365,12 
+417,7 @@ where Ok(authenticated_public_key) } - #[tracing::instrument( - level = "trace", - skip(peer_manager, socket, conn_man_notifier, config, cancel_signal) - )] async fn perform_socket_upgrade_procedure( - peer_manager: Arc, node_identity: Arc, mut socket: NoiseSocket, dialed_addr: Multiaddr, @@ -386,9 +433,6 @@ where "Starting peer identity exchange for peer with public key '{}'", authenticated_public_key ); - // Check if we know the peer and if it is banned - let known_peer = common::find_unbanned_peer(&peer_manager, &authenticated_public_key).await?; - let peer_identity = common::perform_identity_exchange( &mut socket, &node_identity, @@ -401,36 +445,12 @@ where return Err(ConnectionManagerError::DialCancelled); } - let features = PeerFeatures::from_bits_truncate(peer_identity.features); - debug!( - target: LOG_TARGET, - "Peer identity exchange succeeded on Outbound connection for peer '{}' (Features = {:?})", - authenticated_public_key, - features - ); - trace!(target: LOG_TARGET, "{:?}", peer_identity); - - let (peer_node_id, their_supported_protocols) = common::validate_and_add_peer_from_peer_identity( - &peer_manager, - known_peer, - authenticated_public_key, - peer_identity, - Some(&dialed_addr), - config.allow_test_addresses, - ) - .await?; + common::validate_peer_identity(&authenticated_public_key, &peer_identity, config.allow_test_addresses).await?; if cancel_signal.is_terminated() { return Err(ConnectionManagerError::DialCancelled); } - debug!( - target: LOG_TARGET, - "[ThisNode={}] Peer '{}' added to peer list.", - node_identity.node_id().short_str(), - peer_node_id.short_str() - ); - let muxer = Yamux::upgrade_connection(socket, CONNECTION_DIRECTION) .map_err(|err| ConnectionManagerError::YamuxUpgradeFailure(err.to_string()))?; @@ -439,19 +459,19 @@ where return Err(ConnectionManagerError::DialCancelled); } - peer_connection::create( + peer_connection::try_create( muxer, dialed_addr, - peer_node_id, - features, + NodeId::from_public_key(&authenticated_public_key), + peer_identity.features, CONNECTION_DIRECTION, conn_man_notifier, our_supported_protocols, - their_supported_protocols, + peer_identity.supported_protocols(), + peer_identity, ) } - #[tracing::instrument(level = "trace", skip(dial_state, noise_config, transport, backoff, config))] async fn dial_peer_with_retry( dial_state: DialState, noise_config: NoiseConfig, @@ -487,7 +507,8 @@ where }, // Inflight dial was cancelled (state, Err(ConnectionManagerError::DialCancelled)) => break (state, Err(ConnectionManagerError::DialCancelled)), - (state, Err(_err)) => { + (state, Err(err)) => { + warn!(target: LOG_TARGET, "Failed to dial peer {} | Attempt {} | Error: {}", state.peer().node_id.short_str(), state.num_attempts(), err); if state.num_attempts() >= config.max_dial_attempts { break (state, Err(ConnectionManagerError::ConnectFailedMaximumAttemptsReached)); } @@ -500,7 +521,7 @@ where }, // Delayed dial was cancelled _ = cancel_signal => { - debug!(target: LOG_TARGET, "[Attempt {}] Connection attempt cancelled for peer '{}'", current_state.num_attempts(), current_state.peer().node_id.short_str()); + warn!(target: LOG_TARGET, "[Attempt {}] Connection attempt cancelled for peer '{}'", current_state.num_attempts(), current_state.peer().node_id.short_str()); break (current_state, Err(ConnectionManagerError::DialCancelled)); } } @@ -511,7 +532,7 @@ where /// Returns ownership of the given `DialState` and a success or failure result for the dial, /// or None if the dial was cancelled inflight async fn dial_peer( - 
dial_state: DialState, + mut dial_state: DialState, noise_config: &NoiseConfig, transport: &TTransport, network_byte: u8, @@ -519,79 +540,107 @@ where DialState, Result<(NoiseSocket, Multiaddr), ConnectionManagerError>, ) { - let mut addr_iter = dial_state.peer().addresses.iter(); + let addresses = dial_state.peer().addresses.clone().into_vec(); let cancel_signal = dial_state.get_cancel_signal(); - loop { - let result = match addr_iter.next() { - Some(address) => { + for address in addresses { + debug!( + target: LOG_TARGET, + "Attempting address '{}' for peer '{}'", + address, + dial_state.peer().node_id.short_str() + ); + + let moved_address = address.clone(); + let node_id = dial_state.peer().node_id.clone(); + let dial_fut = async move { + let mut timer = Instant::now(); + let mut socket = + transport + .dial(&moved_address) + .await + .map_err(|err| ConnectionManagerError::TransportError { + address: moved_address.to_string(), + details: err.to_string(), + })?; + debug!( + target: LOG_TARGET, + "Socket established on '{}'. Performing noise upgrade protocol", moved_address + ); + let initial_dial_time = timer.elapsed(); + + debug!( + "Dialed peer: {} on address: {} on tcp after: {}", + node_id.short_str(), + moved_address, + timer.elapsed().as_millis() + ); + timer = Instant::now(); + + socket + .write(&[network_byte]) + .await + .map_err(|_| ConnectionManagerError::WireFormatSendFailed)?; + + // todo!("40 seconds?"); + let noise_socket = time::timeout( + Duration::from_secs(40), + noise_config.upgrade_socket(socket, ConnectionDirection::Outbound), + ) + .await + .map_err(|_| ConnectionManagerError::NoiseProtocolTimeout)??; + + let noise_upgrade_time = timer.elapsed(); + debug!( + "Dial - upgraded noise: {} on address: {} on tcp after: {}", + node_id.short_str(), + moved_address, + timer.elapsed().as_millis() + ); + + Result::<_, ConnectionManagerError>::Ok((initial_dial_time, noise_upgrade_time, noise_socket)) + }; + + pin_mut!(dial_fut); + let either = future::select(dial_fut, cancel_signal.clone()).await; + match either { + Either::Left((Ok((initial_dial_time, noise_upgrade_time, noise_socket)), _)) => { + dial_state.peer_mut().addresses.mark_last_seen_now(&address); + dial_state.peer_mut().addresses.update_address_stats(&address, |addr| { + // Initial dial time can be much slower due to tor discovery. + addr.update_initial_dial_time(initial_dial_time); + addr.update_latency(noise_upgrade_time); + }); + return (dial_state, Ok((noise_socket, address.clone()))); + }, + Either::Left((Err(err), _)) => { debug!( target: LOG_TARGET, - "Attempting address '{}' for peer '{}'", + "(Attempt {}) Dial failed on address '{}' for peer '{}' because '{}'", + dial_state.num_attempts(), address, - dial_state.peer().node_id.short_str() + dial_state.peer().node_id.short_str(), + err, ); - let dial_fut = - async move { - let mut socket = transport.dial(address).await.map_err(|err| { - ConnectionManagerError::TransportError { - address: address.to_string(), - details: err.to_string(), - } - })?; - debug!( - target: LOG_TARGET, - "Socket established on '{}'. 
Performing noise upgrade protocol", address - ); - - socket - .write(&[network_byte]) - .await - .map_err(|_| ConnectionManagerError::WireFormatSendFailed)?; - - let noise_socket = time::timeout( - Duration::from_secs(40), - noise_config.upgrade_socket(socket, ConnectionDirection::Outbound), - ) - .await - .map_err(|_| ConnectionManagerError::NoiseProtocolTimeout)??; - Result::<_, ConnectionManagerError>::Ok(noise_socket) - }; - - pin_mut!(dial_fut); - let either = future::select(dial_fut, cancel_signal.clone()).await; - match either { - Either::Left((Ok(noise_socket), _)) => Ok((noise_socket, address.clone())), - Either::Left((Err(err), _)) => { - debug!( - target: LOG_TARGET, - "(Attempt {}) Dial failed on address '{}' for peer '{}' because '{}'", - dial_state.num_attempts(), - address, - dial_state.peer().node_id.short_str(), - err, - ); - // Try the next address - continue; - }, - // Canceled - Either::Right(_) => { - debug!( - target: LOG_TARGET, - "Dial for peer '{}' cancelled", - dial_state.peer().node_id.short_str() - ); - Err(ConnectionManagerError::DialCancelled) - }, - } + dial_state + .peer_mut() + .addresses + .mark_failed_connection_attempt(&address, err.to_string()); + // Try the next address + continue; }, - // No more addresses to try - returning failure - None => Err(ConnectionManagerError::DialConnectFailedAllAddresses), - }; - - drop(addr_iter); - - break (dial_state, result); + // Canceled + Either::Right(_) => { + debug!( + target: LOG_TARGET, + "Dial for peer '{}' cancelled", + dial_state.peer().node_id.short_str() + ); + return (dial_state, Err(ConnectionManagerError::DialCancelled)); + }, + } } + + (dial_state, Err(ConnectionManagerError::DialConnectFailedAllAddresses)) } } diff --git a/comms/core/src/connection_manager/error.rs b/comms/core/src/connection_manager/error.rs index e4cc622164..0bac1ece21 100644 --- a/comms/core/src/connection_manager/error.rs +++ b/comms/core/src/connection_manager/error.rs @@ -69,8 +69,6 @@ pub enum ConnectionManagerError { NoiseError(String), #[error("Peer is banned, denying connection")] PeerBanned, - #[error("Unable to parse any of the network addresses offered by the connecting peer")] - PeerIdentityNoValidAddresses, #[error("Identity protocol failed: {0}")] IdentityProtocolError(#[from] IdentityProtocolError), #[error("The dial was cancelled")] diff --git a/comms/core/src/connection_manager/listener.rs b/comms/core/src/connection_manager/listener.rs index 15e4a89104..dc8db5143a 100644 --- a/comms/core/src/connection_manager/listener.rs +++ b/comms/core/src/connection_manager/listener.rs @@ -60,9 +60,8 @@ use crate::{ multiaddr::Multiaddr, multiplexing::Yamux, noise::NoiseConfig, - peer_manager::{NodeIdentity, PeerFeatures}, + peer_manager::NodeIdentity, protocol::ProtocolId, - runtime, transports::Transport, utils::multiaddr::multiaddr_to_socketaddr, PeerManager, @@ -110,7 +109,7 @@ where node_identity, shutdown_signal, our_supported_protocols: Vec::new(), - bounded_executor: BoundedExecutor::from_current(config.max_simultaneous_inbound_connects), + bounded_executor: BoundedExecutor::new(config.max_simultaneous_inbound_connects), liveness_session_count: Arc::new(AtomicUsize::new(config.liveness_max_sessions)), config, on_listening: oneshot_trigger::channel(), @@ -134,7 +133,7 @@ where pub async fn listen(self) -> Result { let on_listening = self.on_listening(); - runtime::current().spawn(self.run()); + tokio::spawn(self.run()); on_listening.await } @@ -223,7 +222,7 @@ where permit.fetch_sub(1, Ordering::SeqCst); let liveness = 
LivenessSession::new(socket); debug!(target: LOG_TARGET, "Started liveness session"); - runtime::current().spawn(async move { + tokio::spawn(async move { future::select(liveness.run(), shutdown_signal).await; permit.fetch_add(1, Ordering::SeqCst); }); @@ -262,7 +261,7 @@ where log_if_error!( target: LOG_TARGET, conn_man_notifier - .send(ConnectionManagerEvent::PeerConnected(peer_conn)) + .send(ConnectionManagerEvent::PeerConnected(peer_conn.into())) .await, "Failed to publish event because '{error}'", ); @@ -386,44 +385,28 @@ where ) .await?; - let features = PeerFeatures::from_bits_truncate(peer_identity.features); - debug!( - target: LOG_TARGET, - "Peer identity exchange succeeded on Inbound connection for peer '{}' (Features = {:?})", - authenticated_public_key, - features - ); - trace!(target: LOG_TARGET, "{:?}", peer_identity); - - let (peer_node_id, their_supported_protocols) = common::validate_and_add_peer_from_peer_identity( + let peer_node_id = common::validate_and_add_peer_from_peer_identity( &peer_manager, known_peer, authenticated_public_key, - peer_identity, - None, + &peer_identity, config.allow_test_addresses, ) .await?; - debug!( - target: LOG_TARGET, - "[ThisNode={}] Peer '{}' added to peer list.", - node_identity.node_id().short_str(), - peer_node_id.short_str() - ); - let muxer = Yamux::upgrade_connection(noise_socket, CONNECTION_DIRECTION) .map_err(|err| ConnectionManagerError::YamuxUpgradeFailure(err.to_string()))?; - peer_connection::create( + peer_connection::try_create( muxer, peer_addr, peer_node_id, - features, + peer_identity.features, CONNECTION_DIRECTION, conn_man_notifier, our_supported_protocols, - their_supported_protocols, + peer_identity.supported_protocols(), + peer_identity, ) } diff --git a/comms/core/src/connection_manager/liveness.rs b/comms/core/src/connection_manager/liveness.rs index cf73f2a06f..900f50f3bf 100644 --- a/comms/core/src/connection_manager/liveness.rs +++ b/comms/core/src/connection_manager/liveness.rs @@ -118,7 +118,7 @@ where let _ = self.tx_watch.send(LivenessStatus::Checking); match self.transport.dial(&self.address).await { Ok(mut socket) => { - info!(target: LOG_TARGET, "🔌 liveness dial took {:.2?}", timer.elapsed()); + debug!(target: LOG_TARGET, "🔌 liveness dial took {:.2?}", timer.elapsed()); if let Err(err) = socket.write(&[WireMode::Liveness.as_byte()]).await { warn!(target: LOG_TARGET, "🔌️ liveness failed to write byte: {}", err); self.tx_watch.send_replace(LivenessStatus::Unreachable); @@ -128,7 +128,7 @@ where loop { match self.ping_pong(&mut framed).await { Ok(Some(latency)) => { - info!(target: LOG_TARGET, "⚡️️ liveness check latency {:.2?}", latency); + debug!(target: LOG_TARGET, "⚡️️ liveness check latency {:.2?}", latency); self.tx_watch.send_replace(LivenessStatus::Live(latency)); }, Ok(None) => { @@ -183,13 +183,13 @@ mod test { use tokio_stream::StreamExt; use super::*; - use crate::{memsocket::MemorySocket, runtime}; + use crate::memsocket::MemorySocket; - #[runtime::test] + #[tokio::test] async fn echos() { let (inbound, outbound) = MemorySocket::new_pair(); let liveness = LivenessSession::new(inbound); - let join_handle = runtime::current().spawn(liveness.run()); + let join_handle = tokio::spawn(liveness.run()); let mut outbound = Framed::new(outbound, LinesCodec::new()); for _ in 0..10usize { outbound.send("ECHO".to_string()).await.unwrap() diff --git a/comms/core/src/connection_manager/manager.rs b/comms/core/src/connection_manager/manager.rs index 4af961713d..8e34ceada2 100644 --- 
a/comms/core/src/connection_manager/manager.rs +++ b/comms/core/src/connection_manager/manager.rs @@ -61,7 +61,7 @@ const DIALER_REQUEST_CHANNEL_SIZE: usize = 32; #[derive(Debug)] pub enum ConnectionManagerEvent { // Peer connection - PeerConnected(PeerConnection), + PeerConnected(Box), PeerDisconnected(ConnectionId, NodeId), PeerConnectFailed(NodeId, ConnectionManagerError), PeerInboundConnectFailed(ConnectionManagerError), diff --git a/comms/core/src/connection_manager/mod.rs b/comms/core/src/connection_manager/mod.rs index 82f3e981a2..fd77998d65 100644 --- a/comms/core/src/connection_manager/mod.rs +++ b/comms/core/src/connection_manager/mod.rs @@ -33,7 +33,7 @@ mod listener; mod metrics; mod common; -pub use common::validate_peer_addresses; +pub use common::{validate_address_and_source, validate_addresses, validate_addresses_and_source}; mod direction; pub use direction::ConnectionDirection; diff --git a/comms/core/src/connection_manager/peer_connection.rs b/comms/core/src/connection_manager/peer_connection.rs index b3165e9baa..293123cfc5 100644 --- a/comms/core/src/connection_manager/peer_connection.rs +++ b/comms/core/src/connection_manager/peer_connection.rs @@ -58,9 +58,8 @@ use crate::{ framing, framing::CanonicalFraming, multiplexing::{Control, IncomingSubstreams, Substream, Yamux}, - peer_manager::{NodeId, PeerFeatures}, + peer_manager::{NodeId, PeerFeatures, PeerIdentityClaim}, protocol::{ProtocolId, ProtocolNegotiation}, - runtime, utils::atomic_ref_counter::AtomicRefCounter, }; @@ -68,7 +67,7 @@ const LOG_TARGET: &str = "comms::connection_manager::peer_connection"; static ID_COUNTER: AtomicUsize = AtomicUsize::new(0); -pub fn create( +pub fn try_create( connection: Yamux, peer_addr: Multiaddr, peer_node_id: NodeId, @@ -77,6 +76,7 @@ pub fn create( event_notifier: mpsc::Sender, our_supported_protocols: Vec, their_supported_protocols: Vec, + peer_identity_claim: PeerIdentityClaim, ) -> Result { trace!( target: LOG_TARGET, @@ -95,6 +95,7 @@ pub fn create( peer_addr, direction, substream_counter, + peer_identity_claim, ); let peer_actor = PeerConnectionActor::new( id, @@ -106,7 +107,7 @@ pub fn create( our_supported_protocols, their_supported_protocols, ); - runtime::current().spawn(peer_actor.run()); + tokio::spawn(peer_actor.run()); Ok(peer_conn) } @@ -138,6 +139,7 @@ pub struct PeerConnection { started_at: Instant, substream_counter: AtomicRefCounter, handle_counter: Arc<()>, + peer_identity_claim: Option, } impl PeerConnection { @@ -149,6 +151,7 @@ impl PeerConnection { address: Multiaddr, direction: ConnectionDirection, substream_counter: AtomicRefCounter, + peer_identity_claim: PeerIdentityClaim, ) -> Self { Self { id, @@ -160,6 +163,31 @@ impl PeerConnection { started_at: Instant::now(), substream_counter, handle_counter: Arc::new(()), + peer_identity_claim: Some(peer_identity_claim), + } + } + + /// Should only be used in tests + pub(crate) fn unverified( + id: ConnectionId, + request_tx: mpsc::Sender, + peer_node_id: NodeId, + peer_features: PeerFeatures, + address: Multiaddr, + direction: ConnectionDirection, + substream_counter: AtomicRefCounter, + ) -> Self { + Self { + id, + request_tx, + peer_node_id, + peer_features, + address: Arc::new(address), + direction, + started_at: Instant::now(), + substream_counter, + handle_counter: Arc::new(()), + peer_identity_claim: None, } } @@ -205,6 +233,10 @@ impl PeerConnection { Arc::strong_count(&self.handle_counter) } + pub fn peer_identity_claim(&self) -> Option<&PeerIdentityClaim> { + self.peer_identity_claim.as_ref() + } + 
#[tracing::instrument(level = "trace", "peer_connection::open_substream", skip(self))] pub async fn open_substream( &mut self, diff --git a/comms/core/src/connection_manager/tests/listener_dialer.rs b/comms/core/src/connection_manager/tests/listener_dialer.rs index 5e0b26a92a..3d1139e97a 100644 --- a/comms/core/src/connection_manager/tests/listener_dialer.rs +++ b/comms/core/src/connection_manager/tests/listener_dialer.rs @@ -40,15 +40,15 @@ use crate::{ ConnectionManagerConfig, ConnectionManagerError, }, + net_address::{MultiaddressesWithStats, PeerAddressSource}, noise::NoiseConfig, peer_manager::PeerFeatures, protocol::ProtocolId, - runtime, test_utils::{build_peer_manager, node_identity::build_node_identity}, transports::MemoryTransport, }; -#[runtime::test] +#[tokio::test] async fn listen() -> Result<(), Box> { let (event_tx, _) = mpsc::channel(1); let mut shutdown = Shutdown::new(); @@ -76,9 +76,8 @@ async fn listen() -> Result<(), Box> { Ok(()) } -#[runtime::test] +#[tokio::test] async fn smoke() { - let rt_handle = runtime::current(); // This test sets up Dialer and Listener components, uses the Dialer to dial the Listener, // asserts the emitted events are correct, opens a substream, sends a small message over the substream, // receives and checks the message and then disconnects and shuts down. @@ -122,10 +121,10 @@ async fn smoke() { ); dialer.set_supported_protocols(supported_protocols.clone()); - let dialer_fut = rt_handle.spawn(dialer.run()); + let dialer_fut = tokio::spawn(dialer.run()); let mut peer = node_identity1.to_peer(); - peer.addresses = vec![address].into(); + peer.addresses = MultiaddressesWithStats::from_addresses_with_source(vec![address], &PeerAddressSource::Config); peer.set_id_for_test(1); let (reply_tx, reply_rx) = oneshot::channel(); @@ -183,9 +182,8 @@ async fn smoke() { timeout(Duration::from_secs(5), dialer_fut).await.unwrap().unwrap(); } -#[runtime::test] +#[tokio::test] async fn banned() { - let rt_handle = runtime::current(); let (event_tx, mut event_rx) = mpsc::channel(10); let mut shutdown = Shutdown::new(); @@ -231,10 +229,10 @@ async fn banned() { ); dialer.set_supported_protocols(supported_protocols); - let dialer_fut = rt_handle.spawn(dialer.run()); + let dialer_fut = tokio::spawn(dialer.run()); let mut peer = node_identity1.to_peer(); - peer.addresses = vec![address].into(); + peer.addresses = MultiaddressesWithStats::from_addresses_with_source(vec![address], &PeerAddressSource::Config); peer.set_id_for_test(1); let (reply_tx, reply_rx) = oneshot::channel(); diff --git a/comms/core/src/connection_manager/tests/manager.rs b/comms/core/src/connection_manager/tests/manager.rs index 8dc084ec24..714e297623 100644 --- a/comms/core/src/connection_manager/tests/manager.rs +++ b/comms/core/src/connection_manager/tests/manager.rs @@ -40,11 +40,10 @@ use crate::{ ConnectionManagerRequester, PeerConnectionError, }, + net_address::{MultiaddressesWithStats, PeerAddressSource}, noise::NoiseConfig, peer_manager::{NodeId, Peer, PeerFeatures, PeerFlags, PeerManagerError}, protocol::{ProtocolEvent, ProtocolId, Protocols}, - runtime, - runtime::task, test_utils::{ build_peer_manager, count_string_occurrences, @@ -54,7 +53,7 @@ use crate::{ transports::{MemoryTransport, TcpTransport}, }; -#[runtime::test] +#[tokio::test] async fn connect_to_nonexistent_peer() { let rt_handle = Handle::current(); let node_identity = build_node_identity(PeerFeatures::empty()); @@ -87,7 +86,7 @@ async fn connect_to_nonexistent_peer() { shutdown.trigger(); } -#[runtime::test] 
+#[tokio::test] #[allow(clippy::similar_names)] async fn dial_success() { static TEST_PROTO: ProtocolId = ProtocolId::from_static(b"/test/valid"); @@ -146,7 +145,7 @@ async fn dial_success() { .add_peer(Peer::new( node_identity2.public_key().clone(), node_identity2.node_id().clone(), - vec![public_address2].into(), + MultiaddressesWithStats::from_addresses_with_source(vec![public_address2], &PeerAddressSource::Config), PeerFlags::empty(), PeerFeatures::COMMUNICATION_CLIENT, Default::default(), @@ -199,7 +198,7 @@ async fn dial_success() { assert_eq!(buf, MSG); } -#[runtime::test] +#[tokio::test] #[allow(clippy::similar_names)] async fn dial_success_aux_tcp_listener() { static TEST_PROTO: ProtocolId = ProtocolId::from_static(b"/test/valid"); @@ -249,7 +248,7 @@ async fn dial_success_aux_tcp_listener() { .add_peer(Peer::new( node_identity1.public_key().clone(), node_identity1.node_id().clone(), - vec![tcp_listener_addr].into(), + MultiaddressesWithStats::from_addresses_with_source(vec![tcp_listener_addr], &PeerAddressSource::Config), PeerFlags::empty(), PeerFeatures::COMMUNICATION_CLIENT, Default::default(), @@ -295,7 +294,7 @@ async fn dial_success_aux_tcp_listener() { assert_eq!(buf, MSG); } -#[runtime::test] +#[tokio::test] async fn simultaneous_dial_events() { let mut shutdown = Shutdown::new(); @@ -337,7 +336,7 @@ async fn simultaneous_dial_events() { .add_peer(Peer::new( node_identities[1].public_key().clone(), node_identities[1].node_id().clone(), - vec![public_address2].into(), + MultiaddressesWithStats::from_addresses_with_source(vec![public_address2], &PeerAddressSource::Config), PeerFlags::empty(), PeerFeatures::COMMUNICATION_CLIENT, Default::default(), @@ -350,7 +349,7 @@ async fn simultaneous_dial_events() { .add_peer(Peer::new( node_identities[0].public_key().clone(), node_identities[0].node_id().clone(), - vec![public_address1].into(), + MultiaddressesWithStats::from_addresses_with_source(vec![public_address1], &PeerAddressSource::Config), PeerFlags::empty(), PeerFeatures::COMMUNICATION_CLIENT, Default::default(), @@ -389,7 +388,7 @@ async fn simultaneous_dial_events() { // assert!(count_string_occurrences(&events2, &["PeerDisconnected"]) >= 1); } -#[runtime::test] +#[tokio::test] async fn dial_cancelled() { let mut shutdown = Shutdown::new(); @@ -424,7 +423,7 @@ async fn dial_cancelled() { peer_manager1.add_peer(node_identity2.to_peer()).await.unwrap(); let (ready_tx, ready_rx) = oneshot::channel(); - let dial_result = task::spawn({ + let dial_result = tokio::spawn({ let mut cm = conn_man1.clone(); let node_id = node_identity2.node_id().clone(); async move { diff --git a/comms/core/src/connectivity/manager.rs b/comms/core/src/connectivity/manager.rs index b356fed12c..f3e78cf30c 100644 --- a/comms/core/src/connectivity/manager.rs +++ b/comms/core/src/connectivity/manager.rs @@ -54,7 +54,6 @@ use crate::{ ConnectionManagerRequester, }, peer_manager::NodeId, - runtime::task, utils::datetime::format_duration, NodeIdentity, PeerConnection, @@ -164,7 +163,7 @@ impl ConnectivityManagerActor { pub fn spawn(self) -> JoinHandle<()> { let mut mdc = vec![]; log_mdc::iter(|k, v| mdc.push((k.to_owned(), v.to_owned()))); - task::spawn(async { + tokio::spawn(async { log_mdc::extend(mdc); Self::run(self).await }) @@ -316,18 +315,20 @@ impl ConnectivityManagerActor { }, } match self.pool.get(&node_id) { - Some(state) if state.is_connected() => { - debug!( - target: LOG_TARGET, - "Found existing connection for peer `{}`", - node_id.short_str() - ); + Some(state) => { + if !state.is_connected() 
{ + warn!( + target: LOG_TARGET, + "Existing connection is present but is_connected is false for some reason...." + ); + } + if let Some(reply_tx) = reply_tx { let _result = reply_tx.send(Ok(state.connection().cloned().expect("Already checked"))); } }, - _ => { - debug!( + None => { + info!( target: LOG_TARGET, "No existing connection found for peer `{}`. Dialing...", node_id.short_str() @@ -477,12 +478,13 @@ impl ConnectivityManagerActor { fn mark_peer_failed(&mut self, node_id: NodeId) -> usize { let entry = self.get_connection_stat_mut(node_id); entry.set_connection_failed(); + entry.failed_attempts() } async fn on_peer_connection_failure(&mut self, node_id: &NodeId) -> Result<(), ConnectivityError> { if self.status.is_offline() { - debug!( + info!( target: LOG_TARGET, "Node is offline. Ignoring connection failure event for peer '{}'.", node_id ); @@ -498,10 +500,6 @@ impl ConnectivityManagerActor { node_id.short_str(), num_failed ); - if !self.peer_manager.set_offline(node_id, true).await? { - // Only publish the `PeerOffline` event if we change from online to offline - self.publish_event(ConnectivityEvent::PeerOffline(node_id.clone())); - } if let Some(peer) = self.peer_manager.find_by_node_id(node_id).await? { if !peer.is_banned() && @@ -544,11 +542,15 @@ impl ConnectivityManagerActor { event: &ConnectionManagerEvent, ) -> Result<(), ConnectivityError> { use ConnectionManagerEvent::{PeerConnectFailed, PeerConnected, PeerDisconnected}; - debug!(target: LOG_TARGET, "Received event: {}", event); match event { PeerConnected(new_conn) => { match self.on_new_connection(new_conn).await { TieBreak::KeepExisting => { + debug!( + target: LOG_TARGET, + "Discarding new connection to peer '{}' because we already have an existing connection", + new_conn.peer_node_id().short_str() + ); // Ignore event, we discarded the new connection and keeping the current one return Ok(()); }, @@ -605,7 +607,7 @@ impl ConnectivityManagerActor { let old_status = self.pool.set_status(node_id, new_status); if let Some(conn) = connection { - new_status = self.pool.insert_connection(conn); + new_status = self.pool.insert_connection(*conn); } if old_status != new_status { debug!( @@ -618,17 +620,15 @@ impl ConnectivityManagerActor { use ConnectionStatus::{Connected, Disconnected, Failed}; match (old_status, new_status) { - (_, Connected) => { - self.mark_peer_succeeded(node_id.clone()); - match self.pool.get_connection(&node_id).cloned() { - Some(conn) => { - self.publish_event(ConnectivityEvent::PeerConnected(conn)); - }, - None => unreachable!( - "Connection transitioning to CONNECTED state must always have a connection set i.e. \ - ConnectionPool::get_connection is Some" - ), - } + (_, Connected) => match self.pool.get_connection(&node_id).cloned() { + Some(conn) => { + self.mark_peer_succeeded(node_id.clone()); + self.publish_event(ConnectivityEvent::PeerConnected(conn.into())); + }, + None => unreachable!( + "Connection transitioning to CONNECTED state must always have a connection set i.e. \ + ConnectionPool::get_connection is Some" + ), }, (Connected, Disconnected) => { self.publish_event(ConnectivityEvent::PeerDisconnected(node_id)); @@ -668,7 +668,7 @@ impl ConnectivityManagerActor { }, Some(mut existing_conn) => { if self.tie_break_existing_connection(&existing_conn, new_conn) { - debug!( + warn!( target: LOG_TARGET, "Tie break: Keep new connection (id: {}, peer: {}, direction: {}). 
Disconnect existing \ connection (id: {}, peer: {}, direction: {})", diff --git a/comms/core/src/connectivity/requester.rs b/comms/core/src/connectivity/requester.rs index 0dcb353d83..20d008bf3f 100644 --- a/comms/core/src/connectivity/requester.rs +++ b/comms/core/src/connectivity/requester.rs @@ -56,11 +56,9 @@ pub type ConnectivityEventTx = broadcast::Sender; #[derive(Debug, Clone)] pub enum ConnectivityEvent { PeerDisconnected(NodeId), - PeerConnected(PeerConnection), + PeerConnected(Box), PeerConnectFailed(NodeId), PeerBanned(NodeId), - PeerOffline(NodeId), - ConnectivityStateInitialized, ConnectivityStateOnline(usize), ConnectivityStateDegraded(usize), @@ -76,7 +74,6 @@ impl fmt::Display for ConnectivityEvent { PeerConnected(node_id) => write!(f, "PeerConnected({})", node_id), PeerConnectFailed(node_id) => write!(f, "PeerConnectFailed({})", node_id), PeerBanned(node_id) => write!(f, "PeerBanned({})", node_id), - PeerOffline(node_id) => write!(f, "PeerOffline({})", node_id), ConnectivityStateInitialized => write!(f, "ConnectivityStateInitialized"), ConnectivityStateOnline(n) => write!(f, "ConnectivityStateOnline({})", n), ConnectivityStateDegraded(n) => write!(f, "ConnectivityStateDegraded({})", n), diff --git a/comms/core/src/connectivity/test.rs b/comms/core/src/connectivity/test.rs index 7dc7840fa0..ef5dd65d9f 100644 --- a/comms/core/src/connectivity/test.rs +++ b/comms/core/src/connectivity/test.rs @@ -38,8 +38,6 @@ use crate::{ connection_manager::{ConnectionManagerError, ConnectionManagerEvent}, connectivity::ConnectivityEventRx, peer_manager::{Peer, PeerFeatures}, - runtime, - runtime::task, test_utils::{ build_peer_manager, mocks::{create_connection_manager_mock, create_peer_connection_mock_pair, ConnectionManagerMockState}, @@ -64,7 +62,7 @@ fn setup_connectivity_manager( let node_identity = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let (cm_requester, mock) = create_connection_manager_mock(); let cm_mock_state = mock.get_shared_state(); - task::spawn(mock.run()); + tokio::spawn(mock.run()); let shutdown = Shutdown::new(); let (request_tx, request_rx) = mpsc::channel(1); @@ -103,7 +101,7 @@ async fn add_test_peers(peer_manager: &PeerManager, n: usize) -> Vec { peers } -#[runtime::test] +#[tokio::test] async fn connecting_peers() { let (mut connectivity, mut event_stream, node_identity, peer_manager, cm_mock_state, _shutdown) = setup_connectivity_manager(Default::default()); @@ -125,7 +123,7 @@ async fn connecting_peers() { // All connections succeeded for conn in &connections { - cm_mock_state.publish_event(ConnectionManagerEvent::PeerConnected(conn.clone())); + cm_mock_state.publish_event(ConnectionManagerEvent::PeerConnected(conn.clone().into())); } let _events = collect_try_recv!(event_stream, take = 11, timeout = Duration::from_secs(10)); @@ -139,7 +137,7 @@ async fn connecting_peers() { } #[allow(clippy::too_many_lines)] -#[runtime::test] +#[tokio::test] async fn online_then_offline_then_online() { let (mut connectivity, mut event_stream, node_identity, peer_manager, cm_mock_state, _shutdown) = setup_connectivity_manager(ConnectivityConfig { @@ -186,10 +184,10 @@ async fn online_then_offline_then_online() { unpack_enum!(ConnectivityEvent::ConnectivityStateInitialized = events.remove(0)); for conn in connections.iter().skip(1) { - cm_mock_state.publish_event(ConnectionManagerEvent::PeerConnected(conn.clone())); + cm_mock_state.publish_event(ConnectionManagerEvent::PeerConnected(conn.clone().into())); } for conn in &client_connections { - 
cm_mock_state.publish_event(ConnectionManagerEvent::PeerConnected(conn.clone())); + cm_mock_state.publish_event(ConnectionManagerEvent::PeerConnected(conn.clone().into())); } connectivity @@ -253,7 +251,7 @@ async fn online_then_offline_then_online() { .map(|(conn, _, _, _)| conn) .collect::>(); for conn in connections.iter().skip(1) { - cm_mock_state.publish_event(ConnectionManagerEvent::PeerConnected(conn.clone())); + cm_mock_state.publish_event(ConnectionManagerEvent::PeerConnected(conn.clone().into())); } streams::assert_in_broadcast( @@ -269,7 +267,7 @@ async fn online_then_offline_then_online() { assert!(connectivity.get_connectivity_status().await.unwrap().is_online()); } -#[runtime::test] +#[tokio::test] async fn ban_peer() { let (mut connectivity, mut event_stream, node_identity, peer_manager, cm_mock_state, _shutdown) = setup_connectivity_manager(ConnectivityConfig { @@ -282,7 +280,7 @@ async fn ban_peer() { let mut events = collect_try_recv!(event_stream, take = 1, timeout = Duration::from_secs(10)); unpack_enum!(ConnectivityEvent::ConnectivityStateInitialized = events.remove(0)); - cm_mock_state.publish_event(ConnectionManagerEvent::PeerConnected(conn.clone())); + cm_mock_state.publish_event(ConnectionManagerEvent::PeerConnected(conn.clone().into())); let mut events = collect_try_recv!(event_stream, take = 2, timeout = Duration::from_secs(10)); unpack_enum!(ConnectivityEvent::PeerConnected(_conn) = events.remove(0)); unpack_enum!(ConnectivityEvent::ConnectivityStateOnline(_n) = events.remove(0)); @@ -312,7 +310,7 @@ async fn ban_peer() { assert!(conn.is_none()); } -#[runtime::test] +#[tokio::test] async fn peer_selection() { let config = ConnectivityConfig { min_connectivity: 1, @@ -342,7 +340,7 @@ async fn peer_selection() { unpack_enum!(ConnectivityEvent::ConnectivityStateInitialized = events.remove(0)); // 10 connections for conn in &connections { - cm_mock_state.publish_event(ConnectionManagerEvent::PeerConnected(conn.clone())); + cm_mock_state.publish_event(ConnectionManagerEvent::PeerConnected(conn.clone().into())); } // Wait for all peers to be connected (i.e. for the connection manager events to be received) @@ -372,7 +370,7 @@ async fn peer_selection() { } } -#[runtime::test] +#[tokio::test] async fn pool_management() { let config = ConnectivityConfig { min_connectivity: 1, @@ -405,7 +403,7 @@ async fn pool_management() { unpack_enum!(ConnectivityEvent::ConnectivityStateInitialized = events.remove(0)); // 10 connections for conn in &connections { - cm_mock_state.publish_event(ConnectionManagerEvent::PeerConnected(conn.clone())); + cm_mock_state.publish_event(ConnectionManagerEvent::PeerConnected(conn.clone().into())); } // Wait for all peers to be connected (i.e. 
for the connection manager events to be received) diff --git a/comms/core/src/lib.rs b/comms/core/src/lib.rs index 570794feb7..69504784db 100644 --- a/comms/core/src/lib.rs +++ b/comms/core/src/lib.rs @@ -18,7 +18,7 @@ mod builder; pub use builder::{CommsBuilder, CommsBuilderError, CommsNode, UnspawnedCommsNode}; pub mod connection_manager; -pub use connection_manager::{validate_peer_addresses, PeerConnection, PeerConnectionError}; +pub use connection_manager::{validate_addresses, PeerConnection, PeerConnectionError}; pub mod connectivity; @@ -40,7 +40,6 @@ pub mod backoff; pub mod bounded_executor; pub mod memsocket; pub mod protocol; -pub mod runtime; #[macro_use] pub mod message; pub mod net_address; diff --git a/comms/core/src/memsocket/mod.rs b/comms/core/src/memsocket/mod.rs index fae585a218..2bee87ad36 100644 --- a/comms/core/src/memsocket/mod.rs +++ b/comms/core/src/memsocket/mod.rs @@ -515,7 +515,7 @@ mod test { use tokio_stream::StreamExt; use super::*; - use crate::{framing, runtime}; + use crate::framing; #[test] fn listener_bind() -> io::Result<()> { @@ -526,7 +526,7 @@ mod test { Ok(()) } - #[runtime::test] + #[tokio::test] async fn simple_connect() -> io::Result<()> { let port = acquire_next_memsocket_port().into(); let mut listener = MemoryListener::bind(port)?; @@ -544,7 +544,7 @@ mod test { Ok(()) } - #[runtime::test] + #[tokio::test] async fn listen_on_port_zero() -> io::Result<()> { let mut listener = MemoryListener::bind(0)?; let listener_addr = listener.local_addr(); @@ -569,7 +569,7 @@ mod test { Ok(()) } - #[runtime::test] + #[tokio::test] async fn listener_correctly_frees_port_on_drop() { async fn connect_on_port(port: u16) { let mut listener = MemoryListener::bind(port).unwrap(); @@ -590,7 +590,7 @@ mod test { connect_on_port(port).await; } - #[runtime::test] + #[tokio::test] async fn simple_write_read() -> io::Result<()> { let (mut a, mut b) = MemorySocket::new_pair(); @@ -605,7 +605,7 @@ mod test { Ok(()) } - #[runtime::test] + #[tokio::test] async fn partial_read() -> io::Result<()> { let (mut a, mut b) = MemorySocket::new_pair(); @@ -621,7 +621,7 @@ mod test { Ok(()) } - #[runtime::test] + #[tokio::test] async fn partial_read_write_both_sides() -> io::Result<()> { let (mut a, mut b) = MemorySocket::new_pair(); @@ -645,7 +645,7 @@ mod test { Ok(()) } - #[runtime::test] + #[tokio::test] async fn many_small_writes() -> io::Result<()> { let (mut a, mut b) = MemorySocket::new_pair(); @@ -666,7 +666,7 @@ mod test { Ok(()) } - #[runtime::test] + #[tokio::test] async fn large_writes() -> io::Result<()> { let (mut a, mut b) = MemorySocket::new_pair(); @@ -682,7 +682,7 @@ mod test { Ok(()) } - #[runtime::test] + #[tokio::test] async fn read_zero_bytes() -> io::Result<()> { let (mut a, mut b) = MemorySocket::new_pair(); @@ -698,7 +698,7 @@ mod test { Ok(()) } - #[runtime::test] + #[tokio::test] async fn read_bytes_with_large_buffer() -> io::Result<()> { let (mut a, mut b) = MemorySocket::new_pair(); @@ -713,7 +713,7 @@ mod test { Ok(()) } - #[runtime::test] + #[tokio::test] async fn read_and_write_canonical_framing() -> io::Result<()> { let (a, b) = MemorySocket::new_pair(); let mut a = framing::canonical(a, 1024); diff --git a/comms/core/src/multiplexing/yamux.rs b/comms/core/src/multiplexing/yamux.rs index dc847416ed..ffd0608b2a 100644 --- a/comms/core/src/multiplexing/yamux.rs +++ b/comms/core/src/multiplexing/yamux.rs @@ -35,7 +35,6 @@ use yamux::Mode; use crate::{ connection_manager::ConnectionDirection, - runtime, stream_id, stream_id::StreamId, 
utils::atomic_ref_counter::{AtomicRefCounter, AtomicRefCounterGuard}, @@ -92,7 +91,7 @@ impl Yamux { { let (incoming_tx, incoming_rx) = mpsc::channel(10); let incoming = IncomingWorker::new(connection, incoming_tx); - runtime::task::spawn(incoming.run()); + tokio::spawn(incoming.run()); IncomingSubstreams::new(incoming_rx, counter) } @@ -351,15 +350,9 @@ mod test { }; use tokio_stream::StreamExt; - use crate::{ - connection_manager::ConnectionDirection, - memsocket::MemorySocket, - multiplexing::yamux::Yamux, - runtime, - runtime::task, - }; + use crate::{connection_manager::ConnectionDirection, memsocket::MemorySocket, multiplexing::yamux::Yamux}; - #[runtime::test] + #[tokio::test] async fn open_substream() -> io::Result<()> { let (dialer, listener) = MemorySocket::new_pair(); let msg = b"The Way of Kings"; @@ -367,7 +360,7 @@ mod test { let dialer = Yamux::upgrade_connection(dialer, ConnectionDirection::Outbound)?; let mut dialer_control = dialer.get_yamux_control(); - task::spawn(async move { + tokio::spawn(async move { let mut substream = dialer_control.open_stream().await.unwrap(); substream.write_all(msg).await.unwrap(); @@ -391,7 +384,7 @@ mod test { Ok(()) } - #[runtime::test] + #[tokio::test] async fn substream_count() { const NUM_SUBSTREAMS: usize = 10; let (dialer, listener) = MemorySocket::new_pair(); @@ -399,7 +392,7 @@ mod test { let dialer = Yamux::upgrade_connection(dialer, ConnectionDirection::Outbound).unwrap(); let mut dialer_control = dialer.get_yamux_control(); - let substreams_out = task::spawn(async move { + let substreams_out = tokio::spawn(async move { let mut substreams = Vec::with_capacity(NUM_SUBSTREAMS); for _ in 0..NUM_SUBSTREAMS { substreams.push(dialer_control.open_stream().await.unwrap()); @@ -422,7 +415,7 @@ mod test { assert_eq!(listener.substream_count(), 0); } - #[runtime::test] + #[tokio::test] async fn close() -> io::Result<()> { let (dialer, listener) = MemorySocket::new_pair(); let msg = b"Words of Radiance"; @@ -430,7 +423,7 @@ mod test { let dialer = Yamux::upgrade_connection(dialer, ConnectionDirection::Outbound)?; let mut dialer_control = dialer.get_yamux_control(); - task::spawn(async move { + tokio::spawn(async move { let mut substream = dialer_control.open_stream().await.unwrap(); substream.write_all(msg).await.unwrap(); @@ -460,14 +453,14 @@ mod test { Ok(()) } - #[runtime::test] + #[tokio::test] async fn rude_close_does_not_freeze() -> io::Result<()> { let (dialer, listener) = MemorySocket::new_pair(); let barrier = Arc::new(Barrier::new(2)); let b = barrier.clone(); - task::spawn(async move { + tokio::spawn(async move { // Drop immediately let incoming = Yamux::upgrade_connection(listener, ConnectionDirection::Inbound) .unwrap() @@ -488,7 +481,7 @@ mod test { Ok(()) } - #[runtime::test] + #[tokio::test] async fn send_big_message() -> io::Result<()> { #[allow(non_upper_case_globals)] static MiB: usize = 1 << 20; @@ -499,7 +492,7 @@ mod test { let dialer = Yamux::upgrade_connection(dialer, ConnectionDirection::Outbound)?; let mut dialer_control = dialer.get_yamux_control(); - task::spawn(async move { + tokio::spawn(async move { assert_eq!(dialer_control.substream_count(), 0); let mut substream = dialer_control.open_stream().await.unwrap(); assert_eq!(dialer_control.substream_count(), 1); diff --git a/comms/core/src/net_address/mod.rs b/comms/core/src/net_address/mod.rs index a71d3e60ad..a437636096 100644 --- a/comms/core/src/net_address/mod.rs +++ b/comms/core/src/net_address/mod.rs @@ -23,7 +23,7 @@ //! 
Extension types used by the [PeerManager](crate::PeerManager) to keep track of address reliability. mod multiaddr_with_stats; -pub use multiaddr_with_stats::MutliaddrWithStats; +pub use multiaddr_with_stats::{MultiaddrWithStats, PeerAddressSource}; mod mutliaddresses_with_stats; pub use mutliaddresses_with_stats::MultiaddressesWithStats; diff --git a/comms/core/src/net_address/multiaddr_with_stats.rs b/comms/core/src/net_address/multiaddr_with_stats.rs index b0f09dd8f2..86f110f342 100644 --- a/comms/core/src/net_address/multiaddr_with_stats.rs +++ b/comms/core/src/net_address/multiaddr_with_stats.rs @@ -2,57 +2,188 @@ // SPDX-License-Identifier: BSD-3-Clause use std::{ + cmp, cmp::{Ord, Ordering}, fmt, + fmt::{Display, Formatter}, hash::{Hash, Hasher}, time::Duration, }; -use chrono::{DateTime, Utc}; +use chrono::{NaiveDateTime, Utc}; use multiaddr::Multiaddr; use serde::{Deserialize, Serialize}; +use crate::{peer_manager::PeerIdentityClaim, types::CommsPublicKey}; + const MAX_LATENCY_SAMPLE_COUNT: u32 = 100; +const MAX_INITIAL_DIAL_TIME_SAMPLE_COUNT: u32 = 100; #[derive(Debug, Eq, Clone, Deserialize, Serialize)] -pub struct MutliaddrWithStats { - pub address: Multiaddr, - pub last_seen: Option>, +pub struct MultiaddrWithStats { + address: Multiaddr, + pub last_seen: Option, pub connection_attempts: u32, - pub rejected_message_count: u32, + pub avg_initial_dial_time: Duration, + initial_dial_time_sample_count: u32, pub avg_latency: Duration, latency_sample_count: u32, + pub last_attempted: Option, + pub last_failed_reason: Option, + pub quality_score: i32, + pub source: PeerAddressSource, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Eq)] +pub enum PeerAddressSource { + Config, + FromNodeIdentity { + peer_identity_claim: PeerIdentityClaim, + }, + FromPeerConnection { + peer_identity_claim: PeerIdentityClaim, + }, + FromDiscovery { + peer_identity_claim: PeerIdentityClaim, + }, + FromAnotherPeer { + peer_identity_claim: PeerIdentityClaim, + source_peer: CommsPublicKey, + }, + FromJoinMessage { + peer_identity_claim: PeerIdentityClaim, + }, +} + +impl PeerAddressSource { + pub fn is_config(&self) -> bool { + matches!(self, PeerAddressSource::Config) + } + + pub fn peer_identity_claim(&self) -> Option<&PeerIdentityClaim> { + match self { + PeerAddressSource::Config => None, + PeerAddressSource::FromNodeIdentity { peer_identity_claim } => Some(peer_identity_claim), + PeerAddressSource::FromPeerConnection { peer_identity_claim } => Some(peer_identity_claim), + PeerAddressSource::FromDiscovery { peer_identity_claim } => Some(peer_identity_claim), + PeerAddressSource::FromAnotherPeer { + peer_identity_claim, .. + } => Some(peer_identity_claim), + PeerAddressSource::FromJoinMessage { peer_identity_claim } => Some(peer_identity_claim), + } + } } -impl MutliaddrWithStats { +impl Display for PeerAddressSource { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + PeerAddressSource::Config => write!(f, "Config"), + PeerAddressSource::FromNodeIdentity { .. } => { + write!(f, "FromNodeIdentity") + }, + PeerAddressSource::FromPeerConnection { .. } => write!(f, "FromPeerConnection"), + PeerAddressSource::FromDiscovery { .. } => write!(f, "FromDiscovery"), + PeerAddressSource::FromAnotherPeer { .. } => write!(f, "FromAnotherPeer"), + PeerAddressSource::FromJoinMessage { .. 
} => write!(f, "FromJoinMessage"), + } + } +} + +impl PartialEq for PeerAddressSource { + fn eq(&self, other: &Self) -> bool { + match self { + PeerAddressSource::Config => { + matches!(other, PeerAddressSource::Config) + }, + PeerAddressSource::FromNodeIdentity { .. } => { + matches!(other, PeerAddressSource::FromNodeIdentity { .. }) + }, + PeerAddressSource::FromPeerConnection { .. } => { + matches!(other, PeerAddressSource::FromPeerConnection { .. }) + }, + PeerAddressSource::FromAnotherPeer { .. } => { + matches!(other, PeerAddressSource::FromAnotherPeer { .. }) + }, + PeerAddressSource::FromDiscovery { .. } => { + matches!(other, PeerAddressSource::FromDiscovery { .. }) + }, + PeerAddressSource::FromJoinMessage { .. } => { + matches!(other, PeerAddressSource::FromJoinMessage { .. }) + }, + } + } +} + +impl MultiaddrWithStats { /// Constructs a new net address with zero stats - pub fn new(address: Multiaddr) -> Self { + pub fn new(address: Multiaddr, source: PeerAddressSource) -> Self { Self { address, last_seen: None, connection_attempts: 0, - rejected_message_count: 0, + avg_initial_dial_time: Duration::from_secs(0), + initial_dial_time_sample_count: 0, avg_latency: Duration::from_millis(0), latency_sample_count: 0, + last_attempted: None, + last_failed_reason: None, + quality_score: 0, + source, } } - /// Constructs a new net address with usage stats - pub fn new_with_stats( - address: Multiaddr, - last_seen: Option>, - connection_attempts: u32, - rejected_message_count: u32, - avg_latency: Duration, - latency_sample_count: u32, - ) -> Self { - Self { - address, - last_seen, - connection_attempts, - rejected_message_count, - avg_latency, - latency_sample_count, + pub fn merge(&mut self, other: &Self) { + if self.address == other.address { + self.last_seen = cmp::max(other.last_seen, self.last_seen); + self.connection_attempts = cmp::max(self.connection_attempts, other.connection_attempts); + match self.latency_sample_count.cmp(&other.latency_sample_count) { + Ordering::Less => { + self.avg_latency = other.avg_latency; + self.latency_sample_count = other.latency_sample_count; + }, + Ordering::Equal | Ordering::Greater => {}, + } + match self + .initial_dial_time_sample_count + .cmp(&other.initial_dial_time_sample_count) + { + Ordering::Less => { + self.avg_initial_dial_time = other.avg_initial_dial_time; + self.initial_dial_time_sample_count = other.initial_dial_time_sample_count; + }, + Ordering::Equal | Ordering::Greater => {}, + } + self.last_attempted = cmp::max(self.last_attempted, other.last_attempted); + self.last_failed_reason = other.last_failed_reason.clone(); + self.update_source_if_better(&other.source); + } + } + + pub fn update_source_if_better(&mut self, source: &PeerAddressSource) { + match (self.source.peer_identity_claim(), source.peer_identity_claim()) { + (None, None) => (), + (None, Some(_)) => { + self.source = source.clone(); + }, + (Some(_), None) => (), + (Some(self_source), Some(other_source)) => { + if other_source.signature.updated_at() > self_source.signature.updated_at() { + self.source = source.clone(); + } + }, + } + self.calculate_quality_score(); + } + + pub fn address(&self) -> &Multiaddr { + &self.address + } + + pub fn offline_at(&self) -> Option { + if self.last_failed_reason.is_some() { + self.last_attempted + } else { + None } } @@ -65,30 +196,35 @@ impl MutliaddrWithStats { /// small weighted change to the avg_latency. 
The previous avg_latency will have a weight of /// MAX_LATENCY_SAMPLE_COUNT and the new latency_measurement will have a weight of 1. pub fn update_latency(&mut self, latency_measurement: Duration) { - self.last_seen = Some(Utc::now()); + self.last_seen = Some(Utc::now().naive_utc()); self.avg_latency = ((self.avg_latency * self.latency_sample_count) + latency_measurement) / (self.latency_sample_count + 1); if self.latency_sample_count < MAX_LATENCY_SAMPLE_COUNT { self.latency_sample_count += 1; } - } - /// Mark that a message was received from this net address - pub fn mark_message_received(&mut self) { - self.last_seen = Some(Utc::now()); + self.calculate_quality_score(); } - /// Mark that a rejected message was received from this net address - pub fn mark_message_rejected(&mut self) { - self.last_seen = Some(Utc::now()); - self.rejected_message_count += 1; + pub fn update_initial_dial_time(&mut self, initial_dial_time: Duration) { + self.last_seen = Some(Utc::now().naive_utc()); + + self.avg_initial_dial_time = ((self.avg_initial_dial_time * self.initial_dial_time_sample_count) + + initial_dial_time) / + (self.initial_dial_time_sample_count + 1); + if self.initial_dial_time_sample_count < MAX_INITIAL_DIAL_TIME_SAMPLE_COUNT { + self.initial_dial_time_sample_count += 1; + } + self.calculate_quality_score(); } /// Mark that a successful interaction occurred with this address pub fn mark_last_seen_now(&mut self) { - self.last_seen = Some(Utc::now()); - self.connection_attempts = 0; + self.last_seen = Some(Utc::now().naive_utc()); + self.last_failed_reason = None; + self.reset_connection_attempts(); + self.calculate_quality_score(); } /// Reset the connection attempts on this net address for a later session of retries @@ -97,92 +233,77 @@ impl MutliaddrWithStats { } /// Mark that a connection could not be established with this net address - pub fn mark_failed_connection_attempt(&mut self) { + pub fn mark_failed_connection_attempt(&mut self, error_string: String) { self.connection_attempts += 1; + self.last_failed_reason = Some(error_string); + self.calculate_quality_score(); } /// Get as a Multiaddr pub fn as_net_address(&self) -> Multiaddr { self.clone().address } -} -impl From for MutliaddrWithStats { - /// Constructs a new net address with usage stats from a net address - fn from(net_address: Multiaddr) -> Self { - Self { - address: net_address, - last_seen: None, - connection_attempts: 0, - rejected_message_count: 0, - avg_latency: Duration::new(0, 0), - latency_sample_count: 0, + fn calculate_quality_score(&mut self) { + // Try these first + if self.last_seen.is_none() && self.last_attempted.is_none() { + self.quality_score = 1000; + return; } - } -} -// Reliability ordering of net addresses: prioritize net addresses according to previous successful connections, -// connection attempts, latency and last seen A lower ordering has a higher priority and a higher ordering has a lower -// priority, this ordering switch allows searching for, and updating of net addresses to be performed more efficiently -impl Ord for MutliaddrWithStats { - fn cmp(&self, other: &MutliaddrWithStats) -> Ordering { - if self.last_seen.is_some() && other.last_seen.is_none() { - return Ordering::Less; - } + let mut score_self = 0; - if self.last_seen.is_none() && other.last_seen.is_some() { - return Ordering::Greater; - } - if self.connection_attempts < other.connection_attempts { - return Ordering::Less; - } + score_self += cmp::max(0, 100 - (self.avg_latency.as_millis() as i32 / 100)); - if 
self.connection_attempts > other.connection_attempts { - return Ordering::Greater; - } - if self.latency_sample_count > 0 && other.latency_sample_count > 0 { - if self.avg_latency < other.avg_latency { - return Ordering::Less; - } + score_self += cmp::max( + 0, + 100 - self + .last_seen + .map(|x| Utc::now().naive_utc() - x) + .map(|x| x.num_seconds()) + .unwrap_or(0) as i32, + ); - if self.avg_latency > other.avg_latency { - return Ordering::Greater; - } + if self.last_failed_reason.is_some() { + score_self -= 100; } - if self.last_seen.is_some() && other.last_seen.is_some() { - let self_last_seen = self.last_seen.unwrap(); - let other_last_seen = other.last_seen.unwrap(); - if self_last_seen > other_last_seen { - return Ordering::Less; - } - if self_last_seen < other_last_seen { - return Ordering::Greater; - } - } - Ordering::Equal + self.quality_score = score_self; + } + + pub fn source(&self) -> &PeerAddressSource { + &self.source } } -impl PartialOrd for MutliaddrWithStats { - fn partial_cmp(&self, other: &MutliaddrWithStats) -> Option { +// Reliability ordering of net addresses: prioritize net addresses according to previous successful connections, +// connection attempts, latency and last seen A lower ordering has a higher priority and a higher ordering has a lower +// priority, this ordering switch allows searching for, and updating of net addresses to be performed more efficiently +impl Ord for MultiaddrWithStats { + fn cmp(&self, other: &MultiaddrWithStats) -> Ordering { + self.quality_score.cmp(&other.quality_score).reverse() + } +} + +impl PartialOrd for MultiaddrWithStats { + fn partial_cmp(&self, other: &MultiaddrWithStats) -> Option { Some(self.cmp(other)) } } -impl PartialEq for MutliaddrWithStats { - fn eq(&self, other: &MutliaddrWithStats) -> bool { +impl PartialEq for MultiaddrWithStats { + fn eq(&self, other: &MultiaddrWithStats) -> bool { self.address == other.address } } -impl Hash for MutliaddrWithStats { +impl Hash for MultiaddrWithStats { fn hash(&self, state: &mut H) { self.address.hash(state) } } -impl fmt::Display for MutliaddrWithStats { +impl fmt::Display for MultiaddrWithStats { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.address) } @@ -190,14 +311,14 @@ impl fmt::Display for MutliaddrWithStats { #[cfg(test)] mod test { - use std::{thread, time::Duration}; + use std::time::Duration; use super::*; #[test] fn test_update_latency() { let net_address = "/ip4/123.0.0.123/tcp/8000".parse::().unwrap(); - let mut net_address_with_stats = MutliaddrWithStats::from(net_address); + let mut net_address_with_stats = MultiaddrWithStats::new(net_address, PeerAddressSource::Config); let latency_measurement1 = Duration::from_millis(100); let latency_measurement2 = Duration::from_millis(200); let latency_measurement3 = Duration::from_millis(60); @@ -212,26 +333,12 @@ mod test { assert_eq!(net_address_with_stats.avg_latency, Duration::from_millis(125)); } - #[test] - fn test_message_received_and_rejected() { - let net_address = "/ip4/123.0.0.123/tcp/8000".parse::().unwrap(); - let mut net_address_with_stats = MutliaddrWithStats::from(net_address); - assert!(net_address_with_stats.last_seen.is_none()); - net_address_with_stats.mark_message_received(); - assert!(net_address_with_stats.last_seen.is_some()); - let last_seen = net_address_with_stats.last_seen.unwrap(); - net_address_with_stats.mark_message_rejected(); - net_address_with_stats.mark_message_rejected(); - assert_eq!(net_address_with_stats.rejected_message_count, 2); - assert!(last_seen <= 
net_address_with_stats.last_seen.unwrap()); - } - #[test] fn test_successful_and_failed_connection_attempts() { let net_address = "/ip4/123.0.0.123/tcp/8000".parse::().unwrap(); - let mut net_address_with_stats = MutliaddrWithStats::from(net_address); - net_address_with_stats.mark_failed_connection_attempt(); - net_address_with_stats.mark_failed_connection_attempt(); + let mut net_address_with_stats = MultiaddrWithStats::new(net_address, PeerAddressSource::Config); + net_address_with_stats.mark_failed_connection_attempt("Error".to_string()); + net_address_with_stats.mark_failed_connection_attempt("Error".to_string()); assert!(net_address_with_stats.last_seen.is_none()); assert_eq!(net_address_with_stats.connection_attempts, 2); net_address_with_stats.mark_last_seen_now(); @@ -242,32 +349,11 @@ mod test { #[test] fn test_reseting_connection_attempts() { let net_address = "/ip4/123.0.0.123/tcp/8000".parse::().unwrap(); - let mut net_address_with_stats = MutliaddrWithStats::from(net_address); - net_address_with_stats.mark_failed_connection_attempt(); - net_address_with_stats.mark_failed_connection_attempt(); + let mut net_address_with_stats = MultiaddrWithStats::new(net_address, PeerAddressSource::Config); + net_address_with_stats.mark_failed_connection_attempt("asdf".to_string()); + net_address_with_stats.mark_failed_connection_attempt("asdf".to_string()); assert_eq!(net_address_with_stats.connection_attempts, 2); net_address_with_stats.reset_connection_attempts(); assert_eq!(net_address_with_stats.connection_attempts, 0); } - - #[test] - fn test_net_address_reliability_ordering() { - let net_address = "/ip4/123.0.0.123/tcp/8000".parse::().unwrap(); - let mut na1 = MutliaddrWithStats::from(net_address.clone()); - let mut na2 = MutliaddrWithStats::from(net_address); - thread::sleep(Duration::from_millis(1)); - na1.mark_last_seen_now(); - assert!(na1 < na2); - thread::sleep(Duration::from_millis(1)); - na2.mark_last_seen_now(); - assert!(na1 > na2); - thread::sleep(Duration::from_millis(1)); - na1.mark_message_rejected(); - assert!(na1 < na2); - na1.update_latency(Duration::from_millis(200)); - na2.update_latency(Duration::from_millis(100)); - assert!(na1 > na2); - na1.mark_failed_connection_attempt(); - assert!(na1 > na2); - } } diff --git a/comms/core/src/net_address/mutliaddresses_with_stats.rs b/comms/core/src/net_address/mutliaddresses_with_stats.rs index 04a78578ac..ca751c8d5c 100644 --- a/comms/core/src/net_address/mutliaddresses_with_stats.rs +++ b/comms/core/src/net_address/mutliaddresses_with_stats.rs @@ -7,35 +7,48 @@ use std::{ time::Duration, }; -use chrono::{DateTime, Utc}; +use chrono::{NaiveDateTime, Utc}; use multiaddr::Multiaddr; use serde::{Deserialize, Serialize}; -use crate::net_address::MutliaddrWithStats; +use crate::net_address::{multiaddr_with_stats::PeerAddressSource, MultiaddrWithStats}; /// This struct is used to store a set of different net addresses such as IPv4, IPv6, Tor or I2P for a single peer. 
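// --- Illustrative sketch (not part of this patch) ---------------------------------------------
// Shows how the per-address stats introduced above drive ordering: a recorded failure lowers an
// address's quality score, and `Ord` reverses the score comparison so the healthiest address
// sorts first. The collection type defined below keeps its addresses sorted with this ordering.
// Crate paths (`tari_comms::...`) and the function name are assumptions for illustration.
use std::time::Duration;

use tari_comms::{
    multiaddr::Multiaddr,
    net_address::{MultiaddrWithStats, PeerAddressSource},
};

fn demo_quality_ordering() {
    let good_addr: Multiaddr = "/ip4/10.0.0.1/tcp/18189".parse().unwrap();
    let bad_addr: Multiaddr = "/ip4/10.0.0.2/tcp/18189".parse().unwrap();

    let mut good = MultiaddrWithStats::new(good_addr.clone(), PeerAddressSource::Config);
    let mut bad = MultiaddrWithStats::new(bad_addr, PeerAddressSource::Config);

    // Both addresses have been used, but only one has a recorded failure.
    good.update_latency(Duration::from_millis(10));
    bad.update_latency(Duration::from_millis(10));
    bad.mark_failed_connection_attempt("connection refused".to_string());

    // `Ord` reverses the quality-score comparison, so the address without a failure sorts first.
    let mut ranked = vec![bad, good];
    ranked.sort();
    assert_eq!(ranked[0].address(), &good_addr);
}
// -----------------------------------------------------------------------------------------------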
#[derive(Debug, Clone, Deserialize, Serialize, Default, Eq)] pub struct MultiaddressesWithStats { - pub addresses: Vec, - last_attempted: Option>, + addresses: Vec, } impl MultiaddressesWithStats { - /// Constructs a new list of addresses with usage stats from a list of net addresses - pub fn new(addresses: Vec) -> MultiaddressesWithStats { + pub fn from_addresses_with_source( + addresses: Vec, + source: &PeerAddressSource, + ) -> MultiaddressesWithStats { + let mut addresses_with_stats = Vec::with_capacity(addresses.len()); + for address in addresses { + addresses_with_stats.push(MultiaddrWithStats::new(address, source.clone())); + } MultiaddressesWithStats { - addresses, - last_attempted: None, + addresses: addresses_with_stats, } } - pub fn first(&self) -> Option<&MutliaddrWithStats> { + pub fn empty() -> Self { + MultiaddressesWithStats { addresses: Vec::new() } + } + + /// Constructs a new list of addresses with usage stats from a list of net addresses + pub fn new(addresses: Vec) -> MultiaddressesWithStats { + MultiaddressesWithStats { addresses } + } + + pub fn best(&self) -> Option<&MultiaddrWithStats> { self.addresses.first() } /// Provides the date and time of the last successful communication with this peer - pub fn last_seen(&self) -> Option> { - let mut latest_valid_datetime: Option> = None; + pub fn last_seen(&self) -> Option { + let mut latest_valid_datetime: Option = None; for curr_address in &self.addresses { if curr_address.last_seen.is_none() { continue; @@ -52,36 +65,82 @@ impl MultiaddressesWithStats { latest_valid_datetime } + pub fn offline_at(&self) -> Option { + let mut earliest_offline_at: Option = None; + for curr_address in &self.addresses { + // At least one address is online + #[allow(clippy::question_mark)] + if curr_address.offline_at().is_none() { + return None; + } + match earliest_offline_at { + Some(earliest_datetime) => { + if earliest_datetime > curr_address.offline_at().unwrap() { + earliest_offline_at = curr_address.offline_at(); + } + }, + None => earliest_offline_at = curr_address.offline_at(), + } + } + earliest_offline_at + } + /// Return the time of last attempted connection to this collection of addresses - pub fn last_attempted(&self) -> Option> { - self.last_attempted + pub fn last_attempted(&self) -> Option { + let mut latest_valid_datetime: Option = None; + for curr_address in &self.addresses { + if curr_address.last_attempted.is_none() { + continue; + } + match latest_valid_datetime { + Some(latest_datetime) => { + if latest_datetime < curr_address.last_attempted.unwrap() { + latest_valid_datetime = curr_address.last_attempted; + } + }, + None => latest_valid_datetime = curr_address.last_attempted, + } + } + latest_valid_datetime } /// Adds a new net address to the peer. This function will not add a duplicate if the address /// already exists. 
- pub fn add_address(&mut self, net_address: &Multiaddr) { - if !self.addresses.iter().any(|x| x.address == *net_address) { - self.addresses.push(net_address.clone().into()); + pub fn add_address(&mut self, net_address: &Multiaddr, source: &PeerAddressSource) { + if self.addresses.iter().any(|x| x.address() == net_address) { + self.addresses + .iter_mut() + .find(|x| x.address() == net_address) + .unwrap() + .update_source_if_better(source); + } else { + self.addresses + .push(MultiaddrWithStats::new(net_address.clone(), source.clone())); self.addresses.sort(); } } + pub fn contains(&self, net_address: &Multiaddr) -> bool { + self.addresses.iter().any(|x| x.address() == net_address) + } + /// Compares the existing set of addresses to the provided address set and remove missing addresses and /// add new addresses without discarding the usage stats of the existing and remaining addresses. - pub fn update_addresses(&mut self, addresses: Vec) { - self.addresses = self - .addresses - .drain(..) - .filter(|addr| addresses.contains(&addr.address)) - .collect(); + pub fn update_addresses(&mut self, addresses: &[Multiaddr], source: &PeerAddressSource) { + for address in addresses { + if let Some(addr) = self.addresses.iter_mut().find(|a| a.address() == address) { + addr.update_source_if_better(source); + } + } let to_add = addresses - .into_iter() - .filter(|addr| !self.addresses.iter().any(|a| a.address == *addr)) + .iter() + .filter(|addr| !self.addresses.iter().any(|a| &a.address() == addr)) .collect::>(); for address in to_add { - self.addresses.push(address.into()); + self.addresses + .push(MultiaddrWithStats::new(address.clone(), source.clone())); } self.addresses.sort(); @@ -90,7 +149,7 @@ impl MultiaddressesWithStats { /// Returns an iterator of addresses ordered from 'best' to 'worst' according to heuristics such as failed /// connections and latency. 
pub fn iter(&self) -> impl Iterator { - self.addresses.iter().map(|addr| &addr.address) + self.addresses.iter().map(|addr| addr.address()) } pub fn to_lexicographically_sorted(&self) -> Vec { @@ -103,9 +162,19 @@ impl MultiaddressesWithStats { addresses } + pub fn merge(&mut self, other: &MultiaddressesWithStats) { + for addr in &other.addresses { + if let Some(existing) = self.find_address_mut(addr.address()) { + existing.merge(addr); + } else { + self.addresses.push(addr.clone()); + } + } + } + /// Finds the specified address in the set and allow updating of its variables such as its usage stats - fn find_address_mut(&mut self, address: &Multiaddr) -> Option<&mut MutliaddrWithStats> { - self.addresses.iter_mut().find(|a| &a.address == address) + fn find_address_mut(&mut self, address: &Multiaddr) -> Option<&mut MultiaddrWithStats> { + self.addresses.iter_mut().find(|a| a.address() == address) } /// The average connection latency of the provided net address will be updated to include the current measured @@ -123,31 +192,11 @@ impl MultiaddressesWithStats { } } - /// Mark that a message was received from the specified net address - /// - /// Returns true if the address is contained in this instance, otherwise false - pub fn mark_message_received(&mut self, address: &Multiaddr) -> bool { - match self.find_address_mut(address) { - Some(addr) => { - addr.mark_message_received(); - self.addresses.sort(); - true - }, - None => false, - } - } - - /// Mark that a rejected message was received from the specified net address - /// - /// Returns true if the address is contained in this instance, otherwise false - pub fn mark_message_rejected(&mut self, address: &Multiaddr) -> bool { - match self.find_address_mut(address) { - Some(addr) => { - addr.mark_message_rejected(); - self.addresses.sort(); - true - }, - None => false, + pub fn update_address_stats(&mut self, address: &Multiaddr, f: F) + where F: FnOnce(&mut MultiaddrWithStats) { + if let Some(addr) = self.find_address_mut(address) { + f(addr); + self.addresses.sort(); } } @@ -158,7 +207,7 @@ impl MultiaddressesWithStats { match self.find_address_mut(address) { Some(addr) => { addr.mark_last_seen_now(); - self.last_attempted = Some(Utc::now()); + addr.last_attempted = Some(Utc::now().naive_utc()); self.addresses.sort(); true }, @@ -169,11 +218,11 @@ impl MultiaddressesWithStats { /// Mark that a connection could not be established with the specified net address /// /// Returns true if the address is contained in this instance, otherwise false - pub fn mark_failed_connection_attempt(&mut self, address: &Multiaddr) -> bool { + pub fn mark_failed_connection_attempt(&mut self, address: &Multiaddr, failed_reason: String) -> bool { match self.find_address_mut(address) { Some(addr) => { - addr.mark_failed_connection_attempt(); - self.last_attempted = Some(Utc::now()); + addr.mark_failed_connection_attempt(failed_reason); + addr.last_attempted = Some(Utc::now().naive_utc()); self.addresses.sort(); true }, @@ -202,7 +251,11 @@ impl MultiaddressesWithStats { } pub fn into_vec(self) -> Vec { - self.addresses.into_iter().map(|addr| addr.address).collect() + self.addresses.into_iter().map(|addr| addr.address().clone()).collect() + } + + pub fn addresses(&self) -> &[MultiaddrWithStats] { + &self.addresses } } @@ -213,7 +266,7 @@ impl PartialEq for MultiaddressesWithStats { } impl Index for MultiaddressesWithStats { - type Output = MutliaddrWithStats; + type Output = MultiaddrWithStats; /// Returns the NetAddressWithStats at the given index fn index(&self, 
index: usize) -> &Self::Output { @@ -221,36 +274,20 @@ impl Index for MultiaddressesWithStats { } } -impl From for MultiaddressesWithStats { - /// Constructs a new list of addresses with usage stats from a single net address - fn from(net_address: Multiaddr) -> Self { - MultiaddressesWithStats { - addresses: vec![MutliaddrWithStats::from(net_address)], - last_attempted: None, - } - } -} - -impl From> for MultiaddressesWithStats { - /// Constructs a new list of addresses with usage stats from a Vec - fn from(net_addresses: Vec) -> Self { - MultiaddressesWithStats { - addresses: net_addresses - .into_iter() - .map(MutliaddrWithStats::from) - .collect::>(), - last_attempted: None, - } +impl From> for MultiaddressesWithStats { + /// Constructs NetAddressesWithStats from a list of addresses with usage stats + fn from(addresses: Vec) -> Self { + MultiaddressesWithStats { addresses } } } -impl From> for MultiaddressesWithStats { - /// Constructs NetAddressesWithStats from a list of addresses with usage stats - fn from(addresses: Vec) -> Self { - MultiaddressesWithStats { - addresses, - last_attempted: None, - } +impl From for Vec { + fn from(value: MultiaddressesWithStats) -> Self { + value + .addresses + .into_iter() + .map(|addr| addr.address().to_string()) + .collect() } } @@ -261,7 +298,7 @@ impl Display for MultiaddressesWithStats { "{}", self.addresses .iter() - .map(|a| a.address.to_string()) + .map(|a| a.address().to_string()) .collect::>() .join(", ") ) @@ -279,12 +316,14 @@ mod test { let net_address1 = "/ip4/123.0.0.123/tcp/8000".parse::().unwrap(); let net_address2 = "/ip4/125.1.54.254/tcp/7999".parse::().unwrap(); let net_address3 = "/ip4/175.6.3.145/tcp/8000".parse::().unwrap(); - let net_addresses: MultiaddressesWithStats = - vec![net_address1.clone(), net_address2.clone(), net_address3.clone()].into(); - - assert_eq!(net_addresses[0].address, net_address1); - assert_eq!(net_addresses[1].address, net_address2); - assert_eq!(net_addresses[2].address, net_address3); + let net_addresses: MultiaddressesWithStats = MultiaddressesWithStats::from_addresses_with_source( + vec![net_address1.clone(), net_address2.clone(), net_address3.clone()], + &PeerAddressSource::Config, + ); + + assert_eq!(net_addresses[0].address(), &net_address1); + assert_eq!(net_addresses[1].address(), &net_address2); + assert_eq!(net_addresses[2].address(), &net_address3); } #[test] @@ -292,14 +331,19 @@ mod test { let net_address1 = "/ip4/123.0.0.123/tcp/8000".parse::().unwrap(); let net_address2 = "/ip4/125.1.54.254/tcp/7999".parse::().unwrap(); let net_address3 = "/ip4/175.6.3.145/tcp/8000".parse::().unwrap(); - let mut net_addresses = MultiaddressesWithStats::from(net_address1.clone()); - net_addresses.add_address(&net_address2); - net_addresses.add_address(&net_address3); + let mut net_addresses = + MultiaddressesWithStats::from_addresses_with_source(vec![net_address1.clone()], &PeerAddressSource::Config); + net_addresses.add_address(&net_address2, &PeerAddressSource::Config); + net_addresses.add_address(&net_address3, &PeerAddressSource::Config); assert!(net_addresses.mark_last_seen_now(&net_address3)); assert!(net_addresses.mark_last_seen_now(&net_address1)); assert!(net_addresses.mark_last_seen_now(&net_address2)); - let desired_last_seen = net_addresses.addresses[0].last_seen; + let desired_last_seen = net_addresses + .addresses + .iter() + .max_by_key(|a| a.last_seen) + .map(|a| a.last_seen.unwrap()); let last_seen = net_addresses.last_seen(); assert_eq!(desired_last_seen.unwrap(), last_seen.unwrap()); } 
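// --- Illustrative sketch (not part of this patch) ---------------------------------------------
// Typical construction of the reworked address list: every address carries a `PeerAddressSource`,
// failures are recorded with a reason string rather than marking the peer offline, and `best()`
// returns the address with the highest quality score. Crate paths (`tari_comms::...`) and the
// function name are assumptions for illustration.
use std::time::Duration;

use tari_comms::{
    multiaddr::Multiaddr,
    net_address::{MultiaddressesWithStats, PeerAddressSource},
};

fn demo_best_address_selection() {
    let addr1: Multiaddr = "/ip4/10.0.0.1/tcp/18189".parse().unwrap();
    let addr2: Multiaddr = "/ip4/10.0.0.2/tcp/18189".parse().unwrap();

    let mut addresses = MultiaddressesWithStats::from_addresses_with_source(
        vec![addr1.clone(), addr2.clone()],
        &PeerAddressSource::Config,
    );

    // Record per-address outcomes; the list re-sorts itself after every update.
    addresses.mark_last_seen_now(&addr1);
    addresses.mark_last_seen_now(&addr2);
    addresses.update_latency(&addr1, Duration::from_millis(50));
    addresses.update_latency(&addr2, Duration::from_millis(300));
    addresses.mark_failed_connection_attempt(&addr2, "connection timed out".to_string());

    // `addr1` was seen recently, has lower latency and no recorded failure, so it is offered first.
    assert_eq!(addresses.best().map(|a| a.address()), Some(&addr1));
}
// -----------------------------------------------------------------------------------------------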
@@ -309,15 +353,16 @@ mod test { let net_address1 = "/ip4/123.0.0.123/tcp/8000".parse::().unwrap(); let net_address2 = "/ip4/125.1.54.254/tcp/7999".parse::().unwrap(); let net_address3 = "/ip4/175.6.3.145/tcp/8000".parse::().unwrap(); - let mut net_addresses = MultiaddressesWithStats::from(net_address1.clone()); - net_addresses.add_address(&net_address2); - net_addresses.add_address(&net_address3); + let mut net_addresses = + MultiaddressesWithStats::from_addresses_with_source(vec![net_address1.clone()], &PeerAddressSource::Config); + net_addresses.add_address(&net_address2, &PeerAddressSource::Config); + net_addresses.add_address(&net_address3, &PeerAddressSource::Config); // Add duplicate address, test add_net_address is idempotent - net_addresses.add_address(&net_address2); + net_addresses.add_address(&net_address2, &PeerAddressSource::Config); assert_eq!(net_addresses.addresses.len(), 3); - assert_eq!(net_addresses.addresses[0].address, net_address1); - assert_eq!(net_addresses.addresses[1].address, net_address2); - assert_eq!(net_addresses.addresses[2].address, net_address3); + assert_eq!(net_addresses.addresses[0].address(), &net_address1); + assert_eq!(net_addresses.addresses[1].address(), &net_address2); + assert_eq!(net_addresses.addresses[2].address(), &net_address3); } #[test] @@ -325,80 +370,43 @@ mod test { let net_address1 = "/ip4/123.0.0.123/tcp/8000".parse::().unwrap(); let net_address2 = "/ip4/125.1.54.254/tcp/7999".parse::().unwrap(); let net_address3 = "/ip4/175.6.3.145/tcp/8000".parse::().unwrap(); - let mut net_addresses = MultiaddressesWithStats::from(net_address1.clone()); - net_addresses.add_address(&net_address2); - net_addresses.add_address(&net_address3); + let mut net_addresses = + MultiaddressesWithStats::from_addresses_with_source(vec![net_address1.clone()], &PeerAddressSource::Config); + net_addresses.add_address(&net_address2, &PeerAddressSource::Config); + net_addresses.add_address(&net_address3, &PeerAddressSource::Config); let priority_address = net_addresses.iter().next().unwrap(); assert_eq!(priority_address, &net_address1); + net_addresses.mark_last_seen_now(&net_address1); + net_addresses.mark_last_seen_now(&net_address2); + net_addresses.mark_last_seen_now(&net_address3); assert!(net_addresses.update_latency(&net_address1, Duration::from_millis(250))); assert!(net_addresses.update_latency(&net_address2, Duration::from_millis(50))); assert!(net_addresses.update_latency(&net_address3, Duration::from_millis(100))); let priority_address = net_addresses.iter().next().unwrap(); assert_eq!(priority_address, &net_address2); - assert!(net_addresses.mark_failed_connection_attempt(&net_address2)); + assert!(net_addresses.mark_failed_connection_attempt(&net_address2, "error".to_string())); let priority_address = net_addresses.iter().next().unwrap(); assert_eq!(priority_address, &net_address3); } - // TODO: Broken in release mode - investigate and fix - // #[test] - // fn test_stats_updates_on_addresses() { - // let net_address1 = "/ip4/123.0.0.123/tcp/8000".parse::().unwrap(); - // let net_address2 = "/ip4/125.1.54.254/tcp/7999".parse::().unwrap(); - // let net_address3 = "/ip4/175.6.3.145/tcp/8000".parse::().unwrap(); - // let mut addresses: Vec = Vec::new(); - // addresses.push(NetAddressWithStats::from(net_address1.clone())); - // addresses.push(NetAddressWithStats::from(net_address2.clone())); - // addresses.push(NetAddressWithStats::from(net_address3.clone())); - // let mut net_addresses = NetAddressesWithStats::new(addresses); - // - // 
assert!(net_addresses.update_latency(&net_address2, Duration::from_millis(200))); - // assert_eq!(net_addresses.addresses[0].avg_latency, Duration::from_millis(200)); - // assert_eq!(net_addresses.addresses[1].avg_latency, Duration::from_millis(0)); - // assert_eq!(net_addresses.addresses[2].avg_latency, Duration::from_millis(0)); - // - // thread::sleep(Duration::from_millis(1)); - // assert!(net_addresses.mark_message_received(&net_address1)); - // assert!(net_addresses.addresses[0].last_seen.is_some()); - // assert!(net_addresses.addresses[1].last_seen.is_some()); - // assert!(net_addresses.addresses[2].last_seen.is_none()); - // assert!(net_addresses.addresses[0].last_seen.unwrap() > net_addresses.addresses[1].last_seen.unwrap()); - // - // assert!(net_addresses.mark_message_rejected(&net_address2)); - // assert!(net_addresses.mark_message_rejected(&net_address3)); - // assert!(net_addresses.mark_message_rejected(&net_address3)); - // assert_eq!(net_addresses.addresses[0].rejected_message_count, 2); - // assert_eq!(net_addresses.addresses[1].rejected_message_count, 1); - // assert_eq!(net_addresses.addresses[2].rejected_message_count, 0); - // - // assert!(net_addresses.mark_failed_connection_attempt(&net_address1)); - // assert!(net_addresses.mark_failed_connection_attempt(&net_address2)); - // assert!(net_addresses.mark_failed_connection_attempt(&net_address3)); - // assert!(net_addresses.mark_failed_connection_attempt(&net_address1)); - // assert!(net_addresses.mark_last_seen_now(&net_address2)); - // assert_eq!(net_addresses.addresses[0].connection_attempts, 0); - // assert_eq!(net_addresses.addresses[1].connection_attempts, 1); - // assert_eq!(net_addresses.addresses[2].connection_attempts, 2); - // } - #[test] fn test_resetting_all_connection_attempts() { let net_address1 = "/ip4/123.0.0.123/tcp/8000".parse::().unwrap(); let net_address2 = "/ip4/125.1.54.254/tcp/7999".parse::().unwrap(); let net_address3 = "/ip4/175.6.3.145/tcp/8000".parse::().unwrap(); - let addresses: Vec = vec![ - MutliaddrWithStats::from(net_address1.clone()), - MutliaddrWithStats::from(net_address2.clone()), - MutliaddrWithStats::from(net_address3.clone()), + let addresses: Vec = vec![ + MultiaddrWithStats::new(net_address1.clone(), PeerAddressSource::Config), + MultiaddrWithStats::new(net_address2.clone(), PeerAddressSource::Config), + MultiaddrWithStats::new(net_address3.clone(), PeerAddressSource::Config), ]; let mut net_addresses = MultiaddressesWithStats::new(addresses); - assert!(net_addresses.mark_failed_connection_attempt(&net_address1)); - assert!(net_addresses.mark_failed_connection_attempt(&net_address2)); - assert!(net_addresses.mark_failed_connection_attempt(&net_address3)); - assert!(net_addresses.mark_failed_connection_attempt(&net_address1)); + assert!(net_addresses.mark_failed_connection_attempt(&net_address1, "error".to_string())); + assert!(net_addresses.mark_failed_connection_attempt(&net_address2, "error".to_string())); + assert!(net_addresses.mark_failed_connection_attempt(&net_address3, "error".to_string())); + assert!(net_addresses.mark_failed_connection_attempt(&net_address1, "error".to_string())); assert_eq!(net_addresses.addresses[0].connection_attempts, 1); assert_eq!(net_addresses.addresses[1].connection_attempts, 1); diff --git a/comms/core/src/noise/config.rs b/comms/core/src/noise/config.rs index be7d65957e..60ac1c4848 100644 --- a/comms/core/src/noise/config.rs +++ b/comms/core/src/noise/config.rs @@ -101,12 +101,7 @@ mod test { use tokio::io::{AsyncReadExt, AsyncWriteExt}; 
use super::*; - use crate::{ - memsocket::MemorySocket, - peer_manager::PeerFeatures, - runtime, - test_utils::node_identity::build_node_identity, - }; + use crate::{memsocket::MemorySocket, peer_manager::PeerFeatures, test_utils::node_identity::build_node_identity}; fn check_noise_params(config: &NoiseConfig) { assert_eq!(config.parameters.hash, HashChoice::Blake2b); @@ -125,7 +120,7 @@ mod test { assert_eq!(config.node_identity.public_key(), node_identity.public_key()); } - #[runtime::test] + #[tokio::test] async fn upgrade_socket() { let node_identity1 = build_node_identity(PeerFeatures::COMMUNICATION_NODE); let config1 = NoiseConfig::new(node_identity1.clone()); diff --git a/comms/core/src/noise/socket.rs b/comms/core/src/noise/socket.rs index 11875503e6..a51e1f8fbe 100644 --- a/comms/core/src/noise/socket.rs +++ b/comms/core/src/noise/socket.rs @@ -647,7 +647,7 @@ mod test { use snow::{params::NoiseParams, Builder, Error, Keypair}; use super::*; - use crate::{memsocket::MemorySocket, noise::config::NOISE_IX_PARAMETER, runtime}; + use crate::{memsocket::MemorySocket, noise::config::NOISE_IX_PARAMETER}; async fn build_test_connection( ) -> Result<((Keypair, Handshake), (Keypair, Handshake)), Error> { @@ -684,7 +684,7 @@ mod test { Ok((dialer_result?, listener_result?)) } - #[runtime::test] + #[tokio::test] async fn test_handshake() { let ((dialer_keypair, dialer), (listener_keypair, listener)) = build_test_connection().await.unwrap(); @@ -700,7 +700,7 @@ mod test { ); } - #[runtime::test] + #[tokio::test] async fn simple_test() -> io::Result<()> { let ((_dialer_keypair, dialer), (_listener_keypair, listener)) = build_test_connection().await.unwrap(); @@ -720,7 +720,7 @@ mod test { Ok(()) } - #[runtime::test] + #[tokio::test] async fn interleaved_writes() -> io::Result<()> { let ((_dialer_keypair, dialer), (_listener_keypair, listener)) = build_test_connection().await.unwrap(); @@ -748,7 +748,7 @@ mod test { Ok(()) } - #[runtime::test] + #[tokio::test] async fn u16_max_writes() -> io::Result<()> { let ((_dialer_keypair, dialer), (_listener_keypair, listener)) = build_test_connection().await.unwrap(); @@ -765,7 +765,7 @@ mod test { Ok(()) } - #[runtime::test] + #[tokio::test] async fn larger_writes() -> io::Result<()> { let ((_dialer_keypair, dialer), (_listener_keypair, listener)) = build_test_connection().await.unwrap(); @@ -782,7 +782,7 @@ mod test { Ok(()) } - #[runtime::test] + #[tokio::test] async fn unexpected_eof() -> io::Result<()> { let ((_dialer_keypair, dialer), (_listener_keypair, listener)) = build_test_connection().await.unwrap(); diff --git a/comms/core/src/peer_manager/connection_stats.rs b/comms/core/src/peer_manager/connection_stats.rs deleted file mode 100644 index 4d2c4ce54a..0000000000 --- a/comms/core/src/peer_manager/connection_stats.rs +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright 2019, The Tari Project -// -// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the -// following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following -// disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the -// following disclaimer in the documentation and/or other materials provided with the distribution. -// -// 3. 
Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote -// products derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -use std::{ - convert::TryFrom, - fmt, - fmt::{Display, Formatter}, - time::Duration, -}; - -use chrono::{NaiveDateTime, Utc}; -use serde::{Deserialize, Serialize}; - -/// Basic connection stats for a [Peer](super::Peer). -#[derive(Debug, Clone, Default, Deserialize, Serialize, PartialEq, Eq)] -pub struct PeerConnectionStats { - /// The last time a connection was successfully made or, None if a successful - /// connection has never been made. - pub last_connected_at: Option, - /// Represents the last connection attempt - pub last_connection_attempt: LastConnectionAttempt, -} - -impl PeerConnectionStats { - /// New connection stats - pub fn new() -> Self { - Default::default() - } - - /// Sets the last connection as a success. `has_connected()` will return true from here on. - pub fn set_connection_success(&mut self) { - self.last_connected_at = Some(Utc::now().naive_utc()); - self.last_connection_attempt = LastConnectionAttempt::Succeeded(Utc::now().naive_utc()); - } - - /// Sets the last connection as a failure - pub fn set_connection_failed(&mut self) { - self.last_connection_attempt = LastConnectionAttempt::Failed { - failed_at: Utc::now().naive_utc(), - num_attempts: self.failed_attempts() + 1, - }; - } - - /// Returns true if a successful connection has ever been recorded, otherwise false - pub fn has_ever_connected(&self) -> bool { - self.last_connected_at.is_some() - } - - /// Returns the number of failed attempts. 0 is returned if the `last_connection_attempt` is not `Failed` - pub fn failed_attempts(&self) -> usize { - match self.last_connection_attempt { - LastConnectionAttempt::Failed { num_attempts, .. } => num_attempts, - _ => 0, - } - } - - /// Returns the date time (UTC) since the last failed connection occurred. None is returned if the - /// `last_connection_attempt` is not `Failed` - pub fn last_failed_at(&self) -> Option<&NaiveDateTime> { - match &self.last_connection_attempt { - LastConnectionAttempt::Failed { failed_at, .. } => Some(failed_at), - _ => None, - } - } - - /// Returns the Duration since the last failed connection occurred. 
None is returned if the - /// `last_connection_attempt` is not `Failed` - pub fn time_since_last_failure(&self) -> Option { - self.last_failed_at() - .map(|failed_at| Utc::now().naive_utc() - *failed_at) - .map(convert_to_std_duration) - } -} - -impl fmt::Display for PeerConnectionStats { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.last_failed_at() { - Some(_) => { - write!(f, "{}", self.last_connection_attempt)?; - }, - None => match self.last_connected_at.as_ref() { - Some(dt) => { - write!(f, "Last connected at {}", dt.format("%Y-%m-%d %H:%M:%S"))?; - }, - None => { - write!(f, "{}", self.last_connection_attempt)?; - }, - }, - } - - Ok(()) - } -} - -/// Details on the last connection attempt -#[derive(Debug, Clone, Deserialize, Serialize, PartialOrd, PartialEq, Eq)] -pub enum LastConnectionAttempt { - /// This node has never attempted to connect to this peer - Never, - /// The last connection attempt was successful - Succeeded(NaiveDateTime), - /// The last connection attempt failed. - Failed { - /// Timestamp of the last failed attempt - failed_at: NaiveDateTime, - /// Number of failed attempts in a row - num_attempts: usize, - }, -} - -/// Convert `chrono::Duration` to `std::time::Duration` -fn convert_to_std_duration(old_duration: chrono::Duration) -> Duration { - Duration::from_millis(u64::try_from(old_duration.num_milliseconds()).unwrap()) -} - -impl Default for LastConnectionAttempt { - fn default() -> Self { - LastConnectionAttempt::Never - } -} - -impl Display for LastConnectionAttempt { - fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), fmt::Error> { - use LastConnectionAttempt::{Failed, Never, Succeeded}; - match self { - Never => write!(f, "Connection never attempted"), - Succeeded(succeeded_at) => write!(f, "Connection succeeded at {}", succeeded_at), - Failed { - failed_at, - num_attempts, - } => write!( - f, - "Connection failed at {} ({} attempt(s))", - failed_at.format("%Y-%m-%d %H:%M:%S"), - num_attempts - ), - } - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn peer_connection_stats() { - let state = PeerConnectionStats::new(); - assert!(state.last_failed_at().is_none()); - assert_eq!(state.failed_attempts(), 0); - assert!(state.time_since_last_failure().is_none()); - assert!(!state.has_ever_connected()); - - let mut state = PeerConnectionStats::new(); - state.set_connection_success(); - assert!(state.last_failed_at().is_none()); - assert_eq!(state.failed_attempts(), 0); - assert!(state.time_since_last_failure().is_none()); - assert!(state.has_ever_connected()); - - let mut state = PeerConnectionStats::new(); - state.set_connection_failed(); - state.set_connection_failed(); - state.set_connection_failed(); - assert!(state.last_failed_at().is_some()); - assert_eq!(state.failed_attempts(), 3); - assert!(state.time_since_last_failure().unwrap().as_millis() < 100); - - assert!(!state.has_ever_connected()); - state.set_connection_success(); - assert!(state.has_ever_connected()); - } -} diff --git a/comms/core/src/peer_manager/error.rs b/comms/core/src/peer_manager/error.rs index 9f588acc09..33443745f8 100644 --- a/comms/core/src/peer_manager/error.rs +++ b/comms/core/src/peer_manager/error.rs @@ -40,6 +40,12 @@ pub enum PeerManagerError { MigrationError(String), #[error("Identity signature is invalid")] InvalidIdentitySignature, + #[error("Identity signature missing")] + MissingIdentitySignature, + #[error("Invalid peer address: {0}")] + MultiaddrError(String), + #[error("Unable to parse any of the network addresses offered 
by the connecting peer")] + PeerIdentityNoValidAddresses, } impl PeerManagerError { diff --git a/comms/core/src/peer_manager/identity_signature.rs b/comms/core/src/peer_manager/identity_signature.rs index de867f9b17..616ccb3c0e 100644 --- a/comms/core/src/peer_manager/identity_signature.rs +++ b/comms/core/src/peer_manager/identity_signature.rs @@ -33,7 +33,7 @@ use super::hashing::{comms_core_peer_manager_domain, CommsCorePeerManagerDomain, use crate::{ message::MessageExt, multiaddr::Multiaddr, - peer_manager::{Peer, PeerFeatures, PeerManagerError}, + peer_manager::{PeerFeatures, PeerManagerError}, proto, types::{CommsChallenge, CommsPublicKey, CommsSecretKey, Signature}, }; @@ -96,14 +96,6 @@ impl IdentitySignature { self.version } - pub fn is_valid_for_peer(&self, peer: &Peer) -> bool { - self.is_valid( - &peer.public_key, - peer.features, - peer.addresses.to_lexicographically_sorted().iter(), - ) - } - pub fn is_valid<'a, I: IntoIterator>( &self, public_key: &CommsPublicKey, @@ -202,7 +194,6 @@ mod test { use tari_crypto::keys::{PublicKey, SecretKey}; use super::*; - use crate::peer_manager::{NodeId, PeerFlags}; mod is_valid_for_peer { use super::*; @@ -215,18 +206,10 @@ mod test { let updated_at = Utc::now(); let identity = IdentitySignature::sign_new(&secret, PeerFeatures::COMMUNICATION_NODE, [&address], updated_at); - let node_id = NodeId::from_public_key(&public_key); - - let peer = Peer::new( - public_key, - node_id, - vec![address].into(), - PeerFlags::empty(), - PeerFeatures::COMMUNICATION_NODE, - vec![], - String::new(), + assert!( + identity.is_valid(&public_key, PeerFeatures::COMMUNICATION_NODE, [&address]), + "Signature is not valid" ); - assert!(identity.is_valid_for_peer(&peer)); } #[test] @@ -237,20 +220,12 @@ mod test { let updated_at = Utc::now(); let identity = IdentitySignature::sign_new(&secret, PeerFeatures::COMMUNICATION_NODE, [&address], updated_at); - let node_id = NodeId::from_public_key(&public_key); let tampered = Multiaddr::from_str("/ip4/127.0.0.1/tcp/4321").unwrap(); - - let peer = Peer::new( - public_key, - node_id, - vec![tampered].into(), - PeerFlags::empty(), - PeerFeatures::COMMUNICATION_NODE, - vec![], - String::new(), + assert!( + !identity.is_valid(&public_key, PeerFeatures::COMMUNICATION_NODE, [&tampered]), + "Signature is not valid" ); - assert!(!identity.is_valid_for_peer(&peer)); } #[test] @@ -261,20 +236,13 @@ mod test { let updated_at = Utc::now(); let identity = IdentitySignature::sign_new(&secret, PeerFeatures::COMMUNICATION_NODE, [&address], updated_at); - let node_id = NodeId::from_public_key(&public_key); let tampered = PeerFeatures::COMMUNICATION_CLIENT; - let peer = Peer::new( - public_key, - node_id, - vec![address].into(), - PeerFlags::empty(), - tampered, - vec![], - String::new(), + assert!( + !identity.is_valid(&public_key, tampered, [&address]), + "Signature is not valid" ); - assert!(!identity.is_valid_for_peer(&peer)); } } } diff --git a/comms/core/src/peer_manager/manager.rs b/comms/core/src/peer_manager/manager.rs index dcc2b7add2..2add95a5e1 100644 --- a/comms/core/src/peer_manager/manager.rs +++ b/comms/core/src/peer_manager/manager.rs @@ -23,12 +23,13 @@ use std::{fmt, fs::File, time::Duration}; use multiaddr::Multiaddr; -use tari_storage::{lmdb_store::LMDBDatabase, IterationResult}; +use tari_storage::{lmdb_store::LMDBDatabase, CachedStore, IterationResult}; use tokio::sync::RwLock; #[cfg(feature = "metrics")] use crate::peer_manager::metrics; use crate::{ + net_address::{MultiaddressesWithStats, PeerAddressSource}, 
peer_manager::{ migrations, peer::{Peer, PeerFlags}, @@ -47,14 +48,15 @@ use crate::{ /// The PeerManager consist of a routing table of previously discovered peers. /// It also provides functionality to add, find and delete peers. pub struct PeerManager { - peer_storage: RwLock>>, + // yo dawg, I heard you like wrappers, so I wrapped your wrapper in a wrapper so you can wrap while you wrap + peer_storage: RwLock>>>, _file_lock: Option, } impl PeerManager { /// Constructs a new empty PeerManager pub fn new(database: CommsDatabase, file_lock: Option) -> Result { - let storage = PeerStorage::new_indexed(KeyValueWrapper::new(database))?; + let storage = PeerStorage::new_indexed(CachedStore::new(KeyValueWrapper::new(database)))?; Ok(Self { peer_storage: RwLock::new(storage), _file_lock: file_lock, @@ -99,7 +101,8 @@ impl PeerManager { /// /// [PeerQuery]: crate::peer_manager::PeerQuery pub async fn perform_query(&self, peer_query: PeerQuery<'_>) -> Result, PeerManagerError> { - self.peer_storage.read().await.perform_query(peer_query) + let lock = self.peer_storage.read().await; + lock.perform_query(peer_query) } /// Find the peer with the provided NodeID @@ -140,12 +143,11 @@ impl PeerManager { node_id: NodeId, addresses: Vec, peer_features: PeerFeatures, + source: &PeerAddressSource, ) -> Result { match self.find_by_public_key(pubkey).await { Ok(Some(mut peer)) => { - peer.connection_stats.set_connection_success(); - peer.addresses = addresses.into(); - peer.set_offline(false); + peer.addresses.update_addresses(&addresses, source); peer.features = peer_features; self.add_peer(peer.clone()).await?; Ok(peer) @@ -154,7 +156,7 @@ impl PeerManager { self.add_peer(Peer::new( pubkey.clone(), node_id, - addresses.into(), + MultiaddressesWithStats::from_addresses_with_source(addresses, source), PeerFlags::default(), peer_features, Default::default(), @@ -216,8 +218,24 @@ impl PeerManager { .closest_peers(node_id, n, excluded_peers, features) } - pub async fn mark_last_seen(&self, node_id: &NodeId) -> Result<(), PeerManagerError> { - self.peer_storage.write().await.mark_last_seen(node_id) + pub async fn mark_last_seen( + &self, + node_id: &NodeId, + addr: &Multiaddr, + source: &PeerAddressSource, + ) -> Result<(), PeerManagerError> { + let mut lock = self.peer_storage.write().await; + let peer = lock.find_by_node_id(node_id)?; + match peer { + Some(mut peer) => { + // if we have an address, update it + peer.addresses.add_address(addr, source); + peer.addresses.mark_last_seen_now(addr); + lock.add_peer(peer)?; + Ok(()) + }, + None => Err(PeerManagerError::PeerNotFoundError), + } } /// Fetch n random peers @@ -246,10 +264,8 @@ impl PeerManager { n: usize, features: PeerFeatures, ) -> Result { - self.peer_storage - .read() - .await - .calc_region_threshold(region_node_id, n, features) + let lock = self.peer_storage.read().await; + lock.calc_region_threshold(region_node_id, n, features) } /// Unbans the peer if it is banned. This function is idempotent. @@ -284,16 +300,6 @@ impl PeerManager { self.peer_storage.read().await.is_peer_banned(node_id) } - /// Changes the offline flag bit of the peer. Return the previous offline state. 
- pub async fn set_offline(&self, node_id: &NodeId, is_offline: bool) -> Result { - self.peer_storage.write().await.set_offline(node_id, is_offline) - } - - /// Adds a new net address to the peer if it doesn't yet exist - pub async fn add_net_address(&self, node_id: &NodeId, net_address: &Multiaddr) -> Result<(), PeerManagerError> { - self.peer_storage.write().await.add_net_address(node_id, net_address) - } - pub async fn update_each(&self, mut f: F) -> Result where F: FnMut(Peer) -> Option { let mut lock = self.peer_storage.write().await; @@ -353,13 +359,15 @@ mod test { peer::{Peer, PeerFlags}, PeerFeatures, }, - runtime, }; fn create_test_peer(ban_flag: bool, features: PeerFeatures) -> Peer { let (_sk, pk) = RistrettoPublicKey::random_keypair(&mut OsRng); let node_id = NodeId::from_key(&pk); - let net_addresses = MultiaddressesWithStats::from("/ip4/1.2.3.4/tcp/8000".parse::().unwrap()); + let net_addresses = MultiaddressesWithStats::from_addresses_with_source( + vec!["/ip4/1.2.3.4/tcp/8000".parse::().unwrap()], + &PeerAddressSource::Config, + ); let mut peer = Peer::new( pk, node_id, @@ -375,8 +383,8 @@ mod test { peer } - #[runtime::test] - async fn get_broadcast_identities() { + #[tokio::test] + async fn test_get_broadcast_identities() { // Create peer manager with random peers let peer_manager = PeerManager::new(HashmapDatabase::new(), None).unwrap(); let mut test_peers = vec![create_test_peer(true, PeerFeatures::COMMUNICATION_NODE)]; @@ -485,8 +493,8 @@ mod test { assert_ne!(identities1, identities2); } - #[runtime::test] - async fn calc_region_threshold() { + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + async fn test_calc_region_threshold() { let n = 5; // Create peer manager with random peers let peer_manager = PeerManager::new(HashmapDatabase::new(), None).unwrap(); @@ -553,8 +561,8 @@ mod test { } } - #[runtime::test] - async fn closest_peers() { + #[tokio::test] + async fn test_closest_peers() { let n = 5; // Create peer manager with random peers let peer_manager = PeerManager::new(HashmapDatabase::new(), None).unwrap(); @@ -587,21 +595,24 @@ mod test { } } - #[runtime::test] - async fn add_or_update_online_peer() { + #[tokio::test] + async fn test_add_or_update_online_peer() { let peer_manager = PeerManager::new(HashmapDatabase::new(), None).unwrap(); - let mut peer = create_test_peer(false, PeerFeatures::COMMUNICATION_NODE); - peer.set_offline(true); - peer.connection_stats.set_connection_failed(); + let peer = create_test_peer(false, PeerFeatures::COMMUNICATION_NODE); peer_manager.add_peer(peer.clone()).await.unwrap(); let peer = peer_manager - .add_or_update_online_peer(&peer.public_key, peer.node_id, vec![], peer.features) + .add_or_update_online_peer( + &peer.public_key, + peer.node_id, + vec![], + peer.features, + &PeerAddressSource::Config, + ) .await .unwrap(); assert!(!peer.is_offline()); - assert_eq!(peer.connection_stats.failed_attempts(), 0); } } diff --git a/comms/core/src/peer_manager/migrations.rs b/comms/core/src/peer_manager/migrations.rs index 41a17455cc..8672bfe967 100644 --- a/comms/core/src/peer_manager/migrations.rs +++ b/comms/core/src/peer_manager/migrations.rs @@ -20,8 +20,6 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
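`PeerManager::mark_last_seen` now takes the address that was actually reached plus a `PeerAddressSource`, and updates that address's stats instead of flipping a peer-level online/offline flag. A rough usage sketch follows, assuming the module paths visible in this diff re-export from `tari_comms`; `PeerAddressSource::Config` is simply the variant used in these hunks, and the right source depends on where the address came from (for example `FromNodeIdentity { .. }` elsewhere in this patch).

```rust
// Usage sketch only: module paths and error type location are assumptions
// based on the hunks above, not verified against the full crate.
use tari_comms::{
    multiaddr::Multiaddr,
    net_address::PeerAddressSource,
    peer_manager::{NodeId, PeerManager, PeerManagerError},
};

/// Record that we successfully spoke to `node_id` on `dialed_addr`.
async fn record_contact(
    peer_manager: &PeerManager,
    node_id: &NodeId,
    dialed_addr: &Multiaddr,
) -> Result<(), PeerManagerError> {
    // The reached address is added to the peer (if it was unknown) and its
    // last-seen timestamp refreshed; there is no peer-wide offline flag to
    // clear any more.
    peer_manager
        .mark_last_seen(node_id, dialed_addr, &PeerAddressSource::Config)
        .await
}
```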
-mod v5; -mod v6; mod v7; use log::*; @@ -33,7 +31,7 @@ pub(super) const MIGRATION_VERSION_KEY: u64 = u64::MAX; pub fn migrate(database: &LMDBDatabase) -> Result<(), LMDBError> { // Add migrations here in version order - let migrations = vec![v5::Migration.boxed(), v6::Migration.boxed(), v7::Migration.boxed()]; + let migrations = vec![v7::Migration.boxed()]; if migrations.is_empty() { return Ok(()); } diff --git a/comms/core/src/peer_manager/migrations/v5.rs b/comms/core/src/peer_manager/migrations/v5.rs deleted file mode 100644 index 782b9f3a08..0000000000 --- a/comms/core/src/peer_manager/migrations/v5.rs +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2020, The Tari Project -// -// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the -// following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following -// disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the -// following disclaimer in the documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote -// products derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
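With v5 and v6 removed, the migration registry reduces to a version-ordered list of boxed migrations, of which v7 is now kept only as a no-op example. Below is a minimal standalone sketch of that pattern, with toy types standing in for `LMDBDatabase`/`LMDBError`; the crate's actual `Migration` trait also carries an associated `Error` type and a `boxed()` helper.

```rust
// Toy version of the migration registry pattern used above.
trait Migration {
    fn get_version(&self) -> u32;
    fn migrate(&self, db: &mut Vec<String>) -> Result<(), String>; // toy "database"
}

struct V7;

impl Migration for V7 {
    fn get_version(&self) -> u32 {
        7
    }

    fn migrate(&self, _db: &mut Vec<String>) -> Result<(), String> {
        // In the patch, v7 no longer rewrites anything; its old body is kept
        // only as a commented-out example.
        Ok(())
    }
}

fn run_migrations(db: &mut Vec<String>, migrations: &[Box<dyn Migration>]) -> Result<(), String> {
    // Add migrations here in version order (mirrors `vec![v7::Migration.boxed()]`).
    for m in migrations {
        m.migrate(db)?;
        db.push(format!("migrated to v{}", m.get_version()));
    }
    Ok(())
}

fn main() {
    let mut db = Vec::new();
    let migrations: Vec<Box<dyn Migration>> = vec![Box::new(V7)];
    run_migrations(&mut db, &migrations).unwrap();
    assert_eq!(db, vec!["migrated to v7".to_string()]);
}
```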
- -use std::collections::HashMap; - -use chrono::NaiveDateTime; -use log::*; -use serde::{Deserialize, Serialize}; -use tari_storage::{ - lmdb_store::{LMDBDatabase, LMDBError}, - IterationResult, -}; -use tari_utilities::hex::serialize_to_hex; - -use crate::{ - net_address::MultiaddressesWithStats, - peer_manager::{ - connection_stats::PeerConnectionStats, - migrations::MIGRATION_VERSION_KEY, - node_id::deserialize_node_id_from_hex, - IdentitySignature, - NodeId, - PeerFeatures, - PeerFlags, - PeerId, - }, - protocol::ProtocolId, - types::CommsPublicKey, -}; - -const LOG_TARGET: &str = "comms::peer_manager::migrations::v4"; - -#[derive(Debug, Deserialize, Serialize)] -pub struct PeerV4 { - pub(super) id: Option, - pub public_key: CommsPublicKey, - #[serde(serialize_with = "serialize_to_hex")] - #[serde(deserialize_with = "deserialize_node_id_from_hex")] - pub node_id: NodeId, - pub addresses: MultiaddressesWithStats, - pub flags: PeerFlags, - pub banned_until: Option, - pub banned_reason: String, - pub offline_at: Option, - pub last_seen: Option, - pub features: PeerFeatures, - pub connection_stats: PeerConnectionStats, - pub supported_protocols: Vec, - pub added_at: NaiveDateTime, - pub user_agent: String, - pub metadata: HashMap>, -} - -#[derive(Debug, Deserialize, Serialize)] -pub struct PeerV5 { - pub(super) id: Option, - pub public_key: CommsPublicKey, - #[serde(serialize_with = "serialize_to_hex")] - #[serde(deserialize_with = "deserialize_node_id_from_hex")] - pub node_id: NodeId, - pub addresses: MultiaddressesWithStats, - pub flags: PeerFlags, - pub banned_until: Option, - pub banned_reason: String, - pub offline_at: Option, - pub last_seen: Option, - pub features: PeerFeatures, - pub connection_stats: PeerConnectionStats, - pub supported_protocols: Vec, - pub added_at: NaiveDateTime, - pub user_agent: String, - pub metadata: HashMap>, - pub identity_signature: Option, -} - -pub struct Migration; - -impl super::Migration for Migration { - type Error = LMDBError; - - fn get_version(&self) -> u32 { - 5 - } - - fn migrate(&self, db: &LMDBDatabase) -> Result<(), Self::Error> { - db.for_each::(|old_peer| { - let result = old_peer.and_then(|(key, peer)| { - if key == MIGRATION_VERSION_KEY { - return Ok(()); - } - - debug!(target: LOG_TARGET, "Migrating peer `{}`", peer.node_id.short_str()); - db.insert(&key, &PeerV5 { - id: peer.id, - public_key: peer.public_key, - node_id: peer.node_id, - addresses: peer.addresses, - flags: peer.flags, - banned_until: peer.banned_until, - banned_reason: peer.banned_reason, - offline_at: peer.offline_at, - last_seen: peer.last_seen, - features: peer.features, - connection_stats: peer.connection_stats, - supported_protocols: peer.supported_protocols, - added_at: peer.added_at, - user_agent: peer.user_agent, - metadata: peer.metadata, - identity_signature: None, - }) - .map_err(Into::into) - }); - - if let Err(err) = result { - error!( - target: LOG_TARGET, - "Failed to deserialize peer: {} ** Database may be corrupt **", err - ); - } - IterationResult::Continue - })?; - - Ok(()) - } -} diff --git a/comms/core/src/peer_manager/migrations/v6.rs b/comms/core/src/peer_manager/migrations/v6.rs deleted file mode 100644 index 0537a57662..0000000000 --- a/comms/core/src/peer_manager/migrations/v6.rs +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2020, The Tari Project -// -// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the -// following conditions are met: -// -// 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following -// disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the -// following disclaimer in the documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote -// products derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -use std::collections::HashMap; - -use chrono::NaiveDateTime; -use log::*; -use serde::{Deserialize, Serialize}; -use tari_storage::{ - lmdb_store::{LMDBDatabase, LMDBError}, - IterationResult, -}; -use tari_utilities::hex::serialize_to_hex; - -use crate::{ - net_address::MultiaddressesWithStats, - peer_manager::{ - connection_stats::PeerConnectionStats, - migrations::MIGRATION_VERSION_KEY, - node_id::deserialize_node_id_from_hex, - IdentitySignature, - NodeId, - PeerFeatures, - PeerFlags, - PeerId, - }, - protocol::ProtocolId, - types::CommsPublicKey, -}; - -const LOG_TARGET: &str = "comms::peer_manager::migrations::v6"; - -#[derive(Debug, Deserialize, Serialize)] -pub struct PeerV5 { - pub(super) id: Option, - pub public_key: CommsPublicKey, - #[serde(serialize_with = "serialize_to_hex")] - #[serde(deserialize_with = "deserialize_node_id_from_hex")] - pub node_id: NodeId, - pub addresses: MultiaddressesWithStats, - pub flags: PeerFlags, - pub banned_until: Option, - pub banned_reason: String, - pub offline_at: Option, - pub last_seen: Option, - pub features: PeerFeatures, - pub connection_stats: PeerConnectionStats, - pub supported_protocols: Vec, - pub added_at: NaiveDateTime, - pub user_agent: String, - pub metadata: HashMap>, - pub identity_signature: Option, -} - -/// No structural changes, just clears the identity signatures -pub struct Migration; - -impl super::Migration for Migration { - type Error = LMDBError; - - fn get_version(&self) -> u32 { - 6 - } - - fn migrate(&self, db: &LMDBDatabase) -> Result<(), Self::Error> { - db.for_each::(|old_peer| { - let result = old_peer.and_then(|(key, peer)| { - if key == MIGRATION_VERSION_KEY { - return Ok(()); - } - - debug!(target: LOG_TARGET, "Migrating peer `{}`", peer.node_id.short_str()); - db.insert(&key, &PeerV5 { - id: peer.id, - public_key: peer.public_key, - node_id: peer.node_id, - addresses: peer.addresses, - flags: peer.flags, - banned_until: peer.banned_until, - banned_reason: peer.banned_reason, - offline_at: peer.offline_at, - last_seen: peer.last_seen, - features: peer.features, - connection_stats: peer.connection_stats, - supported_protocols: peer.supported_protocols, - added_at: peer.added_at, - user_agent: 
peer.user_agent, - metadata: peer.metadata, - identity_signature: None, - }) - .map_err(Into::into) - }); - - if let Err(err) = result { - error!( - target: LOG_TARGET, - "Failed to deserialize peer: {} ** Database may be corrupt **", err - ); - } - IterationResult::Continue - })?; - - Ok(()) - } -} diff --git a/comms/core/src/peer_manager/migrations/v7.rs b/comms/core/src/peer_manager/migrations/v7.rs index c6e15ce197..f7e6282346 100644 --- a/comms/core/src/peer_manager/migrations/v7.rs +++ b/comms/core/src/peer_manager/migrations/v7.rs @@ -20,15 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use log::*; -use tari_storage::{ - lmdb_store::{LMDBDatabase, LMDBError}, - IterationResult, -}; - -use crate::peer_manager::{migrations::MIGRATION_VERSION_KEY, Peer, PeerId}; - -const LOG_TARGET: &str = "comms::peer_manager::migrations::v7"; +use tari_storage::lmdb_store::{LMDBDatabase, LMDBError}; /// No structural changes, removes peers with onionv2 addresses pub struct Migration; @@ -40,38 +32,40 @@ impl super::Migration for Migration { 7 } - fn migrate(&self, db: &LMDBDatabase) -> Result<(), Self::Error> { - db.for_each::(|old_peer| { - let result = old_peer.and_then(|(key, peer)| { - if key == MIGRATION_VERSION_KEY { - return Ok(()); - } - if peer.addresses.iter().any(|a| { - // Starts with /onion/ - a.iter() - .next() - .map(|p| matches!(p, multiaddr::Protocol::Onion(_, _))) - .unwrap_or(false) - }) { - debug!( - target: LOG_TARGET, - "Removing onionv2 peer `{}`", - peer.node_id.short_str() - ); - db.remove(&key)?; - } - - Ok(()) - }); + fn migrate(&self, _db: &LMDBDatabase) -> Result<(), Self::Error> { + // Kept here as an example... - if let Err(err) = result { - error!( - target: LOG_TARGET, - "Failed to deserialize peer: {} ** Database may be corrupt **", err - ); - } - IterationResult::Continue - })?; + // db.for_each::(|old_peer| { + // let result = old_peer.and_then(|(key, peer)| { + // if key == MIGRATION_VERSION_KEY { + // return Ok(()); + // } + // if peer.addresses.iter().any(|a| { + // // Starts with /onion/ + // a.iter() + // .next() + // .map(|p| matches!(p, multiaddr::Protocol::Onion(_, _))) + // .unwrap_or(false) + // }) { + // debug!( + // target: LOG_TARGET, + // "Removing onionv2 peer `{}`", + // peer.node_id.short_str() + // ); + // db.remove(&key)?; + // } + // + // Ok(()) + // }); + // + // if let Err(err) = result { + // error!( + // target: LOG_TARGET, + // "Failed to deserialize peer: {} ** Database may be corrupt **", err + // ); + // } + // IterationResult::Continue + // })?; Ok(()) } diff --git a/comms/core/src/peer_manager/mod.rs b/comms/core/src/peer_manager/mod.rs index 63cc3a2c93..1eccafa8d2 100644 --- a/comms/core/src/peer_manager/mod.rs +++ b/comms/core/src/peer_manager/mod.rs @@ -70,7 +70,8 @@ //! let returned_peer = peer_manager.find_by_node_id(&node_id).unwrap(); //! ``` -mod connection_stats; +/// The maximum size of the peer's user agent string. If the peer sends a longer string it is truncated. 
+const MAX_USER_AGENT_LEN: usize = 100; mod error; pub use error::PeerManagerError; @@ -107,6 +108,9 @@ pub use peer_query::{PeerQuery, PeerQuerySortBy}; mod peer_storage; pub use peer_storage::PeerStorage; +mod peer_identity_claim; +pub use peer_identity_claim::PeerIdentityClaim; + mod migrations; mod or_not_found; diff --git a/comms/core/src/peer_manager/node_identity.rs b/comms/core/src/peer_manager/node_identity.rs index 6bb9f4943d..6c07146cc0 100644 --- a/comms/core/src/peer_manager/node_identity.rs +++ b/comms/core/src/peer_manager/node_identity.rs @@ -36,7 +36,15 @@ use tari_crypto::{ use super::node_id::deserialize_node_id_from_hex; use crate::{ - peer_manager::{identity_signature::IdentitySignature, node_id::NodeId, Peer, PeerFeatures, PeerFlags}, + net_address::{MultiaddressesWithStats, PeerAddressSource}, + peer_manager::{ + identity_signature::IdentitySignature, + node_id::NodeId, + Peer, + PeerFeatures, + PeerFlags, + PeerIdentityClaim, + }, types::{CommsPublicKey, CommsSecretKey}, }; @@ -49,7 +57,7 @@ pub struct NodeIdentity { public_key: CommsPublicKey, features: PeerFeatures, secret_key: CommsSecretKey, - public_address: RwLock, + public_addresses: RwLock>, #[serde(default = "rwlock_none")] identity_signature: RwLock>, } @@ -60,7 +68,7 @@ fn rwlock_none() -> RwLock> { impl NodeIdentity { /// Create a new NodeIdentity from the provided key pair and control service address - pub fn new(secret_key: CommsSecretKey, public_address: Multiaddr, features: PeerFeatures) -> Self { + pub fn new(secret_key: CommsSecretKey, public_addresses: Vec, features: PeerFeatures) -> Self { let public_key = CommsPublicKey::from_secret_key(&secret_key); let node_id = NodeId::from_key(&public_key); @@ -69,7 +77,7 @@ impl NodeIdentity { public_key, features, secret_key, - public_address: RwLock::new(public_address), + public_addresses: RwLock::new(public_addresses), identity_signature: RwLock::new(None), }; node_identity.sign(); @@ -83,7 +91,7 @@ impl NodeIdentity { /// Prefer using NodeIdentity::new over this function. pub fn with_signature_unchecked( secret_key: CommsSecretKey, - public_address: Multiaddr, + public_addresses: Vec, features: PeerFeatures, identity_signature: Option, ) -> Self { @@ -95,7 +103,7 @@ impl NodeIdentity { public_key, features, secret_key, - public_address: RwLock::new(public_address), + public_addresses: RwLock::new(public_addresses), identity_signature: RwLock::new(identity_signature), } } @@ -104,21 +112,46 @@ impl NodeIdentity { pub fn random(rng: &mut R, public_address: Multiaddr, features: PeerFeatures) -> Self where R: CryptoRng + Rng { let secret_key = CommsSecretKey::random(rng); - Self::new(secret_key, public_address, features) + Self::new(secret_key, vec![public_address], features) + } + + pub fn random_multiple_addresses(rng: &mut R, public_addresses: Vec, features: PeerFeatures) -> Self + where R: CryptoRng + Rng { + let secret_key = CommsSecretKey::random(rng); + Self::new(secret_key, public_addresses, features) } /// Retrieve the publicly accessible address that peers must connect to establish a connection - pub fn public_address(&self) -> Multiaddr { - acquire_read_lock!(self.public_address).clone() + pub fn public_addresses(&self) -> Vec { + acquire_read_lock!(self.public_addresses).clone() + } + + pub fn first_public_address(&self) -> Multiaddr { + acquire_read_lock!(self.public_addresses)[0].clone() } /// Modify the public address. 
- pub fn set_public_address(&self, address: Multiaddr) { + pub fn add_public_address(&self, address: Multiaddr) { + let mut must_sign = false; + { + let mut lock = acquire_write_lock!(self.public_addresses); + if !lock.contains(&address) { + lock.push(address); + must_sign = true; + } + } + if must_sign { + self.sign() + } + } + + pub fn replace_public_address(&self, address: Multiaddr) { let mut must_sign = false; { - let mut lock = acquire_write_lock!(self.public_address); - if *lock != address { - *lock = address; + let mut lock = acquire_write_lock!(self.public_addresses); + if !lock.contains(&address) { + lock.clear(); + lock.push(address); must_sign = true; } } @@ -182,7 +215,7 @@ impl NodeIdentity { let identity_sig = IdentitySignature::sign_new( self.secret_key(), self.features, - Some(&*acquire_read_lock!(self.public_address)), + &*acquire_read_lock!(self.public_addresses), Utc::now(), ); @@ -192,16 +225,29 @@ impl NodeIdentity { /// Returns a Peer with the same public key, node id, public address and features as represented in this /// NodeIdentity. _NOTE: PeerFlags, supported_protocols and user agent are empty._ pub fn to_peer(&self) -> Peer { - let mut peer = Peer::new( + let peer_identity_claim = PeerIdentityClaim { + addresses: self.public_addresses(), + features: self.features, + signature: IdentitySignature::sign_new( + &self.secret_key, + self.features, + &self.public_addresses(), + Utc::now(), + ), + unverified_data: None, + }; + let peer = Peer::new( self.public_key().clone(), self.node_id().clone(), - self.public_address().into(), + MultiaddressesWithStats::from_addresses_with_source( + self.public_addresses(), + &PeerAddressSource::FromNodeIdentity { peer_identity_claim }, + ), PeerFlags::empty(), self.features(), Default::default(), Default::default(), ); - peer.identity_signature = acquire_read_lock!(self.identity_signature).clone(); peer } @@ -214,7 +260,7 @@ impl Clone for NodeIdentity { public_key: self.public_key.clone(), features: self.features, secret_key: self.secret_key.clone(), - public_address: RwLock::new(self.public_address()), + public_addresses: RwLock::new(self.public_addresses()), identity_signature: RwLock::new(self.identity_signature_read().as_ref().cloned()), } } @@ -224,7 +270,15 @@ impl fmt::Display for NodeIdentity { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { writeln!(f, "Public Key: {}", self.public_key)?; writeln!(f, "Node ID: {}", self.node_id)?; - writeln!(f, "Public Address: {}", acquire_read_lock!(self.public_address))?; + writeln!( + f, + "Public Address: {}", + acquire_read_lock!(self.public_addresses) + .iter() + .map(|s| s.to_string()) + .collect::>() + .join(", ") + )?; writeln!(f, "Features: {:?}", self.features)?; Ok(()) @@ -236,7 +290,7 @@ impl fmt::Debug for NodeIdentity { f.debug_struct("NodeIdentity") .field("public_key", &self.public_key) .field("node_id", &self.node_id) - .field("public_address", &self.public_address) + .field("public_address", &self.public_addresses) .field("features", &self.features) .field("secret_key", &"") .field("identity_signature", &*acquire_read_lock!(self.identity_signature)) diff --git a/comms/core/src/peer_manager/peer.rs b/comms/core/src/peer_manager/peer.rs index 1651cffbfb..933a5627e0 100644 --- a/comms/core/src/peer_manager/peer.rs +++ b/comms/core/src/peer_manager/peer.rs @@ -21,6 +21,7 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
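`NodeIdentity` now carries a list of public addresses: `add_public_address` appends and re-signs only when the address is genuinely new, while `replace_public_address` swaps out the whole set. A hedged usage sketch, assuming the `tari_comms` re-exports shown in this diff and a `rand` version whose `OsRng` satisfies the `CryptoRng + Rng` bound; the addresses and port are placeholders.

```rust
// Usage sketch based on the NodeIdentity methods introduced above.
use rand::rngs::OsRng;
use tari_comms::{
    multiaddr::Multiaddr,
    peer_manager::{NodeIdentity, PeerFeatures},
};

fn main() {
    let tcp_addr: Multiaddr = "/ip4/203.0.113.7/tcp/18189".parse().unwrap();
    let identity = NodeIdentity::random(&mut OsRng, tcp_addr.clone(), PeerFeatures::COMMUNICATION_NODE);

    // Adding a new address appends it and re-signs the identity; adding a
    // duplicate is a no-op, so no needless re-signing happens.
    let dns_addr: Multiaddr = "/dns4/seed.example.com/tcp/18189".parse().unwrap();
    identity.add_public_address(dns_addr.clone());
    identity.add_public_address(dns_addr);

    assert_eq!(identity.public_addresses().len(), 2);
    assert_eq!(identity.first_public_address(), tcp_addr);
}
```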
use std::{ + cmp, collections::HashMap, convert::TryFrom, fmt::Display, @@ -35,14 +36,12 @@ use serde::{Deserialize, Serialize}; use tari_utilities::hex::serialize_to_hex; use super::{ - connection_stats::PeerConnectionStats, node_id::{deserialize_node_id_from_hex, NodeId}, peer_id::PeerId, PeerFeatures, }; use crate::{ - net_address::MultiaddressesWithStats, - peer_manager::identity_signature::IdentitySignature, + net_address::{MultiaddressesWithStats, PeerAddressSource}, protocol::ProtocolId, types::CommsPublicKey, utils::datetime::{format_local_datetime, is_max_datetime, safe_future_datetime_from_duration}, @@ -76,12 +75,8 @@ pub struct Peer { pub flags: PeerFlags, pub banned_until: Option, pub banned_reason: String, - pub offline_at: Option, - pub last_seen: Option, /// Features supported by the peer pub features: PeerFeatures, - /// Connection statics for the peer - pub connection_stats: PeerConnectionStats, /// Protocols supported by the peer. This should not be considered a definitive list of supported protocols and is /// used as information for more efficient protocol negotiation. pub supported_protocols: Vec, @@ -92,9 +87,8 @@ pub struct Peer { /// Metadata field. This field is for use by upstream clients to record extra info about a peer. /// We use a hashmap here so that we can use more than one "info set" pub metadata: HashMap>, - /// Signs the peer information with a timestamp to prevent malleability. This is optional for backward - /// compatibility, but without this, the identity (addresses etc) cannot be updated. - pub identity_signature: Option, + /// If this peer has been deleted. + pub deleted_at: Option, } impl Peer { @@ -117,14 +111,11 @@ impl Peer { features, banned_until: None, banned_reason: String::new(), - offline_at: None, - last_seen: None, - connection_stats: Default::default(), added_at: Utc::now().naive_utc(), supported_protocols, user_agent, metadata: HashMap::new(), - identity_signature: None, + deleted_at: None, } } @@ -133,11 +124,30 @@ impl Peer { /// This method panics if the peer does not have a PeerId, and therefore is not persisted. /// If the caller should be sure that the peer is persisted before calling this function. /// This can be checked by using `Peer::is_persisted`. - #[inline] pub fn id(&self) -> PeerId { self.id.expect("call to Peer::id() when peer is not persisted") } + /// Merges the data with another peer. 
This is usually used to update a peer before it is saved to the + /// database so that data is not overwritten + pub fn merge(&mut self, other: &Peer) { + self.addresses.merge(&other.addresses); + self.banned_reason = other.banned_reason.clone(); + self.added_at = cmp::min(self.added_at, other.added_at); + self.banned_until = cmp::max(self.banned_until, other.banned_until); + for protocol in &other.supported_protocols { + if !self.supported_protocols.contains(protocol) { + self.supported_protocols.push(protocol.clone()); + } + } + self.metadata = other.metadata.clone(); + self.features = other.features; + self.flags = other.flags; + if !other.user_agent.is_empty() { + self.user_agent = other.user_agent.clone(); + } + } + pub fn is_persisted(&self) -> bool { self.id.is_some() } @@ -150,12 +160,17 @@ impl Peer { /// Returns true if the peer is marked as offline pub fn is_offline(&self) -> bool { - self.offline_at.is_some() + self.addresses.offline_at().is_some() + } + + pub fn offline_at(&self) -> Option { + self.addresses.offline_at() } /// The length of time since a peer was marked as offline pub fn offline_since(&self) -> Option { - self.offline_at + let offline_at = self.addresses.offline_at(); + offline_at .map(|offline_at| Utc::now().naive_utc() - offline_at) .map(|since| Duration::from_secs(u64::try_from(since.num_seconds()).unwrap_or(0))) } @@ -169,54 +184,9 @@ impl Peer { self.id = Some(id); } - #[allow(clippy::option_option)] - pub fn update( - &mut self, - net_addresses: Option>, - flags: Option, - banned_until: Option>, - banned_reason: Option, - is_offline: Option, - features: Option, - supported_protocols: Option>, - ) { - if let Some(new_net_addresses) = net_addresses { - self.addresses.update_addresses(new_net_addresses); - self.identity_signature = None; - } - if let Some(new_flags) = flags { - self.flags = new_flags - } - if let Some(banned_until) = banned_until { - self.banned_until = banned_until - .map(safe_future_datetime_from_duration) - .map(|dt| dt.naive_utc()); - } - if let Some(banned_reason) = banned_reason { - self.banned_reason = banned_reason; - } - if let Some(is_offline) = is_offline { - self.set_offline(is_offline); - } - if let Some(new_features) = features { - self.features = new_features; - self.identity_signature = None; - } - if let Some(supported_protocols) = supported_protocols { - self.supported_protocols = supported_protocols; - } - } - - /// Returns `Some(true)` if the identity signature is valid, otherwise `Some(false)`. If no signature is present, - /// None is returned. - pub fn is_valid_identity_signature(&self) -> Option { - let identity_signature = self.identity_signature.as_ref()?; - Some(identity_signature.is_valid_for_peer(self)) - } - /// Provides that date time of the last successful interaction with the peer pub fn last_seen(&self) -> Option { - self.last_seen + self.addresses.last_seen() } /// Provides that length of time since the last successful interaction with the peer @@ -257,16 +227,6 @@ impl Peer { self.banned_until.as_ref().filter(|dt| *dt > &Utc::now().naive_utc()) } - /// Marks the peer as offline if true, or not offline if false - pub fn set_offline(&mut self, is_offline: bool) -> &mut Self { - if is_offline { - self.offline_at = Some(Utc::now().naive_utc()); - } else { - self.offline_at = None; - } - self - } - /// This will store metadata inside of the metadata field in the peer. 
/// It will return None if the value was empty and the old value if the value was updated pub fn set_metadata(&mut self, key: u8, data: Vec) -> Option> { @@ -278,16 +238,9 @@ impl Peer { self.metadata.get(&key) } - /// Set the identity signature of the peer. WARNING: It is up to the caller to ensure that the signature is valid. - pub fn set_valid_identity_signature(&mut self, signature: IdentitySignature) -> &mut Self { - self.identity_signature = Some(signature); - self - } - /// Update the peer's addresses. This call will invalidate the identity signature. - pub fn update_addresses(&mut self, addresses: Vec) -> &mut Self { - self.addresses.update_addresses(addresses); - self.identity_signature = None; + pub fn update_addresses(&mut self, addresses: &[Multiaddr], source: &PeerAddressSource) -> &mut Self { + self.addresses.update_addresses(addresses, source); self } @@ -295,7 +248,6 @@ impl Peer { pub fn set_features(&mut self, features: PeerFeatures) -> &mut Self { if self.features != features { self.features = features; - self.identity_signature = None; } self } @@ -310,16 +262,7 @@ impl Peer { } pub fn to_short_string(&self) -> String { - format!( - "{}::{}", - self.public_key, - self.addresses - .addresses - .iter() - .map(ToString::to_string) - .collect::>() - .join(",") - ) + format!("{}::{}", self.public_key, self.addresses) } } @@ -334,8 +277,8 @@ impl Display for Peer { let status_str = { let mut s = Vec::new(); - if let Some(offline_at) = self.offline_at.as_ref() { - s.push(format!("Offline since: {}", format_local_datetime(offline_at))); + if let Some(offline_at) = self.offline_at() { + s.push(format!("Offline since: {}", format_local_datetime(&offline_at))); } if let Some(dt) = self.banned_until() { @@ -355,16 +298,11 @@ impl Display for Peer { }; f.write_str(&format!( - "{}[{}] PK={} ({}) - {}. Type: {}. User agent: {}. {}.", + "{}[{}] PK={} ({}) - {}. Type: {}. 
User agent: {}.", flags_str, self.node_id.short_str(), self.public_key, - self.addresses - .addresses - .iter() - .map(ToString::to_string) - .collect::>() - .join(","), + self.addresses, status_str, match self.features { PeerFeatures::COMMUNICATION_NODE => "BASE_NODE".to_string(), @@ -372,7 +310,6 @@ impl Display for Peer { f => format!("{:?}", f), }, user_agent, - self.connection_stats, )) } } @@ -391,7 +328,6 @@ impl Hash for Peer { #[cfg(test)] mod test { - use bytes::Bytes; use serde_json::Value; use tari_crypto::{ keys::PublicKey, @@ -400,19 +336,17 @@ mod test { }; use super::*; - use crate::{ - net_address::MultiaddressesWithStats, - peer_manager::NodeId, - test_utils::node_identity::build_node_identity, - types::CommsPublicKey, - }; + use crate::{net_address::MultiaddressesWithStats, peer_manager::NodeId, types::CommsPublicKey}; #[test] fn test_is_banned_and_ban_for() { let mut rng = rand::rngs::OsRng; let (_sk, pk) = RistrettoPublicKey::random_keypair(&mut rng); let node_id = NodeId::from_key(&pk); - let addresses = MultiaddressesWithStats::from("/ip4/123.0.0.123/tcp/8000".parse::().unwrap()); + let addresses = MultiaddressesWithStats::from_addresses_with_source( + vec!["/ip4/123.0.0.123/tcp/8000".parse::().unwrap()], + &PeerAddressSource::Config, + ); let mut peer: Peer = Peer::new( pk, node_id, @@ -430,76 +364,6 @@ mod test { assert!(!peer.is_banned()); } - #[test] - fn test_offline_since() { - let mut peer = build_node_identity(Default::default()).to_peer(); - assert!(peer.offline_since().is_none()); - peer.set_offline(true); - assert!(peer.offline_since().is_some()); - peer.offline_at = Some(Utc::now().naive_utc() + chrono::Duration::seconds(10)); - assert_eq!(peer.offline_since().unwrap(), Duration::from_secs(0)); - } - - #[test] - fn test_is_offline() { - let mut peer = build_node_identity(Default::default()).to_peer(); - assert!(!peer.is_offline()); - peer.set_offline(true); - assert!(peer.is_offline()); - } - - #[test] - fn test_update() { - let mut rng = rand::rngs::OsRng; - let (_sk, public_key1) = RistrettoPublicKey::random_keypair(&mut rng); - let node_id = NodeId::from_key(&public_key1); - let net_address1 = "/ip4/124.0.0.124/tcp/7000".parse::().unwrap(); - let mut peer: Peer = Peer::new( - public_key1.clone(), - node_id.clone(), - MultiaddressesWithStats::from(net_address1.clone()), - PeerFlags::default(), - PeerFeatures::empty(), - Default::default(), - Default::default(), - ); - - let net_address2 = "/ip4/125.0.0.125/tcp/8000".parse::().unwrap(); - let net_address3 = "/ip4/126.0.0.126/tcp/9000".parse::().unwrap(); - - static DUMMY_PROTOCOL: Bytes = Bytes::from_static(b"dummy"); - peer.update( - Some(vec![net_address2.clone(), net_address3.clone()]), - None, - Some(Some(Duration::from_secs(1000))), - Some("".to_string()), - None, - Some(PeerFeatures::MESSAGE_PROPAGATION), - Some(vec![DUMMY_PROTOCOL.clone()]), - ); - - assert_eq!(peer.public_key, public_key1); - assert_eq!(peer.node_id, node_id); - assert!(!peer - .addresses - .addresses - .iter() - .any(|net_address_with_stats| net_address_with_stats.address == net_address1)); - assert!(peer - .addresses - .addresses - .iter() - .any(|net_address_with_stats| net_address_with_stats.address == net_address2)); - assert!(peer - .addresses - .addresses - .iter() - .any(|net_address_with_stats| net_address_with_stats.address == net_address3)); - assert!(peer.is_banned()); - assert!(peer.has_features(PeerFeatures::MESSAGE_PROPAGATION)); - assert_eq!(peer.supported_protocols, vec![DUMMY_PROTOCOL.clone()]); - } - #[test] fn 
json_ser_der() { let expected_pk_hex = "02622ace8f7303a31cafc63f8fc48fdc16e1c8c8d234b2f0d6685282a9076031"; @@ -509,7 +373,10 @@ mod test { let peer = Peer::new( pk, node_id, - "/ip4/127.0.0.1/tcp/9000".parse::().unwrap().into(), + MultiaddressesWithStats::from_addresses_with_source( + vec!["/ip4/127.0.0.1/tcp/9000".parse::().unwrap()], + &PeerAddressSource::Config, + ), PeerFlags::empty(), PeerFeatures::empty(), Default::default(), diff --git a/comms/core/src/peer_manager/peer_identity_claim.rs b/comms/core/src/peer_manager/peer_identity_claim.rs new file mode 100644 index 0000000000..78e5f06c47 --- /dev/null +++ b/comms/core/src/peer_manager/peer_identity_claim.rs @@ -0,0 +1,114 @@ +// Copyright 2022. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
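`Peer::merge`, introduced in peer.rs above, is how two records for the same node are reconciled before saving now that updates no longer overwrite a signed address list. The standalone sketch below mirrors the rules visible in that hunk (earliest `added_at`, longest ban, union of protocols, non-empty user agent wins); the union-of-addresses behaviour is an assumption, since `MultiaddressesWithStats::merge` itself is not shown here, and the toy strings stand in for the real types.

```rust
// Self-contained sketch of the merge rules in Peer::merge, with plain types
// standing in for Peer, MultiaddressesWithStats, ProtocolId and NaiveDateTime.
use std::cmp;

#[derive(Debug)]
struct PeerRecord {
    addresses: Vec<String>,
    supported_protocols: Vec<String>,
    added_at: i64,
    banned_until: Option<i64>,
    user_agent: String,
}

impl PeerRecord {
    fn merge(&mut self, other: &PeerRecord) {
        // Assumed: merging address lists keeps the union; the real
        // MultiaddressesWithStats::merge also reconciles per-address stats.
        for addr in &other.addresses {
            if !self.addresses.contains(addr) {
                self.addresses.push(addr.clone());
            }
        }
        // Union of advertised protocols, as in Peer::merge above.
        for protocol in &other.supported_protocols {
            if !self.supported_protocols.contains(protocol) {
                self.supported_protocols.push(protocol.clone());
            }
        }
        // Earliest first-seen time wins; the longer of the two bans wins.
        self.added_at = cmp::min(self.added_at, other.added_at);
        self.banned_until = cmp::max(self.banned_until, other.banned_until);
        // Only adopt the incoming user agent if it is actually set.
        if !other.user_agent.is_empty() {
            self.user_agent = other.user_agent.clone();
        }
    }
}

fn main() {
    let mut stored = PeerRecord {
        addresses: vec!["/ip4/1.2.3.4/tcp/8000".into()],
        supported_protocols: vec!["proto-a".into()],
        added_at: 100,
        banned_until: None,
        user_agent: String::new(),
    };
    let incoming = PeerRecord {
        addresses: vec!["/dns4/node.example/tcp/8000".into()],
        supported_protocols: vec!["proto-a".into(), "proto-b".into()],
        added_at: 200,
        banned_until: Some(500),
        user_agent: "example-agent/1.0".into(),
    };

    stored.merge(&incoming);
    assert_eq!(stored.addresses.len(), 2);           // union, nothing dropped
    assert_eq!(stored.supported_protocols.len(), 2); // union of protocols
    assert_eq!(stored.added_at, 100);                // earliest kept
    assert_eq!(stored.banned_until, Some(500));      // longer ban kept
    assert_eq!(stored.user_agent, "example-agent/1.0");
}
```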
+ +use std::convert::{TryFrom, TryInto}; + +use multiaddr::Multiaddr; +use serde_derive::{Deserialize, Serialize}; + +use crate::{ + peer_manager::{IdentitySignature, PeerFeatures, PeerManagerError, MAX_USER_AGENT_LEN}, + proto::identity::PeerIdentityMsg, + protocol::ProtocolId, +}; + +#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)] +pub struct PeerIdentityClaim { + pub addresses: Vec, + pub features: PeerFeatures, + pub signature: IdentitySignature, + pub unverified_data: Option, +} + +impl PeerIdentityClaim { + pub fn new( + addresses: Vec, + features: PeerFeatures, + signature: IdentitySignature, + unverified_data: Option, + ) -> Self { + Self { + addresses, + features, + signature, + unverified_data, + } + } + + pub fn supported_protocols(&self) -> Vec { + self.unverified_data + .as_ref() + .map(|d| d.supported_protocols.clone()) + .unwrap_or_default() + } + + pub fn user_agent(&self) -> Option { + self.unverified_data.as_ref().map(|d| d.user_agent.clone()) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)] +pub struct PeerIdentityClaimUnverifiedData { + pub user_agent: String, + pub supported_protocols: Vec, +} + +impl TryFrom for PeerIdentityClaim { + type Error = PeerManagerError; + + fn try_from(value: PeerIdentityMsg) -> Result { + let addresses: Vec = value + .addresses + .iter() + .map(|addr_bytes| Multiaddr::try_from(addr_bytes.clone())) + .collect::>() + .map_err(|e| PeerManagerError::MultiaddrError(e.to_string()))?; + + if addresses.is_empty() { + return Err(PeerManagerError::PeerIdentityNoValidAddresses); + } + let features = PeerFeatures::from_bits_truncate(value.features); + + let supported_protocols = value + .supported_protocols + .iter() + .map(|p| bytes::Bytes::from(p.clone())) + .collect::>(); + + let mut user_agent = value.user_agent; + user_agent.truncate(MAX_USER_AGENT_LEN); + + if let Some(signature) = value.identity_signature { + Ok(Self { + addresses, + features, + signature: signature.try_into()?, + unverified_data: Some(PeerIdentityClaimUnverifiedData { + user_agent, + supported_protocols, + }), + }) + } else { + Err(PeerManagerError::MissingIdentitySignature) + } + } +} diff --git a/comms/core/src/peer_manager/peer_query.rs b/comms/core/src/peer_manager/peer_query.rs index 2c40bbf0a5..a66796c15a 100644 --- a/comms/core/src/peer_manager/peer_query.rs +++ b/comms/core/src/peer_manager/peer_query.rs @@ -220,7 +220,7 @@ mod test { use super::*; use crate::{ - net_address::MultiaddressesWithStats, + net_address::{MultiaddressesWithStats, PeerAddressSource}, peer_manager::{ node_id::NodeId, peer::{Peer, PeerFlags}, @@ -231,7 +231,10 @@ mod test { fn create_test_peer(ban_flag: bool) -> Peer { let (_sk, pk) = RistrettoPublicKey::random_keypair(&mut OsRng); let node_id = NodeId::from_key(&pk); - let net_addresses = MultiaddressesWithStats::from("/ip4/1.2.3.4/tcp/8000".parse::().unwrap()); + let net_addresses = MultiaddressesWithStats::from_addresses_with_source( + vec!["/ip4/1.2.3.4/tcp/8000".parse::().unwrap()], + &PeerAddressSource::Config, + ); let mut peer = Peer::new( pk, node_id, diff --git a/comms/core/src/peer_manager/peer_storage.rs b/comms/core/src/peer_manager/peer_storage.rs index e466b2b77f..91b8936177 100644 --- a/comms/core/src/peer_manager/peer_storage.rs +++ b/comms/core/src/peer_manager/peer_storage.rs @@ -24,14 +24,13 @@ use std::{collections::HashMap, time::Duration}; use chrono::Utc; use log::*; -use multiaddr::Multiaddr; use rand::{rngs::OsRng, seq::SliceRandom}; use tari_storage::{IterationResult, 
KeyValueStore}; use tari_utilities::ByteArray; use crate::{ peer_manager::{ - peer::{Peer, PeerFlags}, + peer::Peer, peer_id::{generate_peer_key, PeerId}, NodeDistance, NodeId, @@ -40,7 +39,6 @@ use crate::{ PeerQuery, PeerQuerySortBy, }, - protocol::ProtocolId, types::{CommsDatabase, CommsPublicKey}, }; @@ -51,7 +49,7 @@ const PEER_MANAGER_MAX_FLOOD_PEERS: usize = 1000; /// PeerStorage provides a mechanism to keep a datastore and a local copy of all peers in sync and allow fast searches /// using the node_id, public key or net_address of a peer. pub struct PeerStorage { - pub(crate) peer_db: DS, + peer_db: DS, public_key_index: HashMap, node_id_index: HashMap, } @@ -103,8 +101,14 @@ where DS: KeyValueStore trace!(target: LOG_TARGET, "Replacing peer that has NodeId '{}'", peer.node_id); // Replace existing entry peer.set_id(peer_key); + let mut existing_peer = self + .peer_db + .get(&peer_key) + .map_err(PeerManagerError::DatabaseError)? + .ok_or(PeerManagerError::PeerNotFoundError)?; + existing_peer.merge(&peer); self.peer_db - .insert(peer_key, peer) + .insert(peer_key, existing_peer) .map_err(PeerManagerError::DatabaseError)?; self.remove_index_links(peer_key); self.add_index_links(peer_key, public_key, node_id); @@ -125,69 +129,21 @@ where DS: KeyValueStore } } - /// Adds a peer to the routing table of the PeerManager if the peer does not already exist. When a peer already - /// exist, the stored version will be replaced with the newly provided peer. - - #[allow(clippy::option_option)] - pub fn update_peer( - &mut self, - public_key: &CommsPublicKey, - net_addresses: Option>, - flags: Option, - banned_until: Option>, - banned_reason: Option, - is_offline: Option, - peer_features: Option, - supported_protocols: Option>, - ) -> Result<(), PeerManagerError> { - match self.public_key_index.get(public_key).copied() { - Some(peer_key) => { - let mut stored_peer = self - .peer_db - .get(&peer_key) - .map_err(PeerManagerError::DatabaseError)? - .expect("Public key index and peer database are out of sync!"); - - trace!(target: LOG_TARGET, "Updating peer '{}'", stored_peer.node_id); - - stored_peer.update( - net_addresses, - flags, - banned_until, - banned_reason, - is_offline, - peer_features, - supported_protocols, - ); - - self.peer_db - .insert(peer_key, stored_peer) - .map_err(PeerManagerError::DatabaseError)?; - - Ok(()) - }, - None => { - trace!( - target: LOG_TARGET, - "Peer not found because the public key '{}' could not be found in the index", - public_key - ); - Err(PeerManagerError::PeerNotFoundError) - }, - } - } - /// The peer with the specified public_key will be removed from the PeerManager pub fn delete_peer(&mut self, node_id: &NodeId) -> Result<(), PeerManagerError> { let peer_key = *self .node_id_index .get(node_id) .ok_or(PeerManagerError::PeerNotFoundError)?; + let mut peer = self + .peer_db + .get(&peer_key) + .map_err(PeerManagerError::DatabaseError)? + .ok_or(PeerManagerError::PeerNotFoundError)?; + peer.deleted_at = Some(Utc::now().naive_utc()); self.peer_db - .delete(&peer_key) + .insert(peer_key, peer) .map_err(PeerManagerError::DatabaseError)?; - - self.remove_index_links(peer_key); Ok(()) } @@ -508,42 +464,6 @@ where DS: KeyValueStore Ok(peer.is_banned()) } - /// Changes the OFFLINE flag bit of the peer. 
- pub fn set_offline(&mut self, node_id: &NodeId, offline: bool) -> Result { - let peer_key = *self - .node_id_index - .get(node_id) - .ok_or(PeerManagerError::PeerNotFoundError)?; - let mut peer: Peer = self - .peer_db - .get(&peer_key) - .map_err(PeerManagerError::DatabaseError)? - .expect("node_id_index is out of sync with peer db"); - let was_offline = peer.is_offline(); - peer.set_offline(offline); - self.peer_db - .insert(peer_key, peer) - .map_err(PeerManagerError::DatabaseError)?; - Ok(was_offline) - } - - /// Enables Thread safe access - Adds a new net address to the peer if it doesn't yet exist - pub fn add_net_address(&mut self, node_id: &NodeId, net_address: &Multiaddr) -> Result<(), PeerManagerError> { - let peer_key = *self - .node_id_index - .get(node_id) - .ok_or(PeerManagerError::PeerNotFoundError)?; - let mut peer: Peer = self - .peer_db - .get(&peer_key) - .map_err(PeerManagerError::DatabaseError)? - .expect("node_id_index is out of sync with peer db"); - peer.addresses.add_address(net_address); - self.peer_db - .insert(peer_key, peer) - .map_err(PeerManagerError::DatabaseError) - } - /// This will store metadata inside of the metadata field in the peer provided by the nodeID. /// It will return None if the value was empty and the old value if the value was updated pub fn set_peer_metadata( @@ -567,18 +487,6 @@ where DS: KeyValueStore .map_err(PeerManagerError::DatabaseError)?; Ok(result) } - - pub fn mark_last_seen(&mut self, node_id: &NodeId) -> Result<(), PeerManagerError> { - let mut peer = self - .find_by_node_id(node_id)? - .ok_or(PeerManagerError::PeerNotFoundError)?; - peer.last_seen = Some(Utc::now().naive_utc()); - peer.set_offline(false); - self.peer_db - .insert(peer.id(), peer) - .map_err(PeerManagerError::DatabaseError)?; - Ok(()) - } } #[allow(clippy::from_over_into)] @@ -592,12 +500,13 @@ impl Into for PeerStorage { mod test { use std::iter::repeat_with; + use multiaddr::Multiaddr; use tari_crypto::{keys::PublicKey, ristretto::RistrettoPublicKey}; use tari_storage::HashmapDatabase; use super::*; use crate::{ - net_address::MultiaddressesWithStats, + net_address::{MultiaddressesWithStats, PeerAddressSource}, peer_manager::{peer::PeerFlags, PeerFeatures}, }; @@ -610,9 +519,10 @@ mod test { let net_address1 = "/ip4/1.2.3.4/tcp/8000".parse::().unwrap(); let net_address2 = "/ip4/5.6.7.8/tcp/8000".parse::().unwrap(); let net_address3 = "/ip4/5.6.7.8/tcp/7000".parse::().unwrap(); - let mut net_addresses = MultiaddressesWithStats::from(net_address1); - net_addresses.add_address(&net_address2); - net_addresses.add_address(&net_address3); + let mut net_addresses = + MultiaddressesWithStats::from_addresses_with_source(vec![net_address1], &PeerAddressSource::Config); + net_addresses.add_address(&net_address2, &PeerAddressSource::Config); + net_addresses.add_address(&net_address3, &PeerAddressSource::Config); let peer1 = Peer::new( pk, node_id, @@ -626,7 +536,8 @@ mod test { let (_sk, pk) = RistrettoPublicKey::random_keypair(&mut rng); let node_id = NodeId::from_key(&pk); let net_address4 = "/ip4/9.10.11.12/tcp/7000".parse::().unwrap(); - let net_addresses = MultiaddressesWithStats::from(net_address4); + let net_addresses = + MultiaddressesWithStats::from_addresses_with_source(vec![net_address4], &PeerAddressSource::Config); let peer2: Peer = Peer::new( pk, node_id, @@ -641,8 +552,9 @@ mod test { let node_id = NodeId::from_key(&pk); let net_address5 = "/ip4/13.14.15.16/tcp/6000".parse::().unwrap(); let net_address6 = "/ip4/17.18.19.20/tcp/8000".parse::().unwrap(); - 
let mut net_addresses = MultiaddressesWithStats::from(net_address5); - net_addresses.add_address(&net_address6); + let mut net_addresses = + MultiaddressesWithStats::from_addresses_with_source(vec![net_address5], &PeerAddressSource::Config); + net_addresses.add_address(&net_address6, &PeerAddressSource::Config); let peer3 = Peer::new( pk, node_id, @@ -690,9 +602,10 @@ mod test { let net_address1 = "/ip4/1.2.3.4/tcp/8000".parse::().unwrap(); let net_address2 = "/ip4/5.6.7.8/tcp/8000".parse::().unwrap(); let net_address3 = "/ip4/5.6.7.8/tcp/7000".parse::().unwrap(); - let mut net_addresses = MultiaddressesWithStats::from(net_address1); - net_addresses.add_address(&net_address2); - net_addresses.add_address(&net_address3); + let mut net_addresses = + MultiaddressesWithStats::from_addresses_with_source(vec![net_address1], &PeerAddressSource::Config); + net_addresses.add_address(&net_address2, &PeerAddressSource::Config); + net_addresses.add_address(&net_address3, &PeerAddressSource::Config); let peer1 = Peer::new( pk, node_id, @@ -706,7 +619,8 @@ mod test { let (_sk, pk) = RistrettoPublicKey::random_keypair(&mut rng); let node_id = NodeId::from_key(&pk); let net_address4 = "/ip4/9.10.11.12/tcp/7000".parse::().unwrap(); - let net_addresses = MultiaddressesWithStats::from(net_address4); + let net_addresses = + MultiaddressesWithStats::from_addresses_with_source(vec![net_address4], &PeerAddressSource::Config); let peer2: Peer = Peer::new( pk, node_id, @@ -721,8 +635,9 @@ mod test { let node_id = NodeId::from_key(&pk); let net_address5 = "/ip4/13.14.15.16/tcp/6000".parse::().unwrap(); let net_address6 = "/ip4/17.18.19.20/tcp/8000".parse::().unwrap(); - let mut net_addresses = MultiaddressesWithStats::from(net_address5); - net_addresses.add_address(&net_address6); + let mut net_addresses = + MultiaddressesWithStats::from_addresses_with_source(vec![net_address5], &PeerAddressSource::Config); + net_addresses.add_address(&net_address6, &PeerAddressSource::Config); let peer3 = Peer::new( pk, node_id, @@ -784,7 +699,8 @@ mod test { // Test delete of border case peer assert!(peer_storage.delete_peer(&peer3.node_id).is_ok()); - assert_eq!(peer_storage.peer_db.len().unwrap(), 2); + // It is a logical delete, so there should still be 3 peers in the db + assert_eq!(peer_storage.peer_db.len().unwrap(), 3); assert_eq!( peer_storage @@ -802,7 +718,12 @@ mod test { .public_key, peer2.public_key ); - assert!(peer_storage.find_by_public_key(&peer3.public_key).unwrap().is_none()); + assert!(peer_storage + .find_by_public_key(&peer3.public_key) + .unwrap() + .unwrap() + .deleted_at + .is_some()); assert_eq!( peer_storage.find_by_node_id(&peer1.node_id).unwrap().unwrap().node_id, @@ -812,57 +733,21 @@ mod test { peer_storage.find_by_node_id(&peer2.node_id).unwrap().unwrap().node_id, peer2.node_id ); - assert!(peer_storage.find_by_node_id(&peer3.node_id).unwrap().is_none()); - - peer_storage.find_by_public_key(&peer1.public_key).unwrap().unwrap(); - peer_storage.find_by_public_key(&peer2.public_key).unwrap().unwrap(); - assert!(peer_storage.find_by_public_key(&peer3.public_key).unwrap().is_none()); - - // Test of delete with moving behaviour - assert!(peer_storage.add_peer(peer3.clone()).is_ok()); - assert!(peer_storage.delete_peer(&peer2.node_id).is_ok()); - - assert_eq!(peer_storage.peer_db.len().unwrap(), 2); - - assert_eq!( - peer_storage - .find_by_public_key(&peer1.public_key) - .unwrap() - .unwrap() - .public_key, - peer1.public_key - ); - 
assert!(peer_storage.find_by_public_key(&peer2.public_key).unwrap().is_none()); - assert_eq!( - peer_storage - .find_by_public_key(&peer3.public_key) - .unwrap() - .unwrap() - .public_key, - peer3.public_key - ); - - assert_eq!( - peer_storage.find_by_node_id(&peer1.node_id).unwrap().unwrap().node_id, - peer1.node_id - ); - assert!(peer_storage.find_by_node_id(&peer2.node_id).unwrap().is_none()); - assert_eq!( - peer_storage.find_by_node_id(&peer3.node_id).unwrap().unwrap().node_id, - peer3.node_id - ); - - peer_storage.find_by_public_key(&peer1.public_key).unwrap().unwrap(); - assert!(peer_storage.find_by_public_key(&peer2.public_key).unwrap().is_none()); - peer_storage.find_by_public_key(&peer3.public_key).unwrap().unwrap(); + assert!(peer_storage + .find_by_node_id(&peer3.node_id) + .unwrap() + .unwrap() + .deleted_at + .is_some()); } - fn create_test_peer(features: PeerFeatures, ban: bool, offline: bool) -> Peer { + fn create_test_peer(features: PeerFeatures, ban: bool) -> Peer { let mut rng = rand::rngs::OsRng; let (_sk, pk) = RistrettoPublicKey::random_keypair(&mut rng); let node_id = NodeId::from_key(&pk); let net_address = "/ip4/1.2.3.4/tcp/8000".parse::().unwrap(); - let net_addresses = MultiaddressesWithStats::from(net_address); + let net_addresses = + MultiaddressesWithStats::from_addresses_with_source(vec![net_address], &PeerAddressSource::Config); let mut peer = Peer::new( pk, node_id, @@ -875,7 +760,6 @@ mod test { if ban { peer.ban_for(Duration::from_secs(600), "".to_string()); } - peer.set_offline(offline); peer } @@ -883,16 +767,16 @@ mod test { fn test_in_network_region() { let mut peer_storage = PeerStorage::new_indexed(HashmapDatabase::new()).unwrap(); - let mut nodes = repeat_with(|| create_test_peer(PeerFeatures::COMMUNICATION_NODE, false, false)) + let mut nodes = repeat_with(|| create_test_peer(PeerFeatures::COMMUNICATION_NODE, false)) .take(5) - .chain(repeat_with(|| create_test_peer(PeerFeatures::COMMUNICATION_CLIENT, false, false)).take(4)) + .chain(repeat_with(|| create_test_peer(PeerFeatures::COMMUNICATION_CLIENT, false)).take(4)) .collect::>(); for p in &nodes { peer_storage.add_peer(p.clone()).unwrap(); } - let main_peer_node_id = create_test_peer(PeerFeatures::COMMUNICATION_NODE, false, false).node_id; + let main_peer_node_id = create_test_peer(PeerFeatures::COMMUNICATION_NODE, false).node_id; nodes.sort_by(|a, b| { a.node_id diff --git a/comms/core/src/pipeline/inbound.rs b/comms/core/src/pipeline/inbound.rs index 5f91187252..6b0149b12e 100644 --- a/comms/core/src/pipeline/inbound.rs +++ b/comms/core/src/pipeline/inbound.rs @@ -146,9 +146,8 @@ mod test { use tower::service_fn; use super::*; - use crate::runtime; - #[runtime::test] + #[tokio::test] async fn run() { let items = vec![1, 2, 3, 4, 5, 6]; let (tx, mut stream) = mpsc::channel(items.len()); @@ -159,10 +158,9 @@ mod test { let (out_tx, mut out_rx) = mpsc::channel(items.len()); - let executor = runtime::current(); let shutdown = Shutdown::new(); let pipeline = Inbound::new( - BoundedExecutor::new(executor.clone(), 1), + BoundedExecutor::new(1), stream, service_fn(move |req| { out_tx.try_send(req).unwrap(); @@ -171,7 +169,7 @@ mod test { shutdown.to_signal(), ); - let spawned_task = executor.spawn(pipeline.run()); + let spawned_task = tokio::spawn(pipeline.run()); let received = collect_recv!(out_rx, take = items.len(), timeout = Duration::from_secs(10)); assert!(received.iter().all(|i| items.contains(i))); diff --git a/comms/core/src/pipeline/outbound.rs b/comms/core/src/pipeline/outbound.rs index 
83eed9f208..6a8730689f 100644 --- a/comms/core/src/pipeline/outbound.rs +++ b/comms/core/src/pipeline/outbound.rs @@ -29,7 +29,7 @@ use log::*; use tokio::time; use tower::{Service, ServiceExt}; -use crate::{bounded_executor::OptionallyBoundedExecutor, pipeline::builder::OutboundPipelineConfig}; +use crate::{bounded_executor::BoundedExecutor, pipeline::builder::OutboundPipelineConfig}; const LOG_TARGET: &str = "comms::pipeline::outbound"; @@ -37,7 +37,7 @@ const LOG_TARGET: &str = "comms::pipeline::outbound"; /// message as a [MessageRequest](crate::protocol::messaging::MessageRequest). pub struct Outbound { /// Executor used to spawn a pipeline for each received item on the stream - executor: OptionallyBoundedExecutor, + executor: BoundedExecutor, /// Outbound pipeline configuration containing the pipeline and it's in and out streams config: OutboundPipelineConfig, } @@ -50,7 +50,7 @@ where TPipeline::Future: Send, { /// New outbound pipeline. - pub fn new(executor: OptionallyBoundedExecutor, config: OutboundPipelineConfig) -> Self { + pub fn new(executor: BoundedExecutor, config: OutboundPipelineConfig) -> Self { Self { executor, config } } @@ -61,19 +61,19 @@ where while let Some(msg) = self.config.in_receiver.recv().await { // Pipeline IN received a message. Spawn a new task for the pipeline let num_available = self.executor.num_available(); - if let Some(max_available) = self.executor.max_available() { - log!( - target: LOG_TARGET, - if num_available < max_available { - Level::Debug - } else { - Level::Trace - }, - "Outbound pipeline usage: {}/{}", - max_available - num_available, - max_available - ); - } + let max_available = self.executor.max_available(); + log!( + target: LOG_TARGET, + if num_available < max_available { + Level::Debug + } else { + Level::Trace + }, + "Outbound pipeline usage: {}/{}", + max_available - num_available, + max_available + ); + let pipeline = self.config.pipeline.clone(); let id = current_id; current_id = (current_id + 1) % u64::MAX; @@ -89,12 +89,13 @@ where "Outbound pipeline {} returned an error: '{}'", id, err ); }, - Err(_) => { + Err(err) => { error!( target: LOG_TARGET, "Outbound pipeline {} timed out and was aborted. THIS SHOULD NOT HAPPEN: there was a \ - deadlock or excessive delay in processing this pipeline.", - id + deadlock or excessive delay in processing this pipeline. 
{}", + id, + err ); }, } @@ -122,12 +123,12 @@ mod test { use bytes::Bytes; use tari_test_utils::collect_recv; - use tokio::{runtime::Handle, sync::mpsc, time}; + use tokio::{sync::mpsc, time}; use super::*; - use crate::{message::OutboundMessage, pipeline::SinkService, runtime, utils}; + use crate::{message::OutboundMessage, pipeline::SinkService, utils}; - #[runtime::test] + #[tokio::test] async fn run() { const NUM_ITEMS: usize = 10; let (tx, mut in_receiver) = mpsc::channel(NUM_ITEMS); @@ -140,15 +141,15 @@ mod test { in_receiver.close(); let (out_tx, mut out_rx) = mpsc::unbounded_channel(); - let executor = Handle::current(); + let executor = BoundedExecutor::new(100); - let pipeline = Outbound::new(executor.clone().into(), OutboundPipelineConfig { + let pipeline = Outbound::new(executor, OutboundPipelineConfig { in_receiver, out_receiver: None, pipeline: SinkService::new(out_tx), }); - let spawned_task = executor.spawn(pipeline.run()); + let spawned_task = tokio::spawn(pipeline.run()); let requests = collect_recv!(out_rx, timeout = Duration::from_millis(5)); assert_eq!(requests.len(), NUM_ITEMS); @@ -157,6 +158,6 @@ mod test { time::timeout(Duration::from_secs(5), spawned_task) .await .unwrap() - .unwrap(); + .expect("Task should end") } } diff --git a/comms/core/src/pipeline/translate_sink.rs b/comms/core/src/pipeline/translate_sink.rs index 606c038299..d7588e76ba 100644 --- a/comms/core/src/pipeline/translate_sink.rs +++ b/comms/core/src/pipeline/translate_sink.rs @@ -20,13 +20,14 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use futures::Sink; use std::{ marker::PhantomData, pin::Pin, task::{Context, Poll}, }; +use futures::Sink; + /// A sink which takes inputs sent to it, translates them and sends them on a given sink pub struct TranslateSink { translater: F, @@ -91,12 +92,13 @@ where F: FnMut(I) -> Option #[cfg(test)] mod test { - use super::*; - use crate::runtime; use futures::{SinkExt, StreamExt}; use tokio::sync::mpsc; - #[runtime::test] + use super::*; + use crate::runtime; + + #[tokio::test] async fn check_translates() { let (tx, mut rx) = mpsc::channel(1); diff --git a/comms/core/src/proto/identity.proto b/comms/core/src/proto/identity.proto index 6183da9662..c1ba966691 100644 --- a/comms/core/src/proto/identity.proto +++ b/comms/core/src/proto/identity.proto @@ -10,7 +10,9 @@ package tari.comms.identity; message PeerIdentityMsg { repeated bytes addresses = 1; uint64 features = 2; + // Note: not part of the signature repeated bytes supported_protocols = 3; + // Note: not part of the signature string user_agent = 4; // Signature that signs the peer identity IdentitySignature identity_signature = 5; diff --git a/comms/core/src/protocol/identity.rs b/comms/core/src/protocol/identity.rs index cf03fcfcba..add125a624 100644 --- a/comms/core/src/protocol/identity.rs +++ b/comms/core/src/protocol/identity.rs @@ -65,7 +65,7 @@ where // Send this node's identity let msg_bytes = PeerIdentityMsg { - addresses: vec![node_identity.public_address().to_vec()], + addresses: node_identity.public_addresses().iter().map(|a| a.to_vec()).collect(), features: node_identity.features().bits(), supported_protocols, user_agent: network_info.user_agent, @@ -195,12 +195,11 @@ mod test { use crate::{ peer_manager::PeerFeatures, protocol::{IdentityProtocolError, NodeNetworkInfo}, - runtime, test_utils::node_identity::build_node_identity, 
transports::{MemoryTransport, Transport}, }; - #[runtime::test] + #[tokio::test] async fn identity_exchange() { let transport = MemoryTransport; let addr = "/memory/0".parse().unwrap(); @@ -241,13 +240,27 @@ mod test { let identity1 = result2.unwrap(); assert_eq!(identity1.features, node_identity1.features().bits()); - assert_eq!(identity1.addresses, vec![node_identity1.public_address().to_vec()]); + assert_eq!( + identity1.addresses, + node_identity1 + .public_addresses() + .iter() + .map(|a| a.to_vec()) + .collect::>() + ); assert_eq!(identity2.features, node_identity2.features().bits()); - assert_eq!(identity2.addresses, vec![node_identity2.public_address().to_vec()]); + assert_eq!( + identity2.addresses, + node_identity2 + .public_addresses() + .iter() + .map(|a| a.to_vec()) + .collect::>() + ); } - #[runtime::test] + #[tokio::test] async fn fail_cases() { let transport = MemoryTransport; let addr = "/memory/0".parse().unwrap(); diff --git a/comms/core/src/protocol/messaging/extension.rs b/comms/core/src/protocol/messaging/extension.rs index 9e8425e367..4f31841c7c 100644 --- a/comms/core/src/protocol/messaging/extension.rs +++ b/comms/core/src/protocol/messaging/extension.rs @@ -27,7 +27,7 @@ use tower::Service; use super::MessagingProtocol; use crate::{ - bounded_executor::{BoundedExecutor, OptionallyBoundedExecutor}, + bounded_executor::BoundedExecutor, message::InboundMessage, pipeline, protocol::{ @@ -36,7 +36,6 @@ use crate::{ ProtocolExtensionContext, ProtocolExtensionError, }, - runtime::task, }; /// Buffer size for inbound messages from _all_ peers. If the message consumer is slow to get through this queue, @@ -88,22 +87,26 @@ where context.register_complete_signal(messaging.complete_signal()); // Spawn messaging protocol - task::spawn(messaging.run()); + tokio::spawn(messaging.run()); // Spawn inbound pipeline - let bounded_executor = BoundedExecutor::from_current(self.pipeline.max_concurrent_inbound_tasks); + let bounded_executor = BoundedExecutor::new(self.pipeline.max_concurrent_inbound_tasks); let inbound = pipeline::Inbound::new( bounded_executor, inbound_message_rx, self.pipeline.inbound, context.shutdown_signal(), ); - task::spawn(inbound.run()); + tokio::spawn(inbound.run()); - let executor = OptionallyBoundedExecutor::from_current(self.pipeline.max_concurrent_outbound_tasks); + let executor = BoundedExecutor::new( + self.pipeline + .max_concurrent_outbound_tasks + .unwrap_or_else(BoundedExecutor::max_theoretical_tasks), + ); // Spawn outbound pipeline let outbound = pipeline::Outbound::new(executor, self.pipeline.outbound); - task::spawn(outbound.run()); + tokio::spawn(outbound.run()); Ok(()) } diff --git a/comms/core/src/protocol/messaging/protocol.rs b/comms/core/src/protocol/messaging/protocol.rs index 2098e7e5c5..4a4da91d66 100644 --- a/comms/core/src/protocol/messaging/protocol.rs +++ b/comms/core/src/protocol/messaging/protocol.rs @@ -49,7 +49,6 @@ use crate::{ ProtocolEvent, ProtocolNotification, }, - runtime::task, }; const LOG_TARGET: &str = "comms::protocol::messaging"; @@ -281,7 +280,7 @@ impl MessagingProtocol { ) -> mpsc::UnboundedSender { let (msg_tx, msg_rx) = mpsc::unbounded_channel(); let outbound_messaging = OutboundMessaging::new(connectivity, events_tx, msg_rx, retry_queue_tx, peer_node_id); - task::spawn(outbound_messaging.run()); + tokio::spawn(outbound_messaging.run()); msg_tx } @@ -295,7 +294,7 @@ impl MessagingProtocol { RATE_LIMIT_CAPACITY, RATE_LIMIT_RESTOCK_INTERVAL, ); - task::spawn(inbound_messaging.run(substream)); + 
tokio::spawn(inbound_messaging.run(substream)); } fn handle_protocol_notification(&mut self, notification: ProtocolNotification) { diff --git a/comms/core/src/protocol/messaging/test.rs b/comms/core/src/protocol/messaging/test.rs index 4344a55ee0..50deff8ae7 100644 --- a/comms/core/src/protocol/messaging/test.rs +++ b/comms/core/src/protocol/messaging/test.rs @@ -40,8 +40,6 @@ use crate::{ net_address::MultiaddressesWithStats, peer_manager::{NodeId, NodeIdentity, Peer, PeerFeatures, PeerFlags, PeerManager}, protocol::{messaging::SendFailReason, ProtocolEvent, ProtocolNotification}, - runtime, - runtime::task, test_utils::{ mocks::{create_connectivity_mock, create_peer_connection_mock_pair, ConnectivityManagerMockState}, node_id, @@ -84,7 +82,7 @@ async fn spawn_messaging_protocol() -> ( inbound_msg_tx, shutdown.to_signal(), ); - task::spawn(msg_proto.run()); + tokio::spawn(msg_proto.run()); ( peer_manager, @@ -98,7 +96,7 @@ async fn spawn_messaging_protocol() -> ( ) } -#[runtime::test] +#[tokio::test] async fn new_inbound_substream_handling() { let (peer_manager, _, _, proto_tx, _, mut inbound_msg_rx, mut events_rx, _shutdown) = spawn_messaging_protocol().await; @@ -153,7 +151,7 @@ async fn new_inbound_substream_handling() { assert_eq!(*node_id, expected_node_id); } -#[runtime::test] +#[tokio::test] async fn send_message_request() { let (_, node_identity, conn_man_mock, _, request_tx, _, _, _shutdown) = spawn_messaging_protocol().await; @@ -179,7 +177,7 @@ async fn send_message_request() { assert_eq!(peer_conn_mock1.call_count(), 1); } -#[runtime::test] +#[tokio::test] async fn send_message_dial_failed() { let (_, _, conn_manager_mock, _, request_tx, _, mut event_tx, _shutdown) = spawn_messaging_protocol().await; @@ -199,7 +197,7 @@ async fn send_message_dial_failed() { assert!(calls.iter().all(|evt| evt.starts_with("DialPeer"))); } -#[runtime::test] +#[tokio::test] async fn send_message_substream_bulk_failure() { const NUM_MSGS: usize = 10; let (_, node_identity, conn_manager_mock, _, mut request_tx, _, mut events_rx, _shutdown) = @@ -266,7 +264,7 @@ async fn send_message_substream_bulk_failure() { assert_eq!(node_id, peer_node_id); } -#[runtime::test] +#[tokio::test] async fn many_concurrent_send_message_requests() { const NUM_MSGS: usize = 100; let (_, _, conn_man_mock, _, request_tx, _, _, _shutdown) = spawn_messaging_protocol().await; @@ -314,7 +312,7 @@ async fn many_concurrent_send_message_requests() { assert_eq!(peer_conn_mock1.call_count(), 1); } -#[runtime::test] +#[tokio::test] async fn many_concurrent_send_message_requests_that_fail() { const NUM_MSGS: usize = 100; let (_, _, _, _, request_tx, _, _, _shutdown) = spawn_messaging_protocol().await; diff --git a/comms/core/src/protocol/negotiation.rs b/comms/core/src/protocol/negotiation.rs index 96fef87db8..0fbc08ab64 100644 --- a/comms/core/src/protocol/negotiation.rs +++ b/comms/core/src/protocol/negotiation.rs @@ -228,9 +228,9 @@ mod test { use tari_test_utils::unpack_enum; use super::*; - use crate::{memsocket::MemorySocket, runtime}; + use crate::memsocket::MemorySocket; - #[runtime::test] + #[tokio::test] async fn negotiate_success() { let (mut initiator, mut responder) = MemorySocket::new_pair(); let mut negotiate_out = ProtocolNegotiation::new(&mut initiator); @@ -255,7 +255,7 @@ mod test { assert_eq!(out_proto.unwrap(), ProtocolId::from_static(b"A")); } - #[runtime::test] + #[tokio::test] async fn negotiate_fail() { let (mut initiator, mut responder) = MemorySocket::new_pair(); let mut negotiate_out = 
ProtocolNegotiation::new(&mut initiator); @@ -280,7 +280,7 @@ mod test { unpack_enum!(ProtocolError::ProtocolOutboundNegotiationFailed { .. } = out_proto.unwrap_err()); } - #[runtime::test] + #[tokio::test] async fn negotiate_fail_max_rounds() { let (mut initiator, mut responder) = MemorySocket::new_pair(); let mut negotiate_out = ProtocolNegotiation::new(&mut initiator); @@ -305,7 +305,7 @@ mod test { unpack_enum!(ProtocolError::ProtocolNegotiationTerminatedByPeer = out_proto.unwrap_err()); } - #[runtime::test] + #[tokio::test] async fn negotiate_success_optimistic() { let (mut initiator, mut responder) = MemorySocket::new_pair(); let mut negotiate_out = ProtocolNegotiation::new(&mut initiator); @@ -326,7 +326,7 @@ mod test { out_proto.unwrap(); } - #[runtime::test] + #[tokio::test] async fn negotiate_fail_optimistic() { let (mut initiator, mut responder) = MemorySocket::new_pair(); let mut negotiate_out = ProtocolNegotiation::new(&mut initiator); diff --git a/comms/core/src/protocol/protocols.rs b/comms/core/src/protocol/protocols.rs index 29a74f97f4..63ba6e275f 100644 --- a/comms/core/src/protocol/protocols.rs +++ b/comms/core/src/protocol/protocols.rs @@ -148,7 +148,6 @@ mod test { use tari_test_utils::unpack_enum; use super::*; - use crate::runtime; #[test] fn add() { @@ -163,7 +162,7 @@ mod test { assert!(protocols.get_supported_protocols().iter().all(|p| protos.contains(p))); } - #[runtime::test] + #[tokio::test] async fn notify() { let (tx, mut rx) = mpsc::channel(1); let protos = [ProtocolId::from_static(b"/tari/test/1")]; @@ -180,7 +179,7 @@ mod test { assert_eq!(peer_id, NodeId::new()); } - #[runtime::test] + #[tokio::test] async fn notify_fail_not_registered() { let mut protocols = Protocols::<()>::new(); diff --git a/comms/core/src/protocol/rpc/body.rs b/comms/core/src/protocol/rpc/body.rs index a93ce23e8d..ae0f749b68 100644 --- a/comms/core/src/protocol/rpc/body.rs +++ b/comms/core/src/protocol/rpc/body.rs @@ -302,9 +302,9 @@ mod test { use futures::{stream, StreamExt}; use prost::Message; - use crate::{message::MessageExt, protocol::rpc::body::Body, runtime}; + use crate::{message::MessageExt, protocol::rpc::body::Body}; - #[runtime::test] + #[tokio::test] async fn single_body() { let mut body = Body::single(123u32.to_encoded_bytes()); let bytes = body.next().await.unwrap().unwrap(); @@ -312,7 +312,7 @@ mod test { assert_eq!(u32::decode(bytes).unwrap(), 123u32); } - #[runtime::test] + #[tokio::test] async fn streaming_body() { let body = Body::streaming(stream::repeat(Bytes::new()).map(Ok).take(10)); let body = body.collect::>().await; diff --git a/comms/core/src/protocol/rpc/client/mod.rs b/comms/core/src/protocol/rpc/client/mod.rs index aa68646c36..9f38b011f0 100644 --- a/comms/core/src/protocol/rpc/client/mod.rs +++ b/comms/core/src/protocol/rpc/client/mod.rs @@ -77,7 +77,6 @@ use crate::{ }, ProtocolId, }, - runtime::task, stream_id, stream_id::StreamId, }; @@ -112,7 +111,7 @@ impl RpcClient { let connector = ClientConnector::new(request_tx, last_request_latency_rx, shutdown); let (ready_tx, ready_rx) = oneshot::channel(); let tracing_id = tracing::Span::current().id(); - task::spawn({ + tokio::spawn({ let span = span!(Level::TRACE, "start_rpc_worker"); span.follows_from(tracing_id); diff --git a/comms/core/src/protocol/rpc/client/tests.rs b/comms/core/src/protocol/rpc/client/tests.rs index 6d4417f95e..ecd484f3dd 100644 --- a/comms/core/src/protocol/rpc/client/tests.rs +++ b/comms/core/src/protocol/rpc/client/tests.rs @@ -42,8 +42,6 @@ use crate::{ ProtocolId, 
ProtocolNotification, }, - runtime, - runtime::task, test_utils::mocks::{new_peer_connection_mock_pair, PeerConnectionMockState}, }; @@ -53,7 +51,7 @@ async fn setup(num_concurrent_sessions: usize) -> (PeerConnection, PeerConnectio let shutdown = Shutdown::new(); let (context, _) = create_mocked_rpc_context(); - task::spawn( + tokio::spawn( RpcServer::builder() .with_maximum_simultaneous_sessions(num_concurrent_sessions) .finish() @@ -61,7 +59,7 @@ async fn setup(num_concurrent_sessions: usize) -> (PeerConnection, PeerConnectio .serve(notif_rx, context), ); - task::spawn(async move { + tokio::spawn(async move { while let Some(stream) = conn2_state.next_incoming_substream().await { notif_tx .send(ProtocolNotification::new( @@ -80,7 +78,7 @@ mod lazy_pool { use super::*; use crate::protocol::rpc::client::pool::{LazyPool, RpcClientPoolError}; - #[runtime::test] + #[tokio::test] async fn it_connects_lazily() { let (conn, mock_state, _shutdown) = setup(2).await; let mut pool = LazyPool::::new(conn, 2, Default::default()); @@ -91,7 +89,7 @@ mod lazy_pool { assert_eq!(mock_state.num_open_substreams(), 2); } - #[runtime::test] + #[tokio::test] async fn it_reuses_unused_connections() { let (conn, mock_state, _shutdown) = setup(2).await; let mut pool = LazyPool::::new(conn, 2, Default::default()); @@ -103,7 +101,7 @@ mod lazy_pool { async_assert_eventually!(mock_state.num_open_substreams(), expect = 2); } - #[runtime::test] + #[tokio::test] async fn it_reuses_least_used_connections() { let (conn, mock_state, _shutdown) = setup(2).await; let mut pool = LazyPool::::new(conn, 2, Default::default()); @@ -124,7 +122,7 @@ mod lazy_pool { assert_eq!(conn3.lease_count(), 2); } - #[runtime::test] + #[tokio::test] async fn it_reuses_used_connections_if_necessary() { let (conn, mock_state, _shutdown) = setup(2).await; let mut pool = LazyPool::::new(conn, 1, Default::default()); @@ -136,7 +134,7 @@ mod lazy_pool { drop(conn2); } - #[runtime::test] + #[tokio::test] async fn it_gracefully_handles_insufficient_server_sessions() { let (conn, mock_state, _shutdown) = setup(1).await; let mut pool = LazyPool::::new(conn, 2, Default::default()); @@ -148,7 +146,7 @@ mod lazy_pool { assert_eq!(conn2.lease_count(), 2); } - #[runtime::test] + #[tokio::test] async fn it_prunes_disconnected_sessions() { let (conn, mock_state, _shutdown) = setup(2).await; let mut pool = LazyPool::::new(conn, 2, Default::default()); @@ -165,7 +163,7 @@ mod lazy_pool { assert_eq!(mock_state.num_open_substreams(), 2); } - #[runtime::test] + #[tokio::test] async fn it_fails_when_peer_connected_disconnects() { let (mut peer_conn, _, _shutdown) = setup(2).await; let mut pool = LazyPool::::new(peer_conn.clone(), 2, Default::default()); @@ -179,7 +177,7 @@ mod lazy_pool { mod last_request_latency { use super::*; - #[runtime::test] + #[tokio::test] async fn it_returns_the_latency_until_the_first_response() { let (mut conn, _, _shutdown) = setup(1).await; diff --git a/comms/core/src/protocol/rpc/server/mod.rs b/comms/core/src/protocol/rpc/server/mod.rs index c244838e01..10fd4a94ec 100644 --- a/comms/core/src/protocol/rpc/server/mod.rs +++ b/comms/core/src/protocol/rpc/server/mod.rs @@ -267,7 +267,7 @@ where Self { executor: match config.maximum_simultaneous_sessions { Some(usize::MAX) => BoundedExecutor::allow_maximum(), - Some(num) => BoundedExecutor::from_current(num), + Some(num) => BoundedExecutor::new(num), None => BoundedExecutor::allow_maximum(), }, config, diff --git a/comms/core/src/protocol/rpc/server/router.rs 
b/comms/core/src/protocol/rpc/server/router.rs index 280457a09b..6ea12abbcb 100644 --- a/comms/core/src/protocol/rpc/server/router.rs +++ b/comms/core/src/protocol/rpc/server/router.rs @@ -50,7 +50,6 @@ use crate::{ ProtocolId, ProtocolNotificationRx, }, - runtime::task, Bytes, Substream, }; @@ -211,7 +210,7 @@ where let (proto_notif_tx, proto_notif_rx) = mpsc::channel(20); context.add_protocol(&self.protocol_names, &proto_notif_tx); let rpc_context = RpcCommsBackend::new(context.peer_manager(), context.connectivity()); - task::spawn(self.serve(proto_notif_rx, rpc_context)); + tokio::spawn(self.serve(proto_notif_rx, rpc_context)); Ok(()) } } @@ -278,7 +277,6 @@ mod test { use tower::util::BoxService; use super::*; - use crate::runtime; #[derive(Clone)] struct HelloService; @@ -330,7 +328,7 @@ mod test { } } - #[runtime::test] + #[tokio::test] async fn find_route() { let server = RpcServer::new(); let mut router = Router::new(server, HelloService).add_service(GoodbyeService); diff --git a/comms/core/src/protocol/rpc/test/client_pool.rs b/comms/core/src/protocol/rpc/test/client_pool.rs index 88e822f8d5..8ce6f298ea 100644 --- a/comms/core/src/protocol/rpc/test/client_pool.rs +++ b/comms/core/src/protocol/rpc/test/client_pool.rs @@ -77,7 +77,7 @@ mod lazy_pool { use super::*; use crate::protocol::rpc::client::pool::{LazyPool, RpcClientPoolError}; - #[runtime::test] + #[tokio::test] async fn it_connects_lazily() { let (conn, mock_state, _shutdown) = setup(2).await; let mut pool = LazyPool::::new(conn, 2, Default::default()); @@ -88,7 +88,7 @@ mod lazy_pool { assert_eq!(mock_state.num_open_substreams(), 2); } - #[runtime::test] + #[tokio::test] async fn it_reuses_unused_connections() { let (conn, mock_state, _shutdown) = setup(2).await; let mut pool = LazyPool::::new(conn, 2, Default::default()); @@ -100,7 +100,7 @@ mod lazy_pool { async_assert_eventually!(mock_state.num_open_substreams(), expect = 1); } - #[runtime::test] + #[tokio::test] async fn it_reuses_least_used_connections() { let (conn, mock_state, _shutdown) = setup(2).await; let mut pool = LazyPool::::new(conn, 2, Default::default()); @@ -121,7 +121,7 @@ mod lazy_pool { assert_eq!(conn3.lease_count(), 2); } - #[runtime::test] + #[tokio::test] async fn it_reuses_used_connections_if_necessary() { let (conn, mock_state, _shutdown) = setup(2).await; let mut pool = LazyPool::::new(conn, 1, Default::default()); @@ -133,7 +133,7 @@ mod lazy_pool { drop(conn2); } - #[runtime::test] + #[tokio::test] async fn it_gracefully_handles_insufficient_server_sessions() { let (conn, mock_state, _shutdown) = setup(1).await; let mut pool = LazyPool::::new(conn, 2, Default::default()); @@ -145,7 +145,7 @@ mod lazy_pool { assert_eq!(conn2.lease_count(), 2); } - #[runtime::test] + #[tokio::test] async fn it_prunes_disconnected_sessions() { let (conn, mock_state, _shutdown) = setup(2).await; let mut pool = LazyPool::::new(conn, 2, Default::default()); @@ -162,7 +162,7 @@ mod lazy_pool { assert_eq!(mock_state.num_open_substreams(), 2); } - #[runtime::test] + #[tokio::test] async fn it_fails_when_peer_connected_disconnects() { let (mut peer_conn, _, _shutdown) = setup(2).await; let mut pool = LazyPool::::new(peer_conn.clone(), 2, Default::default()); diff --git a/comms/core/src/protocol/rpc/test/comms_integration.rs b/comms/core/src/protocol/rpc/test/comms_integration.rs index 81d82db40c..27fdf4b1e5 100644 --- a/comms/core/src/protocol/rpc/test/comms_integration.rs +++ b/comms/core/src/protocol/rpc/test/comms_integration.rs @@ -31,21 +31,20 @@ use crate::{ 
RpcStatus, RpcStatusCode, }, - runtime, test_utils::node_identity::build_node_identity, transports::MemoryTransport, types::CommsDatabase, CommsBuilder, }; -#[runtime::test] +#[tokio::test] async fn run_service() { let node_identity1 = build_node_identity(Default::default()); let rpc_service = MockRpcService::new(); let mock_state = rpc_service.shared_state(); let shutdown = Shutdown::new(); let comms1 = CommsBuilder::new() - .with_listener_address(node_identity1.public_address()) + .with_listener_address(node_identity1.first_public_address()) .with_node_identity(node_identity1) .with_shutdown_signal(shutdown.to_signal()) .with_peer_storage(CommsDatabase::new(), None) @@ -58,7 +57,7 @@ async fn run_service() { let node_identity2 = build_node_identity(Default::default()); let comms2 = CommsBuilder::new() - .with_listener_address(node_identity2.public_address()) + .with_listener_address(node_identity2.first_public_address()) .with_shutdown_signal(shutdown.to_signal()) .with_node_identity(node_identity2.clone()) .with_peer_storage(CommsDatabase::new(), None) diff --git a/comms/core/src/protocol/rpc/test/handshake.rs b/comms/core/src/protocol/rpc/test/handshake.rs index d22493adcd..66f645393e 100644 --- a/comms/core/src/protocol/rpc/test/handshake.rs +++ b/comms/core/src/protocol/rpc/test/handshake.rs @@ -21,6 +21,7 @@ // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use tari_test_utils::unpack_enum; +use tokio::task; use crate::{ framing, @@ -30,11 +31,9 @@ use crate::{ handshake::{RpcHandshakeError, SUPPORTED_RPC_VERSIONS}, Handshake, }, - runtime, - runtime::task, }; -#[runtime::test] +#[tokio::test] async fn it_performs_the_handshake() { let (client, server) = MemorySocket::new_pair(); @@ -52,7 +51,7 @@ async fn it_performs_the_handshake() { assert!(SUPPORTED_RPC_VERSIONS.contains(&v)); } -#[runtime::test] +#[tokio::test] async fn it_rejects_the_handshake() { let (client, server) = MemorySocket::new_pair(); diff --git a/comms/core/src/protocol/rpc/test/smoke.rs b/comms/core/src/protocol/rpc/test/smoke.rs index 6ebb3ea466..a74021bda7 100644 --- a/comms/core/src/protocol/rpc/test/smoke.rs +++ b/comms/core/src/protocol/rpc/test/smoke.rs @@ -62,7 +62,6 @@ use crate::{ ProtocolId, ProtocolNotification, }, - runtime, test_utils::{node_identity::build_node_identity, transport::build_multiplexed_connections}, NodeIdentity, Substream, @@ -137,7 +136,7 @@ pub(super) async fn setup( (inbound, outbound, server_hnd, node_identity, shutdown) } -#[runtime::test] +#[tokio::test] async fn request_response_errors_and_streaming() { let (mut muxer, _outbound, server_hnd, node_identity, mut shutdown) = setup(GreetingService::default(), 1).await; let socket = muxer.incoming_mut().next().await.unwrap(); @@ -220,7 +219,7 @@ async fn request_response_errors_and_streaming() { server_hnd.await.unwrap(); } -#[runtime::test] +#[tokio::test] async fn concurrent_requests() { let (mut muxer, _outbound, _, _, _shutdown) = setup(GreetingService::default(), 1).await; let socket = muxer.incoming_mut().next().await.unwrap(); @@ -261,7 +260,7 @@ async fn concurrent_requests() { assert_eq!(spawned2.await.unwrap(), GreetingService::DEFAULT_GREETINGS[..5]); } -#[runtime::test] +#[tokio::test] async fn response_too_big() { let (mut muxer, _outbound, _, _, _shutdown) = setup(GreetingService::new(&[]), 1).await; let socket = muxer.incoming_mut().next().await.unwrap(); @@ -288,7 +287,7 @@ async fn response_too_big() { .unwrap(); } -#[runtime::test] +#[tokio::test] async fn ping_latency() { let (mut muxer, 
_outbound, _, _, _shutdown) = setup(GreetingService::new(&[]), 1).await; let socket = muxer.incoming_mut().next().await.unwrap(); @@ -302,7 +301,7 @@ async fn ping_latency() { assert!(latency.as_secs() < 5); } -#[runtime::test] +#[tokio::test] async fn server_shutdown_before_connect() { let (mut muxer, _outbound, _, _, mut shutdown) = setup(GreetingService::new(&[]), 1).await; let socket = muxer.incoming_mut().next().await.unwrap(); @@ -316,7 +315,7 @@ async fn server_shutdown_before_connect() { )); } -#[runtime::test] +#[tokio::test] async fn timeout() { let delay = Arc::new(RwLock::new(Duration::from_secs(10))); let (mut muxer, _outbound, _, _, _shutdown) = setup(SlowGreetingService::new(delay.clone()), 1).await; @@ -341,7 +340,7 @@ async fn timeout() { assert_eq!(resp.greeting, "took a while to load"); } -#[runtime::test] +#[tokio::test] async fn unknown_protocol() { let (notif_tx, _, _, _shutdown) = setup_service(GreetingService::new(&[]), 1).await; @@ -370,7 +369,7 @@ async fn unknown_protocol() { )); } -#[runtime::test] +#[tokio::test] async fn rejected_no_sessions_available() { let (mut muxer, _outbound, _, _, _shutdown) = setup(GreetingService::new(&[]), 0).await; let socket = muxer.incoming_mut().next().await.unwrap(); @@ -382,7 +381,7 @@ async fn rejected_no_sessions_available() { )); } -#[runtime::test] +#[tokio::test] async fn stream_still_works_after_cancel() { let service_impl = GreetingService::default(); let (mut muxer, _outbound, _, _, _shutdown) = setup(service_impl.clone(), 1).await; @@ -422,7 +421,7 @@ async fn stream_still_works_after_cancel() { }); } -#[runtime::test] +#[tokio::test] async fn stream_interruption_handling() { let service_impl = GreetingService::default(); let (mut muxer, _outbound, _, _, _shutdown) = setup(service_impl.clone(), 1).await; @@ -469,7 +468,7 @@ async fn stream_interruption_handling() { .unwrap(); } -#[runtime::test] +#[tokio::test] async fn max_global_sessions() { let builder = RpcServer::builder().with_maximum_simultaneous_sessions(1); let (muxer, _outbound, context, _shutdown) = setup_service_with_builder(GreetingService::default(), builder).await; @@ -527,7 +526,7 @@ async fn max_global_sessions() { .unwrap(); } -#[runtime::test] +#[tokio::test] async fn max_per_client_sessions() { let builder = RpcServer::builder() .with_maximum_simultaneous_sessions(3) diff --git a/comms/core/src/rate_limit.rs b/comms/core/src/rate_limit.rs index 2f4f5360d5..288835a92c 100644 --- a/comms/core/src/rate_limit.rs +++ b/comms/core/src/rate_limit.rs @@ -140,9 +140,8 @@ mod test { use futures::{stream, StreamExt}; use super::*; - use crate::runtime; - #[runtime::test] + #[tokio::test] async fn rate_limit() { let repeater = stream::repeat(()); @@ -168,7 +167,7 @@ mod test { assert_eq!(count, 10); } - #[runtime::test] + #[tokio::test] async fn rate_limit_restock() { let repeater = stream::repeat(()); diff --git a/comms/core/src/runtime.rs b/comms/core/src/runtime.rs deleted file mode 100644 index 78c3b8e11e..0000000000 --- a/comms/core/src/runtime.rs +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2020, The Tari Project -// -// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the -// following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following -// disclaimer. -// -// 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the -// following disclaimer in the documentation and/or other materials provided with the distribution. -// -// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote -// products derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, -// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -//! Runtime used by Tari comms (tokio) - -use tokio::runtime; -// Re-export -pub use tokio::{runtime::Handle, task, test}; - -/// Return the current tokio executor. Panics if the tokio runtime is not started. -#[inline] -pub fn current() -> runtime::Handle { - runtime::Handle::current() -} diff --git a/comms/core/src/test_utils/factories/node_identity.rs b/comms/core/src/test_utils/factories/node_identity.rs index 6963616f51..bf05fc8043 100644 --- a/comms/core/src/test_utils/factories/node_identity.rs +++ b/comms/core/src/test_utils/factories/node_identity.rs @@ -66,7 +66,7 @@ impl TestFactory for NodeIdentityFactory { Ok(NodeIdentity::new( secret_key, - control_service_address, + vec![control_service_address], self.peer_features, )) } diff --git a/comms/core/src/test_utils/factories/peer.rs b/comms/core/src/test_utils/factories/peer.rs index 83fd7315c3..eb0ae4c5e7 100644 --- a/comms/core/src/test_utils/factories/peer.rs +++ b/comms/core/src/test_utils/factories/peer.rs @@ -28,10 +28,10 @@ use tari_crypto::keys::PublicKey; use super::{net_address::NetAddressesFactory, TestFactory, TestFactoryError}; use crate::{ + net_address::{MultiaddressesWithStats, PeerAddressSource}, peer_manager::{NodeId, Peer, PeerFeatures, PeerFlags}, types::CommsPublicKey, }; - pub fn create_many(n: usize) -> PeersFactory { PeersFactory::default().with_count(n) } @@ -93,7 +93,7 @@ impl TestFactory for PeerFactory { Ok(Peer::new( public_key, node_id, - addresses.into(), + MultiaddressesWithStats::from_addresses_with_source(addresses, &PeerAddressSource::Config), flags, self.peer_features, Default::default(), diff --git a/comms/core/src/test_utils/mocks/connection_manager.rs b/comms/core/src/test_utils/mocks/connection_manager.rs index 1b3874891c..66b8cc41e3 100644 --- a/comms/core/src/test_utils/mocks/connection_manager.rs +++ b/comms/core/src/test_utils/mocks/connection_manager.rs @@ -39,7 +39,6 @@ use crate::{ PeerConnection, }, peer_manager::NodeId, - runtime::task, }; pub fn create_connection_manager_mock() -> (ConnectionManagerRequester, ConnectionManagerMock) { @@ -118,7 +117,7 @@ impl ConnectionManagerMock { } pub fn spawn(self) { - task::spawn(Self::run(self)); + tokio::spawn(Self::run(self)); } pub async fn run(mut self) { diff --git a/comms/core/src/test_utils/mocks/connectivity_manager.rs 
b/comms/core/src/test_utils/mocks/connectivity_manager.rs index 37f70fe507..ae81bc05db 100644 --- a/comms/core/src/test_utils/mocks/connectivity_manager.rs +++ b/comms/core/src/test_utils/mocks/connectivity_manager.rs @@ -38,7 +38,6 @@ use crate::{ ConnectivityStatus, }, peer_manager::NodeId, - runtime::task, }; pub fn create_connectivity_mock() -> (ConnectivityRequester, ConnectivityManagerMock) { @@ -217,7 +216,7 @@ impl ConnectivityManagerMock { pub fn spawn(self) -> ConnectivityManagerMockState { let state = self.get_shared_state(); - task::spawn(Self::run(self)); + tokio::spawn(Self::run(self)); state } diff --git a/comms/core/src/test_utils/mocks/peer_connection.rs b/comms/core/src/test_utils/mocks/peer_connection.rs index fc7c5cd5e7..2f0b80f676 100644 --- a/comms/core/src/test_utils/mocks/peer_connection.rs +++ b/comms/core/src/test_utils/mocks/peer_connection.rs @@ -20,9 +20,12 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::sync::{ - atomic::{AtomicUsize, Ordering}, - Arc, +use std::{ + str::FromStr, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, }; use tokio::{ @@ -51,13 +54,14 @@ static ID_COUNTER: AtomicUsize = AtomicUsize::new(0); pub fn create_dummy_peer_connection(node_id: NodeId) -> (PeerConnection, mpsc::Receiver) { let (tx, rx) = mpsc::channel(1); + let addr = Multiaddr::from_str("/ip4/23.23.23.23/tcp/80").unwrap(); ( - PeerConnection::new( + PeerConnection::unverified( 1, tx, node_id, PeerFeatures::COMMUNICATION_NODE, - Multiaddr::empty(), + addr, ConnectionDirection::Inbound, AtomicRefCounter::new(), ), @@ -88,7 +92,7 @@ pub async fn create_peer_connection_mock_pair( rt_handle.spawn(mock.run()); ( - PeerConnection::new( + PeerConnection::unverified( // ID must be unique since it is used for connection equivalency, so we re-implement this in the mock ID_COUNTER.fetch_add(1, Ordering::Relaxed), tx1, @@ -99,7 +103,7 @@ pub async fn create_peer_connection_mock_pair( mock_state_in.substream_counter(), ), mock_state_in, - PeerConnection::new( + PeerConnection::unverified( ID_COUNTER.fetch_add(1, Ordering::Relaxed), tx2, peer1.node_id, diff --git a/comms/core/src/test_utils/mod.rs b/comms/core/src/test_utils/mod.rs index 5471fa4dab..c0e72d786c 100644 --- a/comms/core/src/test_utils/mod.rs +++ b/comms/core/src/test_utils/mod.rs @@ -24,8 +24,8 @@ cfg_test! 
{ #[allow(dead_code)] pub mod factories; pub(crate) mod test_node; -} +} pub mod mocks; pub mod node_id; pub mod node_identity; diff --git a/comms/core/src/tor/control_client/client.rs b/comms/core/src/tor/control_client/client.rs index 29663f7603..49f1bce751 100644 --- a/comms/core/src/tor/control_client/client.rs +++ b/comms/core/src/tor/control_client/client.rs @@ -60,8 +60,9 @@ impl TorControlPortClient { addr: Multiaddr, event_tx: broadcast::Sender, ) -> Result { - let mut tcp = TcpTransport::new(); - tcp.set_nodelay(true); + let tcp = TcpTransport::new(); + // TODO: Probably don't need nodelay + // tcp.set_nodelay(true); let socket = tcp.dial(&addr).await?; Ok(Self::new(socket, event_tx)) } @@ -289,10 +290,7 @@ mod test { use tokio_stream::StreamExt; use super::*; - use crate::{ - runtime, - tor::control_client::{test_server, test_server::canned_responses, types::PrivateKey}, - }; + use crate::tor::control_client::{test_server, test_server::canned_responses, types::PrivateKey}; async fn setup_test() -> (TorControlPortClient, test_server::State) { let (_, mock_state, socket) = test_server::spawn().await; @@ -301,7 +299,7 @@ mod test { (tor, mock_state) } - #[runtime::test] + #[tokio::test] async fn connect() { let (mut listener, addr) = TcpTransport::default() .listen(&"/ip4/127.0.0.1/tcp/0".parse().unwrap()) @@ -318,7 +316,7 @@ mod test { in_sock.shutdown().await.unwrap(); } - #[runtime::test] + #[tokio::test] async fn authenticate() { let (mut tor, mock_state) = setup_test().await; @@ -342,7 +340,7 @@ mod test { assert_eq!(req.remove(0), "AUTHENTICATE NOTACTUALLYHEXENCODED"); } - #[runtime::test] + #[tokio::test] async fn get_conf_ok() { let (mut tor, mock_state) = setup_test().await; @@ -357,7 +355,7 @@ mod test { assert_eq!(results[2], "8082 127.0.0.1:9001"); } - #[runtime::test] + #[tokio::test] async fn get_conf_err() { let (mut tor, mock_state) = setup_test().await; @@ -367,7 +365,7 @@ mod test { unpack_enum!(TorClientError::TorCommandFailed(_s) = err); } - #[runtime::test] + #[tokio::test] async fn get_info_multiline_kv_ok() { let (mut tor, mock_state) = setup_test().await; @@ -379,7 +377,7 @@ mod test { assert_eq!(values, &["127.0.0.1:9050", "unix:/run/tor/socks"]); } - #[runtime::test] + #[tokio::test] async fn get_info_kv_multiline_value_ok() { let (mut tor, mock_state) = setup_test().await; @@ -394,7 +392,7 @@ mod test { ]); } - #[runtime::test] + #[tokio::test] async fn get_info_err() { let (mut tor, mock_state) = setup_test().await; @@ -404,7 +402,7 @@ mod test { unpack_enum!(TorClientError::TorCommandFailed(_s) = err); } - #[runtime::test] + #[tokio::test] async fn add_onion_from_private_key_ok() { let (mut tor, mock_state) = setup_test().await; @@ -425,7 +423,7 @@ mod test { assert_eq!(request, "ADD_ONION RSA1024:dummy-key Port=8080,127.0.0.1:8080"); } - #[runtime::test] + #[tokio::test] async fn add_onion_ok() { let (mut tor, mock_state) = setup_test().await; @@ -457,7 +455,7 @@ mod test { assert_eq!(request, "ADD_ONION NEW:BEST NumStreams=10 Port=8080,127.0.0.1:8080"); } - #[runtime::test] + #[tokio::test] async fn add_onion_discard_pk_ok() { let (mut tor, mock_state) = setup_test().await; @@ -496,7 +494,7 @@ mod test { ); } - #[runtime::test] + #[tokio::test] async fn add_onion_err() { let (mut tor, mock_state) = setup_test().await; @@ -510,7 +508,7 @@ mod test { unpack_enum!(TorClientError::TorCommandFailed(_s) = err); } - #[runtime::test] + #[tokio::test] async fn del_onion_ok() { let (mut tor, mock_state) = setup_test().await; @@ -522,7 +520,7 @@ mod test { 
assert_eq!(request, "DEL_ONION some-fake-id"); } - #[runtime::test] + #[tokio::test] async fn del_onion_err() { let (mut tor, mock_state) = setup_test().await; @@ -534,7 +532,7 @@ mod test { assert_eq!(request, "DEL_ONION some-fake-id"); } - #[runtime::test] + #[tokio::test] async fn protocol_info_cookie_ok() { let (mut tor, mock_state) = setup_test().await; @@ -555,7 +553,7 @@ mod test { ); } - #[runtime::test] + #[tokio::test] async fn protocol_info_no_auth_ok() { let (mut tor, mock_state) = setup_test().await; @@ -573,7 +571,7 @@ mod test { assert_eq!(info.auth_methods.cookie_file, None); } - #[runtime::test] + #[tokio::test] async fn protocol_info_err() { let (mut tor, mock_state) = setup_test().await; diff --git a/comms/core/src/tor/control_client/monitor.rs b/comms/core/src/tor/control_client/monitor.rs index 550cac0757..bce9dddd64 100644 --- a/comms/core/src/tor/control_client/monitor.rs +++ b/comms/core/src/tor/control_client/monitor.rs @@ -31,7 +31,6 @@ use tokio::{ use tokio_util::codec::{Framed, LinesCodec}; use super::{event::TorControlEvent, parsers, response::ResponseLine, LOG_TARGET}; -use crate::runtime::task; pub fn spawn_monitor( mut cmd_rx: mpsc::Receiver, @@ -43,7 +42,7 @@ where { let (responses_tx, responses_rx) = mpsc::channel(100); - task::spawn(async move { + tokio::spawn(async move { let framed = Framed::new(socket, LinesCodec::new()); let (mut sink, mut stream) = framed.split(); loop { diff --git a/comms/core/src/tor/control_client/test_server.rs b/comms/core/src/tor/control_client/test_server.rs index 15b66804a2..d276cdbfec 100644 --- a/comms/core/src/tor/control_client/test_server.rs +++ b/comms/core/src/tor/control_client/test_server.rs @@ -25,14 +25,14 @@ use std::sync::Arc; use futures::{lock::Mutex, stream, SinkExt, StreamExt}; use tokio_util::codec::{Framed, LinesCodec}; -use crate::{memsocket::MemorySocket, multiaddr::Multiaddr, runtime, test_utils::transport::build_connected_sockets}; +use crate::{memsocket::MemorySocket, multiaddr::Multiaddr, test_utils::transport::build_connected_sockets}; pub async fn spawn() -> (Multiaddr, State, MemorySocket) { let (addr, socket_out, socket_in) = build_connected_sockets().await; let server = TorControlPortTestServer::new(socket_in); let state = server.get_shared_state(); - runtime::current().spawn(server.run()); + tokio::spawn(server.run()); (addr, state, socket_out) } diff --git a/comms/core/src/tor/hidden_service/controller.rs b/comms/core/src/tor/hidden_service/controller.rs index a4482aecbc..a706da54df 100644 --- a/comms/core/src/tor/hidden_service/controller.rs +++ b/comms/core/src/tor/hidden_service/controller.rs @@ -31,7 +31,6 @@ use tokio::{sync::broadcast, time}; use crate::{ multiaddr::Multiaddr, - runtime::task, socks, tor::{ control_client::{ @@ -145,7 +144,7 @@ impl HiddenServiceController { let mut shutdown_signal = hidden_service.shutdown_signal.clone(); let mut event_stream = self.client.as_ref().unwrap().get_event_stream(); - task::spawn({ + tokio::spawn({ async move { loop { let either = future::select(&mut shutdown_signal, event_stream.next()).await; diff --git a/comms/core/src/transports/dns/system.rs b/comms/core/src/transports/dns/system.rs index 5110c4c4c9..01be424889 100644 --- a/comms/core/src/transports/dns/system.rs +++ b/comms/core/src/transports/dns/system.rs @@ -31,7 +31,6 @@ use log::*; use super::{DnsResolver, DnsResolverError}; use crate::{ multiaddr::{Multiaddr, Protocol}, - runtime::task::spawn_blocking, transports::dns::common, }; @@ -61,7 +60,7 @@ impl DnsResolver for 
SystemDnsResolver { /// Performs an non-blocking DNS lookup of the given address async fn dns_lookup(addr: T) -> Result where T: ToSocketAddrs + Display + Send + Sync + 'static { - spawn_blocking(move || { + tokio::task::spawn_blocking(move || { debug!(target: LOG_TARGET, "Resolving address `{}` using system resolver", addr); addr.to_socket_addrs() .map_err(|err| DnsResolverError::NameResolutionFailed { diff --git a/comms/core/src/transports/dns/tor.rs b/comms/core/src/transports/dns/tor.rs index aa9ce1c658..04d59ae4ec 100644 --- a/comms/core/src/transports/dns/tor.rs +++ b/comms/core/src/transports/dns/tor.rs @@ -94,7 +94,7 @@ mod test { // This only works when a tor proxy is running #[ignore] - #[crate::runtime::test] + #[tokio::test] async fn resolve() { let resolver = TorDnsResolver::new(SocksConfig { proxy_address: "/ip4/127.0.0.1/tcp/9050".parse().unwrap(), diff --git a/comms/core/src/transports/memory.rs b/comms/core/src/transports/memory.rs index 4c3455966e..baf522c478 100644 --- a/comms/core/src/transports/memory.rs +++ b/comms/core/src/transports/memory.rs @@ -134,9 +134,8 @@ mod test { use tokio::io::{AsyncReadExt, AsyncWriteExt}; use super::*; - use crate::runtime; - #[runtime::test] + #[tokio::test] async fn simple_listen_and_dial() -> Result<(), ::std::io::Error> { let t = MemoryTransport::default(); @@ -162,7 +161,7 @@ mod test { Ok(()) } - #[runtime::test] + #[tokio::test] async fn unsupported_multiaddrs() { let t = MemoryTransport::default(); diff --git a/comms/core/src/transports/socks.rs b/comms/core/src/transports/socks.rs index aed81823b3..10ad98cb8a 100644 --- a/comms/core/src/transports/socks.rs +++ b/comms/core/src/transports/socks.rs @@ -73,7 +73,8 @@ impl SocksTransport { pub fn create_socks_tcp_transport() -> TcpTransport { let mut tcp_transport = TcpTransport::new(); - tcp_transport.set_nodelay(true); + // TODO: Confirm, but probably don't need nodelay + // tcp_transport.set_nodelay(true); tcp_transport.set_dns_resolver(SystemDnsResolver); tcp_transport } diff --git a/comms/core/tests/helpers.rs b/comms/core/tests/helpers.rs index 281a3f44fd..9142fb1217 100644 --- a/comms/core/tests/helpers.rs +++ b/comms/core/tests/helpers.rs @@ -45,6 +45,7 @@ pub fn create_peer_storage() -> CommsDatabase { LMDBWrapper::new(Arc::new(peer_database)) } +#[cfg(test)] pub fn create_comms(signal: ShutdownSignal) -> UnspawnedCommsNode { let node_identity = Arc::new(NodeIdentity::random( &mut OsRng, diff --git a/comms/core/tests/rpc.rs b/comms/core/tests/rpc.rs index 90e393012d..d47346aacb 100644 --- a/comms/core/tests/rpc.rs +++ b/comms/core/tests/rpc.rs @@ -53,7 +53,8 @@ async fn spawn_node(signal: ShutdownSignal) -> (CommsNode, RpcServerHandle) { comms .node_identity() - .set_public_address(comms.listening_address().clone()); + .replace_public_address(comms.listening_address().clone()); + (comms, rpc_server_hnd) } diff --git a/comms/core/tests/rpc_stress.rs b/comms/core/tests/rpc_stress.rs index 708121ca3d..8f3bc66f5b 100644 --- a/comms/core/tests/rpc_stress.rs +++ b/comms/core/tests/rpc_stress.rs @@ -54,7 +54,7 @@ async fn spawn_node(signal: ShutdownSignal) -> CommsNode { comms .node_identity() - .set_public_address(comms.listening_address().clone()); + .replace_public_address(comms.listening_address().clone()); comms } diff --git a/comms/core/tests/substream_stress.rs b/comms/core/tests/substream_stress.rs index c9ad61a722..e5b2641009 100644 --- a/comms/core/tests/substream_stress.rs +++ b/comms/core/tests/substream_stress.rs @@ -49,7 +49,7 @@ pub async fn spawn_node(signal: 
ShutdownSignal) -> (CommsNode, ProtocolNotificat comms .node_identity() - .set_public_address(comms.listening_address().clone()); + .replace_public_address(comms.listening_address().clone()); (comms, notif_rx) } diff --git a/comms/dht/examples/memory_net/drain_burst.rs b/comms/dht/examples/memory_net/drain_burst.rs index 07d0f90363..5d508ec98b 100644 --- a/comms/dht/examples/memory_net/drain_burst.rs +++ b/comms/dht/examples/memory_net/drain_burst.rs @@ -74,14 +74,14 @@ mod test { use super::*; - #[runtime::test] + #[tokio::test] async fn drain_terminating_stream() { let mut stream = stream::iter(1..10u8); let burst = DrainBurst::new(&mut stream).await; assert_eq!(burst, (1..10u8).into_iter().collect::>()); } - #[runtime::test] + #[tokio::test] async fn drain_stream_with_pending() { let mut stream = stream::iter(1..10u8); let burst = DrainBurst::new(&mut stream).await; diff --git a/comms/dht/examples/memory_net/utilities.rs b/comms/dht/examples/memory_net/utilities.rs index 2fbe0d1745..0c578a14fd 100644 --- a/comms/dht/examples/memory_net/utilities.rs +++ b/comms/dht/examples/memory_net/utilities.rs @@ -752,12 +752,10 @@ impl TestNode { }); } - #[inline] pub fn node_identity(&self) -> Arc { self.comms.node_identity() } - #[inline] pub fn to_peer(&self) -> Peer { self.comms.node_identity().to_peer() } @@ -775,7 +773,7 @@ impl TestNode { match &*event { PeerConnected(conn) if conn.peer_node_id() == node_id => { - break Some(conn.clone()); + break Some(*conn.clone()); }, _ => {}, } @@ -905,7 +903,7 @@ async fn setup_comms_dht( let comms = CommsBuilder::new() .allow_test_addresses() // In this case the listener address and the public address are the same (/memory/...) - .with_listener_address(node_identity.public_address()) + .with_listener_address(node_identity.first_public_address()) .with_shutdown_signal(shutdown_signal) .with_node_identity(node_identity) .with_min_connectivity(1) diff --git a/comms/dht/examples/propagation/node.rs b/comms/dht/examples/propagation/node.rs index 4c563aaa60..4f9dda1f82 100644 --- a/comms/dht/examples/propagation/node.rs +++ b/comms/dht/examples/propagation/node.rs @@ -25,7 +25,6 @@ use std::{path::Path, sync::Arc, time::Duration}; use rand::rngs::OsRng; use tari_comms::{ backoff::ConstantBackoff, - multiaddr::Multiaddr, peer_manager::PeerFeatures, pipeline, pipeline::SinkService, @@ -67,8 +66,13 @@ pub async fn create>( let peer_database = datastore.get_handle("peerdb").unwrap(); let peer_database = LMDBWrapper::new(Arc::new(peer_database)); - let node_identity = node_identity - .unwrap_or_else(|| Arc::new(NodeIdentity::random(&mut OsRng, Multiaddr::empty(), Default::default()))); + let node_identity = node_identity.unwrap_or_else(|| { + Arc::new(NodeIdentity::random_multiple_addresses( + &mut OsRng, + vec![], + Default::default(), + )) + }); let builder = CommsBuilder::new() .allow_test_addresses() diff --git a/comms/dht/examples/propagation/prompt.rs b/comms/dht/examples/propagation/prompt.rs index 5dcfaa9de2..f60c8ea831 100644 --- a/comms/dht/examples/propagation/prompt.rs +++ b/comms/dht/examples/propagation/prompt.rs @@ -25,6 +25,7 @@ use std::{io::stdin, str::FromStr, sync::Arc}; use anyhow::anyhow; use tari_comms::{ multiaddr::Multiaddr, + net_address::{MultiaddressesWithStats, PeerAddressSource}, peer_manager::{NodeId, Peer, PeerFeatures}, types::CommsPublicKey, NodeIdentity, @@ -68,7 +69,7 @@ pub fn user_prompt(node_identity: &Arc) -> anyhow::Result Option { Some(Peer::new( pk, node_id, - vec![address].into(), + 
MultiaddressesWithStats::from_addresses_with_source(vec![address], &PeerAddressSource::Config), Default::default(), features, Default::default(), diff --git a/comms/dht/src/actor.rs b/comms/dht/src/actor.rs index bed2b99e9e..938d7c5f38 100644 --- a/comms/dht/src/actor.rs +++ b/comms/dht/src/actor.rs @@ -667,6 +667,7 @@ impl DhtActor { let mut banned_count = 0; let mut excluded_count = 0; let mut filtered_out_node_count = 0; + let query = PeerQuery::new() .select_where(|peer| { if peer.is_banned() { @@ -829,9 +830,10 @@ mod test { use std::{convert::TryFrom, time::Duration}; use chrono::{DateTime, Utc}; - use tari_comms::{ - runtime, - test_utils::mocks::{create_connectivity_mock, create_peer_connection_mock_pair, ConnectivityManagerMockState}, + use tari_comms::test_utils::mocks::{ + create_connectivity_mock, + create_peer_connection_mock_pair, + ConnectivityManagerMockState, }; use tari_shutdown::Shutdown; use tari_test_utils::random; @@ -855,7 +857,7 @@ mod test { conn } - #[runtime::test] + #[tokio::test] async fn send_join_request() { let node_identity = make_node_identity(); let peer_manager = build_peer_manager(); @@ -933,7 +935,7 @@ mod test { ) } - #[runtime::test] + #[tokio::test] async fn it_discovers_a_peer() { let shutdown = Shutdown::new(); let (mut dht, node_identity, connectivity_mock, discovery_mock, _) = setup(shutdown.to_signal()).await; @@ -947,7 +949,7 @@ mod test { assert_eq!(discovery_mock.call_count(), 1); } - #[runtime::test] + #[tokio::test] async fn it_gets_active_peer_connection() { let shutdown = Shutdown::new(); let (mut dht, node_identity, connectivity_mock, discovery_mock, peer_manager) = @@ -963,7 +965,7 @@ mod test { assert_eq!(connectivity_mock.call_count().await, 1); } - #[runtime::test] + #[tokio::test] async fn it_errors_if_discovery_fails_for_unknown_peer() { let shutdown = Shutdown::new(); let (mut dht, _, connectivity_mock, discovery_mock, _) = setup(shutdown.to_signal()).await; @@ -974,7 +976,7 @@ mod test { } } - #[runtime::test] + #[tokio::test] async fn insert_message_signature() { let node_identity = make_node_identity(); let peer_manager = build_peer_manager(); @@ -1018,7 +1020,7 @@ mod test { assert_eq!(num_hits, 1); } - #[runtime::test] + #[tokio::test] async fn dedup_cache_cleanup() { let node_identity = make_node_identity(); let peer_manager = build_peer_manager(); @@ -1106,8 +1108,8 @@ mod test { } } - #[runtime::test] - async fn select_peers() { + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + async fn test_select_peers() { let node_identity = make_node_identity(); let peer_manager = build_peer_manager(); @@ -1219,7 +1221,7 @@ mod test { assert_eq!(peers.len(), 1); } - #[runtime::test] + #[tokio::test] async fn get_and_set_metadata() { let node_identity = make_node_identity(); let peer_manager = build_peer_manager(); diff --git a/comms/dht/src/config.rs b/comms/dht/src/config.rs index fe94c9b59d..a3f401151f 100644 --- a/comms/dht/src/config.rs +++ b/comms/dht/src/config.rs @@ -103,7 +103,7 @@ pub struct DhtConfig { /// Once a peer has been marked as offline, wait at least this length of time before reconsidering them. /// In a situation where a node is not well-connected and many nodes are locally marked as offline, we can retry /// peers that were previously tried. 
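// Editor's sketch (not part of the patch): how a peer supplied via configuration is now
// constructed with an explicit, unsigned address source, using the
// MultiaddressesWithStats::from_addresses_with_source / PeerAddressSource::Config API shown
// above. The helper name and the concrete address value are illustrative only.
use tari_comms::{
    multiaddr::Multiaddr,
    net_address::{MultiaddressesWithStats, PeerAddressSource},
    peer_manager::{NodeId, Peer, PeerFeatures, PeerFlags},
    types::CommsPublicKey,
};

fn peer_from_config(public_key: CommsPublicKey, address: Multiaddr) -> Peer {
    Peer::new(
        public_key.clone(),
        NodeId::from_public_key(&public_key),
        // Config addresses carry no identity claim; the public key is only verified
        // when the address is actually dialled.
        MultiaddressesWithStats::from_addresses_with_source(vec![address], &PeerAddressSource::Config),
        PeerFlags::empty(),
        PeerFeatures::COMMUNICATION_NODE,
        Default::default(),
        Default::default(),
    )
}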
- /// Default: 2 hours + /// Default: 24 hours #[serde(with = "serializers::seconds")] pub offline_peer_cooldown: Duration, } @@ -169,7 +169,11 @@ impl Default for DhtConfig { allow_test_addresses: false, flood_ban_max_msg_count: 100_000, flood_ban_timespan: Duration::from_secs(100), - offline_peer_cooldown: Duration::from_secs(2 * 60 * 60), + // TODO: This should be depending on the kind of offline..... If it has been seen, it is different + // to a peer that is not seen at all + // Also, 2 hours is too short, because we'll cycle through 2000 peers every two hours + // Setting it to 24 hours for now + offline_peer_cooldown: Duration::from_secs(24 * 60 * 60), } } } diff --git a/comms/dht/src/connectivity/mod.rs b/comms/dht/src/connectivity/mod.rs index 6d008e52a3..9eaf9b2677 100644 --- a/comms/dht/src/connectivity/mod.rs +++ b/comms/dht/src/connectivity/mod.rs @@ -39,6 +39,7 @@ use std::{sync::Arc, time::Instant}; use log::*; pub use metrics::{MetricsCollector, MetricsCollectorHandle}; use tari_comms::{ + connection_manager::ConnectionDirection, connectivity::{ ConnectivityError, ConnectivityEvent, @@ -47,6 +48,7 @@ use tari_comms::{ ConnectivitySelection, }, multiaddr, + net_address::PeerAddressSource, peer_manager::{NodeDistance, NodeId, PeerManagerError, PeerQuery, PeerQuerySortBy}, NodeIdentity, PeerConnection, @@ -63,6 +65,8 @@ const LOG_TARGET: &str = "comms::dht::connectivity"; /// Error type for the DHT connectivity actor. #[derive(Debug, Error)] pub enum DhtConnectivityError { + #[error("Peer connection did not have a peer identity claim")] + PeerConnectionMissingPeerIdentityClaim, #[error("ConnectivityError: {0}")] ConnectivityError(#[from] ConnectivityError), #[error("PeerManagerError: {0}")] @@ -92,7 +96,6 @@ pub(crate) struct DhtConnectivity { dht_events: broadcast::Receiver>, metrics_collector: MetricsCollectorHandle, cooldown_in_effect: Option, - recent_connection_failure_count: usize, shutdown_signal: ShutdownSignal, } @@ -120,7 +123,6 @@ impl DhtConnectivity { random_pool_last_refresh: None, stats: Stats::new(), dht_events, - recent_connection_failure_count: 0, cooldown_in_effect: None, shutdown_signal, } @@ -290,12 +292,7 @@ impl DhtConnectivity { #[allow(clippy::single_match)] match event { DhtEvent::NetworkDiscoveryPeersAdded(info) => { - if info.has_new_neighbours() { - debug!( - target: LOG_TARGET, - "Network discovery discovered {} more neighbouring peers. 
Reinitializing pools", - info.num_new_peers - ); + if info.num_new_peers > 0 { self.refresh_peer_pools().await?; } }, @@ -496,7 +493,21 @@ impl DhtConnectivity { } async fn handle_new_peer_connected(&mut self, conn: PeerConnection) -> Result<(), DhtConnectivityError> { - self.peer_manager.mark_last_seen(conn.peer_node_id()).await?; + if conn.direction() == ConnectionDirection::Outbound { + if let Some(peer_identity_claim) = conn.peer_identity_claim() { + self.peer_manager + .mark_last_seen( + conn.peer_node_id(), + conn.address(), + &PeerAddressSource::FromPeerConnection { + peer_identity_claim: peer_identity_claim.clone(), + }, + ) + .await?; + } else { + return Err(DhtConnectivityError::PeerConnectionMissingPeerIdentityClaim); + } + } if conn.peer_features().is_client() { debug!( target: LOG_TARGET, @@ -565,7 +576,7 @@ impl DhtConnectivity { debug!(target: LOG_TARGET, "Connectivity event: {}", event); match event { PeerConnected(conn) => { - self.handle_new_peer_connected(conn).await?; + self.handle_new_peer_connected(*conn).await?; }, PeerConnectFailed(node_id) => { self.connection_handles.retain(|c| *c.peer_node_id() != node_id); @@ -579,33 +590,7 @@ impl DhtConnectivity { debug!(target: LOG_TARGET, "{} is not managed by the DHT. Ignoring", node_id); return Ok(()); } - - const TOLERATED_CONNECTION_FAILURES: usize = 40; - if self.recent_connection_failure_count < TOLERATED_CONNECTION_FAILURES { - self.recent_connection_failure_count += 1; - } - - if self.recent_connection_failure_count == TOLERATED_CONNECTION_FAILURES && - self.cooldown_in_effect.is_none() - { - warn!( - target: LOG_TARGET, - "Too many ({}) connection failures, cooldown is in effect", TOLERATED_CONNECTION_FAILURES - ); - self.cooldown_in_effect = Some(Instant::now()); - } - - if self - .cooldown_in_effect - .map(|ts| ts.elapsed() >= self.config.connectivity.high_failure_rate_cooldown) - .unwrap_or(true) - { - if self.cooldown_in_effect.is_some() { - self.cooldown_in_effect = None; - self.recent_connection_failure_count = 1; - } - self.replace_pool_peer(&node_id).await?; - } + self.replace_pool_peer(&node_id).await?; self.log_status(); }, PeerDisconnected(node_id) => { @@ -786,68 +771,40 @@ impl DhtConnectivity { // - it has the required features // - it didn't recently fail to connect, and // - it is not in the exclusion list in closest_request - let mut connect_ineligable_count = 0; - let mut banned_count = 0; - let mut excluded_count = 0; - let mut filtered_out_node_count = 0; - let mut already_connected = 0; + let offline_cooldown = self.config.offline_peer_cooldown; let query = PeerQuery::new() .select_where(|peer| { if peer.is_banned() { - banned_count += 1; return false; } if peer.features.is_client() { - filtered_out_node_count += 1; return false; } if connected.contains(&&peer.node_id) { - already_connected += 1; return false; } if peer .offline_since() - .map(|since| since <= self.config.offline_peer_cooldown) + .map(|since| since <= offline_cooldown) .unwrap_or(false) { - connect_ineligable_count += 1; return false; } let is_excluded = excluded.contains(&peer.node_id); if is_excluded { - excluded_count += 1; return false; } true }) - .sort_by(PeerQuerySortBy::DistanceFromLastConnected(node_id)) - // Fetch double here so that there is a bigger closest peer set that can be ordered by last seen - .limit(n * 2); + .sort_by(PeerQuerySortBy::DistanceFrom(node_id)) + .limit(n); let peers = peer_manager.perform_query(query).await?; - let total_excluded = banned_count + connect_ineligable_count + excluded_count + 
filtered_out_node_count; - if total_excluded > 0 { - debug!( - target: LOG_TARGET, - "\n====================================\n Closest Peer Selection\n\n {num_peers} peer(s) selected\n \ - {total} peer(s) were not selected \n\n {banned} banned\n {filtered_out} not communication node\n \ - {not_connectable} are not connectable\n {excluded} explicitly excluded\n {already_connected} already \ - connected - \n====================================\n", - num_peers = peers.len(), - total = total_excluded, - banned = banned_count, - filtered_out = filtered_out_node_count, - not_connectable = connect_ineligable_count, - excluded = excluded_count, - already_connected = already_connected - ); - } Ok(peers.into_iter().map(|p| p.node_id).take(n).collect()) } diff --git a/comms/dht/src/connectivity/test.rs b/comms/dht/src/connectivity/test.rs index 35ee819547..3120aa075b 100644 --- a/comms/dht/src/connectivity/test.rs +++ b/comms/dht/src/connectivity/test.rs @@ -26,7 +26,6 @@ use rand::{rngs::OsRng, seq::SliceRandom}; use tari_comms::{ connectivity::ConnectivityEvent, peer_manager::{Peer, PeerFeatures}, - runtime, test_utils::{ count_string_occurrences, mocks::{create_connectivity_mock, create_dummy_peer_connection, ConnectivityManagerMockState}, @@ -92,7 +91,7 @@ async fn setup( ) } -#[runtime::test] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn initialize() { let config = DhtConfig { num_neighbouring_nodes: 4, @@ -124,7 +123,7 @@ async fn initialize() { } } -#[runtime::test] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn added_neighbours() { let node_identity = make_node_identity(); let mut node_identities = @@ -152,7 +151,7 @@ async fn added_neighbours() { assert_eq!(count_string_occurrences(&calls, &["DialPeer"]), 5); let (conn, _) = create_dummy_peer_connection(closer_peer.node_id().clone()); - connectivity.publish_event(ConnectivityEvent::PeerConnected(conn.clone())); + connectivity.publish_event(ConnectivityEvent::PeerConnected(conn.clone().into())); async_assert!( connectivity.get_dialed_peers().await.len() >= 5, @@ -164,7 +163,7 @@ async fn added_neighbours() { assert_eq!(conn.handle_count(), 2); } -#[runtime::test] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn replace_peer_when_peer_goes_offline() { let node_identity = make_node_identity(); let node_identities = @@ -223,7 +222,7 @@ async fn replace_peer_when_peer_goes_offline() { assert_eq!(dialed[0], *node_identities[5].node_id()); } -#[runtime::test] +#[tokio::test] async fn insert_neighbour() { let node_identity = make_node_identity(); let node_identities = @@ -265,14 +264,12 @@ async fn insert_neighbour() { } mod metrics { - use super::*; mod collector { use tari_comms::peer_manager::NodeId; - use super::*; use crate::connectivity::MetricsCollector; - #[runtime::test] + #[tokio::test] async fn it_adds_message_received() { let mut metric_collector = MetricsCollector::spawn(); let node_id = NodeId::default(); @@ -287,7 +284,7 @@ mod metrics { assert_eq!(ts.count(), 100); } - #[runtime::test] + #[tokio::test] async fn it_clears_the_metrics() { let mut metric_collector = MetricsCollector::spawn(); let node_id = NodeId::default(); diff --git a/comms/dht/src/dht.rs b/comms/dht/src/dht.rs index f984791b07..2719ac4af9 100644 --- a/comms/dht/src/dht.rs +++ b/comms/dht/src/dht.rs @@ -455,7 +455,6 @@ mod test { use tari_comms::{ message::{MessageExt, MessageTag}, pipeline::SinkService, - runtime, test_utils::mocks::create_connectivity_mock, types::CommsDHKE, wrap_in_envelope_body, 
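// Editor's sketch (not part of the patch): the shape of the simplified neighbour selection
// after the DhtConnectivity change above. The per-peer predicate is abbreviated here;
// `peer_manager`, `node_id` and `n` are as in the surrounding method.
let query = PeerQuery::new()
    .select_where(|peer| {
        // Banned peers, client-only peers, currently connected peers, recently offline
        // peers and explicitly excluded peers are all filtered out at this point.
        !peer.is_banned() && !peer.features.is_client()
    })
    // Strict ordering by distance from this node keeps the neighbour pool predictable
    // and replaces the previous last-connected weighting.
    .sort_by(PeerQuerySortBy::DistanceFrom(node_id))
    .limit(n);
let neighbours: Vec<NodeId> = peer_manager
    .perform_query(query)
    .await?
    .into_iter()
    .map(|p| p.node_id)
    .take(n)
    .collect();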
@@ -480,8 +479,8 @@ mod test { }, }; - #[runtime::test] - async fn stack_unencrypted() { + #[tokio::test] + async fn test_stack_unencrypted() { let node_identity = make_node_identity(); let peer_manager = build_peer_manager(); let (connectivity, _) = create_connectivity_mock(); @@ -532,8 +531,8 @@ mod test { assert_eq!(msg, b"secret"); } - #[runtime::test] - async fn stack_encrypted() { + #[tokio::test] + async fn test_stack_encrypted() { let node_identity = make_node_identity(); let peer_manager = build_peer_manager(); let (connectivity, _) = create_connectivity_mock(); @@ -585,8 +584,8 @@ mod test { assert_eq!(msg, b"secret"); } - #[runtime::test] - async fn stack_forward() { + #[tokio::test] + async fn test_stack_forward() { let node_identity = make_node_identity(); let peer_manager = build_peer_manager(); let shutdown = Shutdown::new(); @@ -650,8 +649,8 @@ mod test { assert_eq!(spy.call_count(), 0); } - #[runtime::test] - async fn stack_filter_saf_message() { + #[tokio::test] + async fn test_stack_filter_saf_message() { let node_identity = make_client_identity(); let peer_manager = build_peer_manager(); let (connectivity, _) = create_connectivity_mock(); diff --git a/comms/dht/src/discovery/error.rs b/comms/dht/src/discovery/error.rs index 6135359b74..8862c6b266 100644 --- a/comms/dht/src/discovery/error.rs +++ b/comms/dht/src/discovery/error.rs @@ -44,6 +44,10 @@ pub enum DhtDiscoveryError { PeerManagerError(#[from] PeerManagerError), #[error("InvalidPeerMultiaddr: {0}")] InvalidPeerMultiaddr(String), + #[error("No signature provided")] + NoSignatureProvided, + #[error("Invalid signature: {0}")] + InvalidSignature(String), } impl DhtDiscoveryError { diff --git a/comms/dht/src/discovery/service.rs b/comms/dht/src/discovery/service.rs index 4730b57aa2..728525b9e9 100644 --- a/comms/dht/src/discovery/service.rs +++ b/comms/dht/src/discovery/service.rs @@ -20,15 +20,22 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{collections::HashMap, sync::Arc, time::Instant}; +use std::{ + collections::HashMap, + convert::{TryFrom, TryInto}, + sync::Arc, + time::Instant, +}; use log::*; use rand::{rngs::OsRng, RngCore}; use tari_comms::{ log_if_error, - peer_manager::{NodeId, NodeIdentity, Peer, PeerFeatures, PeerManager}, + multiaddr::Multiaddr, + net_address::PeerAddressSource, + peer_manager::{NodeId, NodeIdentity, Peer, PeerFeatures, PeerIdentityClaim, PeerManager}, types::CommsPublicKey, - validate_peer_addresses, + validate_addresses, }; use tari_shutdown::ShutdownSignal; use tari_utilities::{hex::Hex, ByteArray}; @@ -164,7 +171,7 @@ impl DhtDiscoveryService { trace!( target: LOG_TARGET, "Received discovery response message from {}", - discovery_msg.node_id.to_hex() + discovery_msg.public_key.to_hex() ); match self.inflight_discoveries.remove(&discovery_msg.nonce) { @@ -217,7 +224,7 @@ impl DhtDiscoveryService { target: LOG_TARGET, "Received a discovery response from peer '{}' that this node did not expect. 
It may have been \ cancelled earlier.", - discovery_msg.node_id.to_hex() + discovery_msg.public_key.to_hex() ); }, } @@ -228,17 +235,29 @@ impl DhtDiscoveryService { public_key: &CommsPublicKey, discovery_msg: Box, ) -> Result { - let node_id = self.validate_raw_node_id(public_key, &discovery_msg.node_id)?; + let node_id = NodeId::from_public_key(public_key); - let addresses = discovery_msg + let addresses: Vec = discovery_msg .addresses .into_iter() - .filter_map(|addr| addr.parse().ok()) - .collect::>(); + .map(Multiaddr::try_from) + .collect::>() + .map_err(|e| DhtDiscoveryError::InvalidPeerMultiaddr(e.to_string()))?; - validate_peer_addresses(&addresses, self.config.allow_test_addresses) + validate_addresses(&addresses, self.config.allow_test_addresses) .map_err(|err| DhtDiscoveryError::InvalidPeerMultiaddr(err.to_string()))?; + let peer_identity_claim = PeerIdentityClaim::new( + addresses.clone(), + PeerFeatures::from_bits_truncate(discovery_msg.peer_features), + discovery_msg + .identity_signature + .ok_or(DhtDiscoveryError::NoSignatureProvided)? + .try_into() + .map_err(|e: anyhow::Error| DhtDiscoveryError::InvalidSignature(e.to_string()))?, + None, + ); + let peer = self .peer_manager .add_or_update_online_peer( @@ -246,30 +265,13 @@ impl DhtDiscoveryService { node_id, addresses, PeerFeatures::from_bits_truncate(discovery_msg.peer_features), + &PeerAddressSource::FromDiscovery { peer_identity_claim }, ) .await?; Ok(peer) } - fn validate_raw_node_id( - &self, - public_key: &CommsPublicKey, - raw_node_id: &[u8], - ) -> Result { - // The reason that we check the given node id against what we expect instead of just using the given node id - // is in future the NodeId may not necessarily be derived from the public key (i.e. DAN node is registered on - // the base layer) - let expected_node_id = NodeId::from_key(public_key); - let node_id = NodeId::from_bytes(raw_node_id).map_err(|_| DhtDiscoveryError::InvalidNodeId)?; - if expected_node_id == node_id { - Ok(expected_node_id) - } else { - // TODO: Misbehaviour #banheuristic - Err(DhtDiscoveryError::InvalidNodeId) - } - } - async fn initiate_peer_discovery( &mut self, dest_pubkey: Box, @@ -317,8 +319,13 @@ impl DhtDiscoveryService { dest_public_key: Box, ) -> Result<(), DhtDiscoveryError> { let discover_msg = DiscoveryMessage { - node_id: self.node_identity.node_id().to_vec(), - addresses: vec![self.node_identity.public_address().to_string()], + public_key: self.node_identity.public_key().to_vec(), + addresses: self + .node_identity + .public_addresses() + .into_iter() + .map(|a| a.to_vec()) + .collect(), peer_features: self.node_identity.features().bits(), nonce, identity_signature: self.node_identity.identity_signature_read().as_ref().map(Into::into), @@ -352,7 +359,6 @@ impl DhtDiscoveryService { mod test { use std::time::Duration; - use tari_comms::runtime; use tari_shutdown::Shutdown; use super::*; @@ -362,7 +368,7 @@ mod test { test_utils::{build_peer_manager, make_node_identity}, }; - #[runtime::test] + #[tokio::test] async fn send_discovery() { let node_identity = make_node_identity(); let peer_manager = build_peer_manager(); diff --git a/comms/dht/src/inbound/decryption.rs b/comms/dht/src/inbound/decryption.rs index 83077b6978..23f88962de 100644 --- a/comms/dht/src/inbound/decryption.rs +++ b/comms/dht/src/inbound/decryption.rs @@ -506,7 +506,6 @@ mod test { use futures::{executor::block_on, future}; use tari_comms::{ message::{MessageExt, MessageTag}, - runtime, test_utils::mocks::create_connectivity_mock, wrap_in_envelope_body, 
BytesMut, @@ -579,7 +578,7 @@ mod test { assert_eq!(counter.get(), 0); } - #[runtime::test] + #[tokio::test] /// We can decrypt valid encrypted messages destined for us async fn decrypt_inbound_success() { let (connectivity, mock) = create_connectivity_mock(); @@ -612,7 +611,7 @@ mod test { assert_eq!(mock_state.count_calls_containing("BanPeer").await, 0); } - #[runtime::test] + #[tokio::test] /// An encrypted message is not destined for us async fn decrypt_inbound_not_for_us() { let (connectivity, mock) = create_connectivity_mock(); @@ -652,7 +651,7 @@ mod test { assert_eq!(mock_state.count_calls_containing("BanPeer").await, 0); } - #[runtime::test] + #[tokio::test] /// An encrypted message is empty async fn empty_message() { let node_identity = make_node_identity(); @@ -675,7 +674,7 @@ mod test { } } - #[runtime::test] + #[tokio::test] /// An encrypted message is destined for us but can't be decrypted async fn decrypt_inbound_fail_for_us() { let node_identity = make_node_identity(); @@ -695,7 +694,7 @@ mod test { .await; } - #[runtime::test] + #[tokio::test] /// An encrypted message has no destination async fn decrypt_inbound_fail_no_destination() { let node_identity = make_node_identity(); @@ -715,7 +714,7 @@ mod test { .await; } - #[runtime::test] + #[tokio::test] /// An encrypted message destined for us has an invalid signature async fn decrypt_inbound_fail_invalid_signature_encrypted() { let node_identity = make_node_identity(); @@ -765,7 +764,7 @@ mod test { .await; } - #[runtime::test] + #[tokio::test] /// An unencrypted message has an invalid signature async fn decrypt_inbound_fail_invalid_signature_cleartext() { let node_identity = make_node_identity(); @@ -791,7 +790,7 @@ mod test { } } - #[runtime::test] + #[tokio::test] /// An encrypted message has no signature async fn decrypt_inbound_fail_missing_signature_encrypted() { let node_identity = make_node_identity(); @@ -817,7 +816,7 @@ mod test { } } - #[runtime::test] + #[tokio::test] /// An encrypted message has no ephemeral key async fn decrypt_inbound_fail_missing_ephemeral_encrypted() { let node_identity = make_node_identity(); @@ -843,7 +842,7 @@ mod test { } } - #[runtime::test] + #[tokio::test] /// An unencrypted message has a signature that can't be decoded (wire format) async fn decrypt_inbound_fail_cleartext_signature_decode_wire() { let node_identity = make_node_identity(); @@ -869,7 +868,7 @@ mod test { } } - #[runtime::test] + #[tokio::test] /// An unencrypted message has a signature that can't be decoded (signature structure) async fn decrypt_inbound_fail_cleartext_signature_decode_structure() { let node_identity = make_node_identity(); diff --git a/comms/dht/src/inbound/deserialize.rs b/comms/dht/src/inbound/deserialize.rs index 23537899ff..6bd60e027f 100644 --- a/comms/dht/src/inbound/deserialize.rs +++ b/comms/dht/src/inbound/deserialize.rs @@ -131,10 +131,7 @@ impl Layer for DeserializeLayer { #[cfg(test)] mod test { - use tari_comms::{ - message::{MessageExt, MessageTag}, - runtime, - }; + use tari_comms::message::{MessageExt, MessageTag}; use super::*; use crate::{ @@ -149,7 +146,7 @@ mod test { }, }; - #[runtime::test] + #[tokio::test] async fn deserialize() { let spy = service_spy(); let peer_manager = build_peer_manager(); diff --git a/comms/dht/src/inbound/dht_handler/task.rs b/comms/dht/src/inbound/dht_handler/task.rs index e6ee3c7a5d..250070a1f6 100644 --- a/comms/dht/src/inbound/dht_handler/task.rs +++ b/comms/dht/src/inbound/dht_handler/task.rs @@ -20,13 +20,26 @@ // WHETHER IN CONTRACT, STRICT 
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -use std::{convert::TryFrom, str::FromStr, sync::Arc}; +use std::{ + convert::{TryFrom, TryInto}, + sync::Arc, +}; use log::*; use tari_comms::{ message::MessageExt, multiaddr::Multiaddr, - peer_manager::{IdentitySignature, NodeId, NodeIdentity, Peer, PeerFeatures, PeerFlags, PeerManager}, + net_address::{MultiaddressesWithStats, PeerAddressSource}, + peer_manager::{ + IdentitySignature, + NodeId, + NodeIdentity, + Peer, + PeerFeatures, + PeerFlags, + PeerIdentityClaim, + PeerManager, + }, pipeline::PipelineError, types::CommsPublicKey, OrNotFound, @@ -44,6 +57,7 @@ use crate::{ dht::{DiscoveryMessage, DiscoveryResponseMessage, JoinMessage}, envelope::DhtMessageType, }, + rpc::PeerInfo, DhtConfig, }; @@ -170,31 +184,38 @@ where S: Service let addresses = join_msg .addresses .iter() - .filter_map(|addr| Multiaddr::from_str(addr).ok()) + .filter_map(|addr| Multiaddr::try_from(addr.clone()).ok()) .collect::>(); if addresses.is_empty() { return Err(DhtInboundError::InvalidAddresses); } let node_id = NodeId::from_public_key(&authenticated_pk); + let features = PeerFeatures::from_bits_truncate(join_msg.peer_features); - let mut new_peer = Peer::new( + + let identity_signature: IdentitySignature = join_msg + .identity_signature + .map(IdentitySignature::try_from) + .transpose() + .map_err(|err| DhtInboundError::InvalidPeerIdentitySignature(err.to_string()))? + .ok_or(DhtInboundError::NoPeerIdentitySignature)?; + + let peer_identity_claim = PeerIdentityClaim::new(addresses.clone(), features, identity_signature, None); + + let new_peer = Peer::new( authenticated_pk, node_id.clone(), - addresses.into(), + MultiaddressesWithStats::from_addresses_with_source(addresses, &PeerAddressSource::FromJoinMessage { + peer_identity_claim, + }), PeerFlags::empty(), features, vec![], String::new(), ); - new_peer.identity_signature = join_msg - .identity_signature - .map(IdentitySignature::try_from) - .transpose() - .map_err(|err| DhtInboundError::InvalidPeerIdentitySignature(err.to_string()))?; - let peer_validator = PeerValidator::new(&self.peer_manager, &self.config); - peer_validator.validate_and_add_peer(new_peer).await?; + self.peer_manager.add_peer(new_peer.clone()).await?; let origin_peer = self.peer_manager.find_by_node_id(&node_id).await.or_not_found()?; // DO NOT propagate this peer if this node has banned them @@ -277,6 +298,7 @@ where S: Service .decode_part::(0)? 
.ok_or(DhtInboundError::InvalidMessageBody)?; + let nonce = discover_msg.nonce; let authenticated_pk = message.authenticated_origin.ok_or_else(|| { DhtInboundError::OriginRequired("Origin header required for Discovery message".to_string()) })?; @@ -286,32 +308,10 @@ where S: Service "Received discovery message from '{}', forwarded by {}", authenticated_pk, message.source_peer ); - let addresses = discover_msg - .addresses - .iter() - .filter_map(|addr| Multiaddr::from_str(addr).ok()) - .collect::>(); - - if addresses.is_empty() { - return Err(DhtInboundError::InvalidAddresses); - } - - let node_id = NodeId::from_public_key(&authenticated_pk); - let features = PeerFeatures::from_bits_truncate(discover_msg.peer_features); - let mut new_peer = Peer::new( - authenticated_pk, - node_id.clone(), - addresses.into(), - PeerFlags::empty(), - features, - vec![], - String::new(), - ); - new_peer.identity_signature = discover_msg - .identity_signature - .map(IdentitySignature::try_from) - .transpose() - .map_err(|err| DhtInboundError::InvalidPeerIdentitySignature(err.to_string()))?; + let new_peer: PeerInfo = discover_msg + .try_into() + .map_err(DhtInboundError::InvalidDiscoveryMessage)?; + let node_id = NodeId::from_public_key(&new_peer.public_key); let peer_validator = PeerValidator::new(&self.peer_manager, &self.config); peer_validator.validate_and_add_peer(new_peer).await?; @@ -327,8 +327,7 @@ where S: Service } // Send the origin the current nodes latest contact info - self.send_discovery_response(origin_peer.public_key, discover_msg.nonce) - .await?; + self.send_discovery_response(origin_peer.public_key, nonce).await?; Ok(()) } @@ -341,8 +340,13 @@ where S: Service nonce: u64, ) -> Result<(), DhtInboundError> { let response = DiscoveryResponseMessage { - node_id: self.node_identity.node_id().to_vec(), - addresses: vec![self.node_identity.public_address().to_string()], + public_key: self.node_identity.public_key().to_vec(), + addresses: self + .node_identity + .public_addresses() + .iter() + .map(|a| a.to_vec()) + .collect(), peer_features: self.node_identity.features().bits(), nonce, identity_signature: self.node_identity.identity_signature_read().as_ref().map(Into::into), diff --git a/comms/dht/src/inbound/error.rs b/comms/dht/src/inbound/error.rs index aec8ea076c..6e920bb99d 100644 --- a/comms/dht/src/inbound/error.rs +++ b/comms/dht/src/inbound/error.rs @@ -20,7 +20,10 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
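// Editor's sketch (not part of the patch): the condensed shape of the inbound discovery
// handling above. The peer-supplied node id field is gone; the node id is always derived
// locally from the authenticated public key, and only addresses carrying a valid identity
// claim survive validation. `discover_msg`, `origin_peer` and `nonce` are as in the handler.
let new_peer: PeerInfo = discover_msg
    .try_into()
    .map_err(DhtInboundError::InvalidDiscoveryMessage)?;
let node_id = NodeId::from_public_key(&new_peer.public_key);
// Validate the claimed addresses and insert or update the peer record.
PeerValidator::new(&self.peer_manager, &self.config)
    .validate_and_add_peer(new_peer)
    .await?;
// Answer with this node's own signed contact information.
self.send_discovery_response(origin_peer.public_key, nonce).await?;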
-use tari_comms::{message::MessageError, peer_manager::PeerManagerError}; +use tari_comms::{ + message::MessageError, + peer_manager::{NodeId, PeerManagerError}, +}; use thiserror::Error; use crate::{ @@ -44,12 +47,18 @@ pub enum DhtInboundError { InvalidMessageBody, #[error("All given addresses were invalid")] InvalidAddresses, + #[error("One or more peer addresses were invalid for '{peer}'")] + InvalidPeerAddresses { peer: NodeId }, #[error("DhtDiscoveryError: {0}")] DhtDiscoveryError(#[from] DhtDiscoveryError), #[error("OriginRequired: {0}")] OriginRequired(String), #[error("Invalid peer identity signature: {0}")] InvalidPeerIdentitySignature(String), + #[error("No peer identity signature")] + NoPeerIdentitySignature, #[error("Invalid peer: {0}")] PeerValidatorError(#[from] PeerValidatorError), + #[error("Invalid discovery message {0}")] + InvalidDiscoveryMessage(#[from] anyhow::Error), } diff --git a/comms/dht/src/inbound/forward.rs b/comms/dht/src/inbound/forward.rs index e687eff8a1..6213b545d6 100644 --- a/comms/dht/src/inbound/forward.rs +++ b/comms/dht/src/inbound/forward.rs @@ -259,8 +259,8 @@ where S: Service mod test { use std::time::Duration; - use tari_comms::{message::MessageExt, runtime, runtime::task, wrap_in_envelope_body}; - use tokio::sync::mpsc; + use tari_comms::{message::MessageExt, wrap_in_envelope_body}; + use tokio::{sync::mpsc, task}; use super::*; use crate::{ @@ -269,7 +269,7 @@ mod test { test_utils::{make_dht_inbound_message, make_node_identity, service_spy}, }; - #[runtime::test] + #[tokio::test] async fn decryption_succeeded() { let spy = service_spy(); let (oms_tx, _) = mpsc::channel(1); @@ -288,7 +288,7 @@ mod test { assert!(spy.is_called()); } - #[runtime::test] + #[tokio::test] async fn decryption_failed() { let spy = service_spy(); let (oms_requester, oms_mock) = create_outbound_service_mock(1); diff --git a/comms/dht/src/network_discovery/discovering.rs b/comms/dht/src/network_discovery/discovering.rs index de51c6ed9c..d482ba5eac 100644 --- a/comms/dht/src/network_discovery/discovering.rs +++ b/comms/dht/src/network_discovery/discovering.rs @@ -26,7 +26,7 @@ use futures::{stream::FuturesUnordered, Stream, StreamExt}; use log::*; use tari_comms::{ connectivity::ConnectivityError, - peer_manager::{NodeDistance, NodeId, Peer, PeerFeatures}, + peer_manager::{NodeDistance, NodeId, PeerFeatures}, PeerConnection, PeerManager, }; @@ -39,6 +39,7 @@ use crate::{ peer_validator::{PeerValidator, PeerValidatorError}, proto::rpc::GetPeersRequest, rpc, + rpc::PeerInfo, DhtConfig, }; @@ -190,23 +191,21 @@ impl Discovering { Ok(()) } - async fn validate_and_add_peer(&mut self, sync_peer: &NodeId, new_peer: Peer) -> Result<(), NetworkDiscoveryError> { - if self.context.node_identity.node_id() == &new_peer.node_id { + async fn validate_and_add_peer( + &mut self, + sync_peer: &NodeId, + new_peer: PeerInfo, + ) -> Result<(), NetworkDiscoveryError> { + let node_id = NodeId::from_public_key(&new_peer.public_key); + if self.context.node_identity.node_id() == &node_id { debug!(target: LOG_TARGET, "Received our own node from peer sync. 
Ignoring."); return Ok(()); } - let new_peer_node_id = new_peer.node_id.clone(); let peer_validator = PeerValidator::new(self.peer_manager(), self.config()); - let peer_dist = new_peer.node_id.distance(self.context.node_identity.node_id()); - let is_neighbour = peer_dist <= self.neighbourhood_threshold; - match peer_validator.validate_and_add_peer(new_peer).await { Ok(true) => { - if is_neighbour { - self.stats.num_new_neighbours += 1; - } self.stats.num_new_peers += 1; Ok(()) }, @@ -220,17 +219,6 @@ impl Discovering { target: LOG_TARGET, "Received invalid peer from sync peer '{}': {}. Banning sync peer.", sync_peer, err ); - self.context - .connectivity - .ban_peer_until( - sync_peer.clone(), - self.context.config.ban_duration, - format!( - "Network discovery peer sent invalid peer '{}'. {}", - new_peer_node_id, err - ), - ) - .await?; Err(err.into()) }, } diff --git a/comms/dht/src/network_discovery/on_connect.rs b/comms/dht/src/network_discovery/on_connect.rs index 8faa0f5fa2..6794242081 100644 --- a/comms/dht/src/network_discovery/on_connect.rs +++ b/comms/dht/src/network_discovery/on_connect.rs @@ -37,9 +37,9 @@ use crate::{ peer_validator::PeerValidator, proto::rpc::GetPeersRequest, rpc, + rpc::PeerInfo, DhtConfig, }; - const LOG_TARGET: &str = "comms::dht::network_discovery:onconnect"; const NUM_FETCH_PEERS: u32 = 1000; @@ -81,7 +81,7 @@ impl OnConnect { conn.peer_node_id() ); - match self.sync_peers(conn.clone()).await { + match self.sync_peers(*conn.clone()).await { Ok(_) => continue, Err(err @ NetworkDiscoveryError::PeerValidationError(_)) => { warn!(target: LOG_TARGET, "{}. Banning peer.", err); @@ -135,12 +135,11 @@ impl OnConnect { let sync_peer = conn.peer_node_id(); let mut num_added = 0; - let peer_validator = PeerValidator::new(&self.context.peer_manager, self.config()); while let Some(resp) = peer_stream.next().await { match resp { Ok(resp) => match resp.peer.and_then(|peer| peer.try_into().ok()) { Some(peer) => { - if peer_validator.validate_and_add_peer(peer).await? { + if self.validate_and_add_peer(peer).await? { num_added += 1; } }, @@ -161,8 +160,6 @@ impl OnConnect { if num_added > 0 { self.context .publish_event(DhtEvent::NetworkDiscoveryPeersAdded(DhtNetworkDiscoveryRoundInfo { - // TODO: num_new_neighbours could be incorrect here - num_new_neighbours: 0, num_new_peers: num_added, num_duplicate_peers: 0, num_succeeded: num_added, @@ -173,6 +170,13 @@ impl OnConnect { Ok(()) } + // Returns true if the peer was added + async fn validate_and_add_peer(&self, peer: PeerInfo) -> Result { + let peer_validator = PeerValidator::new(&self.context.peer_manager, self.config()); + + Ok(peer_validator.validate_and_add_peer(peer).await?) + } + #[inline] fn config(&self) -> &DhtConfig { &self.context.config diff --git a/comms/dht/src/network_discovery/ready.rs b/comms/dht/src/network_discovery/ready.rs index 68f6ac67bd..61940dbfdd 100644 --- a/comms/dht/src/network_discovery/ready.rs +++ b/comms/dht/src/network_discovery/ready.rs @@ -20,8 +20,6 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-use std::cmp; - use log::*; use tari_comms::peer_manager::PeerFeatures; @@ -75,6 +73,11 @@ impl DiscoveryReady { return Ok(StateEvent::Idle); } + warn!( + target: LOG_TARGET, + "DHT - Not enough current peers, choosing random peers to sync with" + ); + let peers = self .context .peer_manager @@ -108,18 +111,16 @@ impl DiscoveryReady { let round_num = self.context.increment_num_rounds(); debug!(target: LOG_TARGET, "Completed peer round #{} ({})", round_num + 1, info); - if !info.has_new_neighbours() { - debug!( - target: LOG_TARGET, - "No new neighbours found this round {}. Going to on connect mode", info, - ); - return Ok(StateEvent::OnConnectMode); - } - - // If the last round was a success, but we didnt get any new peers, let's IDLE - if info.is_success() && !info.has_new_peers() && self.context.num_rounds() > 0 { + // If the last round was a success, but we didnt get any new peers, let's go to on connect or idle + // depending on the number of peers we have + if info.is_success() && !info.has_new_peers() { self.context.reset_num_rounds(); - return Ok(StateEvent::Idle); + if num_peers < self.context.config.network_discovery.min_desired_peers { + return Ok(StateEvent::Idle); + } else { + // We have enough peers, so we can go to on connect mode + return Ok(StateEvent::OnConnectMode); + } } if self.context.num_rounds() >= self.config().network_discovery.idle_after_num_rounds { @@ -130,14 +131,14 @@ impl DiscoveryReady { let peers = match last_round { Some(ref stats) => { - let num_peers_to_select = - cmp::min(stats.num_new_neighbours, self.config().network_discovery.max_sync_peers); + let num_peers_to_select = self.config().network_discovery.max_sync_peers; - if stats.has_new_neighbours() { + if stats.has_new_peers() { debug!( target: LOG_TARGET, - "Last peer sync round found {} new neighbour(s). Attempting to sync from those neighbours", - stats.num_new_neighbours + "Last peer sync round found {} new peer(s). Attempting to sync from those peers if they are \ + closer than existing peers", + stats.num_new_peers, ); self.context .peer_manager @@ -154,7 +155,7 @@ impl DiscoveryReady { } else { debug!( target: LOG_TARGET, - "Last peer sync round found no new neighbours. Transitioning to OnConnectMode", + "Last peer sync round found no new peers. Transitioning to OnConnectMode", ); return Ok(StateEvent::OnConnectMode); } diff --git a/comms/dht/src/network_discovery/state_machine.rs b/comms/dht/src/network_discovery/state_machine.rs index 1290b5c8fa..6f5ac6dd20 100644 --- a/comms/dht/src/network_discovery/state_machine.rs +++ b/comms/dht/src/network_discovery/state_machine.rs @@ -315,7 +315,6 @@ impl Display for DiscoveryParams { #[derive(Debug, Default, Clone)] pub struct DhtNetworkDiscoveryRoundInfo { - pub num_new_neighbours: usize, pub num_new_peers: usize, pub num_duplicate_peers: usize, pub num_succeeded: usize, @@ -327,10 +326,6 @@ impl DhtNetworkDiscoveryRoundInfo { self.num_new_peers > 0 } - pub fn has_new_neighbours(&self) -> bool { - self.num_new_neighbours > 0 - } - /// Returns true if the round succeeded (i.e. 
at least one sync peer was contacted and succeeded in the protocol), /// otherwise false pub fn is_success(&self) -> bool { @@ -342,10 +337,9 @@ impl Display for DhtNetworkDiscoveryRoundInfo { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, - "Synced {}/{}, num_new_neighbours = {}, num_new_peers = {}, num_duplicate_peers = {}", + "Synced {}/{}, num_new_peers = {}, num_duplicate_peers = {}", self.num_succeeded, self.sync_peers.len(), - self.num_new_neighbours, self.num_new_peers, self.num_duplicate_peers, ) diff --git a/comms/dht/src/network_discovery/test.rs b/comms/dht/src/network_discovery/test.rs index 1d791d459a..f947f69e6d 100644 --- a/comms/dht/src/network_discovery/test.rs +++ b/comms/dht/src/network_discovery/test.rs @@ -25,7 +25,6 @@ use tari_comms::{ connectivity::ConnectivityStatus, peer_manager::{Peer, PeerFeatures}, protocol::rpc::{mock::MockRpcServer, NamedProtocolService}, - runtime, test_utils::{ mocks::{create_connectivity_mock, ConnectivityManagerMockState}, node_identity::build_node_identity, @@ -49,6 +48,7 @@ use crate::{ mod state_machine { use super::*; + use crate::rpc::PeerInfo; async fn setup( mut config: DhtConfig, @@ -99,7 +99,7 @@ mod state_machine { ) } - #[runtime::test] + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] #[allow(clippy::redundant_closure)] async fn it_fetches_peers() { const NUM_PEERS: usize = 3; @@ -112,7 +112,9 @@ mod state_machine { ..DhtConfig::default_local_test() }; let peers = iter::repeat_with(|| make_node_identity().to_peer()) - .map(|p| GetPeersResponse { peer: Some(p.into()) }) + .map(|p| GetPeersResponse { + peer: Some(PeerInfo::from(p).into()), + }) .take(NUM_PEERS) .collect(); let (discovery_actor, connectivity_mock, peer_manager, node_identity, mut event_rx, _shutdown) = @@ -143,66 +145,13 @@ mod state_machine { let event = event_rx.recv().await.unwrap(); unpack_enum!(DhtEvent::NetworkDiscoveryPeersAdded(info) = &*event); - assert!(info.has_new_neighbours()); - assert_eq!(info.num_new_neighbours, NUM_PEERS); assert_eq!(info.num_new_peers, NUM_PEERS); assert_eq!(info.num_duplicate_peers, 0); assert_eq!(info.num_succeeded, 1); assert_eq!(info.sync_peers, vec![peer_node_identity.node_id().clone()]); } - #[runtime::test] - #[allow(clippy::redundant_closure)] - async fn dht_banning_peers() { - const NUM_PEERS: usize = 3; - let config = DhtConfig { - num_neighbouring_nodes: 4, - network_discovery: NetworkDiscoveryConfig { - min_desired_peers: NUM_PEERS, - ..Default::default() - }, - ..DhtConfig::default_local_test() - }; - let (discovery_actor, connectivity_mock, peer_manager, node_identity, _event_rx, _shutdown) = - setup(config, make_node_identity(), vec![]).await; - - let mock = DhtRpcServiceMock::new(); - let service = rpc::DhtService::new(mock.clone()); - let protocol_name = service.as_protocol_name(); - - let mut mock_server = MockRpcServer::new(service, node_identity.clone()); - let peer_node_identity = build_node_identity(PeerFeatures::COMMUNICATION_NODE); - // Add the peer that we'll sync from - peer_manager.add_peer(peer_node_identity.to_peer()).await.unwrap(); - mock_server.serve(); - - // Create a connection to the RPC mock and then make it available to the connectivity manager mock - let connection = mock_server - .create_connection(peer_node_identity.to_peer(), protocol_name.into()) - .await; - - connectivity_mock - .set_connectivity_status(ConnectivityStatus::Online(NUM_PEERS)) - .await; - connectivity_mock.add_active_connection(connection).await; - - // Checking banning logic - let mut 
invalid_peer = make_node_identity().to_peer(); - invalid_peer.set_valid_identity_signature(make_node_identity().identity_signature_read().clone().unwrap()); - let resp = GetPeersResponse { - peer: Some(invalid_peer.clone().into()), - }; - mock.get_peers.set_response(Ok(vec![resp])).await; - - discovery_actor.spawn(); - - connectivity_mock.await_call_count(1).await; - let banned = connectivity_mock.take_banned_peers().await; - let (peer, _, _) = &banned[0]; - assert_eq!(peer, peer_node_identity.node_id()); - } - - #[runtime::test] + #[tokio::test] async fn it_shuts_down() { let (discovery, _, _, _, _, mut shutdown) = setup(Default::default(), make_node_identity(), vec![]).await; @@ -254,7 +203,7 @@ mod discovery_ready { (node_identity, peer_manager, connectivity_mock, ready, context) } - #[runtime::test] + #[tokio::test] async fn it_begins_aggressive_discovery() { let (_, pm, _, mut ready, _) = setup(Default::default()); let peers = build_many_node_identities(1, PeerFeatures::COMMUNICATION_NODE); @@ -266,14 +215,14 @@ mod discovery_ready { assert!(params.num_peers_to_request.is_none()); } - #[runtime::test] + #[tokio::test] async fn it_idles_if_no_sync_peers() { let (_, _, _, mut ready, _) = setup(Default::default()); let state_event = ready.next_event().await; unpack_enum!(StateEvent::Idle = state_event); } - #[runtime::test] + #[tokio::test] async fn it_idles_if_num_rounds_reached() { let config = NetworkDiscoveryConfig { min_desired_peers: 0, @@ -283,7 +232,6 @@ mod discovery_ready { let (_, _, _, mut ready, context) = setup(config); context .set_last_round(DhtNetworkDiscoveryRoundInfo { - num_new_neighbours: 1, num_new_peers: 1, num_duplicate_peers: 0, num_succeeded: 1, @@ -294,7 +242,7 @@ mod discovery_ready { unpack_enum!(StateEvent::Idle = state_event); } - #[runtime::test] + #[tokio::test] async fn it_transitions_to_on_connect() { let config = NetworkDiscoveryConfig { min_desired_peers: 0, @@ -302,7 +250,12 @@ mod discovery_ready { ..Default::default() }; let (_, _, _, mut ready, context) = setup(config); - context.set_last_round(Default::default()).await; + context + .set_last_round(DhtNetworkDiscoveryRoundInfo { + num_succeeded: 1, + ..Default::default() + }) + .await; let state_event = ready.next_event().await; unpack_enum!(StateEvent::OnConnectMode = state_event); } diff --git a/comms/dht/src/outbound/broadcast.rs b/comms/dht/src/outbound/broadcast.rs index 21b634689d..9a0d2b30c6 100644 --- a/comms/dht/src/outbound/broadcast.rs +++ b/comms/dht/src/outbound/broadcast.rs @@ -563,8 +563,8 @@ mod test { use rand::rngs::OsRng; use tari_comms::{ multiaddr::Multiaddr, + net_address::{MultiaddressesWithStats, PeerAddressSource}, peer_manager::{NodeId, Peer, PeerFeatures, PeerFlags}, - runtime, types::CommsPublicKey, }; use tari_crypto::keys::PublicKey; @@ -583,13 +583,16 @@ mod test { }, }; - #[runtime::test] + #[tokio::test] async fn test_send_message_flood() { let pk = CommsPublicKey::default(); let example_peer = Peer::new( pk.clone(), NodeId::from_key(&pk), - vec!["/ip4/127.0.0.1/tcp/9999".parse::().unwrap()].into(), + MultiaddressesWithStats::from_addresses_with_source( + vec!["/ip4/127.0.0.1/tcp/9999".parse::().unwrap()], + &PeerAddressSource::Config, + ), PeerFlags::empty(), PeerFeatures::COMMUNICATION_NODE, Default::default(), @@ -648,7 +651,7 @@ mod test { assert!(requests.iter().any(|msg| msg.destination_node_id == other_peer.node_id)); } - #[runtime::test] + #[tokio::test] async fn test_send_message_direct_not_found() { // Test for issue 
https://github.com/tari-project/tari/issues/959 @@ -693,7 +696,7 @@ mod test { assert_eq!(spy.call_count(), 0); } - #[runtime::test] + #[tokio::test] async fn test_send_message_direct_dht_discovery() { let node_identity = NodeIdentity::random( &mut OsRng, diff --git a/comms/dht/src/outbound/message_send_state.rs b/comms/dht/src/outbound/message_send_state.rs index 347be8616c..c7e9725b7b 100644 --- a/comms/dht/src/outbound/message_send_state.rs +++ b/comms/dht/src/outbound/message_send_state.rs @@ -259,7 +259,7 @@ impl Index for MessageSendStates { mod test { use std::iter::repeat_with; - use tari_comms::{message::MessagingReplyTx, runtime}; + use tari_comms::message::MessagingReplyTx; use tokio::sync::oneshot; use super::*; @@ -279,7 +279,7 @@ mod test { assert!(!states.is_empty()); } - #[runtime::test] + #[tokio::test] async fn wait_single() { let (state, mut reply_tx) = create_send_state(); let states = MessageSendStates::from(vec![state]); @@ -294,7 +294,7 @@ mod test { assert!(!states.wait_single().await); } - #[runtime::test] + #[tokio::test] #[allow(clippy::redundant_closure)] async fn wait_percentage_success() { let (states, mut reply_txs) = repeat_with(|| create_send_state()) @@ -311,7 +311,7 @@ mod test { assert_eq!(failed.len(), 4); } - #[runtime::test] + #[tokio::test] #[allow(clippy::redundant_closure)] async fn wait_n_timeout() { let (states, mut reply_txs) = repeat_with(|| create_send_state()) @@ -342,7 +342,7 @@ mod test { assert_eq!(failed.len(), 6); } - #[runtime::test] + #[tokio::test] #[allow(clippy::redundant_closure)] async fn wait_all() { let (states, mut reply_txs) = repeat_with(|| create_send_state()) diff --git a/comms/dht/src/outbound/serialize.rs b/comms/dht/src/outbound/serialize.rs index 4ba404b28d..a32c87c807 100644 --- a/comms/dht/src/outbound/serialize.rs +++ b/comms/dht/src/outbound/serialize.rs @@ -136,12 +136,12 @@ impl Layer for SerializeLayer { #[cfg(test)] mod test { use prost::Message; - use tari_comms::{peer_manager::NodeId, runtime}; + use tari_comms::peer_manager::NodeId; use super::*; use crate::test_utils::{assert_send_static_service, create_outbound_message, service_spy}; - #[runtime::test] + #[tokio::test] async fn serialize() { let spy = service_spy(); let mut serialize = SerializeLayer.layer(spy.to_service::()); diff --git a/comms/dht/src/peer_validator.rs b/comms/dht/src/peer_validator.rs index b71085d6f5..f270559adf 100644 --- a/comms/dht/src/peer_validator.rs +++ b/comms/dht/src/peer_validator.rs @@ -22,13 +22,14 @@ use log::*; use tari_comms::{ - peer_manager::{NodeId, Peer, PeerManagerError}, + connection_manager::validate_address_and_source, + net_address::{MultiaddrWithStats, MultiaddressesWithStats, PeerAddressSource}, + peer_manager::{NodeId, Peer, PeerFlags, PeerManagerError}, types::CommsPublicKey, - validate_peer_addresses, PeerManager, }; -use crate::DhtConfig; +use crate::{rpc::PeerInfo, DhtConfig}; const LOG_TARGET: &str = "dht::network_discovery::peer_validator"; @@ -41,6 +42,8 @@ pub enum PeerValidatorError { InvalidPeerSignature { peer: NodeId }, #[error("One or more peer addresses were invalid for '{peer}'")] InvalidPeerAddresses { peer: NodeId }, + #[error("Peer '{peer}' was banned")] + PeerHasNoAddresses { peer: NodeId }, #[error("Peer manager error: {0}")] PeerManagerError(#[from] PeerManagerError), } @@ -59,73 +62,54 @@ impl<'a> PeerValidator<'a> { /// Validates the new peer against the current peer database. Returning true if a new peer was added and false if /// the peer already exists. 
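// Editor's sketch (not part of the patch): typical use of the reworked validator whose
// implementation follows. `peer_manager`, `dht_config` and `peer_info` are assumed to come
// from the surrounding DHT context; `peer_info` is a PeerInfo decoded from a join,
// discovery or GetPeers message.
let validator = PeerValidator::new(&peer_manager, &dht_config);
let is_new = validator.validate_and_add_peer(peer_info).await?;
if is_new {
    // Only addresses carrying a valid identity claim were stored for this peer.
    debug!(target: LOG_TARGET, "Added previously unknown peer");
}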
- pub async fn validate_and_add_peer(&self, new_peer: Peer) -> Result { - validate_node_id(&new_peer.public_key, &new_peer.node_id)?; + pub async fn validate_and_add_peer(&self, new_peer: PeerInfo) -> Result { + let node_id = NodeId::from_public_key(&new_peer.public_key); - if let Err(err) = validate_peer_addresses(new_peer.addresses.iter(), self.config.allow_test_addresses) { - warn!(target: LOG_TARGET, "Invalid peer address: {}", err); - return Err(PeerValidatorError::InvalidPeerAddresses { peer: new_peer.node_id }); + if new_peer.addresses.is_empty() { + return Err(PeerValidatorError::PeerHasNoAddresses { peer: node_id }); } - - let can_update = match new_peer.is_valid_identity_signature() { - // Update/insert peer - Some(true) => true, - Some(false) => return Err(PeerValidatorError::InvalidPeerSignature { peer: new_peer.node_id }), - // Insert new peer if it doesn't exist, do not update - None => false, - }; - - trace!(target: LOG_TARGET, "Adding peer `{}`", new_peer.node_id); - - match self.peer_manager.find_by_node_id(&new_peer.node_id).await? { - Some(mut current_peer) => { - let can_update = can_update && { - // Update/insert peer if newer - // unreachable panic: can_update is true only is identity_signature is present and valid - let new_dt = new_peer - .identity_signature - .as_ref() - .map(|i| i.updated_at()) - .expect("unreachable panic"); - - // Update if new_peer has newer timestamp than current_peer, and if the newer timestamp is after the - // added date - current_peer - .identity_signature - .as_ref() - .map(|i| i.updated_at() < new_dt && ( - !current_peer.is_seed() || - current_peer.added_at < new_dt.naive_utc())) - // If None, update to peer with valid signature - .unwrap_or(true) - }; - - if !can_update { - debug!( + let mut peer = Peer::new( + new_peer.public_key.clone(), + node_id.clone(), + MultiaddressesWithStats::new(vec![]), + PeerFlags::default(), + new_peer.peer_features, + new_peer.supported_protocols, + new_peer.user_agent, + ); + + for addr in new_peer.addresses { + let multiaddr_and_stats = MultiaddrWithStats::new(addr.address.clone(), PeerAddressSource::FromDiscovery { + peer_identity_claim: addr.peer_identity_claim, + }); + match validate_address_and_source( + &new_peer.public_key, + &multiaddr_and_stats, + self.config.allow_test_addresses, + ) { + Ok(()) => { + peer.addresses + .add_address(multiaddr_and_stats.address(), multiaddr_and_stats.source()); + }, + Err(e) => { + warn!( target: LOG_TARGET, - "Peer `{}` already exists or is up to date and will not be updated", new_peer.node_id + "Peer provided info on another peer that had a bad address or signature (new peer: {} \ + address: {}): error:{}. 
Ignoring.", + new_peer.public_key, + addr.address, + e ); - return Ok(false); - } - - debug!(target: LOG_TARGET, "Updating peer `{}`", new_peer.node_id); - current_peer - .update_addresses(new_peer.addresses.into_vec()) - .set_features(new_peer.features) - .set_offline(false); - if let Some(sig) = new_peer.identity_signature { - current_peer.set_valid_identity_signature(sig); - } - self.peer_manager.add_peer(current_peer).await?; - - Ok(false) - }, - None => { - debug!(target: LOG_TARGET, "Adding peer `{}`", new_peer.node_id); - self.peer_manager.add_peer(new_peer).await?; - Ok(true) - }, + }, + } } + validate_node_id(&peer.public_key, &peer.node_id)?; + + let exists = self.peer_manager.exists(&peer.public_key).await; + + self.peer_manager.add_peer(peer).await?; + + Ok(!exists) } } @@ -140,9 +124,17 @@ fn validate_node_id(public_key: &CommsPublicKey, node_id: &NodeId) -> Result> From for JoinMessage { fn from(identity: T) -> Self { let node_identity = identity.as_ref(); Self { - node_id: node_identity.node_id().to_vec(), - addresses: vec![node_identity.public_address().to_string()], + public_key: node_identity.public_key().to_vec(), + addresses: node_identity.public_addresses().iter().map(|a| a.to_vec()).collect(), peer_features: node_identity.features().bits(), nonce: OsRng.next_u64(), identity_signature: node_identity.identity_signature_read().as_ref().map(Into::into), @@ -81,8 +86,8 @@ impl fmt::Display for dht::JoinMessage { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, - "JoinMessage(NodeId = {}, Addresses = {:?}, Features = {:?})", - self.node_id.to_hex(), + "JoinMessage(PK = {}, Addresses = {:?}, Features = {:?})", + self.public_key.to_hex(), self.addresses, PeerFeatures::from_bits_truncate(self.peer_features), ) @@ -91,46 +96,144 @@ impl fmt::Display for dht::JoinMessage { //---------------------------------- Rpc Message Conversions --------------------------------------------// -impl From for rpc::Peer { - fn from(peer: Peer) -> Self { - rpc::Peer { - public_key: peer.public_key.to_vec(), - addresses: peer +impl TryFrom for PeerInfo { + type Error = anyhow::Error; + + fn try_from(value: DiscoveryMessage) -> Result { + let identity_signature = value + .identity_signature + .ok_or_else(|| anyhow!("DiscoveryMessage missing peer_identity_claim"))? 
+ .try_into()?; + + let identity_claim = PeerIdentityClaim { + addresses: value .addresses + .iter() + .map(|a| Multiaddr::try_from(a.clone())) + .collect::>()?, + features: PeerFeatures::from_bits_truncate(value.peer_features), + signature: identity_signature, + unverified_data: None, + }; + + Ok(Self { + public_key: RistrettoPublicKey::from_bytes(&value.public_key)?, + addresses: value .addresses .iter() - .map(|addr| addr.address.to_string()) + .map(|a| { + Ok(PeerInfoAddress { + address: Multiaddr::try_from(a.clone())?, + peer_identity_claim: identity_claim.clone(), + }) + }) + .collect::>()?, + peer_features: PeerFeatures::from_bits_truncate(value.peer_features), + supported_protocols: vec![], + user_agent: "".to_string(), + }) + } +} + +impl From for rpc::PeerInfo { + fn from(value: PeerInfo) -> Self { + Self { + public_key: value.public_key.to_vec(), + addresses: value.addresses.into_iter().map(Into::into).collect(), + peer_features: value.peer_features.bits(), + supported_protocols: value + .supported_protocols + .into_iter() + .map(|b| b.as_ref().to_vec()) .collect(), - peer_features: peer.features.bits(), - identity_signature: peer.identity_signature.as_ref().map(Into::into), + user_agent: value.user_agent, } } } -impl TryInto for rpc::Peer { +impl From for rpc::PeerInfoAddress { + fn from(value: PeerInfoAddress) -> Self { + Self { + address: value.address.to_vec(), + peer_identity_claim: Some(value.peer_identity_claim.into()), + } + } +} + +impl From for rpc::PeerIdentityClaim { + fn from(value: PeerIdentityClaim) -> Self { + Self { + addresses: value.addresses.iter().map(|a| a.to_vec()).collect(), + peer_features: value.features.bits(), + identity_signature: Some((&value.signature).into()), + } + } +} + +impl TryInto for rpc::PeerInfo { type Error = anyhow::Error; - fn try_into(self) -> Result { - let pk = CommsPublicKey::from_bytes(&self.public_key)?; - let node_id = NodeId::from_public_key(&pk); + fn try_into(self) -> Result { + let public_key = CommsPublicKey::from_bytes(&self.public_key)?; let addresses = self .addresses - .iter() - .filter_map(|addr| addr.parse::().ok()) - .collect::>(); - let mut peer = Peer::new( - pk, - node_id, - addresses.into(), - PeerFlags::NONE, - PeerFeatures::from_bits_truncate(self.peer_features), - Default::default(), - String::new(), - ); + .into_iter() + .map(TryInto::try_into) + .collect::, _>>()?; + let peer_features = PeerFeatures::from_bits_truncate(self.peer_features); + let supported_protocols = self + .supported_protocols + .into_iter() + .map(|b| b.try_into()) + .collect::, _>>()?; + Ok(PeerInfo { + public_key, + addresses, + peer_features, + user_agent: self.user_agent, + supported_protocols, + }) + } +} + +impl TryInto for rpc::PeerInfoAddress { + type Error = anyhow::Error; + + fn try_into(self) -> Result { + let address = Multiaddr::try_from(self.address)?; + let peer_identity_claim = self + .peer_identity_claim + .ok_or_else(|| anyhow::anyhow!("Missing peer identity claim"))? 
+ .try_into()?; - peer.identity_signature = self.identity_signature.map(TryInto::try_into).transpose()?; + Ok(PeerInfoAddress { + address, + peer_identity_claim, + }) + } +} + +impl TryInto for rpc::PeerIdentityClaim { + type Error = anyhow::Error; + + fn try_into(self) -> Result { + let addresses = self + .addresses + .into_iter() + .filter_map(|addr| Multiaddr::try_from(addr).ok()) + .collect::>(); - Ok(peer) + let features = PeerFeatures::from_bits_truncate(self.peer_features); + let signature = self + .identity_signature + .map(TryInto::try_into) + .ok_or_else(|| anyhow::anyhow!("No signature"))??; + Ok(PeerIdentityClaim { + addresses, + features, + signature, + unverified_data: None, + }) } } diff --git a/comms/dht/src/proto/rpc.proto b/comms/dht/src/proto/rpc.proto index 909f1cb0a1..3728c0012c 100644 --- a/comms/dht/src/proto/rpc.proto +++ b/comms/dht/src/proto/rpc.proto @@ -25,13 +25,27 @@ message GetPeersRequest { // GET peers response message GetPeersResponse { - Peer peer = 1; + PeerInfo peer = 1; } // Minimal peer information -message Peer { +message PeerInfo { bytes public_key = 1; - repeated string addresses = 2; + repeated PeerInfoAddress addresses = 2; uint64 peer_features = 3; - tari.dht.common.IdentitySignature identity_signature = 4; + repeated bytes supported_protocols = 4; + // Note: not part of the signature + string user_agent = 5; + +} + +message PeerInfoAddress { + bytes address = 1; + PeerIdentityClaim peer_identity_claim = 2; +} + +message PeerIdentityClaim { + repeated bytes addresses = 1; + uint64 peer_features = 2; + tari.dht.common.IdentitySignature identity_signature = 3; } diff --git a/comms/dht/src/rpc/mod.rs b/comms/dht/src/rpc/mod.rs index 826641495c..2b58fe62d5 100644 --- a/comms/dht/src/rpc/mod.rs +++ b/comms/dht/src/rpc/mod.rs @@ -36,6 +36,9 @@ use tari_comms_rpc_macros::tari_rpc; use crate::proto::rpc::{GetCloserPeersRequest, GetPeersRequest, GetPeersResponse}; +mod peer_info; +pub use peer_info::{PeerInfo, PeerInfoAddress}; + #[tari_rpc(protocol_name = b"t/dht/1", server_struct = DhtService, client_struct = DhtClient)] pub trait DhtRpcService: Send + Sync + 'static { /// Fetches and returns nodes (as in PeerFeatures::COMMUNICATION_NODE) as per `GetCloserPeersRequest` diff --git a/comms/dht/src/rpc/peer_info.rs b/comms/dht/src/rpc/peer_info.rs new file mode 100644 index 0000000000..bcaae7edca --- /dev/null +++ b/comms/dht/src/rpc/peer_info.rs @@ -0,0 +1,67 @@ +// Copyright 2022. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use tari_comms::{ + multiaddr::Multiaddr, + peer_manager::{Peer, PeerFeatures, PeerIdentityClaim}, + protocol::ProtocolId, + types::CommsPublicKey, +}; + +pub struct PeerInfo { + pub public_key: CommsPublicKey, + pub addresses: Vec, + pub peer_features: PeerFeatures, + pub user_agent: String, + pub supported_protocols: Vec, +} + +pub struct PeerInfoAddress { + pub address: Multiaddr, + pub peer_identity_claim: PeerIdentityClaim, +} + +impl From for PeerInfo { + fn from(peer: Peer) -> Self { + PeerInfo { + public_key: peer.public_key, + addresses: peer + .addresses + .addresses() + .iter() + .filter_map(|addr| { + // TODO: find the source of the empty addresses + if addr.address().is_empty() { + return None; + } + addr.source.peer_identity_claim().map(|claim| PeerInfoAddress { + address: addr.address().clone(), + peer_identity_claim: claim.clone(), + }) + }) + .collect(), + peer_features: peer.features, + user_agent: peer.user_agent, + supported_protocols: peer.supported_protocols, + } + } +} diff --git a/comms/dht/src/rpc/service.rs b/comms/dht/src/rpc/service.rs index 77c75be981..3dc9d35c34 100644 --- a/comms/dht/src/rpc/service.rs +++ b/comms/dht/src/rpc/service.rs @@ -34,7 +34,7 @@ use tokio::{sync::mpsc, task}; use crate::{ proto::rpc::{GetCloserPeersRequest, GetPeersRequest, GetPeersResponse}, - rpc::DhtRpcService, + rpc::{DhtRpcService, PeerInfo}, }; const LOG_TARGET: &str = "comms::dht::rpc"; @@ -62,10 +62,19 @@ impl DhtRpcServiceImpl { task::spawn(async move { let iter = peers .into_iter() - .map(|peer| GetPeersResponse { - peer: Some(peer.into()), + .filter_map(|peer| { + let peer_info: PeerInfo = peer.into(); + + if peer_info.addresses.is_empty() { + None + } else { + Some(GetPeersResponse { + peer: Some(peer_info.into()), + }) + } }) .map(Ok); + let _result = utils::mpsc::send_all(&tx, iter).await; }); @@ -147,7 +156,8 @@ impl DhtRpcService for DhtRpcServiceImpl { let mut query = PeerQuery::new().select_where(|peer| { &peer.node_id != requester_node_id && (message.include_clients || !peer.features.is_client()) && - !peer.is_banned() + !peer.is_banned() && + peer.deleted_at.is_none() }); if message.n > 0 { diff --git a/comms/dht/src/rpc/test.rs b/comms/dht/src/rpc/test.rs index badd552bd4..f7e325f93f 100644 --- a/comms/dht/src/rpc/test.rs +++ b/comms/dht/src/rpc/test.rs @@ -24,9 +24,8 @@ use std::{convert::TryInto, sync::Arc, time::Duration}; use futures::StreamExt; use tari_comms::{ - peer_manager::{NodeDistance, NodeId, Peer, PeerFeatures}, + peer_manager::{NodeDistance, NodeId, PeerFeatures}, protocol::rpc::{mock::RpcRequestMock, RpcStatusCode}, - runtime, test_utils::node_identity::{build_node_identity, ordered_node_identities_by_distance}, PeerManager, }; @@ -50,8 +49,9 @@ fn setup() -> (DhtRpcServiceImpl, RpcRequestMock, Arc) { // Unit tests for get_closer_peers request mod get_closer_peers { use super::*; + use crate::rpc::PeerInfo; - #[runtime::test] + #[tokio::test] async fn it_returns_empty_peer_stream() { let (service, mock, _) = setup(); let 
node_identity = build_node_identity(PeerFeatures::COMMUNICATION_NODE); @@ -69,7 +69,7 @@ mod get_closer_peers { assert!(next.is_none()); } - #[runtime::test] + #[tokio::test] async fn it_returns_closest_peers() { let (service, mock, peer_manager) = setup(); let node_identity = build_node_identity(PeerFeatures::COMMUNICATION_NODE); @@ -94,17 +94,17 @@ mod get_closer_peers { .map(Result::unwrap) .map(|r| r.peer.unwrap()) .map(|p| p.try_into().unwrap()) - .collect::>(); + .collect::>(); let mut dist = NodeDistance::zero(); for p in &peers { - let current = p.node_id.distance(node_identity.node_id()); + let current = NodeId::from_public_key(&p.public_key).distance(node_identity.node_id()); assert!(dist < current); dist = current; } } - #[runtime::test] + #[tokio::test] async fn it_returns_n_peers() { let (service, mock, peer_manager) = setup(); @@ -126,7 +126,7 @@ mod get_closer_peers { assert_eq!(results.len(), 5); } - #[runtime::test] + #[tokio::test] async fn it_skips_excluded_peers() { let (service, mock, peer_manager) = setup(); @@ -150,7 +150,7 @@ mod get_closer_peers { assert!(peers.all(|p| p.public_key != excluded_peer.public_key().as_bytes())); } - #[runtime::test] + #[tokio::test] async fn it_errors_if_maximum_n_exceeded() { let (service, mock, _) = setup(); let req = GetCloserPeersRequest { @@ -168,12 +168,12 @@ mod get_closer_peers { mod get_peers { use std::time::Duration; - use tari_comms::{peer_manager::Peer, test_utils::node_identity::build_many_node_identities}; + use tari_comms::test_utils::node_identity::build_many_node_identities; use super::*; - use crate::proto::rpc::GetPeersRequest; + use crate::{proto::rpc::GetPeersRequest, rpc::PeerInfo}; - #[runtime::test] + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn it_returns_empty_peer_stream() { let (service, mock, _) = setup(); let node_identity = build_node_identity(PeerFeatures::COMMUNICATION_NODE); @@ -189,7 +189,7 @@ mod get_peers { assert!(next.is_none()); } - #[runtime::test] + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn it_returns_all_peers() { let (service, mock, peer_manager) = setup(); let nodes = build_many_node_identities(3, PeerFeatures::COMMUNICATION_NODE); @@ -214,13 +214,13 @@ mod get_peers { .map(Result::unwrap) .map(|r| r.peer.unwrap()) .map(|p| p.try_into().unwrap()) - .collect::>(); + .collect::>(); - assert_eq!(peers.iter().filter(|p| p.features.is_client()).count(), 2); - assert_eq!(peers.iter().filter(|p| p.features.is_node()).count(), 3); + assert_eq!(peers.iter().filter(|p| p.peer_features.is_client()).count(), 2); + assert_eq!(peers.iter().filter(|p| p.peer_features.is_node()).count(), 3); } - #[runtime::test] + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn it_excludes_clients() { let (service, mock, peer_manager) = setup(); let nodes = build_many_node_identities(3, PeerFeatures::COMMUNICATION_NODE); @@ -245,12 +245,12 @@ mod get_peers { .map(Result::unwrap) .map(|r| r.peer.unwrap()) .map(|p| p.try_into().unwrap()) - .collect::>(); + .collect::>(); - assert!(peers.iter().all(|p| p.features.is_node())); + assert!(peers.iter().all(|p| p.peer_features.is_node())); } - #[runtime::test] + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] async fn it_returns_n_peers() { let (service, mock, peer_manager) = setup(); diff --git a/comms/dht/src/storage/connection.rs b/comms/dht/src/storage/connection.rs index d614cdf675..48ffe4c2bd 100644 --- a/comms/dht/src/storage/connection.rs +++ b/comms/dht/src/storage/connection.rs @@ -163,19 
+163,18 @@ impl DbConnection { #[cfg(test)] mod test { use diesel::{dsl::sql, sql_types::Integer, RunQueryDsl}; - use tari_comms::runtime; use tari_test_utils::random; use super::*; - #[runtime::test] + #[tokio::test] async fn connect_and_migrate() { let conn = DbConnection::connect_memory(random::string(8)).unwrap(); let output = conn.migrate().unwrap(); assert!(output.starts_with("Running migration")); } - #[runtime::test] + #[tokio::test] async fn memory_connections() { let id = random::string(8); let conn = DbConnection::connect_memory(id.clone()).unwrap(); diff --git a/comms/dht/src/store_forward/database/mod.rs b/comms/dht/src/store_forward/database/mod.rs index e080e0e2f8..9079b20707 100644 --- a/comms/dht/src/store_forward/database/mod.rs +++ b/comms/dht/src/store_forward/database/mod.rs @@ -227,12 +227,11 @@ impl StoreAndForwardDatabase { #[cfg(test)] mod test { - use tari_comms::runtime; use tari_test_utils::random; use super::*; - #[runtime::test] + #[tokio::test] async fn insert_messages() { let conn = DbConnection::connect_memory(random::string(8)).unwrap(); conn.migrate().unwrap(); @@ -252,7 +251,7 @@ mod test { assert_eq!(messages[1].body_hash, msg2.body_hash); } - #[runtime::test] + #[tokio::test] async fn remove_messages() { let conn = DbConnection::connect_memory(random::string(8)).unwrap(); conn.migrate().unwrap(); @@ -279,7 +278,7 @@ mod test { assert_eq!(messages[0].id, msg2_id); } - #[runtime::test] + #[tokio::test] async fn truncate_messages() { let conn = DbConnection::connect_memory(random::string(8)).unwrap(); conn.migrate().unwrap(); diff --git a/comms/dht/src/store_forward/saf_handler/task.rs b/comms/dht/src/store_forward/saf_handler/task.rs index 7e8c37c859..3b10512618 100644 --- a/comms/dht/src/store_forward/saf_handler/task.rs +++ b/comms/dht/src/store_forward/saf_handler/task.rs @@ -624,7 +624,7 @@ mod test { use std::time::Duration; use chrono::{Timelike, Utc}; - use tari_comms::{message::MessageExt, runtime, wrap_in_envelope_body}; + use tari_comms::{message::MessageExt, wrap_in_envelope_body}; use tari_test_utils::collect_recv; use tari_utilities::{hex, hex::Hex}; use tokio::{sync::mpsc, task, time::sleep}; @@ -822,7 +822,7 @@ mod test { assert!(stored_messages.iter().any(|s| s.body == msg2.as_bytes())); } - #[runtime::test] + #[tokio::test] #[allow(clippy::similar_names, clippy::too_many_lines)] async fn receive_stored_messages() { let spy = service_spy(); @@ -940,7 +940,7 @@ mod test { assert_eq!(last_saf_received.second(), msg2_time.second()); } - #[runtime::test] + #[tokio::test] async fn stored_at_in_future() { let spy = service_spy(); let (requester, _) = create_store_and_forward_mock(); @@ -1013,7 +1013,7 @@ mod test { assert!(last_saf_received.is_none()); } - #[runtime::test] + #[tokio::test] async fn saf_message_was_requested() { let spy = service_spy(); let (saf_requester, saf_mock_state) = create_store_and_forward_mock(); diff --git a/comms/dht/src/store_forward/store.rs b/comms/dht/src/store_forward/store.rs index c8a91b9d00..2e335a199f 100644 --- a/comms/dht/src/store_forward/store.rs +++ b/comms/dht/src/store_forward/store.rs @@ -453,7 +453,7 @@ mod test { use std::time::Duration; use chrono::Utc; - use tari_comms::{runtime, wrap_in_envelope_body}; + use tari_comms::wrap_in_envelope_body; use tari_test_utils::async_assert_eventually; use tari_utilities::hex::Hex; @@ -470,7 +470,7 @@ mod test { }, }; - #[runtime::test] + #[tokio::test] async fn cleartext_message_no_origin() { let (requester, mock_state) = create_store_and_forward_mock(); @@ 
-496,7 +496,7 @@ mod test { assert_eq!(messages.len(), 0); } - #[runtime::test] + #[tokio::test] async fn decryption_succeeded_no_store() { let (requester, mock_state) = create_store_and_forward_mock(); @@ -526,7 +526,7 @@ mod test { assert_eq!(mock_state.call_count(), 0); } - #[runtime::test] + #[tokio::test] async fn decryption_failed_should_store() { let (requester, mock_state) = create_store_and_forward_mock(); let spy = service_spy(); @@ -567,7 +567,7 @@ mod test { assert!(duration.num_seconds() <= 5); } - #[runtime::test] + #[tokio::test] async fn decryption_failed_banned_peer() { let (requester, mock_state) = create_store_and_forward_mock(); let spy = service_spy(); diff --git a/comms/dht/src/test_utils/makers.rs b/comms/dht/src/test_utils/makers.rs index d4b3213a5a..9e3428cfe0 100644 --- a/comms/dht/src/test_utils/makers.rs +++ b/comms/dht/src/test_utils/makers.rs @@ -24,7 +24,7 @@ use std::{convert::TryInto, sync::Arc}; use rand::rngs::OsRng; use tari_comms::{ message::{InboundMessage, MessageExt, MessageTag}, - multiaddr::Multiaddr, + net_address::MultiaddressesWithStats, peer_manager::{NodeId, NodeIdentity, Peer, PeerFeatures, PeerFlags, PeerManager}, transports::MemoryTransport, types::{CommsDHKE, CommsDatabase, CommsPublicKey, CommsSecretKey}, @@ -139,7 +139,7 @@ pub fn make_dht_inbound_message( Arc::new(Peer::new( node_identity.public_key().clone(), node_identity.node_id().clone(), - Vec::::new().into(), + MultiaddressesWithStats::empty(), PeerFlags::empty(), PeerFeatures::COMMUNICATION_NODE, Default::default(), @@ -176,7 +176,7 @@ pub fn make_dht_inbound_message_raw( Arc::new(Peer::new( node_identity.public_key().clone(), node_identity.node_id().clone(), - Vec::::new().into(), + MultiaddressesWithStats::empty(), PeerFlags::empty(), PeerFeatures::COMMUNICATION_NODE, Default::default(), diff --git a/comms/dht/tests/dht.rs b/comms/dht/tests/dht.rs index 188ca055f5..9eec7db933 100644 --- a/comms/dht/tests/dht.rs +++ b/comms/dht/tests/dht.rs @@ -172,7 +172,7 @@ async fn setup_comms_dht( let comms = CommsBuilder::new() .allow_test_addresses() // In this case the listener address and the public address are the same (/memory/...) 
- .with_listener_address(node_identity.public_address()) + .with_listener_address(node_identity.first_public_address()) .with_shutdown_signal(shutdown_signal) .with_node_identity(node_identity) .with_peer_storage(storage,None) @@ -230,9 +230,9 @@ fn dht_config() -> DhtConfig { config } -#[tokio::test] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] #[allow(non_snake_case)] -async fn dht_join_propagation() { +async fn test_dht_join_propagation() { // Create 3 nodes where only Node B knows A and C, but A and C want to talk to each other // Node C knows no one @@ -300,9 +300,9 @@ async fn dht_join_propagation() { node_C.shutdown().await; } -#[tokio::test] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] #[allow(non_snake_case)] -async fn dht_discover_propagation() { +async fn test_dht_discover_propagation() { // Create 4 nodes where A knows B, B knows A and C, C knows B and D, and D knows C // Node D knows no one @@ -374,9 +374,9 @@ async fn dht_discover_propagation() { assert!(node_D_peer_manager.exists(node_A.node_identity().public_key()).await); } -#[tokio::test] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] #[allow(non_snake_case)] -async fn dht_store_forward() { +async fn test_dht_store_forward() { let node_C_node_identity = make_node_identity(PeerFeatures::COMMUNICATION_NODE); // Node B knows about Node C let node_B = make_node("node_B", PeerFeatures::COMMUNICATION_NODE, dht_config(), None).await; @@ -489,7 +489,7 @@ async fn dht_store_forward() { #[tokio::test] #[allow(non_snake_case)] #[allow(clippy::too_many_lines)] -async fn dht_propagate_dedup() { +async fn test_dht_propagate_dedup() { let mut config = dht_config(); // For this test we want to exactly measure the path of a message, so we disable repropagation of messages (i.e // allow 1 occurrence) @@ -634,7 +634,7 @@ async fn dht_propagate_dedup() { #[tokio::test] #[allow(non_snake_case)] #[allow(clippy::too_many_lines)] -async fn dht_do_not_store_invalid_message_in_dedup() { +async fn test_dht_do_not_store_invalid_message_in_dedup() { let mut config = dht_config(); config.dedup_allowed_message_occurrences = 1; @@ -803,7 +803,7 @@ async fn dht_do_not_store_invalid_message_in_dedup() { #[tokio::test] #[allow(non_snake_case)] -async fn dht_repropagate() { +async fn test_dht_repropagate() { let mut config = dht_config(); config.dedup_allowed_message_occurrences = 3; let mut node_C = make_node("node_C", PeerFeatures::COMMUNICATION_NODE, config.clone(), []).await; @@ -904,9 +904,9 @@ async fn dht_repropagate() { node_C.shutdown().await; } -#[tokio::test] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] #[allow(non_snake_case)] -async fn dht_propagate_message_contents_not_malleable_ban() { +async fn test_dht_propagate_message_contents_not_malleable_ban() { let node_C = make_node("node_C", PeerFeatures::COMMUNICATION_NODE, dht_config(), None).await; // Node B knows about Node C let mut node_B = make_node( @@ -1009,9 +1009,9 @@ async fn dht_propagate_message_contents_not_malleable_ban() { node_C.shutdown().await; } -#[tokio::test] +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] #[allow(non_snake_case)] -async fn dht_header_not_malleable() { +async fn test_dht_header_not_malleable() { let node_C = make_node("node_C", PeerFeatures::COMMUNICATION_NODE, dht_config(), None).await; // Node B knows about Node C let mut node_B = make_node( diff --git a/infrastructure/storage/src/key_val_store/cached_store.rs b/infrastructure/storage/src/key_val_store/cached_store.rs new file mode 
100644 index 0000000000..23094833cd --- /dev/null +++ b/infrastructure/storage/src/key_val_store/cached_store.rs @@ -0,0 +1,125 @@ +// Copyright 2022. The Tari Project +// +// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the +// following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following +// disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the +// following disclaimer in the documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote +// products derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use std::{collections::HashMap, hash::Hash, sync::RwLock}; + +use crate::{IterationResult, KeyValStoreError, KeyValueStore}; + +pub struct CachedStore { + cache: RwLock>, + + actual_store: DS, +} + +impl> CachedStore { + pub fn new(inner: DS) -> Self { + Self { + cache: RwLock::new(HashMap::new()), + actual_store: inner, + } + } + + fn ensure_cache_is_filled(&self) -> Result<(), KeyValStoreError> { + let empty_check_guard = self.cache.read().map_err(|_| KeyValStoreError::PoisonedAccess)?; + if empty_check_guard.is_empty() { + // Drop here or we can't get a read lock + drop(empty_check_guard); + let mut guard = self.cache.write().map_err(|_| KeyValStoreError::PoisonedAccess)?; + // fill cache + self.actual_store.for_each(|item| match item { + Ok((k, v)) => { + guard.insert(k, v); + IterationResult::Continue + }, + Err(_) => IterationResult::Break, + })?; + } + Ok(()) + } +} + +impl KeyValueStore for CachedStore +where DS: KeyValueStore +{ + fn insert(&self, key: K, value: V) -> Result<(), KeyValStoreError> { + self.ensure_cache_is_filled()?; + let mut guard = self.cache.write().map_err(|_| KeyValStoreError::PoisonedAccess)?; + guard.insert(key.clone(), value.clone()); + drop(guard); + self.actual_store.insert(key, value)?; + Ok(()) + } + + fn get(&self, key: &K) -> Result, KeyValStoreError> { + self.ensure_cache_is_filled()?; + let read_lock = self.cache.read().map_err(|_| KeyValStoreError::PoisonedAccess)?; + Ok(read_lock.get(key).cloned()) + } + + fn get_many(&self, keys: &[K]) -> Result, KeyValStoreError> { + let mut result = Vec::with_capacity(keys.len()); + for key in keys { + if let Some(value) = self.get(key)? 
{ + result.push(value); + } + } + Ok(result) + } + + fn size(&self) -> Result { + self.ensure_cache_is_filled()?; + let read_guard = self.cache.read().map_err(|_| KeyValStoreError::PoisonedAccess)?; + Ok(read_guard.len()) + } + + fn for_each(&self, mut f: F) -> Result<(), KeyValStoreError> + where + Self: Sized, + F: FnMut(Result<(K, V), KeyValStoreError>) -> IterationResult, + { + self.ensure_cache_is_filled()?; + let read_guard = self.cache.read().map_err(|_| KeyValStoreError::PoisonedAccess)?; + let vec = read_guard + .iter() + .map(|(k, v)| (k.clone(), v.clone())) + .collect::>(); + drop(read_guard); + for (k, v) in vec { + f(Ok((k, v))); + } + Ok(()) + } + + fn exists(&self, key: &K) -> Result { + self.ensure_cache_is_filled()?; + let read_guard = self.cache.read().map_err(|_| KeyValStoreError::PoisonedAccess)?; + Ok(read_guard.contains_key(key)) + } + + fn delete(&self, key: &K) -> Result<(), KeyValStoreError> { + self.ensure_cache_is_filled()?; + let mut write_guard = self.cache.write().map_err(|_| KeyValStoreError::PoisonedAccess)?; + write_guard.remove(key); + drop(write_guard); + self.actual_store.delete(key) + } +} diff --git a/infrastructure/storage/src/key_val_store/mod.rs b/infrastructure/storage/src/key_val_store/mod.rs index 8bb27f7a69..120736e7d0 100644 --- a/infrastructure/storage/src/key_val_store/mod.rs +++ b/infrastructure/storage/src/key_val_store/mod.rs @@ -20,6 +20,7 @@ // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +pub mod cached_store; pub mod error; pub mod hmap_database; #[allow(clippy::module_inception)] diff --git a/infrastructure/storage/src/lib.rs b/infrastructure/storage/src/lib.rs index 481c7363cd..b1277e66ce 100644 --- a/infrastructure/storage/src/lib.rs +++ b/infrastructure/storage/src/lib.rs @@ -5,6 +5,7 @@ mod key_val_store; pub mod lmdb_store; pub use key_val_store::{ + cached_store::CachedStore, key_val_store::IterationResult, lmdb_database::LMDBWrapper, HashmapDatabase, diff --git a/integration_tests/log4rs/cucumber.yml b/integration_tests/log4rs/cucumber.yml index 93a1ebec90..f5a1c8263d 100644 --- a/integration_tests/log4rs/cucumber.yml +++ b/integration_tests/log4rs/cucumber.yml @@ -4,7 +4,7 @@ appenders: # An appender named "stdout" that writes to stdout stdout: kind: rolling_file - path: "log/stdout.log" + path: "{{log_dir}}/log/stdout.log" policy: kind: compound trigger: @@ -14,13 +14,13 @@ appenders: kind: fixed_window base: 1 count: 10 - pattern: "log/stdout.{}.log" + pattern: "{{log_dir}}/log/stdout.{}.log" encoder: pattern: "{m}" # An appender named "network" that writes to a file with a custom pattern encoder network: kind: rolling_file - path: "log/network.log" + path: "{{log_dir}}/log/network.log" policy: kind: compound trigger: @@ -30,12 +30,12 @@ appenders: kind: fixed_window base: 1 count: 10 - pattern: "log/network.{}.log" + pattern: "{{log_dir}}/log/network.{}.log" encoder: - pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} {f}.{L} {i} [{t}] {l:5} {m}{n}" + pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{X(grpc)}] {f}.{L} {i} [{t}] {l:5} {m}{n}" base_layer_base_node: kind: rolling_file - path: "log/base_node.log" + path: "{{log_dir}}/log/base_node.log" policy: kind: compound trigger: @@ -45,12 +45,12 @@ appenders: kind: fixed_window base: 1 count: 10 - pattern: "log/base_node.{}.log" + pattern: "{{log_dir}}/log/base_node.{}.log" encoder: - pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} {f}.{L} {i} [{t}] {l:5} {m}{n}" + 
pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{X(grpc)}] {f}.{L} {i} [{t}] {l:5} {m}{n}" base_layer_wallet: kind: rolling_file - path: "log/wallet.log" + path: "{{log_dir}}/log/wallet.log" policy: kind: compound trigger: @@ -60,13 +60,13 @@ appenders: kind: fixed_window base: 1 count: 10 - pattern: "log/wallet.{}.log" + pattern: "{{log_dir}}/log/wallet.{}.log" encoder: - pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} {f}.{L} {i} [{t}] {l:5} {m}{n}" + pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{X(grpc)}] {f}.{L} {i} [{t}] {l:5} {m}{n}" # An appender named "other" that writes to a file with a custom pattern encoder other: kind: rolling_file - path: "log/other.log" + path: "{{log_dir}}/log/other.log" policy: kind: compound trigger: @@ -76,9 +76,9 @@ appenders: kind: fixed_window base: 1 count: 5 - pattern: "log/other.{}.log" + pattern: "{{log_dir}}/log/other.{}.log" encoder: - pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} {f}.{L} {i} [{t}] {l:5} {m}{n}" + pattern: "{d(%Y-%m-%d %H:%M:%S.%f)} [{X(grpc)}] {f}.{L} {i} [{t}] {l:5} {m}{n}" # We don't want prints during cucumber test, everything useful will in logs. # root: # level: warn diff --git a/integration_tests/tests/cucumber.rs b/integration_tests/tests/cucumber.rs index 36c3e21626..dfcad43f9b 100644 --- a/integration_tests/tests/cucumber.rs +++ b/integration_tests/tests/cucumber.rs @@ -382,7 +382,27 @@ async fn node_pending_connection_to(world: &mut TariWorld, first_node: String, s if res.connected_peers.iter().any(|p| p.public_key == second_client_pubkey) { return; } - tokio::time::sleep(Duration::from_secs(5)).await; + tokio::time::sleep(Duration::from_secs(1)).await; + } + + panic!("Peer was not connected in time"); +} + +#[when(expr = "I wait for {word} to have {int} connections")] +async fn wait_for_node_have_x_connections(world: &mut TariWorld, node: String, num_connections: usize) { + let mut node_client = world.get_base_node_or_wallet_client(&node).await.unwrap(); + + for _i in 0..100 { + let res = match node_client { + NodeClient::Wallet(ref mut client) => client.list_connected_peers(Empty {}).await.unwrap(), + NodeClient::BaseNode(ref mut client) => client.list_connected_peers(Empty {}).await.unwrap(), + }; + let res = res.into_inner(); + + if res.connected_peers.len() >= num_connections { + return; + } + tokio::time::sleep(Duration::from_secs(1)).await; } panic!("Peer was not connected in time"); @@ -3944,7 +3964,7 @@ async fn change_base_node_of_wallet_via_cli(world: &mut TariWorld, wallet: Strin let args = SetBaseNodeArgs { public_key: UniPublicKey::from_str(node_identity.public_key.to_hex().as_str()).unwrap(), - address: Multiaddr::from_str(node_identity.public_address.as_str()).unwrap(), + address: Multiaddr::from_str(node_identity.public_addresses[0].as_str()).unwrap(), }; cli.command2 = Some(CliCommands::SetBaseNode(args)); @@ -3967,7 +3987,7 @@ async fn change_custom_base_node_of_wallet_via_cli(world: &mut TariWorld, wallet let args = SetBaseNodeArgs { public_key: UniPublicKey::from_str(node_identity.public_key.to_hex().as_str()).unwrap(), - address: Multiaddr::from_str(node_identity.public_address.as_str()).unwrap(), + address: Multiaddr::from_str(node_identity.public_addresses[0].as_str()).unwrap(), }; cli.command2 = Some(CliCommands::SetCustomBaseNode(args)); @@ -4306,7 +4326,7 @@ async fn ffi_start_wallet_connected_to_base_node(world: &mut TariWorld, wallet: let base_node = world.get_node(&base_node).unwrap(); world.get_ffi_wallet(&wallet).unwrap().add_base_node( base_node.identity.public_key().to_hex(), - base_node.identity.public_address().to_string(), 
+ base_node.identity.first_public_address().to_string(), ); } @@ -4317,7 +4337,7 @@ async fn ffi_start_wallet_connected_to_seed_node(world: &mut TariWorld, wallet: let seed_node = world.get_node(&seed_node).unwrap(); world.get_ffi_wallet(&wallet).unwrap().add_base_node( seed_node.identity.public_key().to_hex(), - seed_node.identity.public_address().to_string(), + seed_node.identity.first_public_address().to_string(), ); } @@ -4326,7 +4346,7 @@ async fn ffi_set_base_node(world: &mut TariWorld, base_node: String, wallet: Str let base_node = world.get_node(&base_node).unwrap(); world.get_ffi_wallet(&wallet).unwrap().add_base_node( base_node.identity.public_key().to_hex(), - base_node.identity.public_address().to_string(), + base_node.identity.first_public_address().to_string(), ); } @@ -4738,7 +4758,7 @@ async fn ffi_recover_wallet(world: &mut TariWorld, wallet_name: String, ffi_wall let base_node = world.get_node(&base_node).unwrap(); world.get_ffi_wallet(&ffi_wallet_name).unwrap().add_base_node( base_node.identity.public_key().to_hex(), - base_node.identity.public_address().to_string(), + base_node.identity.first_public_address().to_string(), ); } @@ -4750,7 +4770,7 @@ async fn ffi_restart_wallet(world: &mut TariWorld, wallet: String, base_node: St let ffi_wallet = world.get_ffi_wallet(&wallet).unwrap(); ffi_wallet.add_base_node( base_node.identity.public_key().to_hex(), - base_node.identity.public_address().to_string(), + base_node.identity.first_public_address().to_string(), ); } @@ -5023,7 +5043,11 @@ fn main() { info!(target: LOG_TARGET, "Starting {} {}", scenario.keyword, scenario.name); }) }); - world.run_and_exit("tests/features/").await; + world + // .fail_on_skipped() + // .fail_fast() - Not yet supported in 0.18 + .run_and_exit("tests/features/") + .await; }); // If by any chance we have anything in the stdout buffer just log it. 
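The new "I wait for {word} to have {int} connections" step added to cucumber.rs above is a bounded poll: it asks the node for its connected peers over gRPC, returns as soon as the count reaches the target, otherwise sleeps one second and tries again, and panics after 100 attempts. A minimal, self-contained sketch of that control flow follows; wait_for_connections and the count_connections closure are hypothetical stand-ins for the step function and its list_connected_peers call, so only the retry logic is taken from the diff.

use std::time::Duration;

// Poll until `count_connections()` reports at least `wanted` connections,
// retrying up to 100 times with a one second sleep between attempts.
async fn wait_for_connections<F>(mut count_connections: F, wanted: usize)
where
    F: FnMut() -> usize,
{
    for _ in 0..100 {
        if count_connections() >= wanted {
            return;
        }
        tokio::time::sleep(Duration::from_secs(1)).await;
    }
    panic!("Peer was not connected in time");
}

#[tokio::main]
async fn main() {
    // Trivial stand-in for a node that already has 10 connections.
    wait_for_connections(|| 10, 10).await;
}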
diff --git a/integration_tests/tests/features/Sync.feature b/integration_tests/tests/features/Sync.feature index fd433ea00c..ba7dfbdbd8 100644 --- a/integration_tests/tests/features/Sync.feature +++ b/integration_tests/tests/features/Sync.feature @@ -77,8 +77,8 @@ Feature: Block Sync Scenario: When a new node joins the network, it receives all peers Given I have 10 seed nodes When I have a base node NODE1 connected to all seed nodes - # additional peer seeds are being included from config.toml [common] - Then NODE1 has at least 10 peers + When I wait for NODE1 to have 10 connections +# Then NODE1 has at least 10 peers When I have a base node NODE2 connected to node NODE1 Then NODE1 has at least 11 peers Then NODE2 has at least 11 peers diff --git a/integration_tests/tests/features/WalletFFI.feature b/integration_tests/tests/features/WalletFFI.feature index aa17c6272f..4ff2199305 100644 --- a/integration_tests/tests/features/WalletFFI.feature +++ b/integration_tests/tests/features/WalletFFI.feature @@ -108,12 +108,11 @@ Feature: Wallet FFI Then I wait for ffi wallet FFI_WALLET to have at least 1000000 uT And I send 1000000 uT from ffi wallet FFI_WALLET to wallet RECEIVER at fee 20 Then ffi wallet FFI_WALLET detects AT_LEAST 2 ffi transactions to be TRANSACTION_STATUS_BROADCAST + Then I wait until base node BASE2 has 1 unconfirmed transactions in its mempool # The broadcast check does not include delivery; create some holding points to ensure it was received - When mining node MINER mines 2 blocks - Then all nodes are at height 22 - When mining node MINER mines 2 blocks + When mining node MINER mines 4 blocks Then all nodes are at height 24 - When mining node MINER mines 6 blocks +# When mining node MINER mines 6 blocks Then I wait for wallet RECEIVER to have at least 1000000 uT And I have 1 received and 1 send transaction in ffi wallet FFI_WALLET And I start TXO validation on ffi wallet FFI_WALLET diff --git a/integration_tests/tests/utils/base_node_process.rs b/integration_tests/tests/utils/base_node_process.rs index 8fc2eea99b..de6ebe81ca 100644 --- a/integration_tests/tests/utils/base_node_process.rs +++ b/integration_tests/tests/utils/base_node_process.rs @@ -24,6 +24,7 @@ use std::{ default::Default, fmt::{Debug, Formatter}, path::PathBuf, + process, str::FromStr, sync::Arc, time::Duration, @@ -34,10 +35,9 @@ use tari_base_node::{run_base_node, BaseNodeConfig, MetricsConfig}; use tari_base_node_grpc_client::BaseNodeGrpcClient; use tari_common::configuration::CommonConfig; use tari_comms::{multiaddr::Multiaddr, peer_manager::PeerFeatures, NodeIdentity}; -use tari_comms_dht::{DbConnectionUrl, DhtConfig}; +use tari_comms_dht::DhtConfig; use tari_p2p::{auto_update::AutoUpdateConfig, Network, PeerSeedsConfig, TransportType}; use tari_shutdown::Shutdown; -use tempfile::tempdir; use tokio::task; use tonic::transport::Channel; @@ -82,6 +82,11 @@ pub async fn spawn_base_node(world: &mut TariWorld, is_seed_node: bool, bn_name: spawn_base_node_with_config(world, is_seed_node, bn_name, peers, BaseNodeConfig::default()).await; } +pub fn get_base_dir() -> PathBuf { + let crate_root = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + crate_root.join(format!("tests/temp/cucumber_{}", process::id())) +} + pub async fn spawn_base_node_with_config( world: &mut TariWorld, is_seed_node: bool, @@ -107,7 +112,11 @@ pub async fn spawn_base_node_with_config( port = get_port(18000..18499).unwrap(); grpc_port = get_port(18500..18999).unwrap(); // create a new temporary directory - temp_dir_path = 
tempdir().unwrap().path().to_path_buf(); + // temp_dir_path = tempdir().unwrap().path().to_path_buf(); + temp_dir_path = get_base_dir() .join("base_nodes") .join(format!("grpc_port_{}", grpc_port)) .join(bn_name.clone()); base_node_address = Multiaddr::from_str(&format!("/ip4/127.0.0.1/tcp/{}", port)).unwrap(); base_node_identity = NodeIdentity::random(&mut OsRng, base_node_address, PeerFeatures::COMMUNICATION_NODE); @@ -164,12 +173,13 @@ pub async fn spawn_base_node_with_config( base_node_config.base_node.p2p.transport.transport_type = TransportType::Tcp; base_node_config.base_node.p2p.transport.tcp.listener_address = format!("/ip4/127.0.0.1/tcp/{}", port).parse().unwrap(); - base_node_config.base_node.p2p.public_address = - Some(base_node_config.base_node.p2p.transport.tcp.listener_address.clone()); - base_node_config.base_node.p2p.datastore_path = temp_dir_path.to_path_buf(); + base_node_config.base_node.p2p.public_addresses = + vec![base_node_config.base_node.p2p.transport.tcp.listener_address.clone()]; + // base_node_config.base_node.p2p.datastore_path = temp_dir_path.to_path_buf(); + // base_node_config.base_node.p2p.peer_database_name = "peer_db.mdb".to_string(); base_node_config.base_node.p2p.dht = DhtConfig::default_local_test(); - base_node_config.base_node.p2p.dht.database_url = - DbConnectionUrl::File(temp_dir_path.clone().join("dht.sqlite")); + // base_node_config.base_node.p2p.dht.database_url = - // DbConnectionUrl::File(temp_dir_path.clone().join("dht.sqlite")); base_node_config.base_node.p2p.dht.network_discovery.enabled = true; base_node_config.base_node.p2p.allow_test_addresses = true; base_node_config.base_node.storage.orphan_storage_capacity = 10; @@ -177,6 +187,9 @@ base_node_config.base_node.storage.pruning_interval = 1; }; + // Hierarchically set the base path for all configs + base_node_config.base_node.set_base_path(temp_dir_path.clone()); + println!( "Initializing base node: name={}; port={}; grpc_port={}; is_seed_node={}", name_cloned, port, grpc_port, is_seed_node diff --git a/integration_tests/tests/utils/mod.rs b/integration_tests/tests/utils/mod.rs index ebf1b64492..3914d8437a 100644 --- a/integration_tests/tests/utils/mod.rs +++ b/integration_tests/tests/utils/mod.rs @@ -75,7 +75,7 @@ pub async fn get_peer_addresses(world: &TariWorld, peers: &Vec) -> Vec
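The base node spawn changes above swap tempdir() for a deterministic per-run layout: everything for a cucumber run lives under tests/temp/cucumber_<pid> inside the crate (see get_base_dir() added in base_node_process.rs), each base node gets its own subdirectory keyed by gRPC port and node name, and that path is then applied to the whole config via set_base_path(). A short sketch of the layout logic, assuming only what the diff shows: get_base_dir() mirrors the added helper, while base_node_dir() is a hypothetical wrapper around the inline .join() calls.

use std::{path::PathBuf, process};

// Root directory for all artifacts of this cucumber run, keyed by process id.
fn get_base_dir() -> PathBuf {
    let crate_root = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    crate_root.join(format!("tests/temp/cucumber_{}", process::id()))
}

// Hypothetical helper: where a single base node's data directory ends up.
fn base_node_dir(grpc_port: u16, name: &str) -> PathBuf {
    get_base_dir()
        .join("base_nodes")
        .join(format!("grpc_port_{}", grpc_port))
        .join(name)
}

fn main() {
    // e.g. <crate>/tests/temp/cucumber_12345/base_nodes/grpc_port_18500/NODE1
    println!("{}", base_node_dir(18500, "NODE1").display());
}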