From a6ad2558709969046199972ac33c562afe500d75 Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Tue, 1 Oct 2024 13:07:08 +0900 Subject: [PATCH 1/2] feat: disable node tests, remove sn_client dep Comments APIs that we need to surface in api --- Cargo.lock | 535 +------ Cargo.toml | 8 +- sn_client/Cargo.toml | 2 +- sn_node/Cargo.toml | 1 - sn_node/examples/register_inspect.rs | 232 --- sn_node/examples/registers.rs | 166 --- .../reactivate_examples/register_inspect.rs | 233 +++ sn_node/reactivate_examples/registers.rs | 167 +++ sn_node/tests/common/client.rs | 9 +- sn_node/tests/common/mod.rs | 505 +++---- sn_node/tests/data_with_churn.rs | 1286 ++++++++--------- sn_node/tests/verify_data_location.rs | 855 +++++------ sn_node/tests/verify_routing_table.rs | 204 +-- sn_node_rpc_client/Cargo.toml | 10 +- 14 files changed, 1859 insertions(+), 2354 deletions(-) delete mode 100644 sn_node/examples/register_inspect.rs delete mode 100644 sn_node/examples/registers.rs create mode 100644 sn_node/reactivate_examples/register_inspect.rs create mode 100644 sn_node/reactivate_examples/registers.rs diff --git a/Cargo.lock b/Cargo.lock index c0bf2820ca..d0b97edc11 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -33,18 +33,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "aes" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" -dependencies = [ - "cfg-if", - "cipher 0.3.0", - "cpufeatures", - "opaque-debug 0.3.1", -] - [[package]] name = "aes" version = "0.8.4" @@ -52,7 +40,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" dependencies = [ "cfg-if", - "cipher 0.4.4", + "cipher", "cpufeatures", ] @@ -63,8 +51,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" dependencies = [ "aead", - "aes 0.8.4", - "cipher 0.4.4", + "aes", + "cipher", "ctr", "ghash", "subtle", @@ -77,8 +65,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae0784134ba9375416d469ec31e7c5f9fa94405049cf08c5ce5b4698be673e0d" dependencies = [ "aead", - "aes 0.8.4", - "cipher 0.4.4", + "aes", + "cipher", "ctr", "polyval", "subtle", @@ -378,7 +366,7 @@ dependencies = [ "async-stream", "async-trait", "auto_impl", - "dashmap 5.5.3", + "dashmap", "futures", "futures-utils-wasm", "lru", @@ -921,12 +909,6 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" -[[package]] -name = "ascii" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d92bec98840b8f03a5ff5413de5293bfcd8bf96467cf5452609f939ec6f5de16" - [[package]] name = "asn1-rs" version = "0.6.2" @@ -1285,12 +1267,6 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" -[[package]] -name = "bech32" -version = "0.10.0-beta" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98f7eed2b2781a6f0b5c903471d48e15f56fb4e1165df8a9a2337fd1a59d45ea" - [[package]] name = "better-panic" version = "0.3.0" @@ -1322,7 +1298,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93f2635620bf0b9d4576eb7bb9a38a55df78bd1205d26fa994b25911a69f212f" dependencies = [ - "bitcoin_hashes 0.11.0", + "bitcoin_hashes", "serde", "unicode-normalization", ] @@ -1342,43 +1318,12 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" -[[package]] -name = 
"bitcoin" -version = "0.31.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c85783c2fe40083ea54a33aa2f0ba58831d90fcd190f5bdc47e74e84d2a96ae" -dependencies = [ - "base64 0.21.7", - "bech32", - "bitcoin-internals", - "bitcoin_hashes 0.13.0", - "hex-conservative", - "hex_lit", - "secp256k1 0.28.2", -] - -[[package]] -name = "bitcoin-internals" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9425c3bf7089c983facbae04de54513cce73b41c7f9ff8c845b54e7bc64ebbfb" - [[package]] name = "bitcoin_hashes" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90064b8dee6815a6470d60bad07bbbaee885c0e12d04177138fa3291a01b7bc4" -[[package]] -name = "bitcoin_hashes" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1930a4dabfebb8d7d9992db18ebe3ae2876f0a305fab206fd168df931ede293b" -dependencies = [ - "bitcoin-internals", - "hex-conservative", -] - [[package]] name = "bitflags" version = "1.3.2" @@ -1446,16 +1391,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "block-modes" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cb03d1bed155d89dce0f845b7899b18a9a163e148fd004e1c28421a783e2d8e" -dependencies = [ - "block-padding 0.2.1", - "cipher 0.3.0", -] - [[package]] name = "block-padding" version = "0.1.5" @@ -1704,7 +1639,7 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26b52a9543ae338f279b96b0b9fed9c8093744685043739079ce85cd58f289a6" dependencies = [ - "cipher 0.4.4", + "cipher", ] [[package]] @@ -1740,7 +1675,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" dependencies = [ "cfg-if", - "cipher 0.4.4", + "cipher", "cpufeatures", ] @@ -1752,7 
+1687,7 @@ checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" dependencies = [ "aead", "chacha20", - "cipher 0.4.4", + "cipher", "poly1305", "zeroize", ] @@ -1771,12 +1706,6 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "chunked_transfer" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e4de3bc4ea267985becf712dc6d9eed8b04c953b3fcfb339ebc87acd9804901" - [[package]] name = "ciborium" version = "0.2.2" @@ -1804,15 +1733,6 @@ dependencies = [ "half", ] -[[package]] -name = "cipher" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" -dependencies = [ - "generic-array 0.14.7", -] - [[package]] name = "cipher" version = "0.4.4" @@ -1992,16 +1912,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "console_error_panic_hook" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06aeb73f470f66dcdbf7223caeebb85984942f22f1adb2a088cf9668146bbbc" -dependencies = [ - "cfg-if", - "wasm-bindgen", -] - [[package]] name = "const-hex" version = "1.12.0" @@ -2120,8 +2030,6 @@ version = "7.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "387808c885b79055facbd4b2e806a683fe1bc37abc7dfa5fea1974ad2d4137b0" dependencies = [ - "num", - "quickcheck", "serde", "tiny-keccak", ] @@ -2296,7 +2204,7 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0369ee1ad671834580515889b80f2ea915f23b8be8d0daa4bbaf2ac5c7590835" dependencies = [ - "cipher 0.4.4", + "cipher", ] [[package]] @@ -2410,20 +2318,6 @@ dependencies = [ "parking_lot_core", ] -[[package]] -name = "dashmap" -version = "6.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" -dependencies = [ - "cfg-if", - "crossbeam-utils", - "hashbrown 0.14.5", - "lock_api", - "once_cell", - "parking_lot_core", -] - [[package]] name = "data-encoding" version = "2.6.0" @@ -2537,19 +2431,6 @@ dependencies = [ "syn 2.0.77", ] -[[package]] -name = "dialoguer" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "658bce805d770f407bc62102fca7c2c64ceef2fbcb2b8bd19d2765ce093980de" -dependencies = [ - "console", - "shell-words", - "tempfile", - "thiserror", - "zeroize", -] - [[package]] name = "diff" version = "0.1.13" @@ -2686,21 +2567,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aac81fa3e28d21450aa4d2ac065992ba96a1d7303efbce51a95f4fd175b67562" -[[package]] -name = "dot-generator" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0aaac7ada45f71873ebce336491d1c1bc4a7c8042c7cea978168ad59e805b871" -dependencies = [ - "dot-structures", -] - -[[package]] -name = "dot-structures" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "675e35c02a51bb4d4618cb4885b3839ce6d1787c97b664474d9208d074742e20" - [[package]] name = "downcast" version = "0.11.0" @@ -3903,22 +3769,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "graphviz-rust" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c33d03804e2ce21db5821f2beb4e54f844a8f90326e6bd99a1771dc54aef427" -dependencies = [ - "dot-generator", - "dot-structures", - "into-attr", - "into-attr-derive", - "pest", - "pest_derive", - "rand 0.8.5", - "tempfile", -] - [[package]] name = "group" version = "0.12.1" @@ -4069,12 +3919,6 @@ dependencies = [ "serde", ] -[[package]] -name = "hex-conservative" -version = "0.1.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "212ab92002354b4819390025006c897e8140934349e8635c9b077f47b4dcbd20" - [[package]] name = "hex-literal" version = "0.4.1" @@ -4087,12 +3931,6 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b07f60793ff0a4d9cef0f18e63b5357e06209987153a64648c972c1e5aff336f" -[[package]] -name = "hex_lit" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3011d1213f159867b13cfd6ac92d2cd5f1345762c63be3554e84092d85a50bbd" - [[package]] name = "hickory-proto" version = "0.24.1" @@ -4613,28 +4451,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "into-attr" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18b48c537e49a709e678caec3753a7dba6854661a1eaa27675024283b3f8b376" -dependencies = [ - "dot-structures", -] - -[[package]] -name = "into-attr-derive" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecac7c1ae6cd2c6a3a64d1061a8bdc7f52ff62c26a831a2301e54c1b5d70d5b1" -dependencies = [ - "dot-generator", - "dot-structures", - "into-attr", - "quote", - "syn 1.0.109", -] - [[package]] name = "ipconfig" version = "0.3.2" @@ -5622,19 +5438,6 @@ dependencies = [ "adler2", ] -[[package]] -name = "minreq" -version = "2.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "763d142cdff44aaadd9268bebddb156ef6c65a0e13486bb81673cf2d8739f9b0" -dependencies = [ - "log", - "once_cell", - "rustls 0.21.12", - "rustls-webpki 0.101.7", - "webpki-roots 0.25.4", -] - [[package]] name = "mio" version = "0.8.11" @@ -6000,20 +5803,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "num" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" 
-dependencies = [ - "num-bigint 0.4.6", - "num-complex", - "num-integer", - "num-iter", - "num-rational", - "num-traits", -] - [[package]] name = "num-bigint" version = "0.2.6" @@ -6036,16 +5825,6 @@ dependencies = [ "serde", ] -[[package]] -name = "num-complex" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" -dependencies = [ - "num-traits", - "serde", -] - [[package]] name = "num-conv" version = "0.1.0" @@ -6071,29 +5850,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "num-iter" -version = "0.1.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" -dependencies = [ - "autocfg 1.3.0", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-rational" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" -dependencies = [ - "num-bigint 0.4.6", - "num-integer", - "num-traits", - "serde", -] - [[package]] name = "num-traits" version = "0.2.19" @@ -6522,8 +6278,6 @@ checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", "indexmap 2.5.0", - "serde", - "serde_derive", ] [[package]] @@ -7618,17 +7372,6 @@ dependencies = [ "serde_derive", ] -[[package]] -name = "rpassword" -version = "7.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80472be3c897911d0137b2d2b9055faf6eeac5b14e324073d83bc17b191d7e3f" -dependencies = [ - "libc", - "rtoolbox", - "windows-sys 0.48.0", -] - [[package]] name = "rtnetlink" version = "0.10.1" @@ -7644,16 +7387,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "rtoolbox" -version = "0.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c247d24e63230cdb56463ae328478bd5eac8b8faa8c69461a77e8e323afac90e" -dependencies = [ - "libc", - "windows-sys 0.48.0", -] - [[package]] name = "ruint" version = "1.12.3" @@ -7762,19 +7495,7 @@ dependencies = [ "log", "ring 0.16.20", "sct 0.6.1", - "webpki 0.21.4", -] - -[[package]] -name = "rustls" -version = "0.20.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b80e3dec595989ea8510028f30c408a4630db12c9cbb8de34203b89d6577e99" -dependencies = [ - "log", - "ring 0.16.20", - "sct 0.7.1", - "webpki 0.22.4", + "webpki", ] [[package]] @@ -7803,15 +7524,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "rustls-pemfile" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" -dependencies = [ - "base64 0.13.1", -] - [[package]] name = "rustls-pemfile" version = "1.0.4" @@ -7969,21 +7681,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97d03ceae636d0fed5bae6a7f4f664354c5f4fcedf6eef053fef17e49f837d0a" dependencies = [ "rand 0.6.5", - "secp256k1-sys 0.4.2", + "secp256k1-sys", "serde", ] -[[package]] -name = "secp256k1" -version = "0.28.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d24b59d129cdadea20aea4fb2352fa053712e5d713eee47d700cd4b2bc002f10" -dependencies = [ - "bitcoin_hashes 0.13.0", - "rand 0.8.5", - "secp256k1-sys 0.9.2", -] - [[package]] name = "secp256k1-sys" version = "0.4.2" @@ -7993,15 +7694,6 @@ dependencies = [ "cc", ] -[[package]] -name = "secp256k1-sys" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d1746aae42c19d583c3c1a8c646bfad910498e2051c551a7f2e3c0c9fbb7eb" -dependencies = [ - "cc", -] - [[package]] name = "secrecy" version = "0.8.0" @@ -8017,7 +7709,7 @@ version = "0.29.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "894da3241a9e426c16fb8cb28b19416eae5fafdc7742e4bc505c1821661c140f" dependencies = [ - "aes 0.8.4", + "aes", "bincode", "brotli", "bytes", @@ -8256,12 +7948,6 @@ dependencies = [ "lazy_static", ] -[[package]] -name = "shell-words" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" - [[package]] name = "shlex" version = "1.3.0" @@ -8409,30 +8095,6 @@ dependencies = [ "zip", ] -[[package]] -name = "sn_auditor" -version = "0.3.1" -dependencies = [ - "blsttc", - "clap", - "color-eyre", - "dirs-next", - "futures", - "graphviz-rust", - "lazy_static", - "serde", - "serde_json", - "sn_build_info", - "sn_client", - "sn_logging", - "sn_peers_acquisition", - "sn_protocol", - "tiny_http", - "tokio", - "tracing", - "urlencoding", -] - [[package]] name = "sn_bls_ckd" version = "0.2.1" @@ -8454,99 +8116,6 @@ dependencies = [ "vergen", ] -[[package]] -name = "sn_cli" -version = "0.95.1" -dependencies = [ - "aes 0.7.5", - "base64 0.22.1", - "bitcoin", - "block-modes", - "blsttc", - "bytes", - "chrono", - "clap", - "color-eyre", - "criterion", - "custom_debug", - "dialoguer", - "dirs-next", - "eyre", - "futures", - "hex 0.4.3", - "indicatif", - "libp2p 0.54.1", - "rand 0.8.5", - "rayon", - "reqwest 0.12.7", - "rmp-serde", - "rpassword", - "serde", - "sn_build_info", - "sn_client", - "sn_logging", - "sn_peers_acquisition", - "sn_protocol", - "tempfile", - "tiny-keccak", - "tokio", - "tracing", - "url", - "walkdir", - "xor_name", -] - -[[package]] -name = "sn_client" -version = "0.110.1" -dependencies = [ - "assert_matches", - "async-trait", - "backoff", - "bip39", - "blsttc", - "bytes", - "console_error_panic_hook", - "crdts", - "custom_debug", - "dashmap 6.1.0", - "dirs-next", - "eyre", - "futures", - "getrandom 0.2.15", - "hex 0.4.3", - "itertools 0.12.1", - "libp2p 0.54.1", - "libp2p-identity", - "petgraph", - "prometheus-client", - "rand 0.8.5", - 
"rayon", - "rmp-serde", - "self_encryption", - "serde", - "sn_bls_ckd", - "sn_client", - "sn_curv", - "sn_logging", - "sn_networking", - "sn_peers_acquisition", - "sn_protocol", - "sn_registers", - "sn_transfers", - "tempfile", - "thiserror", - "tiny-keccak", - "tokio", - "tracing", - "tracing-wasm", - "wasm-bindgen", - "wasm-bindgen-futures", - "wasmtimer", - "web-sys", - "xor_name", -] - [[package]] name = "sn_curv" version = "0.10.1" @@ -8568,7 +8137,7 @@ dependencies = [ "pairing-plus", "rand 0.6.5", "rand 0.7.3", - "secp256k1 0.20.3", + "secp256k1", "serde", "serde_bytes", "serde_derive", @@ -8602,38 +8171,6 @@ dependencies = [ "xor_name", ] -[[package]] -name = "sn_faucet" -version = "0.5.1" -dependencies = [ - "assert_fs", - "base64 0.22.1", - "bitcoin", - "blsttc", - "clap", - "color-eyre", - "dirs-next", - "fs2", - "futures", - "hex 0.4.3", - "indicatif", - "minreq", - "reqwest 0.12.7", - "serde", - "serde_json", - "sn_build_info", - "sn_cli", - "sn_client", - "sn_logging", - "sn_peers_acquisition", - "sn_protocol", - "sn_transfers", - "tokio", - "tracing", - "url", - "warp", -] - [[package]] name = "sn_logging" version = "0.2.34" @@ -8748,7 +8285,6 @@ dependencies = [ "serde", "serde_json", "sn_build_info", - "sn_client", "sn_evm", "sn_logging", "sn_networking", @@ -8785,7 +8321,6 @@ dependencies = [ "libp2p 0.54.1", "libp2p-identity", "sn_build_info", - "sn_client", "sn_logging", "sn_node", "sn_peers_acquisition", @@ -9336,21 +8871,6 @@ dependencies = [ "crunchy", ] -[[package]] -name = "tiny_http" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "389915df6413a2e74fb181895f933386023c71110878cd0825588928e64cdc82" -dependencies = [ - "ascii", - "chunked_transfer", - "httpdate", - "log", - "rustls 0.20.9", - "rustls-pemfile 0.2.1", - "zeroize", -] - [[package]] name = "tinytemplate" version = "1.2.1" @@ -9435,7 +8955,7 @@ checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" 
dependencies = [ "rustls 0.19.1", "tokio", - "webpki 0.21.4", + "webpki", ] [[package]] @@ -9819,17 +9339,6 @@ dependencies = [ "syn 2.0.77", ] -[[package]] -name = "tracing-wasm" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4575c663a174420fa2d78f4108ff68f65bf2fbb7dd89f33749b6e826b3626e07" -dependencies = [ - "tracing", - "tracing-subscriber", - "wasm-bindgen", -] - [[package]] name = "try-lock" version = "0.2.5" @@ -10301,16 +9810,6 @@ dependencies = [ "untrusted 0.7.1", ] -[[package]] -name = "webpki" -version = "0.22.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed63aea5ce73d0ff405984102c42de94fc55a6b75765d621c65262469b3c9b53" -dependencies = [ - "ring 0.17.8", - "untrusted 0.9.0", -] - [[package]] name = "webpki-roots" version = "0.25.4" @@ -10827,7 +10326,7 @@ version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "760394e246e4c28189f19d488c058bf16f564016aefac5d32bb1f3b51d5e9261" dependencies = [ - "aes 0.8.4", + "aes", "byteorder", "bzip2", "constant_time_eq", diff --git a/Cargo.toml b/Cargo.toml index fb86e31a39..79cc1a5945 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,12 +4,12 @@ members = [ "autonomi", "evmlib", "evm_testnet", - "sn_auditor", + # "sn_auditor", "sn_build_info", "sn_evm", - "sn_cli", - "sn_client", - "sn_faucet", + # "sn_cli", + # "sn_client", + # "sn_faucet", "sn_logging", "sn_metrics", "nat-detection", diff --git a/sn_client/Cargo.toml b/sn_client/Cargo.toml index c69626873a..da0eeaf5d3 100644 --- a/sn_client/Cargo.toml +++ b/sn_client/Cargo.toml @@ -67,7 +67,7 @@ assert_matches = "1.5.0" dirs-next = "~2.0.0" # add rand to libp2p libp2p-identity = { version = "0.2.7", features = ["rand"] } -sn_client = { path = "../sn_client", features = ["test-utils"] } +# sn_client = { path = "../sn_client", features = ["test-utils"] } sn_logging = { path = "../sn_logging", version = "0.2.34" } 
sn_registers = { path = "../sn_registers", version = "0.3.19", features = [ "test-utils", diff --git a/sn_node/Cargo.toml b/sn_node/Cargo.toml index bed23167bb..58159455f1 100644 --- a/sn_node/Cargo.toml +++ b/sn_node/Cargo.toml @@ -85,7 +85,6 @@ reqwest = { version = "0.12.2", default-features = false, features = [ "rustls-tls-manual-roots", ] } serde_json = "1.0" -sn_client = { path = "../sn_client", version = "0.110.1" } sn_protocol = { path = "../sn_protocol", version = "0.17.9", features = [ "rpc", ] } diff --git a/sn_node/examples/register_inspect.rs b/sn_node/examples/register_inspect.rs deleted file mode 100644 index 3c3d70a36b..0000000000 --- a/sn_node/examples/register_inspect.rs +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -use crdts::merkle_reg::{Hash, MerkleReg, Node}; -use std::collections::HashMap; -use std::io; - -use sn_client::{acc_packet::load_account_wallet_or_create_with_mnemonic, Client, WalletClient}; -use sn_registers::{Entry, Permissions, RegisterAddress}; - -use xor_name::XorName; - -use bls::SecretKey; -use clap::Parser; -use color_eyre::{ - eyre::{eyre, Result, WrapErr}, - Help, -}; - -#[derive(Parser, Debug)] -#[clap(name = "register inspect cli")] -struct Opt { - // Create register and give it a nickname (first user) - #[clap(long, default_value = "")] - reg_nickname: String, - - // Get existing register with given network address (any other user) - #[clap(long, default_value = "", conflicts_with = "reg_nickname")] - reg_address: String, -} - -#[tokio::main] -async fn main() -> Result<()> { - let opt = Opt::parse(); - let mut reg_nickname = opt.reg_nickname; - let reg_address_string = opt.reg_address; - - // let's build a random secret key to sign our Register ops - let signer = SecretKey::random(); - - println!("Starting SAFE client..."); - let client = Client::new(signer, None, None, None).await?; - println!("SAFE client signer public key: {:?}", client.signer_pk()); - - // The address of the register to be displayed - let mut meta = XorName::from_content(reg_nickname.as_bytes()); - let reg_address = if !reg_nickname.is_empty() { - meta = XorName::from_content(reg_nickname.as_bytes()); - RegisterAddress::new(meta, client.signer_pk()) - } else { - reg_nickname = format!("{reg_address_string:<6}..."); - RegisterAddress::from_hex(®_address_string) - .wrap_err("cannot parse hex register address")? - }; - - // Loading a local wallet (for ClientRegister::sync()). - // The wallet can have ZERO balance in this example, - // but the ClientRegister::sync() API requires a wallet and will - // create the register if not found even though we don't want that. - // - // The only want to avoid unwanted creation of a Register seems to - // be to supply an empty wallet. 
- // TODO Follow the issue about this: https://github.com/maidsafe/safe_network/issues/1308 - let root_dir = dirs_next::data_dir() - .ok_or_else(|| eyre!("could not obtain data directory path".to_string()))? - .join("safe") - .join("client"); - - let wallet = load_account_wallet_or_create_with_mnemonic(&root_dir, None) - .wrap_err("Unable to read wallet file in {root_dir:?}") - .suggestion( - "If you have an old wallet file, it may no longer be compatible. Try removing it", - )?; - - let mut wallet_client = WalletClient::new(client.clone(), wallet); - - println!("Retrieving Register '{reg_nickname}' from SAFE"); - let mut reg_replica = match client.get_register(reg_address).await { - Ok(register) => { - println!( - "Register '{reg_nickname}' found at {:?}!", - register.address(), - ); - register - } - Err(_) => { - println!("Register '{reg_nickname}' not found, creating it at {reg_address}"); - let (register, _cost, _royalties_fees) = client - .create_and_pay_for_register( - meta, - &mut wallet_client, - true, - Permissions::new_anyone_can_write(), - ) - .await?; - - register - } - }; - println!("Register address: {:?}", reg_replica.address().to_hex()); - println!("Register owned by: {:?}", reg_replica.owner()); - println!("Register permissions: {:?}", reg_replica.permissions()); - - // Repeatedly display of the register structure on command - loop { - println!(); - println!( - "Current total number of items in Register: {}", - reg_replica.size() - ); - println!("Latest value (more than one if concurrent writes were made):"); - println!("--------------"); - for (_, entry) in reg_replica.read().into_iter() { - println!("{}", String::from_utf8(entry)?); - } - println!("--------------"); - - if prompt_user() { - return Ok(()); - } - - // Sync with network after a delay - println!("Syncing with SAFE..."); - reg_replica.sync(&mut wallet_client, true, None).await?; - let merkle_reg = reg_replica.merkle_reg(); - let content = merkle_reg.read(); - 
println!("synced!"); - - // Show the Register structure - - // Index nodes to make it easier to see where a - // node appears multiple times in the output. - // Note: it isn't related to the order of insertion - // which is hard to determine. - let mut index: usize = 0; - let mut node_ordering: HashMap = HashMap::new(); - for (_hash, node) in content.hashes_and_nodes() { - index_node_and_descendants(node, &mut index, &mut node_ordering, merkle_reg); - } - - println!("======================"); - println!("Root (Latest) Node(s):"); - for node in content.nodes() { - let _ = print_node(0, node, &node_ordering); - } - - println!("======================"); - println!("Register Structure:"); - println!("(In general, earlier nodes are more indented)"); - let mut indents = 0; - for (_hash, node) in content.hashes_and_nodes() { - print_node_and_descendants(&mut indents, node, &node_ordering, merkle_reg); - } - - println!("======================"); - } -} - -fn index_node_and_descendants( - node: &Node, - index: &mut usize, - node_ordering: &mut HashMap, - merkle_reg: &MerkleReg, -) { - let node_hash = node.hash(); - if node_ordering.get(&node_hash).is_none() { - node_ordering.insert(node_hash, *index); - *index += 1; - } - - for child_hash in node.children.iter() { - if let Some(child_node) = merkle_reg.node(*child_hash) { - index_node_and_descendants(child_node, index, node_ordering, merkle_reg); - } else { - println!("ERROR looking up hash of child"); - } - } -} - -fn print_node_and_descendants( - indents: &mut usize, - node: &Node, - node_ordering: &HashMap, - merkle_reg: &MerkleReg, -) { - let _ = print_node(*indents, node, node_ordering); - - *indents += 1; - for child_hash in node.children.iter() { - if let Some(child_node) = merkle_reg.node(*child_hash) { - print_node_and_descendants(indents, child_node, node_ordering, merkle_reg); - } - } - *indents -= 1; -} - -fn print_node( - indents: usize, - node: &Node, - node_ordering: &HashMap, -) -> Result<()> { - let order = 
match node_ordering.get(&node.hash()) { - Some(order) => format!("{order}"), - None => String::new(), - }; - let indentation = " ".repeat(indents); - println!( - "{indentation}[{:>2}] Node({:?}..) Entry({:?})", - order, - hex::encode(&node.hash()[0..3]), - String::from_utf8(node.value.clone())? - ); - Ok(()) -} - -fn prompt_user() -> bool { - let mut input_text = String::new(); - println!(); - println!("Enter a blank line to print the latest register structure (or 'Q' to quit)"); - io::stdin() - .read_line(&mut input_text) - .expect("Failed to read text from stdin"); - - let string = input_text.trim().to_string(); - - string.contains('Q') || string.contains('q') -} diff --git a/sn_node/examples/registers.rs b/sn_node/examples/registers.rs deleted file mode 100644 index 70d3177a1c..0000000000 --- a/sn_node/examples/registers.rs +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -use sn_client::{ - acc_packet::load_account_wallet_or_create_with_mnemonic, Client, Error, WalletClient, -}; -use sn_registers::{Permissions, RegisterAddress}; - -use xor_name::XorName; - -use bls::SecretKey; -use clap::Parser; -use color_eyre::{ - eyre::{eyre, Result, WrapErr}, - Help, -}; -use std::{io, time::Duration}; -use tokio::time::sleep; - -#[derive(Parser, Debug)] -#[clap(name = "registers cli")] -struct Opt { - // A name for this user in the example - #[clap(long)] - user: String, - - // Create register and give it a nickname (first user) - #[clap(long, default_value = "")] - reg_nickname: String, - - // Get existing register with given network address (any other user) - #[clap(long, default_value = "", conflicts_with = "reg_nickname")] - reg_address: String, - - // Delay before synchronising local register with the network - #[clap(long, default_value_t = 2000)] - delay_millis: u64, -} - -#[tokio::main] -async fn main() -> Result<()> { - let opt = Opt::parse(); - let user = opt.user; - let mut reg_nickname = opt.reg_nickname; - let reg_address_string = opt.reg_address; - let delay = Duration::from_millis(opt.delay_millis); - - // let's build a random secret key to sign our Register ops - let signer = SecretKey::random(); - - println!("Starting SAFE client..."); - let client = Client::new(signer, None, None, None).await?; - println!("SAFE client signer public key: {:?}", client.signer_pk()); - - // We'll retrieve (or create if not found) a Register, and write on it - // in offline mode, syncing with the network periodically. - - let mut meta = XorName::from_content(reg_nickname.as_bytes()); - let reg_address = if !reg_nickname.is_empty() { - meta = XorName::from_content(reg_nickname.as_bytes()); - RegisterAddress::new(meta, client.signer_pk()) - } else { - reg_nickname = format!("{reg_address_string:<6}..."); - RegisterAddress::from_hex(®_address_string) - .wrap_err("cannot parse hex register address")? - }; - - // Loading a local wallet. 
It needs to have a non-zero balance for - // this example to be able to pay for the Register's storage. - let root_dir = dirs_next::data_dir() - .ok_or_else(|| eyre!("could not obtain data directory path".to_string()))? - .join("safe") - .join("client"); - - let wallet = load_account_wallet_or_create_with_mnemonic(&root_dir, None) - .wrap_err("Unable to read wallet file in {root_dir:?}") - .suggestion( - "If you have an old wallet file, it may no longer be compatible. Try removing it", - )?; - let mut wallet_client = WalletClient::new(client.clone(), wallet); - - println!("Retrieving Register '{reg_nickname}' from SAFE, as user '{user}'"); - let mut reg_replica = match client.get_register(reg_address).await { - Ok(register) => { - println!( - "Register '{reg_nickname}' found at {:?}!", - register.address(), - ); - register - } - Err(_) => { - println!("Register '{reg_nickname}' not found, creating it at {reg_address}"); - let (register, _cost, _royalties_fees) = client - .create_and_pay_for_register( - meta, - &mut wallet_client, - true, - Permissions::new_anyone_can_write(), - ) - .await?; - - register - } - }; - println!("Register address: {:?}", reg_replica.address().to_hex()); - println!("Register owned by: {:?}", reg_replica.owner()); - println!("Register permissions: {:?}", reg_replica.permissions()); - - // We'll loop asking for new msg to write onto the Register offline, - // then we'll be syncing the offline Register with the network, i.e. - // both pushing and ulling all changes made to it by us and other clients/users. - // If we detect branches when trying to write, after we synced with remote - // replicas of the Register, we'll merge them all back into a single value. 
- loop { - println!(); - println!( - "Current total number of items in Register: {}", - reg_replica.size() - ); - println!("Latest value (more than one if concurrent writes were made):"); - println!("--------------"); - for (_, entry) in reg_replica.read().into_iter() { - println!("{}", String::from_utf8(entry)?); - } - println!("--------------"); - - let input_text = prompt_user(); - if !input_text.is_empty() { - println!("Writing msg (offline) to Register: '{input_text}'"); - let msg = format!("[{user}]: {input_text}"); - match reg_replica.write(msg.as_bytes()) { - Ok(_) => {} - Err(Error::ContentBranchDetected(branches)) => { - println!( - "Branches ({}) detected in Register, let's merge them all...", - branches.len() - ); - reg_replica.write_merging_branches(msg.as_bytes())?; - } - Err(err) => return Err(err.into()), - } - } - - // Sync with network after a delay - println!("Syncing with SAFE in {delay:?}..."); - sleep(delay).await; - reg_replica.sync(&mut wallet_client, true, None).await?; - println!("synced!"); - } -} - -fn prompt_user() -> String { - let mut input_text = String::new(); - println!(); - println!("Enter a blank line to receive updates, or some text to be written."); - io::stdin() - .read_line(&mut input_text) - .expect("Failed to read text from stdin"); - - input_text.trim().to_string() -} diff --git a/sn_node/reactivate_examples/register_inspect.rs b/sn_node/reactivate_examples/register_inspect.rs new file mode 100644 index 0000000000..2873aa1139 --- /dev/null +++ b/sn_node/reactivate_examples/register_inspect.rs @@ -0,0 +1,233 @@ +// // Copyright 2024 MaidSafe.net limited. +// // +// // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// // KIND, either express or implied. 
Please review the Licences for the specific language governing +// // permissions and limitations relating to use of the SAFE Network Software. + +// use crdts::merkle_reg::{Hash, MerkleReg, Node}; +// use std::collections::HashMap; +// use std::io; + +// // TODO: use autonomi API here +// // use sn_client::{acc_packet::load_account_wallet_or_create_with_mnemonic, Client, WalletClient}; +// use sn_registers::{Entry, Permissions, RegisterAddress}; + +// use xor_name::XorName; + +// use bls::SecretKey; +// use clap::Parser; +// use color_eyre::{ +// eyre::{eyre, Result, WrapErr}, +// Help, +// }; + +// #[derive(Parser, Debug)] +// #[clap(name = "register inspect cli")] +// struct Opt { +// // Create register and give it a nickname (first user) +// #[clap(long, default_value = "")] +// reg_nickname: String, + +// // Get existing register with given network address (any other user) +// #[clap(long, default_value = "", conflicts_with = "reg_nickname")] +// reg_address: String, +// } + +// #[tokio::main] +// async fn main() -> Result<()> { +// let opt = Opt::parse(); +// let mut reg_nickname = opt.reg_nickname; +// let reg_address_string = opt.reg_address; + +// // let's build a random secret key to sign our Register ops +// let signer = SecretKey::random(); + +// println!("Starting SAFE client..."); +// let client = Client::new(signer, None, None, None).await?; +// println!("SAFE client signer public key: {:?}", client.signer_pk()); + +// // The address of the register to be displayed +// let mut meta = XorName::from_content(reg_nickname.as_bytes()); +// let reg_address = if !reg_nickname.is_empty() { +// meta = XorName::from_content(reg_nickname.as_bytes()); +// RegisterAddress::new(meta, client.signer_pk()) +// } else { +// reg_nickname = format!("{reg_address_string:<6}..."); +// RegisterAddress::from_hex(®_address_string) +// .wrap_err("cannot parse hex register address")? +// }; + +// // Loading a local wallet (for ClientRegister::sync()). 
+// // The wallet can have ZERO balance in this example, +// // but the ClientRegister::sync() API requires a wallet and will +// // create the register if not found even though we don't want that. +// // +// // The only want to avoid unwanted creation of a Register seems to +// // be to supply an empty wallet. +// // TODO Follow the issue about this: https://github.com/maidsafe/safe_network/issues/1308 +// let root_dir = dirs_next::data_dir() +// .ok_or_else(|| eyre!("could not obtain data directory path".to_string()))? +// .join("safe") +// .join("client"); + +// let wallet = load_account_wallet_or_create_with_mnemonic(&root_dir, None) +// .wrap_err("Unable to read wallet file in {root_dir:?}") +// .suggestion( +// "If you have an old wallet file, it may no longer be compatible. Try removing it", +// )?; + +// let mut wallet_client = WalletClient::new(client.clone(), wallet); + +// println!("Retrieving Register '{reg_nickname}' from SAFE"); +// let mut reg_replica = match client.get_register(reg_address).await { +// Ok(register) => { +// println!( +// "Register '{reg_nickname}' found at {:?}!", +// register.address(), +// ); +// register +// } +// Err(_) => { +// println!("Register '{reg_nickname}' not found, creating it at {reg_address}"); +// let (register, _cost, _royalties_fees) = client +// .create_and_pay_for_register( +// meta, +// &mut wallet_client, +// true, +// Permissions::new_anyone_can_write(), +// ) +// .await?; + +// register +// } +// }; +// println!("Register address: {:?}", reg_replica.address().to_hex()); +// println!("Register owned by: {:?}", reg_replica.owner()); +// println!("Register permissions: {:?}", reg_replica.permissions()); + +// // Repeatedly display of the register structure on command +// loop { +// println!(); +// println!( +// "Current total number of items in Register: {}", +// reg_replica.size() +// ); +// println!("Latest value (more than one if concurrent writes were made):"); +// println!("--------------"); +// 
for (_, entry) in reg_replica.read().into_iter() { +// println!("{}", String::from_utf8(entry)?); +// } +// println!("--------------"); + +// if prompt_user() { +// return Ok(()); +// } + +// // Sync with network after a delay +// println!("Syncing with SAFE..."); +// reg_replica.sync(&mut wallet_client, true, None).await?; +// let merkle_reg = reg_replica.merkle_reg(); +// let content = merkle_reg.read(); +// println!("synced!"); + +// // Show the Register structure + +// // Index nodes to make it easier to see where a +// // node appears multiple times in the output. +// // Note: it isn't related to the order of insertion +// // which is hard to determine. +// let mut index: usize = 0; +// let mut node_ordering: HashMap = HashMap::new(); +// for (_hash, node) in content.hashes_and_nodes() { +// index_node_and_descendants(node, &mut index, &mut node_ordering, merkle_reg); +// } + +// println!("======================"); +// println!("Root (Latest) Node(s):"); +// for node in content.nodes() { +// let _ = print_node(0, node, &node_ordering); +// } + +// println!("======================"); +// println!("Register Structure:"); +// println!("(In general, earlier nodes are more indented)"); +// let mut indents = 0; +// for (_hash, node) in content.hashes_and_nodes() { +// print_node_and_descendants(&mut indents, node, &node_ordering, merkle_reg); +// } + +// println!("======================"); +// } +// } + +// fn index_node_and_descendants( +// node: &Node, +// index: &mut usize, +// node_ordering: &mut HashMap, +// merkle_reg: &MerkleReg, +// ) { +// let node_hash = node.hash(); +// if node_ordering.get(&node_hash).is_none() { +// node_ordering.insert(node_hash, *index); +// *index += 1; +// } + +// for child_hash in node.children.iter() { +// if let Some(child_node) = merkle_reg.node(*child_hash) { +// index_node_and_descendants(child_node, index, node_ordering, merkle_reg); +// } else { +// println!("ERROR looking up hash of child"); +// } +// } +// } + +// fn 
print_node_and_descendants( +// indents: &mut usize, +// node: &Node, +// node_ordering: &HashMap, +// merkle_reg: &MerkleReg, +// ) { +// let _ = print_node(*indents, node, node_ordering); + +// *indents += 1; +// for child_hash in node.children.iter() { +// if let Some(child_node) = merkle_reg.node(*child_hash) { +// print_node_and_descendants(indents, child_node, node_ordering, merkle_reg); +// } +// } +// *indents -= 1; +// } + +// fn print_node( +// indents: usize, +// node: &Node, +// node_ordering: &HashMap, +// ) -> Result<()> { +// let order = match node_ordering.get(&node.hash()) { +// Some(order) => format!("{order}"), +// None => String::new(), +// }; +// let indentation = " ".repeat(indents); +// println!( +// "{indentation}[{:>2}] Node({:?}..) Entry({:?})", +// order, +// hex::encode(&node.hash()[0..3]), +// String::from_utf8(node.value.clone())? +// ); +// Ok(()) +// } + +// fn prompt_user() -> bool { +// let mut input_text = String::new(); +// println!(); +// println!("Enter a blank line to print the latest register structure (or 'Q' to quit)"); +// io::stdin() +// .read_line(&mut input_text) +// .expect("Failed to read text from stdin"); + +// let string = input_text.trim().to_string(); + +// string.contains('Q') || string.contains('q') +// } diff --git a/sn_node/reactivate_examples/registers.rs b/sn_node/reactivate_examples/registers.rs new file mode 100644 index 0000000000..6fa6c51045 --- /dev/null +++ b/sn_node/reactivate_examples/registers.rs @@ -0,0 +1,167 @@ +// // Copyright 2024 MaidSafe.net limited. +// // +// // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// // KIND, either express or implied. 
Please review the Licences for the specific language governing +// // permissions and limitations relating to use of the SAFE Network Software. + +// // TODO: use autonomi API here. +// // use sn_client::{ +// // acc_packet::load_account_wallet_or_create_with_mnemonic, Client, Error, WalletClient, +// // }; +// use sn_registers::{Permissions, RegisterAddress}; + +// use xor_name::XorName; + +// use bls::SecretKey; +// use clap::Parser; +// use color_eyre::{ +// eyre::{eyre, Result, WrapErr}, +// Help, +// }; +// use std::{io, time::Duration}; +// use tokio::time::sleep; + +// #[derive(Parser, Debug)] +// #[clap(name = "registers cli")] +// struct Opt { +// // A name for this user in the example +// #[clap(long)] +// user: String, + +// // Create register and give it a nickname (first user) +// #[clap(long, default_value = "")] +// reg_nickname: String, + +// // Get existing register with given network address (any other user) +// #[clap(long, default_value = "", conflicts_with = "reg_nickname")] +// reg_address: String, + +// // Delay before synchronising local register with the network +// #[clap(long, default_value_t = 2000)] +// delay_millis: u64, +// } + +// #[tokio::main] +// async fn main() -> Result<()> { +// let opt = Opt::parse(); +// let user = opt.user; +// let mut reg_nickname = opt.reg_nickname; +// let reg_address_string = opt.reg_address; +// let delay = Duration::from_millis(opt.delay_millis); + +// // let's build a random secret key to sign our Register ops +// let signer = SecretKey::random(); + +// println!("Starting SAFE client..."); +// let client = Client::new(signer, None, None, None).await?; +// println!("SAFE client signer public key: {:?}", client.signer_pk()); + +// // We'll retrieve (or create if not found) a Register, and write on it +// // in offline mode, syncing with the network periodically. 
+ +// let mut meta = XorName::from_content(reg_nickname.as_bytes()); +// let reg_address = if !reg_nickname.is_empty() { +// meta = XorName::from_content(reg_nickname.as_bytes()); +// RegisterAddress::new(meta, client.signer_pk()) +// } else { +// reg_nickname = format!("{reg_address_string:<6}..."); +// RegisterAddress::from_hex(®_address_string) +// .wrap_err("cannot parse hex register address")? +// }; + +// // Loading a local wallet. It needs to have a non-zero balance for +// // this example to be able to pay for the Register's storage. +// let root_dir = dirs_next::data_dir() +// .ok_or_else(|| eyre!("could not obtain data directory path".to_string()))? +// .join("safe") +// .join("client"); + +// let wallet = load_account_wallet_or_create_with_mnemonic(&root_dir, None) +// .wrap_err("Unable to read wallet file in {root_dir:?}") +// .suggestion( +// "If you have an old wallet file, it may no longer be compatible. Try removing it", +// )?; +// let mut wallet_client = WalletClient::new(client.clone(), wallet); + +// println!("Retrieving Register '{reg_nickname}' from SAFE, as user '{user}'"); +// let mut reg_replica = match client.get_register(reg_address).await { +// Ok(register) => { +// println!( +// "Register '{reg_nickname}' found at {:?}!", +// register.address(), +// ); +// register +// } +// Err(_) => { +// println!("Register '{reg_nickname}' not found, creating it at {reg_address}"); +// let (register, _cost, _royalties_fees) = client +// .create_and_pay_for_register( +// meta, +// &mut wallet_client, +// true, +// Permissions::new_anyone_can_write(), +// ) +// .await?; + +// register +// } +// }; +// println!("Register address: {:?}", reg_replica.address().to_hex()); +// println!("Register owned by: {:?}", reg_replica.owner()); +// println!("Register permissions: {:?}", reg_replica.permissions()); + +// // We'll loop asking for new msg to write onto the Register offline, +// // then we'll be syncing the offline Register with the network, i.e. 
+// // both pushing and ulling all changes made to it by us and other clients/users. +// // If we detect branches when trying to write, after we synced with remote +// // replicas of the Register, we'll merge them all back into a single value. +// loop { +// println!(); +// println!( +// "Current total number of items in Register: {}", +// reg_replica.size() +// ); +// println!("Latest value (more than one if concurrent writes were made):"); +// println!("--------------"); +// for (_, entry) in reg_replica.read().into_iter() { +// println!("{}", String::from_utf8(entry)?); +// } +// println!("--------------"); + +// let input_text = prompt_user(); +// if !input_text.is_empty() { +// println!("Writing msg (offline) to Register: '{input_text}'"); +// let msg = format!("[{user}]: {input_text}"); +// match reg_replica.write(msg.as_bytes()) { +// Ok(_) => {} +// Err(Error::ContentBranchDetected(branches)) => { +// println!( +// "Branches ({}) detected in Register, let's merge them all...", +// branches.len() +// ); +// reg_replica.write_merging_branches(msg.as_bytes())?; +// } +// Err(err) => return Err(err.into()), +// } +// } + +// // Sync with network after a delay +// println!("Syncing with SAFE in {delay:?}..."); +// sleep(delay).await; +// reg_replica.sync(&mut wallet_client, true, None).await?; +// println!("synced!"); +// } +// } + +// fn prompt_user() -> String { +// let mut input_text = String::new(); +// println!(); +// println!("Enter a blank line to receive updates, or some text to be written."); +// io::stdin() +// .read_line(&mut input_text) +// .expect("Failed to read text from stdin"); + +// input_text.trim().to_string() +// } diff --git a/sn_node/tests/common/client.rs b/sn_node/tests/common/client.rs index bff2c8d333..297b103d27 100644 --- a/sn_node/tests/common/client.rs +++ b/sn_node/tests/common/client.rs @@ -8,10 +8,11 @@ use eyre::{bail, OptionExt, Result}; use libp2p::PeerId; -use sn_client::{ - acc_packet::{create_faucet_account_and_wallet, 
load_account_wallet_or_create_with_mnemonic}, - send, Client, -}; +/// TODO: Update to use autonomi API here +// use sn_client::{ +// acc_packet::{create_faucet_account_and_wallet, load_account_wallet_or_create_with_mnemonic}, +// send, Client, +// }; use sn_peers_acquisition::parse_peer_addr; use sn_protocol::safenode_proto::{NodeInfoRequest, RestartRequest}; use sn_service_management::{ diff --git a/sn_node/tests/common/mod.rs b/sn_node/tests/common/mod.rs index 6366e2092c..452d506379 100644 --- a/sn_node/tests/common/mod.rs +++ b/sn_node/tests/common/mod.rs @@ -1,275 +1,276 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. -#![allow(dead_code)] +// // Copyright 2024 MaidSafe.net limited. +// // +// // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// // KIND, either express or implied. Please review the Licences for the specific language governing +// // permissions and limitations relating to use of the SAFE Network Software. 
+// #![allow(dead_code)] -pub mod client; +// pub mod client; -use self::client::{Droplet, NonDroplet}; -use bytes::Bytes; -use eyre::{bail, eyre, OptionExt, Result}; -use itertools::Either; -use libp2p::PeerId; -use rand::{ - distributions::{Distribution, Standard}, - Rng, -}; -use self_encryption::MIN_ENCRYPTABLE_BYTES; -use sn_client::{Client, FilesApi}; -use sn_protocol::{ - safenode_proto::{safe_node_client::SafeNodeClient, NodeInfoRequest}, - storage::ChunkAddress, -}; -use sn_service_management::{ - get_local_node_registry_path, - safenode_manager_proto::safe_node_manager_client::SafeNodeManagerClient, NodeRegistry, -}; -use std::{ - fs::File, - io::Write, - net::SocketAddr, - path::{Path, PathBuf}, - time::Duration, -}; -use test_utils::testnet::DeploymentInventory; -use tonic::Request; -use tracing::{debug, error, warn}; -use xor_name::XorName; +// use self::client::{Droplet, NonDroplet}; +// use bytes::Bytes; +// use eyre::{bail, eyre, OptionExt, Result}; +// use itertools::Either; +// use libp2p::PeerId; +// use rand::{ +// distributions::{Distribution, Standard}, +// Rng, +// }; +// use self_encryption::MIN_ENCRYPTABLE_BYTES; +// // TODO: Use autonimi API here +// // use sn_client::{Client, FilesApi}; +// use sn_protocol::{ +// safenode_proto::{safe_node_client::SafeNodeClient, NodeInfoRequest}, +// storage::ChunkAddress, +// }; +// use sn_service_management::{ +// get_local_node_registry_path, +// safenode_manager_proto::safe_node_manager_client::SafeNodeManagerClient, NodeRegistry, +// }; +// use std::{ +// fs::File, +// io::Write, +// net::SocketAddr, +// path::{Path, PathBuf}, +// time::Duration, +// }; +// use test_utils::testnet::DeploymentInventory; +// use tonic::Request; +// use tracing::{debug, error, warn}; +// use xor_name::XorName; -type ResultRandomContent = Result<(FilesApi, Bytes, ChunkAddress, Vec<(XorName, PathBuf)>)>; +// type ResultRandomContent = Result<(FilesApi, Bytes, ChunkAddress, Vec<(XorName, PathBuf)>)>; -pub fn 
random_content( - client: &Client, - wallet_dir: PathBuf, - chunk_dir: &Path, -) -> ResultRandomContent { - let mut rng = rand::thread_rng(); +// pub fn random_content( +// client: &Client, +// wallet_dir: PathBuf, +// chunk_dir: &Path, +// ) -> ResultRandomContent { +// let mut rng = rand::thread_rng(); - let random_len = rng.gen_range(MIN_ENCRYPTABLE_BYTES..1024 * MIN_ENCRYPTABLE_BYTES); - let random_length_content: Vec = - >::sample_iter(Standard, &mut rng) - .take(random_len) - .collect(); +// let random_len = rng.gen_range(MIN_ENCRYPTABLE_BYTES..1024 * MIN_ENCRYPTABLE_BYTES); +// let random_length_content: Vec = +// >::sample_iter(Standard, &mut rng) +// .take(random_len) +// .collect(); - let file_path = chunk_dir.join("random_content"); - let mut output_file = File::create(file_path.clone())?; - output_file.write_all(&random_length_content)?; +// let file_path = chunk_dir.join("random_content"); +// let mut output_file = File::create(file_path.clone())?; +// output_file.write_all(&random_length_content)?; - let files_api = FilesApi::new(client.clone(), wallet_dir); - let (head_chunk_address, _data_map, _file_size, chunks) = - FilesApi::chunk_file(&file_path, chunk_dir, true)?; +// let files_api = FilesApi::new(client.clone(), wallet_dir); +// let (head_chunk_address, _data_map, _file_size, chunks) = +// FilesApi::chunk_file(&file_path, chunk_dir, true)?; - Ok(( - files_api, - random_length_content.into(), - head_chunk_address, - chunks, - )) -} +// Ok(( +// files_api, +// random_length_content.into(), +// head_chunk_address, +// chunks, +// )) +// } -// Connect to a RPC socket addr with retry -pub async fn get_safenode_rpc_client( - socket_addr: SocketAddr, -) -> Result> { - // get the new PeerId for the current NodeIndex - let endpoint = format!("https://{socket_addr}"); - let mut attempts = 0; - loop { - if let Ok(rpc_client) = SafeNodeClient::connect(endpoint.clone()).await { - break Ok(rpc_client); - } - attempts += 1; - println!("Could not connect to 
rpc {endpoint:?}. Attempts: {attempts:?}/10"); - error!("Could not connect to rpc {endpoint:?}. Attempts: {attempts:?}/10"); - tokio::time::sleep(Duration::from_secs(1)).await; - if attempts >= 10 { - bail!("Failed to connect to {endpoint:?} even after 10 retries"); - } - } -} +// // Connect to a RPC socket addr with retry +// pub async fn get_safenode_rpc_client( +// socket_addr: SocketAddr, +// ) -> Result> { +// // get the new PeerId for the current NodeIndex +// let endpoint = format!("https://{socket_addr}"); +// let mut attempts = 0; +// loop { +// if let Ok(rpc_client) = SafeNodeClient::connect(endpoint.clone()).await { +// break Ok(rpc_client); +// } +// attempts += 1; +// println!("Could not connect to rpc {endpoint:?}. Attempts: {attempts:?}/10"); +// error!("Could not connect to rpc {endpoint:?}. Attempts: {attempts:?}/10"); +// tokio::time::sleep(Duration::from_secs(1)).await; +// if attempts >= 10 { +// bail!("Failed to connect to {endpoint:?} even after 10 retries"); +// } +// } +// } -// Connect to a RPC socket addr with retry -pub async fn get_safenode_manager_rpc_client( - socket_addr: SocketAddr, -) -> Result> { - // get the new PeerId for the current NodeIndex - let endpoint = format!("https://{socket_addr}"); - let mut attempts = 0; - loop { - if let Ok(rpc_client) = SafeNodeManagerClient::connect(endpoint.clone()).await { - break Ok(rpc_client); - } - attempts += 1; - println!("Could not connect to rpc {endpoint:?}. Attempts: {attempts:?}/10"); - error!("Could not connect to rpc {endpoint:?}. 
Attempts: {attempts:?}/10"); - tokio::time::sleep(Duration::from_secs(1)).await; - if attempts >= 10 { - bail!("Failed to connect to {endpoint:?} even after 10 retries"); - } - } -} +// // Connect to a RPC socket addr with retry +// pub async fn get_safenode_manager_rpc_client( +// socket_addr: SocketAddr, +// ) -> Result> { +// // get the new PeerId for the current NodeIndex +// let endpoint = format!("https://{socket_addr}"); +// let mut attempts = 0; +// loop { +// if let Ok(rpc_client) = SafeNodeManagerClient::connect(endpoint.clone()).await { +// break Ok(rpc_client); +// } +// attempts += 1; +// println!("Could not connect to rpc {endpoint:?}. Attempts: {attempts:?}/10"); +// error!("Could not connect to rpc {endpoint:?}. Attempts: {attempts:?}/10"); +// tokio::time::sleep(Duration::from_secs(1)).await; +// if attempts >= 10 { +// bail!("Failed to connect to {endpoint:?} even after 10 retries"); +// } +// } +// } -// Returns all the PeerId for all the running nodes -pub async fn get_all_peer_ids(node_rpc_addresses: &Vec) -> Result> { - let mut all_peers = Vec::new(); +// // Returns all the PeerId for all the running nodes +// pub async fn get_all_peer_ids(node_rpc_addresses: &Vec) -> Result> { +// let mut all_peers = Vec::new(); - for addr in node_rpc_addresses { - let mut rpc_client = get_safenode_rpc_client(*addr).await?; +// for addr in node_rpc_addresses { +// let mut rpc_client = get_safenode_rpc_client(*addr).await?; - // get the peer_id - let response = rpc_client - .node_info(Request::new(NodeInfoRequest {})) - .await?; - let peer_id = PeerId::from_bytes(&response.get_ref().peer_id)?; - all_peers.push(peer_id); - } - debug!( - "Obtained the PeerId list for the running network with a node count of {}", - node_rpc_addresses.len() - ); - Ok(all_peers) -} +// // get the peer_id +// let response = rpc_client +// .node_info(Request::new(NodeInfoRequest {})) +// .await?; +// let peer_id = PeerId::from_bytes(&response.get_ref().peer_id)?; +// 
all_peers.push(peer_id); +// } +// debug!( +// "Obtained the PeerId list for the running network with a node count of {}", +// node_rpc_addresses.len() +// ); +// Ok(all_peers) +// } -/// A struct to facilitate restart of droplet/local nodes -pub struct NodeRestart { - // Deployment inventory is used incase of Droplet nodes and NodeRegistry incase of NonDroplet nodes. - inventory_file: Either, - next_to_restart_idx: usize, - skip_genesis_for_droplet: bool, - retain_peer_id: bool, -} +// /// A struct to facilitate restart of droplet/local nodes +// pub struct NodeRestart { +// // Deployment inventory is used incase of Droplet nodes and NodeRegistry incase of NonDroplet nodes. +// inventory_file: Either, +// next_to_restart_idx: usize, +// skip_genesis_for_droplet: bool, +// retain_peer_id: bool, +// } -impl NodeRestart { - /// The genesis address is skipped for droplets as we don't want to restart the Genesis node there. - /// The restarted node relies on the genesis multiaddr to bootstrap after restart. - /// - /// Setting retain_peer_id will soft restart the node by keeping the old PeerId, ports, records etc. - pub fn new(skip_genesis_for_droplet: bool, retain_peer_id: bool) -> Result { - let inventory_file = match DeploymentInventory::load() { - Ok(inv) => Either::Left(inv), - Err(_) => { - let reg = NodeRegistry::load(&get_local_node_registry_path()?)?; - Either::Right(reg) - } - }; +// impl NodeRestart { +// /// The genesis address is skipped for droplets as we don't want to restart the Genesis node there. +// /// The restarted node relies on the genesis multiaddr to bootstrap after restart. +// /// +// /// Setting retain_peer_id will soft restart the node by keeping the old PeerId, ports, records etc. 
+// pub fn new(skip_genesis_for_droplet: bool, retain_peer_id: bool) -> Result { +// let inventory_file = match DeploymentInventory::load() { +// Ok(inv) => Either::Left(inv), +// Err(_) => { +// let reg = NodeRegistry::load(&get_local_node_registry_path()?)?; +// Either::Right(reg) +// } +// }; - Ok(Self { - inventory_file, - next_to_restart_idx: 0, - skip_genesis_for_droplet, - retain_peer_id, - }) - } +// Ok(Self { +// inventory_file, +// next_to_restart_idx: 0, +// skip_genesis_for_droplet, +// retain_peer_id, +// }) +// } - /// Restart the next node in the list. - /// Set `loop_over` to `true` if we want to start over the restart process if we have already restarted all - /// the nodes. - /// Set `progress_on_error` to `true` if we want to restart the next node if you call this function again. - /// Else we'll be retrying the same node on the next call. - /// - /// Returns the `safenode's RPC addr` if we have restarted a node successfully. - /// Returns `None` if `loop_over` is `false` and we have not restarted any nodes. - pub async fn restart_next( - &mut self, - loop_over: bool, - progress_on_error: bool, - ) -> Result> { - let safenode_rpc_endpoint = match self.inventory_file.clone() { - Either::Left(inv) => { - // check if we've reached the end - if loop_over && self.next_to_restart_idx > inv.safenodemand_endpoints.len() { - self.next_to_restart_idx = 0; - } +// /// Restart the next node in the list. +// /// Set `loop_over` to `true` if we want to start over the restart process if we have already restarted all +// /// the nodes. +// /// Set `progress_on_error` to `true` if we want to restart the next node if you call this function again. +// /// Else we'll be retrying the same node on the next call. +// /// +// /// Returns the `safenode's RPC addr` if we have restarted a node successfully. +// /// Returns `None` if `loop_over` is `false` and we have not restarted any nodes. 
+// pub async fn restart_next( +// &mut self, +// loop_over: bool, +// progress_on_error: bool, +// ) -> Result> { +// let safenode_rpc_endpoint = match self.inventory_file.clone() { +// Either::Left(inv) => { +// // check if we've reached the end +// if loop_over && self.next_to_restart_idx > inv.safenodemand_endpoints.len() { +// self.next_to_restart_idx = 0; +// } - if let Some((peer_id, daemon_endpoint)) = inv - .safenodemand_endpoints - .iter() - .nth(self.next_to_restart_idx) - { - self.restart(*peer_id, *daemon_endpoint, progress_on_error) - .await?; +// if let Some((peer_id, daemon_endpoint)) = inv +// .safenodemand_endpoints +// .iter() +// .nth(self.next_to_restart_idx) +// { +// self.restart(*peer_id, *daemon_endpoint, progress_on_error) +// .await?; - let safenode_rpc_endpoint = inv - .rpc_endpoints - .get(peer_id) - .ok_or_eyre("Failed to obtain safenode rpc endpoint from inventory file")?; - Some(*safenode_rpc_endpoint) - } else { - warn!("We have restarted all the nodes in the list. Since loop_over is false, we are not restarting any nodes now."); - None - } - } - Either::Right(reg) => { - // check if we've reached the end - if loop_over && self.next_to_restart_idx > reg.nodes.len() { - self.next_to_restart_idx = 0; - } +// let safenode_rpc_endpoint = inv +// .rpc_endpoints +// .get(peer_id) +// .ok_or_eyre("Failed to obtain safenode rpc endpoint from inventory file")?; +// Some(*safenode_rpc_endpoint) +// } else { +// warn!("We have restarted all the nodes in the list. 
Since loop_over is false, we are not restarting any nodes now."); +// None +// } +// } +// Either::Right(reg) => { +// // check if we've reached the end +// if loop_over && self.next_to_restart_idx > reg.nodes.len() { +// self.next_to_restart_idx = 0; +// } - if let Some((peer_id, safenode_rpc_endpoint)) = reg - .nodes - .get(self.next_to_restart_idx) - .map(|node| (node.peer_id, node.rpc_socket_addr)) - { - let peer_id = - peer_id.ok_or_eyre("PeerId should be present for a local node")?; - self.restart(peer_id, safenode_rpc_endpoint, progress_on_error) - .await?; - Some(safenode_rpc_endpoint) - } else { - warn!("We have restarted all the nodes in the list. Since loop_over is false, we are not restarting any nodes now."); - None - } - } - }; +// if let Some((peer_id, safenode_rpc_endpoint)) = reg +// .nodes +// .get(self.next_to_restart_idx) +// .map(|node| (node.peer_id, node.rpc_socket_addr)) +// { +// let peer_id = +// peer_id.ok_or_eyre("PeerId should be present for a local node")?; +// self.restart(peer_id, safenode_rpc_endpoint, progress_on_error) +// .await?; +// Some(safenode_rpc_endpoint) +// } else { +// warn!("We have restarted all the nodes in the list. 
Since loop_over is false, we are not restarting any nodes now."); +// None +// } +// } +// }; - Ok(safenode_rpc_endpoint) - } +// Ok(safenode_rpc_endpoint) +// } - async fn restart( - &mut self, - peer_id: PeerId, - endpoint: SocketAddr, - progress_on_error: bool, - ) -> Result<()> { - match &self.inventory_file { - Either::Left(_inv) => { - match Droplet::restart_node(&peer_id, endpoint, self.retain_peer_id) - .await - .map_err(|err| eyre!("Failed to restart peer {peer_id:} on daemon endpoint: {endpoint:?} with err {err:?}")) { - Ok(_) => { - self.next_to_restart_idx += 1; - }, - Err(err) => { - if progress_on_error { - self.next_to_restart_idx += 1; - } - return Err(err); - }, - } - }, - Either::Right(_reg) => { - match NonDroplet::restart_node(endpoint, self.retain_peer_id).await - .map_err(|err| eyre!("Failed to restart peer {peer_id:?} on safenode RPC endpoint: {endpoint:?} with err {err:?}")) { - Ok(_) => { - self.next_to_restart_idx += 1; - }, - Err(err) => { - if progress_on_error { - self.next_to_restart_idx += 1; - } - return Err(err); - } - } - } - } - Ok(()) - } +// async fn restart( +// &mut self, +// peer_id: PeerId, +// endpoint: SocketAddr, +// progress_on_error: bool, +// ) -> Result<()> { +// match &self.inventory_file { +// Either::Left(_inv) => { +// match Droplet::restart_node(&peer_id, endpoint, self.retain_peer_id) +// .await +// .map_err(|err| eyre!("Failed to restart peer {peer_id:} on daemon endpoint: {endpoint:?} with err {err:?}")) { +// Ok(_) => { +// self.next_to_restart_idx += 1; +// }, +// Err(err) => { +// if progress_on_error { +// self.next_to_restart_idx += 1; +// } +// return Err(err); +// }, +// } +// }, +// Either::Right(_reg) => { +// match NonDroplet::restart_node(endpoint, self.retain_peer_id).await +// .map_err(|err| eyre!("Failed to restart peer {peer_id:?} on safenode RPC endpoint: {endpoint:?} with err {err:?}")) { +// Ok(_) => { +// self.next_to_restart_idx += 1; +// }, +// Err(err) => { +// if progress_on_error { +// 
self.next_to_restart_idx += 1; +// } +// return Err(err); +// } +// } +// } +// } +// Ok(()) +// } - pub fn reset_index(&mut self) { - self.next_to_restart_idx = 0; - } -} +// pub fn reset_index(&mut self) { +// self.next_to_restart_idx = 0; +// } +// } diff --git a/sn_node/tests/data_with_churn.rs b/sn_node/tests/data_with_churn.rs index 36626b920d..64d014f5dc 100644 --- a/sn_node/tests/data_with_churn.rs +++ b/sn_node/tests/data_with_churn.rs @@ -1,643 +1,643 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. - -mod common; - -use crate::common::client::{add_funds_to_wallet, get_client_and_funded_wallet}; -use assert_fs::TempDir; -use common::{ - client::{get_node_count, get_wallet}, - NodeRestart, -}; -use eyre::{bail, eyre, Result}; -use rand::{rngs::OsRng, Rng}; -use sn_client::{Client, Error, FilesApi, FilesDownload, Uploader, WalletClient}; -use sn_logging::LogBuilder; -use sn_protocol::{ - storage::{ChunkAddress, RegisterAddress, SpendAddress}, - NetworkAddress, -}; -use sn_registers::Permissions; -use sn_transfers::{CashNote, HotWallet, MainSecretKey, NanoTokens}; -use std::{ - collections::{BTreeMap, VecDeque}, - fmt, - fs::{create_dir_all, File}, - io::Write, - path::{Path, PathBuf}, - sync::Arc, - time::{Duration, Instant}, -}; -use tempfile::tempdir; -use tokio::{sync::RwLock, task::JoinHandle, time::sleep}; -use tracing::{debug, error, info, trace, warn}; -use xor_name::XorName; - -const EXTRA_CHURN_COUNT: u32 = 5; -const CHURN_CYCLES: u32 = 2; -const CHUNK_CREATION_RATIO_TO_CHURN: 
u32 = 15; -const REGISTER_CREATION_RATIO_TO_CHURN: u32 = 15; -const CASHNOTE_CREATION_RATIO_TO_CHURN: u32 = 15; - -const CHUNKS_SIZE: usize = 1024 * 1024; - -const CONTENT_QUERY_RATIO_TO_CHURN: u32 = 40; -const MAX_NUM_OF_QUERY_ATTEMPTS: u8 = 5; - -// Default total amount of time we run the checks for before reporting the outcome. -// It can be overriden by setting the 'TEST_DURATION_MINS' env var. -const TEST_DURATION: Duration = Duration::from_secs(60 * 60); // 1hr - -type ContentList = Arc>>; -type CashNoteMap = Arc>>; - -struct ContentError { - net_addr: NetworkAddress, - attempts: u8, - last_err: Error, -} - -impl fmt::Debug for ContentError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "{:?}, attempts: {}, last error: {:?}", - self.net_addr, self.attempts, self.last_err - ) - } -} - -type ContentErredList = Arc>>; - -#[tokio::test(flavor = "multi_thread")] -async fn data_availability_during_churn() -> Result<()> { - let _log_appender_guard = LogBuilder::init_multi_threaded_tokio_test("data_with_churn", false); - - let test_duration = if let Ok(str) = std::env::var("TEST_DURATION_MINS") { - Duration::from_secs(60 * str.parse::()?) - } else { - TEST_DURATION - }; - let node_count = get_node_count(); - - let churn_period = if let Ok(str) = std::env::var("TEST_TOTAL_CHURN_CYCLES") { - println!("Using value set in 'TEST_TOTAL_CHURN_CYCLES' env var: {str}"); - info!("Using value set in 'TEST_TOTAL_CHURN_CYCLES' env var: {str}"); - let cycles = str.parse::()?; - test_duration / cycles - } else { - // Ensure at least some nodes got churned twice. 
- test_duration - / std::cmp::max( - CHURN_CYCLES * node_count as u32, - node_count as u32 + EXTRA_CHURN_COUNT, - ) - }; - println!("Nodes will churn every {churn_period:?}"); - info!("Nodes will churn every {churn_period:?}"); - - // Create a cross thread usize for tracking churned nodes - let churn_count = Arc::new(RwLock::new(0_usize)); - - // Allow to disable Registers data creation/checks, storing and querying only Chunks during churn. - // Default to be not carry out chunks only during churn. - let chunks_only = std::env::var("CHUNKS_ONLY").is_ok(); - - println!( - "Running this test for {test_duration:?}{}...", - if chunks_only { " (Chunks only)" } else { "" } - ); - info!( - "Running this test for {test_duration:?}{}...", - if chunks_only { " (Chunks only)" } else { "" } - ); - - // The testnet will create a `faucet` at last. To avoid mess up with that, - // wait for a while to ensure the spends of that got settled. - sleep(std::time::Duration::from_secs(10)).await; - - info!("Creating a client and paying wallet..."); - let paying_wallet_dir = TempDir::new()?; - let (client, _paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; - - // Waiting for the paying_wallet funded. 
- sleep(std::time::Duration::from_secs(10)).await; - - info!( - "Client and paying_wallet created with signing key: {:?}", - client.signer_pk() - ); - - // Shared bucket where we keep track of content created/stored on the network - let content = ContentList::default(); - - // Shared bucket where we keep track of CashNotes created/stored on the network - let cash_notes = CashNoteMap::default(); - - // Spawn a task to create Registers and CashNotes at random locations, - // at a higher frequency than the churning events - if !chunks_only { - info!("Creating transfer wallet taking balance from the payment wallet"); - let transfers_wallet_dir = TempDir::new()?; - let transfers_wallet = add_funds_to_wallet(&client, transfers_wallet_dir.path()).await?; - info!("Transfer wallet created"); - - // Waiting for the transfers_wallet funded. - sleep(std::time::Duration::from_secs(10)).await; - - create_registers_task( - client.clone(), - Arc::clone(&content), - churn_period, - paying_wallet_dir.path().to_path_buf(), - ); - - create_cash_note_task( - client.clone(), - transfers_wallet, - Arc::clone(&content), - Arc::clone(&cash_notes), - churn_period, - ); - } - - println!("Uploading some chunks before carry out node churning"); - info!("Uploading some chunks before carry out node churning"); - - // Spawn a task to store Chunks at random locations, at a higher frequency than the churning events - store_chunks_task( - client.clone(), - Arc::clone(&content), - churn_period, - paying_wallet_dir.path().to_path_buf(), - ); - - // Spawn a task to churn nodes - churn_nodes_task(Arc::clone(&churn_count), test_duration, churn_period); - - // Shared bucket where we keep track of the content which erred when creating/storing/fetching. - // We remove them from this bucket if we are then able to query/fetch them successfully. - // We only try to query them 'MAX_NUM_OF_QUERY_ATTEMPTS' times, then report them effectivelly as failures. 
- let content_erred = ContentErredList::default(); - - // Shared bucket where we keep track of the content we failed to fetch for 'MAX_NUM_OF_QUERY_ATTEMPTS' times. - let failures = ContentErredList::default(); - - // Spawn a task to randomly query/fetch the content we create/store - query_content_task( - client.clone(), - Arc::clone(&content), - Arc::clone(&content_erred), - Arc::clone(&cash_notes), - churn_period, - paying_wallet_dir.path().to_path_buf(), - ); - - // Spawn a task to retry querying the content that failed, up to 'MAX_NUM_OF_QUERY_ATTEMPTS' times, - // and mark them as failures if they effectivelly cannot be retrieved. - retry_query_content_task( - client.clone(), - Arc::clone(&content_erred), - Arc::clone(&failures), - Arc::clone(&cash_notes), - churn_period, - paying_wallet_dir.path().to_path_buf(), - ); - - info!("All tasks have been spawned. The test is now running..."); - println!("All tasks have been spawned. The test is now running..."); - - let start_time = Instant::now(); - while start_time.elapsed() < test_duration { - let failed = failures.read().await; - info!( - "Current failures after {:?} ({}): {:?}", - start_time.elapsed(), - failed.len(), - failed.values() - ); - sleep(churn_period).await; - } - - println!(); - println!( - ">>>>>> Test stopping after running for {:?}. <<<<<<", - start_time.elapsed() - ); - println!("{:?} churn events happened.", *churn_count.read().await); - println!(); - - // The churning of storing_chunk/querying_chunk are all random, - // which will have a high chance that newly stored chunk got queried BEFORE - // the original holders churned out. - // i.e. the test may pass even without any replication - // Hence, we carry out a final round of query all data to confirm storage. 
- println!("Final querying confirmation of content"); - info!("Final querying confirmation of content"); - - // take one read lock to avoid holding the lock for the whole loop - // prevent any late content uploads being added to the list - let content = content.read().await; - let uploaded_content_count = content.len(); - let mut handles = Vec::new(); - for net_addr in content.iter() { - let client = client.clone(); - let net_addr = net_addr.clone(); - let cash_notes = Arc::clone(&cash_notes); - - let failures = Arc::clone(&failures); - let wallet_dir = paying_wallet_dir.to_path_buf().clone(); - let handle = tokio::spawn(async move { - final_retry_query_content( - &client, - &net_addr, - cash_notes, - churn_period, - failures, - &wallet_dir, - ) - .await - }); - handles.push(handle); - } - let results: Vec<_> = futures::future::join_all(handles).await; - - let content_queried_count = results.iter().filter(|r| r.is_ok()).count(); - assert_eq!( - content_queried_count, uploaded_content_count, - "Not all content was queried successfully" - ); - - println!("{content_queried_count:?} pieces of content queried"); - - assert_eq!( - content_queried_count, uploaded_content_count, - "Not all content was queried" - ); - - let failed = failures.read().await; - if failed.len() > 0 { - bail!("{} failure/s in test: {:?}", failed.len(), failed.values()); - } - - println!("Test passed after running for {:?}.", start_time.elapsed()); - Ok(()) -} - -// Spawns a task which periodically creates CashNotes at random locations. 
-fn create_cash_note_task( - client: Client, - transfers_wallet: HotWallet, - content: ContentList, - cash_notes: CashNoteMap, - churn_period: Duration, -) { - let _handle = tokio::spawn(async move { - // Create CashNote at a higher frequency than the churning events - let delay = churn_period / CASHNOTE_CREATION_RATIO_TO_CHURN; - - let mut wallet_client = WalletClient::new(client.clone(), transfers_wallet); - - loop { - sleep(delay).await; - - let dest_pk = MainSecretKey::random().main_pubkey(); - let cash_note = wallet_client - .send_cash_note(NanoTokens::from(10), dest_pk, true) - .await - .unwrap_or_else(|_| panic!("Failed to send CashNote to {dest_pk:?}")); - - let cash_note_addr = SpendAddress::from_unique_pubkey(&cash_note.unique_pubkey()); - let net_addr = NetworkAddress::SpendAddress(cash_note_addr); - println!("Created CashNote at {cash_note_addr:?} after {delay:?}"); - debug!("Created CashNote at {cash_note_addr:?} after {delay:?}"); - content.write().await.push_back(net_addr); - let _ = cash_notes.write().await.insert(cash_note_addr, cash_note); - } - }); -} - -// Spawns a task which periodically creates Registers at random locations. 
-fn create_registers_task( - client: Client, - content: ContentList, - churn_period: Duration, - paying_wallet_dir: PathBuf, -) { - let _handle = tokio::spawn(async move { - // Create Registers at a higher frequency than the churning events - let delay = churn_period / REGISTER_CREATION_RATIO_TO_CHURN; - - let paying_wallet = get_wallet(&paying_wallet_dir); - - let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); - - loop { - let meta = XorName(rand::random()); - let owner = client.signer_pk(); - - let addr = RegisterAddress::new(meta, owner); - println!("Creating Register at {addr:?} in {delay:?}"); - debug!("Creating Register at {addr:?} in {delay:?}"); - sleep(delay).await; - - match client - .create_and_pay_for_register(meta, &mut wallet_client, true, Permissions::default()) - .await - { - Ok(_) => content - .write() - .await - .push_back(NetworkAddress::RegisterAddress(addr)), - Err(err) => println!("Discarding new Register ({addr:?}) due to error: {err:?}"), - } - } - }); -} - -// Spawns a task which periodically stores Chunks at random locations. 
-fn store_chunks_task( - client: Client, - content: ContentList, - churn_period: Duration, - paying_wallet_dir: PathBuf, -) { - let _handle: JoinHandle> = tokio::spawn(async move { - let temp_dir = tempdir().expect("Can not create a temp directory for store_chunks_task!"); - let output_dir = temp_dir.path().join("chunk_path"); - create_dir_all(output_dir.clone()) - .expect("failed to create output dir for encrypted chunks"); - - // Store Chunks at a higher frequency than the churning events - let delay = churn_period / CHUNK_CREATION_RATIO_TO_CHURN; - - let mut rng = OsRng; - - loop { - let random_bytes: Vec = ::std::iter::repeat(()) - .map(|()| rng.gen::()) - .take(CHUNKS_SIZE) - .collect(); - let chunk_size = random_bytes.len(); - - let chunk_name = XorName::from_content(&random_bytes); - - let file_path = temp_dir.path().join(hex::encode(chunk_name)); - let mut chunk_file = - File::create(&file_path).expect("failed to create temp chunk file"); - chunk_file - .write_all(&random_bytes) - .expect("failed to write to temp chunk file"); - - let (addr, _data_map, _file_size, chunks) = - FilesApi::chunk_file(&file_path, &output_dir, true).expect("Failed to chunk bytes"); - - info!( - "Paying storage for ({}) new Chunk/s of file ({} bytes) at {addr:?} in {delay:?}", - chunks.len(), - chunk_size - ); - sleep(delay).await; - - let chunks_len = chunks.len(); - let chunks_name = chunks.iter().map(|(name, _)| *name).collect::>(); - - let mut uploader = Uploader::new(client.clone(), paying_wallet_dir.clone()); - uploader.set_show_holders(true); - uploader.insert_chunk_paths(chunks); - - let cost = match uploader.start_upload().await { - Ok(stats) => stats - .royalty_fees - .checked_add(stats.storage_cost) - .ok_or(eyre!("Total storage cost exceed possible token amount"))?, - Err(err) => { - bail!("Bailing w/ new Chunk ({addr:?}) due to error: {err:?}"); - } - }; - - println!( - "Stored ({chunks_len}) Chunk/s at cost: {cost:?} of file ({chunk_size} bytes) at {addr:?} in 
{delay:?}" - ); - info!( - "Stored ({chunks_len}) Chunk/s at cost: {cost:?} of file ({chunk_size} bytes) at {addr:?} in {delay:?}" - ); - sleep(delay).await; - - for chunk_name in chunks_name { - content - .write() - .await - .push_back(NetworkAddress::ChunkAddress(ChunkAddress::new(chunk_name))); - } - } - }); -} - -// Spawns a task which periodically queries a content by randomly choosing it from the list -// of content created by another task. -fn query_content_task( - client: Client, - content: ContentList, - content_erred: ContentErredList, - cash_notes: CashNoteMap, - churn_period: Duration, - root_dir: PathBuf, -) { - let _handle = tokio::spawn(async move { - let delay = churn_period / CONTENT_QUERY_RATIO_TO_CHURN; - loop { - let len = content.read().await.len(); - if len == 0 { - println!("No content created/stored just yet, let's try in {delay:?} ..."); - info!("No content created/stored just yet, let's try in {delay:?} ..."); - sleep(delay).await; - continue; - } - - // let's choose a random content to query, picking it from the list of created - let index = rand::thread_rng().gen_range(0..len); - let net_addr = content.read().await[index].clone(); - trace!("Querying content (bucket index: {index}) at {net_addr:?} in {delay:?}"); - sleep(delay).await; - - match query_content(&client, &root_dir, &net_addr, Arc::clone(&cash_notes)).await { - Ok(_) => { - let _ = content_erred.write().await.remove(&net_addr); - } - Err(last_err) => { - println!( - "Failed to query content (index: {index}) at {net_addr}: {last_err:?}" - ); - error!("Failed to query content (index: {index}) at {net_addr}: {last_err:?}"); - // mark it to try 'MAX_NUM_OF_QUERY_ATTEMPTS' times. 
- let _ = content_erred - .write() - .await - .entry(net_addr.clone()) - .and_modify(|curr| curr.attempts += 1) - .or_insert(ContentError { - net_addr, - attempts: 1, - last_err, - }); - } - } - } - }); -} - -// Spawns a task which periodically picks up a node, and restarts it to cause churn in the network. -fn churn_nodes_task( - churn_count: Arc>, - test_duration: Duration, - churn_period: Duration, -) { - let start = Instant::now(); - let _handle: JoinHandle> = tokio::spawn(async move { - let mut node_restart = NodeRestart::new(true, false)?; - - loop { - sleep(churn_period).await; - - // break out if we've run the duration of churn - if start.elapsed() > test_duration { - debug!("Test duration reached, stopping churn nodes task"); - break; - } - - if let Err(err) = node_restart.restart_next(true, true).await { - println!("Failed to restart node {err}"); - info!("Failed to restart node {err}"); - continue; - } - - *churn_count.write().await += 1; - } - Ok(()) - }); -} - -// Checks (periodically) for any content that an error was reported either at the moment of its creation or -// in a later query attempt. 
-fn retry_query_content_task( - client: Client, - content_erred: ContentErredList, - failures: ContentErredList, - cash_notes: CashNoteMap, - churn_period: Duration, - wallet_dir: PathBuf, -) { - let _handle = tokio::spawn(async move { - let delay = 2 * churn_period; - loop { - sleep(delay).await; - - // let's try to query from the bucket of those that erred upon creation/query - let erred = content_erred.write().await.pop_first(); - - if let Some((net_addr, mut content_error)) = erred { - let attempts = content_error.attempts + 1; - - println!("Querying erred content at {net_addr}, attempt: #{attempts} ..."); - info!("Querying erred content at {net_addr}, attempt: #{attempts} ..."); - if let Err(last_err) = - query_content(&client, &wallet_dir, &net_addr, Arc::clone(&cash_notes)).await - { - println!("Erred content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); - warn!("Erred content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); - // We only keep it to retry 'MAX_NUM_OF_QUERY_ATTEMPTS' times, - // otherwise report it effectivelly as failure. 
- content_error.attempts = attempts; - content_error.last_err = last_err; - - if attempts == MAX_NUM_OF_QUERY_ATTEMPTS { - let _ = failures.write().await.insert(net_addr, content_error); - } else { - let _ = content_erred.write().await.insert(net_addr, content_error); - } - } else { - // remove from fails and errs if we had a success and it was added meanwhile perchance - let _ = failures.write().await.remove(&net_addr); - let _ = content_erred.write().await.remove(&net_addr); - } - } - } - }); -} - -async fn final_retry_query_content( - client: &Client, - net_addr: &NetworkAddress, - cash_notes: CashNoteMap, - churn_period: Duration, - failures: ContentErredList, - wallet_dir: &Path, -) -> Result<()> { - let mut attempts = 1; - let net_addr = net_addr.clone(); - loop { - println!("Final querying content at {net_addr}, attempt: #{attempts} ..."); - debug!("Final querying content at {net_addr}, attempt: #{attempts} ..."); - if let Err(last_err) = - query_content(client, wallet_dir, &net_addr, Arc::clone(&cash_notes)).await - { - if attempts == MAX_NUM_OF_QUERY_ATTEMPTS { - println!("Final check: Content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); - error!("Final check: Content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); - bail!("Final check: Content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); - } else { - attempts += 1; - let delay = 2 * churn_period; - debug!("Delaying last check for {delay:?} ..."); - sleep(delay).await; - continue; - } - } else { - failures.write().await.remove(&net_addr); - // content retrieved fine - return Ok(()); - } - } -} - -async fn query_content( - client: &Client, - wallet_dir: &Path, - net_addr: &NetworkAddress, - cash_notes: CashNoteMap, -) -> Result<(), Error> { - match net_addr { - NetworkAddress::SpendAddress(addr) => { - if let Some(cash_note) = cash_notes.read().await.get(addr) { - match 
client.verify_cashnote(cash_note).await { - Ok(_) => Ok(()), - Err(err) => Err(Error::CouldNotVerifyTransfer(format!( - "Verification of cash_note {addr:?} failed with error: {err:?}" - ))), - } - } else { - Err(Error::CouldNotVerifyTransfer(format!( - "Do not have the CashNote: {addr:?}" - ))) - } - } - NetworkAddress::RegisterAddress(addr) => { - let _ = client.get_register(*addr).await?; - Ok(()) - } - NetworkAddress::ChunkAddress(addr) => { - let files_api = FilesApi::new(client.clone(), wallet_dir.to_path_buf()); - let mut file_download = FilesDownload::new(files_api); - let _ = file_download.download_file(*addr, None).await?; - - Ok(()) - } - _other => Ok(()), // we don't create/store any other type of content in this test yet - } -} +// // Copyright 2024 MaidSafe.net limited. +// // +// // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// // KIND, either express or implied. Please review the Licences for the specific language governing +// // permissions and limitations relating to use of the SAFE Network Software. 
+ +// mod common; + +// use crate::common::{ +// client::{add_funds_to_wallet, get_client_and_funded_wallet, get_node_count, get_wallet}, +// NodeRestart, +// }; +// use assert_fs::TempDir; +// use eyre::{bail, eyre, Result}; +// use rand::{rngs::OsRng, Rng}; +// // TODO: Update `autonomi` to have relevant types here +// // use sn_client::{Client, Error, FilesApi, FilesDownload, Uploader, WalletClient}; +// use sn_logging::LogBuilder; +// use sn_protocol::{ +// storage::{ChunkAddress, RegisterAddress, SpendAddress}, +// NetworkAddress, +// }; +// use sn_registers::Permissions; +// use sn_transfers::{CashNote, HotWallet, MainSecretKey, NanoTokens}; +// use std::{ +// collections::{BTreeMap, VecDeque}, +// fmt, +// fs::{create_dir_all, File}, +// io::Write, +// path::{Path, PathBuf}, +// sync::Arc, +// time::{Duration, Instant}, +// }; +// use tempfile::tempdir; +// use tokio::{sync::RwLock, task::JoinHandle, time::sleep}; +// use tracing::{debug, error, info, trace, warn}; +// use xor_name::XorName; + +// const EXTRA_CHURN_COUNT: u32 = 5; +// const CHURN_CYCLES: u32 = 2; +// const CHUNK_CREATION_RATIO_TO_CHURN: u32 = 15; +// const REGISTER_CREATION_RATIO_TO_CHURN: u32 = 15; +// const CASHNOTE_CREATION_RATIO_TO_CHURN: u32 = 15; + +// const CHUNKS_SIZE: usize = 1024 * 1024; + +// const CONTENT_QUERY_RATIO_TO_CHURN: u32 = 40; +// const MAX_NUM_OF_QUERY_ATTEMPTS: u8 = 5; + +// // Default total amount of time we run the checks for before reporting the outcome. +// // It can be overriden by setting the 'TEST_DURATION_MINS' env var. 
+// const TEST_DURATION: Duration = Duration::from_secs(60 * 60); // 1hr + +// type ContentList = Arc>>; +// type CashNoteMap = Arc>>; + +// struct ContentError { +// net_addr: NetworkAddress, +// attempts: u8, +// last_err: Error, +// } + +// impl fmt::Debug for ContentError { +// fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +// write!( +// f, +// "{:?}, attempts: {}, last error: {:?}", +// self.net_addr, self.attempts, self.last_err +// ) +// } +// } + +// type ContentErredList = Arc>>; + +// #[tokio::test(flavor = "multi_thread")] +// async fn data_availability_during_churn() -> Result<()> { +// let _log_appender_guard = LogBuilder::init_multi_threaded_tokio_test("data_with_churn", false); + +// let test_duration = if let Ok(str) = std::env::var("TEST_DURATION_MINS") { +// Duration::from_secs(60 * str.parse::()?) +// } else { +// TEST_DURATION +// }; +// let node_count = get_node_count(); + +// let churn_period = if let Ok(str) = std::env::var("TEST_TOTAL_CHURN_CYCLES") { +// println!("Using value set in 'TEST_TOTAL_CHURN_CYCLES' env var: {str}"); +// info!("Using value set in 'TEST_TOTAL_CHURN_CYCLES' env var: {str}"); +// let cycles = str.parse::()?; +// test_duration / cycles +// } else { +// // Ensure at least some nodes got churned twice. +// test_duration +// / std::cmp::max( +// CHURN_CYCLES * node_count as u32, +// node_count as u32 + EXTRA_CHURN_COUNT, +// ) +// }; +// println!("Nodes will churn every {churn_period:?}"); +// info!("Nodes will churn every {churn_period:?}"); + +// // Create a cross thread usize for tracking churned nodes +// let churn_count = Arc::new(RwLock::new(0_usize)); + +// // Allow to disable Registers data creation/checks, storing and querying only Chunks during churn. +// // Default to be not carry out chunks only during churn. 
+// let chunks_only = std::env::var("CHUNKS_ONLY").is_ok(); + +// println!( +// "Running this test for {test_duration:?}{}...", +// if chunks_only { " (Chunks only)" } else { "" } +// ); +// info!( +// "Running this test for {test_duration:?}{}...", +// if chunks_only { " (Chunks only)" } else { "" } +// ); + +// // The testnet will create a `faucet` at last. To avoid mess up with that, +// // wait for a while to ensure the spends of that got settled. +// sleep(std::time::Duration::from_secs(10)).await; + +// info!("Creating a client and paying wallet..."); +// let paying_wallet_dir = TempDir::new()?; +// let (client, _paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; + +// // Waiting for the paying_wallet funded. +// sleep(std::time::Duration::from_secs(10)).await; + +// info!( +// "Client and paying_wallet created with signing key: {:?}", +// client.signer_pk() +// ); + +// // Shared bucket where we keep track of content created/stored on the network +// let content = ContentList::default(); + +// // Shared bucket where we keep track of CashNotes created/stored on the network +// let cash_notes = CashNoteMap::default(); + +// // Spawn a task to create Registers and CashNotes at random locations, +// // at a higher frequency than the churning events +// if !chunks_only { +// info!("Creating transfer wallet taking balance from the payment wallet"); +// let transfers_wallet_dir = TempDir::new()?; +// let transfers_wallet = add_funds_to_wallet(&client, transfers_wallet_dir.path()).await?; +// info!("Transfer wallet created"); + +// // Waiting for the transfers_wallet funded. 
+// sleep(std::time::Duration::from_secs(10)).await; + +// create_registers_task( +// client.clone(), +// Arc::clone(&content), +// churn_period, +// paying_wallet_dir.path().to_path_buf(), +// ); + +// create_cash_note_task( +// client.clone(), +// transfers_wallet, +// Arc::clone(&content), +// Arc::clone(&cash_notes), +// churn_period, +// ); +// } + +// println!("Uploading some chunks before carry out node churning"); +// info!("Uploading some chunks before carry out node churning"); + +// // Spawn a task to store Chunks at random locations, at a higher frequency than the churning events +// store_chunks_task( +// client.clone(), +// Arc::clone(&content), +// churn_period, +// paying_wallet_dir.path().to_path_buf(), +// ); + +// // Spawn a task to churn nodes +// churn_nodes_task(Arc::clone(&churn_count), test_duration, churn_period); + +// // Shared bucket where we keep track of the content which erred when creating/storing/fetching. +// // We remove them from this bucket if we are then able to query/fetch them successfully. +// // We only try to query them 'MAX_NUM_OF_QUERY_ATTEMPTS' times, then report them effectivelly as failures. +// let content_erred = ContentErredList::default(); + +// // Shared bucket where we keep track of the content we failed to fetch for 'MAX_NUM_OF_QUERY_ATTEMPTS' times. +// let failures = ContentErredList::default(); + +// // Spawn a task to randomly query/fetch the content we create/store +// query_content_task( +// client.clone(), +// Arc::clone(&content), +// Arc::clone(&content_erred), +// Arc::clone(&cash_notes), +// churn_period, +// paying_wallet_dir.path().to_path_buf(), +// ); + +// // Spawn a task to retry querying the content that failed, up to 'MAX_NUM_OF_QUERY_ATTEMPTS' times, +// // and mark them as failures if they effectivelly cannot be retrieved. 
+// retry_query_content_task( +// client.clone(), +// Arc::clone(&content_erred), +// Arc::clone(&failures), +// Arc::clone(&cash_notes), +// churn_period, +// paying_wallet_dir.path().to_path_buf(), +// ); + +// info!("All tasks have been spawned. The test is now running..."); +// println!("All tasks have been spawned. The test is now running..."); + +// let start_time = Instant::now(); +// while start_time.elapsed() < test_duration { +// let failed = failures.read().await; +// info!( +// "Current failures after {:?} ({}): {:?}", +// start_time.elapsed(), +// failed.len(), +// failed.values() +// ); +// sleep(churn_period).await; +// } + +// println!(); +// println!( +// ">>>>>> Test stopping after running for {:?}. <<<<<<", +// start_time.elapsed() +// ); +// println!("{:?} churn events happened.", *churn_count.read().await); +// println!(); + +// // The churning of storing_chunk/querying_chunk are all random, +// // which will have a high chance that newly stored chunk got queried BEFORE +// // the original holders churned out. +// // i.e. the test may pass even without any replication +// // Hence, we carry out a final round of query all data to confirm storage. 
+// println!("Final querying confirmation of content"); +// info!("Final querying confirmation of content"); + +// // take one read lock to avoid holding the lock for the whole loop +// // prevent any late content uploads being added to the list +// let content = content.read().await; +// let uploaded_content_count = content.len(); +// let mut handles = Vec::new(); +// for net_addr in content.iter() { +// let client = client.clone(); +// let net_addr = net_addr.clone(); +// let cash_notes = Arc::clone(&cash_notes); + +// let failures = Arc::clone(&failures); +// let wallet_dir = paying_wallet_dir.to_path_buf().clone(); +// let handle = tokio::spawn(async move { +// final_retry_query_content( +// &client, +// &net_addr, +// cash_notes, +// churn_period, +// failures, +// &wallet_dir, +// ) +// .await +// }); +// handles.push(handle); +// } +// let results: Vec<_> = futures::future::join_all(handles).await; + +// let content_queried_count = results.iter().filter(|r| r.is_ok()).count(); +// assert_eq!( +// content_queried_count, uploaded_content_count, +// "Not all content was queried successfully" +// ); + +// println!("{content_queried_count:?} pieces of content queried"); + +// assert_eq!( +// content_queried_count, uploaded_content_count, +// "Not all content was queried" +// ); + +// let failed = failures.read().await; +// if failed.len() > 0 { +// bail!("{} failure/s in test: {:?}", failed.len(), failed.values()); +// } + +// println!("Test passed after running for {:?}.", start_time.elapsed()); +// Ok(()) +// } + +// // Spawns a task which periodically creates CashNotes at random locations. 
+// fn create_cash_note_task( +// client: Client, +// transfers_wallet: HotWallet, +// content: ContentList, +// cash_notes: CashNoteMap, +// churn_period: Duration, +// ) { +// let _handle = tokio::spawn(async move { +// // Create CashNote at a higher frequency than the churning events +// let delay = churn_period / CASHNOTE_CREATION_RATIO_TO_CHURN; + +// let mut wallet_client = WalletClient::new(client.clone(), transfers_wallet); + +// loop { +// sleep(delay).await; + +// let dest_pk = MainSecretKey::random().main_pubkey(); +// let cash_note = wallet_client +// .send_cash_note(NanoTokens::from(10), dest_pk, true) +// .await +// .unwrap_or_else(|_| panic!("Failed to send CashNote to {dest_pk:?}")); + +// let cash_note_addr = SpendAddress::from_unique_pubkey(&cash_note.unique_pubkey()); +// let net_addr = NetworkAddress::SpendAddress(cash_note_addr); +// println!("Created CashNote at {cash_note_addr:?} after {delay:?}"); +// debug!("Created CashNote at {cash_note_addr:?} after {delay:?}"); +// content.write().await.push_back(net_addr); +// let _ = cash_notes.write().await.insert(cash_note_addr, cash_note); +// } +// }); +// } + +// // Spawns a task which periodically creates Registers at random locations. 
+// fn create_registers_task( +// client: Client, +// content: ContentList, +// churn_period: Duration, +// paying_wallet_dir: PathBuf, +// ) { +// let _handle = tokio::spawn(async move { +// // Create Registers at a higher frequency than the churning events +// let delay = churn_period / REGISTER_CREATION_RATIO_TO_CHURN; + +// let paying_wallet = get_wallet(&paying_wallet_dir); + +// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); + +// loop { +// let meta = XorName(rand::random()); +// let owner = client.signer_pk(); + +// let addr = RegisterAddress::new(meta, owner); +// println!("Creating Register at {addr:?} in {delay:?}"); +// debug!("Creating Register at {addr:?} in {delay:?}"); +// sleep(delay).await; + +// match client +// .create_and_pay_for_register(meta, &mut wallet_client, true, Permissions::default()) +// .await +// { +// Ok(_) => content +// .write() +// .await +// .push_back(NetworkAddress::RegisterAddress(addr)), +// Err(err) => println!("Discarding new Register ({addr:?}) due to error: {err:?}"), +// } +// } +// }); +// } + +// // Spawns a task which periodically stores Chunks at random locations. 
+// fn store_chunks_task( +// client: Client, +// content: ContentList, +// churn_period: Duration, +// paying_wallet_dir: PathBuf, +// ) { +// let _handle: JoinHandle> = tokio::spawn(async move { +// let temp_dir = tempdir().expect("Can not create a temp directory for store_chunks_task!"); +// let output_dir = temp_dir.path().join("chunk_path"); +// create_dir_all(output_dir.clone()) +// .expect("failed to create output dir for encrypted chunks"); + +// // Store Chunks at a higher frequency than the churning events +// let delay = churn_period / CHUNK_CREATION_RATIO_TO_CHURN; + +// let mut rng = OsRng; + +// loop { +// let random_bytes: Vec = ::std::iter::repeat(()) +// .map(|()| rng.gen::()) +// .take(CHUNKS_SIZE) +// .collect(); +// let chunk_size = random_bytes.len(); + +// let chunk_name = XorName::from_content(&random_bytes); + +// let file_path = temp_dir.path().join(hex::encode(chunk_name)); +// let mut chunk_file = +// File::create(&file_path).expect("failed to create temp chunk file"); +// chunk_file +// .write_all(&random_bytes) +// .expect("failed to write to temp chunk file"); + +// let (addr, _data_map, _file_size, chunks) = +// FilesApi::chunk_file(&file_path, &output_dir, true).expect("Failed to chunk bytes"); + +// info!( +// "Paying storage for ({}) new Chunk/s of file ({} bytes) at {addr:?} in {delay:?}", +// chunks.len(), +// chunk_size +// ); +// sleep(delay).await; + +// let chunks_len = chunks.len(); +// let chunks_name = chunks.iter().map(|(name, _)| *name).collect::>(); + +// let mut uploader = Uploader::new(client.clone(), paying_wallet_dir.clone()); +// uploader.set_show_holders(true); +// uploader.insert_chunk_paths(chunks); + +// let cost = match uploader.start_upload().await { +// Ok(stats) => stats +// .royalty_fees +// .checked_add(stats.storage_cost) +// .ok_or(eyre!("Total storage cost exceed possible token amount"))?, +// Err(err) => { +// bail!("Bailing w/ new Chunk ({addr:?}) due to error: {err:?}"); +// } +// }; + +// println!( 
+// "Stored ({chunks_len}) Chunk/s at cost: {cost:?} of file ({chunk_size} bytes) at {addr:?} in {delay:?}" +// ); +// info!( +// "Stored ({chunks_len}) Chunk/s at cost: {cost:?} of file ({chunk_size} bytes) at {addr:?} in {delay:?}" +// ); +// sleep(delay).await; + +// for chunk_name in chunks_name { +// content +// .write() +// .await +// .push_back(NetworkAddress::ChunkAddress(ChunkAddress::new(chunk_name))); +// } +// } +// }); +// } + +// // Spawns a task which periodically queries a content by randomly choosing it from the list +// // of content created by another task. +// fn query_content_task( +// client: Client, +// content: ContentList, +// content_erred: ContentErredList, +// cash_notes: CashNoteMap, +// churn_period: Duration, +// root_dir: PathBuf, +// ) { +// let _handle = tokio::spawn(async move { +// let delay = churn_period / CONTENT_QUERY_RATIO_TO_CHURN; +// loop { +// let len = content.read().await.len(); +// if len == 0 { +// println!("No content created/stored just yet, let's try in {delay:?} ..."); +// info!("No content created/stored just yet, let's try in {delay:?} ..."); +// sleep(delay).await; +// continue; +// } + +// // let's choose a random content to query, picking it from the list of created +// let index = rand::thread_rng().gen_range(0..len); +// let net_addr = content.read().await[index].clone(); +// trace!("Querying content (bucket index: {index}) at {net_addr:?} in {delay:?}"); +// sleep(delay).await; + +// match query_content(&client, &root_dir, &net_addr, Arc::clone(&cash_notes)).await { +// Ok(_) => { +// let _ = content_erred.write().await.remove(&net_addr); +// } +// Err(last_err) => { +// println!( +// "Failed to query content (index: {index}) at {net_addr}: {last_err:?}" +// ); +// error!("Failed to query content (index: {index}) at {net_addr}: {last_err:?}"); +// // mark it to try 'MAX_NUM_OF_QUERY_ATTEMPTS' times. 
+// let _ = content_erred +// .write() +// .await +// .entry(net_addr.clone()) +// .and_modify(|curr| curr.attempts += 1) +// .or_insert(ContentError { +// net_addr, +// attempts: 1, +// last_err, +// }); +// } +// } +// } +// }); +// } + +// // Spawns a task which periodically picks up a node, and restarts it to cause churn in the network. +// fn churn_nodes_task( +// churn_count: Arc>, +// test_duration: Duration, +// churn_period: Duration, +// ) { +// let start = Instant::now(); +// let _handle: JoinHandle> = tokio::spawn(async move { +// let mut node_restart = NodeRestart::new(true, false)?; + +// loop { +// sleep(churn_period).await; + +// // break out if we've run the duration of churn +// if start.elapsed() > test_duration { +// debug!("Test duration reached, stopping churn nodes task"); +// break; +// } + +// if let Err(err) = node_restart.restart_next(true, true).await { +// println!("Failed to restart node {err}"); +// info!("Failed to restart node {err}"); +// continue; +// } + +// *churn_count.write().await += 1; +// } +// Ok(()) +// }); +// } + +// // Checks (periodically) for any content that an error was reported either at the moment of its creation or +// // in a later query attempt. 
+// fn retry_query_content_task( +// client: Client, +// content_erred: ContentErredList, +// failures: ContentErredList, +// cash_notes: CashNoteMap, +// churn_period: Duration, +// wallet_dir: PathBuf, +// ) { +// let _handle = tokio::spawn(async move { +// let delay = 2 * churn_period; +// loop { +// sleep(delay).await; + +// // let's try to query from the bucket of those that erred upon creation/query +// let erred = content_erred.write().await.pop_first(); + +// if let Some((net_addr, mut content_error)) = erred { +// let attempts = content_error.attempts + 1; + +// println!("Querying erred content at {net_addr}, attempt: #{attempts} ..."); +// info!("Querying erred content at {net_addr}, attempt: #{attempts} ..."); +// if let Err(last_err) = +// query_content(&client, &wallet_dir, &net_addr, Arc::clone(&cash_notes)).await +// { +// println!("Erred content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); +// warn!("Erred content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); +// // We only keep it to retry 'MAX_NUM_OF_QUERY_ATTEMPTS' times, +// // otherwise report it effectively as failure. 
+// content_error.attempts = attempts; +// content_error.last_err = last_err; + +// if attempts == MAX_NUM_OF_QUERY_ATTEMPTS { +// let _ = failures.write().await.insert(net_addr, content_error); +// } else { +// let _ = content_erred.write().await.insert(net_addr, content_error); +// } +// } else { +// // remove from fails and errs if we had a success and it was added meanwhile perchance +// let _ = failures.write().await.remove(&net_addr); +// let _ = content_erred.write().await.remove(&net_addr); +// } +// } +// } +// }); +// } + +// async fn final_retry_query_content( +// client: &Client, +// net_addr: &NetworkAddress, +// cash_notes: CashNoteMap, +// churn_period: Duration, +// failures: ContentErredList, +// wallet_dir: &Path, +// ) -> Result<()> { +// let mut attempts = 1; +// let net_addr = net_addr.clone(); +// loop { +// println!("Final querying content at {net_addr}, attempt: #{attempts} ..."); +// debug!("Final querying content at {net_addr}, attempt: #{attempts} ..."); +// if let Err(last_err) = +// query_content(client, wallet_dir, &net_addr, Arc::clone(&cash_notes)).await +// { +// if attempts == MAX_NUM_OF_QUERY_ATTEMPTS { +// println!("Final check: Content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); +// error!("Final check: Content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); +// bail!("Final check: Content is still not retrievable at {net_addr} after {attempts} attempts: {last_err:?}"); +// } else { +// attempts += 1; +// let delay = 2 * churn_period; +// debug!("Delaying last check for {delay:?} ..."); +// sleep(delay).await; +// continue; +// } +// } else { +// failures.write().await.remove(&net_addr); +// // content retrieved fine +// return Ok(()); +// } +// } +// } + +// async fn query_content( +// client: &Client, +// wallet_dir: &Path, +// net_addr: &NetworkAddress, +// cash_notes: CashNoteMap, +// ) -> Result<(), Error> { +// match net_addr { +// 
NetworkAddress::SpendAddress(addr) => { +// if let Some(cash_note) = cash_notes.read().await.get(addr) { +// match client.verify_cashnote(cash_note).await { +// Ok(_) => Ok(()), +// Err(err) => Err(Error::CouldNotVerifyTransfer(format!( +// "Verification of cash_note {addr:?} failed with error: {err:?}" +// ))), +// } +// } else { +// Err(Error::CouldNotVerifyTransfer(format!( +// "Do not have the CashNote: {addr:?}" +// ))) +// } +// } +// NetworkAddress::RegisterAddress(addr) => { +// let _ = client.get_register(*addr).await?; +// Ok(()) +// } +// NetworkAddress::ChunkAddress(addr) => { +// let files_api = FilesApi::new(client.clone(), wallet_dir.to_path_buf()); +// let mut file_download = FilesDownload::new(files_api); +// let _ = file_download.download_file(*addr, None).await?; + +// Ok(()) +// } +// _other => Ok(()), // we don't create/store any other type of content in this test yet +// } +// } diff --git a/sn_node/tests/verify_data_location.rs b/sn_node/tests/verify_data_location.rs index 3abf477b18..d81cc8a8d6 100644 --- a/sn_node/tests/verify_data_location.rs +++ b/sn_node/tests/verify_data_location.rs @@ -1,427 +1,428 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. 
- -#![allow(clippy::mutable_key_type)] -mod common; - -use crate::common::{ - client::{get_all_rpc_addresses, get_client_and_funded_wallet}, - get_all_peer_ids, get_safenode_rpc_client, NodeRestart, -}; -use assert_fs::TempDir; -use common::client::get_wallet; -use eyre::{eyre, Result}; -use libp2p::{ - kad::{KBucketKey, RecordKey}, - PeerId, -}; -use rand::{rngs::OsRng, Rng}; -use sn_client::{Client, FilesApi, Uploader, WalletClient}; -use sn_logging::LogBuilder; -use sn_networking::sort_peers_by_key; -use sn_protocol::{ - safenode_proto::{NodeInfoRequest, RecordAddressesRequest}, - NetworkAddress, PrettyPrintRecordKey, CLOSE_GROUP_SIZE, -}; -use sn_registers::{Permissions, RegisterAddress}; -use std::{ - collections::{BTreeSet, HashMap, HashSet}, - fs::File, - io::Write, - net::SocketAddr, - path::PathBuf, - time::{Duration, Instant}, -}; -use tonic::Request; -use tracing::{debug, error, info}; -use xor_name::XorName; - -const CHUNK_SIZE: usize = 1024; - -// VERIFICATION_DELAY is set based on the dead peer detection interval -// Once a node has been restarted, it takes VERIFICATION_DELAY time -// for the old peer to be removed from the routing table. -// Replication is then kicked off to distribute the data to the new closest -// nodes, hence verification has to be performed after this. -const VERIFICATION_DELAY: Duration = Duration::from_secs(60); - -/// Number of times to retry verification if it fails -const VERIFICATION_ATTEMPTS: usize = 5; - -/// Length of time to wait before re-verifying the data location -const REVERIFICATION_DELAY: Duration = - Duration::from_secs(sn_node::PERIODIC_REPLICATION_INTERVAL_MAX_S); - -// Default number of churns that should be performed. After each churn, we -// wait for VERIFICATION_DELAY time before verifying the data location. -// It can be overridden by setting the 'CHURN_COUNT' env var. -const CHURN_COUNT: u8 = 20; - -/// Default number of chunks that should be PUT to the network. 
-/// It can be overridden by setting the 'CHUNK_COUNT' env var. -const CHUNK_COUNT: usize = 5; -/// Default number of registers that should be PUT to the network. -/// It can be overridden by setting the 'REGISTER_COUNT' env var. -const REGISTER_COUNT: usize = 5; - -type NodeIndex = usize; -type RecordHolders = HashMap>; - -#[tokio::test(flavor = "multi_thread")] -async fn verify_data_location() -> Result<()> { - let _log_appender_guard = - LogBuilder::init_multi_threaded_tokio_test("verify_data_location", false); - - let churn_count = if let Ok(str) = std::env::var("CHURN_COUNT") { - str.parse::()? - } else { - CHURN_COUNT - }; - let chunk_count = if let Ok(str) = std::env::var("CHUNK_COUNT") { - str.parse::()? - } else { - CHUNK_COUNT - }; - let register_count = if let Ok(str) = std::env::var("REGISTER_COUNT") { - str.parse::()? - } else { - REGISTER_COUNT - }; - println!( - "Performing data location verification with a churn count of {churn_count} and n_chunks {chunk_count}, n_registers {register_count}\nIt will take approx {:?}", - VERIFICATION_DELAY*churn_count as u32 - ); - info!( - "Performing data location verification with a churn count of {churn_count} and n_chunks {chunk_count}, n_registers {register_count}\nIt will take approx {:?}", - VERIFICATION_DELAY*churn_count as u32 - ); - let node_rpc_address = get_all_rpc_addresses(true)?; - let mut all_peers = get_all_peer_ids(&node_rpc_address).await?; - - // Store chunks - println!("Creating a client and paying wallet..."); - debug!("Creating a client and paying wallet..."); - - let paying_wallet_dir = TempDir::new()?; - - let (client, _paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; - - store_chunks(client.clone(), chunk_count, paying_wallet_dir.to_path_buf()).await?; - store_registers(client, register_count, paying_wallet_dir.to_path_buf()).await?; - - // Verify data location initially - verify_location(&all_peers, &node_rpc_address).await?; - - // Churn nodes and verify the 
location of the data after VERIFICATION_DELAY - let mut current_churn_count = 0; - - let mut node_restart = NodeRestart::new(true, false)?; - let mut node_index = 0; - 'main: loop { - if current_churn_count >= churn_count { - break 'main Ok(()); - } - current_churn_count += 1; - - let safenode_rpc_endpoint = match node_restart.restart_next(false, false).await? { - None => { - // we have reached the end. - break 'main Ok(()); - } - Some(safenode_rpc_endpoint) => safenode_rpc_endpoint, - }; - - // wait for the dead peer to be removed from the RT and the replication flow to finish - println!( - "\nNode has been restarted, waiting for {VERIFICATION_DELAY:?} before verification" - ); - info!("\nNode has been restarted, waiting for {VERIFICATION_DELAY:?} before verification"); - tokio::time::sleep(VERIFICATION_DELAY).await; - - // get the new PeerId for the current NodeIndex - let mut rpc_client = get_safenode_rpc_client(safenode_rpc_endpoint).await?; - - let response = rpc_client - .node_info(Request::new(NodeInfoRequest {})) - .await?; - let new_peer_id = PeerId::from_bytes(&response.get_ref().peer_id)?; - // The below indexing assumes that, the way we do iteration to retrieve all_peers inside get_all_rpc_addresses - // and get_all_peer_ids is the same as how we do the iteration inside NodeRestart. - // todo: make this more cleaner. 
- if all_peers[node_index] == new_peer_id { - println!("new and old peer id are the same {new_peer_id:?}"); - return Err(eyre!("new and old peer id are the same {new_peer_id:?}")); - } - all_peers[node_index] = new_peer_id; - node_index += 1; - - print_node_close_groups(&all_peers); - - verify_location(&all_peers, &node_rpc_address).await?; - } -} - -fn print_node_close_groups(all_peers: &[PeerId]) { - let all_peers = all_peers.to_vec(); - info!("\nNode close groups:"); - - for (node_index, peer) in all_peers.iter().enumerate() { - let key = NetworkAddress::from_peer(*peer).as_kbucket_key(); - let closest_peers = - sort_peers_by_key(&all_peers, &key, CLOSE_GROUP_SIZE).expect("failed to sort peer"); - let closest_peers_idx = closest_peers - .iter() - .map(|&&peer| { - all_peers - .iter() - .position(|&p| p == peer) - .expect("peer to be in iterator") - }) - .collect::>(); - info!("Close for {node_index}: {peer:?} are {closest_peers_idx:?}"); - } -} - -async fn get_records_and_holders(node_rpc_addresses: &[SocketAddr]) -> Result { - let mut record_holders = RecordHolders::default(); - - for (node_index, rpc_address) in node_rpc_addresses.iter().enumerate() { - let mut rpc_client = get_safenode_rpc_client(*rpc_address).await?; - - let records_response = rpc_client - .record_addresses(Request::new(RecordAddressesRequest {})) - .await?; - - for bytes in records_response.get_ref().addresses.iter() { - let key = RecordKey::from(bytes.clone()); - let holders = record_holders.entry(key).or_insert(HashSet::new()); - holders.insert(node_index); - } - } - debug!("Obtained the current set of Record Key holders"); - Ok(record_holders) -} - -// Fetches the record_holders and verifies that the record is stored by the actual closest peers to the RecordKey -// It has a retry loop built in. 
-async fn verify_location(all_peers: &Vec, node_rpc_addresses: &[SocketAddr]) -> Result<()> { - let mut failed = HashMap::new(); - - println!("*********************************************"); - println!("Verifying data across all peers {all_peers:?}"); - info!("*********************************************"); - info!("Verifying data across all peers {all_peers:?}"); - - let mut verification_attempts = 0; - while verification_attempts < VERIFICATION_ATTEMPTS { - failed.clear(); - let record_holders = get_records_and_holders(node_rpc_addresses).await?; - for (key, actual_holders_idx) in record_holders.iter() { - println!("Verifying {:?}", PrettyPrintRecordKey::from(key)); - info!("Verifying {:?}", PrettyPrintRecordKey::from(key)); - let record_key = KBucketKey::from(key.to_vec()); - let expected_holders = sort_peers_by_key(all_peers, &record_key, CLOSE_GROUP_SIZE)? - .into_iter() - .cloned() - .collect::>(); - - let actual_holders = actual_holders_idx - .iter() - .map(|i| all_peers[*i]) - .collect::>(); - - info!( - "Expected to be held by {:?} nodes: {expected_holders:?}", - expected_holders.len() - ); - info!( - "Actually held by {:?} nodes : {actual_holders:?}", - actual_holders.len() - ); - - if actual_holders != expected_holders { - // print any expect holders that are not in actual holders - let mut missing_peers = Vec::new(); - expected_holders - .iter() - .filter(|expected| !actual_holders.contains(expected)) - .for_each(|expected| missing_peers.push(*expected)); - - if !missing_peers.is_empty() { - error!( - "Record {:?} is not stored by {missing_peers:?}", - PrettyPrintRecordKey::from(key), - ); - println!( - "Record {:?} is not stored by {missing_peers:?}", - PrettyPrintRecordKey::from(key), - ); - } - } - - let mut failed_peers = Vec::new(); - expected_holders - .iter() - .filter(|expected| !actual_holders.contains(expected)) - .for_each(|expected| failed_peers.push(*expected)); - - if !failed_peers.is_empty() { - failed.insert(key.clone(), failed_peers); 
- } - } - - if !failed.is_empty() { - error!("Verification failed for {:?} entries", failed.len()); - println!("Verification failed for {:?} entries", failed.len()); - - failed.iter().for_each(|(key, failed_peers)| { - let key_addr = NetworkAddress::from_record_key(key); - let pretty_key = PrettyPrintRecordKey::from(key); - failed_peers.iter().for_each(|peer| { - let peer_addr = NetworkAddress::from_peer(*peer); - let ilog2_distance = peer_addr.distance(&key_addr).ilog2(); - println!("Record {pretty_key:?} is not stored inside {peer:?}, with ilog2 distance to be {ilog2_distance:?}"); - error!("Record {pretty_key:?} is not stored inside {peer:?}, with ilog2 distance to be {ilog2_distance:?}"); - }); - }); - info!("State of each node:"); - record_holders.iter().for_each(|(key, node_index)| { - info!( - "Record {:?} is currently held by node indices {node_index:?}", - PrettyPrintRecordKey::from(key) - ); - }); - info!("Node index map:"); - all_peers - .iter() - .enumerate() - .for_each(|(idx, peer)| info!("{idx} : {peer:?}")); - verification_attempts += 1; - println!("Sleeping before retrying verification. {verification_attempts}/{VERIFICATION_ATTEMPTS}"); - info!("Sleeping before retrying verification. 
{verification_attempts}/{VERIFICATION_ATTEMPTS}"); - if verification_attempts < VERIFICATION_ATTEMPTS { - tokio::time::sleep(REVERIFICATION_DELAY).await; - } - } else { - // if successful, break out of the loop - break; - } - } - - if !failed.is_empty() { - println!("Verification failed after {VERIFICATION_ATTEMPTS} times"); - error!("Verification failed after {VERIFICATION_ATTEMPTS} times"); - Err(eyre!("Verification failed for: {failed:?}")) - } else { - println!("All the Records have been verified!"); - info!("All the Records have been verified!"); - Ok(()) - } -} - -// Generate random Chunks and store them to the Network -async fn store_chunks(client: Client, chunk_count: usize, wallet_dir: PathBuf) -> Result<()> { - let start = Instant::now(); - let mut rng = OsRng; - - let mut uploaded_chunks_count = 0; - loop { - if uploaded_chunks_count >= chunk_count { - break; - } - - let chunks_dir = TempDir::new()?; - - let random_bytes: Vec = ::std::iter::repeat(()) - .map(|()| rng.gen::()) - .take(CHUNK_SIZE) - .collect(); - - let file_path = chunks_dir.join("random_content"); - let mut output_file = File::create(file_path.clone())?; - output_file.write_all(&random_bytes)?; - - let (head_chunk_addr, _data_map, _file_size, chunks) = - FilesApi::chunk_file(&file_path, chunks_dir.path(), true)?; - - debug!( - "Paying storage for ({}) new Chunk/s of file ({} bytes) at {head_chunk_addr:?}", - chunks.len(), - random_bytes.len() - ); - - let key = - PrettyPrintRecordKey::from(&RecordKey::new(&head_chunk_addr.xorname())).into_owned(); - - let mut uploader = Uploader::new(client.clone(), wallet_dir.clone()); - uploader.set_show_holders(true); - uploader.set_verify_store(false); - uploader.insert_chunk_paths(chunks); - let _upload_stats = uploader.start_upload().await?; - - uploaded_chunks_count += 1; - - println!("Stored Chunk with {head_chunk_addr:?} / {key:?}"); - info!("Stored Chunk with {head_chunk_addr:?} / {key:?}"); - } - - println!( - "{chunk_count:?} Chunks were 
stored in {:?}", - start.elapsed() - ); - info!( - "{chunk_count:?} Chunks were stored in {:?}", - start.elapsed() - ); - - // to make sure the last chunk was stored - tokio::time::sleep(Duration::from_secs(10)).await; - - Ok(()) -} - -async fn store_registers(client: Client, register_count: usize, wallet_dir: PathBuf) -> Result<()> { - let start = Instant::now(); - let paying_wallet = get_wallet(&wallet_dir); - let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); - - let mut uploaded_registers_count = 0; - loop { - if uploaded_registers_count >= register_count { - break; - } - let meta = XorName(rand::random()); - let owner = client.signer_pk(); - - let addr = RegisterAddress::new(meta, owner); - println!("Creating Register at {addr:?}"); - debug!("Creating Register at {addr:?}"); - - let (mut register, ..) = client - .create_and_pay_for_register(meta, &mut wallet_client, true, Permissions::default()) - .await?; - - println!("Editing Register at {addr:?}"); - debug!("Editing Register at {addr:?}"); - register.write_online("entry".as_bytes(), true).await?; - - uploaded_registers_count += 1; - } - println!( - "{register_count:?} Registers were stored in {:?}", - start.elapsed() - ); - info!( - "{register_count:?} Registers were stored in {:?}", - start.elapsed() - ); - - // to make sure the last register was stored - tokio::time::sleep(Duration::from_secs(10)).await; - Ok(()) -} +// // Copyright 2024 MaidSafe.net limited. +// // +// // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// // KIND, either express or implied. Please review the Licences for the specific language governing +// // permissions and limitations relating to use of the SAFE Network Software. 
+ +// #![allow(clippy::mutable_key_type)] +// mod common; + +// use crate::common::{ +// client::{get_all_rpc_addresses, get_client_and_funded_wallet}, +// get_all_peer_ids, get_safenode_rpc_client, NodeRestart, +// }; +// use assert_fs::TempDir; +// use common::client::get_wallet; +// use eyre::{eyre, Result}; +// use libp2p::{ +// kad::{KBucketKey, RecordKey}, +// PeerId, +// }; +// use rand::{rngs::OsRng, Rng}; +// // TODO: update autonomi API here +// // use sn_client::{Client, FilesApi, Uploader, WalletClient}; +// use sn_logging::LogBuilder; +// use sn_networking::sort_peers_by_key; +// use sn_protocol::{ +// safenode_proto::{NodeInfoRequest, RecordAddressesRequest}, +// NetworkAddress, PrettyPrintRecordKey, CLOSE_GROUP_SIZE, +// }; +// use sn_registers::{Permissions, RegisterAddress}; +// use std::{ +// collections::{BTreeSet, HashMap, HashSet}, +// fs::File, +// io::Write, +// net::SocketAddr, +// path::PathBuf, +// time::{Duration, Instant}, +// }; +// use tonic::Request; +// use tracing::{debug, error, info}; +// use xor_name::XorName; + +// const CHUNK_SIZE: usize = 1024; + +// // VERIFICATION_DELAY is set based on the dead peer detection interval +// // Once a node has been restarted, it takes VERIFICATION_DELAY time +// // for the old peer to be removed from the routing table. +// // Replication is then kicked off to distribute the data to the new closest +// // nodes, hence verification has to be performed after this. +// const VERIFICATION_DELAY: Duration = Duration::from_secs(60); + +// /// Number of times to retry verification if it fails +// const VERIFICATION_ATTEMPTS: usize = 5; + +// /// Length of time to wait before re-verifying the data location +// const REVERIFICATION_DELAY: Duration = +// Duration::from_secs(sn_node::PERIODIC_REPLICATION_INTERVAL_MAX_S); + +// // Default number of churns that should be performed. After each churn, we +// // wait for VERIFICATION_DELAY time before verifying the data location. 
+// // It can be overridden by setting the 'CHURN_COUNT' env var. +// const CHURN_COUNT: u8 = 20; + +// /// Default number of chunks that should be PUT to the network. +// /// It can be overridden by setting the 'CHUNK_COUNT' env var. +// const CHUNK_COUNT: usize = 5; +// /// Default number of registers that should be PUT to the network. +// /// It can be overridden by setting the 'REGISTER_COUNT' env var. +// const REGISTER_COUNT: usize = 5; + +// type NodeIndex = usize; +// type RecordHolders = HashMap>; + +// #[tokio::test(flavor = "multi_thread")] +// async fn verify_data_location() -> Result<()> { +// let _log_appender_guard = +// LogBuilder::init_multi_threaded_tokio_test("verify_data_location", false); + +// let churn_count = if let Ok(str) = std::env::var("CHURN_COUNT") { +// str.parse::()? +// } else { +// CHURN_COUNT +// }; +// let chunk_count = if let Ok(str) = std::env::var("CHUNK_COUNT") { +// str.parse::()? +// } else { +// CHUNK_COUNT +// }; +// let register_count = if let Ok(str) = std::env::var("REGISTER_COUNT") { +// str.parse::()? 
+// } else { +// REGISTER_COUNT +// }; +// println!( +// "Performing data location verification with a churn count of {churn_count} and n_chunks {chunk_count}, n_registers {register_count}\nIt will take approx {:?}", +// VERIFICATION_DELAY*churn_count as u32 +// ); +// info!( +// "Performing data location verification with a churn count of {churn_count} and n_chunks {chunk_count}, n_registers {register_count}\nIt will take approx {:?}", +// VERIFICATION_DELAY*churn_count as u32 +// ); +// let node_rpc_address = get_all_rpc_addresses(true)?; +// let mut all_peers = get_all_peer_ids(&node_rpc_address).await?; + +// // Store chunks +// println!("Creating a client and paying wallet..."); +// debug!("Creating a client and paying wallet..."); + +// let paying_wallet_dir = TempDir::new()?; + +// let (client, _paying_wallet) = get_client_and_funded_wallet(paying_wallet_dir.path()).await?; + +// store_chunks(client.clone(), chunk_count, paying_wallet_dir.to_path_buf()).await?; +// store_registers(client, register_count, paying_wallet_dir.to_path_buf()).await?; + +// // Verify data location initially +// verify_location(&all_peers, &node_rpc_address).await?; + +// // Churn nodes and verify the location of the data after VERIFICATION_DELAY +// let mut current_churn_count = 0; + +// let mut node_restart = NodeRestart::new(true, false)?; +// let mut node_index = 0; +// 'main: loop { +// if current_churn_count >= churn_count { +// break 'main Ok(()); +// } +// current_churn_count += 1; + +// let safenode_rpc_endpoint = match node_restart.restart_next(false, false).await? { +// None => { +// // we have reached the end. 
+// break 'main Ok(()); +// } +// Some(safenode_rpc_endpoint) => safenode_rpc_endpoint, +// }; + +// // wait for the dead peer to be removed from the RT and the replication flow to finish +// println!( +// "\nNode has been restarted, waiting for {VERIFICATION_DELAY:?} before verification" +// ); +// info!("\nNode has been restarted, waiting for {VERIFICATION_DELAY:?} before verification"); +// tokio::time::sleep(VERIFICATION_DELAY).await; + +// // get the new PeerId for the current NodeIndex +// let mut rpc_client = get_safenode_rpc_client(safenode_rpc_endpoint).await?; + +// let response = rpc_client +// .node_info(Request::new(NodeInfoRequest {})) +// .await?; +// let new_peer_id = PeerId::from_bytes(&response.get_ref().peer_id)?; +// // The below indexing assumes that, the way we do iteration to retrieve all_peers inside get_all_rpc_addresses +// // and get_all_peer_ids is the same as how we do the iteration inside NodeRestart. +// // todo: make this more cleaner. +// if all_peers[node_index] == new_peer_id { +// println!("new and old peer id are the same {new_peer_id:?}"); +// return Err(eyre!("new and old peer id are the same {new_peer_id:?}")); +// } +// all_peers[node_index] = new_peer_id; +// node_index += 1; + +// print_node_close_groups(&all_peers); + +// verify_location(&all_peers, &node_rpc_address).await?; +// } +// } + +// fn print_node_close_groups(all_peers: &[PeerId]) { +// let all_peers = all_peers.to_vec(); +// info!("\nNode close groups:"); + +// for (node_index, peer) in all_peers.iter().enumerate() { +// let key = NetworkAddress::from_peer(*peer).as_kbucket_key(); +// let closest_peers = +// sort_peers_by_key(&all_peers, &key, CLOSE_GROUP_SIZE).expect("failed to sort peer"); +// let closest_peers_idx = closest_peers +// .iter() +// .map(|&&peer| { +// all_peers +// .iter() +// .position(|&p| p == peer) +// .expect("peer to be in iterator") +// }) +// .collect::>(); +// info!("Close for {node_index}: {peer:?} are {closest_peers_idx:?}"); +// } 
+// } + +// async fn get_records_and_holders(node_rpc_addresses: &[SocketAddr]) -> Result { +// let mut record_holders = RecordHolders::default(); + +// for (node_index, rpc_address) in node_rpc_addresses.iter().enumerate() { +// let mut rpc_client = get_safenode_rpc_client(*rpc_address).await?; + +// let records_response = rpc_client +// .record_addresses(Request::new(RecordAddressesRequest {})) +// .await?; + +// for bytes in records_response.get_ref().addresses.iter() { +// let key = RecordKey::from(bytes.clone()); +// let holders = record_holders.entry(key).or_insert(HashSet::new()); +// holders.insert(node_index); +// } +// } +// debug!("Obtained the current set of Record Key holders"); +// Ok(record_holders) +// } + +// // Fetches the record_holders and verifies that the record is stored by the actual closest peers to the RecordKey +// // It has a retry loop built in. +// async fn verify_location(all_peers: &Vec, node_rpc_addresses: &[SocketAddr]) -> Result<()> { +// let mut failed = HashMap::new(); + +// println!("*********************************************"); +// println!("Verifying data across all peers {all_peers:?}"); +// info!("*********************************************"); +// info!("Verifying data across all peers {all_peers:?}"); + +// let mut verification_attempts = 0; +// while verification_attempts < VERIFICATION_ATTEMPTS { +// failed.clear(); +// let record_holders = get_records_and_holders(node_rpc_addresses).await?; +// for (key, actual_holders_idx) in record_holders.iter() { +// println!("Verifying {:?}", PrettyPrintRecordKey::from(key)); +// info!("Verifying {:?}", PrettyPrintRecordKey::from(key)); +// let record_key = KBucketKey::from(key.to_vec()); +// let expected_holders = sort_peers_by_key(all_peers, &record_key, CLOSE_GROUP_SIZE)? 
+// .into_iter() +// .cloned() +// .collect::>(); + +// let actual_holders = actual_holders_idx +// .iter() +// .map(|i| all_peers[*i]) +// .collect::>(); + +// info!( +// "Expected to be held by {:?} nodes: {expected_holders:?}", +// expected_holders.len() +// ); +// info!( +// "Actually held by {:?} nodes : {actual_holders:?}", +// actual_holders.len() +// ); + +// if actual_holders != expected_holders { +// // print any expect holders that are not in actual holders +// let mut missing_peers = Vec::new(); +// expected_holders +// .iter() +// .filter(|expected| !actual_holders.contains(expected)) +// .for_each(|expected| missing_peers.push(*expected)); + +// if !missing_peers.is_empty() { +// error!( +// "Record {:?} is not stored by {missing_peers:?}", +// PrettyPrintRecordKey::from(key), +// ); +// println!( +// "Record {:?} is not stored by {missing_peers:?}", +// PrettyPrintRecordKey::from(key), +// ); +// } +// } + +// let mut failed_peers = Vec::new(); +// expected_holders +// .iter() +// .filter(|expected| !actual_holders.contains(expected)) +// .for_each(|expected| failed_peers.push(*expected)); + +// if !failed_peers.is_empty() { +// failed.insert(key.clone(), failed_peers); +// } +// } + +// if !failed.is_empty() { +// error!("Verification failed for {:?} entries", failed.len()); +// println!("Verification failed for {:?} entries", failed.len()); + +// failed.iter().for_each(|(key, failed_peers)| { +// let key_addr = NetworkAddress::from_record_key(key); +// let pretty_key = PrettyPrintRecordKey::from(key); +// failed_peers.iter().for_each(|peer| { +// let peer_addr = NetworkAddress::from_peer(*peer); +// let ilog2_distance = peer_addr.distance(&key_addr).ilog2(); +// println!("Record {pretty_key:?} is not stored inside {peer:?}, with ilog2 distance to be {ilog2_distance:?}"); +// error!("Record {pretty_key:?} is not stored inside {peer:?}, with ilog2 distance to be {ilog2_distance:?}"); +// }); +// }); +// info!("State of each node:"); +// 
record_holders.iter().for_each(|(key, node_index)| { +// info!( +// "Record {:?} is currently held by node indices {node_index:?}", +// PrettyPrintRecordKey::from(key) +// ); +// }); +// info!("Node index map:"); +// all_peers +// .iter() +// .enumerate() +// .for_each(|(idx, peer)| info!("{idx} : {peer:?}")); +// verification_attempts += 1; +// println!("Sleeping before retrying verification. {verification_attempts}/{VERIFICATION_ATTEMPTS}"); +// info!("Sleeping before retrying verification. {verification_attempts}/{VERIFICATION_ATTEMPTS}"); +// if verification_attempts < VERIFICATION_ATTEMPTS { +// tokio::time::sleep(REVERIFICATION_DELAY).await; +// } +// } else { +// // if successful, break out of the loop +// break; +// } +// } + +// if !failed.is_empty() { +// println!("Verification failed after {VERIFICATION_ATTEMPTS} times"); +// error!("Verification failed after {VERIFICATION_ATTEMPTS} times"); +// Err(eyre!("Verification failed for: {failed:?}")) +// } else { +// println!("All the Records have been verified!"); +// info!("All the Records have been verified!"); +// Ok(()) +// } +// } + +// // Generate random Chunks and store them to the Network +// async fn store_chunks(client: Client, chunk_count: usize, wallet_dir: PathBuf) -> Result<()> { +// let start = Instant::now(); +// let mut rng = OsRng; + +// let mut uploaded_chunks_count = 0; +// loop { +// if uploaded_chunks_count >= chunk_count { +// break; +// } + +// let chunks_dir = TempDir::new()?; + +// let random_bytes: Vec = ::std::iter::repeat(()) +// .map(|()| rng.gen::()) +// .take(CHUNK_SIZE) +// .collect(); + +// let file_path = chunks_dir.join("random_content"); +// let mut output_file = File::create(file_path.clone())?; +// output_file.write_all(&random_bytes)?; + +// let (head_chunk_addr, _data_map, _file_size, chunks) = +// FilesApi::chunk_file(&file_path, chunks_dir.path(), true)?; + +// debug!( +// "Paying storage for ({}) new Chunk/s of file ({} bytes) at {head_chunk_addr:?}", +// 
chunks.len(), +// random_bytes.len() +// ); + +// let key = +// PrettyPrintRecordKey::from(&RecordKey::new(&head_chunk_addr.xorname())).into_owned(); + +// let mut uploader = Uploader::new(client.clone(), wallet_dir.clone()); +// uploader.set_show_holders(true); +// uploader.set_verify_store(false); +// uploader.insert_chunk_paths(chunks); +// let _upload_stats = uploader.start_upload().await?; + +// uploaded_chunks_count += 1; + +// println!("Stored Chunk with {head_chunk_addr:?} / {key:?}"); +// info!("Stored Chunk with {head_chunk_addr:?} / {key:?}"); +// } + +// println!( +// "{chunk_count:?} Chunks were stored in {:?}", +// start.elapsed() +// ); +// info!( +// "{chunk_count:?} Chunks were stored in {:?}", +// start.elapsed() +// ); + +// // to make sure the last chunk was stored +// tokio::time::sleep(Duration::from_secs(10)).await; + +// Ok(()) +// } + +// async fn store_registers(client: Client, register_count: usize, wallet_dir: PathBuf) -> Result<()> { +// let start = Instant::now(); +// let paying_wallet = get_wallet(&wallet_dir); +// let mut wallet_client = WalletClient::new(client.clone(), paying_wallet); + +// let mut uploaded_registers_count = 0; +// loop { +// if uploaded_registers_count >= register_count { +// break; +// } +// let meta = XorName(rand::random()); +// let owner = client.signer_pk(); + +// let addr = RegisterAddress::new(meta, owner); +// println!("Creating Register at {addr:?}"); +// debug!("Creating Register at {addr:?}"); + +// let (mut register, ..) 
= client +// .create_and_pay_for_register(meta, &mut wallet_client, true, Permissions::default()) +// .await?; + +// println!("Editing Register at {addr:?}"); +// debug!("Editing Register at {addr:?}"); +// register.write_online("entry".as_bytes(), true).await?; + +// uploaded_registers_count += 1; +// } +// println!( +// "{register_count:?} Registers were stored in {:?}", +// start.elapsed() +// ); +// info!( +// "{register_count:?} Registers were stored in {:?}", +// start.elapsed() +// ); + +// // to make sure the last register was stored +// tokio::time::sleep(Duration::from_secs(10)).await; +// Ok(()) +// } diff --git a/sn_node/tests/verify_routing_table.rs b/sn_node/tests/verify_routing_table.rs index da19270b69..8f01c1a24a 100644 --- a/sn_node/tests/verify_routing_table.rs +++ b/sn_node/tests/verify_routing_table.rs @@ -1,114 +1,114 @@ -// Copyright 2024 MaidSafe.net limited. -// -// This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. -// Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed -// under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. Please review the Licences for the specific language governing -// permissions and limitations relating to use of the SAFE Network Software. +// // Copyright 2024 MaidSafe.net limited. +// // +// // This SAFE Network Software is licensed to you under The General Public License (GPL), version 3. +// // Unless required by applicable law or agreed to in writing, the SAFE Network Software distributed +// // under the GPL Licence is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// // KIND, either express or implied. Please review the Licences for the specific language governing +// // permissions and limitations relating to use of the SAFE Network Software. 
-#![allow(clippy::mutable_key_type)] -mod common; +// #![allow(clippy::mutable_key_type)] +// mod common; -use crate::common::{client::get_all_rpc_addresses, get_all_peer_ids, get_safenode_rpc_client}; -use color_eyre::Result; -use libp2p::{ - kad::{KBucketKey, K_VALUE}, - PeerId, -}; -use sn_logging::LogBuilder; -use sn_protocol::safenode_proto::KBucketsRequest; -use std::{ - collections::{BTreeMap, HashSet}, - time::Duration, -}; -use tonic::Request; -use tracing::{error, info, trace}; +// use crate::common::{client::get_all_rpc_addresses, get_all_peer_ids, get_safenode_rpc_client}; +// use color_eyre::Result; +// use libp2p::{ +// kad::{KBucketKey, K_VALUE}, +// PeerId, +// }; +// use sn_logging::LogBuilder; +// use sn_protocol::safenode_proto::KBucketsRequest; +// use std::{ +// collections::{BTreeMap, HashSet}, +// time::Duration, +// }; +// use tonic::Request; +// use tracing::{error, info, trace}; -/// Sleep for sometime for the nodes for discover each other before verification -/// Also can be set through the env variable of the same name. -const SLEEP_BEFORE_VERIFICATION: Duration = Duration::from_secs(5); +// /// Sleep for sometime for the nodes for discover each other before verification +// /// Also can be set through the env variable of the same name. 
+// const SLEEP_BEFORE_VERIFICATION: Duration = Duration::from_secs(5); -#[tokio::test(flavor = "multi_thread")] -async fn verify_routing_table() -> Result<()> { - let _log_appender_guard = - LogBuilder::init_multi_threaded_tokio_test("verify_routing_table", false); +// #[tokio::test(flavor = "multi_thread")] +// async fn verify_routing_table() -> Result<()> { +// let _log_appender_guard = +// LogBuilder::init_multi_threaded_tokio_test("verify_routing_table", false); - let sleep_duration = std::env::var("SLEEP_BEFORE_VERIFICATION") - .map(|value| { - value - .parse::() - .expect("Failed to prase sleep value into u64") - }) - .map(Duration::from_secs) - .unwrap_or(SLEEP_BEFORE_VERIFICATION); - info!("Sleeping for {sleep_duration:?} before verification"); - tokio::time::sleep(sleep_duration).await; +// let sleep_duration = std::env::var("SLEEP_BEFORE_VERIFICATION") +// .map(|value| { +// value +// .parse::() +// .expect("Failed to prase sleep value into u64") +// }) +// .map(Duration::from_secs) +// .unwrap_or(SLEEP_BEFORE_VERIFICATION); +// info!("Sleeping for {sleep_duration:?} before verification"); +// tokio::time::sleep(sleep_duration).await; - let node_rpc_address = get_all_rpc_addresses(false)?; +// let node_rpc_address = get_all_rpc_addresses(false)?; - let all_peers = get_all_peer_ids(&node_rpc_address).await?; - trace!("All peers: {all_peers:?}"); - let mut all_failed_list = BTreeMap::new(); +// let all_peers = get_all_peer_ids(&node_rpc_address).await?; +// trace!("All peers: {all_peers:?}"); +// let mut all_failed_list = BTreeMap::new(); - for (node_index, rpc_address) in node_rpc_address.iter().enumerate() { - let mut rpc_client = get_safenode_rpc_client(*rpc_address).await?; +// for (node_index, rpc_address) in node_rpc_address.iter().enumerate() { +// let mut rpc_client = get_safenode_rpc_client(*rpc_address).await?; - let response = rpc_client - .k_buckets(Request::new(KBucketsRequest {})) - .await?; +// let response = rpc_client +// 
.k_buckets(Request::new(KBucketsRequest {})) +// .await?; - let k_buckets = response.get_ref().kbuckets.clone(); - let k_buckets = k_buckets - .into_iter() - .map(|(ilog2, peers)| { - let peers = peers - .peers - .into_iter() - .map(|peer_bytes| PeerId::from_bytes(&peer_bytes).unwrap()) - .collect::>(); - (ilog2, peers) - }) - .collect::>(); +// let k_buckets = response.get_ref().kbuckets.clone(); +// let k_buckets = k_buckets +// .into_iter() +// .map(|(ilog2, peers)| { +// let peers = peers +// .peers +// .into_iter() +// .map(|peer_bytes| PeerId::from_bytes(&peer_bytes).unwrap()) +// .collect::>(); +// (ilog2, peers) +// }) +// .collect::>(); - let current_peer = all_peers[node_index]; - let current_peer_key = KBucketKey::from(current_peer); - trace!("KBuckets for node #{node_index}: {current_peer} are: {k_buckets:?}"); +// let current_peer = all_peers[node_index]; +// let current_peer_key = KBucketKey::from(current_peer); +// trace!("KBuckets for node #{node_index}: {current_peer} are: {k_buckets:?}"); - let mut failed_list = Vec::new(); - for peer in all_peers.iter() { - let ilog2_distance = match KBucketKey::from(*peer).distance(¤t_peer_key).ilog2() { - Some(distance) => distance, - // None if same key - None => continue, - }; - match k_buckets.get(&ilog2_distance) { - Some(bucket) => { - if bucket.contains(peer) { - println!("{peer:?} found inside the kbucket with ilog2 {ilog2_distance:?} of {current_peer:?} RT"); - continue; - } else if bucket.len() == K_VALUE.get() { - println!("{peer:?} should be inside the ilog2 bucket: {ilog2_distance:?} of {current_peer:?}. But skipped as the bucket is full"); - info!("{peer:?} should be inside the ilog2 bucket: {ilog2_distance:?} of {current_peer:?}. 
But skipped as the bucket is full"); - continue; - } else { - println!("{peer:?} not found inside the kbucket with ilog2 {ilog2_distance:?} of {current_peer:?} RT"); - error!("{peer:?} not found inside the kbucket with ilog2 {ilog2_distance:?} of {current_peer:?} RT"); - failed_list.push(*peer); - } - } - None => { - info!("Current peer {current_peer:?} should be {ilog2_distance} ilog2 distance away from {peer:?}, but that kbucket is not present for current_peer."); - failed_list.push(*peer); - } - } - } - if !failed_list.is_empty() { - all_failed_list.insert(current_peer, failed_list); - } - } - if !all_failed_list.is_empty() { - error!("Failed to verify routing table:\n{all_failed_list:?}"); - panic!("Failed to verify routing table."); - } - Ok(()) -} +// let mut failed_list = Vec::new(); +// for peer in all_peers.iter() { +// let ilog2_distance = match KBucketKey::from(*peer).distance(¤t_peer_key).ilog2() { +// Some(distance) => distance, +// // None if same key +// None => continue, +// }; +// match k_buckets.get(&ilog2_distance) { +// Some(bucket) => { +// if bucket.contains(peer) { +// println!("{peer:?} found inside the kbucket with ilog2 {ilog2_distance:?} of {current_peer:?} RT"); +// continue; +// } else if bucket.len() == K_VALUE.get() { +// println!("{peer:?} should be inside the ilog2 bucket: {ilog2_distance:?} of {current_peer:?}. But skipped as the bucket is full"); +// info!("{peer:?} should be inside the ilog2 bucket: {ilog2_distance:?} of {current_peer:?}. 
But skipped as the bucket is full"); +// continue; +// } else { +// println!("{peer:?} not found inside the kbucket with ilog2 {ilog2_distance:?} of {current_peer:?} RT"); +// error!("{peer:?} not found inside the kbucket with ilog2 {ilog2_distance:?} of {current_peer:?} RT"); +// failed_list.push(*peer); +// } +// } +// None => { +// info!("Current peer {current_peer:?} should be {ilog2_distance} ilog2 distance away from {peer:?}, but that kbucket is not present for current_peer."); +// failed_list.push(*peer); +// } +// } +// } +// if !failed_list.is_empty() { +// all_failed_list.insert(current_peer, failed_list); +// } +// } +// if !all_failed_list.is_empty() { +// error!("Failed to verify routing table:\n{all_failed_list:?}"); +// panic!("Failed to verify routing table."); +// } +// Ok(()) +// } diff --git a/sn_node_rpc_client/Cargo.toml b/sn_node_rpc_client/Cargo.toml index 055f1913b9..60c9f79134 100644 --- a/sn_node_rpc_client/Cargo.toml +++ b/sn_node_rpc_client/Cargo.toml @@ -24,14 +24,16 @@ bls = { package = "blsttc", version = "8.0.1" } clap = { version = "4.2.1", features = ["derive"] } color-eyre = "0.6.2" hex = "~0.4.3" -libp2p = { version = "0.54.1", features = ["kad"]} -libp2p-identity = { version="0.2.7", features = ["rand"] } +libp2p = { version = "0.54.1", features = ["kad"] } +libp2p-identity = { version = "0.2.7", features = ["rand"] } sn_build_info = { path = "../sn_build_info", version = "0.1.13" } -sn_client = { path = "../sn_client", version = "0.110.1" } +# sn_client = { path = "../sn_client", version = "0.110.1" } sn_logging = { path = "../sn_logging", version = "0.2.34" } sn_node = { path = "../sn_node", version = "0.111.2" } sn_peers_acquisition = { path = "../sn_peers_acquisition", version = "0.5.1" } -sn_protocol = { path = "../sn_protocol", version = "0.17.9", features=["rpc"] } +sn_protocol = { path = "../sn_protocol", version = "0.17.9", features = [ + "rpc", +] } sn_service_management = { path = "../sn_service_management", version = 
"0.3.12" } sn_transfers = { path = "../sn_transfers", version = "0.19.1" } thiserror = "1.0.23" From 804bce11aef6967e32b740888a7a7f52ecb49fad Mon Sep 17 00:00:00 2001 From: Josh Wilson Date: Tue, 1 Oct 2024 13:16:03 +0900 Subject: [PATCH 2/2] fix: clippy issues --- sn_node_manager/src/cmd/local.rs | 8 ++++---- sn_node_manager/src/local.rs | 6 ++---- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/sn_node_manager/src/cmd/local.rs b/sn_node_manager/src/cmd/local.rs index 8e1ba90c31..b77ed0b36e 100644 --- a/sn_node_manager/src/cmd/local.rs +++ b/sn_node_manager/src/cmd/local.rs @@ -28,8 +28,8 @@ pub async fn join( build: bool, count: u16, enable_metrics_server: bool, - faucet_path: Option, - faucet_version: Option, + _faucet_path: Option, + _faucet_version: Option, interval: u64, metrics_port: Option, node_path: Option, @@ -143,8 +143,8 @@ pub async fn run( clean: bool, count: u16, enable_metrics_server: bool, - faucet_path: Option, - faucet_version: Option, + _faucet_path: Option, + _faucet_version: Option, interval: u64, metrics_port: Option, node_path: Option, diff --git a/sn_node_manager/src/local.rs b/sn_node_manager/src/local.rs index ed39f67c12..3f31ac899e 100644 --- a/sn_node_manager/src/local.rs +++ b/sn_node_manager/src/local.rs @@ -8,7 +8,7 @@ use crate::add_services::config::PortRange; use crate::helpers::{ - check_port_availability, get_bin_version, get_start_port_if_applicable, get_username, + check_port_availability, get_bin_version, get_start_port_if_applicable, increment_port_option, }; use color_eyre::eyre::OptionExt; @@ -22,10 +22,8 @@ use sn_evm::{EvmNetwork, RewardsAddress}; use sn_logging::LogFormat; use sn_service_management::{ control::ServiceControl, - rpc::{RpcActions, RpcClient}, - FaucetServiceData, NodeRegistry, NodeServiceData, ServiceStatus, + rpc::{RpcActions, RpcClient}, NodeRegistry, NodeServiceData, ServiceStatus, }; -use sn_transfers::get_faucet_data_dir; use std::{ net::{IpAddr, Ipv4Addr, SocketAddr}, path::PathBuf,