diff --git a/Cargo.lock b/Cargo.lock index a928c959..c1151975 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2360,6 +2360,18 @@ dependencies = [ "wasi", ] +[[package]] +name = "getset" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f636605b743120a8d32ed92fc27b6cde1a769f8f936c065151eb66f88ded513c" +dependencies = [ + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "ghash" version = "0.5.1" @@ -4193,6 +4205,7 @@ dependencies = [ "clap", "dotenv", "futures", + "getset", "hex", "k256", "libp2p", @@ -4216,6 +4229,7 @@ dependencies = [ "alloy-rlp", "alloy-rlp-derive", "directories", + "getset", "hex", "k256", "libp2p", @@ -4257,6 +4271,7 @@ dependencies = [ "alloy-rlp", "dotenv", "futures", + "getset", "hex", "k256", "libp2p", @@ -4312,6 +4327,7 @@ dependencies = [ "clap", "csv", "dotenv", + "getset", "hex", "jsonrpsee", "k256", @@ -4330,6 +4346,7 @@ version = "0.1.1" dependencies = [ "alloy-rlp", "futures", + "getset", "hex", "jsonrpsee", "libp2p", @@ -4367,6 +4384,7 @@ dependencies = [ "alloy-rlp", "dotenv", "futures", + "getset", "hex", "k256", "libp2p", diff --git a/Cargo.toml b/Cargo.toml index 00170236..38b84e2e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -45,3 +45,4 @@ k256 = "0.13.3" directories = "5.0.1" thiserror = "1.0.63" clap = "4.5.9" +getset = "0.1.3" diff --git a/block-builder/Cargo.toml b/block-builder/Cargo.toml index 078acb48..ca688a5c 100644 --- a/block-builder/Cargo.toml +++ b/block-builder/Cargo.toml @@ -36,3 +36,4 @@ toml = { workspace = true } dotenv = { workspace = true } k256 = { workspace = true } clap = { workspace = true, features = ["derive"] } +getset = { workspace = true } diff --git a/block-builder/src/coordinator.rs b/block-builder/src/coordinator.rs index f58834a2..1e97d98f 100644 --- a/block-builder/src/coordinator.rs +++ b/block-builder/src/coordinator.rs @@ -1,8 +1,11 @@ +use getset::Getters; use openrank_common::tx::compute; use std::collections::HashMap; /// Coordinator role for the OpenRank network. /// Responsible for sequencing job results. +#[derive(Default, Getters)] +#[getset(get = "pub")] pub struct JobCoordinator { /// A map of all job results. job_results: HashMap, @@ -20,11 +23,11 @@ impl JobCoordinator { /// Add a JobResult to memory and increase the counter in case /// it has not been seen before. pub fn add_job_result(&mut self, compute_result: &mut compute::Result) { - if compute_result.seq_number.is_none() { + if compute_result.seq_number().is_none() { compute_result.set_seq_number(self.count); self.count += 1; } - let seq_number = compute_result.seq_number.unwrap(); + let seq_number = compute_result.seq_number().unwrap(); self.job_results.insert(seq_number, compute_result.clone()); if seq_number > self.count { self.count = seq_number; diff --git a/block-builder/src/lib.rs b/block-builder/src/lib.rs index a36ba136..8240960a 100644 --- a/block-builder/src/lib.rs +++ b/block-builder/src/lib.rs @@ -2,6 +2,7 @@ use alloy_rlp::Decodable; use coordinator::JobCoordinator; use dotenv::dotenv; use futures::StreamExt; +use getset::Getters; use k256::ecdsa; use k256::ecdsa::SigningKey; use libp2p::{gossipsub, mdns, swarm::SwarmEvent, Swarm}; @@ -52,29 +53,33 @@ impl Display for Error { } } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Getters)] +#[getset(get = "pub")] /// The whitelist for the Block Builder. 
-pub struct Whitelist { +struct Whitelist { /// The list of addresses that are allowed to be computers. - pub computer: Vec<Address>
, +    computer: Vec<Address>
, /// The list of addresses that are allowed to be verifiers. - pub verifier: Vec<Address>
, +    verifier: Vec<Address>
, /// The list of addresses that are allowed to broadcast transactions. - pub users: Vec<Address>
, +    users: Vec<Address>
, } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Getters)] +#[getset(get = "pub")] /// The configuration for the Block Builder. -pub struct Config { +struct Config { /// The list of domains to process ComputeRequest TXs for. - pub domains: Vec, + domains: Vec, /// The whitelist for the Block Builder. - pub whitelist: Whitelist, - pub database: db::Config, - pub p2p: net::Config, + whitelist: Whitelist, + database: db::Config, + p2p: net::Config, } /// The Block Builder node. It contains the Swarm, the Config, the DB, the SecretKey, and the ComputeRunner. +#[derive(Getters)] +#[getset(get = "pub")] pub struct Node { swarm: Swarm, config: Config, @@ -105,7 +110,7 @@ impl Node { &[&Tx::get_cf(), &compute::Result::get_cf(), &compute::ResultReference::get_cf()], )?; - let swarm = build_node(net::load_keypair(&config.p2p.keypair, &config_loader)?).await?; + let swarm = build_node(net::load_keypair(config.p2p().keypair(), &config_loader)?).await?; info!("PEER_ID: {:?}", swarm.local_peer_id()); let coordinator = JobCoordinator::new(); @@ -172,7 +177,7 @@ impl Node { assert!(self.config.whitelist.users.contains(&address)); // Add Tx to db self.db.put(tx.clone()).map_err(Error::Db)?; - assert_eq!(&compute_request.domain_id, domain_id); + assert_eq!(compute_request.domain_id(), domain_id); let assignment_topic = Topic::DomainAssignent(*domain_id); let computer = self.config.whitelist.computer[0]; @@ -207,7 +212,7 @@ impl Node { let assignment_tx_key = Tx::construct_full_key( consts::COMPUTE_ASSIGNMENT, - commitment.assignment_tx_hash, + commitment.assignment_tx_hash().clone(), ); let assignment_tx: Tx = self.db.get(assignment_tx_key).map_err(Error::Db)?; @@ -217,12 +222,12 @@ impl Node { }; let request_tx_key = Tx::construct_full_key( consts::COMPUTE_REQUEST, - assignment_body.request_tx_hash.clone(), + assignment_body.request_tx_hash().clone(), ); let request: Tx = self.db.get(request_tx_key).map_err(Error::Db)?; if let Err(db::Error::NotFound) = self.db.get::( - assignment_body.request_tx_hash.0.to_vec(), + assignment_body.request_tx_hash().to_bytes(), ) { let mut result = @@ -230,8 +235,8 @@ impl Node { self.coordinator.add_job_result(&mut result); self.db.put(result.clone()).map_err(Error::Db)?; let reference = compute::ResultReference::new( - assignment_body.request_tx_hash, - result.seq_number.unwrap(), + assignment_body.request_tx_hash().clone(), + result.seq_number().unwrap(), ); self.db.put(reference).map_err(Error::Db)?; } @@ -274,7 +279,7 @@ impl Node { let assignment_tx_key = Tx::construct_full_key( consts::COMPUTE_ASSIGNMENT, - compute_verification.assignment_tx_hash, + compute_verification.assignment_tx_hash().clone(), ); let assignment_tx: Tx = self.db.get(assignment_tx_key).map_err(Error::Db)?; @@ -284,13 +289,13 @@ impl Node { }; let result_reference: compute::ResultReference = self .db - .get(assignment_body.request_tx_hash.0.to_vec()) + .get(assignment_body.request_tx_hash().to_bytes()) .map_err(Error::Db)?; let compute_result_key = - compute::Result::construct_full_key(result_reference.seq_number); + compute::Result::construct_full_key(*result_reference.seq_number()); let mut result: compute::Result = self.db.get(compute_result_key).map_err(Error::Db)?; - result.compute_verification_tx_hashes.push(tx.hash()); + result.append_verification_tx_hash(tx.hash()); self.coordinator.add_job_result(&mut result); self.db.put(result).map_err(Error::Db)?; info!( @@ -325,7 +330,7 @@ impl Node { /// - Handles gossipsub events. 
/// - Handles mDNS events. pub async fn run(&mut self) -> Result<(), Box> { - net::listen_on(&mut self.swarm, &self.config.p2p.listen_on)?; + net::listen_on(&mut self.swarm, self.config.p2p().listen_on())?; let topics_trust_updates: Vec = self .config @@ -385,7 +390,7 @@ impl Node { // Create a Gossipsub topic let topic = gossipsub::IdentTopic::new(topic.clone()); // subscribes to our topic - self.swarm.behaviour_mut().gossipsub.subscribe(&topic)?; + self.swarm.behaviour_mut().gossipsub_subscribe(&topic)?; } // Kick it off @@ -395,13 +400,13 @@ impl Node { SwarmEvent::Behaviour(MyBehaviourEvent::Mdns(mdns::Event::Discovered(list))) => { for (peer_id, _multiaddr) in list { info!("mDNS discovered a new peer: {peer_id}"); - self.swarm.behaviour_mut().gossipsub.add_explicit_peer(&peer_id); + self.swarm.behaviour_mut().gossipsub_add_peer(&peer_id); } }, SwarmEvent::Behaviour(MyBehaviourEvent::Mdns(mdns::Event::Expired(list))) => { for (peer_id, _multiaddr) in list { info!("mDNS discover peer has expired: {peer_id}"); - self.swarm.behaviour_mut().gossipsub.remove_explicit_peer(&peer_id); + self.swarm.behaviour_mut().gossipsub_remove_peer(&peer_id); } }, SwarmEvent::Behaviour(MyBehaviourEvent::Gossipsub(event)) => { diff --git a/common/Cargo.toml b/common/Cargo.toml index aa3ce56f..1317a86b 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -25,6 +25,7 @@ libp2p = { workspace = true, features = [ ] } alloy-rlp = { workspace = true } alloy-rlp-derive = { workspace = true } +getset = { workspace = true } alloy-primitives = { workspace = true, features = ["serde", "rlp"] } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } diff --git a/common/src/config.rs b/common/src/config.rs index 59e4d4cc..938e19d4 100644 --- a/common/src/config.rs +++ b/common/src/config.rs @@ -1,5 +1,6 @@ //! The config module provides configuration-loading mechanism for OpenRank programs. 
+use getset::Getters; use serde::de::DeserializeOwned; use std::io::ErrorKind; use std::path::{Path, PathBuf}; @@ -36,6 +37,8 @@ pub enum Error { /// // loads ~/.config/openrank-computer/x.toml /// let config: MyConfig = loader.load_named("x").unwrap(); /// ``` +#[derive(Getters)] +#[getset(get = "pub")] pub struct Loader { program_name: String, config_dir: PathBuf, diff --git a/common/src/db/items.rs b/common/src/db/items.rs index dd781214..fe5a7f20 100644 --- a/common/src/db/items.rs +++ b/common/src/db/items.rs @@ -30,7 +30,7 @@ impl DbItem for TxEvent { impl DbItem for Result { fn get_key(&self) -> Vec { - self.compute_request_tx_hash.0.to_vec() + self.get_seq_number().to_be_bytes().to_vec() } fn get_cf() -> String { @@ -44,7 +44,7 @@ impl DbItem for Result { impl DbItem for ResultReference { fn get_key(&self) -> Vec { - self.compute_request_tx_hash.0.to_vec() + self.compute_request_tx_hash().to_bytes() } fn get_prefix(&self) -> String { @@ -58,7 +58,7 @@ impl DbItem for ResultReference { impl DbItem for Tx { fn get_key(&self) -> Vec { - self.hash().0.to_vec() + self.hash().to_bytes() } fn get_cf() -> String { diff --git a/common/src/db/mod.rs b/common/src/db/mod.rs index cefbb972..459fcb0d 100644 --- a/common/src/db/mod.rs +++ b/common/src/db/mod.rs @@ -1,3 +1,4 @@ +use getset::Getters; use rocksdb::{self, Options, DB}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use serde_json::{self, to_vec}; @@ -46,10 +47,11 @@ pub trait DbItem { } } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Getters)] +#[getset(get = "pub")] pub struct Config { - pub directory: String, - pub secondary: Option, + directory: String, + secondary: Option, } impl Config { @@ -58,6 +60,8 @@ impl Config { } } +#[derive(Getters)] +#[getset(get = "pub")] /// Wrapper for database connection. pub struct Db { connection: DB, diff --git a/common/src/lib.rs b/common/src/lib.rs index 55521a5d..f5f48ac1 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -11,9 +11,10 @@ pub mod tx_event; pub mod db; use alloy_rlp::encode; +use getset::Getters; use k256::ecdsa::SigningKey; use libp2p::{ - gossipsub::{self, MessageId, PublishError}, + gossipsub::{self, MessageId, PublishError, SubscriptionError}, identity, mdns, noise, swarm::NetworkBehaviour, tcp, yamux, Swarm, @@ -26,12 +27,34 @@ use tracing::info; use tx::{Address, Tx}; use tx_event::TxEvent; -#[derive(NetworkBehaviour)] +#[derive(NetworkBehaviour, Getters)] +#[getset(get = "pub")] /// A custom libp2p [network behavior](libp2p::swarm::NetworkBehaviour) used by OpenRank nodes. pub struct MyBehaviour { - pub gossipsub: gossipsub::Behaviour, - pub mdns: mdns::tokio::Behaviour, - // pub identify: identify::Behaviour, + gossipsub: gossipsub::Behaviour, + mdns: mdns::tokio::Behaviour, +} + +impl MyBehaviour { + pub fn gossipsub_subscribe( + &mut self, topic: &gossipsub::IdentTopic, + ) -> Result { + self.gossipsub.subscribe(topic) + } + + pub fn gossipsub_publish( + &mut self, topic: gossipsub::IdentTopic, data: Vec, + ) -> Result { + self.gossipsub.publish(topic, data) + } + + pub fn gossipsub_add_peer(&mut self, peer_id: &libp2p::PeerId) { + self.gossipsub.add_explicit_peer(peer_id); + } + + pub fn gossipsub_remove_peer(&mut self, peer_id: &libp2p::PeerId) { + self.gossipsub.remove_explicit_peer(peer_id); + } } /// Builds a libp2p swarm with the custom behaviour. 
@@ -109,7 +132,7 @@ pub fn address_from_sk(sk: &SigningKey) -> Address { let hash = hash_leaf::(vk_bytes[1..].to_vec()); let mut address_bytes = [0u8; 20]; - address_bytes.copy_from_slice(&hash.0[12..]); + address_bytes.copy_from_slice(&hash.inner()[12..]); Address::from_slice(&address_bytes) } diff --git a/common/src/merkle/fixed.rs b/common/src/merkle/fixed.rs index 0f1de748..1feb87e6 100644 --- a/common/src/merkle/fixed.rs +++ b/common/src/merkle/fixed.rs @@ -1,8 +1,10 @@ use crate::merkle::{self, hash_two, Hash}; +use getset::Getters; use sha3::Digest; use std::{collections::HashMap, marker::PhantomData}; -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Getters)] +#[getset(get = "pub")] /// Dense Merkle tree. /// The dense tree is a tree where leaf nodes are compressed to be next to each other /// which makes it more efficient to store and traverse. @@ -13,7 +15,7 @@ where H: Digest, { /// HashMap to keep the level and index of the nodes. - pub(crate) nodes: HashMap>, + nodes: HashMap>, // Number of levels num_levels: u8, /// PhantomData for the hasher diff --git a/common/src/merkle/incremental.rs b/common/src/merkle/incremental.rs index c9b20cf7..934b6f46 100644 --- a/common/src/merkle/incremental.rs +++ b/common/src/merkle/incremental.rs @@ -1,8 +1,10 @@ use crate::merkle::{self, hash_two, next_index, num_to_bits_vec, Hash}; +use getset::Getters; use sha3::Digest; use std::{collections::HashMap, marker::PhantomData}; -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Getters)] +#[getset(get = "pub")] /// Dense incremental Merkle tree. /// The dense tree is a tree where leaf nodes are compressed to be next to each other /// which makes it more efficient to store and traverse. @@ -12,7 +14,7 @@ where H: Digest, { /// HashMap to keep the level and index of the nodes. - pub(crate) nodes: HashMap<(u8, u64), Hash>, + nodes: HashMap<(u8, u64), Hash>, /// Default nodes. default: HashMap<(u8, u64), Hash>, /// Number of levels. diff --git a/common/src/merkle/mod.rs b/common/src/merkle/mod.rs index 14cd2ecd..213e58f8 100644 --- a/common/src/merkle/mod.rs +++ b/common/src/merkle/mod.rs @@ -14,13 +14,17 @@ pub mod incremental; Debug, Clone, Default, PartialEq, Eq, RlpDecodable, RlpEncodable, Serialize, Deserialize, )] /// Used to represent a hash of a node in the merkle tree. -pub struct Hash(#[serde(with = "hex")] pub [u8; 32]); +pub struct Hash(#[serde(with = "hex")] [u8; 32]); impl Hash { /// Converts the hash to a hex string. pub fn to_hex(self) -> String { hex::encode(self.0) } + + pub fn inner(&self) -> &[u8; 32] { + &self.0 + } } #[cfg(test)] diff --git a/common/src/net.rs b/common/src/net.rs index 86fdede3..bbb5e8f8 100644 --- a/common/src/net.rs +++ b/common/src/net.rs @@ -1,3 +1,4 @@ +use getset::Getters; use libp2p::core::transport::ListenerId; use libp2p::identity::{DecodingError, Keypair}; use libp2p::{swarm, Swarm, TransportError}; @@ -23,24 +24,27 @@ pub enum Error { } /// Rpc configuration. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Getters)] +#[getset(get = "pub")] pub struct RpcConfig { - pub address: SocketAddr, + address: SocketAddr, } /// Network configuration. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Getters)] +#[getset(get = "pub")] pub struct Config { - pub listen_on: Vec, - pub keypair: Option, + listen_on: Vec, + keypair: Option, } /// P2P keypair configuration. 
-#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Getters)] +#[getset(get = "pub")] pub struct KeypairConfig { /// Filename of the keypair. Either absolute or relative to the config directory. /// The file contains binary protobuf representation of the keypair. - pub file: Option, + file: Option, } /// Default P2P keypair filename. diff --git a/common/src/result.rs b/common/src/result.rs index 0cf89762..50d0f316 100644 --- a/common/src/result.rs +++ b/common/src/result.rs @@ -1,11 +1,13 @@ use crate::tx; +use getset::Getters; use serde::{Deserialize, Serialize}; -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Getters)] +#[getset(get = "pub")] pub struct GetResultsQuery { - pub request_tx_hash: tx::TxHash, - pub start: u32, - pub size: u32, + request_tx_hash: tx::TxHash, + start: u32, + size: u32, } impl GetResultsQuery { diff --git a/common/src/topics.rs b/common/src/topics.rs index ed5af2ea..436664cc 100644 --- a/common/src/topics.rs +++ b/common/src/topics.rs @@ -1,5 +1,6 @@ use crate::tx::{consts, trust::OwnedNamespace, Address}; use alloy_rlp_derive::{RlpDecodable, RlpEncodable}; +use getset::Getters; use hex::FromHex; use serde::{Deserialize, Serialize}; use std::{ @@ -28,6 +29,11 @@ impl DomainHash { pub fn to_hex(self) -> String { hex::encode(self.0.to_be_bytes()) } + + /// Get the inner value of the hash. + pub fn inner(self) -> u64 { + self.0 + } } impl FromHex for DomainHash { @@ -45,7 +51,8 @@ impl From for DomainHash { } } -#[derive(Clone, Debug, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize, Getters)] +#[getset(get = "pub")] /// Domain of the openrank network. Consists of a trust namespace and a seed namespace + algorithm id. pub struct Domain { /// Address of the trust namespace owner. 
diff --git a/common/src/tx/block.rs b/common/src/tx/block.rs index d0e933ba..2d66bed8 100644 --- a/common/src/tx/block.rs +++ b/common/src/tx/block.rs @@ -1,22 +1,32 @@ use crate::tx::TxHash; use crate::{merkle::Hash, topics::DomainHash}; use alloy_rlp_derive::{RlpDecodable, RlpEncodable}; +use getset::Getters; use serde::{Deserialize, Serialize}; -#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize, RlpEncodable, RlpDecodable)] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, RlpEncodable, RlpDecodable, Getters, +)] +#[getset(get = "pub")] struct PendingDomainUpdate { domain_id: DomainHash, commitment_tx_hash: TxHash, } -#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize, RlpEncodable, RlpDecodable)] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, RlpEncodable, RlpDecodable, Getters, +)] +#[getset(get = "pub")] struct DomainUpdate { domain_id: DomainHash, commitment_tx_hash: TxHash, verification_results_tx_hashes: Vec, } -#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize, RlpEncodable, RlpDecodable)] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, RlpEncodable, RlpDecodable, Getters, +)] +#[getset(get = "pub")] pub struct ProposedBlock { previous_block_hash: TxHash, state_root: Hash, @@ -25,7 +35,10 @@ pub struct ProposedBlock { block_height: u64, } -#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize, RlpEncodable, RlpDecodable)] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, RlpEncodable, RlpDecodable, Getters, +)] +#[getset(get = "pub")] pub struct FinalisedBlock { previous_block_hash: TxHash, state_root: Hash, diff --git a/common/src/tx/compute.rs b/common/src/tx/compute.rs index 4b841250..4efe97d3 100644 --- a/common/src/tx/compute.rs +++ b/common/src/tx/compute.rs @@ -1,14 +1,18 @@ use crate::tx::{trust::ScoreEntry, Address, TxHash}; use crate::{merkle::Hash, topics::DomainHash}; use alloy_rlp_derive::{RlpDecodable, RlpEncodable}; +use getset::Getters; use serde::{Deserialize, Serialize}; -#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize, RlpEncodable, RlpDecodable)] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, RlpEncodable, RlpDecodable, Getters, +)] +#[getset(get = "pub")] pub struct Commitment { - pub assignment_tx_hash: TxHash, - pub lt_root_hash: Hash, - pub compute_root_hash: Hash, - pub scores_tx_hashes: Vec, + assignment_tx_hash: TxHash, + lt_root_hash: Hash, + compute_root_hash: Hash, + scores_tx_hashes: Vec, } impl Commitment { @@ -20,9 +24,12 @@ impl Commitment { } } -#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize, RlpEncodable, RlpDecodable)] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, RlpEncodable, RlpDecodable, Getters, +)] +#[getset(get = "pub")] pub struct Scores { - pub entries: Vec, + entries: Vec, } impl Scores { @@ -31,11 +38,14 @@ impl Scores { } } -#[derive(Debug, Clone, PartialEq, Default, Serialize, Deserialize, RlpEncodable, RlpDecodable)] +#[derive( + Debug, Clone, PartialEq, Default, Serialize, Deserialize, RlpEncodable, RlpDecodable, Getters, +)] +#[getset(get = "pub")] pub struct Request { - pub domain_id: DomainHash, - pub block_height: u32, - pub compute_id: Hash, + domain_id: DomainHash, + block_height: u32, + compute_id: Hash, } impl Request { @@ -44,11 +54,14 @@ impl Request { } } -#[derive(Debug, Clone, PartialEq, Default, Serialize, Deserialize, RlpEncodable, RlpDecodable)] +#[derive( + Debug, Clone, PartialEq, 
Default, Serialize, Deserialize, RlpEncodable, RlpDecodable, Getters, +)] +#[getset(get = "pub")] pub struct Assignment { - pub request_tx_hash: TxHash, - pub assigned_compute_node: Address, - pub assigned_verifier_node: Address, + request_tx_hash: TxHash, + assigned_compute_node: Address, + assigned_verifier_node: Address, } impl Assignment { @@ -59,10 +72,11 @@ impl Assignment { } } -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, RlpEncodable, RlpDecodable)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, RlpEncodable, RlpDecodable, Getters)] +#[getset(get = "pub")] pub struct Verification { - pub assignment_tx_hash: TxHash, - pub verification_result: bool, + assignment_tx_hash: TxHash, + verification_result: bool, } impl Verification { @@ -78,17 +92,18 @@ impl Default for Verification { } /// Combination of several tx hashes representing the result of a compute run by `Computer`. -#[derive(Debug, Clone, RlpEncodable, RlpDecodable, Serialize, Deserialize)] +#[derive(Debug, Clone, RlpEncodable, RlpDecodable, Serialize, Deserialize, Getters)] #[rlp(trailing)] +#[getset(get = "pub")] pub struct Result { /// Hash of the ComputeCommitment TX. - pub compute_commitment_tx_hash: TxHash, + compute_commitment_tx_hash: TxHash, /// Hashes of the ComputeVerification TXs. - pub compute_verification_tx_hashes: Vec, + compute_verification_tx_hashes: Vec, /// Hash of the original ComputeRequest TX. - pub compute_request_tx_hash: TxHash, + compute_request_tx_hash: TxHash, /// Sequence number assigned by the block builder. - pub seq_number: Option, + seq_number: Option, } impl Result { @@ -115,15 +130,26 @@ impl Result { pub fn set_seq_number(&mut self, seq_number: u64) { self.seq_number = Some(seq_number); } + + /// Get sequence number + pub fn get_seq_number(&self) -> u64 { + self.seq_number.unwrap() + } + + /// Append verification tx hash + pub fn append_verification_tx_hash(&mut self, tx_hash: TxHash) { + self.compute_verification_tx_hashes.push(tx_hash); + } } /// Object connecting the sequence number with the original compute request -#[derive(Debug, Clone, RlpEncodable, RlpDecodable, Serialize, Deserialize)] +#[derive(Debug, Clone, RlpEncodable, RlpDecodable, Serialize, Deserialize, Getters)] +#[getset(get = "pub")] pub struct ResultReference { /// Hash of the original job run request transaction. - pub compute_request_tx_hash: TxHash, + compute_request_tx_hash: TxHash, /// Sequence number assigned by the block builder. 
- pub seq_number: u64, + seq_number: u64, } impl ResultReference { diff --git a/common/src/tx/mod.rs b/common/src/tx/mod.rs index ce723dea..b932f388 100644 --- a/common/src/tx/mod.rs +++ b/common/src/tx/mod.rs @@ -2,6 +2,7 @@ use crate::merkle::hash_leaf; use alloy_rlp::{encode, BufMut, Decodable, Encodable, Error as RlpError, Result as RlpResult}; use alloy_rlp_derive::{RlpDecodable, RlpEncodable}; use block::{FinalisedBlock, ProposedBlock}; +use getset::Getters; use k256::ecdsa::signature::hazmat::PrehashVerifier; use k256::ecdsa::{ Error as EcdsaError, RecoveryId, Signature as EcdsaSignature, SigningKey, VerifyingKey, @@ -100,8 +101,9 @@ impl Body { } } -#[derive(Debug, Clone, PartialEq, RlpEncodable, RlpDecodable, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, RlpEncodable, RlpDecodable, Serialize, Deserialize, Getters)] #[rlp(trailing)] +#[getset(get = "pub")] pub struct Tx { nonce: u64, from: Address, @@ -124,26 +126,6 @@ impl Tx { } } - pub fn body(&self) -> Body { - self.body.clone() - } - - pub fn signature(&self) -> Signature { - self.signature.clone() - } - - pub fn nonce(&self) -> u64 { - self.nonce - } - - pub fn from(&self) -> Address { - self.from - } - - pub fn to(&self) -> Address { - self.to - } - pub fn hash(&self) -> TxHash { let mut hasher = Keccak256::new(); hasher.update(self.nonce.to_be_bytes()); @@ -187,7 +169,7 @@ impl Tx { let hash = hash_leaf::(vk_bytes[1..].to_vec()); let mut address_bytes = [0u8; 20]; - address_bytes.copy_from_slice(&hash.0[12..]); + address_bytes.copy_from_slice(&hash.inner()[12..]); if Address::from_slice(&address_bytes) != address { return Err(EcdsaError::new()); @@ -212,7 +194,7 @@ impl Tx { let hash = hash_leaf::(vk_bytes[1..].to_vec()); let mut address_bytes = [0u8; 20]; - address_bytes.copy_from_slice(&hash.0[12..]); + address_bytes.copy_from_slice(&hash.inner()[12..]); let address = Address::from_slice(&address_bytes); Ok(address) @@ -222,7 +204,7 @@ impl Tx { self.sequence_number = Some(sequence_number); } - pub fn sequence_number(&self) -> u64 { + pub fn get_sequence_number(&self) -> u64 { self.sequence_number.unwrap_or_default() } } @@ -232,7 +214,7 @@ pub type Address = alloy_primitives::Address; #[derive( Debug, Clone, Hash, PartialEq, Eq, Default, RlpDecodable, RlpEncodable, Serialize, Deserialize, )] -pub struct TxHash(#[serde(with = "hex")] pub [u8; 32]); +pub struct TxHash(#[serde(with = "hex")] [u8; 32]); impl TxHash { pub fn from_bytes(bytes: Vec) -> Self { @@ -252,14 +234,28 @@ impl TxHash { pub fn to_hex(self) -> String { hex::encode(self.0) } + + pub fn inner(&self) -> &[u8; 32] { + &self.0 + } } #[derive( - Debug, Clone, PartialEq, Eq, Default, RlpDecodable, RlpEncodable, Serialize, Deserialize, + Debug, + Clone, + PartialEq, + Eq, + Default, + RlpDecodable, + RlpEncodable, + Serialize, + Deserialize, + Getters, )] +#[getset(get = "pub")] pub struct Signature { - pub s: [u8; 32], - pub r: [u8; 32], + s: [u8; 32], + r: [u8; 32], r_id: u8, } @@ -267,10 +263,6 @@ impl Signature { pub fn new(s: [u8; 32], r: [u8; 32], r_id: u8) -> Self { Self { s, r, r_id } } - - pub fn r_id(&self) -> u8 { - self.r_id - } } #[cfg(test)] diff --git a/common/src/tx/trust.rs b/common/src/tx/trust.rs index 6ee3ab23..71bb0ad3 100644 --- a/common/src/tx/trust.rs +++ b/common/src/tx/trust.rs @@ -3,6 +3,7 @@ use crate::{merkle::Hash, topics::DomainHash}; use alloy_rlp::{BufMut, Decodable, Encodable, Error as RlpError, Result as RlpResult}; use alloy_rlp_derive::{RlpDecodable, RlpEncodable}; use core::result::Result as CoreResult; +use 
getset::Getters; use hex::FromHex; use serde::{Deserialize, Serialize}; use std::io::Read; @@ -10,7 +11,7 @@ use std::io::Read; #[derive( Debug, Clone, Hash, Default, PartialEq, Eq, RlpDecodable, RlpEncodable, Serialize, Deserialize, )] -pub struct OwnedNamespace(#[serde(with = "hex")] pub [u8; 24]); +pub struct OwnedNamespace(#[serde(with = "hex")] [u8; 24]); impl OwnedNamespace { pub fn new(owner: Address, id: u32) -> Self { @@ -29,6 +30,10 @@ impl OwnedNamespace { bytes.copy_from_slice(&self.0[..20]); Address::from_slice(&bytes) } + + pub fn inner(&self) -> &[u8; 24] { + &self.0 + } } impl FromHex for OwnedNamespace { @@ -39,10 +44,13 @@ impl FromHex for OwnedNamespace { } } -#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize, RlpEncodable, RlpDecodable)] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, RlpEncodable, RlpDecodable, Getters, +)] +#[getset(get = "pub")] pub struct TrustUpdate { - pub trust_id: OwnedNamespace, - pub entries: Vec, + trust_id: OwnedNamespace, + entries: Vec, } impl TrustUpdate { @@ -51,10 +59,13 @@ impl TrustUpdate { } } -#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize, RlpEncodable, RlpDecodable)] +#[derive( + Debug, Clone, Default, PartialEq, Serialize, Deserialize, RlpEncodable, RlpDecodable, Getters, +)] +#[getset(get = "pub")] pub struct SeedUpdate { - pub seed_id: OwnedNamespace, - pub entries: Vec, + seed_id: OwnedNamespace, + entries: Vec, } impl SeedUpdate { @@ -63,10 +74,11 @@ impl SeedUpdate { } } -#[derive(Debug, Clone, PartialEq, Default, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Default, Serialize, Deserialize, Getters)] +#[getset(get = "pub")] pub struct ScoreEntry { - pub id: String, - pub value: f32, + id: String, + value: f32, } impl ScoreEntry { @@ -96,11 +108,12 @@ impl Decodable for ScoreEntry { } } -#[derive(Debug, Clone, PartialEq, Default, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Default, Serialize, Deserialize, Getters)] +#[getset(get = "pub")] pub struct TrustEntry { - pub from: String, - pub to: String, - pub value: f32, + from: String, + to: String, + value: f32, } impl TrustEntry { @@ -132,6 +145,8 @@ impl Decodable for TrustEntry { } } +#[derive(Getters)] +#[getset(get = "pub")] pub struct AcceptedTrustUpdates { sequence_number: u64, trust_update_tx_hashes: Vec, @@ -145,20 +160,10 @@ impl AcceptedTrustUpdates { ) -> Self { Self { sequence_number, trust_update_tx_hashes, seed_update_tx_hashes } } - - pub fn get_sequence_number(&self) -> u64 { - self.sequence_number - } - - pub fn get_trust_update_tx_hashes(&self) -> &Vec { - &self.trust_update_tx_hashes - } - - pub fn get_seed_update_tx_hashes(&self) -> &Vec { - &self.seed_update_tx_hashes - } } +#[derive(Getters)] +#[getset(get = "pub")] pub struct Assignment { to_sequence: u64, domain_id: DomainHash, @@ -173,24 +178,10 @@ impl Assignment { ) -> Self { Self { to_sequence, domain_id, trust_builder, trust_verifier } } - - pub fn get_to_sequence(&self) -> u64 { - self.to_sequence - } - - pub fn get_domain_id(&self) -> DomainHash { - self.domain_id - } - - pub fn get_trust_builder(&self) -> Address { - self.trust_builder - } - - pub fn get_trust_verifier(&self) -> &Vec
{ - &self.trust_verifier - } } +#[derive(Getters)] +#[getset(get = "pub")] pub struct Commitment { trust_assignment_tx_hash: TxHash, root_hash: Hash, @@ -200,16 +191,10 @@ impl Commitment { pub fn new(trust_assignment_tx_hash: TxHash, root_hash: Hash) -> Self { Self { trust_assignment_tx_hash, root_hash } } - - pub fn get_trust_assignment_tx_hash(&self) -> TxHash { - self.trust_assignment_tx_hash.clone() - } - - pub fn get_root_hash(&self) -> Hash { - self.root_hash.clone() - } } +#[derive(Getters)] +#[getset(get = "pub")] pub struct Verification { trust_commitment_tx_hash: TxHash, verification_result: bool, @@ -219,16 +204,10 @@ impl Verification { pub fn new(trust_commitment_tx_hash: TxHash, verification_result: bool) -> Self { Self { trust_commitment_tx_hash, verification_result } } - - pub fn get_trust_commitment_tx_hash(&self) -> TxHash { - self.trust_commitment_tx_hash.clone() - } - - pub fn get_verification_result(&self) -> bool { - self.verification_result - } } +#[derive(Getters)] +#[getset(get = "pub")] pub struct Result { trust_commitment_tx_hash: TxHash, trust_verification_tx_hashes: Vec, @@ -241,16 +220,4 @@ impl Result { ) -> Self { Self { trust_commitment_tx_hash, trust_verification_tx_hashes, timestamp } } - - pub fn get_trust_commitment_tx_hash(&self) -> TxHash { - self.trust_commitment_tx_hash.clone() - } - - pub fn get_trust_verification_tx_hashes(&self) -> &Vec { - &self.trust_verification_tx_hashes - } - - pub fn get_timestamp(&self) -> u64 { - self.timestamp - } } diff --git a/common/src/tx_event.rs b/common/src/tx_event.rs index 0031fb30..19cbe714 100644 --- a/common/src/tx_event.rs +++ b/common/src/tx_event.rs @@ -1,12 +1,24 @@ use alloy_rlp_derive::{RlpDecodable, RlpEncodable}; +use getset::Getters; use serde::{Deserialize, Serialize}; /// Proof of tx inclusion in block. #[derive(Debug, Clone, Default, RlpDecodable, RlpEncodable, Serialize, Deserialize)] pub struct InclusionProof([u8; 32]); +impl InclusionProof { + pub fn new(proof: [u8; 32]) -> Self { + Self(proof) + } + + pub fn inner(&self) -> &[u8; 32] { + &self.0 + } +} + /// Transaction event which includes proof of inclusion and custom data. -#[derive(Debug, Clone, RlpDecodable, RlpEncodable, Serialize, Deserialize)] +#[derive(Debug, Clone, RlpDecodable, RlpEncodable, Serialize, Deserialize, Getters)] +#[getset(get = "pub")] pub struct TxEvent { /// Block height of the DA layer, where the tx was included. pub block_number: u64, @@ -25,9 +37,4 @@ impl TxEvent { pub fn default_with_data(data: Vec) -> Self { Self { block_number: 0, proof: InclusionProof::default(), data } } - - /// Returns the data of the tx event. 
- pub fn data(&self) -> Vec { - self.data.clone() - } } diff --git a/computer/Cargo.toml b/computer/Cargo.toml index 6aad701f..e927ee8b 100644 --- a/computer/Cargo.toml +++ b/computer/Cargo.toml @@ -33,3 +33,4 @@ serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } sha3 = { workspace = true } k256 = { workspace = true } +getset = { workspace = true } diff --git a/computer/src/lib.rs b/computer/src/lib.rs index 5ab2403d..aeaea255 100644 --- a/computer/src/lib.rs +++ b/computer/src/lib.rs @@ -1,6 +1,7 @@ use alloy_rlp::Decodable; use dotenv::dotenv; use futures::StreamExt; +use getset::Getters; use k256::ecdsa::{self, SigningKey}; use libp2p::{gossipsub, mdns, swarm::SwarmEvent, Swarm}; use openrank_common::{ @@ -62,20 +63,24 @@ impl From for Error { } } -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Whitelist { +#[derive(Debug, Clone, Serialize, Deserialize, Getters)] +#[getset(get = "pub")] +struct Whitelist { block_builder: Vec
<Address>, verifier: Vec<Address>
, } -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Config { - pub domains: Vec, - pub whitelist: Whitelist, - pub database: db::Config, - pub p2p: net::Config, +#[derive(Debug, Clone, Serialize, Deserialize, Getters)] +#[getset(get = "pub")] +struct Config { + domains: Vec, + whitelist: Whitelist, + database: db::Config, + p2p: net::Config, } +#[derive(Getters)] +#[getset(get = "pub")] pub struct Node { swarm: Swarm, config: Config, @@ -103,18 +108,18 @@ impl Node { TxEvent::decode(&mut message.data.as_slice()).map_err(Error::Decode)?; let mut tx = Tx::decode(&mut tx_event.data().as_slice()).map_err(Error::Decode)?; - if let Body::TrustUpdate(trust_update) = tx.body() { + if let Body::TrustUpdate(trust_update) = tx.body().clone() { tx.verify_against(namespace.owner()).map_err(Error::Signature)?; // Add Tx to db tx.set_sequence_number(message.sequence_number.unwrap_or_default()); self.db.put(tx.clone()).map_err(Error::Db)?; - assert!(*namespace == trust_update.trust_id); + assert!(namespace == trust_update.trust_id()); let domain = domains .iter() .find(|x| &x.trust_namespace() == namespace) .ok_or(Error::DomainNotFound(namespace.clone().to_hex()))?; self.compute_runner - .update_trust(domain.clone(), trust_update.entries.clone()) + .update_trust(domain.clone(), trust_update.entries().clone()) .map_err(Error::Runner)?; info!( "TOPIC: {}, ID: {message_id}, FROM: {propagation_source}", @@ -129,18 +134,18 @@ impl Node { TxEvent::decode(&mut message.data.as_slice()).map_err(Error::Decode)?; let mut tx = Tx::decode(&mut tx_event.data().as_slice()).map_err(Error::Decode)?; - if let Body::SeedUpdate(seed_update) = tx.body() { + if let Body::SeedUpdate(seed_update) = tx.body().clone() { tx.verify_against(namespace.owner()).map_err(Error::Signature)?; // Add Tx to db tx.set_sequence_number(message.sequence_number.unwrap_or_default()); self.db.put(tx.clone()).map_err(Error::Db)?; - assert!(*namespace == seed_update.seed_id); + assert!(namespace == seed_update.seed_id()); let domain = domains .iter() .find(|x| &x.trust_namespace() == namespace) .ok_or(Error::DomainNotFound(namespace.clone().to_hex()))?; self.compute_runner - .update_seed(domain.clone(), seed_update.entries.clone()) + .update_seed(domain.clone(), seed_update.entries().clone()) .map_err(Error::Runner)?; info!( "TOPIC: {}, ID: {message_id}, FROM: {propagation_source}", @@ -161,12 +166,15 @@ impl Node { // Add Tx to db self.db.put(tx.clone()).map_err(Error::Db)?; let computer_address = address_from_sk(&self.secret_key); - assert_eq!(computer_address, compute_assignment.assigned_compute_node); + assert_eq!( + &computer_address, + compute_assignment.assigned_compute_node() + ); assert!(self .config .whitelist .verifier - .contains(&compute_assignment.assigned_verifier_node)); + .contains(compute_assignment.assigned_verifier_node())); let domain = domains .iter() @@ -260,7 +268,7 @@ impl Node { let domain_hashes = config.domains.iter().map(|x| x.to_hash()).collect(); let compute_runner = ComputeRunner::new(domain_hashes); - let swarm = build_node(net::load_keypair(&config.p2p.keypair, &config_loader)?).await?; + let swarm = build_node(net::load_keypair(config.p2p().keypair(), &config_loader)?).await?; info!("PEER_ID: {:?}", swarm.local_peer_id()); Ok(Self { swarm, config, db, compute_runner, secret_key }) @@ -306,10 +314,10 @@ impl Node { // Create a Gossipsub topic let topic = gossipsub::IdentTopic::new(topic.clone()); // subscribes to our topic - self.swarm.behaviour_mut().gossipsub.subscribe(&topic)?; + 
self.swarm.behaviour_mut().gossipsub_subscribe(&topic)?; } - net::listen_on(&mut self.swarm, &self.config.p2p.listen_on)?; + net::listen_on(&mut self.swarm, self.config.p2p().listen_on())?; // Kick it off loop { @@ -318,13 +326,13 @@ impl Node { SwarmEvent::Behaviour(MyBehaviourEvent::Mdns(mdns::Event::Discovered(list))) => { for (peer_id, _multiaddr) in list { info!("mDNS discovered a new peer: {peer_id}"); - self.swarm.behaviour_mut().gossipsub.add_explicit_peer(&peer_id); + self.swarm.behaviour_mut().gossipsub_add_peer(&peer_id); } }, SwarmEvent::Behaviour(MyBehaviourEvent::Mdns(mdns::Event::Expired(list))) => { for (peer_id, _multiaddr) in list { info!("mDNS discover peer has expired: {peer_id}"); - self.swarm.behaviour_mut().gossipsub.remove_explicit_peer(&peer_id); + self.swarm.behaviour_mut().gossipsub_remove_peer(&peer_id); } }, SwarmEvent::Behaviour(MyBehaviourEvent::Gossipsub(event)) => { @@ -363,13 +371,13 @@ impl Node { drop(seed_update_txs); // sort txs by sequence_number - txs.sort_unstable_by_key(|tx| tx.sequence_number()); + txs.sort_unstable_by_key(|tx| tx.get_sequence_number()); // update compute runner for tx in txs { match tx.body() { Body::TrustUpdate(trust_update) => { - let namespace = trust_update.trust_id; + let namespace = trust_update.trust_id().clone(); let domain = self .config .domains @@ -377,11 +385,11 @@ impl Node { .find(|x| x.trust_namespace() == namespace) .ok_or(Error::DomainNotFound(namespace.clone().to_hex()))?; self.compute_runner - .update_trust(domain.clone(), trust_update.entries.clone()) + .update_trust(domain.clone(), trust_update.entries().clone()) .map_err(Error::Runner)?; }, Body::SeedUpdate(seed_update) => { - let namespace = seed_update.seed_id; + let namespace = seed_update.seed_id().clone(); let domain = self .config .domains @@ -389,7 +397,7 @@ impl Node { .find(|x| x.seed_namespace() == namespace) .ok_or(Error::DomainNotFound(namespace.clone().to_hex()))?; self.compute_runner - .update_seed(domain.clone(), seed_update.entries.clone()) + .update_seed(domain.clone(), seed_update.entries().clone()) .map_err(Error::Runner)?; }, _ => (), diff --git a/computer/src/runner.rs b/computer/src/runner.rs index f74249d6..84253ef9 100644 --- a/computer/src/runner.rs +++ b/computer/src/runner.rs @@ -1,3 +1,4 @@ +use getset::Getters; use openrank_common::{ algos::{self, et::positive_run}, merkle::{ @@ -16,6 +17,8 @@ use std::{ fmt::{Display, Formatter, Result as FmtResult}, }; +#[derive(Getters)] +#[getset(get = "pub")] /// Struct containing the state of the computer compute runner. 
pub struct ComputeRunner { count: HashMap, @@ -85,29 +88,29 @@ impl ComputeRunner { .ok_or(Error::SeedTrustNotFound(domain.to_hash()))?; let default_sub_tree = DenseIncrementalMerkleTree::::new(32); for entry in trust_entries { - let from_index = if let Some(i) = domain_indices.get(&entry.from) { + let from_index = if let Some(i) = domain_indices.get(entry.from()) { *i } else { let curr_count = *count; - domain_indices.insert(entry.from.clone(), curr_count); + domain_indices.insert(entry.from().clone(), curr_count); *count += 1; curr_count }; - let to_index = if let Some(i) = domain_indices.get(&entry.to) { + let to_index = if let Some(i) = domain_indices.get(entry.to()) { *i } else { let curr_count = *count; - domain_indices.insert(entry.to.clone(), *count); + domain_indices.insert(entry.to().clone(), *count); *count += 1; curr_count }; - lt.insert((from_index, to_index), entry.value); + lt.insert((from_index, to_index), *entry.value()); lt_sub_trees.entry(from_index).or_insert_with(|| default_sub_tree.clone()); let sub_tree = lt_sub_trees .get_mut(&from_index) .ok_or(Error::LocalTrustSubTreesNotFoundWithIndex(from_index))?; - let leaf = hash_leaf::(entry.value.to_be_bytes().to_vec()); + let leaf = hash_leaf::(entry.value().to_be_bytes().to_vec()); sub_tree.insert_leaf(to_index, leaf); let sub_tree_root = sub_tree.root().map_err(Error::Merkle)?; @@ -143,11 +146,11 @@ impl ComputeRunner { .ok_or(Error::SeedTrustNotFound(domain.to_hash()))?; let default_sub_tree = DenseIncrementalMerkleTree::::new(32); for entry in seed_entries { - let index = if let Some(i) = domain_indices.get(&entry.id) { + let index = if let Some(i) = domain_indices.get(entry.id()) { *i } else { let curr_count = *count; - domain_indices.insert(entry.id.clone(), curr_count); + domain_indices.insert(entry.id().clone(), curr_count); *count += 1; curr_count }; @@ -157,11 +160,11 @@ impl ComputeRunner { .get_mut(&index) .ok_or(Error::LocalTrustSubTreesNotFoundWithIndex(index))?; let sub_tree_root = sub_tree.root().map_err(Error::Merkle)?; - let seed_hash = hash_leaf::(entry.value.to_be_bytes().to_vec()); + let seed_hash = hash_leaf::(entry.value().to_be_bytes().to_vec()); let leaf = hash_two::(sub_tree_root, seed_hash); lt_master_tree.insert_leaf(index, leaf); - seed.insert(index, entry.value); + seed.insert(index, *entry.value()); } Ok(()) diff --git a/openrank-sdk/Cargo.toml b/openrank-sdk/Cargo.toml index b2e26ae8..23da57a5 100644 --- a/openrank-sdk/Cargo.toml +++ b/openrank-sdk/Cargo.toml @@ -22,3 +22,4 @@ dotenv = { workspace = true } rand = { workspace = true } csv = "1.3.0" clap = { version = "4.5.9", features = ["derive"] } +getset = { workspace = true } diff --git a/openrank-sdk/src/main.rs b/openrank-sdk/src/main.rs index 171ff856..5b1f4598 100644 --- a/openrank-sdk/src/main.rs +++ b/openrank-sdk/src/main.rs @@ -2,6 +2,7 @@ use alloy_rlp::encode; use clap::{Parser, Subcommand}; use csv::StringRecord; use dotenv::dotenv; +use getset::Getters; use jsonrpsee::{core::client::ClientT, http_client::HttpClient}; use k256::{ecdsa::SigningKey, schnorr::CryptoRngCore}; use openrank_common::{ @@ -65,29 +66,33 @@ struct Args { method: Method, } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Getters)] +#[getset(get = "pub")] /// The configuration for the Sequencer. 
pub struct Sequencer { endpoint: String, result_size: u32, } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Getters)] +#[getset(get = "pub")] /// The configuration for the SDK. pub struct Config { /// The domain to be updated. - pub domain: Domain, + domain: Domain, /// The Sequencer configuration. It contains the endpoint of the Sequencer. - pub sequencer: Sequencer, + sequencer: Sequencer, } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Getters)] +#[getset(get = "pub")] struct ComputeRequestResult { compute_tx_hash: TxHash, tx_event: TxEvent, } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Getters)] +#[getset(get = "pub")] struct ComputeResults { votes: Vec, scores: Vec, @@ -245,13 +250,16 @@ async fn get_compute_result_txs(arg: String, config_path: &str) -> Result Result<(), Box> { Method::ComputeRequest { path, output_path } => { let secret_key = get_secret_key()?; let res = compute_request(secret_key, path.as_str()).await?; - let hex_encoded_tx_hash = hex::encode(res.compute_tx_hash.0); + let hex_encoded_tx_hash = hex::encode(res.compute_tx_hash.inner()); println!("{}", hex_encoded_tx_hash); if let Some(output_path) = output_path { write_json_to_file(&output_path, res)?; @@ -359,7 +367,7 @@ async fn main() -> Result<(), Box> { println!("votes: {:?}", votes); for res in &scores { - println!("{}: {}", res.id, res.value); + println!("{}: {}", res.id().clone(), *res.value()); } if let Some(output_path) = output_path { write_json_to_file(&output_path, scores)?; diff --git a/relayer/src/lib.rs b/relayer/src/lib.rs index 7fc91361..c8133bbb 100644 --- a/relayer/src/lib.rs +++ b/relayer/src/lib.rs @@ -30,7 +30,7 @@ impl SQLRelayer { let mut last_processed_keys = HashMap::new(); - let path = db_config.clone().secondary.expect("No secondary path found"); + let path = db_config.clone().secondary().clone().expect("No secondary path found"); let last_processed_key = target_db .load_last_processed_key(&format!("relayer_last_key_{}_{}", path, "tx")) .await @@ -68,7 +68,7 @@ impl SQLRelayer { self.db.refresh().unwrap(); let results = self.db.read_from_end::("result", None).unwrap(); - let dir = self.db.get_config().secondary.expect("Secondary path missing"); + let dir = self.db.get_config().secondary().clone().expect("Secondary path missing"); let last_count = self.last_processed_keys[dir.as_str()].unwrap_or(0); let mut current_count = 0; @@ -78,8 +78,10 @@ impl SQLRelayer { // assert_eq!(last_count as u64, res.seq_number.unwrap()); // ComputeRequest - let (request_key, request_tx_with_hash) = - self.get_tx_with_hash(consts::COMPUTE_REQUEST, res.compute_request_tx_hash.clone()); + let (request_key, request_tx_with_hash) = self.get_tx_with_hash( + consts::COMPUTE_REQUEST, + res.compute_request_tx_hash().clone(), + ); current_count += 1; @@ -95,7 +97,7 @@ impl SQLRelayer { // ComputeCommitment let (commitment_key, commitment_tx_with_hash) = self.get_tx_with_hash( consts::COMPUTE_COMMITMENT, - res.compute_commitment_tx_hash.clone(), + res.compute_commitment_tx_hash().clone(), ); current_count += 1; @@ -110,7 +112,7 @@ impl SQLRelayer { } // ComputeVerification - for verification_tx_hash in res.compute_verification_tx_hashes.clone() { + for verification_tx_hash in res.compute_verification_tx_hashes().clone() { current_count += 1; let (verification_key, verification_tx_with_hash) = diff --git a/relayer/src/main.rs b/relayer/src/main.rs index 
3edd6f32..8b04bf18 100644 --- a/relayer/src/main.rs +++ b/relayer/src/main.rs @@ -11,7 +11,7 @@ pub mod api; #[derive(Debug, Clone, Serialize, Deserialize)] /// The configuration for the Relayer. pub struct Config { - pub database: db::Config, + database: db::Config, } #[tokio::main] diff --git a/sequencer/Cargo.toml b/sequencer/Cargo.toml index 99950f67..825be803 100644 --- a/sequencer/Cargo.toml +++ b/sequencer/Cargo.toml @@ -32,3 +32,4 @@ serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } jsonrpsee = { workspace = true, features = ["server", "macros"] } toml = { workspace = true } +getset = { workspace = true } diff --git a/sequencer/src/lib.rs b/sequencer/src/lib.rs index 096b4e39..547c6a74 100644 --- a/sequencer/src/lib.rs +++ b/sequencer/src/lib.rs @@ -1,4 +1,5 @@ use futures::StreamExt; +use getset::Getters; use jsonrpsee::{server::Server, RpcModule}; use libp2p::{gossipsub, mdns, swarm::SwarmEvent, Swarm}; use openrank_common::{ @@ -20,23 +21,27 @@ use tracing::{error, info}; mod rpc; -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Getters)] +#[getset(get = "pub")] /// The whitelist for the Sequencer. -pub struct Whitelist { +struct Whitelist { /// The list of addresses that are allowed to call the Sequencer. - pub users: Vec
<Address>, +    users: Vec<Address>
, } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Getters)] +#[getset(get = "pub")] /// The configuration for the Sequencer. -pub struct Config { +struct Config { /// The whitelist for the Sequencer. - pub whitelist: Whitelist, - pub database: db::Config, - pub p2p: net::Config, - pub rpc: net::RpcConfig, + whitelist: Whitelist, + database: db::Config, + p2p: net::Config, + rpc: net::RpcConfig, } +#[derive(Getters)] +#[getset(get = "pub")] /// The Sequencer node. It contains the Swarm, the Server, and the Receiver. pub struct Node { config: Config, @@ -62,7 +67,7 @@ impl Node { let seq_server = SequencerServer::new(sender, config.whitelist.users.clone(), db); let rpc = seq_server.into_rpc(); - let swarm = build_node(net::load_keypair(&config.p2p.keypair, &config_loader)?).await?; + let swarm = build_node(net::load_keypair(config.p2p().keypair(), &config_loader)?).await?; info!("PEER_ID: {:?}", swarm.local_peer_id()); Ok(Self { swarm, config, rpc, receiver }) @@ -74,8 +79,8 @@ impl Node { /// - Handle gossipsub events /// - Handle mDNS events pub async fn run(&mut self) -> Result<(), Box> { - net::listen_on(&mut self.swarm, &self.config.p2p.listen_on)?; - let server = Server::builder().build(self.config.rpc.address).await?; + net::listen_on(&mut self.swarm, self.config.p2p().listen_on())?; + let server = Server::builder().build(self.config.rpc().address()).await?; let handle = server.start(self.rpc.clone()); tokio::spawn(handle.stopped()); @@ -87,7 +92,7 @@ impl Node { let topic_wrapper = gossipsub::IdentTopic::new(topic.clone()); info!("PUBLISH: {:?}", topic.clone()); if let Err(e) = - self.swarm.behaviour_mut().gossipsub.publish(topic_wrapper, data) + self.swarm.behaviour_mut().gossipsub_publish(topic_wrapper, data) { error!("Publish error: {e:?}"); } @@ -97,13 +102,13 @@ impl Node { SwarmEvent::Behaviour(MyBehaviourEvent::Mdns(mdns::Event::Discovered(list))) => { for (peer_id, _multiaddr) in list { info!("mDNS discovered a new peer: {peer_id}"); - self.swarm.behaviour_mut().gossipsub.add_explicit_peer(&peer_id); + self.swarm.behaviour_mut().gossipsub_add_peer(&peer_id); } }, SwarmEvent::Behaviour(MyBehaviourEvent::Mdns(mdns::Event::Expired(list))) => { for (peer_id, _multiaddr) in list { info!("mDNS discover peer has expired: {peer_id}"); - self.swarm.behaviour_mut().gossipsub.remove_explicit_peer(&peer_id); + self.swarm.behaviour_mut().gossipsub_remove_peer(&peer_id); } }, SwarmEvent::NewListenAddr { address, .. } => { diff --git a/sequencer/src/rpc.rs b/sequencer/src/rpc.rs index 45a5bb8d..811cc6eb 100644 --- a/sequencer/src/rpc.rs +++ b/sequencer/src/rpc.rs @@ -1,4 +1,5 @@ use alloy_rlp::{encode, Decodable}; +use getset::Getters; use jsonrpsee::core::async_trait; use jsonrpsee::proc_macros::rpc; use jsonrpsee::types::error::{INVALID_REQUEST_CODE, PARSE_ERROR_CODE}; @@ -40,6 +41,8 @@ pub trait Rpc { async fn get_txs(&self, keys: Vec<(String, tx::TxHash)>) -> Result, ErrorObjectOwned>; } +#[derive(Getters)] +#[getset(get = "pub")] /// The Sequencer JsonRPC server. It contains the sender, the whitelisted users, and the database connection. 
pub struct SequencerServer { sender: Sender<(Vec, Topic)>, @@ -93,7 +96,7 @@ impl SequencerServer { )); } - Ok((tx_bytes, tx.body())) + Ok((tx_bytes, tx.body().clone())) } } @@ -113,7 +116,7 @@ impl RpcServer for SequencerServer { let tx_event = TxEvent::default_with_data(tx_bytes); let channel_message = ( encode(tx_event.clone()), - Topic::NamespaceTrustUpdate(trust_update.trust_id), + Topic::NamespaceTrustUpdate(trust_update.trust_id().clone()), ); self.sender.send(channel_message).await.map_err(|e| { error!("{}", e); @@ -136,7 +139,7 @@ impl RpcServer for SequencerServer { let tx_event = TxEvent::default_with_data(tx_bytes); let channel_message = ( encode(tx_event.clone()), - Topic::NamespaceSeedUpdate(seed_update.seed_id), + Topic::NamespaceSeedUpdate(seed_update.seed_id().clone()), ); self.sender.send(channel_message).await.map_err(|e| { error!("{}", e); @@ -159,7 +162,7 @@ impl RpcServer for SequencerServer { let tx_event = TxEvent::default_with_data(tx_bytes); let channel_message = ( encode(tx_event.clone()), - Topic::DomainRequest(compute_request.domain_id), + Topic::DomainRequest(*compute_request.domain_id()), ); self.sender.send(channel_message).await.map_err(|e| { error!("{}", e); @@ -180,33 +183,33 @@ impl RpcServer for SequencerServer { let result_reference = self .db - .get::(query.request_tx_hash.to_bytes()) + .get::(query.request_tx_hash().to_bytes()) .map_err(|e| { error!("{}", e); ErrorObjectOwned::from(ErrorCode::InternalError) })?; - let key = compute::Result::construct_full_key(result_reference.seq_number); + let key = compute::Result::construct_full_key(*result_reference.seq_number()); let result = self.db.get::(key).map_err(|e| { error!("{}", e); ErrorObjectOwned::from(ErrorCode::InternalError) })?; let key = Tx::construct_full_key( consts::COMPUTE_COMMITMENT, - result.compute_commitment_tx_hash, + result.compute_commitment_tx_hash().clone(), ); let tx = self.db.get::(key).map_err(|e| { error!("{}", e); ErrorObjectOwned::from(ErrorCode::InternalError) })?; - let commitment = match tx.body() { + let commitment = match tx.body().clone() { tx::Body::ComputeCommitment(commitment) => Ok(commitment), _ => Err(ErrorObjectOwned::from(ErrorCode::InternalError)), }?; let create_scores_tx: Vec = { let mut create_scores_tx = Vec::new(); - for tx_hash in commitment.scores_tx_hashes.into_iter() { - let key = Tx::construct_full_key(consts::COMPUTE_SCORES, tx_hash); + for tx_hash in commitment.scores_tx_hashes().iter() { + let key = Tx::construct_full_key(consts::COMPUTE_SCORES, tx_hash.clone()); let tx = self.db.get::(key).map_err(|e| { error!("{}", e); ErrorObjectOwned::from(ErrorCode::InternalError) @@ -218,7 +221,7 @@ impl RpcServer for SequencerServer { let create_scores: Vec = { let mut create_scores = Vec::new(); for tx in create_scores_tx.into_iter() { - let scores = match tx.body() { + let scores = match tx.body().clone() { tx::Body::ComputeScores(scores) => Ok(scores), _ => Err(ErrorObjectOwned::from(ErrorCode::InternalError)), }?; @@ -227,13 +230,13 @@ impl RpcServer for SequencerServer { create_scores }; let mut score_entries: Vec = - create_scores.into_iter().flat_map(|x| x.entries).collect(); - score_entries.sort_by(|a, b| match a.value.partial_cmp(&b.value) { + create_scores.into_iter().flat_map(|x| x.entries().clone()).collect(); + score_entries.sort_by(|a, b| match a.value().partial_cmp(b.value()) { Some(ordering) => ordering, None => { - if a.value.is_nan() && b.value.is_nan() { + if a.value().is_nan() && b.value().is_nan() { Ordering::Equal - } else if 
a.value.is_nan() { + } else if a.value().is_nan() { Ordering::Greater } else { Ordering::Less @@ -242,16 +245,16 @@ impl RpcServer for SequencerServer { }); score_entries.reverse(); let score_entries: Vec = score_entries - .split_at(query.start as usize) + .split_at(*query.start() as usize) .1 .iter() - .take(query.size as usize) + .take(*query.size() as usize) .cloned() .collect(); let verificarion_results_tx: Vec = { let mut verification_resutls_tx = Vec::new(); - for tx_hash in result.compute_verification_tx_hashes.iter() { + for tx_hash in result.compute_verification_tx_hashes().iter() { let key = Tx::construct_full_key(consts::COMPUTE_VERIFICATION, tx_hash.clone()); let tx = self.db.get::(key).map_err(|e| { error!("{}", e); @@ -264,7 +267,7 @@ impl RpcServer for SequencerServer { let verification_results: Vec = { let mut verification_results = Vec::new(); for tx in verificarion_results_tx.into_iter() { - let result = match tx.body() { + let result = match tx.body().clone() { tx::Body::ComputeVerification(result) => Ok(result), _ => Err(ErrorObjectOwned::from(ErrorCode::InternalError)), }?; @@ -273,7 +276,7 @@ impl RpcServer for SequencerServer { verification_results }; let verification_results_bools: Vec = - verification_results.into_iter().map(|x| x.verification_result).collect(); + verification_results.into_iter().map(|x| *x.verification_result()).collect(); Ok((verification_results_bools, score_entries)) } diff --git a/verifier/Cargo.toml b/verifier/Cargo.toml index 2c56b190..ce7601e9 100644 --- a/verifier/Cargo.toml +++ b/verifier/Cargo.toml @@ -34,3 +34,4 @@ serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } sha3 = { workspace = true } k256 = { workspace = true } +getset = { workspace = true } diff --git a/verifier/src/lib.rs b/verifier/src/lib.rs index 9980f16f..822c6345 100644 --- a/verifier/src/lib.rs +++ b/verifier/src/lib.rs @@ -1,6 +1,7 @@ use alloy_rlp::Decodable; use dotenv::dotenv; use futures::StreamExt; +use getset::Getters; use k256::ecdsa; use k256::ecdsa::SigningKey; use libp2p::{gossipsub, mdns, swarm::SwarmEvent, Swarm}; @@ -64,26 +65,30 @@ impl From for Error { } } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Getters)] +#[getset(get = "pub")] /// The whitelist for the Verifier. -pub struct Whitelist { +struct Whitelist { /// The list of addresses that are allowed to be block builders. block_builder: Vec
diff --git a/verifier/src/lib.rs b/verifier/src/lib.rs
index 9980f16f..822c6345 100644
--- a/verifier/src/lib.rs
+++ b/verifier/src/lib.rs
@@ -1,6 +1,7 @@
 use alloy_rlp::Decodable;
 use dotenv::dotenv;
 use futures::StreamExt;
+use getset::Getters;
 use k256::ecdsa;
 use k256::ecdsa::SigningKey;
 use libp2p::{gossipsub, mdns, swarm::SwarmEvent, Swarm};
@@ -64,26 +65,30 @@ impl From for Error {
     }
 }
 
-#[derive(Debug, Clone, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize, Getters)]
+#[getset(get = "pub")]
 /// The whitelist for the Verifier.
-pub struct Whitelist {
+struct Whitelist {
     /// The list of addresses that are allowed to be block builders.
     block_builder: Vec,
     /// The list of addresses that are allowed to be computers.
     computer: Vec,
 }
 
-#[derive(Debug, Clone, Serialize, Deserialize)]
+#[derive(Debug, Clone, Serialize, Deserialize, Getters)]
+#[getset(get = "pub")]
 /// The configuration for the Verifier.
-pub struct Config {
+struct Config {
     /// The list of domains to perform verification for.
-    pub domains: Vec,
+    domains: Vec,
     /// The whitelist for the Verifier.
-    pub whitelist: Whitelist,
-    pub database: db::Config,
-    pub p2p: net::Config,
+    whitelist: Whitelist,
+    database: db::Config,
+    p2p: net::Config,
 }
 
+#[derive(Getters)]
+#[getset(get = "pub")]
 /// The Verifier node. It contains the Swarm, the Config, the DB, the VerificationRunner, and the SecretKey.
 pub struct Node {
     swarm: Swarm,
@@ -113,7 +118,7 @@ impl Node {
         let domain_hashes = config.domains.iter().map(|x| x.to_hash()).collect();
         let verification_runner = VerificationRunner::new(domain_hashes);
 
-        let swarm = build_node(net::load_keypair(&config.p2p.keypair, &config_loader)?).await?;
+        let swarm = build_node(net::load_keypair(config.p2p().keypair(), &config_loader)?).await?;
         info!("PEER_ID: {:?}", swarm.local_peer_id());
 
         Ok(Self { swarm, config, db, verification_runner, secret_key })
@@ -137,18 +142,18 @@ impl Node {
                     TxEvent::decode(&mut message.data.as_slice()).map_err(Error::Decode)?;
                 let mut tx = Tx::decode(&mut tx_event.data().as_slice()).map_err(Error::Decode)?;
-                if let tx::Body::TrustUpdate(trust_update) = tx.body() {
+                if let tx::Body::TrustUpdate(trust_update) = tx.body().clone() {
                     tx.verify_against(namespace.owner()).map_err(Error::Signature)?;
                     // Add Tx to db
                     tx.set_sequence_number(message.sequence_number.unwrap_or_default());
                     self.db.put(tx.clone()).map_err(Error::Db)?;
-                    assert!(*namespace == trust_update.trust_id);
+                    assert!(namespace == trust_update.trust_id());
                     let domain = domains
                         .iter()
                         .find(|x| &x.trust_namespace() == namespace)
                         .ok_or(Error::DomainNotFound(namespace.clone().to_hex()))?;
                     self.verification_runner
-                        .update_trust(domain.clone(), trust_update.entries.clone())
+                        .update_trust(domain.clone(), trust_update.entries().clone())
                         .map_err(Error::Runner)?;
                     info!(
                         "TOPIC: {}, ID: {message_id}, FROM: {propagation_source}",
@@ -167,13 +172,13 @@ impl Node {
                     tx.verify_against(namespace.owner()).map_err(Error::Signature)?;
                     // Add Tx to db
                     self.db.put(tx.clone()).map_err(Error::Db)?;
-                    assert!(*namespace == seed_update.seed_id);
+                    assert!(namespace == seed_update.seed_id());
                     let domain = domains
                         .iter()
                         .find(|x| &x.trust_namespace() == namespace)
                         .ok_or(Error::DomainNotFound(namespace.clone().to_hex()))?;
                     self.verification_runner
-                        .update_seed(domain.clone(), seed_update.entries.clone())
+                        .update_seed(domain.clone(), seed_update.entries().clone())
                         .map_err(Error::Runner)?;
                     info!(
                         "TOPIC: {}, ID: {message_id}, FROM: {propagation_source}",
@@ -194,12 +199,15 @@ impl Node {
                     // Add Tx to db
                     self.db.put(tx.clone()).map_err(Error::Db)?;
                     let computer_address = address_from_sk(&self.secret_key);
-                    assert_eq!(computer_address, compute_assignment.assigned_verifier_node);
+                    assert_eq!(
+                        computer_address,
+                        *compute_assignment.assigned_verifier_node()
+                    );
                     assert!(self
                         .config
                         .whitelist
                         .computer
-                        .contains(&compute_assignment.assigned_compute_node));
+                        .contains(compute_assignment.assigned_compute_node()));
 
                     let domain = domains
                         .iter()
@@ -345,13 +353,13 @@ impl Node {
         drop(seed_update_txs);
 
         // sort txs by sequence_number
-        txs.sort_unstable_by_key(|tx| tx.sequence_number());
+        txs.sort_unstable_by_key(|tx| tx.get_sequence_number());
 
         // update verification runner
         for tx in txs {
             match tx.body() {
                 tx::Body::TrustUpdate(trust_update) => {
-                    let namespace = trust_update.trust_id;
+                    let namespace = trust_update.trust_id().clone();
                     let domain = self
                         .config
                         .domains
@@ -359,11 +367,11 @@ impl Node {
                         .find(|x| x.trust_namespace() == namespace)
                         .ok_or(Error::DomainNotFound(namespace.clone().to_hex()))?;
                     self.verification_runner
-                        .update_trust(domain.clone(), trust_update.entries.clone())
+                        .update_trust(domain.clone(), trust_update.entries().clone())
                         .map_err(Error::Runner)?;
                 },
                 tx::Body::SeedUpdate(seed_update) => {
-                    let namespace = seed_update.seed_id;
+                    let namespace = seed_update.seed_id().clone();
                     let domain = self
                         .config
                         .domains
@@ -371,7 +379,7 @@ impl Node {
                         .find(|x| x.seed_namespace() == namespace)
                         .ok_or(Error::DomainNotFound(namespace.clone().to_hex()))?;
                     self.verification_runner
-                        .update_seed(domain.clone(), seed_update.entries.clone())
+                        .update_seed(domain.clone(), seed_update.entries().clone())
                         .map_err(Error::Runner)?;
                 },
                 _ => (),
@@ -439,10 +447,10 @@ impl Node {
             // Create a Gossipsub topic
             let topic = gossipsub::IdentTopic::new(topic.clone());
             // subscribes to our topic
-            self.swarm.behaviour_mut().gossipsub.subscribe(&topic)?;
+            self.swarm.behaviour_mut().gossipsub_subscribe(&topic)?;
         }
 
-        net::listen_on(&mut self.swarm, &self.config.p2p.listen_on)?;
+        net::listen_on(&mut self.swarm, self.config.p2p().listen_on())?;
 
         // Kick it off
         loop {
@@ -451,13 +459,13 @@ impl Node {
                 SwarmEvent::Behaviour(MyBehaviourEvent::Mdns(mdns::Event::Discovered(list))) => {
                     for (peer_id, _multiaddr) in list {
                         info!("mDNS discovered a new peer: {peer_id}");
-                        self.swarm.behaviour_mut().gossipsub.add_explicit_peer(&peer_id);
+                        self.swarm.behaviour_mut().gossipsub_add_peer(&peer_id);
                     }
                 },
                 SwarmEvent::Behaviour(MyBehaviourEvent::Mdns(mdns::Event::Expired(list))) => {
                     for (peer_id, _multiaddr) in list {
                         info!("mDNS discover peer has expired: {peer_id}");
-                        self.swarm.behaviour_mut().gossipsub.remove_explicit_peer(&peer_id);
+                        self.swarm.behaviour_mut().gossipsub_remove_peer(&peer_id);
                     }
                 },
                 SwarmEvent::Behaviour(MyBehaviourEvent::Gossipsub(event)) => {
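For reference, a rough sketch of what the `#[derive(Getters)]` / `#[getset(get = "pub")]` pair used throughout this change generates for a struct like `Config` above. Field types here are stand-ins and the expansion is approximate, not getset's literal output: the crate emits one `pub` getter per field returning a shared reference, which is why call sites change from `&config.p2p.keypair` to `config.p2p().keypair()`.

```rust
// Stand-in types for illustration only.
struct NetConfig {
    keypair: String,
    listen_on: Vec<String>,
}

struct Config {
    p2p: NetConfig,
}

// Hand-written equivalents of the derived getters.
impl NetConfig {
    fn keypair(&self) -> &String { &self.keypair }
    fn listen_on(&self) -> &Vec<String> { &self.listen_on }
}

impl Config {
    fn p2p(&self) -> &NetConfig { &self.p2p }
}

fn main() {
    let config = Config {
        p2p: NetConfig {
            keypair: "keypair.bin".into(),
            listen_on: vec!["/ip4/0.0.0.0/tcp/0".into()],
        },
    };
    // Access goes through getters instead of reaching into private fields.
    println!("{} {:?}", config.p2p().keypair(), config.p2p().listen_on());
}
```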
diff --git a/verifier/src/runner.rs b/verifier/src/runner.rs
index d7a4e6c7..db1d84c6 100644
--- a/verifier/src/runner.rs
+++ b/verifier/src/runner.rs
@@ -1,3 +1,4 @@
+use getset::Getters;
 use openrank_common::{
     algos::{self, et::convergence_check},
     merkle::{
@@ -17,6 +18,8 @@ use std::{
     fmt::{Display, Formatter, Result as FmtResult},
 };
 
+#[derive(Getters)]
+#[getset(get = "pub")]
 /// Struct containing the state of the verification runner
 pub struct VerificationRunner {
     count: HashMap,
@@ -96,29 +99,29 @@ impl VerificationRunner {
             .ok_or(Error::SeedTrustNotFound(domain.to_hash()))?;
         let default_sub_tree = DenseIncrementalMerkleTree::::new(32);
         for entry in trust_entries {
-            let from_index = if let Some(i) = domain_indices.get(&entry.from) {
+            let from_index = if let Some(i) = domain_indices.get(entry.from()) {
                 *i
             } else {
                 let curr_count = *count;
-                domain_indices.insert(entry.from.clone(), curr_count);
+                domain_indices.insert(entry.from().clone(), curr_count);
                 *count += 1;
                 curr_count
             };
-            let to_index = if let Some(i) = domain_indices.get(&entry.to) {
+            let to_index = if let Some(i) = domain_indices.get(entry.to()) {
                 *i
             } else {
                 let curr_count = *count;
-                domain_indices.insert(entry.to.clone(), curr_count);
+                domain_indices.insert(entry.to().clone(), curr_count);
                 *count += 1;
                 curr_count
             };
-            lt.insert((from_index, to_index), entry.value);
+            lt.insert((from_index, to_index), *entry.value());
             lt_sub_trees.entry(from_index).or_insert_with(|| default_sub_tree.clone());
             let sub_tree = lt_sub_trees
                 .get_mut(&from_index)
                 .ok_or(Error::LocalTrustSubTreesNotFoundWithIndex(from_index))?;
-            let leaf = hash_leaf::(entry.value.to_be_bytes().to_vec());
+            let leaf = hash_leaf::(entry.value().to_be_bytes().to_vec());
             sub_tree.insert_leaf(to_index, leaf);
             let sub_tree_root = sub_tree.root().map_err(Error::Merkle)?;
@@ -154,11 +157,11 @@ impl VerificationRunner {
             .ok_or(Error::SeedTrustNotFound(domain.to_hash()))?;
         let default_sub_tree = DenseIncrementalMerkleTree::::new(32);
         for entry in seed_entries {
-            let index = if let Some(i) = domain_indices.get(&entry.id) {
+            let index = if let Some(i) = domain_indices.get(entry.id()) {
                 *i
             } else {
                 let curr_count = *count;
-                domain_indices.insert(entry.id.clone(), curr_count);
+                domain_indices.insert(entry.id().clone(), curr_count);
                 *count += 1;
                 curr_count
             };
@@ -168,11 +171,11 @@ impl VerificationRunner {
                 .get_mut(&index)
                 .ok_or(Error::LocalTrustSubTreesNotFoundWithIndex(index))?;
             let sub_tree_root = sub_tree.root().map_err(Error::Merkle)?;
-            let seed_hash = hash_leaf::(entry.value.to_be_bytes().to_vec());
+            let seed_hash = hash_leaf::(entry.value().to_be_bytes().to_vec());
             let leaf = hash_two::(sub_tree_root, seed_hash);
             lt_master_tree.insert_leaf(index, leaf);
-            seed.insert(index, entry.value);
+            seed.insert(index, *entry.value());
         }
 
         Ok(())
@@ -186,8 +189,8 @@ impl VerificationRunner {
             .compute_scores
             .get(&domain.clone().to_hash())
             .ok_or(Error::ComputeScoresNotFoundWithDomain(domain.to_hash()))?;
-        for score_tx in commitment.scores_tx_hashes {
-            let res = compute_scores_txs.contains_key(&score_tx);
+        for score_tx in commitment.scores_tx_hashes() {
+            let res = compute_scores_txs.contains_key(score_tx);
             if !res {
                 return Ok(false);
             }
@@ -211,8 +214,8 @@ impl VerificationRunner {
         self.check_scores_tx_hashes(domain.clone(), commitment.clone())?;
         if is_check_score_tx_hashes {
             let assgn_tx = assignment_id.clone();
-            let lt_root = commitment.lt_root_hash.clone();
-            let cp_root = commitment.compute_root_hash.clone();
+            let lt_root = commitment.lt_root_hash().clone();
+            let cp_root = commitment.compute_root_hash().clone();
             self.create_compute_tree(domain.clone(), assignment_id.clone())?;
 
             let (res_lt_root, res_compute_root) =
@@ -261,7 +264,7 @@ impl VerificationRunner {
 
     /// Add a new commitment of certain assignment
     pub fn update_commitment(&mut self, commitment: compute::Commitment) {
-        self.commitments.insert(commitment.assignment_tx_hash.clone(), commitment.clone());
+        self.commitments.insert(commitment.assignment_tx_hash().clone(), commitment.clone());
     }
 
     /// Build the compute tree of certain assignment, for certain domain.
@@ -282,7 +285,7 @@ impl VerificationRunner {
             .ok_or(Error::ComputeScoresNotFoundWithDomain(domain.to_hash()))?;
         let scores: Vec<&compute::Scores> = {
             let mut scores = Vec::new();
-            for tx_hash in commitment.scores_tx_hashes.iter() {
+            for tx_hash in commitment.scores_tx_hashes().iter() {
                 scores.push(
                     compute_scores
                         .get(tx_hash)
@@ -292,7 +295,7 @@ impl VerificationRunner {
             scores
         };
         let score_entries: Vec =
-            scores.iter().flat_map(|cs| cs.entries.clone()).map(|x| x.value).collect();
+            scores.iter().flat_map(|cs| cs.entries().clone()).map(|x| *x.value()).collect();
         let score_hashes: Vec = score_entries
             .iter()
             .map(|&x| hash_leaf::(x.to_be_bytes().to_vec()))
@@ -328,7 +331,7 @@ impl VerificationRunner {
             .ok_or(Error::SeedTrustNotFound(domain.to_hash()))?;
         let scores: Vec<&compute::Scores> = {
             let mut scores = Vec::new();
-            for tx_hash in commitment.scores_tx_hashes.iter() {
+            for tx_hash in commitment.scores_tx_hashes().iter() {
                 scores.push(
                     compute_scores
                         .get(tx_hash)
@@ -339,14 +342,14 @@ impl VerificationRunner {
         };
         let score_entries: HashMap = {
             let score_entries_vec: Vec =
-                scores.iter().flat_map(|cs| cs.entries.clone()).collect();
+                scores.iter().flat_map(|cs| cs.entries().clone()).collect();
             let mut score_entries_map: HashMap = HashMap::new();
             for entry in score_entries_vec {
                 let i = domain_indices
-                    .get(&entry.id)
-                    .ok_or(Error::DomainIndexNotFound(entry.id.clone()))?;
-                score_entries_map.insert(*i, entry.value);
+                    .get(entry.id())
+                    .ok_or(Error::DomainIndexNotFound(entry.id().clone()))?;
+                score_entries_map.insert(*i, *entry.value());
             }
             score_entries_map
         };
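The runner hunks above follow the same access pattern everywhere: the derived getters return references, so `Copy` fields are dereferenced (`*entry.value()`) while owned fields are cloned (`entry.id().clone()`). A small illustrative sketch with a simplified stand-in for the score-entry type (not the real definition from openrank-common):

```rust
// Simplified stand-in; the real type carries the same id/value shape.
struct ScoreEntry {
    id: String,
    value: f32,
}

// Hand-written equivalents of the getters a Getters derive would produce.
impl ScoreEntry {
    fn id(&self) -> &String { &self.id }
    fn value(&self) -> &f32 { &self.value }
}

fn main() {
    let entry = ScoreEntry { id: "alice".to_string(), value: 0.42 };

    // f32 is Copy: dereferencing the getter yields an owned copy.
    let score: f32 = *entry.value();

    // String is not Copy: clone to get an owned id out of the reference.
    let id: String = entry.id().clone();

    println!("{id}: {score}");
}
```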