diff --git a/Cargo.lock b/Cargo.lock index 0f140edcb3..dad0549996 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3666,7 +3666,7 @@ dependencies = [ "ark-relations", "ark-serialize", "ark-std", - "borsh 0.9.3", + "borsh 0.10.3", "bytemuck", "color-eyre", "duct", @@ -9490,6 +9490,7 @@ name = "xtask" version = "1.1.0" dependencies = [ "account-compression", + "anchor-lang", "anyhow", "ark-bn254", "ark-ff", @@ -9499,12 +9500,15 @@ dependencies = [ "light-hash-set", "light-hasher", "light-indexed-merkle-tree", + "light-registry", "light-utils", "num-bigint 0.4.6", "quote", "rand 0.8.5", "sha2 0.10.8", + "solana-client", "solana-program", + "solana-sdk", "tabled", ] diff --git a/js/stateless.js/README.md b/js/stateless.js/README.md index 2642fc16cb..5f75ed4911 100644 --- a/js/stateless.js/README.md +++ b/js/stateless.js/README.md @@ -21,8 +21,8 @@ This package provides server and web applications with clients, utilities, and types to leverage the power of [ZK Compression](https://www.zkcompression.com/) on Solana via the Compression RPC API. > The core ZK Compression Solana programs and clients are maintained by -[Light](https://github.com/lightprotocol) as a part of the Light Protocol. The RPC API and indexer are maintained by -[Helius Labs](https://github.com/helius-labs). +> [Light](https://github.com/lightprotocol) as a part of the Light Protocol. The RPC API and indexer are maintained by +> [Helius Labs](https://github.com/helius-labs). ## Usage @@ -39,8 +39,8 @@ npm install --save \ ### Dependencies -- [`@solana/web3.js`](https://www.npmjs.com/package/@solana/web3.js) — provides access to the Solana network via RPC. -- [`@coral-xyz/anchor`](https://www.npmjs.com/package/@coral-xyz/anchor) — a client for [Anchor](https://www.anchor-lang.com/) Solana programs. +- [`@solana/web3.js`](https://www.npmjs.com/package/@solana/web3.js) — provides access to the Solana network via RPC. 
+- [`@coral-xyz/anchor`](https://www.npmjs.com/package/@coral-xyz/anchor) — a client for [Anchor](https://www.anchor-lang.com/) Solana programs. ## Documentation and Examples @@ -48,9 +48,9 @@ For a more detailed documentation on usage, please check [the respective section For example implementations, including web and server, refer to the respective repositories: -- [Web application example implementation](https://github.com/Lightprotocol/example-web-client) +- [Web application example implementation](https://github.com/Lightprotocol/example-web-client) -- [Node server example implementation](https://github.com/Lightprotocol/example-nodejs-client) +- [Node server example implementation](https://github.com/Lightprotocol/example-nodejs-client) ## Troubleshooting @@ -65,14 +65,14 @@ Feel free to ask in the [Light](https://discord.gg/CYvjBgzRFP) and [Helius](http Light and ZK Compression are open source protocols and very much welcome contributions. If you have a contribution, do not hesitate to send a PR to the respective repository or discuss in the linked developer Discord servers. -- 🐞 For bugs or feature requests, please open an -[issue](https://github.com/lightprotocol/lightprotocol/issues/new). -- 🔒 For security vulnerabilities, please follow the [security policy](https://github.com/Lightprotocol/light-protocol/blob/main/SECURITY.md). +- 🐞 For bugs or feature requests, please open an + [issue](https://github.com/lightprotocol/lightprotocol/issues/new). +- 🔒 For security vulnerabilities, please follow the [security policy](https://github.com/Lightprotocol/light-protocol/blob/main/SECURITY.md). 
## Additional Resources -- [Light Protocol Repository](https://github.com/Lightprotocol/light-protocol) -- [ZK Compression Official Documentation](https://www.zkcompression.com/) +- [Light Protocol Repository](https://github.com/Lightprotocol/light-protocol) +- [ZK Compression Official Documentation](https://www.zkcompression.com/) ## Disclaimer diff --git a/js/stateless.js/src/utils/address.ts b/js/stateless.js/src/utils/address.ts index 255a494199..659bfefd45 100644 --- a/js/stateless.js/src/utils/address.ts +++ b/js/stateless.js/src/utils/address.ts @@ -23,7 +23,8 @@ export function deriveAddressSeed( */ export function deriveAddress( seed: Uint8Array, - addressMerkleTreePubkey: PublicKey = defaultTestStateTreeAccounts().addressTree, + addressMerkleTreePubkey: PublicKey = defaultTestStateTreeAccounts() + .addressTree, ): PublicKey { if (seed.length != 32) { throw new Error('Seed length is not 32 bytes.'); diff --git a/xtask/Cargo.toml b/xtask/Cargo.toml index f9afdfa281..51f60c3a59 100644 --- a/xtask/Cargo.toml +++ b/xtask/Cargo.toml @@ -5,6 +5,7 @@ edition = "2021" [dependencies] account-compression = { workspace = true } +light-registry = { workspace = true } anyhow = "1.0" ark-bn254 = "0.4" ark-ff = "0.4" @@ -21,3 +22,7 @@ quote = "1.0" sha2 = "0.10" solana-program = { workspace = true } tabled = "0.15" +solana-sdk.workspace = true +solana-client = { workspace = true } +anchor-lang = { workspace = true } + diff --git a/xtask/src/forester_stats.rs b/xtask/src/forester_stats.rs new file mode 100644 index 0000000000..9dcc66d926 --- /dev/null +++ b/xtask/src/forester_stats.rs @@ -0,0 +1,201 @@ +use account_compression::{AddressMerkleTreeAccount, QueueAccount, StateMerkleTreeAccount}; +use anchor_lang::{AccountDeserialize, Discriminator}; +use clap::Parser; +use light_concurrent_merkle_tree::copy::ConcurrentMerkleTreeCopy; +use light_hash_set::HashSet; +use light_hasher::Poseidon; +use light_registry::{protocol_config::state::ProtocolConfigPda, 
EpochPda, ForesterEpochPda}; +use solana_sdk::{account::ReadableAccount, commitment_config::CommitmentConfig}; +#[derive(Debug, Parser)] +pub struct Options { + /// Select to run compressed token program tests. + #[clap(long)] + full: bool, + #[clap(long)] + protocol_config: bool, + #[clap(long, default_value_t = true)] + queue: bool, +} + +pub fn fetch_foreter_stats(opts: Options) -> anyhow::Result<()> { + let commitment_config = CommitmentConfig::confirmed(); + let rpc_url = std::env::var("RPC_URL") + .expect("RPC_URL environment variable not set, export RPC_URL="); + + let client = + solana_client::rpc_client::RpcClient::new_with_commitment(rpc_url, commitment_config); + let registry_accounts = client + .get_program_accounts(&light_registry::ID) + .expect("Failed to fetch accounts for registry program."); + + let mut forester_epoch_pdas = vec![]; + let mut epoch_pdas = vec![]; + let mut protocol_config_pdas = vec![]; + for (_, account) in registry_accounts { + match account.data()[0..8].try_into().unwrap() { + ForesterEpochPda::DISCRIMINATOR => { + let forester_epoch_pda = + ForesterEpochPda::try_deserialize_unchecked(&mut account.data()) + .expect("Failed to deserialize ForesterEpochPda"); + forester_epoch_pdas.push(forester_epoch_pda); + } + EpochPda::DISCRIMINATOR => { + let epoch_pda = EpochPda::try_deserialize_unchecked(&mut account.data()) + .expect("Failed to deserialize EpochPda"); + epoch_pdas.push(epoch_pda); + } + ProtocolConfigPda::DISCRIMINATOR => { + let protocol_config_pda = + ProtocolConfigPda::try_deserialize_unchecked(&mut account.data()) + .expect("Failed to deserialize ProtocolConfigPda"); + protocol_config_pdas.push(protocol_config_pda); + } + _ => (), + } + } + forester_epoch_pdas.sort_by(|a, b| a.epoch.cmp(&b.epoch)); + epoch_pdas.sort_by(|a, b| a.epoch.cmp(&b.epoch)); + let slot = client.get_slot().expect("Failed to fetch slot."); + let current_active_epoch = protocol_config_pdas[0] + .config + .get_current_active_epoch(slot) + .unwrap(); 
+ let current_registration_epoch = protocol_config_pdas[0] + .config + .get_latest_register_epoch(slot) + .unwrap(); + println!("Current active epoch: {:?}", current_active_epoch); + + println!( + "Current registration epoch: {:?}", + current_registration_epoch + ); + + println!( + "Forester registered for latest epoch: {:?}", + forester_epoch_pdas + .iter() + .any(|pda| pda.epoch == current_registration_epoch) + ); + println!( + "Forester registered for active epoch: {:?}", + forester_epoch_pdas + .iter() + .any(|pda| pda.epoch == current_active_epoch) + ); + println!( + "current active epoch progress {:?} / {}", + protocol_config_pdas[0] + .config + .get_current_active_epoch_progress(slot), + protocol_config_pdas[0].config.active_phase_length + ); + println!( + "current active epoch progress {:?}%", + protocol_config_pdas[0] + .config + .get_current_active_epoch_progress(slot) as f64 + / protocol_config_pdas[0].config.active_phase_length as f64 + * 100f64 + ); + println!("Hours until next epoch : {:?} hours", { + // slotduration is 460ms and 1000ms is 1 second and 3600 seconds is 1 hour + protocol_config_pdas[0] + .config + .active_phase_length + .saturating_sub( + protocol_config_pdas[0] + .config + .get_current_active_epoch_progress(slot), + ) + * 460 + / 1000 + / 3600 + }); + let slots_until_next_registration = protocol_config_pdas[0] + .config + .registration_phase_length + .saturating_sub( + protocol_config_pdas[0] + .config + .get_current_active_epoch_progress(slot), + ); + println!( + "Slots until next registration : {:?}", + slots_until_next_registration + ); + println!( + "Hours until next registration : {:?} hours", + // slotduration is 460ms and 1000ms is 1 second and 3600 seconds is 1 hour + slots_until_next_registration * 460 / 1000 / 3600 + ); + if opts.full { + for epoch in &epoch_pdas { + println!("Epoch: {:?}", epoch.epoch); + let registered_foresters_in_epoch = forester_epoch_pdas + .iter() + .filter(|pda| pda.epoch == epoch.epoch); + for 
forester in registered_foresters_in_epoch { + println!("Forester authority: {:?}", forester.authority); + } + } + } + if opts.protocol_config { + println!("protocol config: {:?}", protocol_config_pdas[0]); + } + if opts.queue { + let account_compression_accounts = client + .get_program_accounts(&account_compression::ID) + .expect("Failed to fetch accounts for account compression program."); + for (pubkey, mut account) in account_compression_accounts { + match account.data()[0..8].try_into().unwrap() { + QueueAccount::DISCRIMINATOR => { + unsafe { + let queue = HashSet::from_bytes_copy( + &mut account.data[8 + std::mem::size_of::()..], + ) + .unwrap(); + + println!("Queue account: {:?}", pubkey); + let mut num_of_marked_items = 0; + for i in 0..queue.get_capacity() { + if queue.get_unmarked_bucket(i).is_some() { + num_of_marked_items += 1; + } + } + println!( + "queue num of unmarked items: {:?} / {}", + num_of_marked_items, + queue.get_capacity() / 2 // div by 2 because only half of the hash set can be used before tx start to fail + ); + } + } + StateMerkleTreeAccount::DISCRIMINATOR => { + println!("State Merkle tree: {:?}", pubkey); + let merkle_tree = ConcurrentMerkleTreeCopy::::from_bytes_copy( + &account.data[8 + std::mem::size_of::()..], + ) + .unwrap(); + println!( + "State Merkle tree next index {:?}", + merkle_tree.next_index() + ); + } + AddressMerkleTreeAccount::DISCRIMINATOR => { + println!("Address Merkle tree: {:?}", pubkey); + let merkle_tree = ConcurrentMerkleTreeCopy::::from_bytes_copy( + &account.data[8 + std::mem::size_of::()..], + ) + .unwrap(); + println!( + "Address Merkle tree next index {:?}", + merkle_tree.next_index() + ); + } + _ => (), + } + } + } + + Ok(()) +} diff --git a/xtask/src/main.rs b/xtask/src/main.rs index b17e6b89e6..efc6f9bd8f 100644 --- a/xtask/src/main.rs +++ b/xtask/src/main.rs @@ -3,6 +3,7 @@ use clap::{Parser, ValueEnum}; mod bench; mod create_vkeyrs_from_gnark_key; mod fee; +mod forester_stats; mod hash_set; mod 
type_sizes; mod zero_bytes. @@ -38,6 +39,8 @@ enum Command { Fee, /// Hash set utilities. HashSet(hash_set::HashSetOptions), + /// Forester status and queue statistics. + ForesterStats(forester_stats::Options), } fn main() -> Result<(), anyhow::Error> { @@ -55,5 +58,6 @@ fn main() -> Result<(), anyhow::Error> { Command::Bench(opts) => bench::bench(opts), Command::Fee => fee::fees(), Command::HashSet(opts) => hash_set::hash_set(opts), + Command::ForesterStats(opts) => forester_stats::fetch_foreter_stats(opts), } }